| hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
c284e8b2636297515bf1e299808f0cb228815c25.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from bicgstab into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_zbicgstab_1_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex omega,
magmaDoubleComplex *r,
magmaDoubleComplex *v,
magmaDoubleComplex *p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
p[ i+j*num_rows ] = r[ i+j*num_rows ] +
beta * ( p[ i+j*num_rows ] - omega * v[ i+j*num_rows ] );
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
p = r + beta * ( p - omega * v )
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta magmaDoubleComplex
scalar
@param[in]
omega magmaDoubleComplex
scalar
@param[in]
r magmaDoubleComplex_ptr
vector
@param[in]
v magmaDoubleComplex_ptr
vector
@param[in,out]
p magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zbicgstab_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex omega,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zbicgstab_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, omega,
r, v, p );
return MAGMA_SUCCESS;
}
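/* Illustrative host-side call (a sketch only, not taken from MAGMA itself; the names
   m, n, r, v, p below are placeholders): for m x n right-hand sides already resident
   on the device, a single call replaces the separate scal/axpy updates behind
   p = r + beta * ( p - omega * v ):

       magma_zbicgstab_1( m, n, beta, omega, r, v, p, queue );
*/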
__global__ void
magma_zbicgstab_2_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr s )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
s[ i+j*num_rows ] = r[ i+j*num_rows ] - alpha * v[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
s = r - alpha v
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
r magmaDoubleComplex_ptr
vector
@param[in]
v magmaDoubleComplex_ptr
vector
@param[in,out]
s magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zbicgstab_2(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr s,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zbicgstab_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, r, v, s );
return MAGMA_SUCCESS;
}
__global__ void
magma_zbicgstab_3_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex omega,
magmaDoubleComplex *p,
magmaDoubleComplex *s,
magmaDoubleComplex *t,
magmaDoubleComplex *x,
magmaDoubleComplex *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex tmp = s[ i+j*num_rows ];
x[ i+j*num_rows ] = x[ i+j*num_rows ]
+ alpha * p[ i+j*num_rows ] + omega * tmp;
r[ i+j*num_rows ] = tmp - omega * t[ i+j*num_rows ];
}
}
}
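// Note: s[ i+j*num_rows ] is loaded into tmp once and reused for both the x and the r
// update, so the fused kernel saves one global read per element, in addition to a
// kernel launch, compared to performing the two updates separately.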
/**
Purpose
-------
Merges multiple operations into one kernel:
x = x + alpha * p + omega * s
r = s - omega * t
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
omega magmaDoubleComplex
scalar
@param[in]
p magmaDoubleComplex_ptr
vector
@param[in]
s magmaDoubleComplex_ptr
vector
@param[in]
t magmaDoubleComplex_ptr
vector
@param[in,out]
x magmaDoubleComplex_ptr
vector
@param[in,out]
r magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zbicgstab_3(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex omega,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr t,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zbicgstab_3_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, omega, p, s, t, x, r );
return MAGMA_SUCCESS;
}
__global__ void
magma_zbicgstab_4_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex omega,
magmaDoubleComplex *y,
magmaDoubleComplex *z,
magmaDoubleComplex *s,
magmaDoubleComplex *t,
magmaDoubleComplex *x,
magmaDoubleComplex *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
x[ i+j*num_rows ] = x[ i+j*num_rows ]
+ alpha * y[ i+j*num_rows ] + omega * z[ i+j*num_rows ];
r[ i+j*num_rows ] = s[ i+j*num_rows ] - omega * t[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
x = x + alpha * y + omega * z
r = s - omega * t
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
omega magmaDoubleComplex
scalar
@param[in]
y magmaDoubleComplex_ptr
vector
@param[in]
z magmaDoubleComplex_ptr
vector
@param[in]
s magmaDoubleComplex_ptr
vector
@param[in]
t magmaDoubleComplex_ptr
vector
@param[in,out]
x magmaDoubleComplex_ptr
vector
@param[in,out]
r magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zbicgstab_4(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex omega,
magmaDoubleComplex_ptr y,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr t,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zbicgstab_4_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, omega, y, z, s, t, x, r );
return MAGMA_SUCCESS;
}
| c284e8b2636297515bf1e299808f0cb228815c25.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from bicgstab into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_zbicgstab_1_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex omega,
magmaDoubleComplex *r,
magmaDoubleComplex *v,
magmaDoubleComplex *p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
p[ i+j*num_rows ] = r[ i+j*num_rows ] +
beta * ( p[ i+j*num_rows ] - omega * v[ i+j*num_rows ] );
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
p = r + beta * ( p - omega * v )
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta magmaDoubleComplex
scalar
@param[in]
omega magmaDoubleComplex
scalar
@param[in]
r magmaDoubleComplex_ptr
vector
@param[in]
v magmaDoubleComplex_ptr
vector
@param[in,out]
p magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zbicgstab_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex omega,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zbicgstab_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, omega,
r, v, p );
return MAGMA_SUCCESS;
}
__global__ void
magma_zbicgstab_2_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr s )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
s[ i+j*num_rows ] = r[ i+j*num_rows ] - alpha * v[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
s = r - alpha v
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
r magmaDoubleComplex_ptr
vector
@param[in]
v magmaDoubleComplex_ptr
vector
@param[in,out]
s magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zbicgstab_2(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr s,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zbicgstab_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, r, v, s );
return MAGMA_SUCCESS;
}
__global__ void
magma_zbicgstab_3_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex omega,
magmaDoubleComplex *p,
magmaDoubleComplex *s,
magmaDoubleComplex *t,
magmaDoubleComplex *x,
magmaDoubleComplex *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex tmp = s[ i+j*num_rows ];
x[ i+j*num_rows ] = x[ i+j*num_rows ]
+ alpha * p[ i+j*num_rows ] + omega * tmp;
r[ i+j*num_rows ] = tmp - omega * t[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
x = x + alpha * p + omega * s
r = s - omega * t
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
omega magmaDoubleComplex
scalar
@param[in]
p magmaDoubleComplex_ptr
vector
@param[in]
s magmaDoubleComplex_ptr
vector
@param[in]
t magmaDoubleComplex_ptr
vector
@param[in,out]
x magmaDoubleComplex_ptr
vector
@param[in,out]
r magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zbicgstab_3(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex omega,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr t,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zbicgstab_3_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, omega, p, s, t, x, r );
return MAGMA_SUCCESS;
}
__global__ void
magma_zbicgstab_4_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex omega,
magmaDoubleComplex *y,
magmaDoubleComplex *z,
magmaDoubleComplex *s,
magmaDoubleComplex *t,
magmaDoubleComplex *x,
magmaDoubleComplex *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
x[ i+j*num_rows ] = x[ i+j*num_rows ]
+ alpha * y[ i+j*num_rows ] + omega * z[ i+j*num_rows ];
r[ i+j*num_rows ] = s[ i+j*num_rows ] - omega * t[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
x = x + alpha * y + omega * z
r = s - omega * t
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
omega magmaDoubleComplex
scalar
@param[in]
y magmaDoubleComplex_ptr
vector
@param[in]
z magmaDoubleComplex_ptr
vector
@param[in]
s magmaDoubleComplex_ptr
vector
@param[in]
t magmaDoubleComplex_ptr
vector
@param[in,out]
x magmaDoubleComplex_ptr
vector
@param[in,out]
r magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zbicgstab_4(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex omega,
magmaDoubleComplex_ptr y,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr t,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zbicgstab_4_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, omega, y, z, s, t, x, r );
return MAGMA_SUCCESS;
}
|
59b02bb3f55f37c9c557d623848eadaf5a27984e.hip | // !!! This is a file automatically generated by hipify!!!
//Author: Ugo Varetto
//Parallel dot product with timing. Link with librt (-lrt)
//#include <hip/hip_runtime.h> // automatically added by nvcc
#include <vector>
#include <iostream>
#include <numeric>
#include <ctime>
typedef double real_t;
const size_t BLOCK_SIZE = 1024;
//------------------------------------------------------------------------------
double time_diff_ms(const timespec& start, const timespec& end) {
return end.tv_sec * 1E3 + end.tv_nsec / 1E6
- (start.tv_sec * 1E3 + start.tv_nsec / 1E6);
}
__global__ void partial_dot( const real_t* v1, const real_t* v2, real_t* out, int N ) {
__shared__ real_t cache[ BLOCK_SIZE ];
int i = blockIdx.x * blockDim.x + threadIdx.x;
cache[ threadIdx.x ] = 0.f;
while( i < N ) {
cache[ threadIdx.x ] += v1[ i ] * v2[ i ];
i += gridDim.x * blockDim.x;
}
__syncthreads(); // required because later on the current thread is accessing
// data written by another thread
i = BLOCK_SIZE / 2;
while( i > 0 ) {
if( threadIdx.x < i ) cache[ threadIdx.x ] += cache[ threadIdx.x + i ];
__syncthreads();
i /= 2; //not sure bitwise operations are actually faster
}
if( threadIdx.x == 0 ) out[ blockIdx.x ] = cache[ 0 ];
}
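// Reduction pattern: each block first accumulates its grid-strided products into shared
// memory, then halves the number of active threads per step (1024 -> 512 -> ... -> 1)
// until cache[ 0 ] holds the block's partial sum; the host finishes the dot product by
// summing the per-block partials (std::accumulate in main below).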
real_t dot( const real_t* v1, const real_t* v2, int N ) {
real_t s = 0;
for( int i = 0; i != N; ++i ) {
s += v1[ i ] * v2[ i ];
}
return s;
}
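// Blocked variant of dot(): copies block_size-long chunks of both inputs into small
// std::vectors before multiplying, presumably to keep the working set cache-resident;
// main() uses it as the host-side reference timing with block_size = 16384.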
real_t dot_block( const real_t* v1, const real_t* v2, int N, int block_size ) {
std::vector< real_t > b1(block_size);
std::vector< real_t > b2(block_size);
real_t s = 0;
for( int i = 0; i < N; i += block_size ) {
std::copy(v1 + i, v1 + i + block_size, b1.begin());
std::copy(v2 + i, v2 + i + block_size, b2.begin());
s += dot(&b1[0], &b2[0], block_size);
}
return s;
}
__global__ void init_vector( real_t* v, int N ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
while( i < N ) {
v[ i ] = 1.0f;//real_t( i ) / 1000000.f;
i += gridDim.x * blockDim.x;
}
}
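// Both init_vector and partial_dot use a grid-stride loop (i += gridDim.x * blockDim.x),
// so they stay correct even when the launch covers fewer threads than there are elements.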
//------------------------------------------------------------------------------
int main(int argc, char** argv ) {
const size_t ARRAY_SIZE = 1024 * 1024 * 256; //256Mi elements
const int THREADS_PER_BLOCK = 1024;
const int BLOCKS = ARRAY_SIZE / THREADS_PER_BLOCK;//512;
const size_t SIZE = ARRAY_SIZE * sizeof( real_t );
// device storage
real_t* dev_v1 = 0; // vector 1
real_t* dev_v2 = 0; // vector 2
real_t* dev_vout = 0; // partial reduction; one element per block
hipMalloc( &dev_v1, SIZE );
hipMalloc( &dev_v2, SIZE );
hipMalloc( &dev_vout, BLOCKS * sizeof( real_t ) );
// host storage
std::vector< real_t > host_v1( ARRAY_SIZE );
std::vector< real_t > host_v2( ARRAY_SIZE );
std::vector< real_t > host_vout( BLOCKS );
// initialize vector 1 with kernel; much faster than using for loops on the cpu
hipLaunchKernelGGL(( init_vector), dim3(BLOCKS), dim3(THREADS_PER_BLOCK) , 0, 0, dev_v1, ARRAY_SIZE );
hipMemcpy( &host_v1[ 0 ], dev_v1, SIZE, hipMemcpyDeviceToHost );
// initialize vector 2 with kernel; much faster than using for loops on the cpu
hipLaunchKernelGGL(( init_vector), dim3(BLOCKS), dim3(THREADS_PER_BLOCK) , 0, 0, dev_v2, ARRAY_SIZE );
hipMemcpy( &host_v2[ 0 ], dev_v2, SIZE, hipMemcpyDeviceToHost );
timespec s, e;
clock_gettime(CLOCK_MONOTONIC, &s);
// execute kernel
hipLaunchKernelGGL(( partial_dot), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, dev_v1, dev_v2, dev_vout, ARRAY_SIZE );
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &e);
const double elapsed = time_diff_ms(s, e);
// copy output data from device(gpu) to host(cpu)
clock_gettime(CLOCK_MONOTONIC, &s);
hipMemcpy( &host_vout[ 0 ], dev_vout, BLOCKS * sizeof( real_t ), hipMemcpyDeviceToHost );
clock_gettime(CLOCK_MONOTONIC, &e);
const double transferTime = time_diff_ms(s, e);
clock_gettime(CLOCK_MONOTONIC, &s);
const real_t device_dot = std::accumulate( host_vout.begin(), host_vout.end(), real_t( 0 ) );
clock_gettime(CLOCK_MONOTONIC, &e);
const double acc = time_diff_ms(s, e);
//dot product on host
clock_gettime(CLOCK_MONOTONIC, &s);
//const real_t host_dot = std::inner_product(host_v1.begin(), host_v1.end(), host_v2.begin(), real_t(0));
const real_t host_dot = dot_block( &host_v1[ 0 ], &host_v2[ 0 ], ARRAY_SIZE, 16384);
clock_gettime(CLOCK_MONOTONIC, &e);
const double host_time = time_diff_ms(s, e);
// print dot product by summing up the partially reduced vectors
std::cout << "GPU: " << device_dot << std::endl;
// print dot product on cpu
std::cout << "CPU: " << host_dot << std::endl;
//std::cout << "CPU: " << dot( &host_v1[ 0 ], &host_v2[ 0 ], ARRAY_SIZE ) << std::endl;
std::cout << "ELAPSED TIME(ms) kernel + cpu sum: " << elapsed << " + " << acc << " = " << (elapsed + acc) << std::endl;
std::cout << "TRANSFER TIME(ms): " << transferTime << std::endl;
std::cout << "HOST TIME: " << host_time << std::endl;
// free memory
hipFree( dev_v1 );
hipFree( dev_v2 );
hipFree( dev_vout );
return 0;
}
| 59b02bb3f55f37c9c557d623848eadaf5a27984e.cu | //Author: Ugo Varetto
//Parallel dot product with timing. Link with librt (-lrt)
//#include <cuda_runtime.h> // automatically added by nvcc
#include <vector>
#include <iostream>
#include <numeric>
#include <ctime>
typedef double real_t;
const size_t BLOCK_SIZE = 1024;
//------------------------------------------------------------------------------
double time_diff_ms(const timespec& start, const timespec& end) {
return end.tv_sec * 1E3 + end.tv_nsec / 1E6
- (start.tv_sec * 1E3 + start.tv_nsec / 1E6);
}
__global__ void partial_dot( const real_t* v1, const real_t* v2, real_t* out, int N ) {
__shared__ real_t cache[ BLOCK_SIZE ];
int i = blockIdx.x * blockDim.x + threadIdx.x;
cache[ threadIdx.x ] = 0.f;
while( i < N ) {
cache[ threadIdx.x ] += v1[ i ] * v2[ i ];
i += gridDim.x * blockDim.x;
}
__syncthreads(); // required because later on the current thread is accessing
// data written by another thread
i = BLOCK_SIZE / 2;
while( i > 0 ) {
if( threadIdx.x < i ) cache[ threadIdx.x ] += cache[ threadIdx.x + i ];
__syncthreads();
i /= 2; //not sure bitwise operations are actually faster
}
if( threadIdx.x == 0 ) out[ blockIdx.x ] = cache[ 0 ];
}
real_t dot( const real_t* v1, const real_t* v2, int N ) {
real_t s = 0;
for( int i = 0; i != N; ++i ) {
s += v1[ i ] * v2[ i ];
}
return s;
}
real_t dot_block( const real_t* v1, const real_t* v2, int N, int block_size ) {
std::vector< real_t > b1(block_size);
std::vector< real_t > b2(block_size);
real_t s = 0;
for( int i = 0; i < N; i += block_size ) {
std::copy(v1 + i, v1 + i + block_size, b1.begin());
std::copy(v2 + i, v2 + i + block_size, b2.begin());
s += dot(&b1[0], &b2[0], block_size);
}
return s;
}
__global__ void init_vector( real_t* v, int N ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
while( i < N ) {
v[ i ] = 1.0f;//real_t( i ) / 1000000.f;
i += gridDim.x * blockDim.x;
}
}
//------------------------------------------------------------------------------
int main(int argc, char** argv ) {
const size_t ARRAY_SIZE = 1024 * 1024 * 256; //256Mi elements
const int THREADS_PER_BLOCK = 1024;
const int BLOCKS = ARRAY_SIZE / THREADS_PER_BLOCK;//512;
const size_t SIZE = ARRAY_SIZE * sizeof( real_t );
// device storage
real_t* dev_v1 = 0; // vector 1
real_t* dev_v2 = 0; // vector 2
real_t* dev_vout = 0; // partial reduction; one element per block
cudaMalloc( &dev_v1, SIZE );
cudaMalloc( &dev_v2, SIZE );
cudaMalloc( &dev_vout, BLOCKS * sizeof( real_t ) );
// host storage
std::vector< real_t > host_v1( ARRAY_SIZE );
std::vector< real_t > host_v2( ARRAY_SIZE );
std::vector< real_t > host_vout( BLOCKS );
// initialize vector 1 with kernel; much faster than using for loops on the cpu
init_vector<<< BLOCKS, THREADS_PER_BLOCK >>>( dev_v1, ARRAY_SIZE );
cudaMemcpy( &host_v1[ 0 ], dev_v1, SIZE, cudaMemcpyDeviceToHost );
// initialize vector 2 with kernel; much faster than using for loops on the cpu
init_vector<<< BLOCKS, THREADS_PER_BLOCK >>>( dev_v2, ARRAY_SIZE );
cudaMemcpy( &host_v2[ 0 ], dev_v2, SIZE, cudaMemcpyDeviceToHost );
timespec s, e;
clock_gettime(CLOCK_MONOTONIC, &s);
// execute kernel
partial_dot<<<BLOCKS, THREADS_PER_BLOCK>>>( dev_v1, dev_v2, dev_vout, ARRAY_SIZE );
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &e);
const double elapsed = time_diff_ms(s, e);
// copy output data from device(gpu) to host(cpu)
clock_gettime(CLOCK_MONOTONIC, &s);
cudaMemcpy( &host_vout[ 0 ], dev_vout, BLOCKS * sizeof( real_t ), cudaMemcpyDeviceToHost );
clock_gettime(CLOCK_MONOTONIC, &e);
const double transferTime = time_diff_ms(s, e);
clock_gettime(CLOCK_MONOTONIC, &s);
const real_t device_dot = std::accumulate( host_vout.begin(), host_vout.end(), real_t( 0 ) );
clock_gettime(CLOCK_MONOTONIC, &e);
const double acc = time_diff_ms(s, e);
//dot product on host
clock_gettime(CLOCK_MONOTONIC, &s);
//const real_t host_dot = std::inner_product(host_v1.begin(), host_v1.end(), host_v2.begin(), real_t(0));
const real_t host_dot = dot_block( &host_v1[ 0 ], &host_v2[ 0 ], ARRAY_SIZE, 16384);
clock_gettime(CLOCK_MONOTONIC, &e);
const double host_time = time_diff_ms(s, e);
// print dot product by summing up the partially reduced vectors
std::cout << "GPU: " << device_dot << std::endl;
// print dot product on cpu
std::cout << "CPU: " << host_dot << std::endl;
//std::cout << "CPU: " << dot( &host_v1[ 0 ], &host_v2[ 0 ], ARRAY_SIZE ) << std::endl;
std::cout << "ELAPSED TIME(ms) kernel + cpu sum: " << elapsed << " + " << acc << " = " << (elapsed + acc) << std::endl;
std::cout << "TRANSFER TIME(ms): " << transferTime << std::endl;
std::cout << "HOST TIME: " << host_time << std::endl;
// free memory
cudaFree( dev_v1 );
cudaFree( dev_v2 );
cudaFree( dev_vout );
return 0;
}
|
9eabed7c0939c6ca24464f673e62f4adc3b6d258.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
#define SIZE 100
#define n 2
using namespace std;
__global__ void vec_add(int *x,int *y,int *z)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
z[id]=x[id]+y[id];
}
__global__ void vec_mat_mul(int *mat,int *vec,int *o)
{
int x = threadIdx.x;
printf("\n%d",x);
o[x]=0;
for(int k=0;k<n;k++)
o[x]=o[x]+vec[k]*mat[x*n+k]; // row x of mat dotted with vec
}
__global__ void mat_mul(int *a,int *b,int* c)
{
int x = threadIdx.x;
int y = threadIdx.y;
c[n*y+x]=0; //here col2
for(int k=0;k<n;k++) //here col1
c[n*y+x]=c[n*y+x]+a[n*y+k]*b[n*k+x]; //col2,col2,col1,col2
}
int main()
{
//vec_add
int a[SIZE],b[SIZE],c[SIZE];
int *d,*e,*f;
for(int i=0;i<SIZE;i++)
a[i]=b[i]=i;
hipMalloc((void**)&d,SIZE*sizeof(int));
hipMalloc((void**)&e,SIZE*sizeof(int));
hipMalloc((void**)&f,SIZE*sizeof(int));
hipMemcpy(d,a,SIZE*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(e,b,SIZE*sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vec_add), dim3(5),dim3(20), 0, 0, d,e,f);
hipMemcpy(c,f,SIZE*sizeof(int),hipMemcpyDeviceToHost);
printf("%d",c[50]);
//mat_mul
int mat1[n][n],mat2[n][n],mat3[n][n];
int *g,*h,*l;
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
mat1[i][j]=mat2[i][j]=1;
}}
hipMalloc((void**)&g,n*n*sizeof(int));
hipMalloc((void**)&h,n*n*sizeof(int));
hipMalloc((void**)&l,n*n*sizeof(int));
hipMemcpy(g,mat1,n*n*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(h,mat2,n*n*sizeof(int),hipMemcpyHostToDevice);
dim3 threadBlock(n,n); //col2 row1
hipLaunchKernelGGL(( mat_mul), dim3(1),dim3(threadBlock), 0, 0, g,h,l);
hipMemcpy(mat3,l,n*n*sizeof(int),hipMemcpyDeviceToHost);
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
printf("%d",mat3[i][j]);
}}
int mat4[n][n];
int *w;
int vec4[n],o4[n];
int *u,*out4;
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
mat4[i][j]=1;
}}
for(int i=0;i<n;i++)
vec4[i]=1;
hipMalloc((void**)&w,n*n*sizeof(int));
hipMalloc((void**)&u,n*sizeof(int));
hipMalloc((void**)&out4,n*sizeof(int));
hipMemcpy(w,mat4,n*n*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(u,vec4,n*sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vec_mat_mul), dim3(1),dim3(n), 0, 0, w,u,out4);
hipMemcpy(o4,out4,n*sizeof(int),hipMemcpyDeviceToHost);
cout<<"\n\n";
for(int i=0;i<n;i++)
cout<<o4[i];
return 0;
}
| 9eabed7c0939c6ca24464f673e62f4adc3b6d258.cu | #include <cuda.h>
#include <stdio.h>
#include <iostream>
#define SIZE 100
#define n 2
using namespace std;
__global__ void vec_add(int *x,int *y,int *z)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
z[id]=x[id]+y[id];
}
__global__ void vec_mat_mul(int *mat,int *vec,int *o)
{
int x = threadIdx.x;
printf("\n%d",x);
o[x]=0;
for(int k=0;k<n;k++)
o[x]=o[x]+vec[k]*mat[x*n+k]; // row x of mat dotted with vec
}
__global__ void mat_mul(int *a,int *b,int* c)
{
int x = threadIdx.x;
int y = threadIdx.y;
c[n*y+x]=0; //here col2
for(int k=0;k<n;k++) //here col1
c[n*y+x]=c[n*y+x]+a[n*y+k]*b[n*k+x]; //col2,col2,col1,col2
}
int main()
{
//vec_add
int a[SIZE],b[SIZE],c[SIZE];
int *d,*e,*f;
for(int i=0;i<SIZE;i++)
a[i]=b[i]=i;
cudaMalloc((void**)&d,SIZE*sizeof(int));
cudaMalloc((void**)&e,SIZE*sizeof(int));
cudaMalloc((void**)&f,SIZE*sizeof(int));
cudaMemcpy(d,a,SIZE*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(e,b,SIZE*sizeof(int),cudaMemcpyHostToDevice);
vec_add<<<5,20>>>(d,e,f);
cudaMemcpy(c,f,SIZE*sizeof(int),cudaMemcpyDeviceToHost);
printf("%d",c[50]);
//mat_mul
int mat1[n][n],mat2[n][n],mat3[n][n];
int *g,*h,*l;
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
mat1[i][j]=mat2[i][j]=1;
}}
cudaMalloc((void**)&g,n*n*sizeof(int));
cudaMalloc((void**)&h,n*n*sizeof(int));
cudaMalloc((void**)&l,n*n*sizeof(int));
cudaMemcpy(g,mat1,n*n*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(h,mat2,n*n*sizeof(int),cudaMemcpyHostToDevice);
dim3 threadBlock(n,n); //col2 row1
mat_mul<<<1,threadBlock>>>(g,h,l);
cudaMemcpy(mat3,l,n*n*sizeof(int),cudaMemcpyDeviceToHost);
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
printf("%d",mat3[i][j]);
}}
int mat4[n][n];
int *w;
int vec4[n],o4[n];
int *u,*out4;
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
mat4[i][j]=1;
}}
for(int i=0;i<n;i++)
vec4[i]=1;
cudaMalloc((void**)&w,n*n*sizeof(int));
cudaMalloc((void**)&u,n*sizeof(int));
cudaMalloc((void**)&out4,n*sizeof(int));
cudaMemcpy(w,mat4,n*n*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(u,vec4,n*sizeof(int),cudaMemcpyHostToDevice);
vec_mat_mul<<<1,n>>>(w,u,out4);
cudaMemcpy(o4,out4,n*sizeof(int),cudaMemcpyDeviceToHost);
cout<<"\n\n";
for(int i=0;i<n;i++)
cout<<o4[i];
return 0;
}
|
28af9d95020049114b46f4e26366fcee1e23a572.hip | // !!! This is a file automatically generated by hipify!!!
#include "../include/header.h"
using namespace std;
void h_malloc(float **train_set, float **test_set, int **trainLabels, int **testLabels, int trainSize, int testSize, int window_size, int n_feat, int cls){
unsigned long long int trainBytes, testBytes;
if (cls < 2)
trainBytes = trainSize * window_size * n_feat * sizeof(float);
else
trainBytes = 2 * trainSize * window_size * n_feat * sizeof(float);
testBytes = testSize * window_size * n_feat * sizeof(float);
*train_set = (float *)malloc(trainBytes);
*test_set = (float *)malloc(testBytes);
*trainLabels = (int *)malloc(trainSize * sizeof(int));
*testLabels = (int *)malloc(testSize * sizeof(int));
// random initialization of train data and label set
initializeArray(*train_set, trainSize * window_size * n_feat);
initializeArray(*test_set, testSize * window_size * n_feat);
initializeArray(*trainLabels, trainSize);
initializeArray(*testLabels, testSize);
}
void h_malloc(int t_size, int q_size, int n_feat, float **t_series, float **q_series, float **owp){
int nss = t_size - q_size + 1;
unsigned long long int t_bytes = t_size * n_feat * sizeof(float);
unsigned long long int q_bytes = q_size * n_feat * sizeof(float);
/* *************** CPU MEMORY ALLOCATION *************** */
*t_series = (float *)malloc(t_bytes);
*q_series = (float *)malloc(q_bytes);
*owp = (float *)malloc(nss * sizeof(float));
memset(*owp, 0, nss * sizeof(float));
// random initialization of the two sequences
initializeArray(*t_series, t_size * n_feat);
initializeArray(*q_series, q_size * n_feat);
}
void h_free(float **train_set, float **test_set, int **trainLabels, int **testLabels, float *h_Out){
free(*train_set);
free(*test_set);
free(*trainLabels);
free(*testLabels);
free(h_Out);
}
void h_free(float **q_series, float **t_series, float **owp){
free(*q_series);
free(*t_series);
free(*owp);
}
void run_benchmark(int nss, int t_size, int q_size, int blockSize, int n_feat, hipDeviceProp_t deviceProp,
float *t_series, float *q_series, float *d_t_series, float *d_q_series, float *d_owp, float *owp, int task){
int ind_min_val = 0;
struct timeval stop_CPU, start_CPU;
hipEvent_t start_GPU, stop_GPU;
char *distance_type[] = {"ED","DTW"};
float time_cpu, time_gpu;
switch(task){
case 0:
for (int i = 0; i < 2; ++i)
{
printf("RUNNING BENCHMARK ON MD_D-%s...\n", *(distance_type + i));
gettimeofday(&start_CPU, NULL);
MDD_SIM_MES_CPU(nss, t_series, q_series, t_size, q_size, n_feat, *(distance_type + i), 0, owp, &ind_min_val);
gettimeofday(&stop_CPU, NULL);
time_cpu = timedifference_msec(start_CPU, stop_CPU);
hipEventCreate(&start_GPU);
hipEventCreate(&stop_GPU);
hipEventRecord(start_GPU, 0);
MDD_SIM_MES_GPU(nss, d_t_series, d_q_series, t_size, q_size, n_feat, blockSize, deviceProp, *(distance_type + i), 0, owp, d_owp, &ind_min_val);
hipEventRecord(stop_GPU, 0);
hipEventSynchronize(stop_GPU);
hipEventElapsedTime(&time_gpu, start_GPU, stop_GPU);
hipEventDestroy(start_GPU);
hipEventDestroy(stop_GPU);
printf("CPU %f ms vs GPU %f ms\n", time_cpu, time_gpu);
}
break;
case 1:
for (int i = 0; i < 2; ++i)
{
printf("RUNNING BENCHMARK ON MD_I-%s...\n", *(distance_type + i));
gettimeofday(&start_CPU, NULL);
MDI_SIM_MES_CPU(nss, t_series, q_series, t_size, q_size, n_feat, *(distance_type + i), 0, owp, &ind_min_val);
gettimeofday(&stop_CPU, NULL);
time_cpu = timedifference_msec(start_CPU, stop_CPU);
hipEventCreate(&start_GPU);
hipEventCreate(&stop_GPU);
hipEventRecord(start_GPU, 0);
MDD_SIM_MES_GPU(nss, d_t_series, d_q_series, t_size, q_size, n_feat, blockSize, deviceProp, *(distance_type + i), 0, owp, d_owp, &ind_min_val);
hipEventRecord(stop_GPU, 0);
hipEventSynchronize(stop_GPU);
hipEventElapsedTime(&time_gpu, start_GPU, stop_GPU);
hipEventDestroy(start_GPU);
hipEventDestroy(stop_GPU);
printf("CPU %f ms vs GPU %f ms\n", time_cpu, time_gpu);
}
break;
}
}
void run_benchmark(int trainSize, int testSize, int blockSize, int window_size, int n_feat, hipDeviceProp_t deviceProp,
int *trainLabels, int *testLabels, float *train_set, float *test_set, float *d_train, float *d_test, float *d_Out, float *h_Out, int task){
int ERR_CPU, ERR_GPU,ERR_NR_CPU,ERR_NR_GPU;
struct timeval stop_CPU, start_CPU;
hipEvent_t start_GPU, stop_GPU;
char *distance_type[] = {"ED","DTW"};
float time_cpu, time_gpu;
switch(task){
//benchmark for the Dependent-Similarity Measure Distance between the CPU and GPU versions
case 0:
for (int i = 0; i < 2; ++i)
{
printf("RUNNING BENCHMARK ON MD_D-%s...\n", *(distance_type + i));
gettimeofday(&start_CPU, NULL);
ERR_CPU = MDD_SIM_MES_CPU(trainSize, testSize, trainLabels, testLabels, train_set, test_set, window_size, n_feat, *(distance_type + i), 0);
gettimeofday(&stop_CPU, NULL);
time_cpu = timedifference_msec(start_CPU, stop_CPU);
hipEventCreate(&start_GPU);
hipEventCreate(&stop_GPU);
hipEventRecord(start_GPU, 0);
ERR_GPU = MDD_SIM_MES_GPU(trainSize, testSize, trainLabels, testLabels, train_set, test_set, d_train, d_test, d_Out, h_Out, window_size, n_feat, 512, deviceProp, *(distance_type + i), 0);
hipEventRecord(stop_GPU, 0);
hipEventSynchronize(stop_GPU);
hipEventElapsedTime(&time_gpu, start_GPU, stop_GPU);
hipEventDestroy(start_GPU);
hipEventDestroy(stop_GPU);
printf("CPU %f ms vs GPU %f ms\n", time_cpu, time_gpu);
}
break;
//benchmark for the Independent-Similarity Measure Distance between the CPU and GPU versions
case 1:
for (int i = 0; i < 2; ++i)
{
printf("RUNNING BENCHMARK ON MD_I-%s...\n", *(distance_type + i));
gettimeofday(&start_CPU, NULL);
ERR_CPU = MDI_SIM_MES_CPU(trainSize, testSize, trainLabels, testLabels, train_set, test_set, window_size, n_feat, *(distance_type + i), 0);
gettimeofday(&stop_CPU, NULL);
time_cpu = timedifference_msec(start_CPU, stop_CPU);
hipEventCreate(&start_GPU);
hipEventCreate(&stop_GPU);
hipEventRecord(start_GPU, 0);
ERR_GPU = MDI_SIM_MES_GPU(trainSize, testSize, trainLabels, testLabels, train_set, test_set, d_train, d_test, d_Out, h_Out, window_size, n_feat, blockSize, deviceProp, *(distance_type + i), 0);
hipEventRecord(stop_GPU, 0);
hipEventSynchronize(stop_GPU);
hipEventElapsedTime(&time_gpu, start_GPU, stop_GPU);
hipEventDestroy(start_GPU);
hipEventDestroy(stop_GPU);
printf("CPU %f ms vs GPU %f ms\n", time_cpu, time_gpu);
// printf("\n");
}
break;
//benchmark for the Rotation Dependent-Similarity Measure Distance between the CPU and GPU versions
case 3:
for (int i = 0; i < 2; ++i)
{
printf("RUNNING BENCHMARK ON MDR-%s...\n", *(distance_type + i));
gettimeofday(&start_CPU, NULL);
MDR_SIM_MES_CPU(trainSize, testSize, trainLabels, testLabels, train_set, test_set, window_size, n_feat, *(distance_type + i), 0, &ERR_CPU, &ERR_NR_CPU);
gettimeofday(&stop_CPU, NULL);
time_cpu = timedifference_msec(start_CPU, stop_CPU);
hipEventCreate(&start_GPU);
hipEventCreate(&stop_GPU);
hipEventRecord(start_GPU, 0);
MDR_SIM_MES_GPU(trainSize, testSize, trainLabels, testLabels, train_set, test_set, d_train, d_test, d_Out, h_Out, window_size, n_feat, blockSize, deviceProp, *(distance_type + i), 0, &ERR_GPU, &ERR_NR_GPU);
hipEventRecord(stop_GPU, 0);
hipEventSynchronize(stop_GPU);
hipEventElapsedTime(&time_gpu, start_GPU, stop_GPU);
hipEventDestroy(start_GPU);
hipEventDestroy(stop_GPU);
printf("CPU %f ms vs GPU %f ms\n", time_cpu, time_gpu);
// printf("\n");
}
break;
}
}
int main(int argc, char **argv) {
float *train_set = 0, *test_set = 0, *d_train = 0, *d_test = 0, *d_Out = 0, *h_Out = 0;
int *trainLabels = 0, *testLabels = 0;
int testSize = 0;
int trainSize = 0;
int window_size = 0;
int n_feat = 0;
int blockSize = 0;
//SETTING PARAMETERS
int start_iter = 0;
int end_iter = 12,i=0,j=0,k=0,l=0,p=0;
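// each row of grid_params: { testSize, trainSize, window_size, n_feat, blockSize },
// indexed by the nested loops below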
int grid_params[12][5] = {
{10, 100, 15, 1,2},
{30, 200, 30, 3,4},
{50, 250, 50, 5,8},
{70, 300, 100, 7,16},
{100, 350, 170, 10,32},
{150, 400, 200, 13,64},
{200, 500, 250, 15,128},
{250, 700, 300, 17,256},
{300, 1000, 350, 20,512},
{350, 1300, 400, 25,1024},
{400, 1500, 500, 30,1024},
{500, 2000, 1000, 50,1024}
};
hipDeviceProp_t deviceProp = getDevProp(0);
//CLASSIFICATION TASK
for (i = start_iter; i < end_iter; i++)
{
testSize = grid_params[i][0];
for (j = start_iter; j < end_iter; j++)
{
trainSize = grid_params[j][1];
for (k = start_iter; k < end_iter; k++)
{
window_size = grid_params[k][2];
for (l = start_iter; l < end_iter; l++)
{
n_feat = grid_params[l][3];
for (p = start_iter; p < end_iter; p++)
{
blockSize = grid_params[p][4];
printf("\nRunning benchmarks on classification task with trainSize[%d], testSize[%d], window_size[%d], n_feat[%d] \n", trainSize, testSize, window_size, n_feat);
/* HOST MEMORY ALLOCATION */
h_malloc(&train_set, &test_set, &trainLabels, &testLabels, trainSize, testSize, window_size, n_feat, 1);
/* DEVICE MEMORY ALLOCATION */
unsigned long long int trainBytes;
trainBytes = trainSize * window_size * n_feat * sizeof(float);
hipMalloc((void **)&d_Out, trainSize * sizeof(float));
hipMemset(d_Out, 0, trainSize * sizeof(float));
h_Out = (float *)malloc(trainSize * sizeof(float));
memset(h_Out, 0, trainSize * sizeof(float));
hipMalloc((void **)&d_train, trainBytes);
hipMemcpy(d_train, train_set, trainBytes, hipMemcpyHostToDevice);
hipMalloc((void **)&d_test, n_feat * window_size * sizeof(float));
/* DEVICE MEMORY ALLOCATION */
run_benchmark(trainSize, testSize, blockSize, window_size, n_feat, deviceProp,
trainLabels, testLabels, train_set, test_set, d_train, d_test, d_Out, h_Out, 0);
run_benchmark(trainSize, testSize, blockSize, window_size, n_feat, deviceProp,
trainLabels, testLabels, train_set, test_set, d_train, d_test, d_Out, h_Out, 1);
// /*--------------------- Rotation Invariant ---------------------*/
trainBytes = 2 * trainSize * window_size * n_feat * sizeof(float);
/* HOST MEMORY ALLOCATION */
/* DEVICE MEMORY ALLOCATION */
hipMalloc((void **)&d_Out, trainSize * window_size * sizeof(float));
hipMemset(d_Out, 0, trainSize * window_size * sizeof(float));
h_Out = (float *)malloc(trainSize * window_size * sizeof(float));
memset(h_Out, 0, trainSize * window_size * sizeof(float));
hipMalloc((void **)&d_train, trainBytes);
hipMemcpy(d_train, train_set, trainBytes, hipMemcpyHostToDevice);
hipMalloc((void **)&d_test, n_feat * window_size * sizeof(float));
/* DEVICE MEMORY ALLOCATION */
run_benchmark(trainSize, testSize, blockSize, window_size, n_feat, deviceProp,
trainLabels, testLabels, train_set, test_set, d_train, d_test, d_Out, h_Out, 3);
h_free(&train_set, &test_set, &trainLabels, &testLabels, h_Out);
hipFree(d_train);
hipFree(d_test);
hipFree(d_Out);
}
}
}
}
}
//SUB-SEQ SEARCH
int t_size = 0;
int q_size = 0;
int nss = 0;
float *t_series = 0, *q_series = 0, *owp = 0;
start_iter = 0;
end_iter = 12;
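// each row of grid_params_2: { t_size, q_size, n_feat, blockSize }, indexed by the loops below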
int grid_params_2[12][4] = {
{50, 10, 1, 2},
{100, 75, 3, 4},
{300, 100, 5, 8},
{500, 125, 7, 16},
{700, 150, 10, 32},
{800, 200, 13, 64},
{1000, 300, 15, 128},
{1200, 500, 17, 256},
{1300, 600, 20, 512},
{1500, 700, 25, 1024},
{1800, 1000, 30, 1024},
{2000, 1300, 50, 1024}
};
for (i = start_iter; i < end_iter; i++)
{
t_size = grid_params_2[i][0];
for (j = start_iter; j < end_iter; j++)
{
q_size = grid_params_2[j][1];
for (k = start_iter; k < end_iter; k++)
{
n_feat = grid_params_2[k][2];
for (p = start_iter; p < end_iter; p++)
{
blockSize = grid_params_2[p][3];
nss = t_size - q_size + 1;
h_malloc(t_size, q_size, n_feat, &t_series, &q_series, &owp);
/* *************** DEVICE MEMORY ALLOCATION *************** */
unsigned long long int t_bytes = t_size * n_feat * sizeof(float);
unsigned long long int q_bytes = q_size * n_feat * sizeof(float);
float *d_t_series = 0, *d_owp = 0, *d_q_series = 0;
hipMalloc((void **)&d_t_series, t_bytes);
hipMemcpy(d_t_series, t_series, t_bytes, hipMemcpyHostToDevice);
hipMalloc((void **)&d_q_series, q_bytes);
hipMemcpy(d_q_series, q_series, q_bytes, hipMemcpyHostToDevice);
hipMalloc((void **)&d_owp, nss * sizeof(float));
hipMemset(d_owp, 0, nss * sizeof(float));
/* *************** DEVICE MEMORY ALLOCATION *************** */
run_benchmark(nss, t_size, q_size, blockSize, n_feat, deviceProp, t_series, q_series, d_t_series, d_q_series, d_owp, owp, 0);
h_free(&t_series, &q_series, &owp);
hipFree(d_t_series);
hipFree(d_q_series);
hipFree(d_owp);
}
}
}
}
} | 28af9d95020049114b46f4e26366fcee1e23a572.cu | #include "../include/header.h"
using namespace std;
void h_malloc(float **train_set, float **test_set, int **trainLabels, int **testLabels, int trainSize, int testSize, int window_size, int n_feat, int cls){
unsigned long long int trainBytes, testBytes;
if (cls < 2)
trainBytes = trainSize * window_size * n_feat * sizeof(float);
else
trainBytes = 2 * trainSize * window_size * n_feat * sizeof(float);
testBytes = testSize * window_size * n_feat * sizeof(float);
*train_set = (float *)malloc(trainBytes);
*test_set = (float *)malloc(testBytes);
*trainLabels = (int *)malloc(trainSize * sizeof(int));
*testLabels = (int *)malloc(testSize * sizeof(int));
// random initialization of train data and label set
initializeArray(*train_set, trainSize * window_size * n_feat);
initializeArray(*test_set, testSize * window_size * n_feat);
initializeArray(*trainLabels, trainSize);
initializeArray(*testLabels, testSize);
}
void h_malloc(int t_size, int q_size, int n_feat, float **t_series, float **q_series, float **owp){
int nss = t_size - q_size + 1;
unsigned long long int t_bytes = t_size * n_feat * sizeof(float);
unsigned long long int q_bytes = q_size * n_feat * sizeof(float);
/* *************** CPU MEMORY ALLOCATION *************** */
*t_series = (float *)malloc(t_bytes);
*q_series = (float *)malloc(q_bytes);
*owp = (float *)malloc(nss * sizeof(float));
memset(*owp, 0, nss * sizeof(float));
// random initialization of the two sequences
initializeArray(*t_series, t_size * n_feat);
initializeArray(*q_series, q_size * n_feat);
}
void h_free(float **train_set, float **test_set, int **trainLabels, int **testLabels, float *h_Out){
free(*train_set);
free(*test_set);
free(*trainLabels);
free(*testLabels);
free(h_Out);
}
void h_free(float **q_series, float **t_series, float **owp){
free(*q_series);
free(*t_series);
free(*owp);
}
void run_benchmark(int nss, int t_size, int q_size, int blockSize, int n_feat, cudaDeviceProp deviceProp,
float *t_series, float *q_series, float *d_t_series, float *d_q_series, float *d_owp, float *owp, int task){
int ind_min_val = 0;
struct timeval stop_CPU, start_CPU;
cudaEvent_t start_GPU, stop_GPU;
char *distance_type[] = {"ED","DTW"};
float time_cpu, time_gpu;
switch(task){
case 0:
for (int i = 0; i < 2; ++i)
{
printf("RUNNING BENCHMARK ON MD_D-%s...\n", *(distance_type + i));
gettimeofday(&start_CPU, NULL);
MDD_SIM_MES_CPU(nss, t_series, q_series, t_size, q_size, n_feat, *(distance_type + i), 0, owp, &ind_min_val);
gettimeofday(&stop_CPU, NULL);
time_cpu = timedifference_msec(start_CPU, stop_CPU);
cudaEventCreate(&start_GPU);
cudaEventCreate(&stop_GPU);
cudaEventRecord(start_GPU, 0);
MDD_SIM_MES_GPU(nss, d_t_series, d_q_series, t_size, q_size, n_feat, blockSize, deviceProp, *(distance_type + i), 0, owp, d_owp, &ind_min_val);
cudaEventRecord(stop_GPU, 0);
cudaEventSynchronize(stop_GPU);
cudaEventElapsedTime(&time_gpu, start_GPU, stop_GPU);
cudaEventDestroy(start_GPU);
cudaEventDestroy(stop_GPU);
printf("CPU %f ms vs GPU %f ms\n", time_cpu, time_gpu);
}
break;
case 1:
for (int i = 0; i < 2; ++i)
{
printf("RUNNING BENCHMARK ON MD_I-%s...\n", *(distance_type + i));
gettimeofday(&start_CPU, NULL);
MDI_SIM_MES_CPU(nss, t_series, q_series, t_size, q_size, n_feat, *(distance_type + i), 0, owp, &ind_min_val);
gettimeofday(&stop_CPU, NULL);
time_cpu = timedifference_msec(start_CPU, stop_CPU);
cudaEventCreate(&start_GPU);
cudaEventCreate(&stop_GPU);
cudaEventRecord(start_GPU, 0);
MDD_SIM_MES_GPU(nss, d_t_series, d_q_series, t_size, q_size, n_feat, blockSize, deviceProp, *(distance_type + i), 0, owp, d_owp, &ind_min_val);
cudaEventRecord(stop_GPU, 0);
cudaEventSynchronize(stop_GPU);
cudaEventElapsedTime(&time_gpu, start_GPU, stop_GPU);
cudaEventDestroy(start_GPU);
cudaEventDestroy(stop_GPU);
printf("CPU %f ms vs GPU %f ms\n", time_cpu, time_gpu);
}
break;
}
}
void run_benchmark(int trainSize, int testSize, int blockSize, int window_size, int n_feat, cudaDeviceProp deviceProp,
int *trainLabels, int *testLabels, float *train_set, float *test_set, float *d_train, float *d_test, float *d_Out, float *h_Out, int task){
int ERR_CPU, ERR_GPU,ERR_NR_CPU,ERR_NR_GPU;
struct timeval stop_CPU, start_CPU;
cudaEvent_t start_GPU, stop_GPU;
char *distance_type[] = {"ED","DTW"};
float time_cpu, time_gpu;
switch(task){
//benchmark for the Dependent-Similarity Measure Distance between the CPU and GPU versions
case 0:
for (int i = 0; i < 2; ++i)
{
printf("RUNNING BENCHMARK ON MD_D-%s...\n", *(distance_type + i));
gettimeofday(&start_CPU, NULL);
ERR_CPU = MDD_SIM_MES_CPU(trainSize, testSize, trainLabels, testLabels, train_set, test_set, window_size, n_feat, *(distance_type + i), 0);
gettimeofday(&stop_CPU, NULL);
time_cpu = timedifference_msec(start_CPU, stop_CPU);
cudaEventCreate(&start_GPU);
cudaEventCreate(&stop_GPU);
cudaEventRecord(start_GPU, 0);
ERR_GPU = MDD_SIM_MES_GPU(trainSize, testSize, trainLabels, testLabels, train_set, test_set, d_train, d_test, d_Out, h_Out, window_size, n_feat, 512, deviceProp, *(distance_type + i), 0);
cudaEventRecord(stop_GPU, 0);
cudaEventSynchronize(stop_GPU);
cudaEventElapsedTime(&time_gpu, start_GPU, stop_GPU);
cudaEventDestroy(start_GPU);
cudaEventDestroy(stop_GPU);
printf("CPU %f ms vs GPU %f ms\n", time_cpu, time_gpu);
}
break;
//benchmark for the Independent-Similarity Measure Distance between the CPU and GPU versions
case 1:
for (int i = 0; i < 2; ++i)
{
printf("RUNNING BENCHMARK ON MD_I-%s...\n", *(distance_type + i));
gettimeofday(&start_CPU, NULL);
ERR_CPU = MDI_SIM_MES_CPU(trainSize, testSize, trainLabels, testLabels, train_set, test_set, window_size, n_feat, *(distance_type + i), 0);
gettimeofday(&stop_CPU, NULL);
time_cpu = timedifference_msec(start_CPU, stop_CPU);
cudaEventCreate(&start_GPU);
cudaEventCreate(&stop_GPU);
cudaEventRecord(start_GPU, 0);
ERR_GPU = MDI_SIM_MES_GPU(trainSize, testSize, trainLabels, testLabels, train_set, test_set, d_train, d_test, d_Out, h_Out, window_size, n_feat, blockSize, deviceProp, *(distance_type + i), 0);
cudaEventRecord(stop_GPU, 0);
cudaEventSynchronize(stop_GPU);
cudaEventElapsedTime(&time_gpu, start_GPU, stop_GPU);
cudaEventDestroy(start_GPU);
cudaEventDestroy(stop_GPU);
printf("CPU %f ms vs GPU %f ms\n", time_cpu, time_gpu);
// printf("\n");
}
break;
//benchmark for the Rotation Dependent-Similarity Measure Distance between the CPU and GPU versions
case 3:
for (int i = 0; i < 2; ++i)
{
printf("RUNNING BENCHMARK ON MDR-%s...\n", *(distance_type + i));
gettimeofday(&start_CPU, NULL);
MDR_SIM_MES_CPU(trainSize, testSize, trainLabels, testLabels, train_set, test_set, window_size, n_feat, *(distance_type + i), 0, &ERR_CPU, &ERR_NR_CPU);
gettimeofday(&stop_CPU, NULL);
time_cpu = timedifference_msec(start_CPU, stop_CPU);
cudaEventCreate(&start_GPU);
cudaEventCreate(&stop_GPU);
cudaEventRecord(start_GPU, 0);
MDR_SIM_MES_GPU(trainSize, testSize, trainLabels, testLabels, train_set, test_set, d_train, d_test, d_Out, h_Out, window_size, n_feat, blockSize, deviceProp, *(distance_type + i), 0, &ERR_GPU, &ERR_NR_GPU);
cudaEventRecord(stop_GPU, 0);
cudaEventSynchronize(stop_GPU);
cudaEventElapsedTime(&time_gpu, start_GPU, stop_GPU);
cudaEventDestroy(start_GPU);
cudaEventDestroy(stop_GPU);
printf("CPU %f ms vs GPU %f ms\n", time_cpu, time_gpu);
// printf("\n");
}
break;
}
}
int main(int argc, char **argv) {
float *train_set = 0, *test_set = 0, *d_train = 0, *d_test = 0, *d_Out = 0, *h_Out = 0;
int *trainLabels = 0, *testLabels = 0;
int testSize = 0;
int trainSize = 0;
int window_size = 0;
int n_feat = 0;
int blockSize = 0;
//SETTING PARAMETERS
int start_iter = 0;
int end_iter = 12,i=0,j=0,k=0,l=0,p=0;
int grid_params[12][5] = {
{10, 100, 15, 1,2},
{30, 200, 30, 3,4},
{50, 250, 50, 5,8},
{70, 300, 100, 7,16},
{100, 350, 170, 10,32},
{150, 400, 200, 13,64},
{200, 500, 250, 15,128},
{250, 700, 300, 17,256},
{300, 1000, 350, 20,512},
{350, 1300, 400, 25,1024},
{400, 1500, 500, 30,1024},
{500, 2000, 1000, 50,1024}
};
cudaDeviceProp deviceProp = getDevProp(0);
//CLASSIFICATION TASK
for (i = start_iter; i < end_iter; i++)
{
testSize = grid_params[i][0];
for (j = start_iter; j < end_iter; j++)
{
trainSize = grid_params[j][1];
for (k = start_iter; k < end_iter; k++)
{
window_size = grid_params[k][2];
for (l = start_iter; l < end_iter; l++)
{
n_feat = grid_params[l][3];
for (p = start_iter; p < end_iter; p++)
{
blockSize = grid_params[p][4];
printf("\nRunning benchmarks on classification task with trainSize[%d], testSize[%d], window_size[%d], n_feat[%d] \n", trainSize, testSize, window_size, n_feat);
/* HOST MEMORY ALLOCATION */
h_malloc(&train_set, &test_set, &trainLabels, &testLabels, trainSize, testSize, window_size, n_feat, 1);
/* DEVICE MEMORY ALLOCATION */
unsigned long long int trainBytes;
trainBytes = trainSize * window_size * n_feat * sizeof(float);
cudaMalloc((void **)&d_Out, trainSize * sizeof(float));
cudaMemset(d_Out, 0, trainSize * sizeof(float));
h_Out = (float *)malloc(trainSize * sizeof(float));
memset(h_Out, 0, trainSize * sizeof(float));
cudaMalloc((void **)&d_train, trainBytes);
cudaMemcpy(d_train, train_set, trainBytes, cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_test, n_feat * window_size * sizeof(float));
/* DEVICE MEMORY ALLOCATION */
run_benchmark(trainSize, testSize, blockSize, window_size, n_feat, deviceProp,
trainLabels, testLabels, train_set, test_set, d_train, d_test, d_Out, h_Out, 0);
run_benchmark(trainSize, testSize, blockSize, window_size, n_feat, deviceProp,
trainLabels, testLabels, train_set, test_set, d_train, d_test, d_Out, h_Out, 1);
// /*--------------------- Rotation Invariant ---------------------*/
trainBytes = 2 * trainSize * window_size * n_feat * sizeof(float);
/* HOST MEMORY ALLOCATION */
/* DEVICE MEMORY ALLOCATION */
cudaMalloc((void **)&d_Out, trainSize * window_size * sizeof(float));
cudaMemset(d_Out, 0, trainSize * window_size * sizeof(float));
h_Out = (float *)malloc(trainSize * window_size * sizeof(float));
memset(h_Out, 0, trainSize * window_size * sizeof(float));
cudaMalloc((void **)&d_train, trainBytes);
cudaMemcpy(d_train, train_set, trainBytes, cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_test, n_feat * window_size * sizeof(float));
/* DEVICE MEMORY ALLOCATION */
run_benchmark(trainSize, testSize, blockSize, window_size, n_feat, deviceProp,
trainLabels, testLabels, train_set, test_set, d_train, d_test, d_Out, h_Out, 3);
h_free(&train_set, &test_set, &trainLabels, &testLabels, h_Out);
cudaFree(d_train);
cudaFree(d_test);
cudaFree(d_Out);
}
}
}
}
}
//SUB-SEQ SEARCH
int t_size = 0;
int q_size = 0;
int nss = 0;
float *t_series = 0, *q_series = 0, *owp = 0;
start_iter = 0;
end_iter = 12;
int grid_params_2[12][4] = {
{50, 10, 1, 2},
{100, 75, 3, 4},
{300, 100, 5, 8},
{500, 125, 7, 16},
{700, 150, 10, 32},
{800, 200, 13, 64},
{1000, 300, 15, 128},
{1200, 500, 17, 256},
{1300, 600, 20, 512},
{1500, 700, 25, 1024},
{1800, 1000, 30, 1024},
{2000, 1300, 50, 1024}
};
for (i = start_iter; i < end_iter; i++)
{
t_size = grid_params_2[i][0];
for (j = start_iter; j < end_iter; j++)
{
q_size = grid_params_2[j][1];
for (k = start_iter; k < end_iter; k++)
{
n_feat = grid_params_2[k][2];
for (p = start_iter; p < end_iter; p++)
{
blockSize = grid_params_2[p][3];
nss = t_size - q_size + 1;
h_malloc(t_size, q_size, n_feat, &t_series, &q_series, &owp);
/* *************** DEVICE MEMORY ALLOCATION *************** */
unsigned long long int t_bytes = t_size * n_feat * sizeof(float);
unsigned long long int q_bytes = q_size * n_feat * sizeof(float);
float *d_t_series = 0, *d_owp = 0, *d_q_series = 0;
cudaMalloc((void **)&d_t_series, t_bytes);
cudaMemcpy(d_t_series, t_series, t_bytes, cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_q_series, q_bytes);
cudaMemcpy(d_q_series, q_series, q_bytes, cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_owp, nss * sizeof(float));
cudaMemset(d_owp, 0, nss * sizeof(float));
/* *************** DEVICE MEMORY ALLOCATION *************** */
run_benchmark(nss, t_size, q_size, blockSize, n_feat, deviceProp, t_series, q_series, d_t_series, d_q_series, d_owp, owp, 0);
h_free(&t_series, &q_series, &owp);
cudaFree(d_t_series);
cudaFree(d_q_series);
cudaFree(d_owp);
}
}
}
}
} |
4c540ffd9440d6728b86ba6de9ae5da1f720bbcd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cmath>
#include <cfloat>
#include <vector>
#include "caffe/layers/cross_entropy_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void CrossEntropyForward(const int nthread, const Dtype* prob, const Dtype* labels,
Dtype* entropy, Dtype alpha)
{
CUDA_KERNEL_LOOP(index, nthread)
{
const Dtype label = labels[index];
const Dtype p = max(prob[index], Dtype(kLOG_THRESHOLD));
entropy[index] = label*log(p)*(1-alpha)+(1-label)*log(1-p)*alpha;
}
}
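// With compensate_imbalance_ enabled, alpha is the fraction of positive labels in the
// batch (computed in Forward_gpu via caffe_gpu_asum, assuming 0/1 labels), so positive
// terms are weighted by (1 - alpha) and negative terms by alpha; with alpha = 0.5 both
// classes contribute equally.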
template <typename Dtype>
void CrossEntropyLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_label = bottom[1]->gpu_data();
Dtype* entropy = bottom[1]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype loss = 0;
Dtype alpha = 0.0;
if(compensate_imbalance_)
{
caffe_gpu_asum(count, bottom_label, &alpha); // the positive ratio
alpha /= count;
}
else
alpha = 0.5;
hipLaunchKernelGGL(( CrossEntropyForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom_label, entropy, alpha);
caffe_gpu_asum(count, entropy, &loss);
const int num = top[0]->num();
top[0]->mutable_cpu_data()[0] = loss / num;
}
template <typename Dtype>
__global__ void CrossEntropyBackward(const int nthread, const Dtype* prob, const Dtype* labels,
Dtype* diff, Dtype alpha, Dtype scale)
{
CUDA_KERNEL_LOOP(index, nthread)
{
const Dtype label = labels[index];
const Dtype p = max(prob[index], Dtype(kLOG_THRESHOLD));
diff[index] = scale * ((1-alpha)*label/p - alpha*(1-label)/(1-p));
}
}
template <typename Dtype>
void CrossEntropyLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_label = bottom[1]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0), bottom_diff);
const int num = top[0]->num();
const Dtype scale = -1 * top[0]->cpu_diff()[0] / num;
Dtype alpha = 0.0;
if(compensate_imbalance_)
{
caffe_gpu_asum(count, bottom_label, &alpha); // the positive ratio
alpha /= count;
}
else
alpha = 0.5;
hipLaunchKernelGGL(( CrossEntropyBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom_label, bottom_diff, alpha, scale);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CrossEntropyLossLayer);
} // namespace caffe
| 4c540ffd9440d6728b86ba6de9ae5da1f720bbcd.cu | #include <algorithm>
#include <cmath>
#include <cfloat>
#include <vector>
#include "caffe/layers/cross_entropy_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void CrossEntropyForward(const int nthread, const Dtype* prob, const Dtype* labels,
Dtype* entropy, Dtype alpha)
{
CUDA_KERNEL_LOOP(index, nthread)
{
const Dtype label = labels[index];
const Dtype p = max(prob[index], Dtype(kLOG_THRESHOLD));
entropy[index] = label*log(p)*(1-alpha)+(1-label)*log(1-p)*alpha;
}
}
template <typename Dtype>
void CrossEntropyLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_label = bottom[1]->gpu_data();
Dtype* entropy = bottom[1]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype loss = 0;
Dtype alpha = 0.0;
if(compensate_imbalance_)
{
caffe_gpu_asum(count, bottom_label, &alpha); // the positive ratio
alpha /= count;
}
else
alpha = 0.5;
CrossEntropyForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>
(count, bottom_data, bottom_label, entropy, alpha);
caffe_gpu_asum(count, entropy, &loss);
const int num = top[0]->num();
top[0]->mutable_cpu_data()[0] = loss / num;
}
template <typename Dtype>
__global__ void CrossEntropyBackward(const int nthread, const Dtype* prob, const Dtype* labels,
Dtype* diff, Dtype alpha, Dtype scale)
{
CUDA_KERNEL_LOOP(index, nthread)
{
const Dtype label = labels[index];
const Dtype p = max(prob[index], Dtype(kLOG_THRESHOLD));
diff[index] = scale * ((1-alpha)*label/p - alpha*(1-label)/(1-p));
}
}
template <typename Dtype>
void CrossEntropyLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_label = bottom[1]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0), bottom_diff);
const int num = top[0]->num();
const Dtype scale = -1 * top[0]->cpu_diff()[0] / num;
Dtype alpha = 0.0;
if(compensate_imbalance_)
{
caffe_gpu_asum(count, bottom_label, &alpha); // the positive ratio
alpha /= count;
}
else
alpha = 0.5;
CrossEntropyBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>
(count, bottom_data, bottom_label, bottom_diff, alpha, scale);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CrossEntropyLossLayer);
} // namespace caffe
|
965d9ed59c503f475f9b0e49d47280e39e440c3a.hip | // !!! This is a file automatically generated by hipify!!!
/*
============================================================================
Filename : algorithm.c
Author : Arthur Vernet, Simon Maulini
SCIPER : 245828, 248115
============================================================================
*/
#include <iostream>
#include <iomanip>
#include <sys/time.h>
#include <hip/hip_runtime.h>
using namespace std;
// CPU Baseline
void array_process(double *input, double *output, int length, int iterations)
{
double *temp;
for(int n=0; n<(int) iterations; n++)
{
for(int i=1; i<length-1; i++)
{
for(int j=1; j<length-1; j++)
{
output[(i)*(length)+(j)] = (input[(i-1)*(length)+(j-1)] +
input[(i-1)*(length)+(j)] +
input[(i-1)*(length)+(j+1)] +
input[(i)*(length)+(j-1)] +
input[(i)*(length)+(j)] +
input[(i)*(length)+(j+1)] +
input[(i+1)*(length)+(j-1)] +
input[(i+1)*(length)+(j)] +
input[(i+1)*(length)+(j+1)] ) / 9;
}
}
output[(length/2-1)*length+(length/2-1)] = 1000;
output[(length/2)*length+(length/2-1)] = 1000;
output[(length/2-1)*length+(length/2)] = 1000;
output[(length/2)*length+(length/2)] = 1000;
temp = input;
input = output;
output = temp;
}
}
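// Row pass of the separable 3x3 mean filter: each thread sums the three horizontal
// neighbours of its cell; kernel_column finishes the stencil and divides by 9.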
__global__ void kernel_row(double *input, double *output, int length) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int i = y * length + x;
    //skip threads that are out of bounds, on the left/right boundary, or part of the heat core
    if(x >= length || y >= length || x == 0 || x == length - 1 ||
        ((y == length / 2 || y == length / 2 - 1) && (x == length / 2 - 1 || x == length / 2)))
        return;
output[i] = input[i];
output[i] += input[i - 1];
output[i] += input[i + 1];
}
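// Column pass: sums the three vertical neighbours of the row sums and divides by 9,
// producing the same 3x3 average as the CPU baseline above.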
__global__ void kernel_column(double *input, double *output, int length) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int i = y * length + x;
    //skip threads that are out of bounds, on the top/bottom boundary, or part of the heat core
    if(x >= length || y >= length || y == 0 || y == length - 1 ||
        ((y == length / 2 || y == length / 2 - 1) && (x == length / 2 - 1 || x == length / 2)))
        return;
output[i] = input[i];
output[i] += input[i - length];
output[i] += input[i + length];
    output[i] /= 9; //divide by 9 here since this kernel runs last and completes the 3x3 average
}
// GPU Optimized function
void GPU_array_process(double *input, double *output, int length, int iterations)
{
//Cuda events for calculating elapsed time
hipEvent_t cpy_H2D_start, cpy_H2D_end, comp_start, comp_end, cpy_D2H_start, cpy_D2H_end;
hipEventCreate(&cpy_H2D_start);
hipEventCreate(&cpy_H2D_end);
hipEventCreate(&cpy_D2H_start);
hipEventCreate(&cpy_D2H_end);
hipEventCreate(&comp_start);
hipEventCreate(&comp_end);
/* Preprocessing goes here */
hipSetDevice(0);
size_t size = length*length*sizeof(double);
double* input_data;
double* output_data;
dim3 threadPerBlocks(32, 32);
dim3 blocks(4, 4);
// allocate array on device
if (hipMalloc((void **) &input_data, size) != hipSuccess)
cout << "error in hipMalloc" << endl;
if (hipMalloc((void **) &output_data, size) != hipSuccess)
cout << "error in hipMalloc" << endl;
hipEventRecord(cpy_H2D_start);
/* Copying array from host to device goes here */
if (hipMemcpy(input_data, input, size, hipMemcpyHostToDevice) != hipSuccess)
cout << "error in hipMemcpy" << endl;
hipEventRecord(cpy_H2D_end);
hipEventSynchronize(cpy_H2D_end);
hipEventRecord(comp_start);
/* GPU calculation goes here */
for(int i = 0; i < iterations; ++i) {
hipLaunchKernelGGL(( kernel_row) , dim3(blocks), dim3(threadPerBlocks) , 0, 0, input_data, output_data, length);
hipLaunchKernelGGL(( kernel_column) , dim3(blocks), dim3(threadPerBlocks) , 0, 0, output_data, input_data, length);
    hipDeviceSynchronize(); //synchronize after every iteration, works as a barrier
}
hipEventRecord(comp_end);
hipEventSynchronize(comp_end);
hipEventRecord(cpy_D2H_start);
/* Copying array from device to host goes here */
if(hipMemcpy(output, output_data, size, hipMemcpyDeviceToHost) != hipSuccess)
cout << "Cuda Memcpy DeviceToHost Error: cannot copy output\n";
hipEventRecord(cpy_D2H_end);
hipEventSynchronize(cpy_D2H_end);
/* Postprocessing goes here */
hipFree(input_data);
hipFree(output_data);
float time;
hipEventElapsedTime(&time, cpy_H2D_start, cpy_H2D_end);
cout<<"Host to Device MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
hipEventElapsedTime(&time, comp_start, comp_end);
cout<<"Computation takes "<<setprecision(4)<<time/1000<<"s"<<endl;
hipEventElapsedTime(&time, cpy_D2H_start, cpy_D2H_end);
cout<<"Device to Host MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
}
| 965d9ed59c503f475f9b0e49d47280e39e440c3a.cu | /*
============================================================================
Filename : algorithm.c
Author : Arthur Vernet, Simon Maulini
SCIPER : 245828, 248115
============================================================================
*/
#include <iostream>
#include <iomanip>
#include <sys/time.h>
#include <cuda_runtime.h>
using namespace std;
// CPU Baseline
void array_process(double *input, double *output, int length, int iterations)
{
double *temp;
for(int n=0; n<(int) iterations; n++)
{
for(int i=1; i<length-1; i++)
{
for(int j=1; j<length-1; j++)
{
output[(i)*(length)+(j)] = (input[(i-1)*(length)+(j-1)] +
input[(i-1)*(length)+(j)] +
input[(i-1)*(length)+(j+1)] +
input[(i)*(length)+(j-1)] +
input[(i)*(length)+(j)] +
input[(i)*(length)+(j+1)] +
input[(i+1)*(length)+(j-1)] +
input[(i+1)*(length)+(j)] +
input[(i+1)*(length)+(j+1)] ) / 9;
}
}
output[(length/2-1)*length+(length/2-1)] = 1000;
output[(length/2)*length+(length/2-1)] = 1000;
output[(length/2-1)*length+(length/2)] = 1000;
output[(length/2)*length+(length/2)] = 1000;
temp = input;
input = output;
output = temp;
}
}
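// Row pass of the separable 3x3 mean filter: each thread sums the three horizontal
// neighbours of its cell; kernel_column finishes the stencil and divides by 9.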
__global__ void kernel_row(double *input, double *output, int length) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int i = y * length + x;
    //skip threads that are out of bounds, on the left/right boundary, or part of the heat core
    if(x >= length || y >= length || x == 0 || x == length - 1 ||
        ((y == length / 2 || y == length / 2 - 1) && (x == length / 2 - 1 || x == length / 2)))
        return;
output[i] = input[i];
output[i] += input[i - 1];
output[i] += input[i + 1];
}
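// Column pass: sums the three vertical neighbours of the row sums and divides by 9,
// producing the same 3x3 average as the CPU baseline above.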
__global__ void kernel_column(double *input, double *output, int length) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int i = y * length + x;
    //skip threads that are out of bounds, on the top/bottom boundary, or part of the heat core
    if(x >= length || y >= length || y == 0 || y == length - 1 ||
        ((y == length / 2 || y == length / 2 - 1) && (x == length / 2 - 1 || x == length / 2)))
        return;
output[i] = input[i];
output[i] += input[i - length];
output[i] += input[i + length];
    output[i] /= 9; //divide by 9 here since this kernel runs last and completes the 3x3 average
}
// GPU Optimized function
void GPU_array_process(double *input, double *output, int length, int iterations)
{
//Cuda events for calculating elapsed time
cudaEvent_t cpy_H2D_start, cpy_H2D_end, comp_start, comp_end, cpy_D2H_start, cpy_D2H_end;
cudaEventCreate(&cpy_H2D_start);
cudaEventCreate(&cpy_H2D_end);
cudaEventCreate(&cpy_D2H_start);
cudaEventCreate(&cpy_D2H_end);
cudaEventCreate(&comp_start);
cudaEventCreate(&comp_end);
/* Preprocessing goes here */
cudaSetDevice(0);
size_t size = length*length*sizeof(double);
double* input_data;
double* output_data;
dim3 threadPerBlocks(32, 32);
dim3 blocks(4, 4);
// allocate array on device
if (cudaMalloc((void **) &input_data, size) != cudaSuccess)
cout << "error in cudaMalloc" << endl;
if (cudaMalloc((void **) &output_data, size) != cudaSuccess)
cout << "error in cudaMalloc" << endl;
cudaEventRecord(cpy_H2D_start);
/* Copying array from host to device goes here */
if (cudaMemcpy(input_data, input, size, cudaMemcpyHostToDevice) != cudaSuccess)
cout << "error in cudaMemcpy" << endl;
cudaEventRecord(cpy_H2D_end);
cudaEventSynchronize(cpy_H2D_end);
cudaEventRecord(comp_start);
/* GPU calculation goes here */
for(int i = 0; i < iterations; ++i) {
kernel_row <<< blocks, threadPerBlocks >>> (input_data, output_data, length);
kernel_column <<< blocks, threadPerBlocks >>> (output_data, input_data, length);
    cudaThreadSynchronize(); //synchronize after every iteration, works as a barrier
}
cudaEventRecord(comp_end);
cudaEventSynchronize(comp_end);
cudaEventRecord(cpy_D2H_start);
/* Copying array from device to host goes here */
if(cudaMemcpy(output, output_data, size, cudaMemcpyDeviceToHost) != cudaSuccess)
cout << "Cuda Memcpy DeviceToHost Error: cannot copy output\n";
cudaEventRecord(cpy_D2H_end);
cudaEventSynchronize(cpy_D2H_end);
/* Postprocessing goes here */
cudaFree(input_data);
cudaFree(output_data);
float time;
cudaEventElapsedTime(&time, cpy_H2D_start, cpy_H2D_end);
cout<<"Host to Device MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
cudaEventElapsedTime(&time, comp_start, comp_end);
cout<<"Computation takes "<<setprecision(4)<<time/1000<<"s"<<endl;
cudaEventElapsedTime(&time, cpy_D2H_start, cpy_D2H_end);
cout<<"Device to Host MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
}
|
2e65a31dfbecbae46f0f96617a333d0e5eac82f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2011, Duane Merrill
* Copyright (c) 2011-2018, NVIDIA CORPORATION
* Copyright (c) 2020 Savely Pototsky (SavaLione)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/******************************************************************************
* Test of BlockHistogram utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <limits>
#include <string>
#include <typeinfo>
#include <newcub/block/block_histogram.cuh>
#include <newcub/block/block_load.cuh>
#include <newcub/block/block_store.cuh>
#include <newcub/util_allocator.cuh>
#include <test_util.h>
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_timing_iterations = 0;
int g_repeat = 0;
CachingDeviceAllocator g_allocator(true);
//---------------------------------------------------------------------
// Test kernels
//---------------------------------------------------------------------
/**
* BlockHistogram test kernel.
*/
template <
int BINS,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockHistogramAlgorithm ALGORITHM,
typename T,
typename HistoCounter>
__global__ void BlockHistogramKernel(
T *d_samples,
HistoCounter *d_histogram)
{
// Parameterize BlockHistogram type for our thread block
typedef BlockHistogram<T, BLOCK_THREADS, ITEMS_PER_THREAD, BINS, ALGORITHM> BlockHistogram;
// Allocate temp storage in shared memory
__shared__ typename BlockHistogram::TempStorage temp_storage;
// Per-thread tile data
T data[ITEMS_PER_THREAD];
LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_samples, data);
// Test histo (writing directly to histogram buffer in global)
BlockHistogram(temp_storage).Histogram(data, d_histogram);
}
/**
* Initialize problem (and solution)
*/
template <
int BINS,
typename SampleT>
void Initialize(
GenMode gen_mode,
SampleT *h_samples,
int *h_histograms_linear,
int num_samples)
{
// Init bins
for (int bin = 0; bin < BINS; ++bin)
{
h_histograms_linear[bin] = 0;
}
if (g_verbose) printf("Samples: \n");
// Initialize interleaved channel samples and histogram them correspondingly
for (int i = 0; i < num_samples; ++i)
{
InitValue(gen_mode, h_samples[i], i);
h_samples[i] %= BINS;
if (g_verbose) std::cout << CoutCast(h_samples[i]) << ", ";
h_histograms_linear[h_samples[i]]++;
}
if (g_verbose) printf("\n\n");
}
/**
* Test BlockHistogram
*/
template <
typename SampleT,
int BINS,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockHistogramAlgorithm ALGORITHM>
void Test(
GenMode gen_mode)
{
int num_samples = BLOCK_THREADS * ITEMS_PER_THREAD;
printf("cub::BlockHistogram %s %d %s samples (%dB), %d bins, %d threads, gen-mode %s\n",
(ALGORITHM == BLOCK_HISTO_SORT) ? "BLOCK_HISTO_SORT" : "BLOCK_HISTO_ATOMIC",
num_samples,
typeid(SampleT).name(),
(int) sizeof(SampleT),
BINS,
BLOCK_THREADS,
(gen_mode == RANDOM) ? "RANDOM" : (gen_mode == INTEGER_SEED) ? "SEQUENTIAL" : "HOMOGENOUS");
fflush(stdout);
// Allocate host arrays
SampleT *h_samples = new SampleT[num_samples];
int *h_reference = new int[BINS];
// Initialize problem
Initialize<BINS>(gen_mode, h_samples, h_reference, num_samples);
// Allocate problem device arrays
SampleT *d_samples = NULL;
int *d_histogram = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_samples, sizeof(SampleT) * num_samples));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_histogram, sizeof(int) * BINS));
// Initialize/clear device arrays
CubDebugExit(hipMemcpy(d_samples, h_samples, sizeof(SampleT) * num_samples, hipMemcpyHostToDevice));
CubDebugExit(hipMemset(d_histogram, 0, sizeof(int) * BINS));
// Run kernel
hipLaunchKernelGGL(( BlockHistogramKernel<BINS, BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>), dim3(1), dim3(BLOCK_THREADS), 0, 0,
d_samples,
d_histogram);
// Check for correctness (and display results, if specified)
int compare = CompareDeviceResults((int*) h_reference, d_histogram, BINS, g_verbose, g_verbose);
printf("\t%s\n\n", compare ? "FAIL" : "PASS");
// Flush any stdout/stderr
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
fflush(stdout);
fflush(stderr);
// Cleanup
if (h_samples) delete[] h_samples;
if (h_reference) delete[] h_reference;
if (d_samples) CubDebugExit(g_allocator.DeviceFree(d_samples));
if (d_histogram) CubDebugExit(g_allocator.DeviceFree(d_histogram));
// Correctness asserts
AssertEquals(0, compare);
}
/**
* Test different sample distributions
*/
template <
typename SampleT,
int BINS,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockHistogramAlgorithm ALGORITHM>
void Test()
{
Test<SampleT, BINS, BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>(UNIFORM);
Test<SampleT, BINS, BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>(INTEGER_SEED);
Test<SampleT, BINS, BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>(RANDOM);
}
/**
* Test different ALGORITHM
*/
template <
typename SampleT,
int BINS,
int BLOCK_THREADS,
int ITEMS_PER_THREAD>
void Test()
{
Test<SampleT, BINS, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_HISTO_SORT>();
Test<SampleT, BINS, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_HISTO_ATOMIC>();
}
/**
* Test different ITEMS_PER_THREAD
*/
template <
typename SampleT,
int BINS,
int BLOCK_THREADS>
void Test()
{
Test<SampleT, BINS, BLOCK_THREADS, 1>();
Test<SampleT, BINS, BLOCK_THREADS, 5>();
}
/**
* Test different BLOCK_THREADS
*/
template <
typename SampleT,
int BINS>
void Test()
{
Test<SampleT, BINS, 32>();
Test<SampleT, BINS, 96>();
Test<SampleT, BINS, 128>();
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("repeat", g_repeat);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<total input samples across all channels> "
"[--device=<device-id>] "
"[--repeat=<repetitions of entire test suite>]"
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
#ifdef QUICK_TEST
// Compile/run quick tests
Test<unsigned char, 256, 128, 4, BLOCK_HISTO_SORT>(RANDOM);
Test<unsigned char, 256, 128, 4, BLOCK_HISTO_ATOMIC>(RANDOM);
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
Test<unsigned char, 32>();
Test<unsigned char, 256>();
Test<unsigned short, 1024>();
}
#endif
return 0;
}
| 2e65a31dfbecbae46f0f96617a333d0e5eac82f0.cu | /*
* Copyright (c) 2011, Duane Merrill
* Copyright (c) 2011-2018, NVIDIA CORPORATION
* Copyright (c) 2020 Savely Pototsky (SavaLione)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/******************************************************************************
* Test of BlockHistogram utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <limits>
#include <string>
#include <typeinfo>
#include <newcub/block/block_histogram.cuh>
#include <newcub/block/block_load.cuh>
#include <newcub/block/block_store.cuh>
#include <newcub/util_allocator.cuh>
#include <test_util.h>
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_timing_iterations = 0;
int g_repeat = 0;
CachingDeviceAllocator g_allocator(true);
//---------------------------------------------------------------------
// Test kernels
//---------------------------------------------------------------------
/**
* BlockHistogram test kernel.
*/
template <
int BINS,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockHistogramAlgorithm ALGORITHM,
typename T,
typename HistoCounter>
__global__ void BlockHistogramKernel(
T *d_samples,
HistoCounter *d_histogram)
{
// Parameterize BlockHistogram type for our thread block
typedef BlockHistogram<T, BLOCK_THREADS, ITEMS_PER_THREAD, BINS, ALGORITHM> BlockHistogram;
// Allocate temp storage in shared memory
__shared__ typename BlockHistogram::TempStorage temp_storage;
// Per-thread tile data
T data[ITEMS_PER_THREAD];
LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_samples, data);
// Test histo (writing directly to histogram buffer in global)
BlockHistogram(temp_storage).Histogram(data, d_histogram);
}
/**
* Initialize problem (and solution)
*/
template <
int BINS,
typename SampleT>
void Initialize(
GenMode gen_mode,
SampleT *h_samples,
int *h_histograms_linear,
int num_samples)
{
// Init bins
for (int bin = 0; bin < BINS; ++bin)
{
h_histograms_linear[bin] = 0;
}
if (g_verbose) printf("Samples: \n");
// Initialize interleaved channel samples and histogram them correspondingly
for (int i = 0; i < num_samples; ++i)
{
InitValue(gen_mode, h_samples[i], i);
h_samples[i] %= BINS;
if (g_verbose) std::cout << CoutCast(h_samples[i]) << ", ";
h_histograms_linear[h_samples[i]]++;
}
if (g_verbose) printf("\n\n");
}
/**
* Test BlockHistogram
*/
template <
typename SampleT,
int BINS,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockHistogramAlgorithm ALGORITHM>
void Test(
GenMode gen_mode)
{
int num_samples = BLOCK_THREADS * ITEMS_PER_THREAD;
printf("cub::BlockHistogram %s %d %s samples (%dB), %d bins, %d threads, gen-mode %s\n",
(ALGORITHM == BLOCK_HISTO_SORT) ? "BLOCK_HISTO_SORT" : "BLOCK_HISTO_ATOMIC",
num_samples,
typeid(SampleT).name(),
(int) sizeof(SampleT),
BINS,
BLOCK_THREADS,
(gen_mode == RANDOM) ? "RANDOM" : (gen_mode == INTEGER_SEED) ? "SEQUENTIAL" : "HOMOGENOUS");
fflush(stdout);
// Allocate host arrays
SampleT *h_samples = new SampleT[num_samples];
int *h_reference = new int[BINS];
// Initialize problem
Initialize<BINS>(gen_mode, h_samples, h_reference, num_samples);
// Allocate problem device arrays
SampleT *d_samples = NULL;
int *d_histogram = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_samples, sizeof(SampleT) * num_samples));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_histogram, sizeof(int) * BINS));
// Initialize/clear device arrays
CubDebugExit(cudaMemcpy(d_samples, h_samples, sizeof(SampleT) * num_samples, cudaMemcpyHostToDevice));
CubDebugExit(cudaMemset(d_histogram, 0, sizeof(int) * BINS));
// Run kernel
BlockHistogramKernel<BINS, BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM><<<1, BLOCK_THREADS>>>(
d_samples,
d_histogram);
// Check for correctness (and display results, if specified)
int compare = CompareDeviceResults((int*) h_reference, d_histogram, BINS, g_verbose, g_verbose);
printf("\t%s\n\n", compare ? "FAIL" : "PASS");
// Flush any stdout/stderr
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
fflush(stdout);
fflush(stderr);
// Cleanup
if (h_samples) delete[] h_samples;
if (h_reference) delete[] h_reference;
if (d_samples) CubDebugExit(g_allocator.DeviceFree(d_samples));
if (d_histogram) CubDebugExit(g_allocator.DeviceFree(d_histogram));
// Correctness asserts
AssertEquals(0, compare);
}
/**
* Test different sample distributions
*/
template <
typename SampleT,
int BINS,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockHistogramAlgorithm ALGORITHM>
void Test()
{
Test<SampleT, BINS, BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>(UNIFORM);
Test<SampleT, BINS, BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>(INTEGER_SEED);
Test<SampleT, BINS, BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>(RANDOM);
}
/**
* Test different ALGORITHM
*/
template <
typename SampleT,
int BINS,
int BLOCK_THREADS,
int ITEMS_PER_THREAD>
void Test()
{
Test<SampleT, BINS, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_HISTO_SORT>();
Test<SampleT, BINS, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_HISTO_ATOMIC>();
}
/**
* Test different ITEMS_PER_THREAD
*/
template <
typename SampleT,
int BINS,
int BLOCK_THREADS>
void Test()
{
Test<SampleT, BINS, BLOCK_THREADS, 1>();
Test<SampleT, BINS, BLOCK_THREADS, 5>();
}
/**
* Test different BLOCK_THREADS
*/
template <
typename SampleT,
int BINS>
void Test()
{
Test<SampleT, BINS, 32>();
Test<SampleT, BINS, 96>();
Test<SampleT, BINS, 128>();
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("repeat", g_repeat);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<total input samples across all channels> "
"[--device=<device-id>] "
"[--repeat=<repetitions of entire test suite>]"
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
#ifdef QUICK_TEST
// Compile/run quick tests
Test<unsigned char, 256, 128, 4, BLOCK_HISTO_SORT>(RANDOM);
Test<unsigned char, 256, 128, 4, BLOCK_HISTO_ATOMIC>(RANDOM);
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
Test<unsigned char, 32>();
Test<unsigned char, 256>();
Test<unsigned short, 1024>();
}
#endif
return 0;
}
|
1ab543f45c47ed3ac2268689a0b9a1f3b53220f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Copyright Douglas Goddard 2016
// Licensed under the MIT license
// shout out to salix alba, you're a wizard mate
// http://stackoverflow.com/a/39862297/1176872
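// For each hash, taken in sorted order, read its 3-byte prefix for round r as a 24-bit key
// and write that key into slots [sum-count, sum) of comb_prefix -- assuming comb_sum holds
// the running totals of comb_count, this expands the per-key counts into a per-element list.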
__global__ void map_index_to_prefix( uint8_t *hashes, uint32_t *sort_indices, uint32_t *comb_count, uint32_t *comb_sum, uint32_t *comb_prefix, uint32_t r, uint32_t size)
{
uint32_t t_index = blockDim.x * blockIdx.x + threadIdx.x;
if(t_index < size) {
uint32_t index = sort_indices[t_index];
unsigned char* hash = hashes+index*30*sizeof(unsigned char)+r*3;
uint32_t key = hash[0] << 16 | hash[1] << 8 | hash[2];
uint32_t count = comb_count[key];
uint32_t sum = comb_sum[key];
for(int i=(sum-count); i<sum; i++) {
comb_prefix[i] = key;
}
}
} | 1ab543f45c47ed3ac2268689a0b9a1f3b53220f0.cu | #include "includes.h"
// Copyright Douglas Goddard 2016
// Licensed under the MIT license
// shout out to salix alba, you're a wizard mate
// http://stackoverflow.com/a/39862297/1176872
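// For each hash, taken in sorted order, read its 3-byte prefix for round r as a 24-bit key
// and write that key into slots [sum-count, sum) of comb_prefix -- assuming comb_sum holds
// the running totals of comb_count, this expands the per-key counts into a per-element list.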
__global__ void map_index_to_prefix( uint8_t *hashes, uint32_t *sort_indices, uint32_t *comb_count, uint32_t *comb_sum, uint32_t *comb_prefix, uint32_t r, uint32_t size)
{
uint32_t t_index = blockDim.x * blockIdx.x + threadIdx.x;
if(t_index < size) {
uint32_t index = sort_indices[t_index];
unsigned char* hash = hashes+index*30*sizeof(unsigned char)+r*3;
uint32_t key = hash[0] << 16 | hash[1] << 8 | hash[2];
uint32_t count = comb_count[key];
uint32_t sum = comb_sum[key];
for(int i=(sum-count); i<sum; i++) {
comb_prefix[i] = key;
}
}
} |
96f4f219810bdfc35c4a12a23a4640546c7cd44c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_hip.cuh"
#define THREADS_PER_BLOCK 1024
__global__ void Fadd_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] + y[index];
}
}
void Fadd_impl(const dtype* x, const dtype* y, dtype* r, int size) {
hipLaunchKernelGGL(( Fadd_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size);
hipDeviceSynchronize();
}
__global__ void Fsubtract_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] - y[index];
}
}
void Fsubtract_impl(const dtype* x, const dtype* y, dtype* r, int size) {
hipLaunchKernelGGL(( Fsubtract_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size);
hipDeviceSynchronize();
}
__global__ void Fmultiply_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] * y[index];
}
}
void Fmultiply_impl(const dtype* x, const dtype* y, dtype* r, int size) {
hipLaunchKernelGGL(( Fmultiply_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size);
hipDeviceSynchronize();
}
__global__ void Fdivide_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] / y[index];
}
}
void Fdivide_impl(const dtype* x, const dtype* y, dtype* r, int size) {
hipLaunchKernelGGL(( Fdivide_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size);
hipDeviceSynchronize();
}
__global__ void Fmultiply_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] * y;
}
}
void Fmultiply_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) {
hipLaunchKernelGGL(( Fmultiply_scalar_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size);
hipDeviceSynchronize();
}
__global__ void Fadd_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] + y;
}
}
void Fadd_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) {
hipLaunchKernelGGL(( Fadd_scalar_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size);
hipDeviceSynchronize();
}
__global__ void Fsquare_kernel(const dtype* x, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] * x[index];
}
}
void Fsquare_impl(const dtype* x, dtype* r, int size) {
hipLaunchKernelGGL(( Fsquare_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size);
hipDeviceSynchronize();
}
__global__ void Ftanh_kernel(const dtype* x, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = tanh(x[index]);
}
}
void Ftanh_impl(const dtype* x, dtype* r, int size) {
hipLaunchKernelGGL(( Ftanh_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size);
hipDeviceSynchronize();
}
__global__ void Dtanh_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = (1 + y[index]) * (1 - y[index]);
}
}
void Dtanh_impl(const dtype* x, const dtype* y, dtype* r, int size) {
hipLaunchKernelGGL(( Dtanh_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size);
hipDeviceSynchronize();
}
__global__ void Fsigmoid_kernel(const dtype* x, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = 1.0 / (1.0 + exp(-x[index]));
}
}
void Fsigmoid_impl(const dtype* x, dtype* r, int size) {
hipLaunchKernelGGL(( Fsigmoid_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size);
hipDeviceSynchronize();
}
__global__ void Dsigmoid_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = (1 - y[index]) * y[index];
}
}
void Dsigmoid_impl(const dtype* x, const dtype* y, dtype* r, int size) {
hipLaunchKernelGGL(( Dsigmoid_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size);
hipDeviceSynchronize();
}
__global__ void Fsqrt_kernel(const dtype* x, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = sqrt(x[index]);
}
}
void Fsqrt_impl(const dtype* x, dtype* r, int size) {
hipLaunchKernelGGL(( Fsqrt_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size);
hipDeviceSynchronize();
}
__global__ void concat_kernel(const dtype *src, dtype* dst, int offset, int dim) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < dim) {
dst[offset + index] = src[index];
}
}
void concat_impl(const dtype *src, dtype* dst, int offset, int dim) {
hipLaunchKernelGGL(( concat_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, dst, offset, dim);
hipDeviceSynchronize();
}
__global__ void unconcat_kernel(const dtype *src, dtype* dst, int offset, int dim) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < dim) {
dst[index] = src[offset + index];
}
}
void unconcat_impl(const dtype *src, dtype* dst, int offset, int dim) {
hipLaunchKernelGGL(( unconcat_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, dst, offset, dim);
hipDeviceSynchronize();
}
__global__ void Ftranspose_kernel(const dtype* x, dtype* r, int dim0, int dim1, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index % dim0 * dim1 + index / dim0];
}
}
void Ftranspose_impl(const dtype* x, dtype* r, int dim0, int dim1, int size) {
hipLaunchKernelGGL(( Ftranspose_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, dim0, dim1, size);
hipDeviceSynchronize();
}
__global__ void set_col_kernel(dtype* x, int dim0, int col, int size, dtype val) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int i = index + col * dim0;
if (i < size && index < dim0) {
x[i] = val;
}
}
void set_col_impl(dtype* x, int dim0, int col, int size, dtype val) {
hipLaunchKernelGGL(( set_col_kernel), dim3((dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, dim0, col, size, val);
hipDeviceSynchronize();
}
__global__ void get_cols_kernel(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < r_size) {
int col_index = index / xdim0;
if(col_index < col_num) {
int col = cols[col_index];
int offset = index % xdim0;
int x_index = col * xdim0 + offset;
if(x_index < xdim0 * xdim1) {
r[index] = x[x_index];
}
}
}
}
void get_cols_impl(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) {
hipLaunchKernelGGL(( get_cols_kernel), dim3((r_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0,
x, r, xdim0, xdim1, r_size, cols, col_num);
}
__global__ void get_col_kernel(const dtype* x, dtype* r, int dim0, int col, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int i = index + col * dim0;
if (i < size && index < dim0) {
r[index] = x[i];
}
}
void get_col_impl(const dtype* x, dtype* r, int dim0, int col, int size) {
hipLaunchKernelGGL(( get_col_kernel), dim3((dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, dim0, col, size);
hipDeviceSynchronize();
}
__global__ void Fadd_col_kernel(dtype* x, const dtype* y, int col, int dim0, int size){
int index = threadIdx.x + blockIdx.x * blockDim.x;
int i = index + col * dim0;
if (i < size && index < dim0) {
x[i] = x[i] + y[index];
}
}
void Fadd_col_impl(dtype* x, const dtype* y, int col, int dim0, int size) {
hipLaunchKernelGGL(( Fadd_col_kernel), dim3((dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, col, dim0, size);
hipDeviceSynchronize();
}
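// Block-wide sum pooling: each block produces one output element. Threads stride over the
// n inputs (spaced `skip` apart), accumulate partial sums in shared memory, and the REDUCE
// macro folds them with a power-of-two tree reduction; thread 0 writes the result.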
template<int BLOCK_SIZE>
__global__ void Fsumpooling_kernel(
const dtype *px, int skip, int n, dtype *py) {
__shared__ dtype temp[BLOCK_SIZE];
const int bid = blockIdx.x;
const int tid = threadIdx.x;
px += bid % skip + (bid / skip) * skip * n;
temp[tid] = 0;
for (int i = tid; i < n; i += BLOCK_SIZE) temp[tid] += px[i * skip];
::__syncthreads();
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) temp[tid] += temp[tid + k]; \
::__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) py[bid] = temp[0];
}
void Fsumpooling_impl(const dtype* x, dtype* y, int n, int r, int s) {
int block_size = THREADS_PER_BLOCK;
while (block_size >> 1 >= n) block_size >>= 1;
switch (block_size) {
#define CASE(k) \
case k:hipLaunchKernelGGL(( ::Fsumpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y); break
CASE(1024);
CASE(512);
CASE(256);
CASE(128);
CASE(64);
CASE(32);
CASE(16);
CASE(8);
CASE(4);
CASE(2);
CASE(1);
#undef CASE
}
hipDeviceSynchronize();
}
template<int BLOCK_SIZE>
__global__ void Favgpooling_kernel(
const dtype *px, int skip, int n, dtype *py) {
__shared__ dtype temp[BLOCK_SIZE];
const int bid = blockIdx.x;
const int tid = threadIdx.x;
px += bid % skip + (bid / skip) * skip * n;
temp[tid] = 0;
for (int i = tid; i < n; i += BLOCK_SIZE) temp[tid] += px[i * skip];
::__syncthreads();
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) temp[tid] += temp[tid + k]; \
::__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) py[bid] = temp[0] / n;
}
void Favgpooling_impl(const dtype* x, dtype* y, int n, int r, int s) {
int block_size = THREADS_PER_BLOCK;
while (block_size >> 1 >= n) block_size >>= 1;
switch (block_size) {
#define CASE(k) \
case k:hipLaunchKernelGGL(( ::Favgpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y); break
CASE(1024);
CASE(512);
CASE(256);
CASE(128);
CASE(64);
CASE(32);
CASE(16);
CASE(8);
CASE(4);
CASE(2);
CASE(1);
#undef CASE
}
hipDeviceSynchronize();
}
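// Max pooling with argmax: besides the running maximum, each thread tracks the flat index
// of the winning element so Dmaxpooling can route the gradient back to that position.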
template<int BLOCK_SIZE>
__global__ void Fmaxpooling_kernel(
const dtype *px, int skip, int n, dtype *py, int* index) {
__shared__ dtype temp[BLOCK_SIZE];
__shared__ int temp_index[BLOCK_SIZE];
const int bid = blockIdx.x;
const int tid = threadIdx.x;
px += bid % skip + (bid / skip) * skip * n;
dtype thread_max = NEGATIVE_INFINITY;
int index_start = bid % skip + (bid / skip) * skip * n;
int index_max;
for (int i = tid; i < n; i += BLOCK_SIZE) {
if(px[i * skip] > thread_max) {
thread_max = px[i * skip];
index_max = index_start + i * skip;
}
}
temp[tid] = thread_max;
temp_index[tid] = index_max;
::__syncthreads();
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) if(temp[tid + k] > temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \
::__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];}
}
void Fmaxpooling_impl(const dtype* x, dtype* y, int n, int r, int s, int* index) {
int block_size = THREADS_PER_BLOCK;
while (block_size >> 1 >= n) block_size >>= 1;
switch (block_size) {
#define CASE(k) \
case k:hipLaunchKernelGGL(( ::Fmaxpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y, index); break
CASE(1024);
CASE(512);
CASE(256);
CASE(128);
CASE(64);
CASE(32);
CASE(16);
CASE(8);
CASE(4);
CASE(2);
CASE(1);
#undef CASE
}
hipDeviceSynchronize();
}
__global__ void Dmaxpooling_kernel(
const dtype* x, const dtype* y, const dtype* gy, dtype* gx, int* index, int dim) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i < dim) {
gx[index[i]] += gy[i];
}
}
void Dmaxpooling_impl(
const dtype* x, const dtype* y, const dtype* gy, dtype* gx, int* index, int dim) {
hipLaunchKernelGGL(( Dmaxpooling_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, gy, gx, index, dim);
hipDeviceSynchronize();
}
template<int BLOCK_SIZE>
__global__ void Fminpooling_kernel(
const dtype *px, int skip, int n, dtype *py, int* index) {
__shared__ dtype temp[BLOCK_SIZE];
__shared__ int temp_index[BLOCK_SIZE];
const int bid = blockIdx.x;
const int tid = threadIdx.x;
px += bid % skip + (bid / skip) * skip * n;
dtype thread_min = POSITIVE_INFINITY;
int index_start = bid % skip + (bid / skip) * skip * n;
int index_min;
for (int i = tid; i < n; i += BLOCK_SIZE) {
if(px[i * skip] < thread_min) {
thread_min = px[i * skip];
index_min = index_start + i * skip;
}
}
temp[tid] = thread_min;
temp_index[tid] = index_min;
::__syncthreads();
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) if(temp[tid + k] < temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \
::__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];}
}
void Fminpooling_impl(const dtype* x, dtype* y, int n, int r, int s, int* index) {
int block_size = THREADS_PER_BLOCK;
while (block_size >> 1 >= n) block_size >>= 1;
switch (block_size) {
#define CASE(k) \
case k:hipLaunchKernelGGL(( ::Fminpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y, index); break
CASE(1024);
CASE(512);
CASE(256);
CASE(128);
CASE(64);
CASE(32);
CASE(16);
CASE(8);
CASE(4);
CASE(2);
CASE(1);
#undef CASE
}
}
__global__ void Dminpooling_kernel(
const dtype* x, const dtype* y, const dtype* gy, dtype* gx, int* index, int dim) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i < dim) {
gx[index[i]] += gy[i];
}
}
void Dminpooling_impl(
const dtype* x, const dtype* y, const dtype* gy, dtype* gx, int* index, int dim) {
hipLaunchKernelGGL(( Dminpooling_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, gy, gx, index, dim);
hipDeviceSynchronize();
}
| 96f4f219810bdfc35c4a12a23a4640546c7cd44c.cu | #include "kernel.cuh"
#define THREADS_PER_BLOCK 1024
__global__ void Fadd_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] + y[index];
}
}
void Fadd_impl(const dtype* x, const dtype* y, dtype* r, int size) {
Fadd_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size);
cudaDeviceSynchronize();
}
__global__ void Fsubtract_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] - y[index];
}
}
void Fsubtract_impl(const dtype* x, const dtype* y, dtype* r, int size) {
Fsubtract_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size);
cudaDeviceSynchronize();
}
__global__ void Fmultiply_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] * y[index];
}
}
void Fmultiply_impl(const dtype* x, const dtype* y, dtype* r, int size) {
Fmultiply_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size);
cudaDeviceSynchronize();
}
__global__ void Fdivide_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] / y[index];
}
}
void Fdivide_impl(const dtype* x, const dtype* y, dtype* r, int size) {
Fdivide_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size);
cudaDeviceSynchronize();
}
__global__ void Fmultiply_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] * y;
}
}
void Fmultiply_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) {
Fmultiply_scalar_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size);
cudaDeviceSynchronize();
}
__global__ void Fadd_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] + y;
}
}
void Fadd_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) {
Fadd_scalar_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size);
cudaDeviceSynchronize();
}
__global__ void Fsquare_kernel(const dtype* x, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] * x[index];
}
}
void Fsquare_impl(const dtype* x, dtype* r, int size) {
Fsquare_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size);
cudaDeviceSynchronize();
}
__global__ void Ftanh_kernel(const dtype* x, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = tanh(x[index]);
}
}
void Ftanh_impl(const dtype* x, dtype* r, int size) {
Ftanh_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size);
cudaDeviceSynchronize();
}
__global__ void Dtanh_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = (1 + y[index]) * (1 - y[index]);
}
}
void Dtanh_impl(const dtype* x, const dtype* y, dtype* r, int size) {
Dtanh_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size);
cudaDeviceSynchronize();
}
__global__ void Fsigmoid_kernel(const dtype* x, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = 1.0 / (1.0 + exp(-x[index]));
}
}
void Fsigmoid_impl(const dtype* x, dtype* r, int size) {
Fsigmoid_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size);
cudaDeviceSynchronize();
}
__global__ void Dsigmoid_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = (1 - y[index]) * y[index];
}
}
void Dsigmoid_impl(const dtype* x, const dtype* y, dtype* r, int size) {
Dsigmoid_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size);
cudaDeviceSynchronize();
}
__global__ void Fsqrt_kernel(const dtype* x, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = sqrt(x[index]);
}
}
void Fsqrt_impl(const dtype* x, dtype* r, int size) {
Fsqrt_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size);
cudaDeviceSynchronize();
}
__global__ void concat_kernel(const dtype *src, dtype* dst, int offset, int dim) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < dim) {
dst[offset + index] = src[index];
}
}
void concat_impl(const dtype *src, dtype* dst, int offset, int dim) {
concat_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, dst, offset, dim);
cudaDeviceSynchronize();
}
__global__ void unconcat_kernel(const dtype *src, dtype* dst, int offset, int dim) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < dim) {
dst[index] = src[offset + index];
}
}
void unconcat_impl(const dtype *src, dtype* dst, int offset, int dim) {
unconcat_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, dst, offset, dim);
cudaDeviceSynchronize();
}
__global__ void Ftranspose_kernel(const dtype* x, dtype* r, int dim0, int dim1, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index % dim0 * dim1 + index / dim0];
}
}
void Ftranspose_impl(const dtype* x, dtype* r, int dim0, int dim1, int size) {
Ftranspose_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, dim1, size);
cudaDeviceSynchronize();
}
__global__ void set_col_kernel(dtype* x, int dim0, int col, int size, dtype val) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int i = index + col * dim0;
if (i < size && index < dim0) {
x[i] = val;
}
}
void set_col_impl(dtype* x, int dim0, int col, int size, dtype val) {
set_col_kernel<<<(dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, dim0, col, size, val);
cudaDeviceSynchronize();
}
__global__ void get_cols_kernel(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < r_size) {
int col_index = index / xdim0;
if(col_index < col_num) {
int col = cols[col_index];
int offset = index % xdim0;
int x_index = col * xdim0 + offset;
if(x_index < xdim0 * xdim1) {
r[index] = x[x_index];
}
}
}
}
void get_cols_impl(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) {
get_cols_kernel<<<(r_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>
(x, r, xdim0, xdim1, r_size, cols, col_num);
}
__global__ void get_col_kernel(const dtype* x, dtype* r, int dim0, int col, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int i = index + col * dim0;
if (i < size && index < dim0) {
r[index] = x[i];
}
}
void get_col_impl(const dtype* x, dtype* r, int dim0, int col, int size) {
get_col_kernel<<<(dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, col, size);
cudaDeviceSynchronize();
}
__global__ void Fadd_col_kernel(dtype* x, const dtype* y, int col, int dim0, int size){
int index = threadIdx.x + blockIdx.x * blockDim.x;
int i = index + col * dim0;
if (i < size && index < dim0) {
x[i] = x[i] + y[index];
}
}
void Fadd_col_impl(dtype* x, const dtype* y, int col, int dim0, int size) {
Fadd_col_kernel<<<(dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, col, dim0, size);
cudaDeviceSynchronize();
}
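// Block-wide sum pooling: each block produces one output element. Threads stride over the
// n inputs (spaced `skip` apart), accumulate partial sums in shared memory, and the REDUCE
// macro folds them with a power-of-two tree reduction; thread 0 writes the result.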
template<int BLOCK_SIZE>
__global__ void Fsumpooling_kernel(
const dtype *px, int skip, int n, dtype *py) {
__shared__ dtype temp[BLOCK_SIZE];
const int bid = blockIdx.x;
const int tid = threadIdx.x;
px += bid % skip + (bid / skip) * skip * n;
temp[tid] = 0;
for (int i = tid; i < n; i += BLOCK_SIZE) temp[tid] += px[i * skip];
::__syncthreads();
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) temp[tid] += temp[tid + k]; \
::__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) py[bid] = temp[0];
}
void Fsumpooling_impl(const dtype* x, dtype* y, int n, int r, int s) {
int block_size = THREADS_PER_BLOCK;
while (block_size >> 1 >= n) block_size >>= 1;
switch (block_size) {
#define CASE(k) \
case k: ::Fsumpooling_kernel<k><<<r, k>>>(x, s, n, y); break
CASE(1024);
CASE(512);
CASE(256);
CASE(128);
CASE(64);
CASE(32);
CASE(16);
CASE(8);
CASE(4);
CASE(2);
CASE(1);
#undef CASE
}
cudaDeviceSynchronize();
}
template<int BLOCK_SIZE>
__global__ void Favgpooling_kernel(
const dtype *px, int skip, int n, dtype *py) {
__shared__ dtype temp[BLOCK_SIZE];
const int bid = blockIdx.x;
const int tid = threadIdx.x;
px += bid % skip + (bid / skip) * skip * n;
temp[tid] = 0;
for (int i = tid; i < n; i += BLOCK_SIZE) temp[tid] += px[i * skip];
::__syncthreads();
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) temp[tid] += temp[tid + k]; \
::__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) py[bid] = temp[0] / n;
}
void Favgpooling_impl(const dtype* x, dtype* y, int n, int r, int s) {
int block_size = THREADS_PER_BLOCK;
while (block_size >> 1 >= n) block_size >>= 1;
switch (block_size) {
#define CASE(k) \
case k: ::Favgpooling_kernel<k><<<r, k>>>(x, s, n, y); break
CASE(1024);
CASE(512);
CASE(256);
CASE(128);
CASE(64);
CASE(32);
CASE(16);
CASE(8);
CASE(4);
CASE(2);
CASE(1);
#undef CASE
}
cudaDeviceSynchronize();
}
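// Max pooling with argmax: besides the running maximum, each thread tracks the flat index
// of the winning element so Dmaxpooling can route the gradient back to that position.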
template<int BLOCK_SIZE>
__global__ void Fmaxpooling_kernel(
const dtype *px, int skip, int n, dtype *py, int* index) {
__shared__ dtype temp[BLOCK_SIZE];
__shared__ int temp_index[BLOCK_SIZE];
const int bid = blockIdx.x;
const int tid = threadIdx.x;
px += bid % skip + (bid / skip) * skip * n;
dtype thread_max = NEGATIVE_INFINITY;
int index_start = bid % skip + (bid / skip) * skip * n;
int index_max;
for (int i = tid; i < n; i += BLOCK_SIZE) {
if(px[i * skip] > thread_max) {
thread_max = px[i * skip];
index_max = index_start + i * skip;
}
}
temp[tid] = thread_max;
temp_index[tid] = index_max;
::__syncthreads();
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) if(temp[tid + k] > temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \
::__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];}
}
void Fmaxpooling_impl(const dtype* x, dtype* y, int n, int r, int s, int* index) {
int block_size = THREADS_PER_BLOCK;
while (block_size >> 1 >= n) block_size >>= 1;
switch (block_size) {
#define CASE(k) \
case k: ::Fmaxpooling_kernel<k><<<r, k>>>(x, s, n, y, index); break
CASE(1024);
CASE(512);
CASE(256);
CASE(128);
CASE(64);
CASE(32);
CASE(16);
CASE(8);
CASE(4);
CASE(2);
CASE(1);
#undef CASE
}
cudaDeviceSynchronize();
}
__global__ void Dmaxpooling_kernel(
const dtype* x, const dtype* y, const dtype* gy, dtype* gx, int* index, int dim) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i < dim) {
gx[index[i]] += gy[i];
}
}
void Dmaxpooling_impl(
const dtype* x, const dtype* y, const dtype* gy, dtype* gx, int* index, int dim) {
Dmaxpooling_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, gy, gx, index, dim);
cudaDeviceSynchronize();
}
template<int BLOCK_SIZE>
__global__ void Fminpooling_kernel(
const dtype *px, int skip, int n, dtype *py, int* index) {
__shared__ dtype temp[BLOCK_SIZE];
__shared__ int temp_index[BLOCK_SIZE];
const int bid = blockIdx.x;
const int tid = threadIdx.x;
px += bid % skip + (bid / skip) * skip * n;
dtype thread_min = POSITIVE_INFINITY;
int index_start = bid % skip + (bid / skip) * skip * n;
int index_min;
for (int i = tid; i < n; i += BLOCK_SIZE) {
if(px[i * skip] < thread_min) {
thread_min = px[i * skip];
index_min = index_start + i * skip;
}
}
temp[tid] = thread_min;
temp_index[tid] = index_min;
::__syncthreads();
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) if(temp[tid + k] < temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \
::__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];}
}
void Fminpooling_impl(const dtype* x, dtype* y, int n, int r, int s, int* index) {
int block_size = THREADS_PER_BLOCK;
while (block_size >> 1 >= n) block_size >>= 1;
switch (block_size) {
#define CASE(k) \
case k: ::Fminpooling_kernel<k><<<r, k>>>(x, s, n, y, index); break
CASE(1024);
CASE(512);
CASE(256);
CASE(128);
CASE(64);
CASE(32);
CASE(16);
CASE(8);
CASE(4);
CASE(2);
CASE(1);
#undef CASE
}
}
__global__ void Dminpooling_kernel(
const dtype* x, const dtype* y, const dtype* gy, dtype* gx, int* index, int dim) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i < dim) {
gx[index[i]] += gy[i];
}
}
void Dminpooling_impl(
const dtype* x, const dtype* y, const dtype* gy, dtype* gx, int* index, int dim) {
Dminpooling_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, gy, gx, index, dim);
cudaDeviceSynchronize();
}
|
f1f4e1323e5d4404db44664947e6d07498fc850b.hip | // !!! This is a file automatically generated by hipify!!!
#include "edge.cuh"
#include <hip/hip_runtime.h>
edge_t* edge_init_gpu(int _id, int _n_start, int _n_end){
edge_t* e;
hipMallocManaged(&e, sizeof(edge_t));
e->id = _id;
e->n_start = _n_start;
e->n_end = _n_end;
return e;
}
| f1f4e1323e5d4404db44664947e6d07498fc850b.cu | #include "edge.cuh"
#include <cuda.h>
edge_t* edge_init_gpu(int _id, int _n_start, int _n_end){
edge_t* e;
cudaMallocManaged(&e, sizeof(edge_t));
e->id = _id;
e->n_start = _n_start;
e->n_end = _n_end;
return e;
}
|
aa246e4a2b272992f3404ae3a49d4b7a09bf7402.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "init.h"
#include <stdio.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
void callCudaKernel()
{
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
hipMalloc(&d_x, N*sizeof(float));
hipMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
// Perform SAXPY on 1M elements
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
hipFree(d_x);
hipFree(d_y);
free(x);
free(y);
}
| aa246e4a2b272992f3404ae3a49d4b7a09bf7402.cu | #include "init.h"
#include <stdio.h>
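// SAXPY: each thread updates one element, y[i] = a * x[i] + y[i].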
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
void callCudaKernel()
{
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
// Perform SAXPY on 1M elements
saxpy<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y);
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
}
|
5cbab68085201273c1e89f7b627672dc9de2c268.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "AlignColorMap.h"
#include "CudaHandleError.h"
#include "Parameters.h"
__global__ void kernelAlignProcess(uchar4* alignedColor, float* depth, uchar4* color, Intrinsics depthIntrinsics, Intrinsics colorIntrinsics, Transformation depth2color) {
const int MAX_SHIFT = DEPTH_W >> 4;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ int2 colorPixel_shared[COLOR_W];
for (int i = threadIdx.x; i < COLOR_W; i += blockDim.x) {
float2 pixelFloat = make_float2((float)i * DEPTH_W / COLOR_W, (float)y * DEPTH_H / COLOR_H);
int2 pixel = make_int2((int)pixelFloat.x, (int)pixelFloat.y);
if (0 <= pixel.x && pixel.x < DEPTH_W && 0 <= pixel.y && pixel.y < DEPTH_H) {
float3 pos = depthIntrinsics.deproject(pixelFloat, depth[pixel.y * DEPTH_W + pixel.x]);
pos = depth2color.translate(pos);
int2 colorPixel = colorIntrinsics.translate(pos);
colorPixel_shared[i] = colorPixel;
} else {
colorPixel_shared[i] = make_int2(-1, -1);
}
}
__syncthreads();
if (x < COLOR_W && y < COLOR_H) {
uchar4 result = uchar4();
int2 colorPixel = colorPixel_shared[x];
if (0 <= colorPixel.x && colorPixel.x < COLOR_W && 0 <= colorPixel.y && colorPixel.y < COLOR_H) {
result = color[colorPixel.y * COLOR_W + colorPixel.x];
}
for (int shift = 1; shift <= MAX_SHIFT; shift++) {
if (x - shift >= 0 && colorPixel_shared[x - shift].x > colorPixel.x) {
result = uchar4();
break;
}
}
__syncthreads();
alignedColor[y * COLOR_W + x] = result;
}
}
extern "C"
void cudaAlignInit(RGBQUAD*& alignedColor_device) {
HANDLE_ERROR(hipMalloc(&alignedColor_device, MAX_CAMERAS * COLOR_H * COLOR_W * sizeof(RGBQUAD)));
}
extern "C"
void cudaAlignClean(RGBQUAD*& alignedColor_device) {
HANDLE_ERROR(hipFree(alignedColor_device));
}
extern "C"
void cudaAlignProcess(int cameras, RGBQUAD* alignedColor_device, float* depth_device, RGBQUAD* color_device, Intrinsics* depthIntrinsics, Intrinsics* colorIntrinsics, Transformation* depth2color) {
dim3 threadsPerBlock = dim3(512, 1);
dim3 blocksPerGrid = dim3((COLOR_W + threadsPerBlock.x - 1) / threadsPerBlock.x, (COLOR_H + threadsPerBlock.y - 1) / threadsPerBlock.y);
for (int i = 0; i < cameras; i++) {
hipLaunchKernelGGL(( kernelAlignProcess), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, (uchar4*)alignedColor_device + i * COLOR_H * COLOR_W, depth_device + i * DEPTH_H * DEPTH_W, (uchar4*)color_device + i * COLOR_H * COLOR_W, depthIntrinsics[i], colorIntrinsics[i], depth2color[i]);
hipGetLastError();
}
hipDeviceSynchronize();
} | 5cbab68085201273c1e89f7b627672dc9de2c268.cu | #include "AlignColorMap.h"
#include "CudaHandleError.h"
#include "Parameters.h"
__global__ void kernelAlignProcess(uchar4* alignedColor, float* depth, uchar4* color, Intrinsics depthIntrinsics, Intrinsics colorIntrinsics, Transformation depth2color) {
const int MAX_SHIFT = DEPTH_W >> 4;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ int2 colorPixel_shared[COLOR_W];
for (int i = threadIdx.x; i < COLOR_W; i += blockDim.x) {
float2 pixelFloat = make_float2((float)i * DEPTH_W / COLOR_W, (float)y * DEPTH_H / COLOR_H);
int2 pixel = make_int2((int)pixelFloat.x, (int)pixelFloat.y);
if (0 <= pixel.x && pixel.x < DEPTH_W && 0 <= pixel.y && pixel.y < DEPTH_H) {
float3 pos = depthIntrinsics.deproject(pixelFloat, depth[pixel.y * DEPTH_W + pixel.x]);
pos = depth2color.translate(pos);
int2 colorPixel = colorIntrinsics.translate(pos);
colorPixel_shared[i] = colorPixel;
} else {
colorPixel_shared[i] = make_int2(-1, -1);
}
}
__syncthreads();
if (x < COLOR_W && y < COLOR_H) {
uchar4 result = uchar4();
int2 colorPixel = colorPixel_shared[x];
if (0 <= colorPixel.x && colorPixel.x < COLOR_W && 0 <= colorPixel.y && colorPixel.y < COLOR_H) {
result = color[colorPixel.y * COLOR_W + colorPixel.x];
}
for (int shift = 1; shift <= MAX_SHIFT; shift++) {
if (x - shift >= 0 && colorPixel_shared[x - shift].x > colorPixel.x) {
result = uchar4();
break;
}
}
__syncthreads();
alignedColor[y * COLOR_W + x] = result;
}
}
extern "C"
void cudaAlignInit(RGBQUAD*& alignedColor_device) {
HANDLE_ERROR(cudaMalloc(&alignedColor_device, MAX_CAMERAS * COLOR_H * COLOR_W * sizeof(RGBQUAD)));
}
extern "C"
void cudaAlignClean(RGBQUAD*& alignedColor_device) {
HANDLE_ERROR(cudaFree(alignedColor_device));
}
extern "C"
void cudaAlignProcess(int cameras, RGBQUAD* alignedColor_device, float* depth_device, RGBQUAD* color_device, Intrinsics* depthIntrinsics, Intrinsics* colorIntrinsics, Transformation* depth2color) {
dim3 threadsPerBlock = dim3(512, 1);
dim3 blocksPerGrid = dim3((COLOR_W + threadsPerBlock.x - 1) / threadsPerBlock.x, (COLOR_H + threadsPerBlock.y - 1) / threadsPerBlock.y);
for (int i = 0; i < cameras; i++) {
kernelAlignProcess<<<blocksPerGrid, threadsPerBlock>>>((uchar4*)alignedColor_device + i * COLOR_H * COLOR_W, depth_device + i * DEPTH_H * DEPTH_W, (uchar4*)color_device + i * COLOR_H * COLOR_W, depthIntrinsics[i], colorIntrinsics[i], depth2color[i]);
cudaGetLastError();
}
cudaThreadSynchronize();
} |
e6fbcbfd5dc7e4c6a6b17a7dd04f4cacd10f539e.hip | // !!! This is a file automatically generated by hipify!!!
#include <string.h>
#include <gauge_field.h>
#include <face_quda.h>
#include <typeinfo>
#include <misc_helpers.h>
#include <blas_quda.h>
namespace quda {
cudaGaugeField::cudaGaugeField(const GaugeFieldParam ¶m) :
GaugeField(param), gauge(0), even(0), odd(0), backed_up(false)
{
if ((order == QUDA_QDP_GAUGE_ORDER || order == QUDA_QDPJIT_GAUGE_ORDER) &&
create != QUDA_REFERENCE_FIELD_CREATE) {
errorQuda("QDP ordering only supported for reference fields");
}
if (order == QUDA_QDP_GAUGE_ORDER || order == QUDA_MILC_GAUGE_ORDER ||
order == QUDA_TIFR_GAUGE_ORDER || order == QUDA_BQCD_GAUGE_ORDER ||
order == QUDA_CPS_WILSON_GAUGE_ORDER)
errorQuda("Field ordering %d presently disabled for this type", order);
#ifdef MULTI_GPU
if (link_type != QUDA_ASQTAD_MOM_LINKS &&
ghostExchange == QUDA_GHOST_EXCHANGE_PAD &&
isNative()) {
bool pad_check = true;
for (int i=0; i<nDim; i++)
if (pad < nFace*surfaceCB[i]) pad_check = false;
if (!pad_check)
errorQuda("cudaGaugeField being constructed with insufficient padding\n");
}
#endif
if(create != QUDA_NULL_FIELD_CREATE &&
create != QUDA_ZERO_FIELD_CREATE &&
create != QUDA_REFERENCE_FIELD_CREATE){
errorQuda("ERROR: create type(%d) not supported yet\n", create);
}
if (create != QUDA_REFERENCE_FIELD_CREATE) {
gauge = device_malloc(bytes);
if (create == QUDA_ZERO_FIELD_CREATE) hipMemset(gauge, 0, bytes);
} else {
gauge = param.gauge;
}
if ( !isNative() ) {
for (int i=0; i<nDim; i++) {
size_t nbytes = nFace * surface[i] * nInternal * precision;
ghost[i] = nbytes ? device_malloc(nbytes) : NULL;
}
}
if (ghostExchange == QUDA_GHOST_EXCHANGE_PAD) {
if (create == QUDA_REFERENCE_FIELD_CREATE) exchangeGhost();
}
even = gauge;
odd = (char*)gauge + bytes/2;
#ifdef USE_TEXTURE_OBJECTS
createTexObject(evenTex, even);
createTexObject(oddTex, odd);
if(reconstruct == QUDA_RECONSTRUCT_13 || reconstruct == QUDA_RECONSTRUCT_9)
{ // Create texture objects for the phases
const int isPhase = 1;
createTexObject(evenPhaseTex, (char*)even + phase_offset, isPhase);
createTexObject(oddPhaseTex, (char*)odd + phase_offset, isPhase);
}
#endif
}
#ifdef USE_TEXTURE_OBJECTS
void cudaGaugeField::createTexObject(hipTextureObject_t &tex, void *field, int isPhase) {
if( isNative() ){
// create the texture for the field components
hipChannelFormatDesc desc;
memset(&desc, 0, sizeof(hipChannelFormatDesc));
if (precision == QUDA_SINGLE_PRECISION) desc.f = hipChannelFormatKindFloat;
else desc.f = hipChannelFormatKindSigned; // half is short, double is int2
if(isPhase){
if(precision == QUDA_DOUBLE_PRECISION){
desc.x = 8*sizeof(int);
desc.y = 8*sizeof(int);
desc.z = 0;
desc.w = 0;
}else{
desc.x = 8*precision;
desc.y = desc.z = desc.w = 0;
}
}else{
// always four components regardless of precision
if (precision == QUDA_DOUBLE_PRECISION) {
desc.x = 8*sizeof(int);
desc.y = 8*sizeof(int);
desc.z = 8*sizeof(int);
desc.w = 8*sizeof(int);
} else {
desc.x = 8*precision;
desc.y = 8*precision;
desc.z = (reconstruct == 18) ? 0 : 8*precision; // float2 or short2 for 18 reconstruct
desc.w = (reconstruct == 18) ? 0 : 8*precision;
}
}
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = field;
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = isPhase ? phase_bytes/2 : (bytes-phase_bytes)/2;
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
if (precision == QUDA_HALF_PRECISION) texDesc.readMode = hipReadModeNormalizedFloat;
else texDesc.readMode = hipReadModeElementType;
hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
checkCudaError();
}
}
void cudaGaugeField::destroyTexObject() {
if( isNative() ){
hipDestroyTextureObject(evenTex);
hipDestroyTextureObject(oddTex);
if(reconstruct == QUDA_RECONSTRUCT_9 || reconstruct == QUDA_RECONSTRUCT_13){
hipDestroyTextureObject(evenPhaseTex);
hipDestroyTextureObject(oddPhaseTex);
}
checkCudaError();
}
}
#endif
cudaGaugeField::~cudaGaugeField()
{
#ifdef USE_TEXTURE_OBJECTS
destroyTexObject();
#endif
if (create != QUDA_REFERENCE_FIELD_CREATE) {
if (gauge) device_free(gauge);
}
if ( !isNative() ) {
for (int i=0; i<nDim; i++) {
if (ghost[i]) device_free(ghost[i]);
}
}
}
// This does the exchange of the gauge field ghost zone and places it
// into the ghost array.
void cudaGaugeField::exchangeGhost() {
if (ghostExchange != QUDA_GHOST_EXCHANGE_PAD)
errorQuda("Cannot call exchangeGhost with ghostExchange=%d",
ghostExchange);
if (geometry != QUDA_VECTOR_GEOMETRY)
errorQuda("Cannot exchange for %d geometry gauge field", geometry);
void *ghost_[QUDA_MAX_DIM];
void *send[QUDA_MAX_DIM];
for (int d=0; d<nDim; d++) {
ghost_[d] = isNative() ? device_malloc(nFace*surface[d]*nInternal*precision) : ghost[d];
send[d] = device_malloc(nFace*surface[d]*nInternal*precision);
}
// get the links into contiguous buffers
extractGaugeGhost(*this, send);
// communicate between nodes
exchange(ghost_, send);
for (int d=0; d<nDim; d++) device_free(send[d]);
if (isNative()) {
// copy from ghost into the padded region in gauge
copyGenericGauge(*this, *this, QUDA_CUDA_FIELD_LOCATION, 0, 0, 0, ghost_, 1);
for (int d=0; d<nDim; d++) device_free(ghost_[d]);
}
}
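  // Exchange the extended (width-R) halo regions: pack the faces on the device, exchange them
  // with neighbouring ranks (staged through pinned host buffers unless GPU_COMMS is enabled),
  // then inject the received faces back into the extended field. Unpartitioned dimensions with
  // no_comms_fill simply swap their own faces locally.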
void cudaGaugeField::exchangeExtendedGhost(const int *R, bool no_comms_fill) {
void *send[QUDA_MAX_DIM];
void *recv[QUDA_MAX_DIM];
void *send_d[QUDA_MAX_DIM];
void *recv_d[QUDA_MAX_DIM];
size_t bytes[QUDA_MAX_DIM];
for (int d=0; d<nDim; d++) {
if (!commDimPartitioned(d) && !no_comms_fill) continue;
// store both parities and directions in each
bytes[d] = surface[d] * R[d] * geometry * nInternal * precision;
send_d[d] = device_malloc(2 * bytes[d]);
recv_d[d] = device_malloc(2 * bytes[d]);
}
#ifndef GPU_COMMS
void *send_h[QUDA_MAX_DIM];
void *recv_h[QUDA_MAX_DIM];
size_t total_bytes = 0;
for (int d=0; d<nDim; d++) {
if (!commDimPartitioned(d)) continue;
total_bytes += 4*bytes[d]; // (2 from send/recv) x (2 from fwd/back)
}
resizeBufferPinned(total_bytes,0);
size_t offset = 0;
for (int d=0; d<nDim; d++) {
if (!commDimPartitioned(d)) continue;
recv_h[d] = static_cast<char*>(bufferPinned[0]) + offset;
send_h[d] = static_cast<char*>(recv_h[d]) + 2*bytes[d];
offset += 4*bytes[d];
}
#endif
// do the exchange
MsgHandle *mh_recv_back[QUDA_MAX_DIM];
MsgHandle *mh_recv_fwd[QUDA_MAX_DIM];
MsgHandle *mh_send_fwd[QUDA_MAX_DIM];
MsgHandle *mh_send_back[QUDA_MAX_DIM];
for (int d=0; d<nDim; d++) {
if (!commDimPartitioned(d)) continue;
#ifdef GPU_COMMS
recv[d] = recv_d[d];
send[d] = send_d[d];
#else
recv[d] = recv_h[d];
send[d] = send_h[d];
#endif
// look into storing these for later
mh_recv_back[d] = comm_declare_receive_relative(recv[d], d, -1, bytes[d]);
mh_recv_fwd[d] = comm_declare_receive_relative(static_cast<char*>(recv[d])+bytes[d],
d, +1, bytes[d]);
mh_send_back[d] = comm_declare_send_relative(send[d], d, -1, bytes[d]);
mh_send_fwd[d] = comm_declare_send_relative(static_cast<char*>(send[d])+bytes[d],
d, +1, bytes[d]);
}
for (int d=0; d<nDim; d++) {
if (!commDimPartitioned(d) && !no_comms_fill) continue;
// FIXME why does this break if the order is switched?
// prepost the receives
if (commDimPartitioned(d)) {
comm_start(mh_recv_fwd[d]);
comm_start(mh_recv_back[d]);
}
//extract into a contiguous buffer
extractExtendedGaugeGhost(*this, d, R, send_d, true);
if (commDimPartitioned(d)) {
// pipeline the forwards and backwards sending
#ifndef GPU_COMMS
hipMemcpyAsync(send_h[d], send_d[d], bytes[d], hipMemcpyDeviceToHost, streams[0]);
hipMemcpyAsync(static_cast<char*>(send_h[d])+bytes[d],
static_cast<char*>(send_d[d])+bytes[d], bytes[d], hipMemcpyDeviceToHost, streams[1]);
#endif
#ifndef GPU_COMMS
hipStreamSynchronize(streams[0]);
#endif
comm_start(mh_send_back[d]);
#ifndef GPU_COMMS
hipStreamSynchronize(streams[1]);
#endif
comm_start(mh_send_fwd[d]);
// forwards recv
comm_wait(mh_send_back[d]);
comm_wait(mh_recv_fwd[d]);
#ifndef GPU_COMMS
hipMemcpyAsync(static_cast<char*>(recv_d[d])+bytes[d],
static_cast<char*>(recv_h[d])+bytes[d], bytes[d], hipMemcpyHostToDevice, streams[0]);
#endif
// backwards recv
comm_wait(mh_send_fwd[d]);
comm_wait(mh_recv_back[d]);
#ifndef GPU_COMMS
hipMemcpyAsync(recv_d[d], recv_h[d], bytes[d], hipMemcpyHostToDevice, streams[1]);
#endif
} else { // if just doing a local exchange to fill halo then need to swap faces
hipMemcpy(static_cast<char*>(recv_d[d])+bytes[d], send_d[d], bytes[d], hipMemcpyDeviceToDevice);
hipMemcpy(recv_d[d], static_cast<char*>(send_d[d])+bytes[d], bytes[d], hipMemcpyDeviceToDevice);
}
// inject back into the gauge field
extractExtendedGaugeGhost(*this, d, R, recv_d, false);
}
for (int d=0; d<nDim; d++) {
if (!commDimPartitioned(d) && !no_comms_fill) continue;
if (commDimPartitioned(d)) {
comm_free(mh_send_fwd[d]);
comm_free(mh_send_back[d]);
comm_free(mh_recv_back[d]);
comm_free(mh_recv_fwd[d]);
}
device_free(send_d[d]);
device_free(recv_d[d]);
}
}
void cudaGaugeField::setGauge(void *gauge_)
{
if(create != QUDA_REFERENCE_FIELD_CREATE) {
errorQuda("Setting gauge pointer is only allowed when create="
"QUDA_REFERENCE_FIELD_CREATE type\n");
}
gauge = gauge_;
}
void cudaGaugeField::copy(const GaugeField &src) {
if (this == &src) return;
checkField(src);
if (link_type == QUDA_ASQTAD_FAT_LINKS) {
fat_link_max = src.LinkMax();
if (precision == QUDA_HALF_PRECISION && fat_link_max == 0.0)
errorQuda("fat_link_max has not been computed");
} else {
fat_link_max = 1.0;
}
if (typeid(src) == typeid(cudaGaugeField)) {
// copy field and ghost zone into this field
copyGenericGauge(*this, src, QUDA_CUDA_FIELD_LOCATION, gauge,
static_cast<const cudaGaugeField&>(src).gauge);
} else if (typeid(src) == typeid(cpuGaugeField)) {
LatticeField::resizeBufferPinned(bytes,0);
// copy field and ghost zone into bufferPinned
copyGenericGauge(*this, src, QUDA_CPU_FIELD_LOCATION, bufferPinned[0],
static_cast<const cpuGaugeField&>(src).gauge);
// this copies over both even and odd
hipMemcpy(gauge, bufferPinned[0], bytes, hipMemcpyHostToDevice);
} else {
errorQuda("Invalid gauge field type");
}
// if we have copied from a source without a pad then we need to exchange
if (ghostExchange == QUDA_GHOST_EXCHANGE_PAD &&
src.GhostExchange() != QUDA_GHOST_EXCHANGE_PAD) {
exchangeGhost();
}
checkCudaError();
}
void cudaGaugeField::loadCPUField(const cpuGaugeField &cpu, const QudaFieldLocation &pack_location)
{
if (pack_location == QUDA_CUDA_FIELD_LOCATION) {
if (cpu.Order() == QUDA_MILC_GAUGE_ORDER ||
cpu.Order() == QUDA_CPS_WILSON_GAUGE_ORDER) {
resizeBufferPinned(cpu.Bytes(),0);
memcpy(bufferPinned[0], cpu.Gauge_p(), cpu.Bytes());
// run kernel directly using host-mapped input data
void *bufferPinnedMapped;
hipHostGetDevicePointer(&bufferPinnedMapped, bufferPinned[0], 0);
copyGenericGauge(*this, cpu, QUDA_CUDA_FIELD_LOCATION, gauge, bufferPinnedMapped);
} else {
errorQuda("Not implemented for order %d", cpu.Order());
}
} else if (pack_location == QUDA_CPU_FIELD_LOCATION) {
copy(cpu);
} else {
errorQuda("Invalid pack location %d", pack_location);
}
}
/*
Copies the device gauge field to the host.
- no reconstruction support
- device data is always Float2 ordered
- host data is a 1-dimensional array (MILC ordered)
- no support for half precision
- input and output precisions must match
*/
template<typename FloatN, typename Float>
static void storeGaugeField(Float* cpuGauge, FloatN *gauge, int bytes, int Nint,
int volumeCB, int stride, QudaPrecision prec)
{
hipStream_t streams[2];
for (int i=0; i<2; i++) hipStreamCreate(&streams[i]);
FloatN *even = gauge;
FloatN *odd = (FloatN*)((char*)gauge + bytes/2);
size_t datalen = 4*2*volumeCB*Nint*sizeof(Float); // both parities
void *unpacked = device_malloc(datalen);
void *unpackedEven = unpacked;
void *unpackedOdd = (char*)unpacked + datalen/2;
//unpack even data kernel
link_format_gpu_to_cpu((void*)unpackedEven, (void*)even, volumeCB, stride, prec, streams[0]);
#ifdef GPU_DIRECT
hipMemcpyAsync(cpuGauge, unpackedEven, datalen/2, hipMemcpyDeviceToHost, streams[0]);
#else
hipMemcpy(cpuGauge, unpackedEven, datalen/2, hipMemcpyDeviceToHost);
#endif
//unpack odd data kernel
link_format_gpu_to_cpu((void*)unpackedOdd, (void*)odd, volumeCB, stride, prec, streams[1]);
#ifdef GPU_DIRECT
hipMemcpyAsync(cpuGauge + 4*volumeCB*Nint, unpackedOdd, datalen/2, hipMemcpyDeviceToHost, streams[1]);
for(int i=0; i<2; i++) hipStreamSynchronize(streams[i]);
#else
hipMemcpy(cpuGauge + 4*volumeCB*Nint, unpackedOdd, datalen/2, hipMemcpyDeviceToHost);
#endif
device_free(unpacked);
for(int i=0; i<2; i++) hipStreamDestroy(streams[i]);
}
void cudaGaugeField::saveCPUField(cpuGaugeField &cpu, const QudaFieldLocation &pack_location) const
{
// FIXME use the generic copying for the below copying
// do device-side reordering then copy
if (pack_location == QUDA_CUDA_FIELD_LOCATION) {
// check parameters are suitable for device-side packing
if (precision != cpu.Precision())
errorQuda("cpu precision %d and cuda precision %d must be the same",
cpu.Precision(), precision);
if (reconstruct != QUDA_RECONSTRUCT_NO) errorQuda("Only no reconstruction supported");
if (order != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Only QUDA_FLOAT2_GAUGE_ORDER supported");
if (cpu.Order() != QUDA_MILC_GAUGE_ORDER) errorQuda("Only QUDA_MILC_GAUGE_ORDER supported");
// internal degrees of freedom
if (precision == QUDA_DOUBLE_PRECISION){
storeGaugeField((double*)cpu.gauge, (double2*)gauge, bytes, nInternal, volumeCB, stride, precision);
} else if (precision == QUDA_SINGLE_PRECISION){
storeGaugeField((float*)cpu.gauge, (float2*)gauge, bytes, nInternal, volumeCB, stride, precision);
} else {
errorQuda("Half precision not supported");
}
} else if (pack_location == QUDA_CPU_FIELD_LOCATION) { // do copy then host-side reorder
resizeBufferPinned(bytes,0);
// this copies over both even and odd
hipMemcpy(bufferPinned[0], gauge, bytes, hipMemcpyDeviceToHost);
checkCudaError();
copyGenericGauge(cpu, *this, QUDA_CPU_FIELD_LOCATION, cpu.gauge, bufferPinned[0]);
} else {
errorQuda("Invalid pack location %d", pack_location);
}
}
void cudaGaugeField::backup() const {
if (backed_up) errorQuda("Gauge field already backed up");
backup_h = new char[bytes];
hipMemcpy(backup_h, gauge, bytes, hipMemcpyDeviceToHost);
checkCudaError();
backed_up = true;
}
void cudaGaugeField::restore() {
if (!backed_up) errorQuda("Cannot restore since not backed up");
hipMemcpy(gauge, backup_h, bytes, hipMemcpyHostToDevice);
delete []backup_h;
checkCudaError();
backed_up = false;
}
void cudaGaugeField::zero() {
hipMemset(gauge, 0, bytes);
}
void setGhostSpinor(bool value);
ColorSpinorParam colorSpinorParam(const cudaGaugeField &a) {
if (a.FieldOrder() == QUDA_QDP_GAUGE_ORDER ||
a.FieldOrder() == QUDA_QDPJIT_GAUGE_ORDER)
errorQuda("Not implemented for this order");
if (a.LinkType() == QUDA_COARSE_LINKS)
errorQuda("Not implemented for this link type");
int spin = 0;
switch (a.Geometry()) {
case QUDA_SCALAR_GEOMETRY:
spin = 1;
break;
case QUDA_VECTOR_GEOMETRY:
spin = a.Ndim();
break;
default:
errorQuda("Unsupported field geometry %d", a.Geometry());
}
if (a.Precision() == QUDA_HALF_PRECISION)
errorQuda("Casting a cudaGaugeField into cudaColorSpinorField not possible in half precision");
if (a.Reconstruct() == QUDA_RECONSTRUCT_13 || a.Reconstruct() == QUDA_RECONSTRUCT_9)
errorQuda("Unsupported field reconstruct %d", a.Reconstruct());
ColorSpinorParam spinor_param;
spinor_param.nColor = a.Reconstruct()/2;
spinor_param.nSpin = spin;
spinor_param.nDim = a.Ndim();
for (int d=0; d<a.Ndim(); d++) spinor_param.x[d] = a.X()[d];
spinor_param.precision = a.Precision();
spinor_param.pad = a.Pad();
spinor_param.siteSubset = QUDA_FULL_SITE_SUBSET;
spinor_param.siteOrder = QUDA_EVEN_ODD_SITE_ORDER;
spinor_param.fieldOrder = (a.Precision() == QUDA_DOUBLE_PRECISION || spinor_param.nSpin == 1) ?
QUDA_FLOAT2_FIELD_ORDER : QUDA_FLOAT4_FIELD_ORDER;
spinor_param.gammaBasis = QUDA_UKQCD_GAMMA_BASIS;
spinor_param.create = QUDA_REFERENCE_FIELD_CREATE;
spinor_param.v = (void*)a.Gauge_p();
return spinor_param;
}
// Return the L2 norm squared of the gauge field
double norm2(const cudaGaugeField &a) {
// quick hack to disable ghost zone creation which otherwise breaks this mapping on multi-gpu
setGhostSpinor(false);
cudaColorSpinorField b(colorSpinorParam(a));
setGhostSpinor(true);
return blas::norm2(b);
}
// Return the L1 norm of the gauge field
double norm1(const cudaGaugeField &a) {
// quick hack to disable ghost zone creation which otherwise breaks this mapping on multi-gpu
setGhostSpinor(false);
cudaColorSpinorField b(colorSpinorParam(a));
setGhostSpinor(true);
return blas::norm1(b);
}
} // namespace quda
| e6fbcbfd5dc7e4c6a6b17a7dd04f4cacd10f539e.cu | #include <string.h>
#include <gauge_field.h>
#include <face_quda.h>
#include <typeinfo>
#include <misc_helpers.h>
#include <blas_quda.h>
namespace quda {
cudaGaugeField::cudaGaugeField(const GaugeFieldParam ¶m) :
GaugeField(param), gauge(0), even(0), odd(0), backed_up(false)
{
if ((order == QUDA_QDP_GAUGE_ORDER || order == QUDA_QDPJIT_GAUGE_ORDER) &&
create != QUDA_REFERENCE_FIELD_CREATE) {
errorQuda("QDP ordering only supported for reference fields");
}
if (order == QUDA_QDP_GAUGE_ORDER || order == QUDA_MILC_GAUGE_ORDER ||
order == QUDA_TIFR_GAUGE_ORDER || order == QUDA_BQCD_GAUGE_ORDER ||
order == QUDA_CPS_WILSON_GAUGE_ORDER)
errorQuda("Field ordering %d presently disabled for this type", order);
#ifdef MULTI_GPU
if (link_type != QUDA_ASQTAD_MOM_LINKS &&
ghostExchange == QUDA_GHOST_EXCHANGE_PAD &&
isNative()) {
bool pad_check = true;
for (int i=0; i<nDim; i++)
if (pad < nFace*surfaceCB[i]) pad_check = false;
if (!pad_check)
errorQuda("cudaGaugeField being constructed with insufficient padding\n");
}
#endif
if(create != QUDA_NULL_FIELD_CREATE &&
create != QUDA_ZERO_FIELD_CREATE &&
create != QUDA_REFERENCE_FIELD_CREATE){
errorQuda("ERROR: create type(%d) not supported yet\n", create);
}
if (create != QUDA_REFERENCE_FIELD_CREATE) {
gauge = device_malloc(bytes);
if (create == QUDA_ZERO_FIELD_CREATE) cudaMemset(gauge, 0, bytes);
} else {
gauge = param.gauge;
}
if ( !isNative() ) {
for (int i=0; i<nDim; i++) {
size_t nbytes = nFace * surface[i] * nInternal * precision;
ghost[i] = nbytes ? device_malloc(nbytes) : NULL;
}
}
if (ghostExchange == QUDA_GHOST_EXCHANGE_PAD) {
if (create == QUDA_REFERENCE_FIELD_CREATE) exchangeGhost();
}
even = gauge;
odd = (char*)gauge + bytes/2;
#ifdef USE_TEXTURE_OBJECTS
createTexObject(evenTex, even);
createTexObject(oddTex, odd);
if(reconstruct == QUDA_RECONSTRUCT_13 || reconstruct == QUDA_RECONSTRUCT_9)
{ // Create texture objects for the phases
const int isPhase = 1;
createTexObject(evenPhaseTex, (char*)even + phase_offset, isPhase);
createTexObject(oddPhaseTex, (char*)odd + phase_offset, isPhase);
}
#endif
}
#ifdef USE_TEXTURE_OBJECTS
void cudaGaugeField::createTexObject(cudaTextureObject_t &tex, void *field, int isPhase) {
if( isNative() ){
// create the texture for the field components
cudaChannelFormatDesc desc;
memset(&desc, 0, sizeof(cudaChannelFormatDesc));
if (precision == QUDA_SINGLE_PRECISION) desc.f = cudaChannelFormatKindFloat;
else desc.f = cudaChannelFormatKindSigned; // half is short, double is int2
if(isPhase){
if(precision == QUDA_DOUBLE_PRECISION){
desc.x = 8*sizeof(int);
desc.y = 8*sizeof(int);
desc.z = 0;
desc.w = 0;
}else{
desc.x = 8*precision;
desc.y = desc.z = desc.w = 0;
}
}else{
// always four components regardless of precision
if (precision == QUDA_DOUBLE_PRECISION) {
desc.x = 8*sizeof(int);
desc.y = 8*sizeof(int);
desc.z = 8*sizeof(int);
desc.w = 8*sizeof(int);
} else {
desc.x = 8*precision;
desc.y = 8*precision;
desc.z = (reconstruct == 18) ? 0 : 8*precision; // float2 or short2 for 18 reconstruct
desc.w = (reconstruct == 18) ? 0 : 8*precision;
}
}
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = field;
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = isPhase ? phase_bytes/2 : (bytes-phase_bytes)/2;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
if (precision == QUDA_HALF_PRECISION) texDesc.readMode = cudaReadModeNormalizedFloat;
else texDesc.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
checkCudaError();
}
}
void cudaGaugeField::destroyTexObject() {
if( isNative() ){
cudaDestroyTextureObject(evenTex);
cudaDestroyTextureObject(oddTex);
if(reconstruct == QUDA_RECONSTRUCT_9 || reconstruct == QUDA_RECONSTRUCT_13){
cudaDestroyTextureObject(evenPhaseTex);
cudaDestroyTextureObject(oddPhaseTex);
}
checkCudaError();
}
}
#endif
cudaGaugeField::~cudaGaugeField()
{
#ifdef USE_TEXTURE_OBJECTS
destroyTexObject();
#endif
if (create != QUDA_REFERENCE_FIELD_CREATE) {
if (gauge) device_free(gauge);
}
if ( !isNative() ) {
for (int i=0; i<nDim; i++) {
if (ghost[i]) device_free(ghost[i]);
}
}
}
// This does the exchange of the gauge field ghost zone and places it
// into the ghost array.
void cudaGaugeField::exchangeGhost() {
if (ghostExchange != QUDA_GHOST_EXCHANGE_PAD)
errorQuda("Cannot call exchangeGhost with ghostExchange=%d",
ghostExchange);
if (geometry != QUDA_VECTOR_GEOMETRY)
errorQuda("Cannot exchange for %d geometry gauge field", geometry);
void *ghost_[QUDA_MAX_DIM];
void *send[QUDA_MAX_DIM];
for (int d=0; d<nDim; d++) {
ghost_[d] = isNative() ? device_malloc(nFace*surface[d]*nInternal*precision) : ghost[d];
send[d] = device_malloc(nFace*surface[d]*nInternal*precision);
}
// get the links into contiguous buffers
extractGaugeGhost(*this, send);
// communicate between nodes
exchange(ghost_, send);
for (int d=0; d<nDim; d++) device_free(send[d]);
if (isNative()) {
// copy from ghost into the padded region in gauge
copyGenericGauge(*this, *this, QUDA_CUDA_FIELD_LOCATION, 0, 0, 0, ghost_, 1);
for (int d=0; d<nDim; d++) device_free(ghost_[d]);
}
}
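  // Exchange the extended (width-R) halo regions: pack the faces on the device, exchange them
  // with neighbouring ranks (staged through pinned host buffers unless GPU_COMMS is enabled),
  // then inject the received faces back into the extended field. Unpartitioned dimensions with
  // no_comms_fill simply swap their own faces locally.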
void cudaGaugeField::exchangeExtendedGhost(const int *R, bool no_comms_fill) {
void *send[QUDA_MAX_DIM];
void *recv[QUDA_MAX_DIM];
void *send_d[QUDA_MAX_DIM];
void *recv_d[QUDA_MAX_DIM];
size_t bytes[QUDA_MAX_DIM];
for (int d=0; d<nDim; d++) {
if (!commDimPartitioned(d) && !no_comms_fill) continue;
// store both parities and directions in each
bytes[d] = surface[d] * R[d] * geometry * nInternal * precision;
send_d[d] = device_malloc(2 * bytes[d]);
recv_d[d] = device_malloc(2 * bytes[d]);
}
#ifndef GPU_COMMS
void *send_h[QUDA_MAX_DIM];
void *recv_h[QUDA_MAX_DIM];
size_t total_bytes = 0;
for (int d=0; d<nDim; d++) {
if (!commDimPartitioned(d)) continue;
total_bytes += 4*bytes[d]; // (2 from send/recv) x (2 from fwd/back)
}
resizeBufferPinned(total_bytes,0);
size_t offset = 0;
for (int d=0; d<nDim; d++) {
if (!commDimPartitioned(d)) continue;
recv_h[d] = static_cast<char*>(bufferPinned[0]) + offset;
send_h[d] = static_cast<char*>(recv_h[d]) + 2*bytes[d];
offset += 4*bytes[d];
}
#endif
// do the exchange
MsgHandle *mh_recv_back[QUDA_MAX_DIM];
MsgHandle *mh_recv_fwd[QUDA_MAX_DIM];
MsgHandle *mh_send_fwd[QUDA_MAX_DIM];
MsgHandle *mh_send_back[QUDA_MAX_DIM];
for (int d=0; d<nDim; d++) {
if (!commDimPartitioned(d)) continue;
#ifdef GPU_COMMS
recv[d] = recv_d[d];
send[d] = send_d[d];
#else
recv[d] = recv_h[d];
send[d] = send_h[d];
#endif
// look into storing these for later
mh_recv_back[d] = comm_declare_receive_relative(recv[d], d, -1, bytes[d]);
mh_recv_fwd[d] = comm_declare_receive_relative(static_cast<char*>(recv[d])+bytes[d],
d, +1, bytes[d]);
mh_send_back[d] = comm_declare_send_relative(send[d], d, -1, bytes[d]);
mh_send_fwd[d] = comm_declare_send_relative(static_cast<char*>(send[d])+bytes[d],
d, +1, bytes[d]);
}
for (int d=0; d<nDim; d++) {
if (!commDimPartitioned(d) && !no_comms_fill) continue;
// FIXME why does this break if the order is switched?
// prepost the receives
if (commDimPartitioned(d)) {
comm_start(mh_recv_fwd[d]);
comm_start(mh_recv_back[d]);
}
//extract into a contiguous buffer
extractExtendedGaugeGhost(*this, d, R, send_d, true);
if (commDimPartitioned(d)) {
// pipeline the forwards and backwards sending
#ifndef GPU_COMMS
cudaMemcpyAsync(send_h[d], send_d[d], bytes[d], cudaMemcpyDeviceToHost, streams[0]);
cudaMemcpyAsync(static_cast<char*>(send_h[d])+bytes[d],
static_cast<char*>(send_d[d])+bytes[d], bytes[d], cudaMemcpyDeviceToHost, streams[1]);
#endif
#ifndef GPU_COMMS
cudaStreamSynchronize(streams[0]);
#endif
comm_start(mh_send_back[d]);
#ifndef GPU_COMMS
cudaStreamSynchronize(streams[1]);
#endif
comm_start(mh_send_fwd[d]);
// forwards recv
comm_wait(mh_send_back[d]);
comm_wait(mh_recv_fwd[d]);
#ifndef GPU_COMMS
cudaMemcpyAsync(static_cast<char*>(recv_d[d])+bytes[d],
static_cast<char*>(recv_h[d])+bytes[d], bytes[d], cudaMemcpyHostToDevice, streams[0]);
#endif
// backwards recv
comm_wait(mh_send_fwd[d]);
comm_wait(mh_recv_back[d]);
#ifndef GPU_COMMS
cudaMemcpyAsync(recv_d[d], recv_h[d], bytes[d], cudaMemcpyHostToDevice, streams[1]);
#endif
} else { // if just doing a local exchange to fill halo then need to swap faces
cudaMemcpy(static_cast<char*>(recv_d[d])+bytes[d], send_d[d], bytes[d], cudaMemcpyDeviceToDevice);
cudaMemcpy(recv_d[d], static_cast<char*>(send_d[d])+bytes[d], bytes[d], cudaMemcpyDeviceToDevice);
}
// inject back into the gauge field
extractExtendedGaugeGhost(*this, d, R, recv_d, false);
}
for (int d=0; d<nDim; d++) {
if (!commDimPartitioned(d) && !no_comms_fill) continue;
if (commDimPartitioned(d)) {
comm_free(mh_send_fwd[d]);
comm_free(mh_send_back[d]);
comm_free(mh_recv_back[d]);
comm_free(mh_recv_fwd[d]);
}
device_free(send_d[d]);
device_free(recv_d[d]);
}
}
void cudaGaugeField::setGauge(void *gauge_)
{
if(create != QUDA_REFERENCE_FIELD_CREATE) {
errorQuda("Setting gauge pointer is only allowed when create="
"QUDA_REFERENCE_FIELD_CREATE type\n");
}
gauge = gauge_;
}
void cudaGaugeField::copy(const GaugeField &src) {
if (this == &src) return;
checkField(src);
if (link_type == QUDA_ASQTAD_FAT_LINKS) {
fat_link_max = src.LinkMax();
if (precision == QUDA_HALF_PRECISION && fat_link_max == 0.0)
errorQuda("fat_link_max has not been computed");
} else {
fat_link_max = 1.0;
}
if (typeid(src) == typeid(cudaGaugeField)) {
// copy field and ghost zone into this field
copyGenericGauge(*this, src, QUDA_CUDA_FIELD_LOCATION, gauge,
static_cast<const cudaGaugeField&>(src).gauge);
} else if (typeid(src) == typeid(cpuGaugeField)) {
LatticeField::resizeBufferPinned(bytes,0);
// copy field and ghost zone into bufferPinned
copyGenericGauge(*this, src, QUDA_CPU_FIELD_LOCATION, bufferPinned[0],
static_cast<const cpuGaugeField&>(src).gauge);
// this copies over both even and odd
cudaMemcpy(gauge, bufferPinned[0], bytes, cudaMemcpyHostToDevice);
} else {
errorQuda("Invalid gauge field type");
}
// if we have copied from a source without a pad then we need to exchange
if (ghostExchange == QUDA_GHOST_EXCHANGE_PAD &&
src.GhostExchange() != QUDA_GHOST_EXCHANGE_PAD) {
exchangeGhost();
}
checkCudaError();
}
void cudaGaugeField::loadCPUField(const cpuGaugeField &cpu, const QudaFieldLocation &pack_location)
{
if (pack_location == QUDA_CUDA_FIELD_LOCATION) {
if (cpu.Order() == QUDA_MILC_GAUGE_ORDER ||
cpu.Order() == QUDA_CPS_WILSON_GAUGE_ORDER) {
resizeBufferPinned(cpu.Bytes(),0);
memcpy(bufferPinned[0], cpu.Gauge_p(), cpu.Bytes());
// run kernel directly using host-mapped input data
void *bufferPinnedMapped;
cudaHostGetDevicePointer(&bufferPinnedMapped, bufferPinned[0], 0);
copyGenericGauge(*this, cpu, QUDA_CUDA_FIELD_LOCATION, gauge, bufferPinnedMapped);
} else {
errorQuda("Not implemented for order %d", cpu.Order());
}
} else if (pack_location == QUDA_CPU_FIELD_LOCATION) {
copy(cpu);
} else {
errorQuda("Invalid pack location %d", pack_location);
}
}
/*
Copies the device gauge field to the host.
- no reconstruction support
- device data is always Float2 ordered
- host data is a 1-dimensional array (MILC ordered)
- no support for half precision
- input and output precisions must match
*/
template<typename FloatN, typename Float>
static void storeGaugeField(Float* cpuGauge, FloatN *gauge, int bytes, int Nint,
int volumeCB, int stride, QudaPrecision prec)
{
cudaStream_t streams[2];
for (int i=0; i<2; i++) cudaStreamCreate(&streams[i]);
FloatN *even = gauge;
FloatN *odd = (FloatN*)((char*)gauge + bytes/2);
size_t datalen = 4*2*volumeCB*Nint*sizeof(Float); // both parities
void *unpacked = device_malloc(datalen);
void *unpackedEven = unpacked;
void *unpackedOdd = (char*)unpacked + datalen/2;
//unpack even data kernel
link_format_gpu_to_cpu((void*)unpackedEven, (void*)even, volumeCB, stride, prec, streams[0]);
#ifdef GPU_DIRECT
cudaMemcpyAsync(cpuGauge, unpackedEven, datalen/2, cudaMemcpyDeviceToHost, streams[0]);
#else
cudaMemcpy(cpuGauge, unpackedEven, datalen/2, cudaMemcpyDeviceToHost);
#endif
//unpack odd data kernel
link_format_gpu_to_cpu((void*)unpackedOdd, (void*)odd, volumeCB, stride, prec, streams[1]);
#ifdef GPU_DIRECT
cudaMemcpyAsync(cpuGauge + 4*volumeCB*Nint, unpackedOdd, datalen/2, cudaMemcpyDeviceToHost, streams[1]);
for(int i=0; i<2; i++) cudaStreamSynchronize(streams[i]);
#else
cudaMemcpy(cpuGauge + 4*volumeCB*Nint, unpackedOdd, datalen/2, cudaMemcpyDeviceToHost);
#endif
device_free(unpacked);
for(int i=0; i<2; i++) cudaStreamDestroy(streams[i]);
}
void cudaGaugeField::saveCPUField(cpuGaugeField &cpu, const QudaFieldLocation &pack_location) const
{
// FIXME use the generic copying for the below copying
// do device-side reordering then copy
if (pack_location == QUDA_CUDA_FIELD_LOCATION) {
// check parameters are suitable for device-side packing
if (precision != cpu.Precision())
errorQuda("cpu precision %d and cuda precision %d must be the same",
cpu.Precision(), precision);
if (reconstruct != QUDA_RECONSTRUCT_NO) errorQuda("Only no reconstruction supported");
if (order != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Only QUDA_FLOAT2_GAUGE_ORDER supported");
if (cpu.Order() != QUDA_MILC_GAUGE_ORDER) errorQuda("Only QUDA_MILC_GAUGE_ORDER supported");
// internal degrees of freedom
if (precision == QUDA_DOUBLE_PRECISION){
storeGaugeField((double*)cpu.gauge, (double2*)gauge, bytes, nInternal, volumeCB, stride, precision);
} else if (precision == QUDA_SINGLE_PRECISION){
storeGaugeField((float*)cpu.gauge, (float2*)gauge, bytes, nInternal, volumeCB, stride, precision);
} else {
errorQuda("Half precision not supported");
}
} else if (pack_location == QUDA_CPU_FIELD_LOCATION) { // do copy then host-side reorder
resizeBufferPinned(bytes,0);
// this copies over both even and odd
cudaMemcpy(bufferPinned[0], gauge, bytes, cudaMemcpyDeviceToHost);
checkCudaError();
copyGenericGauge(cpu, *this, QUDA_CPU_FIELD_LOCATION, cpu.gauge, bufferPinned[0]);
} else {
errorQuda("Invalid pack location %d", pack_location);
}
}
void cudaGaugeField::backup() const {
if (backed_up) errorQuda("Gauge field already backed up");
backup_h = new char[bytes];
cudaMemcpy(backup_h, gauge, bytes, cudaMemcpyDeviceToHost);
checkCudaError();
backed_up = true;
}
void cudaGaugeField::restore() {
if (!backed_up) errorQuda("Cannot restore since not backed up");
cudaMemcpy(gauge, backup_h, bytes, cudaMemcpyHostToDevice);
delete []backup_h;
checkCudaError();
backed_up = false;
}
void cudaGaugeField::zero() {
cudaMemset(gauge, 0, bytes);
}
void setGhostSpinor(bool value);
ColorSpinorParam colorSpinorParam(const cudaGaugeField &a) {
if (a.FieldOrder() == QUDA_QDP_GAUGE_ORDER ||
a.FieldOrder() == QUDA_QDPJIT_GAUGE_ORDER)
errorQuda("Not implemented for this order");
if (a.LinkType() == QUDA_COARSE_LINKS)
errorQuda("Not implemented for this link type");
int spin = 0;
switch (a.Geometry()) {
case QUDA_SCALAR_GEOMETRY:
spin = 1;
break;
case QUDA_VECTOR_GEOMETRY:
spin = a.Ndim();
break;
default:
errorQuda("Unsupported field geometry %d", a.Geometry());
}
if (a.Precision() == QUDA_HALF_PRECISION)
errorQuda("Casting a cudaGaugeField into cudaColorSpinorField not possible in half precision");
if (a.Reconstruct() == QUDA_RECONSTRUCT_13 || a.Reconstruct() == QUDA_RECONSTRUCT_9)
errorQuda("Unsupported field reconstruct %d", a.Reconstruct());
ColorSpinorParam spinor_param;
spinor_param.nColor = a.Reconstruct()/2;
spinor_param.nSpin = spin;
spinor_param.nDim = a.Ndim();
for (int d=0; d<a.Ndim(); d++) spinor_param.x[d] = a.X()[d];
spinor_param.precision = a.Precision();
spinor_param.pad = a.Pad();
spinor_param.siteSubset = QUDA_FULL_SITE_SUBSET;
spinor_param.siteOrder = QUDA_EVEN_ODD_SITE_ORDER;
spinor_param.fieldOrder = (a.Precision() == QUDA_DOUBLE_PRECISION || spinor_param.nSpin == 1) ?
QUDA_FLOAT2_FIELD_ORDER : QUDA_FLOAT4_FIELD_ORDER;
spinor_param.gammaBasis = QUDA_UKQCD_GAMMA_BASIS;
spinor_param.create = QUDA_REFERENCE_FIELD_CREATE;
spinor_param.v = (void*)a.Gauge_p();
return spinor_param;
}
// Return the L2 norm squared of the gauge field
double norm2(const cudaGaugeField &a) {
// quick hack to disable ghost zone creation which otherwise breaks this mapping on multi-gpu
setGhostSpinor(false);
cudaColorSpinorField b(colorSpinorParam(a));
setGhostSpinor(true);
return blas::norm2(b);
}
// Return the L1 norm of the gauge field
double norm1(const cudaGaugeField &a) {
// quick hack to disable ghost zone creation which otherwise breaks this mapping on multi-gpu
setGhostSpinor(false);
cudaColorSpinorField b(colorSpinorParam(a));
setGhostSpinor(true);
return blas::norm1(b);
}
} // namespace quda
|
c77c21ab44c1c728f1a19091752c0741fed19c18.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_4_top;
int xdim0_update_halo_kernel2_xvel_plus_4_top_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_4_top;
int ydim0_update_halo_kernel2_xvel_plus_4_top_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_4_top;
int xdim1_update_halo_kernel2_xvel_plus_4_top_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_4_top;
int ydim1_update_halo_kernel2_xvel_plus_4_top_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_plus_4_top * (y) + \
xdim0_update_halo_kernel2_xvel_plus_4_top * \
ydim0_update_halo_kernel2_xvel_plus_4_top * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_plus_4_top * (y) + \
xdim1_update_halo_kernel2_xvel_plus_4_top * \
ydim1_update_halo_kernel2_xvel_plus_4_top * (z))
// user function
__device__
inline void
update_halo_kernel2_xvel_plus_4_top(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, -4, 0)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, -4, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_plus_4_top(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_4_top +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_4_top *
ydim0_update_halo_kernel2_xvel_plus_4_top;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_4_top +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_4_top *
ydim1_update_halo_kernel2_xvel_plus_4_top;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_plus_4_top(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_xvel_plus_4_top(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 71))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(71, "update_halo_kernel2_xvel_plus_4_top");
OPS_kernels[71].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_4_top_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_plus_4_top_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_plus_4_top_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_plus_4_top_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_4_top, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_plus_4_top_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_4_top, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_plus_4_top_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_4_top, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_plus_4_top_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_4_top, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_plus_4_top_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[71].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_plus_4_top), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[71].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[71].mpi_time += t2 - t1;
OPS_kernels[71].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[71].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| c77c21ab44c1c728f1a19091752c0741fed19c18.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_4_top;
int xdim0_update_halo_kernel2_xvel_plus_4_top_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_4_top;
int ydim0_update_halo_kernel2_xvel_plus_4_top_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_4_top;
int xdim1_update_halo_kernel2_xvel_plus_4_top_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_4_top;
int ydim1_update_halo_kernel2_xvel_plus_4_top_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_plus_4_top * (y) + \
xdim0_update_halo_kernel2_xvel_plus_4_top * \
ydim0_update_halo_kernel2_xvel_plus_4_top * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_plus_4_top * (y) + \
xdim1_update_halo_kernel2_xvel_plus_4_top * \
ydim1_update_halo_kernel2_xvel_plus_4_top * (z))
// user function
__device__
inline void
update_halo_kernel2_xvel_plus_4_top(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, -4, 0)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, -4, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_plus_4_top(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_4_top +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_4_top *
ydim0_update_halo_kernel2_xvel_plus_4_top;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_4_top +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_4_top *
ydim1_update_halo_kernel2_xvel_plus_4_top;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_plus_4_top(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_xvel_plus_4_top(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 71))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(71, "update_halo_kernel2_xvel_plus_4_top");
OPS_kernels[71].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_4_top_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_plus_4_top_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_plus_4_top_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_plus_4_top_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_4_top, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_plus_4_top_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_4_top, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_plus_4_top_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_4_top, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_plus_4_top_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_4_top, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_plus_4_top_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[71].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_xvel_plus_4_top<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[71].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[71].mpi_time += t2 - t1;
OPS_kernels[71].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[71].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
e33088e5e593d9799479a540e1a2591129b16542.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype, typename Mtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : Dtype(in[index] * negative_slope);
}
}
template <typename Dtype, typename Mtype>
void ReLULayer<Dtype,Mtype>::Forward_gpu(const vector<Blob<Dtype,Mtype>*>& bottom,
const vector<Blob<Dtype,Mtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype negative_slope(this->layer_param_.relu_param().negative_slope());
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ReLUForward<Dtype,Mtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data, negative_slope);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
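// Leaky-ReLU backward: the gradient passes through where the input was positive and is scaled by negative_slope elsewhere.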
template <typename Dtype, typename Mtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Mtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = Get<Dtype>( Get<Mtype>(in_diff[index]) * ((Get<Mtype>(in_data[index]) > 0)
+ (Get<Mtype>(in_data[index]) <= 0) * negative_slope) );
}
}
template <typename Dtype, typename Mtype>
void ReLULayer<Dtype,Mtype>::Backward_gpu(const vector<Blob<Dtype,Mtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype,Mtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Mtype negative_slope(this->layer_param_.relu_param().negative_slope());
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ReLUBackward<Dtype,Mtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
} // namespace caffe
| e33088e5e593d9799479a540e1a2591129b16542.cu | #include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype, typename Mtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : Dtype(in[index] * negative_slope);
}
}
template <typename Dtype, typename Mtype>
void ReLULayer<Dtype,Mtype>::Forward_gpu(const vector<Blob<Dtype,Mtype>*>& bottom,
const vector<Blob<Dtype,Mtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype negative_slope(this->layer_param_.relu_param().negative_slope());
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUForward<Dtype,Mtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data, negative_slope);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
template <typename Dtype, typename Mtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Mtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = Get<Dtype>( Get<Mtype>(in_diff[index]) * ((Get<Mtype>(in_data[index]) > 0)
+ (Get<Mtype>(in_data[index]) <= 0) * negative_slope) );
}
}
template <typename Dtype, typename Mtype>
void ReLULayer<Dtype,Mtype>::Backward_gpu(const vector<Blob<Dtype,Mtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype,Mtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Mtype negative_slope(this->layer_param_.relu_param().negative_slope());
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUBackward<Dtype,Mtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
} // namespace caffe
|
4c31d48306a619980de477ade08cef96682f12e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ int cube(int x) {
int y;
asm(".reg .u32 t1;\n\t" // temp reg t1
" mul.lo.u32 t1, %1, %1;\n\t" // t1 = x * x
" mul.lo.u32 %0, t1, %1;" // y = t1 * x
: "=r"(y)
: "r"(x));
return y;
}
__global__ void test_ldg(float *a, float *b) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
a[i] = cube(1);
auto c = __ldg((float4 *)a);
a[i] = __ldg(&b[i]) + __ldg((double *)&b[i]) + __ldg((char *)&b[i]);
}
int main() {
  float *a, *b;
  // allocate a few device elements so the kernel's float4/double loads read valid memory
  hipMalloc((void **)&a, 4 * sizeof(float));
  hipMalloc((void **)&b, 4 * sizeof(float));
  hipLaunchKernelGGL(( test_ldg), dim3(1), dim3(1), 0, 0, a, b);
}
| 4c31d48306a619980de477ade08cef96682f12e4.cu | __device__ int cube(int x) {
int y;
asm(".reg .u32 t1;\n\t" // temp reg t1
" mul.lo.u32 t1, %1, %1;\n\t" // t1 = x * x
" mul.lo.u32 %0, t1, %1;" // y = t1 * x
: "=r"(y)
: "r"(x));
return y;
}
__global__ void test_ldg(float *a, float *b) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
a[i] = cube(1);
auto c = __ldg((float4 *)a);
a[i] = __ldg(&b[i]) + __ldg((double *)&b[i]) + __ldg((char *)&b[i]);
}
int main() {
  float *a, *b;
  // allocate a few device elements so the kernel's float4/double loads read valid memory
  cudaMalloc((void **)&a, 4 * sizeof(float));
  cudaMalloc((void **)&b, 4 * sizeof(float));
  test_ldg<<<1, 1>>>(a, b);
}
|
f7f942d6686f598adfea41e1253a4db216ebbdd9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "book.h"
__device__ int addem( int a, int b ) {
return a + b;
}
__global__ void add( int a, int b, int *c ) {
*c = addem( a, b );
}
int main( void ) {
int c;
int *dev_c;
HANDLE_ERROR( hipMalloc( (void**)&dev_c, sizeof(int) ) );
//adds 2 and 7 and returns 9
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, 2, 7, dev_c );
HANDLE_ERROR( hipMemcpy( &c, dev_c, sizeof(int),
hipMemcpyDeviceToHost ) );
printf( "2 + 7 = %d\n", c );
HANDLE_ERROR( hipFree( dev_c ) );
return 0;
}
| f7f942d6686f598adfea41e1253a4db216ebbdd9.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "book.h"
__device__ int addem( int a, int b ) {
return a + b;
}
__global__ void add( int a, int b, int *c ) {
*c = addem( a, b );
}
int main( void ) {
int c;
int *dev_c;
HANDLE_ERROR( cudaMalloc( (void**)&dev_c, sizeof(int) ) );
//adds 2 and 7 and returns 9
add<<<1,1>>>( 2, 7, dev_c );
HANDLE_ERROR( cudaMemcpy( &c, dev_c, sizeof(int),
cudaMemcpyDeviceToHost ) );
printf( "2 + 7 = %d\n", c );
HANDLE_ERROR( cudaFree( dev_c ) );
return 0;
}
|
33a778d9d320a5d507d26a4efb8970ed6b08587a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <stdbool.h>
#include <hip/hip_runtime.h>
#define BLOCK_SIZE 16
///
/// function name: gpu_matrix_mult
///
/// description: dot product of two matrix (not only square)
///
/// parameters:
/// &a GPU device pointer to a m X n matrix (A)
/// &b GPU device pointer to a n X k matrix (B)
/// &c GPU device output purpose pointer to a m X k matrix (C)
/// to store the result
///
/// Note:
/// grid and block should be configured as:
/// dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE,
/// (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
/// dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
///
/// further speedup can be obtained by using shared memory to decrease
/// global memory access times
/// return: none
///
__global__ void gpu_matrix_mult(float *a, float *b, float *c, int N) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
  /// fill me in. (a minimal sketch of the straightforward version, assuming row-major N x N matrices)
  if (row >= N || col >= N) return;
  float sum = 0.0f;
  for (int k = 0; k < N; ++k) sum += a[row * N + k] * b[k * N + col];
  c[row * N + col] = sum;
}
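/// Worked example of the Note above, using the values from main() below
/// (N = 2048, BLOCK_SIZE = 16):
///   dimGrid  = dim3((2048 + 16 - 1) / 16, (2048 + 16 - 1) / 16) = dim3(128, 128)
///   dimBlock = dim3(16, 16)
/// i.e. 128 * 128 blocks of 16 * 16 threads = one thread per element of C.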
///
/// function name: cpu_matrix_mult
///
/// description: dot product of two matrix (not only square) in CPU,
/// for validating GPU results
///
/// parameters:
/// &a CPU host pointer to a m X n matrix (A)
/// &b CPU host pointer to a n X k matrix (B)
/// &c CPU host output purpose pointer to a m X k matrix (C)
/// to store the result
/// return: none
///
void cpu_matrix_mult(float *h_a, float *h_b, float *h_result, int N) {
int i,j,k;
#pragma omp parallel for schedule(dynamic,50) collapse(2) private(i,j,k) shared(h_a,h_b,h_result)
for( j=0;j<N;j++)
for(i=0;i<N;i++)
for(k=0;k<N;k++)
h_result[j*N+i] += h_a[j*N+k]*h_b[k*N+i];
}
///
/// function name: main
///
/// description: test and compare
///
/// parameters:
/// none
///
/// return: none
///
int main(int argc, char const *argv[])
{
int N=2048;
/// Fixed seed for illustration.
srand(3333);
/// allocate memory in host RAM, h_cc is used to store CPU result
float *h_a, *h_b, *h_c, *h_cc;
hipHostMalloc((void **) &h_a, sizeof(float)*N*N);
hipHostMalloc((void **) &h_b, sizeof(float)*N*N);
hipHostMalloc((void **) &h_c, sizeof(float)*N*N);
hipHostMalloc((void **) &h_cc, sizeof(float)*N*N);
/// random initialize matrix A
for (int j = 0; j < N; ++j) {
for (int i = 0; i < N; ++i) {
h_a[j*N + i] = rand() % 1024;
}
}
/// random initialize matrix B
for (int j = 0; j < N; ++j) {
for (int i = 0; i < N; ++i) {
h_b[j*N + i] = rand() % 1024;
}
}
  /// c = 0 (also zero h_cc, which cpu_matrix_mult accumulates into with +=)
  for (int j = 0; j < N; ++j) {
    for (int i = 0; i < N; ++i) {
      h_c[j*N + i] = 0.0;
      h_cc[j*N + i] = 0.0;
}
}
float gpu_elapsed_time_ms, cpu_elapsed_time_ms;
/// some events to count the execution time
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
/// start to count execution time of GPU version
hipEventRecord(start, 0);
/// Allocate memory space on the device
float *d_a, *d_b, *d_c;
hipMalloc((void **) &d_a, sizeof(float)*N*N);
hipMalloc((void **) &d_b, sizeof(float)*N*N);
hipMalloc((void **) &d_c, sizeof(float)*N*N);
/// copy matrix A and B from host to device memory
  hipMemcpy(d_a, h_a, sizeof(float)*N*N, hipMemcpyHostToDevice);
  hipMemcpy(d_b, h_b, sizeof(float)*N*N, hipMemcpyHostToDevice);
unsigned int grid_rows = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/// Launch kernel
hipLaunchKernelGGL(( gpu_matrix_mult), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, N);
/// Transfer results from device to host
  hipMemcpy(h_c, d_c, sizeof(float)*N*N, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
/// time counting terminate
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
/// compute time elapse on GPU computing
hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on GPU: %f ms.\n\n", gpu_elapsed_time_ms);
/// start the CPU version
hipEventRecord(start, 0);
cpu_matrix_mult(h_a, h_b, h_cc, N);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&cpu_elapsed_time_ms, start, stop);
  printf("Time elapsed on CPU: %f ms.\n\n", cpu_elapsed_time_ms);
/// validate results computed by GPU
bool all_ok = true;
for (int j = 0; j < N; ++j) {
for (int i = 0; i < N; ++i) {
if(fabs(h_c[j*N + i] - h_cc[j*N + i]) > 1.e-4) {
all_ok = false;
}
}
}
/// roughly compute speedup
if(all_ok) {
printf("all results are correct!!!, speedup = %f\n",
cpu_elapsed_time_ms / gpu_elapsed_time_ms);
} else {
printf("incorrect results\n");
}
/// free memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipHostFree(h_a);
hipHostFree(h_b);
hipHostFree(h_c);
hipHostFree(h_cc);
return 0;
}
| 33a778d9d320a5d507d26a4efb8970ed6b08587a.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <stdbool.h>
#include <cuda_runtime.h>
#define BLOCK_SIZE 16
///
/// function name: gpu_matrix_mult
///
/// description: dot product of two matrix (not only square)
///
/// parameters:
/// &a GPU device pointer to a m X n matrix (A)
/// &b GPU device pointer to a n X k matrix (B)
/// &c GPU device output purpose pointer to a m X k matrix (C)
/// to store the result
///
/// Note:
/// grid and block should be configured as:
/// dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE,
/// (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
/// dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
///
/// further speedup can be obtained by using shared memory to decrease
/// global memory access times
/// return: none
///
__global__ void gpu_matrix_mult(float *a, float *b, float *c, int N) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
  /// fill me in. (a minimal sketch of the straightforward version, assuming row-major N x N matrices)
  if (row >= N || col >= N) return;
  float sum = 0.0f;
  for (int k = 0; k < N; ++k) sum += a[row * N + k] * b[k * N + col];
  c[row * N + col] = sum;
}
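/// Worked example of the Note above, using the values from main() below
/// (N = 2048, BLOCK_SIZE = 16):
///   dimGrid  = dim3((2048 + 16 - 1) / 16, (2048 + 16 - 1) / 16) = dim3(128, 128)
///   dimBlock = dim3(16, 16)
/// i.e. 128 * 128 blocks of 16 * 16 threads = one thread per element of C.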
///
/// function name: cpu_matrix_mult
///
/// description: dot product of two matrix (not only square) in CPU,
/// for validating GPU results
///
/// parameters:
/// &a CPU host pointer to a m X n matrix (A)
/// &b CPU host pointer to a n X k matrix (B)
/// &c CPU host output purpose pointer to a m X k matrix (C)
/// to store the result
/// return: none
///
void cpu_matrix_mult(float *h_a, float *h_b, float *h_result, int N) {
int i,j,k;
#pragma omp parallel for schedule(dynamic,50) collapse(2) private(i,j,k) shared(h_a,h_b,h_result)
for( j=0;j<N;j++)
for(i=0;i<N;i++)
for(k=0;k<N;k++)
h_result[j*N+i] += h_a[j*N+k]*h_b[k*N+i];
}
///
/// function name: main
///
/// description: test and compare
///
/// parameters:
/// none
///
/// return: none
///
int main(int argc, char const *argv[])
{
int N=2048;
/// Fixed seed for illustration.
srand(3333);
/// allocate memory in host RAM, h_cc is used to store CPU result
float *h_a, *h_b, *h_c, *h_cc;
cudaMallocHost((void **) &h_a, sizeof(float)*N*N);
cudaMallocHost((void **) &h_b, sizeof(float)*N*N);
cudaMallocHost((void **) &h_c, sizeof(float)*N*N);
cudaMallocHost((void **) &h_cc, sizeof(float)*N*N);
/// random initialize matrix A
for (int j = 0; j < N; ++j) {
for (int i = 0; i < N; ++i) {
h_a[j*N + i] = rand() % 1024;
}
}
/// random initialize matrix B
for (int j = 0; j < N; ++j) {
for (int i = 0; i < N; ++i) {
h_b[j*N + i] = rand() % 1024;
}
}
  /// c = 0 (also zero h_cc, which cpu_matrix_mult accumulates into with +=)
  for (int j = 0; j < N; ++j) {
    for (int i = 0; i < N; ++i) {
      h_c[j*N + i] = 0.0;
      h_cc[j*N + i] = 0.0;
}
}
float gpu_elapsed_time_ms, cpu_elapsed_time_ms;
/// some events to count the execution time
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
/// start to count execution time of GPU version
cudaEventRecord(start, 0);
/// Allocate memory space on the device
float *d_a, *d_b, *d_c;
cudaMalloc((void **) &d_a, sizeof(float)*N*N);
cudaMalloc((void **) &d_b, sizeof(float)*N*N);
cudaMalloc((void **) &d_c, sizeof(float)*N*N);
/// copy matrix A and B from host to device memory
  cudaMemcpy(d_a, h_a, sizeof(float)*N*N, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, h_b, sizeof(float)*N*N, cudaMemcpyHostToDevice);
unsigned int grid_rows = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/// Launch kernel
gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, N);
/// Transfer results from device to host
  cudaMemcpy(h_c, d_c, sizeof(float)*N*N, cudaMemcpyDeviceToHost);
  cudaDeviceSynchronize();
/// time counting terminate
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
/// compute time elapse on GPU computing
cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on GPU: %f ms.\n\n", gpu_elapsed_time_ms);
/// start the CPU version
cudaEventRecord(start, 0);
cpu_matrix_mult(h_a, h_b, h_cc, N);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&cpu_elapsed_time_ms, start, stop);
  printf("Time elapsed on CPU: %f ms.\n\n", cpu_elapsed_time_ms);
/// validate results computed by GPU
bool all_ok = true;
for (int j = 0; j < N; ++j) {
for (int i = 0; i < N; ++i) {
if(fabs(h_c[j*N + i] - h_cc[j*N + i]) > 1.e-4) {
all_ok = false;
}
}
}
/// roughly compute speedup
if(all_ok) {
printf("all results are correct!!!, speedup = %f\n",
cpu_elapsed_time_ms / gpu_elapsed_time_ms);
} else {
printf("incorrect results\n");
}
/// free memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFreeHost(h_a);
cudaFreeHost(h_b);
cudaFreeHost(h_c);
cudaFreeHost(h_cc);
return 0;
}
|
c1560ad775b223f74e5742257a4642c6ca387fd0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorIndex.cu"
#else
// Check tensor dimensions for index operations, and return the slice size.
// src can be nullptr in case of indexFill: in that case it is ignored.
static ptrdiff_t THCTensor_(getSliceSize)(THCState *state, THCTensor *dst,
int dim,
THCudaLongTensor *index,
THCTensor *src)
{
int dstDims = THCTensor_(nDimension)(state, dst);
int srcDims = (src == nullptr) ? dstDims : THCTensor_(nDimension)(state, src);
THArgCheck(THCudaLongTensor_nDimension(state, index) == 1, 4,
"expecting vector of indices");
THArgCheck(dim >= 0 && dim < dstDims, 2, "Indexing dim is out of bounds");
ptrdiff_t dstSliceSize = 1;
for (int d = 0; d < dstDims; d++) {
if (d != dim) {
dstSliceSize *= dst->size[d];
}
}
if (src == nullptr) return dstSliceSize;
THArgCheck(dim < srcDims, 3, "Indexing dim is out of bounds");
THArgCheck(THCudaLongTensor_nElement(state, index) == src->size[dim], 4,
"length of src.size[dim] is not equal to length of indices");
ptrdiff_t srcSliceSize = 1;
bool mismatch = false;
if (dstDims != srcDims) mismatch = true;
for (int d = 0; d < srcDims; d++) {
if (d != dim) {
srcSliceSize *= src->size[d];
if (!mismatch && dst->size[d] != src->size[d]) mismatch = true;
}
}
THArgCheck(dstSliceSize == srcSliceSize, 2,
"Source/destination tensor have different slice sizes (%ld vs %ld)",
dstSliceSize, srcSliceSize);
if (mismatch) {
static bool warningShown = false;
if (!warningShown) {
warningShown = true;
fprintf(stderr,
"Warning: source/destination slices have same size but different "
"shape for an index operation. This behavior is deprecated.\n");
}
}
return dstSliceSize;
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
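// Worked example of the two cases above: a contiguous 2-D tensor of size
// {4, 5} has strides {5, 1}.
// - sliceDim == 0: sliceStride = 5; dimension 1 has size 5 > 1 and stride
//   1 < 5, so this returns true (index-major kernel).
// - sliceDim == 1: sliceStride = 1; no other dimension has a smaller
//   stride, so this returns false (elementInSlice-major kernel).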
bool THCTensor_(indexShouldBeMajor)(TensorInfo<real, unsigned int> &info,
int sliceDim)
{
// The stride between adjacent slices (e.g., between element #0 of slice #100
// and element #0 of slice #101).
unsigned int sliceStride = info.strides[sliceDim];
for (int i = 0; i < info.dims; ++i) {
if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) {
return true;
}
}
return false;
}
void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
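  // For example (hypothetical sizes): dst of size {8, 10, 6}, dim == 1 and
  // 4 indices imply src of size {8, 4, 6}, so sliceSize = 8 * 6 = 48,
  // srcTotalSize = 8 * 4 * 6 = 192 and dstCopyDimSize = 10.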
ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src);
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstCopyDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
hipStream_t stream = THCState_getCurrentStream(state);
int indContig = THCudaLongTensor_isContiguous(state, indices);
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexCopySmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexCopyLargeIndex<TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstCopyDim, srcCopyDim, srcTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
dstCopyDimSize);
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(srcTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstCopyDim);
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2, true);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, 2, -2, false);
}
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1, true);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(take)(THCState *state, THCTensor *dst, THCTensor *src, THCudaLongTensor *index)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(THCTensor_(nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCTensor_(nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor_nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(!(THCTensor_(nDimension)(state, src) == 0 && THCudaLongTensor_nDimension(state, index) != 0), 2,
"tried to take from an empty tensor");
THCTensor_(resizeNd)(state, dst, index->nDimension, index->size, NULL);
// dispatchTakePut only handles non-empty tensors;
if (index->nDimension > 0) {
dispatchTakePut<real, TensorTakeOp>(state, src, dst, index);
}
}
static void THCTensor_(sort_indices)(THCState *state, THCudaLongTensor *index, THCTensor *src) {
THCThrustAllocator thrustAlloc(state);
auto index_iter = thrust::device_ptr<int64_t>(THCudaLongTensor_data(state, index));
auto src_iter = thrust::device_ptr<real>(THCTensor_(data)(state, src));
auto numel = THCTensor_(numel)(state, src);
thrust::sort_by_key(
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
index_iter, index_iter + numel,
src_iter, ThrustLTOp<int64_t>());
}
void THCTensor_(put)(THCState *state, THCTensor *dst, THCudaLongTensor *index, THCTensor *src, int accumulate)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
ptrdiff_t dstSize = THCTensor_(nElement)(state, dst);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, index);
THArgCheck(THCTensor_(nElement)(state, src) == numIndices,
3, "src should have the same number of elements as index");
THArgCheck(THCTensor_(nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCTensor_(nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor_nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
if (numIndices == 0) {
return;
}
if (accumulate) {
    // wrap indices so as to replace negative indices
THCudaLongTensor* sorted_index = THCudaLongTensor_new(state);
THCudaLongTensor_resizeAs(state, sorted_index, index);
THC_pointwiseApply2<int64_t, int64_t>(state, sorted_index, index, WrapIndexOp(dstSize));
THCTensor* sorted_src = THCTensor_(newClone)(state, src);
THCTensor_(sort_indices)(state, sorted_index, sorted_src);
dispatchTakePut<real, TensorPutAccumulateOp>(state, dst, sorted_src, sorted_index);
THCTensor_(free)(state, sorted_src);
THCudaLongTensor_free(state, sorted_index);
} else {
dispatchTakePut<real, TensorPutOp>(state, dst, src, index);
}
}
void THCTensor_(indexAdd)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src);
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstAddDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
hipStream_t stream = THCState_getCurrentStream(state);
int indContig = THCudaLongTensor_isContiguous(state, indices);
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexAddSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstAddDim, srcAddDim, sliceSize, dstAddDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexAddLargeIndex<TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstAddDim, srcAddDim, srcTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
dstAddDimSize);
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(srcTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstAddDim);
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2, true);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, 2, -2, false);
}
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1, true);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, real val)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t sliceSize =
THCTensor_(getSliceSize)(state, dst, dim, indices, nullptr);
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t dstFillDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
hipStream_t stream = THCState_getCurrentStream(state);
int indContig = THCudaLongTensor_isContiguous(state, indices);
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexFillSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, indicesInfo, \
dstFillDim, sliceSize, dstFillDimSize, val);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexFillLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, indicesInfo, \
dstFillDim, sliceSize * numIndices, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
dstFillDimSize, val);
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(dstTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, -2);
} else if (dstInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, -2);
} else if (dstInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstFillDim);
if (dstInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, -2, true);
} else if (dstInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, -2, false);
}
} else if (dstInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, true);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THCudaLongTensor *indices)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, dst, src, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
hipStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) <= 1, 3,
"Index is supposed to be an empty tensor or a vector");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THLongStorage *newSize;
if (numIndices == 0) {
newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, 0, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
return;
}
newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, dim, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t srcSelectDimSize = THCTensor_(size)(state, src, dim);
ptrdiff_t sliceSize = dstTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexSelectSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, sliceSize, srcSelectDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexSelectLargeIndex<TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, dstTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
srcSelectDimSize);
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(dstTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstSelectDim);
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2, true);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, 2, -2, false);
}
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1, true);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
#endif
| c1560ad775b223f74e5742257a4642c6ca387fd0.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorIndex.cu"
#else
// Check tensor dimensions for index operations, and return the slice size.
// src can be nullptr in case of indexFill: in that case it is ignored.
static ptrdiff_t THCTensor_(getSliceSize)(THCState *state, THCTensor *dst,
int dim,
THCudaLongTensor *index,
THCTensor *src)
{
int dstDims = THCTensor_(nDimension)(state, dst);
int srcDims = (src == nullptr) ? dstDims : THCTensor_(nDimension)(state, src);
THArgCheck(THCudaLongTensor_nDimension(state, index) == 1, 4,
"expecting vector of indices");
THArgCheck(dim >= 0 && dim < dstDims, 2, "Indexing dim is out of bounds");
ptrdiff_t dstSliceSize = 1;
for (int d = 0; d < dstDims; d++) {
if (d != dim) {
dstSliceSize *= dst->size[d];
}
}
if (src == nullptr) return dstSliceSize;
THArgCheck(dim < srcDims, 3, "Indexing dim is out of bounds");
THArgCheck(THCudaLongTensor_nElement(state, index) == src->size[dim], 4,
"length of src.size[dim] is not equal to length of indices");
ptrdiff_t srcSliceSize = 1;
bool mismatch = false;
if (dstDims != srcDims) mismatch = true;
for (int d = 0; d < srcDims; d++) {
if (d != dim) {
srcSliceSize *= src->size[d];
if (!mismatch && dst->size[d] != src->size[d]) mismatch = true;
}
}
THArgCheck(dstSliceSize == srcSliceSize, 2,
"Source/destination tensor have different slice sizes (%ld vs %ld)",
dstSliceSize, srcSliceSize);
if (mismatch) {
static bool warningShown = false;
if (!warningShown) {
warningShown = true;
fprintf(stderr,
"Warning: source/destination slices have same size but different "
"shape for an index operation. This behavior is deprecated.\n");
}
}
return dstSliceSize;
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
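// Worked example of the two cases above: a contiguous 2-D tensor of size
// {4, 5} has strides {5, 1}.
// - sliceDim == 0: sliceStride = 5; dimension 1 has size 5 > 1 and stride
//   1 < 5, so this returns true (index-major kernel).
// - sliceDim == 1: sliceStride = 1; no other dimension has a smaller
//   stride, so this returns false (elementInSlice-major kernel).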
bool THCTensor_(indexShouldBeMajor)(TensorInfo<real, unsigned int> &info,
int sliceDim)
{
// The stride between adjacent slices (e.g., between element #0 of slice #100
// and element #0 of slice #101).
unsigned int sliceStride = info.strides[sliceDim];
for (int i = 0; i < info.dims; ++i) {
if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) {
return true;
}
}
return false;
}
void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
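  // For example (hypothetical sizes): dst of size {8, 10, 6}, dim == 1 and
  // 4 indices imply src of size {8, 4, 6}, so sliceSize = 8 * 6 = 48,
  // srcTotalSize = 8 * 4 * 6 = 192 and dstCopyDimSize = 10.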
ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src);
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstCopyDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
cudaStream_t stream = THCState_getCurrentStream(state);
int indContig = THCudaLongTensor_isContiguous(state, indices);
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexCopySmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexCopyLargeIndex<TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstCopyDim, srcCopyDim, srcTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
dstCopyDimSize);
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(srcTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstCopyDim);
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2, true);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, 2, -2, false);
}
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1, true);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(take)(THCState *state, THCTensor *dst, THCTensor *src, THCudaLongTensor *index)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(THCTensor_(nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCTensor_(nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor_nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(!(THCTensor_(nDimension)(state, src) == 0 && THCudaLongTensor_nDimension(state, index) != 0), 2,
"tried to take from an empty tensor");
THCTensor_(resizeNd)(state, dst, index->nDimension, index->size, NULL);
// dispatchTakePut only handles non-empty tensors;
if (index->nDimension > 0) {
dispatchTakePut<real, TensorTakeOp>(state, src, dst, index);
}
}
static void THCTensor_(sort_indices)(THCState *state, THCudaLongTensor *index, THCTensor *src) {
THCThrustAllocator thrustAlloc(state);
auto index_iter = thrust::device_ptr<int64_t>(THCudaLongTensor_data(state, index));
auto src_iter = thrust::device_ptr<real>(THCTensor_(data)(state, src));
auto numel = THCTensor_(numel)(state, src);
thrust::sort_by_key(
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
index_iter, index_iter + numel,
src_iter, ThrustLTOp<int64_t>());
}
void THCTensor_(put)(THCState *state, THCTensor *dst, THCudaLongTensor *index, THCTensor *src, int accumulate)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
ptrdiff_t dstSize = THCTensor_(nElement)(state, dst);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, index);
THArgCheck(THCTensor_(nElement)(state, src) == numIndices,
3, "src should have the same number of elements as index");
THArgCheck(THCTensor_(nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCTensor_(nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor_nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
if (numIndices == 0) {
return;
}
if (accumulate) {
    // wrap indices so as to replace negative indices
THCudaLongTensor* sorted_index = THCudaLongTensor_new(state);
THCudaLongTensor_resizeAs(state, sorted_index, index);
THC_pointwiseApply2<int64_t, int64_t>(state, sorted_index, index, WrapIndexOp(dstSize));
THCTensor* sorted_src = THCTensor_(newClone)(state, src);
THCTensor_(sort_indices)(state, sorted_index, sorted_src);
dispatchTakePut<real, TensorPutAccumulateOp>(state, dst, sorted_src, sorted_index);
THCTensor_(free)(state, sorted_src);
THCudaLongTensor_free(state, sorted_index);
} else {
dispatchTakePut<real, TensorPutOp>(state, dst, src, index);
}
}
void THCTensor_(indexAdd)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src);
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstAddDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
cudaStream_t stream = THCState_getCurrentStream(state);
int indContig = THCudaLongTensor_isContiguous(state, indices);
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexAddSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstAddDim, srcAddDim, sliceSize, dstAddDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexAddLargeIndex<TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstAddDim, srcAddDim, srcTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
dstAddDimSize);
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(srcTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstAddDim);
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2, true);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, 2, -2, false);
}
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1, true);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, real val)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t sliceSize =
THCTensor_(getSliceSize)(state, dst, dim, indices, nullptr);
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t dstFillDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
cudaStream_t stream = THCState_getCurrentStream(state);
int indContig = THCudaLongTensor_isContiguous(state, indices);
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \
indexFillSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, indicesInfo, \
dstFillDim, sliceSize, dstFillDimSize, val);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexFillLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, indicesInfo, \
dstFillDim, sliceSize * numIndices, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
dstFillDimSize, val);
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, -2);
} else if (dstInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, -2);
} else if (dstInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstFillDim);
if (dstInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, -2, true);
} else if (dstInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, -2, false);
}
} else if (dstInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, true);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THCudaLongTensor *indices)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, dst, src, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
cudaStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) <= 1, 3,
"Index is supposed to be an empty tensor or a vector");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THLongStorage *newSize;
if (numIndices == 0) {
newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, 0, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
return;
}
newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, dim, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
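  // For example (illustration added here, not in the original source): selecting
  // along dim = 1 of a 4 x 5 x 6 `src` with 3 indices resizes `dst` to 4 x 3 x 6,
  // so dstTotalSize = 72, numIndices = 3 and sliceSize = 72 / 3 = 24 = 4 * 6,
  // i.e. sliceSize is the product of every dimension of `src` except `dim`.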
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t srcSelectDimSize = THCTensor_(size)(state, src, dim);
ptrdiff_t sliceSize = dstTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexSelectSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, sliceSize, srcSelectDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexSelectLargeIndex<TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, dstTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
srcSelectDimSize);
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstSelectDim);
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2, true);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, 2, -2, false);
}
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1, true);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
#endif
|
4ed8c7acfbac57fd7115cb67d35d1079162680ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void unique_gid_calculation_2D_2D(int *input){
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int num_threads_per_block = blockDim.x * blockDim.y;
int block_offset = blockIdx.x * num_threads_per_block;
int num_threads_per_row = num_threads_per_block * gridDim.x;
int row_offset = num_threads_per_row * blockIdx.y;
int gid = tid + block_offset + row_offset;
    printf("blockIdx.x: %d, blockIdx.y: %d, tid: %d, gid: %d, value: %d\n", blockIdx.x, blockIdx.y, tid, gid, input[gid]); // third field is the flattened in-block thread id (tid), not threadIdx.x
}
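// Worked example (added for clarity, not part of the original kernel): with
// grid(2, 2) and block(2, 2) as launched in main below, each block holds
// blockDim.x * blockDim.y = 4 threads and each grid row holds 4 * gridDim.x = 8.
// For block (1, 1), thread (1, 1): tid = 1 + 2 * 1 = 3, block_offset = 1 * 4 = 4,
// row_offset = 8 * 1 = 8, so gid = 3 + 4 + 8 = 15, the last element (21) of h_data.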
int main(){
int array_size = 16;
int array_bit_size = sizeof(int) * array_size;
int h_data[] = {23, 9, 4, 53, 65, 12, 1, 33, 3, 92, 41, 54, 68, 11, 45, 21};
for(int i = 0; i < array_size; i++){
printf("%d ", h_data[i]);
}
printf("\n\n");
int *d_data;
hipMalloc((void **)&d_data, array_bit_size);
hipMemcpy(d_data, h_data, array_bit_size, hipMemcpyHostToDevice);
dim3 block(2, 2);
dim3 grid(2, 2);
hipLaunchKernelGGL(( unique_gid_calculation_2D_2D) , dim3(grid), dim3(block), 0, 0, d_data);
hipDeviceSynchronize();
hipDeviceReset();
return 0;
} | 4ed8c7acfbac57fd7115cb67d35d1079162680ad.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void unique_gid_calculation_2D_2D(int *input){
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int num_threads_per_block = blockDim.x * blockDim.y;
int block_offset = blockIdx.x * num_threads_per_block;
int num_threads_per_row = num_threads_per_block * gridDim.x;
int row_offset = num_threads_per_row * blockIdx.y;
int gid = tid + block_offset + row_offset;
    printf("blockIdx.x: %d, blockIdx.y: %d, tid: %d, gid: %d, value: %d\n", blockIdx.x, blockIdx.y, tid, gid, input[gid]); // third field is the flattened in-block thread id (tid), not threadIdx.x
}
int main(){
int array_size = 16;
int array_bit_size = sizeof(int) * array_size;
int h_data[] = {23, 9, 4, 53, 65, 12, 1, 33, 3, 92, 41, 54, 68, 11, 45, 21};
for(int i = 0; i < array_size; i++){
printf("%d ", h_data[i]);
}
printf("\n\n");
int *d_data;
cudaMalloc((void **)&d_data, array_bit_size);
cudaMemcpy(d_data, h_data, array_bit_size, cudaMemcpyHostToDevice);
dim3 block(2, 2);
dim3 grid(2, 2);
unique_gid_calculation_2D_2D <<<grid, block>>>(d_data);
cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
} |
7b93e146f991dbdab56364515a44b2d74ad1ad48.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <random>
#include <chrono>
#include <hip/hip_runtime.h>
#include "kernels.h"
// transpose
double* t(const double *idata, const int width, const int height)
{
double *odata = (double*) malloc (sizeof(double) * width * height);
for (int yIndex = 0; yIndex < height; yIndex++) {
for (int xIndex = 0; xIndex < width; xIndex++) {
int index_in = xIndex + width * yIndex;
int index_out = yIndex + height * xIndex;
odata[index_out] = idata[index_in];
}
}
return odata;
}
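// Index-mapping sketch (added, not in the original): for width = 3, height = 2 the
// row-major input {a, b, c, d, e, f} maps element (y, x) from idata[x + 3 * y] to
// odata[y + 2 * x], giving {a, d, b, e, c, f}, i.e. the 3 x 2 transpose in row-major
// order. The *_unitStrides kernels below presumably rely on this so the K-sized
// dimension is traversed with stride-1 (coalesced) accesses.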
int main(int argc, char* argv[]) {
if (argc != 3) {
printf("Usage: %s <path to filename> <repeat>\n", argv[0]);
return 1;
}
char *filename = argv[1];
const int repeat = atoi(argv[2]);
// n and K should match the dimension of the dataset in the csv file
const int n = 26280, K = 21, M = 10000;
FILE *fp = fopen(filename, "r");
if (fp == NULL) {
    printf("Error: failed to open file %s. Exit\n", filename);
return 1;
}
int alphas_size = n * K; // n rows and K cols
int alphas_size_byte = n * K * sizeof(double);
int rands_size = M * K; // M rows and K cols
int rands_size_byte = M * K * sizeof(double);
double *alphas, *rands, *probs;
alphas = (double*) malloc (alphas_size_byte);
rands = (double*) malloc (rands_size_byte);
probs = (double*) malloc (alphas_size_byte);
// load the csv file
for (int i = 0; i < alphas_size; i++)
fscanf(fp, "%lf", &alphas[i]);
fclose(fp);
// normal distribution (mean: 0 and var: 1)
std::mt19937 gen(19937);
std::normal_distribution<double> norm_dist(0.0,1.0);
for (int i = 0; i < rands_size; i++) rands[i] = norm_dist(gen);
double *d_alphas, *d_rands, *d_probs;
hipMalloc((void**)&d_rands, rands_size_byte);
hipMalloc((void**)&d_alphas, alphas_size_byte);
hipMalloc((void**)&d_probs, alphas_size_byte);
hipMemcpy(d_rands, rands, rands_size_byte, hipMemcpyHostToDevice);
hipMemcpy(d_alphas, alphas, alphas_size_byte, hipMemcpyHostToDevice);
// kernel 1
int threads_per_block = 192;
dim3 threads (threads_per_block);
  dim3 blocks (ceil(n / (double)threads_per_block)); // divide in floating point: the integer n/threads_per_block truncated before ceil and dropped the last partial block of rows (the kernels in kernels.h are assumed to bound-check the row index)
hipMemset(d_probs, 0.0, alphas_size_byte);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( compute_probs), dim3(blocks), dim3(threads), 0, 0, d_alphas, d_rands, d_probs, n, K, M);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat);
hipMemcpy(probs, d_probs, alphas_size_byte, hipMemcpyDeviceToHost);
double s = 0.0;
for (int i = 0; i < alphas_size; i++) s += probs[i];
printf("compute_probs: checksum = %lf\n", s);
// kernel 2
double *t_rands = t(rands, K, M);
double *t_alphas = t(alphas, K, n);
hipMemcpy(d_rands, t_rands, rands_size_byte, hipMemcpyHostToDevice);
hipMemcpy(d_alphas, t_alphas, alphas_size_byte, hipMemcpyHostToDevice);
hipMemset(d_probs, 0.0, alphas_size_byte);
hipDeviceSynchronize();
start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( compute_probs_unitStrides), dim3(blocks), dim3(threads), 0, 0,
d_alphas, d_rands, d_probs, n, K, M);
hipDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat);
hipMemcpy(probs, d_probs, alphas_size_byte, hipMemcpyDeviceToHost);
s = 0.0;
for (int i = 0; i < alphas_size; i++) s += probs[i];
printf("compute_probs_unitStrides: checksum = %lf\n", s);
// kernel 3
threads_per_block = 96;
dim3 threads2 (threads_per_block);
  dim3 blocks2 (ceil(n / (double)threads_per_block)); // same floating-point rounding fix as for the first launch
const int sm_size = sizeof(double) * K * threads_per_block * 2;
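  // Note (added): kernels.h is not shown here, but the sizing suggests the shared-memory
  // kernel stages two arrays of K doubles per thread (presumably one row of alphas and
  // one row of rands), hence sizeof(double) * K * threads_per_block * 2 bytes per block:
  // with K = 21 and 96 threads that is 21 * 96 * 2 * 8 = 32256 bytes, under the common
  // 48 KB per-block shared-memory limit.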
hipMemset(d_probs, 0.0, alphas_size_byte);
hipDeviceSynchronize();
start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( compute_probs_unitStrides_sharedMem), dim3(blocks2), dim3(threads2), sm_size, 0,
d_alphas, d_rands, d_probs, n, K, M);
hipDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat);
hipMemcpy(probs, d_probs, alphas_size_byte, hipMemcpyDeviceToHost);
s = 0.0;
for (int i = 0; i < alphas_size; i++) s += probs[i];
printf("compute_probs_unitStrides_sharedMem: checksum = %lf\n", s);
// free memory
hipFree(d_alphas);
hipFree(d_rands);
hipFree(d_probs);
free(alphas);
free(rands);
free(t_alphas);
free(t_rands);
free(probs);
return 0;
}
| 7b93e146f991dbdab56364515a44b2d74ad1ad48.cu | #include <cstdio>
#include <cstdlib>
#include <cmath>
#include <random>
#include <chrono>
#include <cuda.h>
#include "kernels.h"
// transpose
double* t(const double *idata, const int width, const int height)
{
double *odata = (double*) malloc (sizeof(double) * width * height);
for (int yIndex = 0; yIndex < height; yIndex++) {
for (int xIndex = 0; xIndex < width; xIndex++) {
int index_in = xIndex + width * yIndex;
int index_out = yIndex + height * xIndex;
odata[index_out] = idata[index_in];
}
}
return odata;
}
int main(int argc, char* argv[]) {
if (argc != 3) {
printf("Usage: %s <path to filename> <repeat>\n", argv[0]);
return 1;
}
char *filename = argv[1];
const int repeat = atoi(argv[2]);
// n and K should match the dimension of the dataset in the csv file
const int n = 26280, K = 21, M = 10000;
FILE *fp = fopen(filename, "r");
if (fp == NULL) {
    printf("Error: failed to open file %s. Exit\n", filename);
return 1;
}
int alphas_size = n * K; // n rows and K cols
int alphas_size_byte = n * K * sizeof(double);
int rands_size = M * K; // M rows and K cols
int rands_size_byte = M * K * sizeof(double);
double *alphas, *rands, *probs;
alphas = (double*) malloc (alphas_size_byte);
rands = (double*) malloc (rands_size_byte);
probs = (double*) malloc (alphas_size_byte);
// load the csv file
for (int i = 0; i < alphas_size; i++)
fscanf(fp, "%lf", &alphas[i]);
fclose(fp);
// normal distribution (mean: 0 and var: 1)
std::mt19937 gen(19937);
std::normal_distribution<double> norm_dist(0.0,1.0);
for (int i = 0; i < rands_size; i++) rands[i] = norm_dist(gen);
double *d_alphas, *d_rands, *d_probs;
cudaMalloc((void**)&d_rands, rands_size_byte);
cudaMalloc((void**)&d_alphas, alphas_size_byte);
cudaMalloc((void**)&d_probs, alphas_size_byte);
cudaMemcpy(d_rands, rands, rands_size_byte, cudaMemcpyHostToDevice);
cudaMemcpy(d_alphas, alphas, alphas_size_byte, cudaMemcpyHostToDevice);
// kernel 1
int threads_per_block = 192;
dim3 threads (threads_per_block);
  dim3 blocks (ceil(n / (double)threads_per_block)); // divide in floating point: the integer n/threads_per_block truncated before ceil and dropped the last partial block of rows (the kernels in kernels.h are assumed to bound-check the row index)
cudaMemset(d_probs, 0.0, alphas_size_byte);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
compute_probs<<<blocks, threads>>>(d_alphas, d_rands, d_probs, n, K, M);
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat);
cudaMemcpy(probs, d_probs, alphas_size_byte, cudaMemcpyDeviceToHost);
double s = 0.0;
for (int i = 0; i < alphas_size; i++) s += probs[i];
printf("compute_probs: checksum = %lf\n", s);
// kernel 2
double *t_rands = t(rands, K, M);
double *t_alphas = t(alphas, K, n);
cudaMemcpy(d_rands, t_rands, rands_size_byte, cudaMemcpyHostToDevice);
cudaMemcpy(d_alphas, t_alphas, alphas_size_byte, cudaMemcpyHostToDevice);
cudaMemset(d_probs, 0.0, alphas_size_byte);
cudaDeviceSynchronize();
start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
compute_probs_unitStrides<<<blocks, threads>>>(
d_alphas, d_rands, d_probs, n, K, M);
cudaDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat);
cudaMemcpy(probs, d_probs, alphas_size_byte, cudaMemcpyDeviceToHost);
s = 0.0;
for (int i = 0; i < alphas_size; i++) s += probs[i];
printf("compute_probs_unitStrides: checksum = %lf\n", s);
// kernel 3
threads_per_block = 96;
dim3 threads2 (threads_per_block);
  dim3 blocks2 (ceil(n / (double)threads_per_block)); // same floating-point rounding fix as for the first launch
const int sm_size = sizeof(double) * K * threads_per_block * 2;
cudaMemset(d_probs, 0.0, alphas_size_byte);
cudaDeviceSynchronize();
start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
compute_probs_unitStrides_sharedMem<<<blocks2, threads2, sm_size, 0>>>(
d_alphas, d_rands, d_probs, n, K, M);
cudaDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat);
cudaMemcpy(probs, d_probs, alphas_size_byte, cudaMemcpyDeviceToHost);
s = 0.0;
for (int i = 0; i < alphas_size; i++) s += probs[i];
printf("compute_probs_unitStrides_sharedMem: checksum = %lf\n", s);
// free memory
cudaFree(d_alphas);
cudaFree(d_rands);
cudaFree(d_probs);
free(alphas);
free(rands);
free(t_alphas);
free(t_rands);
free(probs);
return 0;
}
|
a32c8f363756781ea56595987bf6697c985a28ad.hip | // !!! This is a file automatically generated by hipify!!!
//Example 6.2.2. Pg 99. Ray-tracing example
#include "../common/book.h"
#include "hip/hip_runtime.h"
#include "../common/cpu_bitmap.h"
#define rnd(x) (x*rand() / RAND_MAX)
#define SPHERES 20
#define INF 2e10f
#define DIM 1024
struct Sphere {
float r, b, g;
float radius;
float x, y, z;
__device__ float hit(float ox, float oy, float *n) {
float dx = ox - x;
float dy = oy - y;
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf(radius*radius - dx*dx - dy*dy);
*n = dz / sqrtf(radius * radius);
return dz + z;
}
return -INF;
}
};
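// Geometry note (added): each pixel fires one ray parallel to the z axis through
// (ox, oy), so hit() reduces to a 2D test: the ray meets the sphere iff
// dx*dx + dy*dy < radius*radius, the visible surface lies at depth z + dz with
// dz = sqrt(radius^2 - dx^2 - dy^2), and n = dz / radius in (0, 1] is reused below
// as a cheap shading factor (brightest at the sphere's centre, darkest at its edge).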
Sphere *s;
__global__ void kernel(Sphere *s, unsigned char *ptr) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox = (x - DIM / 2);
float oy = (y - DIM / 2);
float r = 0, g = 0, b = 0;
float maxz = -INF;
for (int i = 0; i < SPHERES; i++) {
float n;
float t = s[i].hit(ox, oy, &n);
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
ptr[offset * 4 + 0] = (int)(r * 255);
ptr[offset * 4 + 1] = (int)(g * 255);
ptr[offset * 4 + 2] = (int)(b * 255);
ptr[offset * 4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
Sphere *s;
};
int main(void) {
DataBlock data;
// capture the start time
hipEvent_t start, stop;
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&stop));
HANDLE_ERROR(hipEventRecord(start, 0));
CPUBitmap bitmap(DIM, DIM, &data);
unsigned char *dev_bitmap;
Sphere *s;
// allocate memory on the GPU for the output bitmap
HANDLE_ERROR(hipMalloc((void**)&dev_bitmap,
bitmap.image_size()));
// allocate memory for the Sphere dataset
HANDLE_ERROR(hipMalloc((void**)&s,
sizeof(Sphere) * SPHERES));
// allocate temp memory, initialize it, copy to
// memory on the GPU, then free our temp memory
Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere) * SPHERES);
for (int i = 0; i<SPHERES; i++) {
temp_s[i].r = rnd(1.0f);
temp_s[i].g = rnd(1.0f);
temp_s[i].b = rnd(1.0f);
temp_s[i].x = rnd(1000.0f) - 500;
temp_s[i].y = rnd(1000.0f) - 500;
temp_s[i].z = rnd(1000.0f) - 500;
temp_s[i].radius = rnd(100.0f) + 20;
}
HANDLE_ERROR(hipMemcpy(s, temp_s,
sizeof(Sphere) * SPHERES,
hipMemcpyHostToDevice));
free(temp_s);
// generate a bitmap from our sphere data
dim3 grids(DIM / 16, DIM / 16);
dim3 threads(16, 16);
hipLaunchKernelGGL(( kernel) , dim3(grids), dim3(threads) , 0, 0, s, dev_bitmap);
// copy our bitmap back from the GPU for display
HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
hipMemcpyDeviceToHost));
// get stop time, and display the timing results
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
float elapsedTime;
HANDLE_ERROR(hipEventElapsedTime(&elapsedTime,
start, stop));
printf("Time to generate: %3.1f ms\n", elapsedTime);
HANDLE_ERROR(hipEventDestroy(start));
HANDLE_ERROR(hipEventDestroy(stop));
HANDLE_ERROR(hipFree(dev_bitmap));
HANDLE_ERROR(hipFree(s));
// display
bitmap.display_and_exit();
} | a32c8f363756781ea56595987bf6697c985a28ad.cu | //Example 6.2.2. Pg 99. Ray-tracing example
#include "../common/book.h"
#include "cuda.h"
#include "../common/cpu_bitmap.h"
#define rnd(x) (x*rand() / RAND_MAX)
#define SPHERES 20
#define INF 2e10f
#define DIM 1024
struct Sphere {
float r, b, g;
float radius;
float x, y, z;
__device__ float hit(float ox, float oy, float *n) {
float dx = ox - x;
float dy = oy - y;
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf(radius*radius - dx*dx - dy*dy);
*n = dz / sqrtf(radius * radius);
return dz + z;
}
return -INF;
}
};
Sphere *s;
__global__ void kernel(Sphere *s, unsigned char *ptr) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox = (x - DIM / 2);
float oy = (y - DIM / 2);
float r = 0, g = 0, b = 0;
float maxz = -INF;
for (int i = 0; i < SPHERES; i++) {
float n;
float t = s[i].hit(ox, oy, &n);
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
ptr[offset * 4 + 0] = (int)(r * 255);
ptr[offset * 4 + 1] = (int)(g * 255);
ptr[offset * 4 + 2] = (int)(b * 255);
ptr[offset * 4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
Sphere *s;
};
int main(void) {
DataBlock data;
// capture the start time
cudaEvent_t start, stop;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
HANDLE_ERROR(cudaEventRecord(start, 0));
CPUBitmap bitmap(DIM, DIM, &data);
unsigned char *dev_bitmap;
Sphere *s;
// allocate memory on the GPU for the output bitmap
HANDLE_ERROR(cudaMalloc((void**)&dev_bitmap,
bitmap.image_size()));
// allocate memory for the Sphere dataset
HANDLE_ERROR(cudaMalloc((void**)&s,
sizeof(Sphere) * SPHERES));
// allocate temp memory, initialize it, copy to
// memory on the GPU, then free our temp memory
Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere) * SPHERES);
for (int i = 0; i<SPHERES; i++) {
temp_s[i].r = rnd(1.0f);
temp_s[i].g = rnd(1.0f);
temp_s[i].b = rnd(1.0f);
temp_s[i].x = rnd(1000.0f) - 500;
temp_s[i].y = rnd(1000.0f) - 500;
temp_s[i].z = rnd(1000.0f) - 500;
temp_s[i].radius = rnd(100.0f) + 20;
}
HANDLE_ERROR(cudaMemcpy(s, temp_s,
sizeof(Sphere) * SPHERES,
cudaMemcpyHostToDevice));
free(temp_s);
// generate a bitmap from our sphere data
dim3 grids(DIM / 16, DIM / 16);
dim3 threads(16, 16);
kernel <<<grids, threads >>>(s, dev_bitmap);
// copy our bitmap back from the GPU for display
HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
cudaMemcpyDeviceToHost));
// get stop time, and display the timing results
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
float elapsedTime;
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime,
start, stop));
printf("Time to generate: %3.1f ms\n", elapsedTime);
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
HANDLE_ERROR(cudaFree(dev_bitmap));
HANDLE_ERROR(cudaFree(s));
// display
bitmap.display_and_exit();
} |
5a089f836ceeebf0ed1d71629959cf7ddb9cba8b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2011, Duane Merrill
* Copyright (c) 2011-2018, NVIDIA CORPORATION
* Copyright (c) 2020 Savely Pototsky (SavaLione)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/******************************************************************************
* Simple demonstration of hipcub::BlockReduce
*
* To compile using the command line:
* nvcc -arch=sm_XX example_block_reduce.cu -I/.. -lcudart -O3
*
******************************************************************************/
// Ensure printing of CUDA runtime errors to console (define before including cub.h)
#define CUB_STDERR
#include <stdio.h>
#include <iostream>
#include <newcub/block/block_load.cuh>
#include <newcub/block/block_store.cuh>
#include <newcub/block/block_reduce.cuh>
// #include <newcub/test/test_util.h>
#include <test_util.h>
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
/// Verbose output
bool g_verbose = false;
/// Timing iterations
int g_timing_iterations = 100;
/// Default grid size
int g_grid_size = 1;
//---------------------------------------------------------------------
// Kernels
//---------------------------------------------------------------------
/**
* Simple kernel for performing a block-wide exclusive prefix sum over integers
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockReduceAlgorithm ALGORITHM>
__global__ void BlockSumKernel(
int *d_in, // Tile of input
int *d_out, // Tile aggregate
clock_t *d_elapsed) // Elapsed cycle count of block reduction
{
// Specialize BlockReduce type for our thread block
typedef BlockReduce<int, BLOCK_THREADS, ALGORITHM> BlockReduceT;
// Shared memory
__shared__ typename BlockReduceT::TempStorage temp_storage;
// Per-thread tile data
int data[ITEMS_PER_THREAD];
LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_in, data);
// Start cycle timer
clock_t start = clock();
// Compute sum
int aggregate = BlockReduceT(temp_storage).Sum(data);
// Stop cycle timer
clock_t stop = clock();
// Store aggregate and elapsed clocks
if (threadIdx.x == 0)
{
*d_elapsed = (start > stop) ? start - stop : stop - start;
*d_out = aggregate;
}
}
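// Data-layout note (added): LoadDirectStriped<BLOCK_THREADS> assigns thread t the items
// d_in[t], d_in[t + BLOCK_THREADS], d_in[t + 2 * BLOCK_THREADS], ... (a striped
// arrangement, so consecutive threads touch consecutive addresses and loads coalesce).
// Since addition is order-independent, the aggregate matches a blocked arrangement.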
//---------------------------------------------------------------------
// Host utilities
//---------------------------------------------------------------------
/**
* Initialize reduction problem (and solution).
* Returns the aggregate
*/
int Initialize(int *h_in, int num_items)
{
int inclusive = 0;
for (int i = 0; i < num_items; ++i)
{
h_in[i] = i % 17;
inclusive += h_in[i];
}
return inclusive;
}
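// Sanity check (added): with h_in[i] = i % 17 the aggregate has a closed form. Writing
// num_items = 17 * q + s (0 <= s < 17), the sum is q * 136 + s * (s - 1) / 2, because
// each full cycle 0..16 contributes 136. Every Test<> below uses
// TILE_SIZE = 1024 = 17 * 60 + 4, so each configuration should report the same
// aggregate: 60 * 136 + 6 = 8166.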
/**
* Test thread block reduction
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockReduceAlgorithm ALGORITHM>
void Test()
{
const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
// Allocate host arrays
int *h_in = new int[TILE_SIZE];
int *h_gpu = new int[TILE_SIZE + 1];
// Initialize problem and reference output on host
int h_aggregate = Initialize(h_in, TILE_SIZE);
// Initialize device arrays
int *d_in = NULL;
int *d_out = NULL;
clock_t *d_elapsed = NULL;
hipMalloc((void**)&d_in, sizeof(int) * TILE_SIZE);
hipMalloc((void**)&d_out, sizeof(int) * 1);
hipMalloc((void**)&d_elapsed, sizeof(clock_t));
// Display input problem data
if (g_verbose)
{
printf("Input data: ");
for (int i = 0; i < TILE_SIZE; i++)
printf("%d, ", h_in[i]);
printf("\n\n");
}
// Kernel props
int max_sm_occupancy;
CubDebugExit(MaxSmOccupancy(max_sm_occupancy, BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>, BLOCK_THREADS));
// Copy problem to device
hipMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, hipMemcpyHostToDevice);
printf("BlockReduce algorithm %s on %d items (%d timing iterations, %d blocks, %d threads, %d items per thread, %d SM occupancy):\n",
(ALGORITHM == BLOCK_REDUCE_RAKING) ? "BLOCK_REDUCE_RAKING" : "BLOCK_REDUCE_WARP_REDUCTIONS",
TILE_SIZE, g_timing_iterations, g_grid_size, BLOCK_THREADS, ITEMS_PER_THREAD, max_sm_occupancy);
// Run aggregate/prefix kernel
hipLaunchKernelGGL(( BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>), dim3(g_grid_size), dim3(BLOCK_THREADS), 0, 0,
d_in,
d_out,
d_elapsed);
// Check total aggregate
printf("\tAggregate: ");
int compare = CompareDeviceResults(&h_aggregate, d_out, 1, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Run this several times and average the performance results
GpuTimer timer;
float elapsed_millis = 0.0;
clock_t elapsed_clocks = 0;
for (int i = 0; i < g_timing_iterations; ++i)
{
// Copy problem to device
hipMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, hipMemcpyHostToDevice);
timer.Start();
// Run aggregate/prefix kernel
hipLaunchKernelGGL(( BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>), dim3(g_grid_size), dim3(BLOCK_THREADS), 0, 0,
d_in,
d_out,
d_elapsed);
timer.Stop();
elapsed_millis += timer.ElapsedMillis();
// Copy clocks from device
clock_t clocks;
CubDebugExit(hipMemcpy(&clocks, d_elapsed, sizeof(clock_t), hipMemcpyDeviceToHost));
elapsed_clocks += clocks;
}
// Check for kernel errors and STDIO from the kernel, if any
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
// Display timing results
float avg_millis = elapsed_millis / g_timing_iterations;
float avg_items_per_sec = float(TILE_SIZE * g_grid_size) / avg_millis / 1000.0f;
float avg_clocks = float(elapsed_clocks) / g_timing_iterations;
float avg_clocks_per_item = avg_clocks / TILE_SIZE;
printf("\tAverage BlockReduce::Sum clocks: %.3f\n", avg_clocks);
printf("\tAverage BlockReduce::Sum clocks per item: %.3f\n", avg_clocks_per_item);
printf("\tAverage kernel millis: %.4f\n", avg_millis);
printf("\tAverage million items / sec: %.4f\n", avg_items_per_sec);
// Cleanup
if (h_in) delete[] h_in;
if (h_gpu) delete[] h_gpu;
if (d_in) hipFree(d_in);
if (d_out) hipFree(d_out);
if (d_elapsed) hipFree(d_elapsed);
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("grid-size", g_grid_size);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--i=<timing iterations>] "
"[--grid-size=<grid size>] "
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Run tests
Test<1024, 1, BLOCK_REDUCE_RAKING>();
Test<512, 2, BLOCK_REDUCE_RAKING>();
Test<256, 4, BLOCK_REDUCE_RAKING>();
Test<128, 8, BLOCK_REDUCE_RAKING>();
Test<64, 16, BLOCK_REDUCE_RAKING>();
Test<32, 32, BLOCK_REDUCE_RAKING>();
Test<16, 64, BLOCK_REDUCE_RAKING>();
printf("-------------\n");
Test<1024, 1, BLOCK_REDUCE_WARP_REDUCTIONS>();
Test<512, 2, BLOCK_REDUCE_WARP_REDUCTIONS>();
Test<256, 4, BLOCK_REDUCE_WARP_REDUCTIONS>();
Test<128, 8, BLOCK_REDUCE_WARP_REDUCTIONS>();
Test<64, 16, BLOCK_REDUCE_WARP_REDUCTIONS>();
Test<32, 32, BLOCK_REDUCE_WARP_REDUCTIONS>();
Test<16, 64, BLOCK_REDUCE_WARP_REDUCTIONS>();
return 0;
}
| 5a089f836ceeebf0ed1d71629959cf7ddb9cba8b.cu | /*
* Copyright (c) 2011, Duane Merrill
* Copyright (c) 2011-2018, NVIDIA CORPORATION
* Copyright (c) 2020 Savely Pototsky (SavaLione)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/******************************************************************************
* Simple demonstration of cub::BlockReduce
*
* To compile using the command line:
* nvcc -arch=sm_XX example_block_reduce.cu -I/.. -lcudart -O3
*
******************************************************************************/
// Ensure printing of CUDA runtime errors to console (define before including cub.h)
#define CUB_STDERR
#include <stdio.h>
#include <iostream>
#include <newcub/block/block_load.cuh>
#include <newcub/block/block_store.cuh>
#include <newcub/block/block_reduce.cuh>
// #include <newcub/test/test_util.h>
#include <test_util.h>
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
/// Verbose output
bool g_verbose = false;
/// Timing iterations
int g_timing_iterations = 100;
/// Default grid size
int g_grid_size = 1;
//---------------------------------------------------------------------
// Kernels
//---------------------------------------------------------------------
/**
* Simple kernel for performing a block-wide exclusive prefix sum over integers
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockReduceAlgorithm ALGORITHM>
__global__ void BlockSumKernel(
int *d_in, // Tile of input
int *d_out, // Tile aggregate
clock_t *d_elapsed) // Elapsed cycle count of block reduction
{
// Specialize BlockReduce type for our thread block
typedef BlockReduce<int, BLOCK_THREADS, ALGORITHM> BlockReduceT;
// Shared memory
__shared__ typename BlockReduceT::TempStorage temp_storage;
// Per-thread tile data
int data[ITEMS_PER_THREAD];
LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_in, data);
// Start cycle timer
clock_t start = clock();
// Compute sum
int aggregate = BlockReduceT(temp_storage).Sum(data);
// Stop cycle timer
clock_t stop = clock();
// Store aggregate and elapsed clocks
if (threadIdx.x == 0)
{
*d_elapsed = (start > stop) ? start - stop : stop - start;
*d_out = aggregate;
}
}
//---------------------------------------------------------------------
// Host utilities
//---------------------------------------------------------------------
/**
* Initialize reduction problem (and solution).
* Returns the aggregate
*/
int Initialize(int *h_in, int num_items)
{
int inclusive = 0;
for (int i = 0; i < num_items; ++i)
{
h_in[i] = i % 17;
inclusive += h_in[i];
}
return inclusive;
}
/**
* Test thread block reduction
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockReduceAlgorithm ALGORITHM>
void Test()
{
const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
// Allocate host arrays
int *h_in = new int[TILE_SIZE];
int *h_gpu = new int[TILE_SIZE + 1];
// Initialize problem and reference output on host
int h_aggregate = Initialize(h_in, TILE_SIZE);
// Initialize device arrays
int *d_in = NULL;
int *d_out = NULL;
clock_t *d_elapsed = NULL;
cudaMalloc((void**)&d_in, sizeof(int) * TILE_SIZE);
cudaMalloc((void**)&d_out, sizeof(int) * 1);
cudaMalloc((void**)&d_elapsed, sizeof(clock_t));
// Display input problem data
if (g_verbose)
{
printf("Input data: ");
for (int i = 0; i < TILE_SIZE; i++)
printf("%d, ", h_in[i]);
printf("\n\n");
}
// Kernel props
int max_sm_occupancy;
CubDebugExit(MaxSmOccupancy(max_sm_occupancy, BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>, BLOCK_THREADS));
// Copy problem to device
cudaMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, cudaMemcpyHostToDevice);
printf("BlockReduce algorithm %s on %d items (%d timing iterations, %d blocks, %d threads, %d items per thread, %d SM occupancy):\n",
(ALGORITHM == BLOCK_REDUCE_RAKING) ? "BLOCK_REDUCE_RAKING" : "BLOCK_REDUCE_WARP_REDUCTIONS",
TILE_SIZE, g_timing_iterations, g_grid_size, BLOCK_THREADS, ITEMS_PER_THREAD, max_sm_occupancy);
// Run aggregate/prefix kernel
BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM><<<g_grid_size, BLOCK_THREADS>>>(
d_in,
d_out,
d_elapsed);
// Check total aggregate
printf("\tAggregate: ");
int compare = CompareDeviceResults(&h_aggregate, d_out, 1, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Run this several times and average the performance results
GpuTimer timer;
float elapsed_millis = 0.0;
clock_t elapsed_clocks = 0;
for (int i = 0; i < g_timing_iterations; ++i)
{
// Copy problem to device
cudaMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, cudaMemcpyHostToDevice);
timer.Start();
// Run aggregate/prefix kernel
BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM><<<g_grid_size, BLOCK_THREADS>>>(
d_in,
d_out,
d_elapsed);
timer.Stop();
elapsed_millis += timer.ElapsedMillis();
// Copy clocks from device
clock_t clocks;
CubDebugExit(cudaMemcpy(&clocks, d_elapsed, sizeof(clock_t), cudaMemcpyDeviceToHost));
elapsed_clocks += clocks;
}
// Check for kernel errors and STDIO from the kernel, if any
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
// Display timing results
float avg_millis = elapsed_millis / g_timing_iterations;
float avg_items_per_sec = float(TILE_SIZE * g_grid_size) / avg_millis / 1000.0f;
float avg_clocks = float(elapsed_clocks) / g_timing_iterations;
float avg_clocks_per_item = avg_clocks / TILE_SIZE;
printf("\tAverage BlockReduce::Sum clocks: %.3f\n", avg_clocks);
printf("\tAverage BlockReduce::Sum clocks per item: %.3f\n", avg_clocks_per_item);
printf("\tAverage kernel millis: %.4f\n", avg_millis);
printf("\tAverage million items / sec: %.4f\n", avg_items_per_sec);
// Cleanup
if (h_in) delete[] h_in;
if (h_gpu) delete[] h_gpu;
if (d_in) cudaFree(d_in);
if (d_out) cudaFree(d_out);
if (d_elapsed) cudaFree(d_elapsed);
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("grid-size", g_grid_size);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--i=<timing iterations>] "
"[--grid-size=<grid size>] "
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Run tests
Test<1024, 1, BLOCK_REDUCE_RAKING>();
Test<512, 2, BLOCK_REDUCE_RAKING>();
Test<256, 4, BLOCK_REDUCE_RAKING>();
Test<128, 8, BLOCK_REDUCE_RAKING>();
Test<64, 16, BLOCK_REDUCE_RAKING>();
Test<32, 32, BLOCK_REDUCE_RAKING>();
Test<16, 64, BLOCK_REDUCE_RAKING>();
printf("-------------\n");
Test<1024, 1, BLOCK_REDUCE_WARP_REDUCTIONS>();
Test<512, 2, BLOCK_REDUCE_WARP_REDUCTIONS>();
Test<256, 4, BLOCK_REDUCE_WARP_REDUCTIONS>();
Test<128, 8, BLOCK_REDUCE_WARP_REDUCTIONS>();
Test<64, 16, BLOCK_REDUCE_WARP_REDUCTIONS>();
Test<32, 32, BLOCK_REDUCE_WARP_REDUCTIONS>();
Test<16, 64, BLOCK_REDUCE_WARP_REDUCTIONS>();
return 0;
}
|
2643c5820f3c4f3160a1d53586c7249c014c7623.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
#define min(x,y) ((x)<(y)?(x):(y))
#define max(x,y) ((x)>(y)?(x):(y))
#define dist(x,y) ((x-y)*(x-y))
#define INF 1e10 // Pseudo-infinite number for this code
/// Calculate quick lower bound
/// Usually, LB_Kim takes O(m) time to find the top, bottom, first and last points.
/// However, because of z-normalization the top and bottom cannot give significant benefits.
/// And using the first and last points can be computed in constant time.
/// The pruning power of LB_Kim is non-trivial, especially when the query is not long, say of length 128.
/////////////////////Added to use constant memory///////////////
//extern __constant__ double q[];
//////////////////////////////////////////////////////
///////////////////// Added to include locks ///////////////////
__device__ int lock_Variable = 0; // 0 = lock open, 1 = lock closed
/////////////////////////////////////
__device__ double lb_kim_hierarchy(double *t, double *q, int j, int len,
double mean, double std, double bsf = INF) {
/// 1 point at front and back
double d, lb;
double x0 = (t[j] - mean) / std;
double y0 = (t[(len - 1 + j)] - mean) / std;
lb = dist(x0,q[0]) + dist(y0,q[len-1]);
if (lb >= bsf)
return lb;
/// 2 points at front
double x1 = (t[(j + 1)] - mean) / std;
d = min(dist(x1,q[0]), dist(x0,q[1]));
d = min(d, dist(x1,q[1]));
lb += d;
if (lb >= bsf)
return lb;
/// 2 points at back
double y1 = (t[(len - 2 + j)] - mean) / std;
d = min(dist(y1,q[len-1]), dist(y0, q[len-2]) );
d = min(d, dist(y1,q[len-2]));
lb += d;
if (lb >= bsf)
return lb;
/// 3 points at front
double x2 = (t[(j + 2)] - mean) / std;
d = min(dist(x0,q[2]), dist(x1, q[2]));
d = min(d, dist(x2,q[2]));
d = min(d, dist(x2,q[1]));
d = min(d, dist(x2,q[0]));
lb += d;
if (lb >= bsf)
return lb;
/// 3 points at back
double y2 = (t[(len - 3 + j)] - mean) / std;
d = min(dist(y0,q[len-3]), dist(y1, q[len-3]));
d = min(d, dist(y2,q[len-3]));
d = min(d, dist(y2,q[len-2]));
d = min(d, dist(y2,q[len-1]));
lb += d;
return lb;
}
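// Pruning note (added): the first tier adds the exact distances of the two window
// endpoints to q[0] and q[len-1]; each later tier adds the minimum pairwise distance
// among the newly included boundary points, returning as soon as the running total lb
// exceeds the best-so-far distance, so most candidate windows never reach the full
// DTW computation below.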
__device__ double dtw(double* A, double* B, int m, int r, double* costM,
double* cost_prevM, int bsfindex, double bsf = INF) {
double *cost_tmp;
int i, j, k;
double x, y, z, min_cost;
int start = bsfindex * (2 * r + 1);
double* cost = costM + start;
double*cost_prev = cost_prevM + start;
    /// Instead of using a matrix of size O(m^2) or O(mr), we reuse two arrays of size O(r).
// hipMalloc((void**)&cost, (2*r+1) * sizeof(double));
for (k = 0; k < 2 * r + 1; k++)
cost[k] = INF;
// hipMalloc((void**)&cost_prev, (2*r+1) * sizeof(double));
for (k = 0; k < 2 * r + 1; k++)
cost_prev[k] = INF;
for (i = 0; i < m; i++) {
k = max(0,r-i);
min_cost = INF;
for (j = max(0,i-r); j <= min(m-1,i+r); j++, k++) {
/// Initialize all row and column
if ((i == 0) && (j == 0)) {
cost[k] = dist(A[0],B[0]);
min_cost = cost[k];
continue;
}
if ((j - 1 < 0) || (k - 1 < 0))
y = INF;
else
y = cost[k - 1];
if ((i - 1 < 0) || (k + 1 > 2 * r))
x = INF;
else
x = cost_prev[k + 1];
if ((i - 1 < 0) || (j - 1 < 0))
z = INF;
else
z = cost_prev[k];
/// Classic DTW calculation
cost[k] = min( min( x, y) , z) + dist(A[i],B[j]);
/// Find minimum cost in row for early abandoning (possibly to use column instead of row).
if (cost[k] < min_cost) {
min_cost = cost[k];
}
}
        /// We can abandon early if the current cumulative distance together with the lower bound is larger than bsf
if (i + r < m - 1 && min_cost >= bsf) {
return min_cost;
}
/// Move current array to previous array.
cost_tmp = cost;
cost = cost_prev;
cost_prev = cost_tmp;
}
k--;
/// the DTW distance is in the last cell in the matrix of size O(m^2) or at the middle of our array.
double final_dtw = cost_prev[k];
return final_dtw;
}
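// Band-indexing note (added): cost[] holds one row of the Sakoe-Chiba band. Cell (i, j)
// is stored at k = j - i + r, which always lies in [0, 2r]; in the previous row the
// vertical neighbour (i-1, j) therefore sits at cost_prev[k + 1] and the diagonal
// (i-1, j-1) at cost_prev[k], which is why x and z are read from those slots above.
// Example: with m = 4 and r = 1, row i = 2 covers j in {1, 2, 3} at k = 0, 1, 2.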
__global__ void processKernel(double* queue, double* buffer, double* cost,
double* cost_prev, double* bsf_a, int* loc_a, double* tM, double* tzM,
int m, int r, double bsf, int size, int EPOCH) {
extern __shared__ double q[];
int shared_index = threadIdx.x;
while(shared_index < m){
q[shared_index] = queue[shared_index];
shared_index += blockDim.x;
}
//printf("Hello");
int N = gridDim.x;
int M = blockDim.x;
int i = blockIdx.x;
int j = threadIdx.x;
int items_per_a = EPOCH / (N * M);
int maxindex = (size - 1) / items_per_a;
double lb_kim;
int bsfindex = i * M + j;
int sindex = bsfindex * items_per_a;
int loc;
int k;
double d;
double *t, *tz;
double ex, ex2, mean, std, dist;
t = tM + bsfindex * 2 * m;
tz = tzM + bsfindex * 2 * m;
    /// Initialize the cumulative lower bound
ex = 0;
ex2 = 0;
int offset = m;
if (bsfindex == maxindex)
offset = 0;
if (bsfindex <= maxindex)
for (i = 0; i < items_per_a + offset; i++) {
d = (double) buffer[sindex + i];
ex += d;
ex2 += d * d;
t[i % m] = d;
t[(i % m) + m] = d;
/// If there is enough data in t, the DTW distance can be calculated
if (i >= m - 1) {
mean = ex / m;
std = ex2 / m;
std = sqrt(std - mean * mean);
/// compute the start location of the data in the current circular array, t
j = (i + 1) % m;
/// Use a constant lower bound to prune the obvious subsequence
lb_kim = lb_kim_hierarchy(t, q, j, m, mean, std, bsf);
if (lb_kim < bsf)
{
for (k = 0; k < m; k++) {
tz[k] = (t[(k + j)] - mean) / std;
}
dist = dtw(tz, q, m, r, cost, cost_prev, bsfindex, bsf);
////////////////////////////// Implementing locks //////////////////////////////////
///Previous code
//
// if (dist < bsf) { /// Update bsf
// /// loc is the real starting location of the nearest neighbor in the file
// bsf = dist;
// loc = sindex + i;
// }
/////////End of previous code
///////// Implementing loc
if (dist < bsf) {
bool loop = true;
while (loop) {
                        if (atomicCAS(&lock_Variable, 0, 1) == 0) { // atomicCAS returns the old value: 0 means the lock was open and this thread has just closed it
if (dist < bsf) {
bsf = dist;
loc = sindex + i;
}
lock_Variable = 0;
loop = false;
}
}
}
///////////////////////////////////////////////////////////////////////////////////
}
            /// Remove obsolete points from the sum and sum of squares
ex -= t[j];
ex2 -= t[j] * t[j];
}
}
bsf_a[bsfindex] = bsf;
loc_a[bsfindex] = loc;
//Some issue which popped up now .
}
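// Work-partitioning note (added): each of the N * M threads scans items_per_a =
// EPOCH / (N * M) consecutive points of the chunk (1000 with the defaults N = 10,
// M = 100, EPOCH = 1e6), plus an m-point overlap (offset) so that subsequences
// straddling two threads' ranges are still examined; only the last active thread
// (bsfindex == maxindex) skips the overlap. Each thread records its own best distance
// and location in bsf_a / loc_a, and the host reduces those arrays after the kernel.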
void error(int id) {
if (id == 1)
printf("ERROR : Memory can't be allocated!!!\n\n");
else if (id == 2)
printf("ERROR : File not Found!!!\n\n");
else if (id == 3)
printf("ERROR : Can't create Output File!!!\n\n");
else if (id == 4) {
printf("ERROR : Invalid Number of Arguments!!!\n");
printf(
"Command Usage: UCR_DTW.exe data-file query-file m R\n\n");
printf(
"For example : UCR_DTW.exe data.txt query.txt 128 0.05\n");
}
exit(1);
}
/// Main Function
int main(int argc, char *argv[]) {
FILE *fp; /// data file pointer
FILE *qp; /// query file pointer
double bsf = INF; /// best-so-far
double *h_q; /// data array and query array
clock_t begin, end;
double time_spent;
double d;
long long i;
double ex, ex2, mean, std;
int m = -1, r = -1;
long long loc = 0;
double t1, t2;
int kim = 0, keogh = 0, keogh2 = 0;
double *h_buffer;
int N = 10, M = 100;
int sh= 0;
    /// For every EPOCH points, all cumulative values, such as ex (sum) and ex2 (sum of squares), are restarted to reduce floating-point error.
int EPOCH = 1000000;
int epoch; //Optimization
/// If not enough input, display an error.
if (argc <= 3)
error(4);
/// read size of the query
if (argc > 3)
m = atol(argv[3]);
/// read warping windows
if (argc > 4) {
double R = atof(argv[4]);
if (R <= 1)
r = floor(R * m);
else
r = floor(R);
}
if (argc > 7) {
N = atoi(argv[5]);
M = atoi(argv[6]);
EPOCH = atol(argv[7]);
}
// m = 128;
// r = 6;
fp = fopen(argv[1], "r");
// fp = fopen("/home/ubuntu/Desktop/DTW Project/Executable/Data.txt", "r");
// if( fp == NULL )
// error(2);
qp = fopen(argv[2], "r");
// qp = fopen("/home/ubuntu/Desktop/DTW Project/Executable/Query.txt", "r");
// if( qp == NULL )
// error(2);
/// start the clock
t1 = clock();
/// malloc everything here
h_q = (double *) malloc(sizeof(double) * m);
if (h_q == NULL)
error(1);
h_buffer = (double *) malloc(sizeof(double) * (EPOCH));
if (h_buffer == NULL)
error(1);
/// Read query file
bsf = INF;
i = 0;
ex = ex2 = 0;
while (fscanf(qp, "%lf", &d) != EOF && i < m) {
ex += d;
ex2 += d * d;
h_q[i] = d;
i++;
}
fclose(qp);
/// Do z-normalize the query, keep in same array, q
mean = ex / m;
std = ex2 / m;
std = sqrt(std - mean * mean);
for (i = 0; i < m; i++)
h_q[i] = (h_q[i] - mean) / std;
int size = N * M;
double* h_bsf = (double *) malloc(sizeof(double) * size);
int* h_loc = (int *) malloc(sizeof(int) * size);
for (i = 0; i < size; i++) {
h_bsf[i] = INF;
h_loc[i] = 0;
}
//Allocate all the cuda Stuffs
double *d_q;
double *d_buffer, *d_bsf;
double *d_cost, *d_cost_prev;
double *d_t, *d_tz;
int* d_loc;
hipMalloc((void**) &d_buffer, (EPOCH) * sizeof(double));
hipMalloc((void**) &d_cost, (2 * r + 1) * size * sizeof(double));
hipMalloc((void**) &d_cost_prev, (2 * r + 1) * size * sizeof(double));
hipMalloc((void**) &d_bsf, size * sizeof(double));
hipMalloc((void**) &d_t, 2 * m * size * sizeof(double));
hipMalloc((void**) &d_tz, 2 * m * size * sizeof(double));
hipMalloc((void**) &d_q, m * sizeof(double));
hipMalloc((void**) &d_loc, size * sizeof(int));
///Copying BSF array
    hipMemcpy(d_bsf, h_bsf, size * sizeof(double), hipMemcpyHostToDevice); // copy all N*M entries; the original m * sizeof(double) only initialized the first m slots of d_bsf
///Copy all the Query related arrays
hipMemcpy(d_q, h_q, m * sizeof(double), hipMemcpyHostToDevice);
bool done = false;
bool last = false;
int it = 0, ep = 0, k = 0;
//begin = clock();
while (!done) {
/// Read first m-1 points
if (it == 0) {
epoch = 100000;
while (ep < epoch) {
if (fscanf(fp, "%lf", &d) == EOF)
break;
h_buffer[ep] = d;
ep++;
}
}
        /// Data are read in chunks of size EPOCH.
        /// When there is nothing to read, the loop ends.
if (ep <= m - 1) {
done = true;
} else {
if (last) {
done = true;
}
//printf("Reading Done.\n");
sh ++;
//begin = clock();
hipMemcpy(d_buffer, h_buffer, ep * sizeof(double),
hipMemcpyHostToDevice); // to copy from CPU to GPU
hipDeviceSynchronize();
//end = clock();
// time_spent = (double) (end - begin) / CLOCKS_PER_SEC;
// printf("Time taken by memcpy for reading buffer %lf ", time_spent);
            /// Just for printing a dot for approximately every million points. Not very accurate.
// printf("Copying done.\n");
//Do everything here
// begin = clock();
hipLaunchKernelGGL(( processKernel), dim3(N), dim3(M),m*sizeof(double), 0, d_q, d_buffer, d_cost, d_cost_prev, d_bsf,
d_loc, d_t, d_tz, m, r, bsf, ep, EPOCH);
// hipDeviceSynchronize();
// end = clock();
// time_spent = (double) (end - begin) / CLOCKS_PER_SEC;
// printf("Time taken by kernel %lf ", time_spent);
//Do the next set of buffering
// printf("Kernel done.\n");
epoch = EPOCH;
ep = 0;
// begin = clock();
while (ep < epoch) {
if (fscanf(fp, "%lf", &d) == EOF) {
last = true;
break;
}
h_buffer[ep] = d;
ep++;
}
// end = clock();
// time_spent = (double) (end - begin) / CLOCKS_PER_SEC;
// printf("Time taken for reading %lf ", time_spent);
//printf("Loading next set done\n");
// begin = clock();
hipMemcpy(h_bsf, d_bsf, size * sizeof(double),
hipMemcpyDeviceToHost);
hipMemcpy(h_loc, d_loc, size * sizeof(int),
hipMemcpyDeviceToHost);
hipDeviceSynchronize();
// end = clock();
// time_spent = (double) (end - begin) / CLOCKS_PER_SEC;
// printf("Time taken for memcpy %lf", time_spent);
// printf("computation");
begin = clock();
for (k = 0; k < size; k++) {
if (bsf > h_bsf[k]) {
bsf = h_bsf[k];
if (it == 0) {
loc = (it) * (EPOCH) + h_loc[k] - m + 1;
} else {
loc = 100000 + (it - 1) * (EPOCH) + h_loc[k] - m + 1;
}
}
}
// end = clock();
// time_spent = (double) (end - begin) / CLOCKS_PER_SEC;
// printf("Time taken for computation %lf \n", time_spent);
// printf("Computation Done.\n");
/// If the size of last chunk is less then EPOCH, then no more data and terminate.
}
it++;
}
// end = clock();
// time_spent = (double) (end - begin) / CLOCKS_PER_SEC;
// printf("\nTime taken %lf ", time_spent);
fclose(fp);
free(h_q);
free(h_buffer);
free(h_bsf);
hipFree(d_buffer);
hipFree(d_q);
hipFree(d_bsf);
hipFree(d_loc);
hipFree(d_cost);
hipFree(d_cost_prev);
hipFree(d_t);
hipFree(d_tz);
// t2 = clock();
printf("\n");
/// Note that loc and i are long long.
// cout << "Location : " << loc << endl;
// cout << "Distance : " << sqrt(bsf) << endl;
// cout << "Data Scanned : " << i << endl;
// cout << "Total Execution Time : " << (t2-t1)/CLOCKS_PER_SEC << " sec" << endl;
/// printf is just easier for formating ;)
printf("Distance %lf\n", sqrt(bsf));
    printf("Location %lld\n", loc); // loc is long long (see the note above), so %lld rather than %d
printf("No of iterations %d\n", sh);
return 0;
}
| 2643c5820f3c4f3160a1d53586c7249c014c7623.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
#define min(x,y) ((x)<(y)?(x):(y))
#define max(x,y) ((x)>(y)?(x):(y))
#define dist(x,y) ((x-y)*(x-y))
#define INF 1e10 // Pseudo-infinite number for this code
/// Calculate quick lower bound
/// Usually, LB_Kim takes O(m) time to find the top, bottom, first and last points.
/// However, because of z-normalization the top and bottom cannot give significant benefits.
/// And using the first and last points can be computed in constant time.
/// The pruning power of LB_Kim is non-trivial, especially when the query is not long, say of length 128.
/////////////////////Added to use constant memory///////////////
//extern __constant__ double q[];
//////////////////////////////////////////////////////
///////////////////// Added to include locks ///////////////////
__device__ int lock_Variable = 0; // 0 = lock open, 1 = lock closed
/////////////////////////////////////
__device__ double lb_kim_hierarchy(double *t, double *q, int j, int len,
double mean, double std, double bsf = INF) {
/// 1 point at front and back
double d, lb;
double x0 = (t[j] - mean) / std;
double y0 = (t[(len - 1 + j)] - mean) / std;
lb = dist(x0,q[0]) + dist(y0,q[len-1]);
if (lb >= bsf)
return lb;
/// 2 points at front
double x1 = (t[(j + 1)] - mean) / std;
d = min(dist(x1,q[0]), dist(x0,q[1]));
d = min(d, dist(x1,q[1]));
lb += d;
if (lb >= bsf)
return lb;
/// 2 points at back
double y1 = (t[(len - 2 + j)] - mean) / std;
d = min(dist(y1,q[len-1]), dist(y0, q[len-2]) );
d = min(d, dist(y1,q[len-2]));
lb += d;
if (lb >= bsf)
return lb;
/// 3 points at front
double x2 = (t[(j + 2)] - mean) / std;
d = min(dist(x0,q[2]), dist(x1, q[2]));
d = min(d, dist(x2,q[2]));
d = min(d, dist(x2,q[1]));
d = min(d, dist(x2,q[0]));
lb += d;
if (lb >= bsf)
return lb;
/// 3 points at back
double y2 = (t[(len - 3 + j)] - mean) / std;
d = min(dist(y0,q[len-3]), dist(y1, q[len-3]));
d = min(d, dist(y2,q[len-3]));
d = min(d, dist(y2,q[len-2]));
d = min(d, dist(y2,q[len-1]));
lb += d;
return lb;
}
__device__ double dtw(double* A, double* B, int m, int r, double* costM,
double* cost_prevM, int bsfindex, double bsf = INF) {
double *cost_tmp;
int i, j, k;
double x, y, z, min_cost;
int start = bsfindex * (2 * r + 1);
double* cost = costM + start;
double*cost_prev = cost_prevM + start;
    /// Instead of using a matrix of size O(m^2) or O(mr), we reuse two arrays of size O(r).
// cudaMalloc((void**)&cost, (2*r+1) * sizeof(double));
for (k = 0; k < 2 * r + 1; k++)
cost[k] = INF;
// cudaMalloc((void**)&cost_prev, (2*r+1) * sizeof(double));
for (k = 0; k < 2 * r + 1; k++)
cost_prev[k] = INF;
for (i = 0; i < m; i++) {
k = max(0,r-i);
min_cost = INF;
for (j = max(0,i-r); j <= min(m-1,i+r); j++, k++) {
/// Initialize all row and column
if ((i == 0) && (j == 0)) {
cost[k] = dist(A[0],B[0]);
min_cost = cost[k];
continue;
}
if ((j - 1 < 0) || (k - 1 < 0))
y = INF;
else
y = cost[k - 1];
if ((i - 1 < 0) || (k + 1 > 2 * r))
x = INF;
else
x = cost_prev[k + 1];
if ((i - 1 < 0) || (j - 1 < 0))
z = INF;
else
z = cost_prev[k];
/// Classic DTW calculation
cost[k] = min( min( x, y) , z) + dist(A[i],B[j]);
/// Find minimum cost in row for early abandoning (possibly to use column instead of row).
if (cost[k] < min_cost) {
min_cost = cost[k];
}
}
        /// We can abandon early if the current cumulative distance together with the lower bound is larger than bsf
if (i + r < m - 1 && min_cost >= bsf) {
return min_cost;
}
/// Move current array to previous array.
cost_tmp = cost;
cost = cost_prev;
cost_prev = cost_tmp;
}
k--;
/// the DTW distance is in the last cell in the matrix of size O(m^2) or at the middle of our array.
double final_dtw = cost_prev[k];
return final_dtw;
}
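/* Worked index example (illustration only): with warping window r, row i of the
   banded cost matrix keeps columns j in [i-r, i+r] inside an array of length
   2*r+1 at offset k = j - i + r. For m = 8 and r = 2, row i = 5 covers j = 3..7
   at k = 0..4, and the diagonal cell (i,i) always lands at k = r, which is why
   final_dtw above is read from the middle of cost_prev. */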
__global__ void processKernel(double* queue, double* buffer, double* cost,
double* cost_prev, double* bsf_a, int* loc_a, double* tM, double* tzM,
int m, int r, double bsf, int size, int EPOCH) {
extern __shared__ double q[];
int shared_index = threadIdx.x;
while(shared_index < m){
q[shared_index] = queue[shared_index];
shared_index += blockDim.x;
}
//printf("Hello");
int N = gridDim.x;
int M = blockDim.x;
int i = blockIdx.x;
int j = threadIdx.x;
int items_per_a = EPOCH / (N * M);
int maxindex = (size - 1) / items_per_a;
double lb_kim;
int bsfindex = i * M + j;
int sindex = bsfindex * items_per_a;
	int loc = 0; // initialize so loc_a[bsfindex] is well-defined even if bsf never improves
int k;
double d;
double *t, *tz;
double ex, ex2, mean, std, dist;
t = tM + bsfindex * 2 * m;
tz = tzM + bsfindex * 2 * m;
	/// Initialize the cumulative lower bound
ex = 0;
ex2 = 0;
int offset = m;
if (bsfindex == maxindex)
offset = 0;
if (bsfindex <= maxindex)
for (i = 0; i < items_per_a + offset; i++) {
d = (double) buffer[sindex + i];
ex += d;
ex2 += d * d;
t[i % m] = d;
t[(i % m) + m] = d;
/// If there is enough data in t, the DTW distance can be calculated
if (i >= m - 1) {
mean = ex / m;
std = ex2 / m;
std = sqrt(std - mean * mean);
/// compute the start location of the data in the current circular array, t
j = (i + 1) % m;
/// Use a constant lower bound to prune the obvious subsequence
lb_kim = lb_kim_hierarchy(t, q, j, m, mean, std, bsf);
if (lb_kim < bsf)
{
for (k = 0; k < m; k++) {
tz[k] = (t[(k + j)] - mean) / std;
}
dist = dtw(tz, q, m, r, cost, cost_prev, bsfindex, bsf);
////////////////////////////// Implementing locks //////////////////////////////////
///Previous code
//
// if (dist < bsf) { /// Update bsf
// /// loc is the real starting location of the nearest neighbor in the file
// bsf = dist;
// loc = sindex + i;
// }
/////////End of previous code
///////// Implementing loc
if (dist < bsf) {
bool loop = true;
while (loop) {
if (atomicCAS(&lock_Variable, 0, 1)) { //If loc open (loc == 0) then close it (make it equal to 1)
if (dist < bsf) {
bsf = dist;
loc = sindex + i;
}
lock_Variable = 0;
loop = false;
}
}
}
///////////////////////////////////////////////////////////////////////////////////
}
			/// Remove obsolete points from the sum and sum of squares
ex -= t[j];
ex2 -= t[j] * t[j];
}
}
bsf_a[bsfindex] = bsf;
loc_a[bsfindex] = loc;
//Some issue which popped up now .
}
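/* Illustration only -- a hypothetical host-side helper, not used by the kernel
   above: the per-window mean and standard deviation are derived from the two
   running sums ex and ex2; computed in one pass, the same update looks like: */
void running_mean_std_sketch(double *window, int m, double *mean_out, double *std_out) {
	double ex = 0, ex2 = 0;
	for (int k = 0; k < m; k++) {
		ex += window[k];
		ex2 += window[k] * window[k];
	}
	double mean = ex / m;
	*mean_out = mean;
	*std_out = sqrt(ex2 / m - mean * mean); /* population std from the sum and sum of squares */
}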
void error(int id) {
if (id == 1)
printf("ERROR : Memory can't be allocated!!!\n\n");
else if (id == 2)
printf("ERROR : File not Found!!!\n\n");
else if (id == 3)
printf("ERROR : Can't create Output File!!!\n\n");
else if (id == 4) {
printf("ERROR : Invalid Number of Arguments!!!\n");
printf(
"Command Usage: UCR_DTW.exe data-file query-file m R\n\n");
printf(
"For example : UCR_DTW.exe data.txt query.txt 128 0.05\n");
}
exit(1);
}
/// Main Function
int main(int argc, char *argv[]) {
FILE *fp; /// data file pointer
FILE *qp; /// query file pointer
double bsf = INF; /// best-so-far
double *h_q; /// data array and query array
clock_t begin, end;
double time_spent;
double d;
long long i;
double ex, ex2, mean, std;
int m = -1, r = -1;
long long loc = 0;
double t1, t2;
int kim = 0, keogh = 0, keogh2 = 0;
double *h_buffer;
int N = 10, M = 100;
int sh= 0;
	/// For every EPOCH points, all cumulative values, such as ex (sum) and ex2 (sum of squares), will be restarted to reduce floating-point error.
int EPOCH = 1000000;
int epoch; //Optimization
/// If not enough input, display an error.
if (argc <= 3)
error(4);
/// read size of the query
if (argc > 3)
m = atol(argv[3]);
/// read warping windows
if (argc > 4) {
double R = atof(argv[4]);
if (R <= 1)
r = floor(R * m);
else
r = floor(R);
}
if (argc > 7) {
N = atoi(argv[5]);
M = atoi(argv[6]);
EPOCH = atol(argv[7]);
}
// m = 128;
// r = 6;
fp = fopen(argv[1], "r");
// fp = fopen("/home/ubuntu/Desktop/DTW Project/Executable/Data.txt", "r");
// if( fp == NULL )
// error(2);
qp = fopen(argv[2], "r");
// qp = fopen("/home/ubuntu/Desktop/DTW Project/Executable/Query.txt", "r");
// if( qp == NULL )
// error(2);
/// start the clock
t1 = clock();
/// malloc everything here
h_q = (double *) malloc(sizeof(double) * m);
if (h_q == NULL)
error(1);
h_buffer = (double *) malloc(sizeof(double) * (EPOCH));
if (h_buffer == NULL)
error(1);
/// Read query file
bsf = INF;
i = 0;
ex = ex2 = 0;
while (fscanf(qp, "%lf", &d) != EOF && i < m) {
ex += d;
ex2 += d * d;
h_q[i] = d;
i++;
}
fclose(qp);
/// Do z-normalize the query, keep in same array, q
mean = ex / m;
std = ex2 / m;
std = sqrt(std - mean * mean);
for (i = 0; i < m; i++)
h_q[i] = (h_q[i] - mean) / std;
int size = N * M;
double* h_bsf = (double *) malloc(sizeof(double) * size);
int* h_loc = (int *) malloc(sizeof(int) * size);
for (i = 0; i < size; i++) {
h_bsf[i] = INF;
h_loc[i] = 0;
}
//Allocate all the cuda Stuffs
double *d_q;
double *d_buffer, *d_bsf;
double *d_cost, *d_cost_prev;
double *d_t, *d_tz;
int* d_loc;
cudaMalloc((void**) &d_buffer, (EPOCH) * sizeof(double));
cudaMalloc((void**) &d_cost, (2 * r + 1) * size * sizeof(double));
cudaMalloc((void**) &d_cost_prev, (2 * r + 1) * size * sizeof(double));
cudaMalloc((void**) &d_bsf, size * sizeof(double));
cudaMalloc((void**) &d_t, 2 * m * size * sizeof(double));
cudaMalloc((void**) &d_tz, 2 * m * size * sizeof(double));
cudaMalloc((void**) &d_q, m * sizeof(double));
cudaMalloc((void**) &d_loc, size * sizeof(int));
///Copying BSF array
cudaMemcpy(d_bsf, h_bsf, m * sizeof(double), cudaMemcpyHostToDevice);
///Copy all the Query related arrays
cudaMemcpy(d_q, h_q, m * sizeof(double), cudaMemcpyHostToDevice);
bool done = false;
bool last = false;
int it = 0, ep = 0, k = 0;
//begin = clock();
while (!done) {
/// Read first m-1 points
if (it == 0) {
epoch = 100000;
while (ep < epoch) {
if (fscanf(fp, "%lf", &d) == EOF)
break;
h_buffer[ep] = d;
ep++;
}
}
		/// Data are read in chunks of size EPOCH.
		/// When there is nothing left to read, the loop ends.
if (ep <= m - 1) {
done = true;
} else {
if (last) {
done = true;
}
//printf("Reading Done.\n");
sh ++;
//begin = clock();
cudaMemcpy(d_buffer, h_buffer, ep * sizeof(double),
cudaMemcpyHostToDevice); // to copy from CPU to GPU
cudaDeviceSynchronize();
//end = clock();
// time_spent = (double) (end - begin) / CLOCKS_PER_SEC;
// printf("Time taken by memcpy for reading buffer %lf ", time_spent);
			/// Just for printing a dot for approximately every million points. Not very accurate.
// printf("Copying done.\n");
//Do everything here
// begin = clock();
processKernel<<<N, M,m*sizeof(double)>>>(d_q, d_buffer, d_cost, d_cost_prev, d_bsf,
d_loc, d_t, d_tz, m, r, bsf, ep, EPOCH);
// cudaDeviceSynchronize();
// end = clock();
// time_spent = (double) (end - begin) / CLOCKS_PER_SEC;
// printf("Time taken by kernel %lf ", time_spent);
//Do the next set of buffering
// printf("Kernel done.\n");
epoch = EPOCH;
ep = 0;
// begin = clock();
while (ep < epoch) {
if (fscanf(fp, "%lf", &d) == EOF) {
last = true;
break;
}
h_buffer[ep] = d;
ep++;
}
// end = clock();
// time_spent = (double) (end - begin) / CLOCKS_PER_SEC;
// printf("Time taken for reading %lf ", time_spent);
//printf("Loading next set done\n");
// begin = clock();
cudaMemcpy(h_bsf, d_bsf, size * sizeof(double),
cudaMemcpyDeviceToHost);
cudaMemcpy(h_loc, d_loc, size * sizeof(int),
cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// end = clock();
// time_spent = (double) (end - begin) / CLOCKS_PER_SEC;
// printf("Time taken for memcpy %lf", time_spent);
// printf("computation");
begin = clock();
for (k = 0; k < size; k++) {
if (bsf > h_bsf[k]) {
bsf = h_bsf[k];
if (it == 0) {
loc = (it) * (EPOCH) + h_loc[k] - m + 1;
} else {
loc = 100000 + (it - 1) * (EPOCH) + h_loc[k] - m + 1;
}
}
}
// end = clock();
// time_spent = (double) (end - begin) / CLOCKS_PER_SEC;
// printf("Time taken for computation %lf \n", time_spent);
// printf("Computation Done.\n");
			/// If the size of the last chunk is less than EPOCH, then there is no more data and we terminate.
}
it++;
}
// end = clock();
// time_spent = (double) (end - begin) / CLOCKS_PER_SEC;
// printf("\nTime taken %lf ", time_spent);
fclose(fp);
free(h_q);
free(h_buffer);
free(h_bsf);
cudaFree(d_buffer);
cudaFree(d_q);
cudaFree(d_bsf);
cudaFree(d_loc);
cudaFree(d_cost);
cudaFree(d_cost_prev);
cudaFree(d_t);
cudaFree(d_tz);
// t2 = clock();
printf("\n");
/// Note that loc and i are long long.
// cout << "Location : " << loc << endl;
// cout << "Distance : " << sqrt(bsf) << endl;
// cout << "Data Scanned : " << i << endl;
// cout << "Total Execution Time : " << (t2-t1)/CLOCKS_PER_SEC << " sec" << endl;
	/// printf is just easier for formatting ;)
	printf("Distance %lf\n", sqrt(bsf));
	printf("Location %lld\n", loc);
printf("No of iterations %d\n", sh);
return 0;
}
|
5630a81fef4c65cb3f6aa34032218f4d3ee93563.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fake_learned_scale_quant_perchannel_impl.cuh"
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#include <thrust/pair.h>
#include <algorithm>
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
__global__ void FakeLearnedScaleQuantPerChannel(float *output, const int size, float *input_alpha,
float *input_quant, const int channel_num) {
int channel_idx = 0;
int per_channel_num = size / channel_num;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
channel_idx = floor(static_cast<double>(i) / static_cast<double>(per_channel_num));
// dequantize
output[i] = input_quant[i] * input_alpha[channel_idx];
}
return;
}
__global__ void FakeLearnedScaleQuantPerChannelGrad(float *grad_input, float *grad_alpha, const float *gradient,
const int size, const float *input_div_alpha,
const float *input_quant, const bool neg_trunc,
const int channel_num) {
int channel_idx = 0;
int per_channel_num = size / channel_num;
float lower_bound = -1.0 * !neg_trunc;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
float grad_alpha_temp = 0.f;
channel_idx = floor(static_cast<double>(i) / static_cast<double>(per_channel_num));
if (input_div_alpha[i] > 1.0) {
grad_alpha_temp = gradient[i];
grad_input[i] = 0;
} else if (input_div_alpha[i] < lower_bound) {
grad_alpha_temp = -gradient[i];
grad_input[i] = 0;
} else {
grad_input[i] = gradient[i];
grad_alpha_temp = (gradient[i] * (input_quant[i] - input_div_alpha[i]));
}
MsAtomicAdd(grad_alpha + channel_idx, grad_alpha_temp);
}
return;
}
__global__ void LSQNudgePerChannel(const float *input, const int size, float *input_alpha, float *input_quant_max,
float *input_div_alpha, float *input_quant, const bool neg_trunc,
const int channel_num) {
float input_x;
int channel_idx = 0;
int per_channel_num = size / channel_num;
float lower_bound = -1.0 * !neg_trunc;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
channel_idx = floor(static_cast<double>(i) / static_cast<double>(per_channel_num));
input_x = input[i] / input_alpha[channel_idx];
input_div_alpha[i] = input_x;
input_x = max(input_x, lower_bound);
input_x = min(input_x, 1.0);
// quantize
input_quant[i] = floor(input_x * input_quant_max[0] + 0.5f) / input_quant_max[0];
}
return;
}
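// Worked example (illustration only): with input_quant_max[0] = 127 and
// input[i] / alpha = 0.4, the nudge above clamps to [lower_bound, 1] and then
// snaps to the nearest level: floor(0.4 * 127 + 0.5) / 127 = 51 / 127 ~= 0.4016.
// FakeLearnedScaleQuantPerChannel later rescales this value by alpha, completing
// the fake-quantization round trip.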
void CalFakeLearnedScaleQuantPerChannel(float *output, const int size, float *input_alpha, float *input_quant,
const int channel_num, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( FakeLearnedScaleQuantPerChannel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, output, size, input_alpha,
input_quant, channel_num);
return;
}
void CalFakeLearnedScaleQuantPerChannelGrad(float *grad_input, float *grad_alpha, const float *gradient, const int size,
const float *input_div_alpha, const float *input_quant,
const bool neg_trunc, const int channel_num, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( FakeLearnedScaleQuantPerChannelGrad), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, grad_input,
grad_alpha,
gradient,
size,
input_div_alpha,
input_quant,
neg_trunc,
channel_num);
return;
}
void CalLSQNudgePerChannel(const float *input, const int size, float *input_alpha, float *input_quant_max,
float *input_div_alpha, float *input_quant, const bool neg_trunc, const int channel_num,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( LSQNudgePerChannel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, input, size, input_alpha, input_quant_max,
input_div_alpha, input_quant, neg_trunc,
channel_num);
return;
}
| 5630a81fef4c65cb3f6aa34032218f4d3ee93563.cu | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fake_learned_scale_quant_perchannel_impl.cuh"
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#include <thrust/pair.h>
#include <algorithm>
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
__global__ void FakeLearnedScaleQuantPerChannel(float *output, const int size, float *input_alpha,
float *input_quant, const int channel_num) {
int channel_idx = 0;
int per_channel_num = size / channel_num;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
channel_idx = floor(static_cast<double>(i) / static_cast<double>(per_channel_num));
// dequantize
output[i] = input_quant[i] * input_alpha[channel_idx];
}
return;
}
__global__ void FakeLearnedScaleQuantPerChannelGrad(float *grad_input, float *grad_alpha, const float *gradient,
const int size, const float *input_div_alpha,
const float *input_quant, const bool neg_trunc,
const int channel_num) {
int channel_idx = 0;
int per_channel_num = size / channel_num;
float lower_bound = -1.0 * !neg_trunc;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
float grad_alpha_temp = 0.f;
channel_idx = floor(static_cast<double>(i) / static_cast<double>(per_channel_num));
if (input_div_alpha[i] > 1.0) {
grad_alpha_temp = gradient[i];
grad_input[i] = 0;
} else if (input_div_alpha[i] < lower_bound) {
grad_alpha_temp = -gradient[i];
grad_input[i] = 0;
} else {
grad_input[i] = gradient[i];
grad_alpha_temp = (gradient[i] * (input_quant[i] - input_div_alpha[i]));
}
MsAtomicAdd(grad_alpha + channel_idx, grad_alpha_temp);
}
return;
}
__global__ void LSQNudgePerChannel(const float *input, const int size, float *input_alpha, float *input_quant_max,
float *input_div_alpha, float *input_quant, const bool neg_trunc,
const int channel_num) {
float input_x;
int channel_idx = 0;
int per_channel_num = size / channel_num;
float lower_bound = -1.0 * !neg_trunc;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
channel_idx = floor(static_cast<double>(i) / static_cast<double>(per_channel_num));
input_x = input[i] / input_alpha[channel_idx];
input_div_alpha[i] = input_x;
input_x = max(input_x, lower_bound);
input_x = min(input_x, 1.0);
// quantize
input_quant[i] = floor(input_x * input_quant_max[0] + 0.5f) / input_quant_max[0];
}
return;
}
void CalFakeLearnedScaleQuantPerChannel(float *output, const int size, float *input_alpha, float *input_quant,
const int channel_num, cudaStream_t cuda_stream) {
FakeLearnedScaleQuantPerChannel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(output, size, input_alpha,
input_quant, channel_num);
return;
}
void CalFakeLearnedScaleQuantPerChannelGrad(float *grad_input, float *grad_alpha, const float *gradient, const int size,
const float *input_div_alpha, const float *input_quant,
const bool neg_trunc, const int channel_num, cudaStream_t cuda_stream) {
FakeLearnedScaleQuantPerChannelGrad<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(grad_input,
grad_alpha,
gradient,
size,
input_div_alpha,
input_quant,
neg_trunc,
channel_num);
return;
}
void CalLSQNudgePerChannel(const float *input, const int size, float *input_alpha, float *input_quant_max,
float *input_div_alpha, float *input_quant, const bool neg_trunc, const int channel_num,
cudaStream_t cuda_stream) {
LSQNudgePerChannel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(input, size, input_alpha, input_quant_max,
input_div_alpha, input_quant, neg_trunc,
channel_num);
return;
}
|
e469f7424741681ddd2255353e58b99b500aad76.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <memory>
#define NUM_THREADS 10000
#define SIZE 10
#define BLOCK_WIDTH 100
__global__ void gpu_increment_atomic(int* d_a) {
	//Compute the thread index
int tid = blockIdx.x * blockDim.x + threadIdx.x;
	//Each thread increments one of the 10 array elements
tid = tid % SIZE;
//d_a[tid] += 1;
atomicAdd(&d_a[tid], 1);
	/*atomicAdd replaces the earlier direct += operation. It takes 2 arguments: the first
	is the memory location to add to atomically; the second is the value to add.
	The function guarantees that each calling thread's read-old-value / accumulate /
	write-back-new-value sequence on that memory location completes as an indivisible
	whole that cannot be disturbed by other threads.*/
}
int main(void) {
printf("%d total threads in %d blocks writing into %d array elements\n", NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, SIZE);
	//Declare and allocate host memory
int h_a[SIZE];
const int ARRAY_BYTES = SIZE * sizeof(int);
	//Declare and allocate GPU memory
int* d_a;
hipMalloc((void**)&d_a, ARRAY_BYTES);
	//Initialize GPU memory to 0
hipMemset((void*)d_a, 0, ARRAY_BYTES);
gpu_increment_atomic << <NUM_THREADS / BLOCK_WIDTH, BLOCK_WIDTH >> > (d_a);
	//Copy the results back from the GPU to the host and print them
hipMemcpy(h_a, d_a, ARRAY_BYTES, hipMemcpyDeviceToHost);
printf("Number of times a particular Array index has been incremented is:\n");
for (int i = 0; i < SIZE; i++)
{
printf("index:%d --> %d times\n", i, h_a[i]);
}
hipFree(d_a);
return 0;
}
| e469f7424741681ddd2255353e58b99b500aad76.cu | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <memory>
#define NUM_THREADS 10000
#define SIZE 10
#define BLOCK_WIDTH 100
__global__ void gpu_increment_atomic(int* d_a) {
//计算线程的索引
int tid = blockIdx.x * blockDim.x + threadIdx.x;
//在10个元素中每个线程增加
tid = tid % SIZE;
//d_a[tid] += 1;
atomicAdd(&d_a[tid], 1);
	/*atomicAdd replaces the earlier direct += operation. It takes 2 arguments: the first
	is the memory location to add to atomically; the second is the value to add.
	The function guarantees that each calling thread's read-old-value / accumulate /
	write-back-new-value sequence on that memory location completes as an indivisible
	whole that cannot be disturbed by other threads.*/
}
int main(void) {
printf("%d total threads in %d blocks writing into %d array elements\n", NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, SIZE);
	//Declare and allocate host memory
int h_a[SIZE];
const int ARRAY_BYTES = SIZE * sizeof(int);
	//Declare and allocate GPU memory
int* d_a;
cudaMalloc((void**)&d_a, ARRAY_BYTES);
	//Initialize GPU memory to 0
cudaMemset((void*)d_a, 0, ARRAY_BYTES);
gpu_increment_atomic << <NUM_THREADS / BLOCK_WIDTH, BLOCK_WIDTH >> > (d_a);
	//Copy the results back from the GPU to the host and print them
cudaMemcpy(h_a, d_a, ARRAY_BYTES, cudaMemcpyDeviceToHost);
printf("Number of times a particular Array index has been incremented is:\n");
for (int i = 0; i < SIZE; i++)
{
printf("index:%d --> %d times\n", i, h_a[i]);
}
cudaFree(d_a);
return 0;
}
|
d038bbe74a13f351a6874530284ac7cf5a545ae7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
#include<iostream>
#include<stdlib.h>
#include<getopt.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <chrono>
#define RAND_RANGE_MIN -10.0
#define RAND_RANGE_MAX 10.0
#define SEED 123
#define JACOBI_DEBUG 0
enum ERROR_TYPE { MEMCPY, CMALLOC, ADDK, DEVSYNC };
void init_const(float* v, float x, int dim) {
for (int i = 0; i < dim; i++)
v[i] = x;
return;
}
void init_matrix(float **A, int matrix_order) {
for (int i = 0; i < matrix_order; i++) {
A[i] = new float[matrix_order];
if (A[i] == NULL) {
std::cerr << "Error while allocating resources." << std::endl;
exit(-1);
}
}
}
/** Generates a random number in a specific range.
@param fMin The lower bound of the range.
@param fMax The upper bound of the range.
@return The generated number.
*/
float generate_random_number(float fMin, float fMax) {
float f = (float)rand() / RAND_MAX;
return fMin + f * (fMax - fMin);
}
/** Generates a random square matrix.
@param A The matrix.
*/
void generate_random_matrix(float **A, int matrix_order) {
for (int i = 0; i < matrix_order; i++) {
float sum = 0.0;
for (int j = 0; j < matrix_order; j++)
if (j != i) {
float val = generate_random_number(RAND_RANGE_MIN, RAND_RANGE_MAX);
sum += abs(val);
A[i][j] = val;
}
		/* Set A[i][i] to be greater than the sum of |A[i][j]|, j != i (diagonal dominance) */
A[i][i] = sum + generate_random_number(1.0, RAND_RANGE_MAX);
}
}
/** Generates a random vector.
@param v .
*/
void generate_random_vector(float *v, int matrix_order) {
/* generate vector v */
for (int j = 0; j < matrix_order; j++) {
float val = generate_random_number(RAND_RANGE_MIN, RAND_RANGE_MAX);
v[j] = val;
}
}
/** Computes the matrix-vector product x = A * v.
 @param x The output vector.
 @param A The square matrix.
 @param v The input vector.
 @param matrix_order The order of the matrix.
 */
void matrix_vector_multiplication(float *x, float **A, float *v, int matrix_order) {
for (int i = 0; i < matrix_order; i++) {
x[i] = 0;
for (int j = 0; j < matrix_order; j++)
x[i] += A[i][j] * v[j];
}
return;
}
void error_on_computation(float* x, float ** A, float *b, int matrix_order, float *err) {
float error = 0.0, sum = 0.0;
for (size_t i = 0; i < matrix_order; i++) {
sum = 0.0;
for (size_t j = 0; j < matrix_order; j++) {
sum = sum + A[i][j] * x[j];
}
error = error + abs(sum - b[i]);
}
*err = error / matrix_order;
return;
}
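/* Worked equation (illustration only): *err above is the mean absolute residual
       err = (1/n) * sum over i of | (A x)_i - b_i |,
   so an exact solution gives 0 and the "err" column printed by main() is in the
   same units as b. */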
std::chrono::duration<double> delta_time(std::chrono::time_point<std::chrono::system_clock> start, std::chrono::time_point<std::chrono::system_clock> end) {
return end - start;
}
hipError_t error_check(hipError_t cudaStatus, ERROR_TYPE msgtype, float*dev_a, float*dev_x_solution, float*dev_b, float*dev_prec_values){
if (cudaStatus != hipSuccess) {
		switch(msgtype) {
			case (CMALLOC):{
				std::cerr << "hipMalloc failed!" << std::endl;
				break;
			}
			case (MEMCPY):{
				std::cerr << "hipMemcpy failed!" << std::endl;
				break;
			}
			case (ADDK):{
				std::cerr << "addKernel launch failed:" << hipGetErrorString(cudaStatus) << std::endl;
				break;
			}
			case(DEVSYNC):{
				std::cerr << "hipDeviceSynchronize returned error code " << cudaStatus << " after launching jacobi!" << std::endl;
				break;
			}
		}
hipFree(dev_a);
hipFree(dev_x_solution);
hipFree(dev_prec_values);
hipFree(dev_b);
return cudaStatus;
}
	return cudaStatus;
}
__global__ void iteration(float * a, float * x_solution, float * b, float * prec_values, unsigned int matrix_order) {
unsigned int j, i;
float sigma = 0.0, newValue = 0.0;
int bx = blockIdx.x, tx = threadIdx.x;
i = tx + bx*blockDim.x;
if (i >= matrix_order) return;
if (i < matrix_order){
sigma = b[i];
int idx_Ai = i*matrix_order;
for (j = 0; j < matrix_order; j++) {
if (i != j) { sigma = sigma - a[idx_Ai + j] * x_solution[j]; }
}
newValue = sigma / a[idx_Ai + i];
prec_values[i] = (x_solution[i] - newValue)*(x_solution[i] - newValue);
x_solution[i] = newValue;
__syncthreads();
}
}
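/* Worked equation (illustration only): each launch of the kernel above performs one
   Jacobi sweep,
       x_i_new = ( b_i - sum over j != i of a[i][j] * x_j_old ) / a[i][i],
   and stores (x_i_old - x_i_new)^2 in prec_values[i]; the host then sums these
   entries and compares sqrt(sum) against eps as the stopping criterion. */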
hipError_t cuda_jacobi_solve(float * a, float * x_solution, float * b, float eps, unsigned int matrix_order, int * max_iter, float *prec) {
unsigned int i, j;
int k = 0, nTiles;
float *dev_a = 0, *dev_x_solution = 0, *dev_b = 0, *dev_prec_values = 0;
float accur = 1.0, sum = 0.0;
float *prec_values = new float[matrix_order];
init_const(prec_values, 0.0, matrix_order);
size_t matrix_size = matrix_order*matrix_order*sizeof(float);
size_t vector_size = matrix_order*sizeof(float);
hipError_t cudaStatus;
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
cudaStatus = hipMalloc((void**)&dev_a, matrix_size);
error_check(cudaStatus, CMALLOC, dev_a, dev_x_solution, dev_b, dev_prec_values);
cudaStatus = hipMalloc((void**)&dev_x_solution, vector_size);
error_check(cudaStatus, CMALLOC, dev_a, dev_x_solution, dev_b, dev_prec_values);
cudaStatus = hipMalloc((void**)&dev_b, vector_size);
error_check(cudaStatus, CMALLOC, dev_a, dev_x_solution, dev_b, dev_prec_values);
cudaStatus = hipMalloc((void**)&dev_prec_values, vector_size);
error_check(cudaStatus, CMALLOC, dev_a, dev_x_solution, dev_b, dev_prec_values);
cudaStatus = hipMemcpy(dev_a, a, matrix_size, hipMemcpyHostToDevice);
error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values);
cudaStatus = hipMemcpy(dev_x_solution, x_solution, vector_size, hipMemcpyHostToDevice);
error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values);
cudaStatus = hipMemcpy(dev_b, b, vector_size, hipMemcpyHostToDevice);
error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values);
cudaStatus = hipMemcpy(dev_prec_values, prec_values, vector_size, hipMemcpyHostToDevice);
error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values);
int tileSize = 16;
nTiles = matrix_order/tileSize + (matrix_order%tileSize == 0?0:1);
for (i = 0; i < *max_iter; i++) {
hipLaunchKernelGGL(( iteration) , dim3(nTiles),dim3(tileSize), 0, 0, dev_a, dev_x_solution, dev_b, dev_prec_values, matrix_order);
k++;
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
error_check(cudaStatus, ADDK, dev_a, dev_x_solution, dev_b, dev_prec_values);
// hipDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
error_check(cudaStatus, DEVSYNC, dev_a, dev_x_solution, dev_b, dev_prec_values);
		// Retrieve the dev_prec_values vector with all the precision values
cudaStatus = hipMemcpy(prec_values, dev_prec_values, vector_size, hipMemcpyDeviceToHost);
error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values);
// Computes the precision
sum = 0.0;
for (j = 0; j < matrix_order; j++) {
sum = sum + fabs(prec_values[j]);
}
accur = sqrt(sum);
if (accur <= eps) break;
}
*max_iter = k;
*prec = accur;
cudaStatus = hipMemcpy(x_solution, dev_x_solution, vector_size, hipMemcpyDeviceToHost);
error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values);
hipFree(dev_a);
hipFree(dev_x_solution);
hipFree(dev_prec_values);
hipFree(dev_b);
	return cudaStatus;
}
int main(int argc, char *argv[]){
const int matrix_order = atoi(argv[1]); // order of the matrix
int max_iter = atoi(argv[2]); // number of max_iterations
const float epsilon = atof(argv[3]); // precision
int iterations = max_iter;
std::chrono::time_point<std::chrono::system_clock> start_time, end_time;
float *x_solution_h, *b_h, **A_h, *rand_x_h;
float *extended_a = 0;
// Allocate memory for CPU.
A_h = new float *[matrix_order];
b_h = new float[matrix_order];
x_solution_h = new float[matrix_order];
rand_x_h = new float[matrix_order];
if (A_h == NULL || b_h == NULL || rand_x_h == NULL || x_solution_h == NULL) {
std::cerr << "Error while allocating resources." << std::endl;
exit(-1);
}
init_matrix(A_h, matrix_order);
srand(SEED);
generate_random_matrix(A_h, matrix_order);
extended_a = (float*)malloc(matrix_order*matrix_order*sizeof(float));
for (int i = 0; i < matrix_order; i++) {
for (int j = 0; j < matrix_order; j++) {
extended_a[i*matrix_order + j] = A_h[i][j];
}
}
generate_random_vector(rand_x_h, matrix_order);
int repetitions = 20;
float precision = 1.0, err = 0.0;
matrix_vector_multiplication(b_h, A_h, rand_x_h, matrix_order);
for (int m=0; m<repetitions; m++) {
init_const(x_solution_h, 0.0, matrix_order);
iterations = max_iter;
err=0.0;
start_time = std::chrono::system_clock::now();
cuda_jacobi_solve(extended_a, x_solution_h, b_h, epsilon, matrix_order, &iterations, &precision);
end_time = std::chrono::system_clock::now();
std::cout << delta_time(start_time, end_time).count() << "\t" ;
error_on_computation(x_solution_h, A_h, b_h, matrix_order, &err);
}
std::cout << "\t" << iterations << "\t" << precision << "\t" << err << std::endl;
// Release resources
for (int i = 0; i < matrix_order; i++)
delete[] A_h[i];
delete[] A_h;
delete[] b_h;
delete[] rand_x_h;
delete[] x_solution_h;
free(extended_a);
return 0;
} | d038bbe74a13f351a6874530284ac7cf5a545ae7.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
#include<iostream>
#include<stdlib.h>
#include<getopt.h>
#include <assert.h>
#include <cuda.h>
#include <chrono>
#define RAND_RANGE_MIN -10.0
#define RAND_RANGE_MAX 10.0
#define SEED 123
#define JACOBI_DEBUG 0
enum ERROR_TYPE { MEMCPY, CMALLOC, ADDK, DEVSYNC };
void init_const(float* v, float x, int dim) {
for (int i = 0; i < dim; i++)
v[i] = x;
return;
}
void init_matrix(float **A, int matrix_order) {
for (int i = 0; i < matrix_order; i++) {
A[i] = new float[matrix_order];
if (A[i] == NULL) {
std::cerr << "Error while allocating resources." << std::endl;
exit(-1);
}
}
}
/** Generates a random number in a specific range.
@param fMin The lower bound of the range.
@param fMax The upper bound of the range.
@return The generated number.
*/
float generate_random_number(float fMin, float fMax) {
float f = (float)rand() / RAND_MAX;
return fMin + f * (fMax - fMin);
}
/** Generates a random square matrix.
@param A The matrix.
*/
void generate_random_matrix(float **A, int matrix_order) {
for (int i = 0; i < matrix_order; i++) {
float sum = 0.0;
for (int j = 0; j < matrix_order; j++)
if (j != i) {
float val = generate_random_number(RAND_RANGE_MIN, RAND_RANGE_MAX);
sum += abs(val);
A[i][j] = val;
}
		/* Set A[i][i] to be greater than the sum of |A[i][j]|, j != i (diagonal dominance) */
A[i][i] = sum + generate_random_number(1.0, RAND_RANGE_MAX);
}
}
/** Generates a random vector.
@param v .
*/
void generate_random_vector(float *v, int matrix_order) {
/* generate vector v */
for (int j = 0; j < matrix_order; j++) {
float val = generate_random_number(RAND_RANGE_MIN, RAND_RANGE_MAX);
v[j] = val;
}
}
/** Computes the matrix-vector product x = A * v.
 @param x The output vector.
 @param A The square matrix.
 @param v The input vector.
 @param matrix_order The order of the matrix.
 */
void matrix_vector_multiplication(float *x, float **A, float *v, int matrix_order) {
for (int i = 0; i < matrix_order; i++) {
x[i] = 0;
for (int j = 0; j < matrix_order; j++)
x[i] += A[i][j] * v[j];
}
return;
}
void error_on_computation(float* x, float ** A, float *b, int matrix_order, float *err) {
float error = 0.0, sum = 0.0;
for (size_t i = 0; i < matrix_order; i++) {
sum = 0.0;
for (size_t j = 0; j < matrix_order; j++) {
sum = sum + A[i][j] * x[j];
}
error = error + abs(sum - b[i]);
}
*err = error / matrix_order;
return;
}
std::chrono::duration<double> delta_time(std::chrono::time_point<std::chrono::system_clock> start, std::chrono::time_point<std::chrono::system_clock> end) {
return end - start;
}
cudaError_t error_check(cudaError_t cudaStatus, ERROR_TYPE msgtype, float*dev_a, float*dev_x_solution, float*dev_b, float*dev_prec_values){
if (cudaStatus != cudaSuccess) {
		switch(msgtype) {
			case (CMALLOC):{
				std::cerr << "cudaMalloc failed!" << std::endl;
				break;
			}
			case (MEMCPY):{
				std::cerr << "cudaMemcpy failed!" << std::endl;
				break;
			}
			case (ADDK):{
				std::cerr << "addKernel launch failed:" << cudaGetErrorString(cudaStatus) << std::endl;
				break;
			}
			case(DEVSYNC):{
				std::cerr << "cudaDeviceSynchronize returned error code " << cudaStatus << " after launching jacobi!" << std::endl;
				break;
			}
		}
cudaFree(dev_a);
cudaFree(dev_x_solution);
cudaFree(dev_prec_values);
cudaFree(dev_b);
return cudaStatus;
}
	return cudaStatus;
}
__global__ void iteration(float * a, float * x_solution, float * b, float * prec_values, unsigned int matrix_order) {
unsigned int j, i;
float sigma = 0.0, newValue = 0.0;
int bx = blockIdx.x, tx = threadIdx.x;
i = tx + bx*blockDim.x;
if (i >= matrix_order) return;
if (i < matrix_order){
sigma = b[i];
int idx_Ai = i*matrix_order;
for (j = 0; j < matrix_order; j++) {
if (i != j) { sigma = sigma - a[idx_Ai + j] * x_solution[j]; }
}
newValue = sigma / a[idx_Ai + i];
prec_values[i] = (x_solution[i] - newValue)*(x_solution[i] - newValue);
x_solution[i] = newValue;
__syncthreads();
}
}
cudaError_t cuda_jacobi_solve(float * a, float * x_solution, float * b, float eps, unsigned int matrix_order, int * max_iter, float *prec) {
unsigned int i, j;
int k = 0, nTiles;
float *dev_a = 0, *dev_x_solution = 0, *dev_b = 0, *dev_prec_values = 0;
float accur = 1.0, sum = 0.0;
float *prec_values = new float[matrix_order];
init_const(prec_values, 0.0, matrix_order);
size_t matrix_size = matrix_order*matrix_order*sizeof(float);
size_t vector_size = matrix_order*sizeof(float);
cudaError_t cudaStatus;
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
cudaStatus = cudaMalloc((void**)&dev_a, matrix_size);
error_check(cudaStatus, CMALLOC, dev_a, dev_x_solution, dev_b, dev_prec_values);
cudaStatus = cudaMalloc((void**)&dev_x_solution, vector_size);
error_check(cudaStatus, CMALLOC, dev_a, dev_x_solution, dev_b, dev_prec_values);
cudaStatus = cudaMalloc((void**)&dev_b, vector_size);
error_check(cudaStatus, CMALLOC, dev_a, dev_x_solution, dev_b, dev_prec_values);
cudaStatus = cudaMalloc((void**)&dev_prec_values, vector_size);
error_check(cudaStatus, CMALLOC, dev_a, dev_x_solution, dev_b, dev_prec_values);
cudaStatus = cudaMemcpy(dev_a, a, matrix_size, cudaMemcpyHostToDevice);
error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values);
cudaStatus = cudaMemcpy(dev_x_solution, x_solution, vector_size, cudaMemcpyHostToDevice);
error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values);
cudaStatus = cudaMemcpy(dev_b, b, vector_size, cudaMemcpyHostToDevice);
error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values);
cudaStatus = cudaMemcpy(dev_prec_values, prec_values, vector_size, cudaMemcpyHostToDevice);
error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values);
int tileSize = 16;
nTiles = matrix_order/tileSize + (matrix_order%tileSize == 0?0:1);
for (i = 0; i < *max_iter; i++) {
iteration <<<nTiles,tileSize>>> (dev_a, dev_x_solution, dev_b, dev_prec_values, matrix_order);
k++;
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
error_check(cudaStatus, ADDK, dev_a, dev_x_solution, dev_b, dev_prec_values);
// cudaDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
error_check(cudaStatus, DEVSYNC, dev_a, dev_x_solution, dev_b, dev_prec_values);
		// Retrieve the dev_prec_values vector with all the precision values
cudaStatus = cudaMemcpy(prec_values, dev_prec_values, vector_size, cudaMemcpyDeviceToHost);
error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values);
// Computes the precision
sum = 0.0;
for (j = 0; j < matrix_order; j++) {
sum = sum + fabs(prec_values[j]);
}
accur = sqrt(sum);
if (accur <= eps) break;
}
*max_iter = k;
*prec = accur;
cudaStatus = cudaMemcpy(x_solution, dev_x_solution, vector_size, cudaMemcpyDeviceToHost);
error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values);
cudaFree(dev_a);
cudaFree(dev_x_solution);
cudaFree(dev_prec_values);
cudaFree(dev_b);
	return cudaStatus;
}
int main(int argc, char *argv[]){
const int matrix_order = atoi(argv[1]); // order of the matrix
int max_iter = atoi(argv[2]); // number of max_iterations
const float epsilon = atof(argv[3]); // precision
int iterations = max_iter;
std::chrono::time_point<std::chrono::system_clock> start_time, end_time;
float *x_solution_h, *b_h, **A_h, *rand_x_h;
float *extended_a = 0;
// Allocate memory for CPU.
A_h = new float *[matrix_order];
b_h = new float[matrix_order];
x_solution_h = new float[matrix_order];
rand_x_h = new float[matrix_order];
if (A_h == NULL || b_h == NULL || rand_x_h == NULL || x_solution_h == NULL) {
std::cerr << "Error while allocating resources." << std::endl;
exit(-1);
}
init_matrix(A_h, matrix_order);
srand(SEED);
generate_random_matrix(A_h, matrix_order);
extended_a = (float*)malloc(matrix_order*matrix_order*sizeof(float));
for (int i = 0; i < matrix_order; i++) {
for (int j = 0; j < matrix_order; j++) {
extended_a[i*matrix_order + j] = A_h[i][j];
}
}
generate_random_vector(rand_x_h, matrix_order);
int repetitions = 20;
float precision = 1.0, err = 0.0;
matrix_vector_multiplication(b_h, A_h, rand_x_h, matrix_order);
for (int m=0; m<repetitions; m++) {
init_const(x_solution_h, 0.0, matrix_order);
iterations = max_iter;
err=0.0;
start_time = std::chrono::system_clock::now();
cuda_jacobi_solve(extended_a, x_solution_h, b_h, epsilon, matrix_order, &iterations, &precision);
end_time = std::chrono::system_clock::now();
std::cout << delta_time(start_time, end_time).count() << "\t" ;
error_on_computation(x_solution_h, A_h, b_h, matrix_order, &err);
}
std::cout << "\t" << iterations << "\t" << precision << "\t" << err << std::endl;
// Release resources
for (int i = 0; i < matrix_order; i++)
delete[] A_h[i];
delete[] A_h;
delete[] b_h;
delete[] rand_x_h;
delete[] x_solution_h;
free(extended_a);
return 0;
} |
b0e7c8002bcdf083b11838805785c809dfe0b486.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2012 Ben Barsdell
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
This file contains the boring boiler-plate code to manage the library.
TODO: Test on 32-bit integer input
Consider accepting 32-bit floats instead of 32-bit ints
*/
//#define DEDISP_DEBUG
//#define DEDISP_BENCHMARK
#include <dedisp.h>
#include <vector>
#include <algorithm> // For std::fill
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
// For copying and scrunching the DM list
#include <thrust/transform.h>
#include <thrust/iterator/constant_iterator.h>
#ifdef DEDISP_BENCHMARK
#include <fstream>
#endif
#if defined(DEDISP_DEBUG) && DEDISP_DEBUG
#include <stdio.h> // For printf
#endif
// TODO: Remove these when done benchmarking
// -----------------------------------------
#if defined(DEDISP_BENCHMARK)
#include <iostream>
using std::cout;
using std::endl;
#include "stopwatch.hpp"
#endif
// -----------------------------------------
#include "gpu_memory.hpp"
#include "transpose.hpp"
#define DEDISP_DEFAULT_GULP_SIZE 65536 //131072
// Note: The implementation of the sub-band algorithm is a prototype only
// Enable at your own risk! It may not be in a working state at all.
//#define USE_SUBBAND_ALGORITHM
#define DEDISP_DEFAULT_SUBBAND_SIZE 32
// TODO: Make sure this doesn't limit GPU constant memory
// available to users.
#define DEDISP_MAX_NCHANS 8192
// Internal word type used for transpose and dedispersion kernel
typedef unsigned int dedisp_word;
// Note: This must be included after the above #define and typedef
#include "kernels_hip.cuh"
// Define plan structure
struct dedisp_plan_struct {
// Size parameters
dedisp_size dm_count;
dedisp_size nchans;
dedisp_size max_delay;
dedisp_size gulp_size;
// Physical parameters
dedisp_float dt;
dedisp_float f0;
dedisp_float df;
// Host arrays
std::vector<dedisp_float> dm_list; // size = dm_count
std::vector<dedisp_float> delay_table; // size = nchans
std::vector<dedisp_bool> killmask; // size = nchans
std::vector<dedisp_size> scrunch_list; // size = dm_count
// Device arrays
thrust::device_vector<dedisp_float> d_dm_list;
thrust::device_vector<dedisp_float> d_delay_table;
thrust::device_vector<dedisp_bool> d_killmask;
thrust::device_vector<dedisp_size> d_scrunch_list;
//StreamType stream;
// Scrunching parameters
dedisp_bool scrunching_enabled;
dedisp_float pulse_width;
dedisp_float scrunch_tol;
};
// Private helper functions
// ------------------------
template<typename T>
T min(T a, T b) { return a<b ? a : b; }
unsigned long div_round_up(unsigned long a, unsigned long b) {
return (a-1) / b + 1;
}
// Internal abstraction for errors
#if defined(DEDISP_DEBUG) && DEDISP_DEBUG
#define throw_error(error) do { \
printf("An error occurred within dedisp on line %d of %s: %s", \
__LINE__, __FILE__, dedisp_get_error_string(error)); \
return (error); } while(0)
#define throw_getter_error(error, retval) do { \
printf("An error occurred within dedisp on line %d of %s: %s", \
__LINE__, __FILE__, dedisp_get_error_string(error)); \
return (retval); } while(0)
#else
#define throw_error(error) return error
#define throw_getter_error(error, retval) return retval
#endif // DEDISP_DEBUG
/*
dedisp_error throw_error(dedisp_error error) {
// Note: Could, e.g., put an error callback in here
return error;
}
*/
dedisp_error update_scrunch_list(dedisp_plan plan) {
if( hipGetLastError() != hipSuccess ) {
throw_error(DEDISP_PRIOR_GPU_ERROR);
}
if( !plan->scrunching_enabled || 0 == plan->dm_count ) {
plan->scrunch_list.resize(0);
// Fill with 1's by default for safety
plan->scrunch_list.resize(plan->dm_count, dedisp_size(1));
return DEDISP_NO_ERROR;
}
plan->scrunch_list.resize(plan->dm_count);
dedisp_error error = generate_scrunch_list(&plan->scrunch_list[0],
plan->dm_count,
plan->dt,
&plan->dm_list[0],
plan->nchans,
plan->f0,
plan->df,
plan->pulse_width,
plan->scrunch_tol);
if( error != DEDISP_NO_ERROR ) {
return error;
}
// Allocate on and copy to the device
try {
plan->d_scrunch_list.resize(plan->dm_count);
}
catch(...) {
throw_error(DEDISP_MEM_ALLOC_FAILED);
}
try {
plan->d_scrunch_list = plan->scrunch_list;
}
catch(...) {
throw_error(DEDISP_MEM_COPY_FAILED);
}
return DEDISP_NO_ERROR;
}
// ------------------------
// Public functions
// ----------------
dedisp_error dedisp_create_plan(dedisp_plan* plan_,
dedisp_size nchans,
dedisp_float dt,
dedisp_float f0,
dedisp_float df)
{
// Initialise to NULL for safety
*plan_ = 0;
if( hipGetLastError() != hipSuccess ) {
throw_error(DEDISP_PRIOR_GPU_ERROR);
}
int device_idx;
hipGetDevice(&device_idx);
// Check for parameter errors
if( nchans > DEDISP_MAX_NCHANS ) {
throw_error(DEDISP_NCHANS_EXCEEDS_LIMIT);
}
// Force the df parameter to be negative such that
// freq[chan] = f0 + chan * df.
df = -abs(df);
dedisp_plan plan = new dedisp_plan_struct();
if( !plan ) {
throw_error(DEDISP_MEM_ALLOC_FAILED);
}
plan->dm_count = 0;
plan->nchans = nchans;
plan->gulp_size = DEDISP_DEFAULT_GULP_SIZE;
plan->max_delay = 0;
plan->dt = dt;
plan->f0 = f0;
plan->df = df;
//plan->stream = 0;
// Generate delay table and copy to device memory
// Note: The DM factor is left out and applied during dedispersion
plan->delay_table.resize(plan->nchans);
generate_delay_table(&plan->delay_table[0], plan->nchans, dt, f0, df);
try {
plan->d_delay_table.resize(plan->nchans);
}
catch(...) {
dedisp_destroy_plan(plan);
throw_error(DEDISP_MEM_ALLOC_FAILED);
}
try {
plan->d_delay_table = plan->delay_table;
}
catch(...) {
dedisp_destroy_plan(plan);
throw_error(DEDISP_MEM_COPY_FAILED);
}
// Initialise the killmask
plan->killmask.resize(plan->nchans, (dedisp_bool)true);
try {
plan->d_killmask.resize(plan->nchans);
}
catch(...) {
dedisp_destroy_plan(plan);
throw_error(DEDISP_MEM_ALLOC_FAILED);
}
dedisp_error err = dedisp_set_killmask(plan, (dedisp_bool*)0);
if( err != DEDISP_NO_ERROR ) {
dedisp_destroy_plan(plan);
throw_error(err);
}
*plan_ = plan;
return DEDISP_NO_ERROR;
}
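/* Illustration only: generate_delay_table() is implemented in kernels.cuh and is not
   shown in this file. As a sketch of what such a per-channel table typically holds
   (with the DM factor deliberately left out, as noted above), the conventional
   cold-plasma delay per unit DM, expressed in samples, is roughly the following.
   The constant and the MHz frequency units are an assumption for illustration, not
   a statement about the library's actual implementation. */
static void sketch_delay_table(dedisp_float* h_delay_table, dedisp_size nchans,
                               dedisp_float dt, dedisp_float f0, dedisp_float df)
{
	for( dedisp_size c=0; c<nchans; ++c ) {
		dedisp_float f = f0 + c*df; // df is negative, so f decreases with channel index
		h_delay_table[c] = 4.15e3 * (1.0/(f*f) - 1.0/(f0*f0)) / dt;
	}
}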
dedisp_error dedisp_set_gulp_size(dedisp_plan plan,
dedisp_size gulp_size) {
if( !plan ) { throw_error(DEDISP_INVALID_PLAN); }
plan->gulp_size = gulp_size;
return DEDISP_NO_ERROR;
}
dedisp_size dedisp_get_gulp_size(dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
return plan->gulp_size;
}
dedisp_error dedisp_set_dm_list(dedisp_plan plan,
const dedisp_float* dm_list,
dedisp_size count)
{
if( !plan ) { throw_error(DEDISP_INVALID_PLAN); }
if( !dm_list ) {
throw_error(DEDISP_INVALID_POINTER);
}
if( hipGetLastError() != hipSuccess ) {
throw_error(DEDISP_PRIOR_GPU_ERROR);
}
plan->dm_count = count;
plan->dm_list.assign(dm_list, dm_list+count);
// Copy to the device
try {
plan->d_dm_list.resize(plan->dm_count);
}
catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); }
try {
plan->d_dm_list = plan->dm_list;
}
catch(...) { throw_error(DEDISP_MEM_COPY_FAILED); }
// Calculate the maximum delay and store it in the plan
plan->max_delay = dedisp_size(plan->dm_list[plan->dm_count-1] *
plan->delay_table[plan->nchans-1] + 0.5);
dedisp_error error = update_scrunch_list(plan);
if( error != DEDISP_NO_ERROR ) {
throw_error(error);
}
return DEDISP_NO_ERROR;
}
dedisp_error dedisp_generate_dm_list(dedisp_plan plan,
dedisp_float dm_start, dedisp_float dm_end,
dedisp_float ti, dedisp_float tol)
{
if( !plan ) { throw_error(DEDISP_INVALID_PLAN); }
if( hipGetLastError() != hipSuccess ) {
throw_error(DEDISP_PRIOR_GPU_ERROR);
}
// Generate the DM list (on the host)
plan->dm_list.clear();
generate_dm_list(plan->dm_list,
dm_start, dm_end,
plan->dt, ti, plan->f0, plan->df,
plan->nchans, tol);
plan->dm_count = plan->dm_list.size();
// Allocate device memory for the DM list
try {
plan->d_dm_list.resize(plan->dm_count);
}
catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); }
try {
plan->d_dm_list = plan->dm_list;
}
catch(...) { throw_error(DEDISP_MEM_COPY_FAILED); }
// Calculate the maximum delay and store it in the plan
plan->max_delay = dedisp_size(plan->dm_list[plan->dm_count-1] *
plan->delay_table[plan->nchans-1] + 0.5);
dedisp_error error = update_scrunch_list(plan);
if( error != DEDISP_NO_ERROR ) {
throw_error(error);
}
return DEDISP_NO_ERROR;
}
dedisp_float * dedisp_generate_dm_list_guru (dedisp_float dm_start, dedisp_float dm_end,
double dt, double ti, double f0, double df,
dedisp_size nchans, double tol, dedisp_size * dm_count)
{
std::vector<dedisp_float> dm_table;
generate_dm_list(dm_table,
dm_start, dm_end,
dt, ti, f0, df,
nchans, tol);
*dm_count = dm_table.size();
return &dm_table[0];
}
dedisp_error dedisp_set_device(int device_idx) {
if( hipGetLastError() != hipSuccess ) {
throw_error(DEDISP_PRIOR_GPU_ERROR);
}
hipError_t error = hipSetDevice(device_idx);
// Note: hipErrorInvalidValue isn't a documented return value, but
// it still gets returned :/
if( hipErrorInvalidDevice == error ||
hipErrorInvalidValue == error )
throw_error(DEDISP_INVALID_DEVICE_INDEX);
else if( hipErrorSetOnActiveProcess == error )
throw_error(DEDISP_DEVICE_ALREADY_SET);
else if( hipSuccess != error )
throw_error(DEDISP_UNKNOWN_ERROR);
else
return DEDISP_NO_ERROR;
}
dedisp_error dedisp_set_killmask(dedisp_plan plan, const dedisp_bool* killmask)
{
if( !plan ) { throw_error(DEDISP_INVALID_PLAN); }
if( hipGetLastError() != hipSuccess ) {
throw_error(DEDISP_PRIOR_GPU_ERROR);
}
if( 0 != killmask ) {
// Copy killmask to plan (both host and device)
plan->killmask.assign(killmask, killmask + plan->nchans);
try {
plan->d_killmask = plan->killmask;
}
catch(...) { throw_error(DEDISP_MEM_COPY_FAILED); }
}
else {
// Set the killmask to all true
std::fill(plan->killmask.begin(), plan->killmask.end(), (dedisp_bool)true);
thrust::fill(plan->d_killmask.begin(), plan->d_killmask.end(),
(dedisp_bool)true);
}
return DEDISP_NO_ERROR;
}
/*
dedisp_plan dedisp_set_stream(dedisp_plan plan, StreamType stream)
{
plan->stream = stream;
return plan;
}
*/
// Getters
// -------
dedisp_size dedisp_get_max_delay(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
if( 0 == plan->dm_count ) { throw_getter_error(DEDISP_NO_DM_LIST_SET,0); }
return plan->max_delay;
}
dedisp_size dedisp_get_dm_delay(const dedisp_plan plan, int dm_trial) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
if( 0 == plan->dm_count ) { throw_getter_error(DEDISP_NO_DM_LIST_SET,0); }
if (dm_trial < 0 || dm_trial >= int(plan->dm_count) ) { throw_getter_error(DEDISP_UNKNOWN_ERROR,0); }
return (plan->dm_list[dm_trial] * plan->delay_table[plan->nchans-1] + 0.5);
}
dedisp_size dedisp_get_channel_count(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
return plan->nchans;
}
dedisp_size dedisp_get_dm_count(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
return plan->dm_count;
}
const dedisp_float* dedisp_get_dm_list(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
if( 0 == plan->dm_count ) { throw_getter_error(DEDISP_NO_DM_LIST_SET,0); }
return &plan->dm_list[0];
}
const dedisp_bool* dedisp_get_killmask(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
return &plan->killmask[0];
}
dedisp_float dedisp_get_dt(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
return plan->dt;
}
dedisp_float dedisp_get_f0(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
return plan->f0;
}
dedisp_float dedisp_get_df(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
return plan->df;
}
// Warning: Big mother function
dedisp_error dedisp_execute_guru(const dedisp_plan plan,
dedisp_size nsamps,
const dedisp_byte* in,
dedisp_size in_nbits,
dedisp_size in_stride,
dedisp_byte* out,
dedisp_size out_nbits,
dedisp_size out_stride,
dedisp_size first_dm_idx,
dedisp_size dm_count,
unsigned flags)
{
if( !plan ) { throw_error(DEDISP_INVALID_PLAN); }
if( hipGetLastError() != hipSuccess ) {
throw_error(DEDISP_PRIOR_GPU_ERROR);
}
enum {
BITS_PER_BYTE = 8,
BYTES_PER_WORD = sizeof(dedisp_word) / sizeof(dedisp_byte)
};
dedisp_size out_bytes_per_sample = out_nbits / (sizeof(dedisp_byte) *
BITS_PER_BYTE);
if( 0 == in || 0 == out ) {
throw_error(DEDISP_INVALID_POINTER);
}
// Note: Must be careful with integer division
if( in_stride < plan->nchans*in_nbits/(sizeof(dedisp_byte)*BITS_PER_BYTE) ||
out_stride < (nsamps - plan->max_delay)*out_bytes_per_sample ) {
throw_error(DEDISP_INVALID_STRIDE);
}
if( 0 == plan->dm_count ) {
throw_error(DEDISP_NO_DM_LIST_SET);
}
if( nsamps < plan->max_delay ) {
throw_error(DEDISP_TOO_FEW_NSAMPS);
}
// Check for valid synchronisation flags
if( flags & DEDISP_ASYNC && flags & DEDISP_WAIT ) {
throw_error(DEDISP_INVALID_FLAG_COMBINATION);
}
// Check for valid nbits values
if( in_nbits != 1 &&
in_nbits != 2 &&
in_nbits != 4 &&
in_nbits != 8 &&
in_nbits != 16 &&
in_nbits != 32 ) {
throw_error(DEDISP_UNSUPPORTED_IN_NBITS);
}
if( out_nbits != 8 &&
out_nbits != 16 &&
out_nbits != 32 ) {
throw_error(DEDISP_UNSUPPORTED_OUT_NBITS);
}
bool using_host_memory;
if( flags & DEDISP_HOST_POINTERS && flags & DEDISP_DEVICE_POINTERS ) {
throw_error(DEDISP_INVALID_FLAG_COMBINATION);
}
else {
using_host_memory = !(flags & DEDISP_DEVICE_POINTERS);
}
// Copy the lookup tables to constant memory on the device
// TODO: This was much tidier, but thanks to CUDA's insistence on
// breaking its API in v5.0 I had to mess it up like this.
hipMemcpyToSymbolAsync(c_delay_table,
thrust::raw_pointer_cast(&plan->d_delay_table[0]),
plan->nchans * sizeof(dedisp_float),
0, hipMemcpyDeviceToDevice, 0);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if( error != hipSuccess ) {
throw_error(DEDISP_MEM_COPY_FAILED);
}
hipMemcpyToSymbolAsync(c_killmask,
thrust::raw_pointer_cast(&plan->d_killmask[0]),
plan->nchans * sizeof(dedisp_bool),
0, hipMemcpyDeviceToDevice, 0);
hipDeviceSynchronize();
error = hipGetLastError();
if( error != hipSuccess ) {
throw_error(DEDISP_MEM_COPY_FAILED);
}
// Compute the problem decomposition
dedisp_size nsamps_computed = nsamps - plan->max_delay;
// Specify the maximum gulp size
dedisp_size nsamps_computed_gulp_max;
if( using_host_memory ) {
nsamps_computed_gulp_max = min(plan->gulp_size, nsamps_computed);
}
else {
// Just do it in one gulp if given device pointers
nsamps_computed_gulp_max = nsamps_computed;
}
// Just to be sure
// TODO: This seems quite wrong. Why was it here?
/*
if( nsamps_computed_gulp_max < plan->max_delay ) {
throw_error(DEDISP_TOO_FEW_NSAMPS);
}
*/
// Compute derived counts for maximum gulp size [dedisp_word == 4 bytes]
dedisp_size nsamps_gulp_max = nsamps_computed_gulp_max + plan->max_delay;
dedisp_size chans_per_word = sizeof(dedisp_word)*BITS_PER_BYTE / in_nbits;
dedisp_size nchan_words = plan->nchans / chans_per_word;
// We use words for processing but allow arbitrary byte strides, which are
// not necessarily friendly.
bool friendly_in_stride = (0 == in_stride % BYTES_PER_WORD);
// Note: If desired, this could be rounded up, e.g., to a power of 2
dedisp_size in_buf_stride_words = nchan_words;
dedisp_size in_count_gulp_max = nsamps_gulp_max * in_buf_stride_words;
dedisp_size nsamps_padded_gulp_max = div_round_up(nsamps_computed_gulp_max,
DEDISP_SAMPS_PER_THREAD)
* DEDISP_SAMPS_PER_THREAD + plan->max_delay;
dedisp_size in_count_padded_gulp_max =
nsamps_padded_gulp_max * in_buf_stride_words;
// TODO: Make this a parameter?
dedisp_size min_in_nbits = 0;
if( plan->scrunching_enabled ) {
// TODO: This produces corrupt output when equal to 32 !
// Also check whether the unpacker is broken when in_nbits=32 !
min_in_nbits = 16; //32;
}
dedisp_size unpacked_in_nbits = max((int)in_nbits, (int)min_in_nbits);
dedisp_size unpacked_chans_per_word =
sizeof(dedisp_word)*BITS_PER_BYTE / unpacked_in_nbits;
dedisp_size unpacked_nchan_words = plan->nchans / unpacked_chans_per_word;
dedisp_size unpacked_buf_stride_words = unpacked_nchan_words;
dedisp_size unpacked_count_padded_gulp_max =
nsamps_padded_gulp_max * unpacked_buf_stride_words;
dedisp_size out_stride_gulp_samples = nsamps_computed_gulp_max;
dedisp_size out_stride_gulp_bytes =
out_stride_gulp_samples * out_bytes_per_sample;
dedisp_size out_count_gulp_max = out_stride_gulp_bytes * dm_count;
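	// Worked example (illustration only): with in_nbits = 8 and nchans = 1024,
	// chans_per_word = 32/8 = 4 and nchan_words = 256, so each time sample occupies
	// 256 dedisp_words of input; with out_nbits = 32, each output sample is 4 bytes
	// and out_count_gulp_max = 4 * nsamps_computed_gulp_max * dm_count bytes.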
// Organise device memory pointers
// -------------------------------
const dedisp_word* d_in = 0;
dedisp_word* d_transposed = 0;
dedisp_word* d_unpacked = 0;
dedisp_byte* d_out = 0;
thrust::device_vector<dedisp_word> d_in_buf;
thrust::device_vector<dedisp_word> d_transposed_buf;
thrust::device_vector<dedisp_word> d_unpacked_buf;
thrust::device_vector<dedisp_byte> d_out_buf;
// Allocate temporary buffers on the device where necessary
if( using_host_memory || !friendly_in_stride ) {
try { d_in_buf.resize(in_count_gulp_max); }
catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); }
d_in = thrust::raw_pointer_cast(&d_in_buf[0]);
}
else {
d_in = (dedisp_word*)in;
}
if( using_host_memory ) {
try { d_out_buf.resize(out_count_gulp_max); }
catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); }
d_out = thrust::raw_pointer_cast(&d_out_buf[0]);
}
else {
d_out = out;
}
//// Note: * 2 here is for the time-scrunched copies of the data
try { d_transposed_buf.resize(in_count_padded_gulp_max/* * 2 */); }
catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); }
d_transposed = thrust::raw_pointer_cast(&d_transposed_buf[0]);
// Note: * 2 here is for the time-scrunched copies of the data
try { d_unpacked_buf.resize(unpacked_count_padded_gulp_max * 2); }
catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); }
d_unpacked = thrust::raw_pointer_cast(&d_unpacked_buf[0]);
// -------------------------------
// The stride (in words) between differently-scrunched copies of the
// unpacked data.
dedisp_size scrunch_stride = unpacked_count_padded_gulp_max;
#ifdef USE_SUBBAND_ALGORITHM
dedisp_size sb_size = DEDISP_DEFAULT_SUBBAND_SIZE;
// Note: Setting these two parameters equal should balance the two steps of
// the sub-band algorithm.
dedisp_size dm_size = sb_size; // Ndm'
dedisp_size sb_count = plan->nchans / sb_size;
dedisp_size nom_dm_count = dm_count / dm_size;
thrust::device_vector<dedisp_word> d_intermediate_buf;
try { d_intermediate_buf.resize(nsamps_padded_gulp_max * sb_count
* nom_dm_count); }
catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); }
dedisp_word* d_intermediate = thrust::raw_pointer_cast(&d_intermediate_buf[0]);
#endif // USE_SUBBAND_ALGORITHM
// TODO: Eventually re-implement streams
hipStream_t stream = 0;//(hipStream_t)plan->stream;
#ifdef DEDISP_BENCHMARK
Stopwatch copy_to_timer;
Stopwatch copy_from_timer;
Stopwatch transpose_timer;
Stopwatch kernel_timer;
#endif
// Gulp loop
for( dedisp_size gulp_samp_idx=0;
gulp_samp_idx<nsamps_computed;
gulp_samp_idx+=nsamps_computed_gulp_max ) {
dedisp_size nsamps_computed_gulp = min(nsamps_computed_gulp_max,
nsamps_computed-gulp_samp_idx);
dedisp_size nsamps_gulp = nsamps_computed_gulp + plan->max_delay;
dedisp_size nsamps_padded_gulp = div_round_up(nsamps_computed_gulp,
DEDISP_SAMPS_PER_THREAD)
* DEDISP_SAMPS_PER_THREAD + plan->max_delay;
#ifdef DEDISP_BENCHMARK
copy_to_timer.start();
#endif
// Copy the input data from host to device if necessary
if( using_host_memory ) {
// Allowing arbitrary byte strides means we must do a strided copy
if( !copy_host_to_device_2d((dedisp_byte*)d_in,
in_buf_stride_words * BYTES_PER_WORD,
in + gulp_samp_idx*in_stride,
in_stride,
nchan_words * BYTES_PER_WORD,
nsamps_gulp) ) {
throw_error(DEDISP_MEM_COPY_FAILED);
}
}
else if( !friendly_in_stride ) {
// Device pointers with unfriendly stride
if( !copy_device_to_device_2d((dedisp_byte*)d_in,
in_buf_stride_words * BYTES_PER_WORD,
in + gulp_samp_idx*in_stride,
in_stride,
nchan_words * BYTES_PER_WORD,
nsamps_gulp) ) {
throw_error(DEDISP_MEM_COPY_FAILED);
}
}
#ifdef DEDISP_BENCHMARK
hipDeviceSynchronize();
copy_to_timer.stop();
transpose_timer.start();
#endif
// Transpose the words in the input
Transpose<dedisp_word> transpose;
transpose.transpose(d_in,
nchan_words, nsamps_gulp,
in_buf_stride_words, nsamps_padded_gulp,
d_transposed);
#ifdef DEDISP_BENCHMARK
hipDeviceSynchronize();
transpose_timer.stop();
kernel_timer.start();
#endif
// Unpack the transposed data
unpack(d_transposed, nsamps_padded_gulp, nchan_words,
d_unpacked,
in_nbits, unpacked_in_nbits);
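// Note: Unpacking widens each sample from in_nbits to unpacked_in_nbits
//       (e.g., 2-bit input is expanded to 16 bits when scrunching is
//       enabled), presumably to give the scrunch_x2 step below extra
//       headroom when adjacent samples are combined.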
// Compute time-scrunched copies of the data
if( plan->scrunching_enabled ) {
dedisp_size max_scrunch = plan->scrunch_list[plan->dm_count-1];
dedisp_size scrunch_in_offset = 0;
dedisp_size scrunch_out_offset = scrunch_stride;
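// Note: Each pass of this loop halves the time resolution again, reading the
//       previous level's output at scrunch_in_offset and appending the new
//       level at scrunch_out_offset, so level s is derived from level s/2
//       rather than recomputed from the full-resolution data.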
for( dedisp_size s=2; s<=max_scrunch; s*=2 ) {
// TODO: Need to pass in stride and count? I.e., nsamps_padded/computed_gulp
//scrunch_x2(&d_transposed[scrunch_in_offset],
// nsamps_padded_gulp/(s/2), nchan_words, in_nbits,
// &d_transposed[scrunch_out_offset]);
scrunch_x2(&d_unpacked[scrunch_in_offset],
nsamps_padded_gulp/(s/2),
unpacked_nchan_words, unpacked_in_nbits,
&d_unpacked[scrunch_out_offset]);
scrunch_in_offset = scrunch_out_offset;
scrunch_out_offset += scrunch_stride / s;
}
}
#ifdef USE_SUBBAND_ALGORITHM
// TODO: This has not been updated to use d_unpacked!
dedisp_size chan_stride = 1;
dedisp_size dm_stride = dm_size;
dedisp_size ostride = nsamps_padded_gulp * sb_count;
dedisp_size batch_size = sb_count;
dedisp_size batch_in_stride = nsamps_padded_gulp * sb_size / chans_per_word;
dedisp_size batch_dm_stride = 0;
dedisp_size batch_chan_stride = sb_size;
dedisp_size batch_out_stride = nsamps_padded_gulp;
/* // Consistency checks
if( (nom_dm_count-1)*dm_stride + (batch_size-1)*batch_dm_stride >= dm_count ) {
throw std::runtime_error("DM STRIDES ARE INCONSISTENT");
}
if( (sb_size-1)*chan_stride + (batch_size-1)*batch_chan_stride >= plan->nchans ) {
throw std::runtime_error("CHAN STRIDES ARE INCONSISTENT");
}
*/
// Both steps
if( !dedisperse(d_transposed,
nsamps_padded_gulp,
nsamps_computed_gulp,
in_nbits,
sb_size,
chan_stride,
thrust::raw_pointer_cast(&plan->d_dm_list[first_dm_idx]),
nom_dm_count,
dm_stride,
(dedisp_byte*)d_intermediate,
ostride,
32,//out_nbits,
batch_size,
batch_in_stride,
batch_dm_stride,
batch_chan_stride,
batch_out_stride) ) {
throw_error(DEDISP_INTERNAL_GPU_ERROR);
}
batch_size = nom_dm_count;
chan_stride = sb_size;
dm_stride = 1;
ostride = out_stride_gulp_samples;
batch_in_stride = nsamps_padded_gulp * sb_count;
batch_dm_stride = 0;
batch_chan_stride = 0;
batch_out_stride = out_stride_gulp_samples * dm_size;
/* // Consistency checks
if( (dm_size-1)*dm_stride + (batch_size-1)*batch_dm_stride >= dm_count ) {
throw std::runtime_error("DM STRIDES ARE INCONSISTENT");
}
if( (sb_count-1)*chan_stride + (batch_size-1)*batch_chan_stride >= plan->nchans ) {
throw std::runtime_error("CHAN STRIDES ARE INCONSISTENT");
}
*/
if( !dedisperse(d_intermediate,
nsamps_padded_gulp,
nsamps_computed_gulp,
32,//in_nbits,
sb_count,
chan_stride,
thrust::raw_pointer_cast(&plan->d_dm_list[first_dm_idx]),
dm_size,
dm_stride,
d_out,
ostride,
out_nbits,
batch_size,
batch_in_stride,
batch_dm_stride,
batch_chan_stride,
batch_out_stride) ) {
throw_error(DEDISP_INTERNAL_GPU_ERROR);
}
#else // Use direct algorithm
if( plan->scrunching_enabled ) {
// TODO: THIS WILL NOT WORK IF dm_count < plan->dm_count !
// Need to avoid assumption that scrunch starts at 1
// Must start the scrunch at the first *requested* DM
thrust::device_vector<dedisp_float> d_scrunched_dm_list(dm_count);
dedisp_size scrunch_start = 0;
dedisp_size scrunch_offset = 0;
for( dedisp_size s=0; s<dm_count; ++s ) {
dedisp_size cur_scrunch = plan->scrunch_list[s];
// Look for segment boundaries
if( s+1 == dm_count || plan->scrunch_list[s+1] != cur_scrunch ) {
//dedisp_size next_scrunch = plan->scrunch_list[s];
//if( next_scrunch != cur_scrunch ) {
dedisp_size scrunch_count = s+1 - scrunch_start;
// Make a copy of the dm list divided by the scrunch factor
// Note: This has the effect of increasing dt in the delay eqn
dedisp_size dm_offset = first_dm_idx + scrunch_start;
thrust::transform(plan->d_dm_list.begin() + dm_offset,
plan->d_dm_list.begin() + dm_offset + scrunch_count,
thrust::make_constant_iterator(cur_scrunch),
d_scrunched_dm_list.begin(),
thrust::divides<dedisp_float>());
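// Note: Dividing the DM by the scrunch factor compensates for the coarser
//       effective sampling time: a delay of, say, 100 samples at the
//       native dt corresponds to 50 samples after scrunching by 2, which
//       the kernel reproduces by using DM/2 with the original delay table.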
dedisp_float* d_scrunched_dm_list_ptr =
thrust::raw_pointer_cast(&d_scrunched_dm_list[0]);
// TODO: Is this how the nsamps vars need to change?
if( !dedisperse(//&d_transposed[scrunch_offset],
&d_unpacked[scrunch_offset],
nsamps_padded_gulp / cur_scrunch,
nsamps_computed_gulp / cur_scrunch,
unpacked_in_nbits, //in_nbits,
plan->nchans,
1,
d_scrunched_dm_list_ptr,
scrunch_count, // dm_count
1,
d_out + scrunch_start*out_stride_gulp_bytes,
out_stride_gulp_samples,
out_nbits,
1, 0, 0, 0, 0) ) {
throw_error(DEDISP_INTERNAL_GPU_ERROR);
}
scrunch_offset += scrunch_stride / cur_scrunch;
scrunch_start += scrunch_count;
}
}
}
else {
// Perform direct dedispersion without scrunching
if( !dedisperse(//d_transposed,
d_unpacked,
nsamps_padded_gulp,
nsamps_computed_gulp,
unpacked_in_nbits, //in_nbits,
plan->nchans,
1,
thrust::raw_pointer_cast(&plan->d_dm_list[first_dm_idx]),
dm_count,
1,
d_out,
out_stride_gulp_samples,
out_nbits,
1, 0, 0, 0, 0) ) {
throw_error(DEDISP_INTERNAL_GPU_ERROR);
}
}
#endif // SB/direct algorithm
#ifdef DEDISP_BENCHMARK
hipDeviceSynchronize();
kernel_timer.stop();
#endif
// Copy output back to host memory if necessary
if( using_host_memory ) {
dedisp_size gulp_samp_byte_idx = gulp_samp_idx * out_bytes_per_sample;
dedisp_size nsamp_bytes_computed_gulp = nsamps_computed_gulp * out_bytes_per_sample;
#ifdef DEDISP_BENCHMARK
copy_from_timer.start();
#endif
if( plan->scrunching_enabled ) {
// TODO: This for-loop isn't a very elegant solution
dedisp_size scrunch_start = 0;
for( dedisp_size s=0; s<dm_count; ++s ) {
dedisp_size cur_scrunch = plan->scrunch_list[s];
// Look for segment boundaries
if( s+1 == dm_count || plan->scrunch_list[s+1] != cur_scrunch ) {
dedisp_size scrunch_count = s+1 - scrunch_start;
dedisp_size src_stride = out_stride_gulp_bytes;
dedisp_byte* src = d_out + scrunch_start * src_stride;
dedisp_byte* dst = (out + scrunch_start * out_stride
+ gulp_samp_byte_idx / cur_scrunch);
dedisp_size width = nsamp_bytes_computed_gulp / cur_scrunch;
dedisp_size height = scrunch_count;
copy_device_to_host_2d(dst, // dst
out_stride, // dst stride
src, // src
src_stride, // src stride
width, // width bytes
height); // height
scrunch_start += scrunch_count;
}
}
}
else {
copy_device_to_host_2d(out + gulp_samp_byte_idx, // dst
out_stride, // dst stride
d_out, // src
out_stride_gulp_bytes, // src stride
nsamp_bytes_computed_gulp, // width bytes
dm_count); // height
}
#ifdef DEDISP_BENCHMARK
hipDeviceSynchronize();
copy_from_timer.stop();
#endif
}
} // End of gulp loop
#ifdef DEDISP_BENCHMARK
cout << "Copy to time: " << copy_to_timer.getTime() << endl;
cout << "Copy from time: " << copy_from_timer.getTime() << endl;
cout << "Transpose time: " << transpose_timer.getTime() << endl;
cout << "Kernel time: " << kernel_timer.getTime() << endl;
float total_time = copy_to_timer.getTime() + copy_from_timer.getTime() + transpose_timer.getTime() + kernel_timer.getTime();
cout << "Total time: " << total_time << endl;
// Append the timing results to a log file
std::ofstream perf_file("perf.log", std::ios::app);
perf_file << copy_to_timer.getTime() << "\t"
<< copy_from_timer.getTime() << "\t"
<< transpose_timer.getTime() << "\t"
<< kernel_timer.getTime() << "\t"
<< total_time << endl;
perf_file.close();
#endif
if( !(flags & DEDISP_ASYNC) ) {
hipStreamSynchronize(stream);
}
// Phew!
return DEDISP_NO_ERROR;
}
dedisp_error dedisp_execute_adv(const dedisp_plan plan,
dedisp_size nsamps,
const dedisp_byte* in,
dedisp_size in_nbits,
dedisp_size in_stride,
dedisp_byte* out,
dedisp_size out_nbits,
dedisp_size out_stride,
unsigned flags)
{
dedisp_size first_dm_idx = 0;
dedisp_size dm_count = plan->dm_count;
return dedisp_execute_guru(plan, nsamps,
in, in_nbits, in_stride,
out, out_nbits, out_stride,
first_dm_idx, dm_count,
flags);
}
// TODO: Consider having the user specify nsamps_computed instead of nsamps
dedisp_error dedisp_execute(const dedisp_plan plan,
dedisp_size nsamps,
const dedisp_byte* in,
dedisp_size in_nbits,
dedisp_byte* out,
dedisp_size out_nbits,
unsigned flags)
{
enum {
BITS_PER_BYTE = 8
};
// Note: The default out_stride is nsamps - plan->max_delay
dedisp_size out_bytes_per_sample =
out_nbits / (sizeof(dedisp_byte) * BITS_PER_BYTE);
// Note: Must be careful with integer division
dedisp_size in_stride =
plan->nchans * in_nbits / (sizeof(dedisp_byte) * BITS_PER_BYTE);
dedisp_size out_stride = (nsamps - plan->max_delay) * out_bytes_per_sample;
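// Note: For example, nchans = 1024 at in_nbits = 8 gives an input stride of
//       1024 bytes per sample time, and out_nbits = 32 with
//       nsamps - max_delay = 1000000 gives an output stride of 4000000 bytes
//       per DM trial.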
return dedisp_execute_adv(plan, nsamps,
in, in_nbits, in_stride,
out, out_nbits, out_stride,
flags);
}
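/*
  For reference, a minimal host-side call sequence for the simple interface
  above might look like the following sketch. It is illustrative only: error
  checking is omitted, and nchans, dt, f0, df, dm_start, dm_end, ti, tol,
  nsamps, in, in_nbits and out_nbits are placeholder values supplied by the
  caller.

    dedisp_plan plan;
    dedisp_create_plan(&plan, nchans, dt, f0, df);
    dedisp_generate_dm_list(plan, dm_start, dm_end, ti, tol);
    dedisp_size dm_count  = dedisp_get_dm_count(plan);
    dedisp_size max_delay = dedisp_get_max_delay(plan);
    dedisp_byte* out = (dedisp_byte*)malloc((nsamps - max_delay) * dm_count *
                                            (out_nbits / 8));
    dedisp_execute(plan, nsamps, in, in_nbits, out, out_nbits, 0);
    dedisp_destroy_plan(plan);
    free(out);
*/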
dedisp_error dedisp_sync(void)
{
if( hipDeviceSynchronize() != hipSuccess )
throw_error(DEDISP_PRIOR_GPU_ERROR);
else
return DEDISP_NO_ERROR;
}
void dedisp_destroy_plan(dedisp_plan plan)
{
if( plan ) {
delete plan;
}
}
const char* dedisp_get_error_string(dedisp_error error)
{
switch( error ) {
case DEDISP_NO_ERROR:
return "No error";
case DEDISP_MEM_ALLOC_FAILED:
return "Memory allocation failed";
case DEDISP_MEM_COPY_FAILED:
return "Memory copy failed";
case DEDISP_INVALID_DEVICE_INDEX:
return "Invalid device index";
case DEDISP_DEVICE_ALREADY_SET:
return "Device is already set and cannot be changed";
case DEDISP_NCHANS_EXCEEDS_LIMIT:
return "No. channels exceeds internal limit";
case DEDISP_INVALID_PLAN:
return "Invalid plan";
case DEDISP_INVALID_POINTER:
return "Invalid pointer";
case DEDISP_INVALID_STRIDE:
return "Invalid stride";
case DEDISP_NO_DM_LIST_SET:
return "No DM list has been set";
case DEDISP_TOO_FEW_NSAMPS:
return "No. samples < maximum delay";
case DEDISP_INVALID_FLAG_COMBINATION:
return "Invalid flag combination";
case DEDISP_UNSUPPORTED_IN_NBITS:
return "Unsupported in_nbits value";
case DEDISP_UNSUPPORTED_OUT_NBITS:
return "Unsupported out_nbits value";
case DEDISP_PRIOR_GPU_ERROR:
return "Prior GPU error.";
case DEDISP_INTERNAL_GPU_ERROR:
return "Internal GPU error. Please contact the author(s).";
case DEDISP_UNKNOWN_ERROR:
return "Unknown error. Please contact the author(s).";
default:
return "Invalid error code";
}
}
dedisp_error dedisp_enable_adaptive_dt(dedisp_plan plan,
dedisp_float pulse_width,
dedisp_float tol)
{
if( !plan ) { throw_error(DEDISP_INVALID_PLAN); }
plan->scrunching_enabled = true;
plan->pulse_width = pulse_width;
plan->scrunch_tol = tol;
return update_scrunch_list(plan);
}
dedisp_error dedisp_disable_adaptive_dt(dedisp_plan plan) {
if( !plan ) { throw_error(DEDISP_INVALID_PLAN); }
plan->scrunching_enabled = false;
return update_scrunch_list(plan);
}
dedisp_bool dedisp_using_adaptive_dt(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,false); }
return plan->scrunching_enabled;
}
const dedisp_size* dedisp_get_dt_factors(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
if( 0 == plan->dm_count ) { throw_getter_error(DEDISP_NO_DM_LIST_SET,0); }
return &plan->scrunch_list[0];
}
// ----------------
| b0e7c8002bcdf083b11838805785c809dfe0b486.cu | /*
* Copyright 2012 Ben Barsdell
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
This file contains the boring boiler-plate code to manage the library.
TODO: Test on 32-bit integer input
Consider accepting 32-bit floats instead of 32-bit ints
*/
//#define DEDISP_DEBUG
//#define DEDISP_BENCHMARK
#include <dedisp.h>
#include <vector>
#include <algorithm> // For std::fill
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
// For copying and scrunching the DM list
#include <thrust/transform.h>
#include <thrust/iterator/constant_iterator.h>
#ifdef DEDISP_BENCHMARK
#include <fstream>
#endif
#if defined(DEDISP_DEBUG) && DEDISP_DEBUG
#include <stdio.h> // For printf
#endif
// TODO: Remove these when done benchmarking
// -----------------------------------------
#if defined(DEDISP_BENCHMARK)
#include <iostream>
using std::cout;
using std::endl;
#include "stopwatch.hpp"
#endif
// -----------------------------------------
#include "gpu_memory.hpp"
#include "transpose.hpp"
#define DEDISP_DEFAULT_GULP_SIZE 65536 //131072
// Note: The implementation of the sub-band algorithm is a prototype only
// Enable at your own risk! It may not be in a working state at all.
//#define USE_SUBBAND_ALGORITHM
#define DEDISP_DEFAULT_SUBBAND_SIZE 32
// TODO: Make sure this doesn't limit GPU constant memory
// available to users.
#define DEDISP_MAX_NCHANS 8192
// Internal word type used for transpose and dedispersion kernel
typedef unsigned int dedisp_word;
// Note: This must be included after the above #define and typedef
#include "kernels.cuh"
// Define plan structure
struct dedisp_plan_struct {
// Size parameters
dedisp_size dm_count;
dedisp_size nchans;
dedisp_size max_delay;
dedisp_size gulp_size;
// Physical parameters
dedisp_float dt;
dedisp_float f0;
dedisp_float df;
// Host arrays
std::vector<dedisp_float> dm_list; // size = dm_count
std::vector<dedisp_float> delay_table; // size = nchans
std::vector<dedisp_bool> killmask; // size = nchans
std::vector<dedisp_size> scrunch_list; // size = dm_count
// Device arrays
thrust::device_vector<dedisp_float> d_dm_list;
thrust::device_vector<dedisp_float> d_delay_table;
thrust::device_vector<dedisp_bool> d_killmask;
thrust::device_vector<dedisp_size> d_scrunch_list;
//StreamType stream;
// Scrunching parameters
dedisp_bool scrunching_enabled;
dedisp_float pulse_width;
dedisp_float scrunch_tol;
};
// Private helper functions
// ------------------------
template<typename T>
T min(T a, T b) { return a<b ? a : b; }
unsigned long div_round_up(unsigned long a, unsigned long b) {
return (a-1) / b + 1;
}
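// Note: e.g., div_round_up(10, 4) == 3, whereas plain integer division gives
//       10 / 4 == 2.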
// Internal abstraction for errors
#if defined(DEDISP_DEBUG) && DEDISP_DEBUG
#define throw_error(error) do { \
printf("An error occurred within dedisp on line %d of %s: %s", \
__LINE__, __FILE__, dedisp_get_error_string(error)); \
return (error); } while(0)
#define throw_getter_error(error, retval) do { \
printf("An error occurred within dedisp on line %d of %s: %s", \
__LINE__, __FILE__, dedisp_get_error_string(error)); \
return (retval); } while(0)
#else
#define throw_error(error) return error
#define throw_getter_error(error, retval) return retval
#endif // DEDISP_DEBUG
/*
dedisp_error throw_error(dedisp_error error) {
// Note: Could, e.g., put an error callback in here
return error;
}
*/
dedisp_error update_scrunch_list(dedisp_plan plan) {
if( cudaGetLastError() != cudaSuccess ) {
throw_error(DEDISP_PRIOR_GPU_ERROR);
}
if( !plan->scrunching_enabled || 0 == plan->dm_count ) {
plan->scrunch_list.resize(0);
// Fill with 1's by default for safety
plan->scrunch_list.resize(plan->dm_count, dedisp_size(1));
return DEDISP_NO_ERROR;
}
plan->scrunch_list.resize(plan->dm_count);
dedisp_error error = generate_scrunch_list(&plan->scrunch_list[0],
plan->dm_count,
plan->dt,
&plan->dm_list[0],
plan->nchans,
plan->f0,
plan->df,
plan->pulse_width,
plan->scrunch_tol);
if( error != DEDISP_NO_ERROR ) {
return error;
}
// Allocate on and copy to the device
try {
plan->d_scrunch_list.resize(plan->dm_count);
}
catch(...) {
throw_error(DEDISP_MEM_ALLOC_FAILED);
}
try {
plan->d_scrunch_list = plan->scrunch_list;
}
catch(...) {
throw_error(DEDISP_MEM_COPY_FAILED);
}
return DEDISP_NO_ERROR;
}
// ------------------------
// Public functions
// ----------------
dedisp_error dedisp_create_plan(dedisp_plan* plan_,
dedisp_size nchans,
dedisp_float dt,
dedisp_float f0,
dedisp_float df)
{
// Initialise to NULL for safety
*plan_ = 0;
if( cudaGetLastError() != cudaSuccess ) {
throw_error(DEDISP_PRIOR_GPU_ERROR);
}
int device_idx;
cudaGetDevice(&device_idx);
// Check for parameter errors
if( nchans > DEDISP_MAX_NCHANS ) {
throw_error(DEDISP_NCHANS_EXCEEDS_LIMIT);
}
// Force the df parameter to be negative such that
// freq[chan] = f0 + chan * df.
df = -abs(df);
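// Note: e.g., if f0 = 1500.0 MHz and df is passed in as 0.39 MHz, the table
//       below is built with df = -0.39 MHz, so channel c sits at
//       1500.0 - 0.39*c MHz and channel 0 is the highest frequency.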
dedisp_plan plan = new dedisp_plan_struct();
if( !plan ) {
throw_error(DEDISP_MEM_ALLOC_FAILED);
}
plan->dm_count = 0;
plan->nchans = nchans;
plan->gulp_size = DEDISP_DEFAULT_GULP_SIZE;
plan->max_delay = 0;
plan->dt = dt;
plan->f0 = f0;
plan->df = df;
//plan->stream = 0;
// Generate delay table and copy to device memory
// Note: The DM factor is left out and applied during dedispersion
plan->delay_table.resize(plan->nchans);
generate_delay_table(&plan->delay_table[0], plan->nchans, dt, f0, df);
try {
plan->d_delay_table.resize(plan->nchans);
}
catch(...) {
dedisp_destroy_plan(plan);
throw_error(DEDISP_MEM_ALLOC_FAILED);
}
try {
plan->d_delay_table = plan->delay_table;
}
catch(...) {
dedisp_destroy_plan(plan);
throw_error(DEDISP_MEM_COPY_FAILED);
}
// Initialise the killmask
plan->killmask.resize(plan->nchans, (dedisp_bool)true);
try {
plan->d_killmask.resize(plan->nchans);
}
catch(...) {
dedisp_destroy_plan(plan);
throw_error(DEDISP_MEM_ALLOC_FAILED);
}
dedisp_error err = dedisp_set_killmask(plan, (dedisp_bool*)0);
if( err != DEDISP_NO_ERROR ) {
dedisp_destroy_plan(plan);
throw_error(err);
}
*plan_ = plan;
return DEDISP_NO_ERROR;
}
dedisp_error dedisp_set_gulp_size(dedisp_plan plan,
dedisp_size gulp_size) {
if( !plan ) { throw_error(DEDISP_INVALID_PLAN); }
plan->gulp_size = gulp_size;
return DEDISP_NO_ERROR;
}
dedisp_size dedisp_get_gulp_size(dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
return plan->gulp_size;
}
dedisp_error dedisp_set_dm_list(dedisp_plan plan,
const dedisp_float* dm_list,
dedisp_size count)
{
if( !plan ) { throw_error(DEDISP_INVALID_PLAN); }
if( !dm_list ) {
throw_error(DEDISP_INVALID_POINTER);
}
if( cudaGetLastError() != cudaSuccess ) {
throw_error(DEDISP_PRIOR_GPU_ERROR);
}
plan->dm_count = count;
plan->dm_list.assign(dm_list, dm_list+count);
// Copy to the device
try {
plan->d_dm_list.resize(plan->dm_count);
}
catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); }
try {
plan->d_dm_list = plan->dm_list;
}
catch(...) { throw_error(DEDISP_MEM_COPY_FAILED); }
// Calculate the maximum delay and store it in the plan
plan->max_delay = dedisp_size(plan->dm_list[plan->dm_count-1] *
plan->delay_table[plan->nchans-1] + 0.5);
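// Note: i.e., the last (largest) requested DM multiplied by the delay, per
//       unit DM, of the lowest-frequency channel, rounded to the nearest
//       sample. For example, a delay-table entry of 350.2 samples per unit
//       DM and a maximum DM of 100 give max_delay = 35020.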
dedisp_error error = update_scrunch_list(plan);
if( error != DEDISP_NO_ERROR ) {
throw_error(error);
}
return DEDISP_NO_ERROR;
}
dedisp_error dedisp_generate_dm_list(dedisp_plan plan,
dedisp_float dm_start, dedisp_float dm_end,
dedisp_float ti, dedisp_float tol)
{
if( !plan ) { throw_error(DEDISP_INVALID_PLAN); }
if( cudaGetLastError() != cudaSuccess ) {
throw_error(DEDISP_PRIOR_GPU_ERROR);
}
// Generate the DM list (on the host)
plan->dm_list.clear();
generate_dm_list(plan->dm_list,
dm_start, dm_end,
plan->dt, ti, plan->f0, plan->df,
plan->nchans, tol);
plan->dm_count = plan->dm_list.size();
// Allocate device memory for the DM list
try {
plan->d_dm_list.resize(plan->dm_count);
}
catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); }
try {
plan->d_dm_list = plan->dm_list;
}
catch(...) { throw_error(DEDISP_MEM_COPY_FAILED); }
// Calculate the maximum delay and store it in the plan
plan->max_delay = dedisp_size(plan->dm_list[plan->dm_count-1] *
plan->delay_table[plan->nchans-1] + 0.5);
dedisp_error error = update_scrunch_list(plan);
if( error != DEDISP_NO_ERROR ) {
throw_error(error);
}
return DEDISP_NO_ERROR;
}
dedisp_float * dedisp_generate_dm_list_guru (dedisp_float dm_start, dedisp_float dm_end,
double dt, double ti, double f0, double df,
dedisp_size nchans, double tol, dedisp_size * dm_count)
{
std::vector<dedisp_float> dm_table;
generate_dm_list(dm_table,
dm_start, dm_end,
dt, ti, f0, df,
nchans, tol);
*dm_count = dm_table.size();
// Copy the table to heap storage so that the returned pointer remains valid
// after the local vector above is destroyed; the caller owns the array and
// must eventually delete[] it.
dedisp_float* dm_table_out = new dedisp_float[dm_table.size()];
std::copy(dm_table.begin(), dm_table.end(), dm_table_out);
return dm_table_out;
}
dedisp_error dedisp_set_device(int device_idx) {
if( cudaGetLastError() != cudaSuccess ) {
throw_error(DEDISP_PRIOR_GPU_ERROR);
}
cudaError_t error = cudaSetDevice(device_idx);
// Note: cudaErrorInvalidValue isn't a documented return value, but
// it still gets returned :/
if( cudaErrorInvalidDevice == error ||
cudaErrorInvalidValue == error )
throw_error(DEDISP_INVALID_DEVICE_INDEX);
else if( cudaErrorSetOnActiveProcess == error )
throw_error(DEDISP_DEVICE_ALREADY_SET);
else if( cudaSuccess != error )
throw_error(DEDISP_UNKNOWN_ERROR);
else
return DEDISP_NO_ERROR;
}
dedisp_error dedisp_set_killmask(dedisp_plan plan, const dedisp_bool* killmask)
{
if( !plan ) { throw_error(DEDISP_INVALID_PLAN); }
if( cudaGetLastError() != cudaSuccess ) {
throw_error(DEDISP_PRIOR_GPU_ERROR);
}
if( 0 != killmask ) {
// Copy killmask to plan (both host and device)
plan->killmask.assign(killmask, killmask + plan->nchans);
try {
plan->d_killmask = plan->killmask;
}
catch(...) { throw_error(DEDISP_MEM_COPY_FAILED); }
}
else {
// Set the killmask to all true
std::fill(plan->killmask.begin(), plan->killmask.end(), (dedisp_bool)true);
thrust::fill(plan->d_killmask.begin(), plan->d_killmask.end(),
(dedisp_bool)true);
}
return DEDISP_NO_ERROR;
}
/*
dedisp_plan dedisp_set_stream(dedisp_plan plan, StreamType stream)
{
plan->stream = stream;
return plan;
}
*/
// Getters
// -------
dedisp_size dedisp_get_max_delay(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
if( 0 == plan->dm_count ) { throw_getter_error(DEDISP_NO_DM_LIST_SET,0); }
return plan->max_delay;
}
dedisp_size dedisp_get_dm_delay(const dedisp_plan plan, int dm_trial) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
if( 0 == plan->dm_count ) { throw_getter_error(DEDISP_NO_DM_LIST_SET,0); }
if (dm_trial < 0 || dm_trial >= int(plan->dm_count) ) { throw_getter_error(DEDISP_UNKNOWN_ERROR,0); }
return (plan->dm_list[dm_trial] * plan->delay_table[plan->nchans-1] + 0.5);
}
dedisp_size dedisp_get_channel_count(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
return plan->nchans;
}
dedisp_size dedisp_get_dm_count(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
return plan->dm_count;
}
const dedisp_float* dedisp_get_dm_list(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
if( 0 == plan->dm_count ) { throw_getter_error(DEDISP_NO_DM_LIST_SET,0); }
return &plan->dm_list[0];
}
const dedisp_bool* dedisp_get_killmask(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
return &plan->killmask[0];
}
dedisp_float dedisp_get_dt(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
return plan->dt;
}
dedisp_float dedisp_get_f0(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
return plan->f0;
}
dedisp_float dedisp_get_df(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
return plan->df;
}
// Warning: Big mother function
dedisp_error dedisp_execute_guru(const dedisp_plan plan,
dedisp_size nsamps,
const dedisp_byte* in,
dedisp_size in_nbits,
dedisp_size in_stride,
dedisp_byte* out,
dedisp_size out_nbits,
dedisp_size out_stride,
dedisp_size first_dm_idx,
dedisp_size dm_count,
unsigned flags)
{
if( !plan ) { throw_error(DEDISP_INVALID_PLAN); }
if( cudaGetLastError() != cudaSuccess ) {
throw_error(DEDISP_PRIOR_GPU_ERROR);
}
enum {
BITS_PER_BYTE = 8,
BYTES_PER_WORD = sizeof(dedisp_word) / sizeof(dedisp_byte)
};
dedisp_size out_bytes_per_sample = out_nbits / (sizeof(dedisp_byte) *
BITS_PER_BYTE);
if( 0 == in || 0 == out ) {
throw_error(DEDISP_INVALID_POINTER);
}
// Note: Must be careful with integer division
if( in_stride < plan->nchans*in_nbits/(sizeof(dedisp_byte)*BITS_PER_BYTE) ||
out_stride < (nsamps - plan->max_delay)*out_bytes_per_sample ) {
throw_error(DEDISP_INVALID_STRIDE);
}
if( 0 == plan->dm_count ) {
throw_error(DEDISP_NO_DM_LIST_SET);
}
if( nsamps < plan->max_delay ) {
throw_error(DEDISP_TOO_FEW_NSAMPS);
}
// Check for valid synchronisation flags
if( flags & DEDISP_ASYNC && flags & DEDISP_WAIT ) {
throw_error(DEDISP_INVALID_FLAG_COMBINATION);
}
// Check for valid nbits values
if( in_nbits != 1 &&
in_nbits != 2 &&
in_nbits != 4 &&
in_nbits != 8 &&
in_nbits != 16 &&
in_nbits != 32 ) {
throw_error(DEDISP_UNSUPPORTED_IN_NBITS);
}
if( out_nbits != 8 &&
out_nbits != 16 &&
out_nbits != 32 ) {
throw_error(DEDISP_UNSUPPORTED_OUT_NBITS);
}
bool using_host_memory;
if( flags & DEDISP_HOST_POINTERS && flags & DEDISP_DEVICE_POINTERS ) {
throw_error(DEDISP_INVALID_FLAG_COMBINATION);
}
else {
using_host_memory = !(flags & DEDISP_DEVICE_POINTERS);
}
// Copy the lookup tables to constant memory on the device
// TODO: This was much tidier, but thanks to CUDA's insistence on
// breaking its API in v5.0 I had to mess it up like this.
cudaMemcpyToSymbolAsync(c_delay_table,
thrust::raw_pointer_cast(&plan->d_delay_table[0]),
plan->nchans * sizeof(dedisp_float),
0, cudaMemcpyDeviceToDevice, 0);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if( error != cudaSuccess ) {
throw_error(DEDISP_MEM_COPY_FAILED);
}
cudaMemcpyToSymbolAsync(c_killmask,
thrust::raw_pointer_cast(&plan->d_killmask[0]),
plan->nchans * sizeof(dedisp_bool),
0, cudaMemcpyDeviceToDevice, 0);
cudaDeviceSynchronize();
error = cudaGetLastError();
if( error != cudaSuccess ) {
throw_error(DEDISP_MEM_COPY_FAILED);
}
// Compute the problem decomposition
dedisp_size nsamps_computed = nsamps - plan->max_delay;
// Specify the maximum gulp size
dedisp_size nsamps_computed_gulp_max;
if( using_host_memory ) {
nsamps_computed_gulp_max = min(plan->gulp_size, nsamps_computed);
}
else {
// Just do it in one gulp if given device pointers
nsamps_computed_gulp_max = nsamps_computed;
}
// Just to be sure
// TODO: This seems quite wrong. Why was it here?
/*
if( nsamps_computed_gulp_max < plan->max_delay ) {
throw_error(DEDISP_TOO_FEW_NSAMPS);
}
*/
// Compute derived counts for maximum gulp size [dedisp_word == 4 bytes]
dedisp_size nsamps_gulp_max = nsamps_computed_gulp_max + plan->max_delay;
dedisp_size chans_per_word = sizeof(dedisp_word)*BITS_PER_BYTE / in_nbits;
dedisp_size nchan_words = plan->nchans / chans_per_word;
// We use words for processing but allow arbitrary byte strides, which are
// not necessarily friendly.
bool friendly_in_stride = (0 == in_stride % BYTES_PER_WORD);
// Note: If desired, this could be rounded up, e.g., to a power of 2
dedisp_size in_buf_stride_words = nchan_words;
dedisp_size in_count_gulp_max = nsamps_gulp_max * in_buf_stride_words;
dedisp_size nsamps_padded_gulp_max = div_round_up(nsamps_computed_gulp_max,
DEDISP_SAMPS_PER_THREAD)
* DEDISP_SAMPS_PER_THREAD + plan->max_delay;
dedisp_size in_count_padded_gulp_max =
nsamps_padded_gulp_max * in_buf_stride_words;
// TODO: Make this a parameter?
dedisp_size min_in_nbits = 0;
if( plan->scrunching_enabled ) {
// TODO: This produces corrupt output when equal to 32 !
// Also check whether the unpacker is broken when in_nbits=32 !
min_in_nbits = 16; //32;
}
dedisp_size unpacked_in_nbits = max((int)in_nbits, (int)min_in_nbits);
dedisp_size unpacked_chans_per_word =
sizeof(dedisp_word)*BITS_PER_BYTE / unpacked_in_nbits;
dedisp_size unpacked_nchan_words = plan->nchans / unpacked_chans_per_word;
dedisp_size unpacked_buf_stride_words = unpacked_nchan_words;
dedisp_size unpacked_count_padded_gulp_max =
nsamps_padded_gulp_max * unpacked_buf_stride_words;
dedisp_size out_stride_gulp_samples = nsamps_computed_gulp_max;
dedisp_size out_stride_gulp_bytes =
out_stride_gulp_samples * out_bytes_per_sample;
dedisp_size out_count_gulp_max = out_stride_gulp_bytes * dm_count;
// Organise device memory pointers
// -------------------------------
const dedisp_word* d_in = 0;
dedisp_word* d_transposed = 0;
dedisp_word* d_unpacked = 0;
dedisp_byte* d_out = 0;
thrust::device_vector<dedisp_word> d_in_buf;
thrust::device_vector<dedisp_word> d_transposed_buf;
thrust::device_vector<dedisp_word> d_unpacked_buf;
thrust::device_vector<dedisp_byte> d_out_buf;
// Allocate temporary buffers on the device where necessary
if( using_host_memory || !friendly_in_stride ) {
try { d_in_buf.resize(in_count_gulp_max); }
catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); }
d_in = thrust::raw_pointer_cast(&d_in_buf[0]);
}
else {
d_in = (dedisp_word*)in;
}
if( using_host_memory ) {
try { d_out_buf.resize(out_count_gulp_max); }
catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); }
d_out = thrust::raw_pointer_cast(&d_out_buf[0]);
}
else {
d_out = out;
}
// Note: No factor of 2 is needed here: the time-scrunched copies are built
// from (and stored in) the unpacked buffer below, not the transposed buffer.
try { d_transposed_buf.resize(in_count_padded_gulp_max/* * 2 */); }
catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); }
d_transposed = thrust::raw_pointer_cast(&d_transposed_buf[0]);
// Note: * 2 here is for the time-scrunched copies of the data
try { d_unpacked_buf.resize(unpacked_count_padded_gulp_max * 2); }
catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); }
d_unpacked = thrust::raw_pointer_cast(&d_unpacked_buf[0]);
// -------------------------------
// The stride (in words) between differently-scrunched copies of the
// unpacked data.
dedisp_size scrunch_stride = unpacked_count_padded_gulp_max;
#ifdef USE_SUBBAND_ALGORITHM
dedisp_size sb_size = DEDISP_DEFAULT_SUBBAND_SIZE;
// Note: Setting these two parameters equal should balance the two steps of
// the sub-band algorithm.
dedisp_size dm_size = sb_size; // Ndm'
dedisp_size sb_count = plan->nchans / sb_size;
dedisp_size nom_dm_count = dm_count / dm_size;
thrust::device_vector<dedisp_word> d_intermediate_buf;
try { d_intermediate_buf.resize(nsamps_padded_gulp_max * sb_count
* nom_dm_count); }
catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); }
dedisp_word* d_intermediate = thrust::raw_pointer_cast(&d_intermediate_buf[0]);
#endif // USE_SUBBAND_ALGORITHM
// TODO: Eventually re-implement streams
cudaStream_t stream = 0;//(cudaStream_t)plan->stream;
#ifdef DEDISP_BENCHMARK
Stopwatch copy_to_timer;
Stopwatch copy_from_timer;
Stopwatch transpose_timer;
Stopwatch kernel_timer;
#endif
// Gulp loop
for( dedisp_size gulp_samp_idx=0;
gulp_samp_idx<nsamps_computed;
gulp_samp_idx+=nsamps_computed_gulp_max ) {
dedisp_size nsamps_computed_gulp = min(nsamps_computed_gulp_max,
nsamps_computed-gulp_samp_idx);
dedisp_size nsamps_gulp = nsamps_computed_gulp + plan->max_delay;
dedisp_size nsamps_padded_gulp = div_round_up(nsamps_computed_gulp,
DEDISP_SAMPS_PER_THREAD)
* DEDISP_SAMPS_PER_THREAD + plan->max_delay;
#ifdef DEDISP_BENCHMARK
copy_to_timer.start();
#endif
// Copy the input data from host to device if necessary
if( using_host_memory ) {
// Allowing arbitrary byte strides means we must do a strided copy
if( !copy_host_to_device_2d((dedisp_byte*)d_in,
in_buf_stride_words * BYTES_PER_WORD,
in + gulp_samp_idx*in_stride,
in_stride,
nchan_words * BYTES_PER_WORD,
nsamps_gulp) ) {
throw_error(DEDISP_MEM_COPY_FAILED);
}
}
else if( !friendly_in_stride ) {
// Device pointers with unfriendly stride
if( !copy_device_to_device_2d((dedisp_byte*)d_in,
in_buf_stride_words * BYTES_PER_WORD,
in + gulp_samp_idx*in_stride,
in_stride,
nchan_words * BYTES_PER_WORD,
nsamps_gulp) ) {
throw_error(DEDISP_MEM_COPY_FAILED);
}
}
#ifdef DEDISP_BENCHMARK
cudaDeviceSynchronize();
copy_to_timer.stop();
transpose_timer.start();
#endif
// Transpose the words in the input
Transpose<dedisp_word> transpose;
transpose.transpose(d_in,
nchan_words, nsamps_gulp,
in_buf_stride_words, nsamps_padded_gulp,
d_transposed);
#ifdef DEDISP_BENCHMARK
cudaDeviceSynchronize();
transpose_timer.stop();
kernel_timer.start();
#endif
// Unpack the transposed data
unpack(d_transposed, nsamps_padded_gulp, nchan_words,
d_unpacked,
in_nbits, unpacked_in_nbits);
// Compute time-scrunched copies of the data
if( plan->scrunching_enabled ) {
dedisp_size max_scrunch = plan->scrunch_list[plan->dm_count-1];
dedisp_size scrunch_in_offset = 0;
dedisp_size scrunch_out_offset = scrunch_stride;
for( dedisp_size s=2; s<=max_scrunch; s*=2 ) {
// TODO: Need to pass in stride and count? I.e., nsamps_padded/computed_gulp
//scrunch_x2(&d_transposed[scrunch_in_offset],
// nsamps_padded_gulp/(s/2), nchan_words, in_nbits,
// &d_transposed[scrunch_out_offset]);
scrunch_x2(&d_unpacked[scrunch_in_offset],
nsamps_padded_gulp/(s/2),
unpacked_nchan_words, unpacked_in_nbits,
&d_unpacked[scrunch_out_offset]);
scrunch_in_offset = scrunch_out_offset;
scrunch_out_offset += scrunch_stride / s;
}
}
#ifdef USE_SUBBAND_ALGORITHM
// TODO: This has not been updated to use d_unpacked!
dedisp_size chan_stride = 1;
dedisp_size dm_stride = dm_size;
dedisp_size ostride = nsamps_padded_gulp * sb_count;
dedisp_size batch_size = sb_count;
dedisp_size batch_in_stride = nsamps_padded_gulp * sb_size / chans_per_word;
dedisp_size batch_dm_stride = 0;
dedisp_size batch_chan_stride = sb_size;
dedisp_size batch_out_stride = nsamps_padded_gulp;
/* // Consistency checks
if( (nom_dm_count-1)*dm_stride + (batch_size-1)*batch_dm_stride >= dm_count ) {
throw std::runtime_error("DM STRIDES ARE INCONSISTENT");
}
if( (sb_size-1)*chan_stride + (batch_size-1)*batch_chan_stride >= plan->nchans ) {
throw std::runtime_error("CHAN STRIDES ARE INCONSISTENT");
}
*/
// Both steps
if( !dedisperse(d_transposed,
nsamps_padded_gulp,
nsamps_computed_gulp,
in_nbits,
sb_size,
chan_stride,
thrust::raw_pointer_cast(&plan->d_dm_list[first_dm_idx]),
nom_dm_count,
dm_stride,
(dedisp_byte*)d_intermediate,
ostride,
32,//out_nbits,
batch_size,
batch_in_stride,
batch_dm_stride,
batch_chan_stride,
batch_out_stride) ) {
throw_error(DEDISP_INTERNAL_GPU_ERROR);
}
batch_size = nom_dm_count;
chan_stride = sb_size;
dm_stride = 1;
ostride = out_stride_gulp_samples;
batch_in_stride = nsamps_padded_gulp * sb_count;
batch_dm_stride = 0;
batch_chan_stride = 0;
batch_out_stride = out_stride_gulp_samples * dm_size;
/* // Consistency checks
if( (dm_size-1)*dm_stride + (batch_size-1)*batch_dm_stride >= dm_count ) {
throw std::runtime_error("DM STRIDES ARE INCONSISTENT");
}
if( (sb_count-1)*chan_stride + (batch_size-1)*batch_chan_stride >= plan->nchans ) {
throw std::runtime_error("CHAN STRIDES ARE INCONSISTENT");
}
*/
if( !dedisperse(d_intermediate,
nsamps_padded_gulp,
nsamps_computed_gulp,
32,//in_nbits,
sb_count,
chan_stride,
thrust::raw_pointer_cast(&plan->d_dm_list[first_dm_idx]),
dm_size,
dm_stride,
d_out,
ostride,
out_nbits,
batch_size,
batch_in_stride,
batch_dm_stride,
batch_chan_stride,
batch_out_stride) ) {
throw_error(DEDISP_INTERNAL_GPU_ERROR);
}
#else // Use direct algorithm
if( plan->scrunching_enabled ) {
// TODO: THIS WILL NOT WORK IF dm_count < plan->dm_count !
// Need to avoid assumption that scrunch starts at 1
// Must start the scrunch at the first *requested* DM
thrust::device_vector<dedisp_float> d_scrunched_dm_list(dm_count);
dedisp_size scrunch_start = 0;
dedisp_size scrunch_offset = 0;
for( dedisp_size s=0; s<dm_count; ++s ) {
dedisp_size cur_scrunch = plan->scrunch_list[s];
// Look for segment boundaries
if( s+1 == dm_count || plan->scrunch_list[s+1] != cur_scrunch ) {
//dedisp_size next_scrunch = plan->scrunch_list[s];
//if( next_scrunch != cur_scrunch ) {
dedisp_size scrunch_count = s+1 - scrunch_start;
// Make a copy of the dm list divided by the scrunch factor
// Note: This has the effect of increasing dt in the delay eqn
dedisp_size dm_offset = first_dm_idx + scrunch_start;
thrust::transform(plan->d_dm_list.begin() + dm_offset,
plan->d_dm_list.begin() + dm_offset + scrunch_count,
thrust::make_constant_iterator(cur_scrunch),
d_scrunched_dm_list.begin(),
thrust::divides<dedisp_float>());
dedisp_float* d_scrunched_dm_list_ptr =
thrust::raw_pointer_cast(&d_scrunched_dm_list[0]);
// TODO: Is this how the nsamps vars need to change?
if( !dedisperse(//&d_transposed[scrunch_offset],
&d_unpacked[scrunch_offset],
nsamps_padded_gulp / cur_scrunch,
nsamps_computed_gulp / cur_scrunch,
unpacked_in_nbits, //in_nbits,
plan->nchans,
1,
d_scrunched_dm_list_ptr,
scrunch_count, // dm_count
1,
d_out + scrunch_start*out_stride_gulp_bytes,
out_stride_gulp_samples,
out_nbits,
1, 0, 0, 0, 0) ) {
throw_error(DEDISP_INTERNAL_GPU_ERROR);
}
scrunch_offset += scrunch_stride / cur_scrunch;
scrunch_start += scrunch_count;
}
}
}
else {
// Perform direct dedispersion without scrunching
if( !dedisperse(//d_transposed,
d_unpacked,
nsamps_padded_gulp,
nsamps_computed_gulp,
unpacked_in_nbits, //in_nbits,
plan->nchans,
1,
thrust::raw_pointer_cast(&plan->d_dm_list[first_dm_idx]),
dm_count,
1,
d_out,
out_stride_gulp_samples,
out_nbits,
1, 0, 0, 0, 0) ) {
throw_error(DEDISP_INTERNAL_GPU_ERROR);
}
}
#endif // SB/direct algorithm
#ifdef DEDISP_BENCHMARK
cudaDeviceSynchronize();
kernel_timer.stop();
#endif
// Copy output back to host memory if necessary
if( using_host_memory ) {
dedisp_size gulp_samp_byte_idx = gulp_samp_idx * out_bytes_per_sample;
dedisp_size nsamp_bytes_computed_gulp = nsamps_computed_gulp * out_bytes_per_sample;
#ifdef DEDISP_BENCHMARK
copy_from_timer.start();
#endif
if( plan->scrunching_enabled ) {
// TODO: This for-loop isn't a very elegant solution
dedisp_size scrunch_start = 0;
for( dedisp_size s=0; s<dm_count; ++s ) {
dedisp_size cur_scrunch = plan->scrunch_list[s];
// Look for segment boundaries
if( s+1 == dm_count || plan->scrunch_list[s+1] != cur_scrunch ) {
dedisp_size scrunch_count = s+1 - scrunch_start;
dedisp_size src_stride = out_stride_gulp_bytes;
dedisp_byte* src = d_out + scrunch_start * src_stride;
dedisp_byte* dst = (out + scrunch_start * out_stride
+ gulp_samp_byte_idx / cur_scrunch);
dedisp_size width = nsamp_bytes_computed_gulp / cur_scrunch;
dedisp_size height = scrunch_count;
copy_device_to_host_2d(dst, // dst
out_stride, // dst stride
src, // src
src_stride, // src stride
width, // width bytes
height); // height
scrunch_start += scrunch_count;
}
}
}
else {
copy_device_to_host_2d(out + gulp_samp_byte_idx, // dst
out_stride, // dst stride
d_out, // src
out_stride_gulp_bytes, // src stride
nsamp_bytes_computed_gulp, // width bytes
dm_count); // height
}
#ifdef DEDISP_BENCHMARK
cudaDeviceSynchronize();
copy_from_timer.stop();
#endif
}
} // End of gulp loop
#ifdef DEDISP_BENCHMARK
cout << "Copy to time: " << copy_to_timer.getTime() << endl;
cout << "Copy from time: " << copy_from_timer.getTime() << endl;
cout << "Transpose time: " << transpose_timer.getTime() << endl;
cout << "Kernel time: " << kernel_timer.getTime() << endl;
float total_time = copy_to_timer.getTime() + copy_from_timer.getTime() + transpose_timer.getTime() + kernel_timer.getTime();
cout << "Total time: " << total_time << endl;
// Append the timing results to a log file
std::ofstream perf_file("perf.log", std::ios::app);
perf_file << copy_to_timer.getTime() << "\t"
<< copy_from_timer.getTime() << "\t"
<< transpose_timer.getTime() << "\t"
<< kernel_timer.getTime() << "\t"
<< total_time << endl;
perf_file.close();
#endif
if( !(flags & DEDISP_ASYNC) ) {
cudaStreamSynchronize(stream);
}
// Phew!
return DEDISP_NO_ERROR;
}
dedisp_error dedisp_execute_adv(const dedisp_plan plan,
dedisp_size nsamps,
const dedisp_byte* in,
dedisp_size in_nbits,
dedisp_size in_stride,
dedisp_byte* out,
dedisp_size out_nbits,
dedisp_size out_stride,
unsigned flags)
{
dedisp_size first_dm_idx = 0;
dedisp_size dm_count = plan->dm_count;
return dedisp_execute_guru(plan, nsamps,
in, in_nbits, in_stride,
out, out_nbits, out_stride,
first_dm_idx, dm_count,
flags);
}
// TODO: Consider having the user specify nsamps_computed instead of nsamps
dedisp_error dedisp_execute(const dedisp_plan plan,
dedisp_size nsamps,
const dedisp_byte* in,
dedisp_size in_nbits,
dedisp_byte* out,
dedisp_size out_nbits,
unsigned flags)
{
enum {
BITS_PER_BYTE = 8
};
// Note: The default out_stride is nsamps - plan->max_delay
dedisp_size out_bytes_per_sample =
out_nbits / (sizeof(dedisp_byte) * BITS_PER_BYTE);
// Note: Must be careful with integer division
dedisp_size in_stride =
plan->nchans * in_nbits / (sizeof(dedisp_byte) * BITS_PER_BYTE);
dedisp_size out_stride = (nsamps - plan->max_delay) * out_bytes_per_sample;
return dedisp_execute_adv(plan, nsamps,
in, in_nbits, in_stride,
out, out_nbits, out_stride,
flags);
}
dedisp_error dedisp_sync(void)
{
if( cudaDeviceSynchronize() != cudaSuccess )
throw_error(DEDISP_PRIOR_GPU_ERROR);
else
return DEDISP_NO_ERROR;
}
void dedisp_destroy_plan(dedisp_plan plan)
{
if( plan ) {
delete plan;
}
}
const char* dedisp_get_error_string(dedisp_error error)
{
switch( error ) {
case DEDISP_NO_ERROR:
return "No error";
case DEDISP_MEM_ALLOC_FAILED:
return "Memory allocation failed";
case DEDISP_MEM_COPY_FAILED:
return "Memory copy failed";
case DEDISP_INVALID_DEVICE_INDEX:
return "Invalid device index";
case DEDISP_DEVICE_ALREADY_SET:
return "Device is already set and cannot be changed";
case DEDISP_NCHANS_EXCEEDS_LIMIT:
return "No. channels exceeds internal limit";
case DEDISP_INVALID_PLAN:
return "Invalid plan";
case DEDISP_INVALID_POINTER:
return "Invalid pointer";
case DEDISP_INVALID_STRIDE:
return "Invalid stride";
case DEDISP_NO_DM_LIST_SET:
return "No DM list has been set";
case DEDISP_TOO_FEW_NSAMPS:
return "No. samples < maximum delay";
case DEDISP_INVALID_FLAG_COMBINATION:
return "Invalid flag combination";
case DEDISP_UNSUPPORTED_IN_NBITS:
return "Unsupported in_nbits value";
case DEDISP_UNSUPPORTED_OUT_NBITS:
return "Unsupported out_nbits value";
case DEDISP_PRIOR_GPU_ERROR:
return "Prior GPU error.";
case DEDISP_INTERNAL_GPU_ERROR:
return "Internal GPU error. Please contact the author(s).";
case DEDISP_UNKNOWN_ERROR:
return "Unknown error. Please contact the author(s).";
default:
return "Invalid error code";
}
}
dedisp_error dedisp_enable_adaptive_dt(dedisp_plan plan,
dedisp_float pulse_width,
dedisp_float tol)
{
if( !plan ) { throw_error(DEDISP_INVALID_PLAN); }
plan->scrunching_enabled = true;
plan->pulse_width = pulse_width;
plan->scrunch_tol = tol;
return update_scrunch_list(plan);
}
dedisp_error dedisp_disable_adaptive_dt(dedisp_plan plan) {
if( !plan ) { throw_error(DEDISP_INVALID_PLAN); }
plan->scrunching_enabled = false;
return update_scrunch_list(plan);
}
dedisp_bool dedisp_using_adaptive_dt(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,false); }
return plan->scrunching_enabled;
}
const dedisp_size* dedisp_get_dt_factors(const dedisp_plan plan) {
if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); }
if( 0 == plan->dm_count ) { throw_getter_error(DEDISP_NO_DM_LIST_SET,0); }
return &plan->scrunch_list[0];
}
// ----------------
|
0851549d8e22770245c552ab370698672327feac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "2dfft.hpp"
using namespace std;
/* Calculate the 2D FFT with hipFFT */
float fft_cuda(double** idata, double** odata, int Nx, int Ny) {
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
/* Allocate memory for data on device, then copy data */
double *idata_c, *odata_c;
hipMalloc(&odata_c, sizeof(double) * Nx * Ny);
hipMalloc(&idata_c, sizeof(double) * Nx * Ny);
hipfftDoubleComplex *idata_cx, *odata_cx;
hipMalloc(&idata_cx, sizeof(hipfftDoubleComplex) * Nx * Ny);
hipMalloc(&odata_cx, sizeof(hipfftDoubleComplex) * Nx * Ny);
for (int i=0; i<Nx; i++) {
hipMemcpy(&idata_c[i*Ny], &idata[i][0], sizeof(double)*Ny, hipMemcpyHostToDevice);
}
/* Convert data into hipfftDoubleComplex */
/* launch 1 block of 8 threads; the grid-stride loop in the kernel covers all Nx*Ny elements */
hipLaunchKernelGGL(( real2complex), dim3(1), dim3(8), 0, 0, idata_c, idata_cx, Nx*Ny);
hipDeviceSynchronize();
/* FFT Plans */
hipfftHandle plan;
hipfftPlan2d(&plan, Nx, Ny, HIPFFT_Z2Z);
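/* Note: HIPFFT_Z2Z selects a double-precision complex-to-complex transform,
   matching the hipfftDoubleComplex buffers allocated above. */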
// for (int i=0; i<Nx; i++) {
// for (int j=0; j<Ny; j++) {
// printf("%d", i*Nx+j);
// printf("%f", idata_c[i*Nx+j]);
// }
// printf("\n");
// }
// auto start = chrono::high_resolution_clock::now();
/* Forward FFT */
hipEventRecord(start);
hipfftExecZ2Z(plan, idata_cx, odata_cx, HIPFFT_FORWARD);
hipEventRecord(stop);
hipEventSynchronize(stop);
/* stop the time */
/* std::chrono::_V2::system_clock::time_point finish */
float duration = 0; // milliseconds
hipEventElapsedTime(&duration, start, stop);
/* Convert the FFT output back to a double array */
/* set 1 block with 8 threads */
hipLaunchKernelGGL(( complex2real), dim3(1), dim3(8), 0, 0, odata_cx, odata_c, Nx*Ny);
hipDeviceSynchronize();
for (int i=0; i<Nx; i++) {
hipMemcpy(&odata[i][0], &odata_c[i*Ny], sizeof(double)*Ny, hipMemcpyDeviceToHost);
}
hipfftDestroy(plan);
hipFree(idata_c);
hipFree(odata_c);
hipFree(idata_cx);
hipFree(odata_cx);
return duration;
}
/* convert a double array to the hipfftDoubleComplex data type. Imaginary parts are
* set to 0
*/
__global__ void real2complex(double *f, hipfftDoubleComplex *fc, int N) {
/* Assume 1D grid of 1D blocks */
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
while (index < N) {
fc[index].x = f[index];
fc[index].y = 0;
index += stride;
}
}
/* convert a hipfftDoubleComplex array back to a double array.
*/
__global__ void complex2real(hipfftDoubleComplex *fc, double *f, int N) {
/* Assume 1D grid of 1D blocks */
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
while (index < N) {
f[index] = fc[index].x;
index += stride;
}
} | 0851549d8e22770245c552ab370698672327feac.cu | #include "2dfft.hpp"
using namespace std;
/* Calculate the 2D FFT with cuFFT */
float fft_cuda(double** idata, double** odata, int Nx, int Ny) {
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
/* Allocate memory for data on device, then copy data */
double *idata_c, *odata_c;
cudaMalloc(&odata_c, sizeof(double) * Nx * Ny);
cudaMalloc(&idata_c, sizeof(double) * Nx * Ny);
cufftDoubleComplex *idata_cx, *odata_cx;
cudaMalloc(&idata_cx, sizeof(cufftDoubleComplex) * Nx * Ny);
cudaMalloc(&odata_cx, sizeof(cufftDoubleComplex) * Nx * Ny);
for (int i=0; i<Nx; i++) {
cudaMemcpy(&idata_c[i*Ny], &idata[i][0], sizeof(double)*Ny, cudaMemcpyHostToDevice);
}
/* Convert data into cufftDoubleComplex */
/* launch 1 block of 8 threads; the grid-stride loop in the kernel covers all Nx*Ny elements */
real2complex<<<1, 8>>>(idata_c, idata_cx, Nx*Ny);
cudaDeviceSynchronize();
/* FFT Plans */
cufftHandle plan;
cufftPlan2d(&plan, Nx, Ny, CUFFT_Z2Z);
// for (int i=0; i<Nx; i++) {
// for (int j=0; j<Ny; j++) {
// printf("%d", i*Nx+j);
// printf("%f", idata_c[i*Nx+j]);
// }
// printf("\n");
// }
// auto start = chrono::high_resolution_clock::now();
/* Forward FFT */
cudaEventRecord(start);
cufftExecZ2Z(plan, idata_cx, odata_cx, CUFFT_FORWARD);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
/* stop the time */
/* std::chrono::_V2::system_clock::time_point finish */
float duration = 0; // milliseconds
cudaEventElapsedTime(&duration, start, stop);
/* Convert the FFT output back to a double array */
/* set 1 block with 8 threads */
complex2real<<<1, 8>>>(odata_cx, odata_c, Nx*Ny);
cudaDeviceSynchronize();
for (int i=0; i<Nx; i++) {
cudaMemcpy(&odata[i][0], &odata_c[i*Ny], sizeof(double)*Ny, cudaMemcpyDeviceToHost);
}
cufftDestroy(plan);
cudaFree(idata_c);
cudaFree(odata_c);
cudaFree(idata_cx);
cudaFree(odata_cx);
return duration;
}
/* convert a double array to the cufftDoubleComplex data type. Imaginary parts are
* set to 0
*/
__global__ void real2complex(double *f, cufftDoubleComplex *fc, int N) {
/* Assume 1D grid of 1D blocks */
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
while (index < N) {
fc[index].x = f[index];
fc[index].y = 0;
index += stride;
}
}
/* convert a cufftDoubleComplex array back to a double array.
*/
__global__ void complex2real(cufftDoubleComplex *fc, double *f, int N) {
/* Assume 1D grid of 1D blocks */
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
while (index < N) {
f[index] = fc[index].x;
index += stride;
}
} |
108c7d7ff520e9a9696f2bd6a4a1249212c3c365.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iostream>
#include <numeric>
#include <map>
#include <time.h>
#include <valarray>
#include <string>
#include <hdf5.h>
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_fp16.h>
#include "range.hpp"
#include "utils.hpp"
#define NUM_ROWS 28
#define NUM_COLS 28
#define NUM_CHANNELS 1
#define NUM_DIGITS 10
#define TILE_WIDTH 32
#define MAX_THREADS 1024
static int FLAGS_batch_size = 10000;
static std::string FLAGS_testdata{};
static std::string FLAGS_model{};
// Data and reference data dimensions
static int xdims[] = { FLAGS_batch_size, NUM_ROWS, NUM_COLS, NUM_CHANNELS };
static int rdims[] = { FLAGS_batch_size, NUM_DIGITS };
// Model dimensions
static int conv1dims[] = { 5, 5, 1, 32 };
static int conv2dims[] = { 5, 5, 32, 64 };
static int fc1dims[] = { 1024, 128 };
static int fc2dims[] = { 128, 10 };
struct dims {
int dim[4];
};
__global__ void unroll_x_kernel(float *X, float *X_unroll, dims x, dims w, dims y) {
int xoffset, uoffset;
int index = blockDim.x * blockIdx.x + threadIdx.x;
int n = blockIdx.y;
int H_filter = w.dim[0];
int W_filter = w.dim[1];
int H_out = y.dim[1];
int W_out = y.dim[2];
int C = w.dim[2];
int W_unroll = H_out * W_out;
int H_unroll = C * H_filter * W_filter;
int c = index / W_unroll;
int s = index % W_unroll;
int h_out = s / W_out;
int w_out = s % W_out;
if (index < C * W_unroll) {
for (int p = 0; p < H_filter; p++) {
for (int q = 0; q < W_filter; q++) {
uoffset = (n * H_unroll + (c * H_filter * W_filter + p * W_filter + q)) * W_unroll + s;
xoffset = ((n * x.dim[1] + (h_out + p)) * x.dim[2] + (w_out + q)) * x.dim[3] + c;
X_unroll[uoffset] = X[xoffset];
}
}
}
}
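// Note: This is the standard im2col-style unrolling: for each image n, the
// input is rewritten as a (C * H_filter * W_filter) x (H_out * W_out) matrix
// so that the convolution reduces to a single matrix multiply against the
// M x (C * H_filter * W_filter) filter matrix produced by unroll_weights below.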
__global__ void reroll_y_kernel(float *Y, float *Y_roll, dims y) {
int yoffset, roffset;
int index = blockDim.x * blockIdx.x + threadIdx.x;
int n = blockIdx.y;
int y_roll_row = index / (y.dim[1] * y.dim[2]);
int y_roll_col = index % (y.dim[1] * y.dim[2]);
int y_row = y_roll_col / y.dim[2];
int y_col = y_roll_col % y.dim[2];
int y_width = y.dim[1] * y.dim[2];
int y_height = y.dim[3];
if (index < y.dim[1] * y.dim[2] * y.dim[3]) {
roffset = ((n * y.dim[1] + y_row) * y.dim[2] + y_col) * y.dim[3] + y_roll_row;
yoffset = (n * y_height + y_roll_row) * y_width + y_roll_col;
Y_roll[roffset] = (Y[yoffset] < 0) ? 0 : Y[yoffset];
}
}
__global__ void matrixMultiplyShared(float *A, float *B, float *C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
__shared__ float subTileA[TILE_WIDTH][TILE_WIDTH];
__shared__ float subTileB[TILE_WIDTH][TILE_WIDTH];
int n = blockIdx.z;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by * blockDim.y + ty;
int Col = bx * blockDim.x + tx;
__half Cvalue = __float2half(0.0);
// float Cvalue = 0.0;
int numOfTiles = numAColumns / TILE_WIDTH;
if (numAColumns % TILE_WIDTH) numOfTiles++;
for (int m = 0; m < numOfTiles; m++) {
if ((m * TILE_WIDTH + tx < numAColumns) && (Row < numARows)) {
subTileA[ty][tx] = A[Row * numAColumns + m * TILE_WIDTH + tx];
}
else {
subTileA[ty][tx] = 0.0;
}
if ((m * TILE_WIDTH + ty < numBRows) && (Col < numBColumns)) {
subTileB[ty][tx] = B[(n * numBRows + (m * TILE_WIDTH + ty)) * numBColumns + Col];
}
else {
subTileB[ty][tx] = 0.0;
}
__syncthreads();
#pragma unroll
for (int k = 0; k < TILE_WIDTH; k++) {
__half a = __float2half(subTileA[ty][k]);
__half b = __float2half(subTileB[k][tx]);
Cvalue += __hmul(a, b);
}
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) {
C[(n * numCRows + Row) * numCColumns + Col] = __half2float(Cvalue);
}
}
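// Note: The inner product above is accumulated in half precision (__half /
// __hmul), which trades some numerical accuracy for speed and requires a GPU
// with native FP16 arithmetic (compute capability 5.3 or newer on NVIDIA
// hardware).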
__global__ void average_pool_kernel(float *X, float *Y, int pool_size, dims x, dims y) {
int xoffset, yoffset;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int n = blockIdx.y;
int h = index / (y.dim[2] * y.dim[3]);
int w = (index % (y.dim[2] * y.dim[3])) / y.dim[3];
int m = (index % (y.dim[2] * y.dim[3])) % y.dim[3];
float acc = 0;
float size = (float)(pool_size * pool_size);
if (index < y.dim[1] * y.dim[2] * y.dim[3]) {
for (int p = 0; p < pool_size; p++) {
for (int q = 0; q < pool_size; q++) {
xoffset = ((n * x.dim[1] + (pool_size * h + p)) * x.dim[2] + (pool_size * w + q)) * x.dim[3] + m;
acc += X[xoffset] / size;
}
}
yoffset = ((n * y.dim[1] + h) * y.dim[2] + w) * y.dim[3] + m;
Y[yoffset] = acc;
}
}
__global__ void relu_kernel(float *X, int size) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
X[i] = (X[i] < 0) ? 0 : X[i];
}
}
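// Note: The convolution path already applies ReLU inside reroll_y_kernel
// (negative values are clamped to zero there), so this standalone kernel is
// presumably intended for the fully-connected layers.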
void unroll_weights(const float *W, float *W_unroll, dims w) {
int c, m, row, col;
int unroll_offset, offset;
int filter_h = w.dim[0];
int filter_w = w.dim[1];
int C = w.dim[2];
int M = w.dim[3];
for (row = 0; row < filter_h; row++) {
for (col = 0; col < filter_w; col++) {
for (c = 0; c < C; c++) {
for (m = 0; m < M; m++) {
unroll_offset = ((m * C + c) * filter_h + row) * filter_w + col;
offset = ((row * filter_w + col) * C + c) * M + m;
W_unroll[unroll_offset] = W[offset];
}
}
}
}
}
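// Note: The filters arrive in (H_filter, W_filter, C, M) layout; this routine
// rewrites them as an M x (C * H_filter * W_filter) row-major matrix whose
// row m can be multiplied directly against the unrolled input columns.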
void conv_forward_unroll(const float *x, const float *w, const float *w2, float *fc1, float *fc2, float *out, const int xdims[4], const int wdims[4], const int ydims[4], const int pooldims[4], const int w2dims[4], const int y2dims[4], const int pool2dims[4], const int w3dims[2], const int y3dims[2], const int w4dims[2], const int y4dims[2], int pool_size) {
float *device_x, *device_y, *device_pool, *device_x_unroll, *device_w_unroll, *device_y_unroll;
float *device_y_2, *device_pool_2, *device_x_unroll_2, *device_w_unroll_2, *device_y_unroll_2;
float *device_fc1, *device_fc2, *device_fully, *device_out;
dims y_d, w_d, x_d, pool_d;
dims y_d_2, w_d_2, pool_d_2;
for (int i = 0; i < 4; i++) {
x_d.dim[i] = xdims[i];
y_d.dim[i] = ydims[i];
w_d.dim[i] = wdims[i];
pool_d.dim[i] = pooldims[i];
y_d_2.dim[i] = y2dims[i];
w_d_2.dim[i] = w2dims[i];
pool_d_2.dim[i] = pool2dims[i];
}
int size_w_3 = sizeof(float) * w3dims[0] * w3dims[1];
int size_y_3 = sizeof(float) * y3dims[0] * y3dims[1];
int size_w_4 = sizeof(float) * w4dims[0] * w4dims[1];
int size_y_4 = sizeof(float) * y4dims[0] * y4dims[1];
int numARows3 = pool2dims[0], numAColumns3 = pool2dims[1] * pool2dims[2] * pool2dims[3];
int numBRows3 = w3dims[0], numBColumns3 = w3dims[1];
int numCRows3 = y3dims[0], numCColumns3 = y3dims[1];
int numARows4 = y3dims[0], numAColumns4 = y3dims[1];
int numBRows4 = w4dims[0], numBColumns4 = w4dims[1];
int numCRows4 = y4dims[0], numCColumns4 = y4dims[1];
int numAColumns = wdims[0] * wdims[1] * wdims[2], numARows = ydims[3];
int numBColumns = ydims[1] * ydims[2], numBRows = wdims[0] * wdims[1] * wdims[2];
int numCColumns = numBColumns, numCRows = numARows;
int numAColumns2 = w2dims[0] * w2dims[1] * w2dims[2], numARows2 = y2dims[3];
int numBColumns2 = y2dims[1] * y2dims[2], numBRows2 = w2dims[0] * w2dims[1] * w2dims[2];
int numCColumns2 = numBColumns2, numCRows2 = numARows2;
int size_x = sizeof(float) * xdims[0] * xdims[1] * xdims[2] * xdims[3];
int size_y = sizeof(float) * ydims[0] * ydims[1] * ydims[2] * ydims[3];
int size_pool = sizeof(float) * pooldims[0] * pooldims[1] * pooldims[2] * pooldims[3];
int size_x_unroll = sizeof(float) * xdims[0] * wdims[0] * wdims[1] * wdims[2] * ydims[1] * ydims[2];
int size_w_unroll = sizeof(float) * wdims[0] * wdims[1] * wdims[2] * ydims[3];
int size_y_unroll = sizeof(float) * xdims[0] * ydims[1] * ydims[2] * ydims[3];
int size_y_2 = sizeof(float) * y2dims[0] * y2dims[1] * y2dims[2] * y2dims[3];
int size_pool_2 = sizeof(float) * pool2dims[0] * pool2dims[1] * pool2dims[2] * pool2dims[3];
int size_x_unroll_2 = sizeof(float) * pooldims[0] * w2dims[0] * w2dims[1] * w2dims[2] * y2dims[1] * y2dims[2];
int size_w_unroll_2 = sizeof(float) * w2dims[0] * w2dims[1] * w2dims[2] * y2dims[3];
int size_y_unroll_2 = sizeof(float) * pooldims[0] * y2dims[1] * y2dims[2] * y2dims[3];
hipMalloc((void **)&device_x, size_x);
hipMalloc((void **)&device_y, size_y);
hipMalloc((void **)&device_pool, size_pool);
hipMalloc((void **)&device_x_unroll, size_x_unroll);
hipMalloc((void **)&device_w_unroll, size_w_unroll);
hipMalloc((void **)&device_y_unroll, size_y_unroll);
hipMalloc((void **)&device_y_2, size_y_2);
hipMalloc((void **)&device_pool_2, size_pool_2);
hipMalloc((void **)&device_x_unroll_2, size_x_unroll_2);
hipMalloc((void **)&device_w_unroll_2, size_w_unroll_2);
hipMalloc((void **)&device_y_unroll_2, size_y_unroll_2);
hipMalloc((void **)&device_fc1, size_w_3);
hipMalloc((void **)&device_fully, size_y_3);
hipMalloc((void **)&device_fc2, size_w_4);
hipMalloc((void **)&device_out, size_y_4);
float * w_unroll = (float *)malloc(size_w_unroll);
unroll_weights(w, w_unroll, w_d);
float * w_unroll_2 = (float *)malloc(size_w_unroll_2);
unroll_weights(w2, w_unroll_2, w_d_2);
hipMemcpy(device_x, x, size_x, hipMemcpyHostToDevice);
hipMemcpy(device_w_unroll, w_unroll, size_w_unroll, hipMemcpyHostToDevice);
hipMemcpy(device_w_unroll_2, w_unroll_2, size_w_unroll_2, hipMemcpyHostToDevice);
hipMemcpy(device_fc1, fc1, size_w_3, hipMemcpyHostToDevice);
hipMemcpy(device_fc2, fc2, size_w_4, hipMemcpyHostToDevice);
dim3 DimBlock_unroll_x(MAX_THREADS, 1, 1);
dim3 DimGrid_unroll_x(ceil((float)(wdims[2] * ydims[1] * ydims[2]) / MAX_THREADS), xdims[0], 1);
dim3 DimBlock_matmul(TILE_WIDTH, TILE_WIDTH, 1);
dim3 DimGrid_matmul(ceil((float)(ydims[1] * ydims[2]) / TILE_WIDTH), ceil((float)(ydims[3]) / TILE_WIDTH), xdims[0]);
dim3 DimBlock_reroll_y(MAX_THREADS, 1, 1);
dim3 DimGrid_reroll_y(ceil((float)(ydims[1] * ydims[2] * ydims[3]) / MAX_THREADS), xdims[0], 1);
dim3 DimBlock_pool(MAX_THREADS, 1, 1);
dim3 DimGrid_pool(ceil((float)(pooldims[1] * pooldims[2] * pooldims[3]) / MAX_THREADS), xdims[0], 1);
dim3 DimBlock_unroll_x_2(MAX_THREADS, 1, 1);
dim3 DimGrid_unroll_x_2(ceil((float)(w2dims[2] * y2dims[1] * y2dims[2]) / MAX_THREADS), pooldims[0], 1);
dim3 DimBlock_matmul_2(TILE_WIDTH, TILE_WIDTH, 1);
dim3 DimGrid_matmul_2(ceil((float)(y2dims[1] * y2dims[2]) / TILE_WIDTH), ceil((float)(y2dims[3]) / TILE_WIDTH), pooldims[0]);
dim3 DimBlock_reroll_y_2(MAX_THREADS, 1, 1);
dim3 DimGrid_reroll_y_2(ceil((float)(y2dims[1] * y2dims[2] * y2dims[3]) / MAX_THREADS), pooldims[0], 1);
dim3 DimBlock_pool_2(MAX_THREADS, 1, 1);
dim3 DimGrid_pool_2(ceil((float)(pool2dims[1] * pool2dims[2] * pool2dims[3]) / MAX_THREADS), pooldims[0], 1);
dim3 DimBlock_fully(TILE_WIDTH, TILE_WIDTH, 1);
dim3 DimGrid_fully(ceil((float)(numCColumns3) / TILE_WIDTH), ceil((float)(numCRows3) / TILE_WIDTH), 1);
dim3 DimBlock_fully_2(TILE_WIDTH, TILE_WIDTH, 1);
dim3 DimGrid_fully_2(ceil((float)(numCColumns4) / TILE_WIDTH), ceil((float)(numCRows4) / TILE_WIDTH), 1);
hipLaunchKernelGGL(( unroll_x_kernel) , dim3(DimGrid_unroll_x), dim3(DimBlock_unroll_x), 0, 0, device_x, device_x_unroll, x_d, w_d, y_d);
hipLaunchKernelGGL(( matrixMultiplyShared) , dim3(DimGrid_matmul), dim3(DimBlock_matmul), 0, 0, device_w_unroll, device_x_unroll, device_y_unroll, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
hipLaunchKernelGGL(( reroll_y_kernel) , dim3(DimGrid_reroll_y), dim3(DimBlock_reroll_y), 0, 0, device_y_unroll, device_y, y_d);
hipLaunchKernelGGL(( average_pool_kernel) , dim3(DimGrid_pool), dim3(DimBlock_pool), 0, 0, device_y, device_pool, pool_size, y_d, pool_d);
hipLaunchKernelGGL(( unroll_x_kernel) , dim3(DimGrid_unroll_x_2), dim3(DimBlock_unroll_x_2), 0, 0, device_pool, device_x_unroll_2, pool_d, w_d_2, y_d_2);
// hipEvent_t start, stop;
// float time;
// hipEventCreate(&start);
// hipEventCreate(&stop);
// hipEventRecord(start, NULL);
hipLaunchKernelGGL(( matrixMultiplyShared) , dim3(DimGrid_matmul_2), dim3(DimBlock_matmul_2), 0, 0, device_w_unroll_2, device_x_unroll_2, device_y_unroll_2, numARows2, numAColumns2, numBRows2, numBColumns2, numCRows2, numCColumns2);
// hipEventRecord(stop, NULL);
// hipEventSynchronize(stop);
// hipEventElapsedTime(&time, start, stop);
// printf("%f\n", time);
hipLaunchKernelGGL(( reroll_y_kernel) , dim3(DimGrid_reroll_y_2), dim3(DimBlock_reroll_y_2), 0, 0, device_y_unroll_2, device_y_2, y_d_2);
hipLaunchKernelGGL(( average_pool_kernel) , dim3(DimGrid_pool_2), dim3(DimBlock_pool_2), 0, 0, device_y_2, device_pool_2, pool_size, y_d_2, pool_d_2);
hipLaunchKernelGGL(( matrixMultiplyShared) , dim3(DimGrid_fully), dim3(DimBlock_fully), 0, 0, device_pool_2, device_fc1, device_fully, numARows3, numAColumns3, numBRows3, numBColumns3, numCRows3, numCColumns3);
hipLaunchKernelGGL(( relu_kernel) , dim3(ceil((float)(y3dims[0] * y3dims[1]) / MAX_THREADS)), dim3(MAX_THREADS), 0, 0, device_fully, y3dims[0] * y3dims[1]);
hipLaunchKernelGGL(( matrixMultiplyShared) , dim3(DimGrid_fully_2), dim3(DimBlock_fully_2), 0, 0, device_fully, device_fc2, device_out, numARows4, numAColumns4, numBRows4, numBColumns4, numCRows4, numCColumns4);
hipMemcpy(out, device_out, size_y_4, hipMemcpyDeviceToHost);
hipFree(device_fc1);
hipFree(device_fc2);
hipFree(device_fully);
hipFree(device_out);
hipFree(device_y_2);
hipFree(device_pool_2);
hipFree(device_y_unroll_2);
hipFree(device_x_unroll_2);
hipFree(device_w_unroll_2);
hipFree(device_x);
hipFree(device_y);
hipFree(device_pool);
hipFree(device_y_unroll);
hipFree(device_x_unroll);
  hipFree(device_w_unroll);
  // release the host-side unrolled filter buffers
  free(w_unroll);
  free(w_unroll_2);
}
static int loadData(float *x, float *y) {
// Open the data file
const auto file_id =
H5Fopen(FLAGS_testdata.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
// Open the dataset x and y
const auto x_id = H5Dopen2(file_id, "/x", H5P_DEFAULT);
const auto y_id = H5Dopen2(file_id, "/y", H5P_DEFAULT);
// Get the dataset x dimensions
const auto xspace = H5Dget_space(x_id);
const auto xndims = H5Sget_simple_extent_ndims(xspace);
assert(xndims == 4);
hsize_t *input_dims = allocate<hsize_t>(xndims);
//hsize_t input_dims[xndims];
H5Sget_simple_extent_dims(xspace, input_dims, NULL);
if (input_dims[0] != FLAGS_batch_size) {
std::cout << "data size does not match batch size specified!\n";
return 1; // return error
}
std::cout << "input dimensions = " << input_dims[0] << " x " << input_dims[1]
<< " x " << input_dims[2] << " x " << input_dims[3] << "\n";
// Read the dataset x and y
check_success(
H5Dread(x_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, x));
check_success(
H5Dread(y_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, y));
// Close the dataset x and y
check_success(H5Dclose(x_id));
check_success(H5Dclose(y_id));
// Close the file
check_success(H5Fclose(file_id));
delete[] input_dims;
// return success
return 0;
}
static void loadModel(float *conv1, float *conv2, float *fc1, float *fc2) {
// Open the model file
const auto file_id = H5Fopen(FLAGS_model.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
// Open the dataset
const auto conv1_id = H5Dopen2(file_id, "/conv1", H5P_DEFAULT);
const auto conv2_id = H5Dopen2(file_id, "/conv2", H5P_DEFAULT);
const auto fc1_id = H5Dopen2(file_id, "/fc1", H5P_DEFAULT);
const auto fc2_id = H5Dopen2(file_id, "/fc2", H5P_DEFAULT);
// Read the dataset
check_success(H5Dread(conv1_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL,
H5P_DEFAULT, conv1));
check_success(H5Dread(conv2_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL,
H5P_DEFAULT, conv2));
check_success(
H5Dread(fc1_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, fc1));
check_success(
H5Dread(fc2_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, fc2));
// Close the dataset x and y
check_success(H5Dclose(conv1_id));
check_success(H5Dclose(conv2_id));
check_success(H5Dclose(fc1_id));
check_success(H5Dclose(fc2_id));
// Close the file
check_success(H5Fclose(file_id));
}
// Choose the guess with largest score
static void argmax(const float *X, const int xdims[2], int *Y) {
for (const auto i : range(0, xdims[0])) {
auto max_idx = 0;
auto max = X[i * xdims[1]];
for (const auto j : range(0, xdims[1])) {
const auto elem = X[(i * xdims[1]) + j];
if (elem > max) {
max_idx = j;
max = elem;
}
}
Y[i] = max_idx;
}
}
// Forward operation for the CNN, a combination of conv layer + average pooling
// + relu
void forward_operation(float *x, float *conv1, float *conv2, float *fc1,
float *fc2, int *out) {
// conv layer
const int pool_size = 2;
const int adims[] = {xdims[0], (xdims[1] - conv1dims[0] + 1), (xdims[2] - conv1dims[1] + 1), conv1dims[3]};
const int bdims[] = {adims[0], adims[1] / pool_size, adims[2] / pool_size, adims[3]};
const int cdims[] = {bdims[0], (bdims[1] - conv2dims[0] + 1), (bdims[2] - conv2dims[1] + 1), conv2dims[3]};
const int ddims[] = {cdims[0], cdims[1] / pool_size, cdims[2] / pool_size, cdims[3]};
const int edims[] = {ddims[0], fc1dims[1]};
const int fdims[] = {edims[0], fc2dims[1]};
auto f = zeros<float>(fdims);
conv_forward_unroll(x, conv1, conv2, fc1, fc2, f, xdims, conv1dims, adims, bdims, conv2dims, cdims, ddims, fc1dims, edims, fc2dims, fdims, pool_size);
argmax(f, fdims, out);
delete[] f;
}
int main(int argc, char **argv) {
if (argc != 3 && argc != 4) {
std::cerr << "\n"
<< "This program performs the forward opertion step for "
"Convolutional Neural Network(CNN). "
"Sample usage: \n"
<< argv[0]
<< " [../data/test10.hdf5] [../data/model.hdf5] [10]\n";
return -1;
}
FLAGS_testdata = std::string(argv[1]);
FLAGS_model = std::string(argv[2]);
if (argc == 3) {
const std::map<std::string, int> default_batch_sizes{
{ "../data/test2.hdf5", 2 },
{ "../data/test10.hdf5", 10 },
{ "../data/test100.hdf5", 100 },
{ "../data/testfull.hdf5", 10000 } };
const auto batch_size_in_map = default_batch_sizes.find(FLAGS_testdata);
if (batch_size_in_map == default_batch_sizes.end()) {
std::cerr << "\nERROR:: Unrecognized file " << FLAGS_testdata << " batch_size must be specified.\n";
return -1;
}
FLAGS_batch_size = batch_size_in_map->second;
}
else if (argc == 4) {
FLAGS_batch_size = atoi(argv[3]);
}
xdims[0] = FLAGS_batch_size;
rdims[0] = FLAGS_batch_size;
// Load data into x and y
float *x = allocate<float>(xdims);
float *y = allocate<float>(rdims);
loadData(x, y);
// Load model
float *conv1 = allocate<float>(conv1dims);
float *conv2 = allocate<float>(conv2dims);
float *fc1 = allocate<float>(fc1dims);
float *fc2 = allocate<float>(fc2dims);
loadModel(conv1, conv2, fc1, fc2);
  // Perform forward operation
int *out = zeros<int>(FLAGS_batch_size);
// get start time
const auto start = now();
forward_operation(x, conv1, conv2, fc1, fc2, out);
// get end time
const auto end = now();
// get elapsed time in milliseconds
const auto elapsed =
std::chrono::duration<double, std::milli>(end - start).count();
// Get reference
int *ref = zeros<int>(FLAGS_batch_size);
argmax(y, rdims, ref);
// Calculate correctness
int num_correct = 0;
for (const auto i : range(0, FLAGS_batch_size)) {
if (out[i] == ref[i]) {
num_correct++;
}
}
std::cout << "Done with " << FLAGS_batch_size << " queries in "
<< "elapsed = " << elapsed << " milliseconds. Correctness: "
<< static_cast<float>(num_correct) / FLAGS_batch_size << "\n";
delete[] x;
delete[] y;
delete[] conv1;
delete[] conv2;
delete[] fc1;
delete[] fc2;
delete[] out;
delete[] ref;
return 0;
}
| 108c7d7ff520e9a9696f2bd6a4a1249212c3c365.cu | #include <algorithm>
#include <cassert>
#include <cstddef>
#include <iostream>
#include <numeric>
#include <map>
#include <time.h>
#include <valarray>
#include <string>
#include <hdf5.h>
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <cuda_fp16.h>
#include "range.hpp"
#include "utils.hpp"
#define NUM_ROWS 28
#define NUM_COLS 28
#define NUM_CHANNELS 1
#define NUM_DIGITS 10
#define TILE_WIDTH 32
#define MAX_THREADS 1024
static int FLAGS_batch_size = 10000;
static std::string FLAGS_testdata{};
static std::string FLAGS_model{};
// Data and reference data dimensions
static int xdims[] = { FLAGS_batch_size, NUM_ROWS, NUM_COLS, NUM_CHANNELS };
static int rdims[] = { FLAGS_batch_size, NUM_DIGITS };
// Model dimensions
static int conv1dims[] = { 5, 5, 1, 32 };
static int conv2dims[] = { 5, 5, 32, 64 };
static int fc1dims[] = { 1024, 128 };
static int fc2dims[] = { 128, 10 };
struct dims {
int dim[4];
};
__global__ void unroll_x_kernel(float *X, float *X_unroll, dims x, dims w, dims y) {
int xoffset, uoffset;
int index = blockDim.x * blockIdx.x + threadIdx.x;
int n = blockIdx.y;
int H_filter = w.dim[0];
int W_filter = w.dim[1];
int H_out = y.dim[1];
int W_out = y.dim[2];
int C = w.dim[2];
int W_unroll = H_out * W_out;
int H_unroll = C * H_filter * W_filter;
int c = index / W_unroll;
int s = index % W_unroll;
int h_out = s / W_out;
int w_out = s % W_out;
if (index < C * W_unroll) {
for (int p = 0; p < H_filter; p++) {
for (int q = 0; q < W_filter; q++) {
uoffset = (n * H_unroll + (c * H_filter * W_filter + p * W_filter + q)) * W_unroll + s;
xoffset = ((n * x.dim[1] + (h_out + p)) * x.dim[2] + (w_out + q)) * x.dim[3] + c;
X_unroll[uoffset] = X[xoffset];
}
}
}
}
__global__ void reroll_y_kernel(float *Y, float *Y_roll, dims y) {
int yoffset, roffset;
int index = blockDim.x * blockIdx.x + threadIdx.x;
int n = blockIdx.y;
int y_roll_row = index / (y.dim[1] * y.dim[2]);
int y_roll_col = index % (y.dim[1] * y.dim[2]);
int y_row = y_roll_col / y.dim[2];
int y_col = y_roll_col % y.dim[2];
int y_width = y.dim[1] * y.dim[2];
int y_height = y.dim[3];
if (index < y.dim[1] * y.dim[2] * y.dim[3]) {
roffset = ((n * y.dim[1] + y_row) * y.dim[2] + y_col) * y.dim[3] + y_roll_row;
yoffset = (n * y_height + y_roll_row) * y_width + y_roll_col;
Y_roll[roffset] = (Y[yoffset] < 0) ? 0 : Y[yoffset];
}
}
__global__ void matrixMultiplyShared(float *A, float *B, float *C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
__shared__ float subTileA[TILE_WIDTH][TILE_WIDTH];
__shared__ float subTileB[TILE_WIDTH][TILE_WIDTH];
int n = blockIdx.z;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by * blockDim.y + ty;
int Col = bx * blockDim.x + tx;
__half Cvalue = __float2half(0.0);
// float Cvalue = 0.0;
int numOfTiles = numAColumns / TILE_WIDTH;
if (numAColumns % TILE_WIDTH) numOfTiles++;
for (int m = 0; m < numOfTiles; m++) {
if ((m * TILE_WIDTH + tx < numAColumns) && (Row < numARows)) {
subTileA[ty][tx] = A[Row * numAColumns + m * TILE_WIDTH + tx];
}
else {
subTileA[ty][tx] = 0.0;
}
if ((m * TILE_WIDTH + ty < numBRows) && (Col < numBColumns)) {
subTileB[ty][tx] = B[(n * numBRows + (m * TILE_WIDTH + ty)) * numBColumns + Col];
}
else {
subTileB[ty][tx] = 0.0;
}
__syncthreads();
#pragma unroll
for (int k = 0; k < TILE_WIDTH; k++) {
__half a = __float2half(subTileA[ty][k]);
__half b = __float2half(subTileB[k][tx]);
Cvalue += __hmul(a, b);
}
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) {
C[(n * numCRows + Row) * numCColumns + Col] = __half2float(Cvalue);
}
}
__global__ void average_pool_kernel(float *X, float *Y, int pool_size, dims x, dims y) {
int xoffset, yoffset;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int n = blockIdx.y;
int h = index / (y.dim[2] * y.dim[3]);
int w = (index % (y.dim[2] * y.dim[3])) / y.dim[3];
int m = (index % (y.dim[2] * y.dim[3])) % y.dim[3];
float acc = 0;
float size = (float)(pool_size * pool_size);
if (index < y.dim[1] * y.dim[2] * y.dim[3]) {
for (int p = 0; p < pool_size; p++) {
for (int q = 0; q < pool_size; q++) {
xoffset = ((n * x.dim[1] + (pool_size * h + p)) * x.dim[2] + (pool_size * w + q)) * x.dim[3] + m;
acc += X[xoffset] / size;
}
}
yoffset = ((n * y.dim[1] + h) * y.dim[2] + w) * y.dim[3] + m;
Y[yoffset] = acc;
}
}
__global__ void relu_kernel(float *X, int size) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
X[i] = (X[i] < 0) ? 0 : X[i];
}
}
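// Reorders the convolution filters from their (H, W, C, M) storage layout into an
// M x (C*H*W) matrix so each convolution can be computed as a single matrix multiply
// against the unrolled input produced by unroll_x_kernel.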
void unroll_weights(const float *W, float *W_unroll, dims w) {
int c, m, row, col;
int unroll_offset, offset;
int filter_h = w.dim[0];
int filter_w = w.dim[1];
int C = w.dim[2];
int M = w.dim[3];
for (row = 0; row < filter_h; row++) {
for (col = 0; col < filter_w; col++) {
for (c = 0; c < C; c++) {
for (m = 0; m < M; m++) {
unroll_offset = ((m * C + c) * filter_h + row) * filter_w + col;
offset = ((row * filter_w + col) * C + c) * M + m;
W_unroll[unroll_offset] = W[offset];
}
}
}
}
}
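// Full forward pass on the GPU: each convolution layer is computed as
// input unroll -> tiled GEMM against the unrolled filters -> re-roll (with ReLU) -> average pooling,
// and the two fully-connected layers reuse the same tiled GEMM kernel.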
void conv_forward_unroll(const float *x, const float *w, const float *w2, float *fc1, float *fc2, float *out, const int xdims[4], const int wdims[4], const int ydims[4], const int pooldims[4], const int w2dims[4], const int y2dims[4], const int pool2dims[4], const int w3dims[2], const int y3dims[2], const int w4dims[2], const int y4dims[2], int pool_size) {
float *device_x, *device_y, *device_pool, *device_x_unroll, *device_w_unroll, *device_y_unroll;
float *device_y_2, *device_pool_2, *device_x_unroll_2, *device_w_unroll_2, *device_y_unroll_2;
float *device_fc1, *device_fc2, *device_fully, *device_out;
dims y_d, w_d, x_d, pool_d;
dims y_d_2, w_d_2, pool_d_2;
for (int i = 0; i < 4; i++) {
x_d.dim[i] = xdims[i];
y_d.dim[i] = ydims[i];
w_d.dim[i] = wdims[i];
pool_d.dim[i] = pooldims[i];
y_d_2.dim[i] = y2dims[i];
w_d_2.dim[i] = w2dims[i];
pool_d_2.dim[i] = pool2dims[i];
}
int size_w_3 = sizeof(float) * w3dims[0] * w3dims[1];
int size_y_3 = sizeof(float) * y3dims[0] * y3dims[1];
int size_w_4 = sizeof(float) * w4dims[0] * w4dims[1];
int size_y_4 = sizeof(float) * y4dims[0] * y4dims[1];
int numARows3 = pool2dims[0], numAColumns3 = pool2dims[1] * pool2dims[2] * pool2dims[3];
int numBRows3 = w3dims[0], numBColumns3 = w3dims[1];
int numCRows3 = y3dims[0], numCColumns3 = y3dims[1];
int numARows4 = y3dims[0], numAColumns4 = y3dims[1];
int numBRows4 = w4dims[0], numBColumns4 = w4dims[1];
int numCRows4 = y4dims[0], numCColumns4 = y4dims[1];
int numAColumns = wdims[0] * wdims[1] * wdims[2], numARows = ydims[3];
int numBColumns = ydims[1] * ydims[2], numBRows = wdims[0] * wdims[1] * wdims[2];
int numCColumns = numBColumns, numCRows = numARows;
int numAColumns2 = w2dims[0] * w2dims[1] * w2dims[2], numARows2 = y2dims[3];
int numBColumns2 = y2dims[1] * y2dims[2], numBRows2 = w2dims[0] * w2dims[1] * w2dims[2];
int numCColumns2 = numBColumns2, numCRows2 = numARows2;
int size_x = sizeof(float) * xdims[0] * xdims[1] * xdims[2] * xdims[3];
int size_y = sizeof(float) * ydims[0] * ydims[1] * ydims[2] * ydims[3];
int size_pool = sizeof(float) * pooldims[0] * pooldims[1] * pooldims[2] * pooldims[3];
int size_x_unroll = sizeof(float) * xdims[0] * wdims[0] * wdims[1] * wdims[2] * ydims[1] * ydims[2];
int size_w_unroll = sizeof(float) * wdims[0] * wdims[1] * wdims[2] * ydims[3];
int size_y_unroll = sizeof(float) * xdims[0] * ydims[1] * ydims[2] * ydims[3];
int size_y_2 = sizeof(float) * y2dims[0] * y2dims[1] * y2dims[2] * y2dims[3];
int size_pool_2 = sizeof(float) * pool2dims[0] * pool2dims[1] * pool2dims[2] * pool2dims[3];
int size_x_unroll_2 = sizeof(float) * pooldims[0] * w2dims[0] * w2dims[1] * w2dims[2] * y2dims[1] * y2dims[2];
int size_w_unroll_2 = sizeof(float) * w2dims[0] * w2dims[1] * w2dims[2] * y2dims[3];
int size_y_unroll_2 = sizeof(float) * pooldims[0] * y2dims[1] * y2dims[2] * y2dims[3];
cudaMalloc((void **)&device_x, size_x);
cudaMalloc((void **)&device_y, size_y);
cudaMalloc((void **)&device_pool, size_pool);
cudaMalloc((void **)&device_x_unroll, size_x_unroll);
cudaMalloc((void **)&device_w_unroll, size_w_unroll);
cudaMalloc((void **)&device_y_unroll, size_y_unroll);
cudaMalloc((void **)&device_y_2, size_y_2);
cudaMalloc((void **)&device_pool_2, size_pool_2);
cudaMalloc((void **)&device_x_unroll_2, size_x_unroll_2);
cudaMalloc((void **)&device_w_unroll_2, size_w_unroll_2);
cudaMalloc((void **)&device_y_unroll_2, size_y_unroll_2);
cudaMalloc((void **)&device_fc1, size_w_3);
cudaMalloc((void **)&device_fully, size_y_3);
cudaMalloc((void **)&device_fc2, size_w_4);
cudaMalloc((void **)&device_out, size_y_4);
float * w_unroll = (float *)malloc(size_w_unroll);
unroll_weights(w, w_unroll, w_d);
float * w_unroll_2 = (float *)malloc(size_w_unroll_2);
unroll_weights(w2, w_unroll_2, w_d_2);
cudaMemcpy(device_x, x, size_x, cudaMemcpyHostToDevice);
cudaMemcpy(device_w_unroll, w_unroll, size_w_unroll, cudaMemcpyHostToDevice);
cudaMemcpy(device_w_unroll_2, w_unroll_2, size_w_unroll_2, cudaMemcpyHostToDevice);
cudaMemcpy(device_fc1, fc1, size_w_3, cudaMemcpyHostToDevice);
cudaMemcpy(device_fc2, fc2, size_w_4, cudaMemcpyHostToDevice);
dim3 DimBlock_unroll_x(MAX_THREADS, 1, 1);
dim3 DimGrid_unroll_x(ceil((float)(wdims[2] * ydims[1] * ydims[2]) / MAX_THREADS), xdims[0], 1);
dim3 DimBlock_matmul(TILE_WIDTH, TILE_WIDTH, 1);
dim3 DimGrid_matmul(ceil((float)(ydims[1] * ydims[2]) / TILE_WIDTH), ceil((float)(ydims[3]) / TILE_WIDTH), xdims[0]);
dim3 DimBlock_reroll_y(MAX_THREADS, 1, 1);
dim3 DimGrid_reroll_y(ceil((float)(ydims[1] * ydims[2] * ydims[3]) / MAX_THREADS), xdims[0], 1);
dim3 DimBlock_pool(MAX_THREADS, 1, 1);
dim3 DimGrid_pool(ceil((float)(pooldims[1] * pooldims[2] * pooldims[3]) / MAX_THREADS), xdims[0], 1);
dim3 DimBlock_unroll_x_2(MAX_THREADS, 1, 1);
dim3 DimGrid_unroll_x_2(ceil((float)(w2dims[2] * y2dims[1] * y2dims[2]) / MAX_THREADS), pooldims[0], 1);
dim3 DimBlock_matmul_2(TILE_WIDTH, TILE_WIDTH, 1);
dim3 DimGrid_matmul_2(ceil((float)(y2dims[1] * y2dims[2]) / TILE_WIDTH), ceil((float)(y2dims[3]) / TILE_WIDTH), pooldims[0]);
dim3 DimBlock_reroll_y_2(MAX_THREADS, 1, 1);
dim3 DimGrid_reroll_y_2(ceil((float)(y2dims[1] * y2dims[2] * y2dims[3]) / MAX_THREADS), pooldims[0], 1);
dim3 DimBlock_pool_2(MAX_THREADS, 1, 1);
dim3 DimGrid_pool_2(ceil((float)(pool2dims[1] * pool2dims[2] * pool2dims[3]) / MAX_THREADS), pooldims[0], 1);
dim3 DimBlock_fully(TILE_WIDTH, TILE_WIDTH, 1);
dim3 DimGrid_fully(ceil((float)(numCColumns3) / TILE_WIDTH), ceil((float)(numCRows3) / TILE_WIDTH), 1);
dim3 DimBlock_fully_2(TILE_WIDTH, TILE_WIDTH, 1);
dim3 DimGrid_fully_2(ceil((float)(numCColumns4) / TILE_WIDTH), ceil((float)(numCRows4) / TILE_WIDTH), 1);
unroll_x_kernel <<<DimGrid_unroll_x, DimBlock_unroll_x>>> (device_x, device_x_unroll, x_d, w_d, y_d);
matrixMultiplyShared <<<DimGrid_matmul, DimBlock_matmul>>> (device_w_unroll, device_x_unroll, device_y_unroll, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
reroll_y_kernel <<<DimGrid_reroll_y, DimBlock_reroll_y>>> (device_y_unroll, device_y, y_d);
average_pool_kernel <<<DimGrid_pool, DimBlock_pool>>> (device_y, device_pool, pool_size, y_d, pool_d);
unroll_x_kernel <<<DimGrid_unroll_x_2, DimBlock_unroll_x_2>>> (device_pool, device_x_unroll_2, pool_d, w_d_2, y_d_2);
// cudaEvent_t start, stop;
// float time;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
// cudaEventRecord(start, NULL);
matrixMultiplyShared <<<DimGrid_matmul_2, DimBlock_matmul_2>>> (device_w_unroll_2, device_x_unroll_2, device_y_unroll_2, numARows2, numAColumns2, numBRows2, numBColumns2, numCRows2, numCColumns2);
// cudaEventRecord(stop, NULL);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&time, start, stop);
// printf("%f\n", time);
reroll_y_kernel <<<DimGrid_reroll_y_2, DimBlock_reroll_y_2>>> (device_y_unroll_2, device_y_2, y_d_2);
average_pool_kernel <<<DimGrid_pool_2, DimBlock_pool_2>>> (device_y_2, device_pool_2, pool_size, y_d_2, pool_d_2);
matrixMultiplyShared <<<DimGrid_fully, DimBlock_fully>>> (device_pool_2, device_fc1, device_fully, numARows3, numAColumns3, numBRows3, numBColumns3, numCRows3, numCColumns3);
relu_kernel <<<ceil((float)(y3dims[0] * y3dims[1]) / MAX_THREADS), MAX_THREADS>>> (device_fully, y3dims[0] * y3dims[1]);
matrixMultiplyShared <<<DimGrid_fully_2, DimBlock_fully_2>>> (device_fully, device_fc2, device_out, numARows4, numAColumns4, numBRows4, numBColumns4, numCRows4, numCColumns4);
cudaMemcpy(out, device_out, size_y_4, cudaMemcpyDeviceToHost);
cudaFree(device_fc1);
cudaFree(device_fc2);
cudaFree(device_fully);
cudaFree(device_out);
cudaFree(device_y_2);
cudaFree(device_pool_2);
cudaFree(device_y_unroll_2);
cudaFree(device_x_unroll_2);
cudaFree(device_w_unroll_2);
cudaFree(device_x);
cudaFree(device_y);
cudaFree(device_pool);
cudaFree(device_y_unroll);
cudaFree(device_x_unroll);
  cudaFree(device_w_unroll);
  // release the host-side unrolled filter buffers
  free(w_unroll);
  free(w_unroll_2);
}
static int loadData(float *x, float *y) {
// Open the data file
const auto file_id =
H5Fopen(FLAGS_testdata.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
// Open the dataset x and y
const auto x_id = H5Dopen2(file_id, "/x", H5P_DEFAULT);
const auto y_id = H5Dopen2(file_id, "/y", H5P_DEFAULT);
// Get the dataset x dimensions
const auto xspace = H5Dget_space(x_id);
const auto xndims = H5Sget_simple_extent_ndims(xspace);
assert(xndims == 4);
hsize_t *input_dims = allocate<hsize_t>(xndims);
//hsize_t input_dims[xndims];
H5Sget_simple_extent_dims(xspace, input_dims, NULL);
if (input_dims[0] != FLAGS_batch_size) {
std::cout << "data size does not match batch size specified!\n";
return 1; // return error
}
std::cout << "input dimensions = " << input_dims[0] << " x " << input_dims[1]
<< " x " << input_dims[2] << " x " << input_dims[3] << "\n";
// Read the dataset x and y
check_success(
H5Dread(x_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, x));
check_success(
H5Dread(y_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, y));
// Close the dataset x and y
check_success(H5Dclose(x_id));
check_success(H5Dclose(y_id));
// Close the file
check_success(H5Fclose(file_id));
delete[] input_dims;
// return success
return 0;
}
static void loadModel(float *conv1, float *conv2, float *fc1, float *fc2) {
// Open the model file
const auto file_id = H5Fopen(FLAGS_model.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
// Open the dataset
const auto conv1_id = H5Dopen2(file_id, "/conv1", H5P_DEFAULT);
const auto conv2_id = H5Dopen2(file_id, "/conv2", H5P_DEFAULT);
const auto fc1_id = H5Dopen2(file_id, "/fc1", H5P_DEFAULT);
const auto fc2_id = H5Dopen2(file_id, "/fc2", H5P_DEFAULT);
// Read the dataset
check_success(H5Dread(conv1_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL,
H5P_DEFAULT, conv1));
check_success(H5Dread(conv2_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL,
H5P_DEFAULT, conv2));
check_success(
H5Dread(fc1_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, fc1));
check_success(
H5Dread(fc2_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, fc2));
// Close the dataset x and y
check_success(H5Dclose(conv1_id));
check_success(H5Dclose(conv2_id));
check_success(H5Dclose(fc1_id));
check_success(H5Dclose(fc2_id));
// Close the file
check_success(H5Fclose(file_id));
}
// Choose the guess with largest score
static void argmax(const float *X, const int xdims[2], int *Y) {
for (const auto i : range(0, xdims[0])) {
auto max_idx = 0;
auto max = X[i * xdims[1]];
for (const auto j : range(0, xdims[1])) {
const auto elem = X[(i * xdims[1]) + j];
if (elem > max) {
max_idx = j;
max = elem;
}
}
Y[i] = max_idx;
}
}
// Forward operation for the CNN, a combination of conv layer + average pooling
// + relu
void forward_operation(float *x, float *conv1, float *conv2, float *fc1,
float *fc2, int *out) {
// conv layer
const int pool_size = 2;
const int adims[] = {xdims[0], (xdims[1] - conv1dims[0] + 1), (xdims[2] - conv1dims[1] + 1), conv1dims[3]};
const int bdims[] = {adims[0], adims[1] / pool_size, adims[2] / pool_size, adims[3]};
const int cdims[] = {bdims[0], (bdims[1] - conv2dims[0] + 1), (bdims[2] - conv2dims[1] + 1), conv2dims[3]};
const int ddims[] = {cdims[0], cdims[1] / pool_size, cdims[2] / pool_size, cdims[3]};
const int edims[] = {ddims[0], fc1dims[1]};
const int fdims[] = {edims[0], fc2dims[1]};
auto f = zeros<float>(fdims);
conv_forward_unroll(x, conv1, conv2, fc1, fc2, f, xdims, conv1dims, adims, bdims, conv2dims, cdims, ddims, fc1dims, edims, fc2dims, fdims, pool_size);
argmax(f, fdims, out);
delete[] f;
}
int main(int argc, char **argv) {
if (argc != 3 && argc != 4) {
std::cerr << "\n"
<< "This program performs the forward opertion step for "
"Convolutional Neural Network(CNN). "
"Sample usage: \n"
<< argv[0]
<< " [../data/test10.hdf5] [../data/model.hdf5] [10]\n";
return -1;
}
FLAGS_testdata = std::string(argv[1]);
FLAGS_model = std::string(argv[2]);
if (argc == 3) {
const std::map<std::string, int> default_batch_sizes{
{ "../data/test2.hdf5", 2 },
{ "../data/test10.hdf5", 10 },
{ "../data/test100.hdf5", 100 },
{ "../data/testfull.hdf5", 10000 } };
const auto batch_size_in_map = default_batch_sizes.find(FLAGS_testdata);
if (batch_size_in_map == default_batch_sizes.end()) {
std::cerr << "\nERROR:: Unrecognized file " << FLAGS_testdata << " batch_size must be specified.\n";
return -1;
}
FLAGS_batch_size = batch_size_in_map->second;
}
else if (argc == 4) {
FLAGS_batch_size = atoi(argv[3]);
}
xdims[0] = FLAGS_batch_size;
rdims[0] = FLAGS_batch_size;
// Load data into x and y
float *x = allocate<float>(xdims);
float *y = allocate<float>(rdims);
loadData(x, y);
// Load model
float *conv1 = allocate<float>(conv1dims);
float *conv2 = allocate<float>(conv2dims);
float *fc1 = allocate<float>(fc1dims);
float *fc2 = allocate<float>(fc2dims);
loadModel(conv1, conv2, fc1, fc2);
  // Perform forward operation
int *out = zeros<int>(FLAGS_batch_size);
// get start time
const auto start = now();
forward_operation(x, conv1, conv2, fc1, fc2, out);
// get end time
const auto end = now();
// get elapsed time in milliseconds
const auto elapsed =
std::chrono::duration<double, std::milli>(end - start).count();
// Get reference
int *ref = zeros<int>(FLAGS_batch_size);
argmax(y, rdims, ref);
// Calculate correctness
int num_correct = 0;
for (const auto i : range(0, FLAGS_batch_size)) {
if (out[i] == ref[i]) {
num_correct++;
}
}
std::cout << "Done with " << FLAGS_batch_size << " queries in "
<< "elapsed = " << elapsed << " milliseconds. Correctness: "
<< static_cast<float>(num_correct) / FLAGS_batch_size << "\n";
delete[] x;
delete[] y;
delete[] conv1;
delete[] conv2;
delete[] fc1;
delete[] fc2;
delete[] out;
delete[] ref;
return 0;
}
|
11e37ff3e221d88f71673380fbf31bfabe8e00f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Academic License - for use in teaching, academic research, and meeting
// course requirements at degree granting institutions only. Not for
// government, commercial, or other organizational use.
// File: MEDT.cu
//
// GPU Coder version : 1.4
// CUDA/C/C++ source code generated on : 06-Dec-2020 18:02:22
//
// Include Files
#include "MEDT.h"
#include "MEDT_emxutil.h"
#include "MWCudaDimUtility.h"
#include "MWLaunchParametersUtilities.h"
#include <cstring>
// Function Declarations
static __global__ void MEDT_kernel1(const emxArray_boolean_T *image, int i,
emxArray_real_T *newimage);
static void gpuEmxFree_boolean_T(emxArray_boolean_T *inter);
static void gpuEmxFree_real_T(emxArray_real_T *inter);
static void gpuEmxMemcpyCpuToGpu_boolean_T(const emxArray_boolean_T *cpu,
emxArray_boolean_T *inter, emxArray_boolean_T *gpu);
static void gpuEmxMemcpyCpuToGpu_real_T(const emxArray_real_T *cpu,
emxArray_real_T *inter, emxArray_real_T *gpu);
static void gpuEmxMemcpyGpuToCpu_real_T(emxArray_real_T *cpu, emxArray_real_T
*inter);
static void gpuEmxReset_boolean_T(emxArray_boolean_T *inter);
static void gpuEmxReset_real_T(emxArray_real_T *inter);
// Function Definitions
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const emxArray_boolean_T *image
// int i
// emxArray_real_T *newimage
// Return Type : void
//
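// For every element, compares it against the maximum of its row (taken across the
// second image dimension) and writes 255 on a match, 0 otherwise.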
static __global__ __launch_bounds__(1024, 1) void MEDT_kernel1(const
emxArray_boolean_T *image, int i, emxArray_real_T *newimage)
{
unsigned int threadId;
unsigned int threadStride;
unsigned int idx;
int vlen;
boolean_T maxval;
int b_i;
int c_i;
int j;
long long loopEnd;
unsigned int tmpIndex;
threadId = static_cast<unsigned int>(mwGetGlobalThreadIndex());
threadStride = static_cast<unsigned int>(mwGetTotalThreadsLaunched());
loopEnd = (static_cast<long long>((image->size[1] - 1)) + 1LL) * (static_cast<
long long>((i - 1)) + 1LL) - 1LL;
for (idx = threadId; idx <= static_cast<unsigned int>(loopEnd); idx +=
threadStride) {
j = static_cast<int>((idx % static_cast<unsigned int>(image->size[1])));
tmpIndex = (idx - static_cast<unsigned int>(j)) / static_cast<unsigned int>
(image->size[1]);
b_i = static_cast<int>(tmpIndex);
vlen = image->size[1];
maxval = image->data[b_i];
for (c_i = 0; c_i <= vlen - 2; c_i++) {
if (static_cast<int>(maxval) < static_cast<int>(image->data[b_i +
image->size[0] * (c_i + 1)])) {
maxval = image->data[b_i + image->size[0] * (c_i + 1)];
}
}
if (static_cast<int>(image->data[b_i + image->size[0] * j]) == static_cast<
int>(maxval)) {
newimage->data[b_i + newimage->size[0] * j] = 255.0;
} else {
newimage->data[b_i + newimage->size[0] * j] = 0.0;
}
}
}
//
// Arguments : emxArray_boolean_T *inter
// Return Type : void
//
static void gpuEmxFree_boolean_T(emxArray_boolean_T *inter)
{
hipFree(inter->data);
hipFree(inter->size);
}
//
// Arguments : emxArray_real_T *inter
// Return Type : void
//
static void gpuEmxFree_real_T(emxArray_real_T *inter)
{
hipFree(inter->data);
hipFree(inter->size);
}
//
// Arguments : const emxArray_boolean_T *cpu
// emxArray_boolean_T *inter
// emxArray_boolean_T *gpu
// Return Type : void
//
static void gpuEmxMemcpyCpuToGpu_boolean_T(const emxArray_boolean_T *cpu,
emxArray_boolean_T *inter, emxArray_boolean_T *gpu)
{
int actualSize;
int i;
int allocatingSize;
if (inter->numDimensions < cpu->numDimensions) {
inter->numDimensions = cpu->numDimensions;
hipFree(inter->size);
hipMalloc(&inter->size, inter->numDimensions * sizeof(int));
} else {
inter->numDimensions = cpu->numDimensions;
}
actualSize = 1;
for (i = 0; i < cpu->numDimensions; i++) {
actualSize *= cpu->size[i];
}
if (inter->allocatedSize < actualSize) {
if (inter->canFreeData) {
hipFree(inter->data);
}
allocatingSize = cpu->allocatedSize;
if (allocatingSize < actualSize) {
allocatingSize = actualSize;
}
inter->allocatedSize = allocatingSize;
inter->canFreeData = true;
hipMalloc(&inter->data, inter->allocatedSize * sizeof(boolean_T));
}
hipMemcpy(inter->data, cpu->data, actualSize * sizeof(boolean_T),
hipMemcpyHostToDevice);
hipMemcpy(inter->size, cpu->size, cpu->numDimensions * sizeof(int),
hipMemcpyHostToDevice);
hipMemcpy(gpu, inter, 32ULL, hipMemcpyHostToDevice);
}
//
// Arguments : const emxArray_real_T *cpu
// emxArray_real_T *inter
// emxArray_real_T *gpu
// Return Type : void
//
static void gpuEmxMemcpyCpuToGpu_real_T(const emxArray_real_T *cpu,
emxArray_real_T *inter, emxArray_real_T *gpu)
{
int actualSize;
int i;
int allocatingSize;
if (inter->numDimensions < cpu->numDimensions) {
inter->numDimensions = cpu->numDimensions;
hipFree(inter->size);
hipMalloc(&inter->size, inter->numDimensions * sizeof(int));
} else {
inter->numDimensions = cpu->numDimensions;
}
actualSize = 1;
for (i = 0; i < cpu->numDimensions; i++) {
actualSize *= cpu->size[i];
}
if (inter->allocatedSize < actualSize) {
if (inter->canFreeData) {
hipFree(inter->data);
}
allocatingSize = cpu->allocatedSize;
if (allocatingSize < actualSize) {
allocatingSize = actualSize;
}
inter->allocatedSize = allocatingSize;
inter->canFreeData = true;
hipMalloc(&inter->data, inter->allocatedSize * sizeof(double));
}
hipMemcpy(inter->data, cpu->data, actualSize * sizeof(double),
hipMemcpyHostToDevice);
hipMemcpy(inter->size, cpu->size, cpu->numDimensions * sizeof(int),
hipMemcpyHostToDevice);
hipMemcpy(gpu, inter, 32ULL, hipMemcpyHostToDevice);
}
//
// Arguments : emxArray_real_T *cpu
// emxArray_real_T *inter
// Return Type : void
//
static void gpuEmxMemcpyGpuToCpu_real_T(emxArray_real_T *cpu, emxArray_real_T
*inter)
{
int actualSize;
int i;
actualSize = 1;
for (i = 0; i < cpu->numDimensions; i++) {
actualSize *= cpu->size[i];
}
hipMemcpy(cpu->data, inter->data, actualSize * sizeof(double),
hipMemcpyDeviceToHost);
hipMemcpy(cpu->size, inter->size, inter->numDimensions * sizeof(int),
hipMemcpyDeviceToHost);
}
//
// Arguments : emxArray_boolean_T *inter
// Return Type : void
//
static void gpuEmxReset_boolean_T(emxArray_boolean_T *inter)
{
std::memset(inter, 0, sizeof(emxArray_boolean_T));
}
//
// Arguments : emxArray_real_T *inter
// Return Type : void
//
static void gpuEmxReset_real_T(emxArray_real_T *inter)
{
std::memset(inter, 0, sizeof(emxArray_real_T));
}
//
// Arguments : const emxArray_boolean_T *image
// emxArray_real_T *newimage
// Return Type : void
//
void MEDT(const emxArray_boolean_T *image, emxArray_real_T *newimage)
{
int i;
int i1;
emxArray_boolean_T *gpu_image;
dim3 grid;
dim3 block;
boolean_T validLaunchParams;
emxArray_real_T *gpu_newimage;
boolean_T newimage_dirtyOnGpu;
emxArray_boolean_T inter_image;
emxArray_real_T inter_newimage;
hipMalloc(&gpu_newimage, 32ULL);
gpuEmxReset_real_T(&inter_newimage);
hipMalloc(&gpu_image, 32ULL);
gpuEmxReset_boolean_T(&inter_image);
newimage_dirtyOnGpu = false;
  // number of data elements
i = image->size[0];
i1 = newimage->size[0] * newimage->size[1];
newimage->size[0] = image->size[0];
newimage->size[1] = image->size[1];
emxEnsureCapacity_real_T(newimage, i1);
validLaunchParams = mwGetLaunchParameters(static_cast<double>((((image->size[1]
- 1) + 1LL) * ((i - 1) + 1LL))), &grid, &block, 1024U, 65535U);
if (validLaunchParams) {
gpuEmxMemcpyCpuToGpu_boolean_T(image, &inter_image, gpu_image);
gpuEmxMemcpyCpuToGpu_real_T(newimage, &inter_newimage, gpu_newimage);
hipLaunchKernelGGL(( MEDT_kernel1), dim3(grid), dim3(block), 0, 0, gpu_image, i, gpu_newimage);
newimage_dirtyOnGpu = true;
}
if (newimage_dirtyOnGpu) {
gpuEmxMemcpyGpuToCpu_real_T(newimage, &inter_newimage);
}
gpuEmxFree_boolean_T(&inter_image);
hipFree(gpu_image);
gpuEmxFree_real_T(&inter_newimage);
hipFree(gpu_newimage);
}
//
// File trailer for MEDT.cu
//
// [EOF]
//
| 11e37ff3e221d88f71673380fbf31bfabe8e00f6.cu | //
// Academic License - for use in teaching, academic research, and meeting
// course requirements at degree granting institutions only. Not for
// government, commercial, or other organizational use.
// File: MEDT.cu
//
// GPU Coder version : 1.4
// CUDA/C/C++ source code generated on : 06-Dec-2020 18:02:22
//
// Include Files
#include "MEDT.h"
#include "MEDT_emxutil.h"
#include "MWCudaDimUtility.h"
#include "MWLaunchParametersUtilities.h"
#include <cstring>
// Function Declarations
static __global__ void MEDT_kernel1(const emxArray_boolean_T *image, int i,
emxArray_real_T *newimage);
static void gpuEmxFree_boolean_T(emxArray_boolean_T *inter);
static void gpuEmxFree_real_T(emxArray_real_T *inter);
static void gpuEmxMemcpyCpuToGpu_boolean_T(const emxArray_boolean_T *cpu,
emxArray_boolean_T *inter, emxArray_boolean_T *gpu);
static void gpuEmxMemcpyCpuToGpu_real_T(const emxArray_real_T *cpu,
emxArray_real_T *inter, emxArray_real_T *gpu);
static void gpuEmxMemcpyGpuToCpu_real_T(emxArray_real_T *cpu, emxArray_real_T
*inter);
static void gpuEmxReset_boolean_T(emxArray_boolean_T *inter);
static void gpuEmxReset_real_T(emxArray_real_T *inter);
// Function Definitions
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const emxArray_boolean_T *image
// int i
// emxArray_real_T *newimage
// Return Type : void
//
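// For every element, compares it against the maximum of its row (taken across the
// second image dimension) and writes 255 on a match, 0 otherwise.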
static __global__ __launch_bounds__(1024, 1) void MEDT_kernel1(const
emxArray_boolean_T *image, int i, emxArray_real_T *newimage)
{
unsigned int threadId;
unsigned int threadStride;
unsigned int idx;
int vlen;
boolean_T maxval;
int b_i;
int c_i;
int j;
long long loopEnd;
unsigned int tmpIndex;
threadId = static_cast<unsigned int>(mwGetGlobalThreadIndex());
threadStride = static_cast<unsigned int>(mwGetTotalThreadsLaunched());
loopEnd = (static_cast<long long>((image->size[1] - 1)) + 1LL) * (static_cast<
long long>((i - 1)) + 1LL) - 1LL;
for (idx = threadId; idx <= static_cast<unsigned int>(loopEnd); idx +=
threadStride) {
j = static_cast<int>((idx % static_cast<unsigned int>(image->size[1])));
tmpIndex = (idx - static_cast<unsigned int>(j)) / static_cast<unsigned int>
(image->size[1]);
b_i = static_cast<int>(tmpIndex);
vlen = image->size[1];
maxval = image->data[b_i];
for (c_i = 0; c_i <= vlen - 2; c_i++) {
if (static_cast<int>(maxval) < static_cast<int>(image->data[b_i +
image->size[0] * (c_i + 1)])) {
maxval = image->data[b_i + image->size[0] * (c_i + 1)];
}
}
if (static_cast<int>(image->data[b_i + image->size[0] * j]) == static_cast<
int>(maxval)) {
newimage->data[b_i + newimage->size[0] * j] = 255.0;
} else {
newimage->data[b_i + newimage->size[0] * j] = 0.0;
}
}
}
//
// Arguments : emxArray_boolean_T *inter
// Return Type : void
//
static void gpuEmxFree_boolean_T(emxArray_boolean_T *inter)
{
cudaFree(inter->data);
cudaFree(inter->size);
}
//
// Arguments : emxArray_real_T *inter
// Return Type : void
//
static void gpuEmxFree_real_T(emxArray_real_T *inter)
{
cudaFree(inter->data);
cudaFree(inter->size);
}
//
// Arguments : const emxArray_boolean_T *cpu
// emxArray_boolean_T *inter
// emxArray_boolean_T *gpu
// Return Type : void
//
static void gpuEmxMemcpyCpuToGpu_boolean_T(const emxArray_boolean_T *cpu,
emxArray_boolean_T *inter, emxArray_boolean_T *gpu)
{
int actualSize;
int i;
int allocatingSize;
if (inter->numDimensions < cpu->numDimensions) {
inter->numDimensions = cpu->numDimensions;
cudaFree(inter->size);
cudaMalloc(&inter->size, inter->numDimensions * sizeof(int));
} else {
inter->numDimensions = cpu->numDimensions;
}
actualSize = 1;
for (i = 0; i < cpu->numDimensions; i++) {
actualSize *= cpu->size[i];
}
if (inter->allocatedSize < actualSize) {
if (inter->canFreeData) {
cudaFree(inter->data);
}
allocatingSize = cpu->allocatedSize;
if (allocatingSize < actualSize) {
allocatingSize = actualSize;
}
inter->allocatedSize = allocatingSize;
inter->canFreeData = true;
cudaMalloc(&inter->data, inter->allocatedSize * sizeof(boolean_T));
}
cudaMemcpy(inter->data, cpu->data, actualSize * sizeof(boolean_T),
cudaMemcpyHostToDevice);
cudaMemcpy(inter->size, cpu->size, cpu->numDimensions * sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(gpu, inter, 32ULL, cudaMemcpyHostToDevice);
}
//
// Arguments : const emxArray_real_T *cpu
// emxArray_real_T *inter
// emxArray_real_T *gpu
// Return Type : void
//
static void gpuEmxMemcpyCpuToGpu_real_T(const emxArray_real_T *cpu,
emxArray_real_T *inter, emxArray_real_T *gpu)
{
int actualSize;
int i;
int allocatingSize;
if (inter->numDimensions < cpu->numDimensions) {
inter->numDimensions = cpu->numDimensions;
cudaFree(inter->size);
cudaMalloc(&inter->size, inter->numDimensions * sizeof(int));
} else {
inter->numDimensions = cpu->numDimensions;
}
actualSize = 1;
for (i = 0; i < cpu->numDimensions; i++) {
actualSize *= cpu->size[i];
}
if (inter->allocatedSize < actualSize) {
if (inter->canFreeData) {
cudaFree(inter->data);
}
allocatingSize = cpu->allocatedSize;
if (allocatingSize < actualSize) {
allocatingSize = actualSize;
}
inter->allocatedSize = allocatingSize;
inter->canFreeData = true;
cudaMalloc(&inter->data, inter->allocatedSize * sizeof(double));
}
cudaMemcpy(inter->data, cpu->data, actualSize * sizeof(double),
cudaMemcpyHostToDevice);
cudaMemcpy(inter->size, cpu->size, cpu->numDimensions * sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(gpu, inter, 32ULL, cudaMemcpyHostToDevice);
}
//
// Arguments : emxArray_real_T *cpu
// emxArray_real_T *inter
// Return Type : void
//
static void gpuEmxMemcpyGpuToCpu_real_T(emxArray_real_T *cpu, emxArray_real_T
*inter)
{
int actualSize;
int i;
actualSize = 1;
for (i = 0; i < cpu->numDimensions; i++) {
actualSize *= cpu->size[i];
}
cudaMemcpy(cpu->data, inter->data, actualSize * sizeof(double),
cudaMemcpyDeviceToHost);
cudaMemcpy(cpu->size, inter->size, inter->numDimensions * sizeof(int),
cudaMemcpyDeviceToHost);
}
//
// Arguments : emxArray_boolean_T *inter
// Return Type : void
//
static void gpuEmxReset_boolean_T(emxArray_boolean_T *inter)
{
std::memset(inter, 0, sizeof(emxArray_boolean_T));
}
//
// Arguments : emxArray_real_T *inter
// Return Type : void
//
static void gpuEmxReset_real_T(emxArray_real_T *inter)
{
std::memset(inter, 0, sizeof(emxArray_real_T));
}
//
// Arguments : const emxArray_boolean_T *image
// emxArray_real_T *newimage
// Return Type : void
//
void MEDT(const emxArray_boolean_T *image, emxArray_real_T *newimage)
{
int i;
int i1;
emxArray_boolean_T *gpu_image;
dim3 grid;
dim3 block;
boolean_T validLaunchParams;
emxArray_real_T *gpu_newimage;
boolean_T newimage_dirtyOnGpu;
emxArray_boolean_T inter_image;
emxArray_real_T inter_newimage;
cudaMalloc(&gpu_newimage, 32ULL);
gpuEmxReset_real_T(&inter_newimage);
cudaMalloc(&gpu_image, 32ULL);
gpuEmxReset_boolean_T(&inter_image);
newimage_dirtyOnGpu = false;
  // number of data elements
i = image->size[0];
i1 = newimage->size[0] * newimage->size[1];
newimage->size[0] = image->size[0];
newimage->size[1] = image->size[1];
emxEnsureCapacity_real_T(newimage, i1);
validLaunchParams = mwGetLaunchParameters(static_cast<double>((((image->size[1]
- 1) + 1LL) * ((i - 1) + 1LL))), &grid, &block, 1024U, 65535U);
if (validLaunchParams) {
gpuEmxMemcpyCpuToGpu_boolean_T(image, &inter_image, gpu_image);
gpuEmxMemcpyCpuToGpu_real_T(newimage, &inter_newimage, gpu_newimage);
MEDT_kernel1<<<grid, block>>>(gpu_image, i, gpu_newimage);
newimage_dirtyOnGpu = true;
}
if (newimage_dirtyOnGpu) {
gpuEmxMemcpyGpuToCpu_real_T(newimage, &inter_newimage);
}
gpuEmxFree_boolean_T(&inter_image);
cudaFree(gpu_image);
gpuEmxFree_real_T(&inter_newimage);
cudaFree(gpu_newimage);
}
//
// File trailer for MEDT.cu
//
// [EOF]
//
|
018e1c694ca87b552d0692eb6e7341caf3881e04.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include "helper.cuh"
__global__ void preScan(unsigned int* deviceInput, unsigned int* deviceOutput, int cnt,
unsigned int* deviceSum)
{
extern __shared__ unsigned int temp[];
int cntInB = blockDim.x * 2;
int idxInG = cntInB * blockIdx.x + threadIdx.x;
int idxInB = threadIdx.x;
temp[2 * idxInB] = 0;
    temp[2 * idxInB + 1] = 0;
    // Barrier: the zero-fill above and the guarded loads below touch the same shared-memory
    // slots from different threads, so they must not overlap.
    __syncthreads();
if (idxInG < cnt)
{
temp[idxInB] = deviceInput[idxInG];
}
if (idxInG + blockDim.x < cnt)
{
temp[idxInB + blockDim.x] = deviceInput[idxInG + blockDim.x];
}
int offset = 1;
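    // Up-sweep (reduce) phase: accumulate pairwise sums up the tree so that
    // temp[cntInB - 1] ends up holding the total of this block's tile.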
for (int d = cntInB >> 1; d > 0; d>>=1)
{
__syncthreads();
if (threadIdx.x < d)
{
int ai = offset - 1 + offset * (threadIdx.x * 2);
int bi = ai + offset;
temp[bi] += temp[ai];
}
offset *= 2;
}
__syncthreads();
    // Before clearing the last element, save it (this block's total) to deviceSum.
if (threadIdx.x == 0)
{
deviceSum[blockIdx.x] = temp[cntInB - 1];
temp[cntInB - 1] = 0;
}
    // Down-sweep phase: walk back down the tree to turn the partial sums into an exclusive scan.
for (int d = 1; d < cntInB; d *=2)
{
offset >>= 1;
__syncthreads();
if (threadIdx.x < d)
{
int ai = offset - 1 + offset * (threadIdx.x * 2);
int bi = ai + offset;
unsigned int be = temp[bi];
temp[bi] += temp[ai];
temp[ai] = be;
}
}
if (idxInG < cnt)
{
deviceOutput[idxInG] = temp[idxInB];
}
if (idxInG + blockDim.x < cnt)
{
deviceOutput[idxInG + blockDim.x] = temp[idxInB + blockDim.x];
}
}
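// Adds the scanned block offset deviceInc[blockIdx.x] to every element of the block's tile,
// turning the independent per-block scans into one global exclusive scan.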
__global__ void addInc(unsigned int* deviceInput, unsigned int* deviceOutput, int eleCnt,
unsigned int* deviceInc)
{
/*
__shared__ int inc;
if (threadIdx.x == 0)
{
inc = deviceInc[blockIdx.x];
}
__syncthreads();
*/
int inc = deviceInc[blockIdx.x];
int cntInB = blockDim.x * 2;
int idxInG = blockIdx.x * cntInB + threadIdx.x;
if (idxInG < eleCnt)
{
deviceOutput[idxInG] = deviceInput[idxInG] + inc;
}
if (idxInG + blockDim.x < eleCnt)
{
deviceOutput[idxInG + blockDim.x] = deviceInput[idxInG + blockDim.x] + inc;
}
}
/* deviceInput: allocated and initialized device memory
 * deviceOutput: allocated device memory that receives the exclusive prefix sum
 * eleCnt: number of elements
 *
 * Returns the sum of all elements. With the launch configuration below (256 threads,
 * 512 elements per block) the single-level scan of block sums supports at most
 * 512 blocks, i.e. eleCnt <= 512 * 512 = 262144.
 */
unsigned int prefixSum(unsigned int* deviceInput, unsigned int* deviceOutput, int eleCnt)
{
/*Test:
int eleCnt = 1025;
unsigned int* deviceInput;
hipMalloc(&deviceInput, sizeof(unsigned int) * eleCnt);
unsigned int* deviceOutput;
hipMalloc(&deviceOutput, sizeof(unsigned int) * eleCnt);
unsigned int* hostInput;
hostInput = (unsigned int*)malloc(sizeof(unsigned int) * eleCnt);
for (size_t i = 0; i < eleCnt; ++i)
{
hostInput[i] = 1;
}
hipMemcpy(deviceInput, hostInput, sizeof(unsigned int) * eleCnt, hipMemcpyHostToDevice);
*/
dim3 blockDim(256);
int eleCntInB = blockDim.x * 2;
unsigned int sharedMemSize = eleCntInB * sizeof(unsigned int);
dim3 gridDim((eleCnt+ eleCntInB - 1) / eleCntInB);
int blockCnt = gridDim.x;
unsigned int* deviceSum;
hipMalloc(&deviceSum, sizeof(unsigned int)*blockCnt);
unsigned int* deviceInc;
hipMalloc(&deviceInc, sizeof(unsigned int)*blockCnt);
unsigned int* deviceTotalSum;
hipMalloc(&deviceTotalSum, sizeof(unsigned int));
hipLaunchKernelGGL(( preScan), dim3(gridDim), dim3(blockDim), sharedMemSize, 0, deviceInput, deviceOutput, eleCnt,
deviceSum);
hipLaunchKernelGGL(( preScan), dim3(1), dim3(blockDim), sharedMemSize, 0, deviceSum, deviceInc, blockCnt,
deviceTotalSum);
hipLaunchKernelGGL(( addInc), dim3(gridDim), dim3(blockDim), 0, 0, deviceOutput, deviceOutput, eleCnt,
deviceInc);
/*Test Output:
unsigned int* hostScanOut = (unsigned int*)malloc(sizeof(unsigned int) * eleCnt);
hipMemcpy(hostScanOut, deviceOutput, sizeof(unsigned int) * eleCnt, hipMemcpyDeviceToHost);
printf("Final result\n");
for (size_t i = 0; i < eleCnt; ++i)
{
printf("%d ", hostScanOut[i]);
}
*/
unsigned int hostTotalSum;
hipMemcpy(&hostTotalSum, deviceTotalSum, sizeof(unsigned int), hipMemcpyDeviceToHost);
hipFree(deviceInc);
hipFree(deviceSum);
hipFree(deviceTotalSum);
return hostTotalSum;
} | 018e1c694ca87b552d0692eb6e7341caf3881e04.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include "helper.cuh"
__global__ void preScan(unsigned int* deviceInput, unsigned int* deviceOutput, int cnt,
unsigned int* deviceSum)
{
extern __shared__ unsigned int temp[];
int cntInB = blockDim.x * 2;
int idxInG = cntInB * blockIdx.x + threadIdx.x;
int idxInB = threadIdx.x;
temp[2 * idxInB] = 0;
    temp[2 * idxInB + 1] = 0;
    // Barrier: the zero-fill above and the guarded loads below touch the same shared-memory
    // slots from different threads, so they must not overlap.
    __syncthreads();
if (idxInG < cnt)
{
temp[idxInB] = deviceInput[idxInG];
}
if (idxInG + blockDim.x < cnt)
{
temp[idxInB + blockDim.x] = deviceInput[idxInG + blockDim.x];
}
int offset = 1;
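    // Up-sweep (reduce) phase: accumulate pairwise sums up the tree so that
    // temp[cntInB - 1] ends up holding the total of this block's tile.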
for (int d = cntInB >> 1; d > 0; d>>=1)
{
__syncthreads();
if (threadIdx.x < d)
{
int ai = offset - 1 + offset * (threadIdx.x * 2);
int bi = ai + offset;
temp[bi] += temp[ai];
}
offset *= 2;
}
__syncthreads();
    // Before clearing the last element, save it (this block's total) to deviceSum.
if (threadIdx.x == 0)
{
deviceSum[blockIdx.x] = temp[cntInB - 1];
temp[cntInB - 1] = 0;
}
    // Down-sweep phase: walk back down the tree to turn the partial sums into an exclusive scan.
for (int d = 1; d < cntInB; d *=2)
{
offset >>= 1;
__syncthreads();
if (threadIdx.x < d)
{
int ai = offset - 1 + offset * (threadIdx.x * 2);
int bi = ai + offset;
unsigned int be = temp[bi];
temp[bi] += temp[ai];
temp[ai] = be;
}
}
if (idxInG < cnt)
{
deviceOutput[idxInG] = temp[idxInB];
}
if (idxInG + blockDim.x < cnt)
{
deviceOutput[idxInG + blockDim.x] = temp[idxInB + blockDim.x];
}
}
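// Adds the scanned block offset deviceInc[blockIdx.x] to every element of the block's tile,
// turning the independent per-block scans into one global exclusive scan.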
__global__ void addInc(unsigned int* deviceInput, unsigned int* deviceOutput, int eleCnt,
unsigned int* deviceInc)
{
/*
__shared__ int inc;
if (threadIdx.x == 0)
{
inc = deviceInc[blockIdx.x];
}
__syncthreads();
*/
int inc = deviceInc[blockIdx.x];
int cntInB = blockDim.x * 2;
int idxInG = blockIdx.x * cntInB + threadIdx.x;
if (idxInG < eleCnt)
{
deviceOutput[idxInG] = deviceInput[idxInG] + inc;
}
if (idxInG + blockDim.x < eleCnt)
{
deviceOutput[idxInG + blockDim.x] = deviceInput[idxInG + blockDim.x] + inc;
}
}
/* deviceInput: allocated and initialized device memory
 * deviceOutput: allocated device memory that receives the exclusive prefix sum
 * eleCnt: number of elements
 *
 * Returns the sum of all elements. With the launch configuration below (256 threads,
 * 512 elements per block) the single-level scan of block sums supports at most
 * 512 blocks, i.e. eleCnt <= 512 * 512 = 262144.
 */
unsigned int prefixSum(unsigned int* deviceInput, unsigned int* deviceOutput, int eleCnt)
{
/*Test:
int eleCnt = 1025;
unsigned int* deviceInput;
cudaMalloc(&deviceInput, sizeof(unsigned int) * eleCnt);
unsigned int* deviceOutput;
cudaMalloc(&deviceOutput, sizeof(unsigned int) * eleCnt);
unsigned int* hostInput;
hostInput = (unsigned int*)malloc(sizeof(unsigned int) * eleCnt);
for (size_t i = 0; i < eleCnt; ++i)
{
hostInput[i] = 1;
}
cudaMemcpy(deviceInput, hostInput, sizeof(unsigned int) * eleCnt, cudaMemcpyHostToDevice);
*/
dim3 blockDim(256);
int eleCntInB = blockDim.x * 2;
unsigned int sharedMemSize = eleCntInB * sizeof(unsigned int);
dim3 gridDim((eleCnt+ eleCntInB - 1) / eleCntInB);
int blockCnt = gridDim.x;
unsigned int* deviceSum;
cudaMalloc(&deviceSum, sizeof(unsigned int)*blockCnt);
unsigned int* deviceInc;
cudaMalloc(&deviceInc, sizeof(unsigned int)*blockCnt);
unsigned int* deviceTotalSum;
cudaMalloc(&deviceTotalSum, sizeof(unsigned int));
preScan<<<gridDim, blockDim, sharedMemSize>>>(deviceInput, deviceOutput, eleCnt,
deviceSum);
preScan<<<1, blockDim, sharedMemSize>>>(deviceSum, deviceInc, blockCnt,
deviceTotalSum);
addInc<<<gridDim, blockDim>>>(deviceOutput, deviceOutput, eleCnt,
deviceInc);
/*Test Output:
unsigned int* hostScanOut = (unsigned int*)malloc(sizeof(unsigned int) * eleCnt);
cudaMemcpy(hostScanOut, deviceOutput, sizeof(unsigned int) * eleCnt, cudaMemcpyDeviceToHost);
printf("Final result\n");
for (size_t i = 0; i < eleCnt; ++i)
{
printf("%d ", hostScanOut[i]);
}
*/
unsigned int hostTotalSum;
cudaMemcpy(&hostTotalSum, deviceTotalSum, sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaFree(deviceInc);
cudaFree(deviceSum);
cudaFree(deviceTotalSum);
return hostTotalSum;
} |
c61690a3438f0d3d0cd312e1c312036c776fb263.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/copying.hpp>
#include <cudf/types.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy_range.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/detail/copy_range.cuh>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/mr/device_memory_resource.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <hip/hip_runtime.h>
#include <memory>
namespace {
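// Copies the range [source_begin, source_end) of the source column into the target range
// starting at target_begin, choosing iterators based on whether the source has nulls so
// that the target's validity mask is updated along with the values.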
template <typename T>
void in_place_copy_range(
cudf::column_view const& source, cudf::mutable_column_view& target,
cudf::size_type source_begin, cudf::size_type source_end,
cudf::size_type target_begin,
hipStream_t stream = 0) {
auto p_source_device_view =
cudf::column_device_view::create(source, stream);
if (p_source_device_view->has_nulls()) {
cudf::experimental::detail::copy_range(
cudf::experimental::detail::make_null_replacement_iterator<T>(
*p_source_device_view, T()) + source_begin,
cudf::experimental::detail::make_validity_iterator(
*p_source_device_view) + source_begin,
target, target_begin, target_begin + (source_end - source_begin),
stream);
}
else {
cudf::experimental::detail::copy_range(
p_source_device_view->begin<T>() + source_begin,
thrust::make_constant_iterator(true), // dummy
target, target_begin, target_begin + (source_end - source_begin),
stream);
}
}
struct in_place_copy_range_dispatch {
cudf::column_view const& source;
cudf::mutable_column_view& target;
template <typename T>
std::enable_if_t<cudf::is_fixed_width<T>(), void>
operator()(cudf::size_type source_begin, cudf::size_type source_end,
cudf::size_type target_begin, hipStream_t stream = 0) {
in_place_copy_range<T>(
source, target, source_begin, source_end, target_begin, stream);
}
template <typename T>
std::enable_if_t<not cudf::is_fixed_width<T>(), void>
operator()(cudf::size_type source_begin, cudf::size_type source_end,
cudf::size_type target_begin, hipStream_t stream = 0) {
CUDF_FAIL("in-place copy does not work for variable width types.");
}
};
struct out_of_place_copy_range_dispatch {
cudf::column_view const& source;
cudf::column_view const& target;
template <typename T>
std::enable_if_t<cudf::is_fixed_width<T>(), std::unique_ptr<cudf::column>>
operator()(
cudf::size_type source_begin, cudf::size_type source_end,
cudf::size_type target_begin,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0) {
auto p_ret = std::make_unique<cudf::column>(target, stream, mr);
if ((!p_ret->nullable()) && source.has_nulls(source_begin, source_end)) {
p_ret->set_null_mask(
cudf::create_null_mask(p_ret->size(), cudf::ALL_VALID, stream, mr), 0);
}
if (source_end != source_begin) { // otherwise no-op
auto ret_view = p_ret->mutable_view();
in_place_copy_range<T>(
source, ret_view, source_begin, source_end, target_begin, stream);
}
return p_ret;
}
template <typename T>
std::enable_if_t<std::is_same<cudf::string_view, T>::value,
std::unique_ptr<cudf::column>>
operator()(
cudf::size_type source_begin, cudf::size_type source_end,
cudf::size_type target_begin,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0) {
auto target_end = target_begin + (source_end - source_begin);
auto p_source_device_view =
cudf::column_device_view::create(source, stream);
if (source.has_nulls()) {
return cudf::strings::detail::copy_range(
cudf::experimental::detail::
make_null_replacement_iterator<cudf::string_view>(
*p_source_device_view, cudf::string_view()) + source_begin,
cudf::experimental::detail::make_validity_iterator(
*p_source_device_view) + source_begin,
cudf::strings_column_view(target), target_begin, target_end,
mr, stream);
}
else {
return cudf::strings::detail::copy_range(
p_source_device_view->begin<cudf::string_view>() + source_begin,
thrust::make_constant_iterator(true),
cudf::strings_column_view(target), target_begin, target_end,
mr, stream);
}
}
};
}
namespace cudf {
namespace experimental {
namespace detail {
void copy_range(column_view const& source, mutable_column_view& target,
size_type source_begin, size_type source_end,
size_type target_begin,
hipStream_t stream) {
CUDF_EXPECTS(cudf::is_fixed_width(target.type()) == true,
"In-place copy_range does not support variable-sized types.");
CUDF_EXPECTS((source_begin <= source_end) &&
(source_begin >= 0) &&
(source_begin < source.size()) &&
(source_end <= source.size()) &&
(target_begin >= 0) &&
(target_begin < target.size()) &&
(target_begin + (source_end - source_begin) <=
target.size()) &&
// overflow
(target_begin + (source_end - source_begin) >= target_begin),
"Range is out of bounds.");
CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch.");
CUDF_EXPECTS((target.nullable() == true) || (source.has_nulls() == false),
"target should be nullable if source has null values.");
if (source_end != source_begin) { // otherwise no-op
cudf::experimental::type_dispatcher(
target.type(),
in_place_copy_range_dispatch{source, target},
source_begin, source_end, target_begin, stream);
}
}
std::unique_ptr<column> copy_range(column_view const& source,
column_view const& target,
size_type source_begin, size_type source_end,
size_type target_begin,
rmm::mr::device_memory_resource* mr,
hipStream_t stream) {
CUDF_EXPECTS((source_begin >= 0) &&
(source_begin <= source_end) &&
(source_begin < source.size()) &&
(source_end <= source.size()) &&
(target_begin >= 0) &&
(target_begin < target.size()) &&
(target_begin + (source_end - source_begin) <=
target.size()) &&
// overflow
(target_begin + (source_end - source_begin) >= target_begin),
"Range is out of bounds.");
CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch.");
return cudf::experimental::type_dispatcher(
target.type(),
out_of_place_copy_range_dispatch{source, target},
source_begin, source_end, target_begin, mr, stream);
}
} // namespace detail
void copy_range(column_view const& source, mutable_column_view& target,
size_type source_begin, size_type source_end,
size_type target_begin) {
return detail::copy_range(source, target, source_begin, source_end,
target_begin, 0);
}
std::unique_ptr<column> copy_range(column_view const& source,
column_view const& target,
size_type source_begin, size_type source_end,
size_type target_begin,
rmm::mr::device_memory_resource* mr) {
return detail::copy_range(source, target, source_begin, source_end,
target_begin, mr, 0);
}
} // namespace experimental
} // namespace cudf
| c61690a3438f0d3d0cd312e1c312036c776fb263.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/copying.hpp>
#include <cudf/types.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy_range.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/detail/copy_range.cuh>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/mr/device_memory_resource.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <cuda_runtime.h>
#include <memory>
namespace {
template <typename T>
void in_place_copy_range(
cudf::column_view const& source, cudf::mutable_column_view& target,
cudf::size_type source_begin, cudf::size_type source_end,
cudf::size_type target_begin,
cudaStream_t stream = 0) {
auto p_source_device_view =
cudf::column_device_view::create(source, stream);
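  // A nullable source pairs a null-replacement value iterator with a validity
  // iterator so copy_range can update the target's null mask as well; a
  // non-nullable source only needs the constant 'true' validity iterator below.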
if (p_source_device_view->has_nulls()) {
cudf::experimental::detail::copy_range(
cudf::experimental::detail::make_null_replacement_iterator<T>(
*p_source_device_view, T()) + source_begin,
cudf::experimental::detail::make_validity_iterator(
*p_source_device_view) + source_begin,
target, target_begin, target_begin + (source_end - source_begin),
stream);
}
else {
cudf::experimental::detail::copy_range(
p_source_device_view->begin<T>() + source_begin,
thrust::make_constant_iterator(true), // dummy
target, target_begin, target_begin + (source_end - source_begin),
stream);
}
}
struct in_place_copy_range_dispatch {
cudf::column_view const& source;
cudf::mutable_column_view& target;
template <typename T>
std::enable_if_t<cudf::is_fixed_width<T>(), void>
operator()(cudf::size_type source_begin, cudf::size_type source_end,
cudf::size_type target_begin, cudaStream_t stream = 0) {
in_place_copy_range<T>(
source, target, source_begin, source_end, target_begin, stream);
}
template <typename T>
std::enable_if_t<not cudf::is_fixed_width<T>(), void>
operator()(cudf::size_type source_begin, cudf::size_type source_end,
cudf::size_type target_begin, cudaStream_t stream = 0) {
CUDF_FAIL("in-place copy does not work for variable width types.");
}
};
struct out_of_place_copy_range_dispatch {
cudf::column_view const& source;
cudf::column_view const& target;
template <typename T>
std::enable_if_t<cudf::is_fixed_width<T>(), std::unique_ptr<cudf::column>>
operator()(
cudf::size_type source_begin, cudf::size_type source_end,
cudf::size_type target_begin,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0) {
auto p_ret = std::make_unique<cudf::column>(target, stream, mr);
if ((!p_ret->nullable()) && source.has_nulls(source_begin, source_end)) {
p_ret->set_null_mask(
cudf::create_null_mask(p_ret->size(), cudf::ALL_VALID, stream, mr), 0);
}
if (source_end != source_begin) { // otherwise no-op
auto ret_view = p_ret->mutable_view();
in_place_copy_range<T>(
source, ret_view, source_begin, source_end, target_begin, stream);
}
return p_ret;
}
template <typename T>
std::enable_if_t<std::is_same<cudf::string_view, T>::value,
std::unique_ptr<cudf::column>>
operator()(
cudf::size_type source_begin, cudf::size_type source_end,
cudf::size_type target_begin,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0) {
auto target_end = target_begin + (source_end - source_begin);
auto p_source_device_view =
cudf::column_device_view::create(source, stream);
if (source.has_nulls()) {
return cudf::strings::detail::copy_range(
cudf::experimental::detail::
make_null_replacement_iterator<cudf::string_view>(
*p_source_device_view, cudf::string_view()) + source_begin,
cudf::experimental::detail::make_validity_iterator(
*p_source_device_view) + source_begin,
cudf::strings_column_view(target), target_begin, target_end,
mr, stream);
}
else {
return cudf::strings::detail::copy_range(
p_source_device_view->begin<cudf::string_view>() + source_begin,
thrust::make_constant_iterator(true),
cudf::strings_column_view(target), target_begin, target_end,
mr, stream);
}
}
};
}
namespace cudf {
namespace experimental {
namespace detail {
void copy_range(column_view const& source, mutable_column_view& target,
size_type source_begin, size_type source_end,
size_type target_begin,
cudaStream_t stream) {
CUDF_EXPECTS(cudf::is_fixed_width(target.type()) == true,
"In-place copy_range does not support variable-sized types.");
CUDF_EXPECTS((source_begin <= source_end) &&
(source_begin >= 0) &&
(source_begin < source.size()) &&
(source_end <= source.size()) &&
(target_begin >= 0) &&
(target_begin < target.size()) &&
(target_begin + (source_end - source_begin) <=
target.size()) &&
// overflow
(target_begin + (source_end - source_begin) >= target_begin),
"Range is out of bounds.");
CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch.");
CUDF_EXPECTS((target.nullable() == true) || (source.has_nulls() == false),
"target should be nullable if source has null values.");
if (source_end != source_begin) { // otherwise no-op
cudf::experimental::type_dispatcher(
target.type(),
in_place_copy_range_dispatch{source, target},
source_begin, source_end, target_begin, stream);
}
}
std::unique_ptr<column> copy_range(column_view const& source,
column_view const& target,
size_type source_begin, size_type source_end,
size_type target_begin,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream) {
CUDF_EXPECTS((source_begin >= 0) &&
(source_begin <= source_end) &&
(source_begin < source.size()) &&
(source_end <= source.size()) &&
(target_begin >= 0) &&
(target_begin < target.size()) &&
(target_begin + (source_end - source_begin) <=
target.size()) &&
// overflow
(target_begin + (source_end - source_begin) >= target_begin),
"Range is out of bounds.");
CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch.");
return cudf::experimental::type_dispatcher(
target.type(),
out_of_place_copy_range_dispatch{source, target},
source_begin, source_end, target_begin, mr, stream);
}
} // namespace detail
void copy_range(column_view const& source, mutable_column_view& target,
size_type source_begin, size_type source_end,
size_type target_begin) {
return detail::copy_range(source, target, source_begin, source_end,
target_begin, 0);
}
std::unique_ptr<column> copy_range(column_view const& source,
column_view const& target,
size_type source_begin, size_type source_end,
size_type target_begin,
rmm::mr::device_memory_resource* mr) {
return detail::copy_range(source, target, source_begin, source_end,
target_begin, mr, 0);
}
} // namespace experimental
} // namespace cudf
|
98d4238a32dd14e4b1a7508f55d41dbcec61985c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void offset_access(float *a, int s, int n)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid + s < n)
{
a[tid + s] = a[tid + s] + 1;
}
}
__global__ void strided_access(float *a, int s, int n)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid * s < n)
{
a[tid * s] = a[tid * s] + 1;
}
}
int main()
{
hipEvent_t startEvent, stopEvent;
hipEventCreate(&startEvent);
hipEventCreate(&stopEvent);
hipError_t err = hipSuccess;
int nMB = 128;
float ms;
int blockSize = 1024;
int n = nMB * 1024 * 1024 / sizeof(float);
float *d_a;
err = hipMalloc(&d_a, n * sizeof(float));
if (err != hipSuccess)
{
fprintf(stderr, "Memory not allocated (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
int i;
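    // Sweep strides 0..32: each pass clears the buffer, times one strided_access
    // launch with hipEvent timers, and prints "stride, elapsed ms"
    // (offset_access is defined above but not exercised here).
    // Note: hipMemset's value argument is an int applied per byte; the 0.0
    // literal simply converts to 0.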
for (i = 0; i <= 32; ++i)
{
err = hipMemset(d_a, 0.0, n * sizeof(float));
if (err != hipSuccess)
{
fprintf(stderr, "Data not written (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipEventRecord(startEvent);
hipLaunchKernelGGL(( strided_access), dim3(n / blockSize), dim3(blockSize), 0, 0, d_a, i, n);
hipEventRecord(stopEvent);
hipEventSynchronize(stopEvent);
hipEventElapsedTime(&ms, startEvent, stopEvent);
printf("%d, %f\n", i, ms);
}
printf("Just checking\n");
} | 98d4238a32dd14e4b1a7508f55d41dbcec61985c.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
__global__ void offset_access(float *a, int s, int n)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid + s < n)
{
a[tid + s] = a[tid + s] + 1;
}
}
__global__ void strided_access(float *a, int s, int n)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid * s < n)
{
a[tid * s] = a[tid * s] + 1;
}
}
int main()
{
cudaEvent_t startEvent, stopEvent;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
cudaError_t err = cudaSuccess;
int nMB = 128;
float ms;
int blockSize = 1024;
int n = nMB * 1024 * 1024 / sizeof(float);
float *d_a;
err = cudaMalloc(&d_a, n * sizeof(float));
if (err != cudaSuccess)
{
fprintf(stderr, "Memory not allocated (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
int i;
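    // Sweep strides 0..32: each pass clears the buffer, times one strided_access
    // launch with cudaEvent timers, and prints "stride, elapsed ms"
    // (offset_access is defined above but not exercised here).
    // Note: cudaMemset's value argument is an int applied per byte; the 0.0
    // literal simply converts to 0.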
for (i = 0; i <= 32; ++i)
{
err = cudaMemset(d_a, 0.0, n * sizeof(float));
if (err != cudaSuccess)
{
fprintf(stderr, "Data not written (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaEventRecord(startEvent);
strided_access<<<n / blockSize, blockSize>>>(d_a, i, n);
cudaEventRecord(stopEvent);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&ms, startEvent, stopEvent);
printf("%d, %f\n", i, ms);
}
printf("Just checking\n");
} |
4a7517f5cce203f47d53cc5b61f0f6072b4f50ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <caffe2/core/context_gpu.h>
#include "caffe2/operator/diagonal_op.h"
namespace caffe2 {
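// diagonal_op_step: flat row-major distance between consecutive elements of the
// main diagonal, i.e. the sum of the strides of all dimensions.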
int diagonal_op_step(const Tensor& tensor) {
auto step = 0;
for (auto d : tensor.sizes().vec()) {
step = step * d + 1;
}
return step;
}
int diagonal_op_size(const Tensor& tensor) {
auto size = tensor.dim(0);
for (auto d : tensor.sizes().vec()) {
if (size > d) size = d;
}
return size;
}
int diagonal_op_offset(const Tensor& tensor,
const std::vector<int64_t>& offset) {
auto off = 0, i = 0;
for (auto d : tensor.sizes().vec()) {
off = off * d + offset[i++];
}
return off;
}
namespace {
__global__ void DiagonalKernel(const int N, const int C, const int D,
const float* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = X[i * C + D]; }
}
} // namespace
template <>
bool DiagonalOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
auto size = diagonal_op_size(X);
Y->Resize(size);
if (size > 0) {
auto step = diagonal_op_step(X);
auto offset = diagonal_op_offset(X, offset_);
hipLaunchKernelGGL(( DiagonalKernel), dim3(CAFFE_GET_BLOCKS(Y->size())), dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
Y->size(), step, offset, X.data<float>(), Y->mutable_data<float>());
}
return true;
}
namespace {
__global__ void DiagonalGradientKernel(const int N, const int C, const int D,
const float* dY, float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = (i >= D && (i - D) % C == 0 ? dY[i] : 0);
}
}
} // namespace
template <>
bool DiagonalGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
dX->ResizeLike(X);
auto size = diagonal_op_size(X);
DCHECK_EQ(dY.size(), size);
if (size > 0) {
auto step = diagonal_op_step(X);
auto offset = diagonal_op_offset(X, offset_);
hipLaunchKernelGGL(( DiagonalGradientKernel), dim3(CAFFE_GET_BLOCKS(dX->size())),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
dX->size(), step, offset, dY.data<float>(), dX->mutable_data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(Diagonal, DiagonalOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(DiagonalGradient,
DiagonalGradientOp<float, CUDAContext>);
} // namespace caffe2
| 4a7517f5cce203f47d53cc5b61f0f6072b4f50ad.cu | #include <caffe2/core/context_gpu.h>
#include "caffe2/operator/diagonal_op.h"
namespace caffe2 {
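// diagonal_op_step: flat row-major distance between consecutive elements of the
// main diagonal, i.e. the sum of the strides of all dimensions.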
int diagonal_op_step(const Tensor& tensor) {
auto step = 0;
for (auto d : tensor.sizes().vec()) {
step = step * d + 1;
}
return step;
}
int diagonal_op_size(const Tensor& tensor) {
auto size = tensor.dim(0);
for (auto d : tensor.sizes().vec()) {
if (size > d) size = d;
}
return size;
}
int diagonal_op_offset(const Tensor& tensor,
const std::vector<int64_t>& offset) {
auto off = 0, i = 0;
for (auto d : tensor.sizes().vec()) {
off = off * d + offset[i++];
}
return off;
}
namespace {
__global__ void DiagonalKernel(const int N, const int C, const int D,
const float* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = X[i * C + D]; }
}
} // namespace
template <>
bool DiagonalOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
auto size = diagonal_op_size(X);
Y->Resize(size);
if (size > 0) {
auto step = diagonal_op_step(X);
auto offset = diagonal_op_offset(X, offset_);
DiagonalKernel<<<CAFFE_GET_BLOCKS(Y->size()), CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
Y->size(), step, offset, X.data<float>(), Y->mutable_data<float>());
}
return true;
}
namespace {
__global__ void DiagonalGradientKernel(const int N, const int C, const int D,
const float* dY, float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = (i >= D && (i - D) % C == 0 ? dY[i] : 0);
}
}
} // namespace
template <>
bool DiagonalGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
dX->ResizeLike(X);
auto size = diagonal_op_size(X);
DCHECK_EQ(dY.size(), size);
if (size > 0) {
auto step = diagonal_op_step(X);
auto offset = diagonal_op_offset(X, offset_);
DiagonalGradientKernel<<<CAFFE_GET_BLOCKS(dX->size()),
CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
dX->size(), step, offset, dY.data<float>(), dX->mutable_data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(Diagonal, DiagonalOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(DiagonalGradient,
DiagonalGradientOp<float, CUDAContext>);
} // namespace caffe2
|
967cf5396e3ca38decbfe13a0cbaa8e0738adc87.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlascl_2x2.cu, normal z -> d, Tue Aug 30 09:38:32 2016
@author Ichitaro Yamazaki
*/
#include "magma_internal.h"
#define NB 64
#define A(i,j) (A[(i) + (j)*lda])
#define W(i,j) (W[(i) + (j)*ldw])
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
dlascl_2x2_lower(
int m,
const double* W, int ldw,
double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
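    // Rows 0-1 of W hold the 2x2 pivot block; the scalars formed below apply its
    // inverse to rows 2..m+1 of W (written into A) without forming the inverse
    // explicitly.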
double D21 = W( 1, 0 );
double D11 = MAGMA_D_DIV( W( 1, 1 ), D21 );
double D22 = MAGMA_D_DIV( W( 0, 0 ), MAGMA_D_CONJ( D21 ) );
double T = 1.0 / ( MAGMA_D_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_D_DIV( MAGMA_D_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = MAGMA_D_CONJ( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) );
A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) );
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
dlascl_2x2_upper(
int m,
const double *W, int ldw,
double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double D21 = W( m, 1 );
double D11 = MAGMA_D_DIV( W( m+1, 1 ), MAGMA_D_CONJ( D21 ) );
double D22 = MAGMA_D_DIV( W( m, 0 ), D21 );
double T = 1.0 / ( MAGMA_D_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_D_DIV( MAGMA_D_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = D21*( D11*W( ind, 0 )-W( ind, 1 ) );
A( ind, 1 ) = MAGMA_D_CONJ( D21 )*( D22*W( ind, 1 )-W( ind, 0 ) );
}
}
/***************************************************************************//**
Purpose
-------
DLASCL_2x2 scales the M by M real matrix A by the 2-by-2 pivot.
TYPE specifies that A may be upper or lower triangular.
Arguments
---------
@param[in]
type magma_type_t
            TYPE indicates the storage type of the input matrix A.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
dW DOUBLE PRECISION vector, dimension (2*lddw)
The matrix containing the 2-by-2 pivot.
@param[in]
lddw INTEGER
            The leading dimension of the array W.  LDDW >= max(1,M).
@param[in,out]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The matrix to be scaled by dW. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lascl_2x2
*******************************************************************************/
extern "C" void
magmablas_dlascl_2x2_q(
magma_type_t type, magma_int_t m,
magmaDouble_const_ptr dW, magma_int_t lddw,
magmaDouble_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( ldda < max(1,m) )
*info = -4;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
if (type == MagmaLower) {
hipLaunchKernelGGL(( dlascl_2x2_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dW, lddw, dA, ldda);
}
else {
hipLaunchKernelGGL(( dlascl_2x2_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dW, lddw, dA, ldda);
}
}
| 967cf5396e3ca38decbfe13a0cbaa8e0738adc87.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlascl_2x2.cu, normal z -> d, Tue Aug 30 09:38:32 2016
@author Ichitaro Yamazaki
*/
#include "magma_internal.h"
#define NB 64
#define A(i,j) (A[(i) + (j)*lda])
#define W(i,j) (W[(i) + (j)*ldw])
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
dlascl_2x2_lower(
int m,
const double* W, int ldw,
double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
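    // Rows 0-1 of W hold the 2x2 pivot block; the scalars formed below apply its
    // inverse to rows 2..m+1 of W (written into A) without forming the inverse
    // explicitly.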
double D21 = W( 1, 0 );
double D11 = MAGMA_D_DIV( W( 1, 1 ), D21 );
double D22 = MAGMA_D_DIV( W( 0, 0 ), MAGMA_D_CONJ( D21 ) );
double T = 1.0 / ( MAGMA_D_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_D_DIV( MAGMA_D_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = MAGMA_D_CONJ( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) );
A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) );
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
dlascl_2x2_upper(
int m,
const double *W, int ldw,
double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double D21 = W( m, 1 );
double D11 = MAGMA_D_DIV( W( m+1, 1 ), MAGMA_D_CONJ( D21 ) );
double D22 = MAGMA_D_DIV( W( m, 0 ), D21 );
double T = 1.0 / ( MAGMA_D_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_D_DIV( MAGMA_D_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = D21*( D11*W( ind, 0 )-W( ind, 1 ) );
A( ind, 1 ) = MAGMA_D_CONJ( D21 )*( D22*W( ind, 1 )-W( ind, 0 ) );
}
}
/***************************************************************************//**
Purpose
-------
DLASCL_2x2 scales the M by M real matrix A by the 2-by-2 pivot.
TYPE specifies that A may be upper or lower triangular.
Arguments
---------
@param[in]
type magma_type_t
            TYPE indicates the storage type of the input matrix A.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
dW DOUBLE PRECISION vector, dimension (2*lddw)
The matrix containing the 2-by-2 pivot.
@param[in]
lddw INTEGER
            The leading dimension of the array W.  LDDW >= max(1,M).
@param[in,out]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The matrix to be scaled by dW. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lascl_2x2
*******************************************************************************/
extern "C" void
magmablas_dlascl_2x2_q(
magma_type_t type, magma_int_t m,
magmaDouble_const_ptr dW, magma_int_t lddw,
magmaDouble_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( ldda < max(1,m) )
*info = -4;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
if (type == MagmaLower) {
dlascl_2x2_lower <<< grid, threads, 0, queue->cuda_stream() >>> (m, dW, lddw, dA, ldda);
}
else {
dlascl_2x2_upper <<< grid, threads, 0, queue->cuda_stream() >>> (m, dW, lddw, dA, ldda);
}
}
|
f6914dbad98da0796696f41090b3a8c123b9f4d2.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
int main()
{
return 0;
} | f6914dbad98da0796696f41090b3a8c123b9f4d2.cu | #include <cuda.h>
int main()
{
return 0;
} |
bbox_util.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <functional>
#include <map>
#include <vector>
//#include "thrust/functional.h"
//#include "thrust/sort.h"
#include "caffe/common.hpp"
#include "caffe/util/bbox_util.hpp"
namespace caffe {
template <typename Dtype>
__host__ __device__ Dtype BBoxSizeGPU(const Dtype* bbox,
const bool normalized) {
if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) {
// If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0.
return Dtype(0.);
} else {
const Dtype width = bbox[2] - bbox[0];
const Dtype height = bbox[3] - bbox[1];
if (normalized) {
return width * height;
} else {
// If bbox is not within range [0, 1].
return (width + 1) * (height + 1);
}
}
}
template __host__ __device__ float BBoxSizeGPU(const float* bbox,
const bool normalized);
template __host__ __device__ double BBoxSizeGPU(const double* bbox,
const bool normalized);
template <typename Dtype>
__host__ __device__ Dtype JaccardOverlapGPU(const Dtype* bbox1,
const Dtype* bbox2) {
if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] ||
bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) {
return Dtype(0.);
} else {
const Dtype inter_xmin = max(bbox1[0], bbox2[0]);
const Dtype inter_ymin = max(bbox1[1], bbox2[1]);
const Dtype inter_xmax = min(bbox1[2], bbox2[2]);
const Dtype inter_ymax = min(bbox1[3], bbox2[3]);
const Dtype inter_width = inter_xmax - inter_xmin;
const Dtype inter_height = inter_ymax - inter_ymin;
const Dtype inter_size = inter_width * inter_height;
const Dtype bbox1_size = BBoxSizeGPU(bbox1);
const Dtype bbox2_size = BBoxSizeGPU(bbox2);
return inter_size / (bbox1_size + bbox2_size - inter_size);
}
}
template __host__ __device__ float JaccardOverlapGPU(const float* bbox1,
const float* bbox2);
template __host__ __device__ double JaccardOverlapGPU(const double* bbox1,
const double* bbox2);
template <typename Dtype>
__device__ Dtype Min(const Dtype x, const Dtype y) {
return x < y ? x : y;
}
template <typename Dtype>
__device__ Dtype Max(const Dtype x, const Dtype y) {
return x > y ? x : y;
}
template <typename Dtype>
__device__ void ClipBBoxGPU(const Dtype* bbox, Dtype* clip_bbox) {
for (int i = 0; i < 4; ++i) {
clip_bbox[i] = Max(Min(bbox[i], Dtype(1.)), Dtype(0.));
}
}
template __device__ void ClipBBoxGPU(const float* bbox, float* clip_bbox);
template __device__ void ClipBBoxGPU(const double* bbox, double* clip_bbox);
template <typename Dtype>
__global__ void DecodeBBoxesKernel(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
Dtype* bbox_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % 4;
const int c = (index / 4) % num_loc_classes;
const int d = (index / 4 / num_loc_classes) % num_priors;
if (!share_location && c == background_label_id) {
// Ignore background class if not share_location.
return;
}
const int pi = d * 4;
const int vi = pi + num_priors * 4;
if (code_type == PriorBoxParameter_CodeType_CORNER) {
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index];
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
prior_data[pi + i] + loc_data[index] * prior_data[vi + i];
}
} else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
const Dtype p_xmin = prior_data[pi];
const Dtype p_ymin = prior_data[pi + 1];
const Dtype p_xmax = prior_data[pi + 2];
const Dtype p_ymax = prior_data[pi + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
const Dtype prior_center_x = (p_xmin + p_xmax) / 2.;
const Dtype prior_center_y = (p_ymin + p_ymax) / 2.;
const Dtype xmin = loc_data[index - i];
const Dtype ymin = loc_data[index - i + 1];
const Dtype xmax = loc_data[index - i + 2];
const Dtype ymax = loc_data[index - i + 3];
Dtype decode_bbox_center_x, decode_bbox_center_y;
Dtype decode_bbox_width, decode_bbox_height;
if (variance_encoded_in_target) {
        // variance is encoded in target, we simply need to restore the offset
// predictions.
decode_bbox_center_x = xmin * prior_width + prior_center_x;
decode_bbox_center_y = ymin * prior_height + prior_center_y;
decode_bbox_width = exp(xmax) * prior_width;
decode_bbox_height = exp(ymax) * prior_height;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
decode_bbox_center_x =
prior_data[vi] * xmin * prior_width + prior_center_x;
decode_bbox_center_y =
prior_data[vi + 1] * ymin * prior_height + prior_center_y;
decode_bbox_width =
exp(prior_data[vi + 2] * xmax) * prior_width;
decode_bbox_height =
exp(prior_data[vi + 3] * ymax) * prior_height;
}
switch (i) {
case 0:
bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.;
break;
case 1:
bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.;
break;
case 2:
bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.;
break;
case 3:
bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.;
break;
}
} else {
// Unknown code type.
}
}
}
template <typename Dtype>
void DecodeBBoxesGPU(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
Dtype* bbox_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DecodeBBoxesKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, loc_data, prior_data, code_type,
variance_encoded_in_target, num_priors, share_location, num_loc_classes,
background_label_id, bbox_data);
CUDA_POST_KERNEL_CHECK;
}
template void DecodeBBoxesGPU(const int nthreads,
const float* loc_data, const float* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
float* bbox_data);
template void DecodeBBoxesGPU(const int nthreads,
const double* loc_data, const double* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
double* bbox_data);
template <typename Dtype>
__global__ void PermuteDataKernel(const int nthreads,
const Dtype* data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % num_dim;
const int c = (index / num_dim) % num_classes;
const int d = (index / num_dim / num_classes) % num_data;
const int n = index / num_dim / num_classes / num_data;
const int new_index = ((n * num_classes + c) * num_data + d) * num_dim + i;
new_data[new_index] = data[index];
}
}
template <typename Dtype>
void PermuteDataGPU(const int nthreads,
const Dtype* data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PermuteDataKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, data, num_classes, num_data,
num_dim, new_data);
CUDA_POST_KERNEL_CHECK;
}
template void PermuteDataGPU(const int nthreads,
const float* data, const int num_classes, const int num_data,
const int num_dim, float* new_data);
template void PermuteDataGPU(const int nthreads,
const double* data, const int num_classes, const int num_data,
const int num_dim, double* new_data);
template <typename Dtype>
__global__ void ComputeOverlappedKernel(const int nthreads,
const Dtype* bbox_data, const int num_bboxes, const int num_classes,
const Dtype overlap_threshold, bool* overlapped_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int j = index % num_bboxes;
const int i = (index / num_bboxes) % num_bboxes;
if (i == j) {
// Ignore same bbox.
return;
}
const int c = (index / num_bboxes / num_bboxes) % num_classes;
const int n = index / num_bboxes / num_bboxes / num_classes;
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = ((n * num_bboxes + i) * num_classes + c) * 4;
const int start_loc_j = ((n * num_bboxes + j) * num_classes + c) * 4;
const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
bbox_data + start_loc_j);
if (overlap > overlap_threshold) {
overlapped_data[index] = true;
}
}
}
template <typename Dtype>
void ComputeOverlappedGPU(const int nthreads,
const Dtype* bbox_data, const int num_bboxes, const int num_classes,
const Dtype overlap_threshold, bool* overlapped_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ComputeOverlappedKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bbox_data, num_bboxes, num_classes,
overlap_threshold, overlapped_data);
CUDA_POST_KERNEL_CHECK;
}
template void ComputeOverlappedGPU(const int nthreads,
const float* bbox_data, const int num_bboxes, const int num_classes,
const float overlap_threshold, bool* overlapped_data);
template void ComputeOverlappedGPU(const int nthreads,
const double* bbox_data, const int num_bboxes, const int num_classes,
const double overlap_threshold, bool* overlapped_data);
template <typename Dtype>
__global__ void ComputeOverlappedByIdxKernel(const int nthreads,
const Dtype* bbox_data, const Dtype overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int j = index % num_idx;
const int i = (index / num_idx);
if (i == j) {
// Ignore same bbox.
return;
}
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = idx[i] * 4;
const int start_loc_j = idx[j] * 4;
const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
bbox_data + start_loc_j);
if (overlap > overlap_threshold) {
overlapped_data[index] = true;
}
}
}
template <typename Dtype>
void ComputeOverlappedByIdxGPU(const int nthreads,
const Dtype* bbox_data, const Dtype overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ComputeOverlappedByIdxKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bbox_data, overlap_threshold,
idx, num_idx, overlapped_data);
CUDA_POST_KERNEL_CHECK;
}
template void ComputeOverlappedByIdxGPU(const int nthreads,
const float* bbox_data, const float overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
template void ComputeOverlappedByIdxGPU(const int nthreads,
const double* bbox_data, const double overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
template <typename Dtype>
void ApplyNMSGPU(const Dtype* bbox_data, const Dtype* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices) {
// Keep part of detections whose scores are higher than confidence threshold.
vector<int> idx;
vector<Dtype> confidences;
for (int i = 0; i < num_bboxes; ++i) {
if (conf_data[i] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i]);
}
}
int num_remain = confidences.size();
if (num_remain == 0) {
return;
}
// Sort detections based on score.
/* thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0],
thrust::greater<Dtype>());*/
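  // Note: with the sort above disabled, idx keeps its original order, so the
  // top_k truncation below keeps the first top_k candidates rather than the
  // top_k highest-scoring ones.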
if (top_k > -1 && top_k < num_remain) {
num_remain = top_k;
}
// Compute overlap between remaining detections.
Blob<int> idx_blob(1, 1, 1, num_remain);
int* idx_data = idx_blob.mutable_cpu_data();
std::copy(idx.begin(), idx.begin() + num_remain, idx_data);
Blob<bool> overlapped(1, 1, num_remain, num_remain);
const int total_bboxes = overlapped.count();
bool* overlapped_data = overlapped.mutable_gpu_data();
ComputeOverlappedByIdxGPU<Dtype>(total_bboxes, bbox_data, nms_threshold,
idx_blob.gpu_data(), num_remain, overlapped_data);
// Do non-maximum suppression based on overlapped results.
const bool* overlapped_results = overlapped.cpu_data();
vector<int> selected_indices;
ApplyNMS(overlapped_results, num_remain, &selected_indices);
// Put back the selected information.
for (int i = 0; i < selected_indices.size(); ++i) {
indices->push_back(idx[selected_indices[i]]);
}
}
template
void ApplyNMSGPU(const float* bbox_data, const float* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
template
void ApplyNMSGPU(const double* bbox_data, const double* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
template <typename Dtype>
__global__ void GetDetectionsKernel(const int nthreads,
const Dtype* bbox_data, const Dtype* conf_data, const int image_id,
const int label, const int* indices, const bool clip_bbox,
Dtype* detection_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int det_idx = indices[index];
detection_data[index * 7] = image_id;
detection_data[index * 7 + 1] = label;
detection_data[index * 7 + 2] = conf_data[det_idx];
if (clip_bbox) {
ClipBBoxGPU(&(bbox_data[det_idx * 4]), &(detection_data[index * 7 + 3]));
} else {
for (int i = 0; i < 4; ++i) {
detection_data[index * 7 + 3 + i] = bbox_data[det_idx * 4 + i];
}
}
}
}
template <typename Dtype>
void GetDetectionsGPU(const Dtype* bbox_data, const Dtype* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<Dtype>* detection_blob) {
// Store selected indices in array.
int num_det = indices.size();
if (num_det == 0) {
return;
}
Blob<int> idx_blob(1, 1, 1, num_det);
int* idx_data = idx_blob.mutable_cpu_data();
std::copy(indices.begin(), indices.end(), idx_data);
// Prepare detection_blob.
detection_blob->Reshape(1, 1, num_det, 7);
Dtype* detection_data = detection_blob->mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( GetDetectionsKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_det)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_det, bbox_data, conf_data, image_id, label,
idx_blob.gpu_data(), clip_bbox, detection_data);
CUDA_POST_KERNEL_CHECK;
}
template void GetDetectionsGPU(const float* bbox_data, const float* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<float>* detection_blob);
template void GetDetectionsGPU(const double* bbox_data, const double* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<double>* detection_blob);
} // namespace caffe
| bbox_util.cu | #include <algorithm>
#include <functional>
#include <map>
#include <vector>
//#include "thrust/functional.h"
//#include "thrust/sort.h"
#include "caffe/common.hpp"
#include "caffe/util/bbox_util.hpp"
namespace caffe {
template <typename Dtype>
__host__ __device__ Dtype BBoxSizeGPU(const Dtype* bbox,
const bool normalized) {
if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) {
// If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0.
return Dtype(0.);
} else {
const Dtype width = bbox[2] - bbox[0];
const Dtype height = bbox[3] - bbox[1];
if (normalized) {
return width * height;
} else {
// If bbox is not within range [0, 1].
return (width + 1) * (height + 1);
}
}
}
template __host__ __device__ float BBoxSizeGPU(const float* bbox,
const bool normalized);
template __host__ __device__ double BBoxSizeGPU(const double* bbox,
const bool normalized);
template <typename Dtype>
__host__ __device__ Dtype JaccardOverlapGPU(const Dtype* bbox1,
const Dtype* bbox2) {
if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] ||
bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) {
return Dtype(0.);
} else {
const Dtype inter_xmin = max(bbox1[0], bbox2[0]);
const Dtype inter_ymin = max(bbox1[1], bbox2[1]);
const Dtype inter_xmax = min(bbox1[2], bbox2[2]);
const Dtype inter_ymax = min(bbox1[3], bbox2[3]);
const Dtype inter_width = inter_xmax - inter_xmin;
const Dtype inter_height = inter_ymax - inter_ymin;
const Dtype inter_size = inter_width * inter_height;
const Dtype bbox1_size = BBoxSizeGPU(bbox1);
const Dtype bbox2_size = BBoxSizeGPU(bbox2);
return inter_size / (bbox1_size + bbox2_size - inter_size);
}
}
template __host__ __device__ float JaccardOverlapGPU(const float* bbox1,
const float* bbox2);
template __host__ __device__ double JaccardOverlapGPU(const double* bbox1,
const double* bbox2);
template <typename Dtype>
__device__ Dtype Min(const Dtype x, const Dtype y) {
return x < y ? x : y;
}
template <typename Dtype>
__device__ Dtype Max(const Dtype x, const Dtype y) {
return x > y ? x : y;
}
template <typename Dtype>
__device__ void ClipBBoxGPU(const Dtype* bbox, Dtype* clip_bbox) {
for (int i = 0; i < 4; ++i) {
clip_bbox[i] = Max(Min(bbox[i], Dtype(1.)), Dtype(0.));
}
}
template __device__ void ClipBBoxGPU(const float* bbox, float* clip_bbox);
template __device__ void ClipBBoxGPU(const double* bbox, double* clip_bbox);
template <typename Dtype>
__global__ void DecodeBBoxesKernel(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
Dtype* bbox_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % 4;
const int c = (index / 4) % num_loc_classes;
const int d = (index / 4 / num_loc_classes) % num_priors;
if (!share_location && c == background_label_id) {
// Ignore background class if not share_location.
return;
}
const int pi = d * 4;
const int vi = pi + num_priors * 4;
if (code_type == PriorBoxParameter_CodeType_CORNER) {
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index];
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
prior_data[pi + i] + loc_data[index] * prior_data[vi + i];
}
} else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
const Dtype p_xmin = prior_data[pi];
const Dtype p_ymin = prior_data[pi + 1];
const Dtype p_xmax = prior_data[pi + 2];
const Dtype p_ymax = prior_data[pi + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
const Dtype prior_center_x = (p_xmin + p_xmax) / 2.;
const Dtype prior_center_y = (p_ymin + p_ymax) / 2.;
const Dtype xmin = loc_data[index - i];
const Dtype ymin = loc_data[index - i + 1];
const Dtype xmax = loc_data[index - i + 2];
const Dtype ymax = loc_data[index - i + 3];
Dtype decode_bbox_center_x, decode_bbox_center_y;
Dtype decode_bbox_width, decode_bbox_height;
if (variance_encoded_in_target) {
        // variance is encoded in target, we simply need to restore the offset
// predictions.
decode_bbox_center_x = xmin * prior_width + prior_center_x;
decode_bbox_center_y = ymin * prior_height + prior_center_y;
decode_bbox_width = exp(xmax) * prior_width;
decode_bbox_height = exp(ymax) * prior_height;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
decode_bbox_center_x =
prior_data[vi] * xmin * prior_width + prior_center_x;
decode_bbox_center_y =
prior_data[vi + 1] * ymin * prior_height + prior_center_y;
decode_bbox_width =
exp(prior_data[vi + 2] * xmax) * prior_width;
decode_bbox_height =
exp(prior_data[vi + 3] * ymax) * prior_height;
}
switch (i) {
case 0:
bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.;
break;
case 1:
bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.;
break;
case 2:
bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.;
break;
case 3:
bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.;
break;
}
} else {
// Unknown code type.
}
}
}
template <typename Dtype>
void DecodeBBoxesGPU(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
Dtype* bbox_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
DecodeBBoxesKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, loc_data, prior_data, code_type,
variance_encoded_in_target, num_priors, share_location, num_loc_classes,
background_label_id, bbox_data);
CUDA_POST_KERNEL_CHECK;
}
template void DecodeBBoxesGPU(const int nthreads,
const float* loc_data, const float* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
float* bbox_data);
template void DecodeBBoxesGPU(const int nthreads,
const double* loc_data, const double* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
double* bbox_data);
template <typename Dtype>
__global__ void PermuteDataKernel(const int nthreads,
const Dtype* data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % num_dim;
const int c = (index / num_dim) % num_classes;
const int d = (index / num_dim / num_classes) % num_data;
const int n = index / num_dim / num_classes / num_data;
const int new_index = ((n * num_classes + c) * num_data + d) * num_dim + i;
new_data[new_index] = data[index];
}
}
template <typename Dtype>
void PermuteDataGPU(const int nthreads,
const Dtype* data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
PermuteDataKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, data, num_classes, num_data,
num_dim, new_data);
CUDA_POST_KERNEL_CHECK;
}
template void PermuteDataGPU(const int nthreads,
const float* data, const int num_classes, const int num_data,
const int num_dim, float* new_data);
template void PermuteDataGPU(const int nthreads,
const double* data, const int num_classes, const int num_data,
const int num_dim, double* new_data);
template <typename Dtype>
__global__ void ComputeOverlappedKernel(const int nthreads,
const Dtype* bbox_data, const int num_bboxes, const int num_classes,
const Dtype overlap_threshold, bool* overlapped_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int j = index % num_bboxes;
const int i = (index / num_bboxes) % num_bboxes;
if (i == j) {
// Ignore same bbox.
return;
}
const int c = (index / num_bboxes / num_bboxes) % num_classes;
const int n = index / num_bboxes / num_bboxes / num_classes;
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = ((n * num_bboxes + i) * num_classes + c) * 4;
const int start_loc_j = ((n * num_bboxes + j) * num_classes + c) * 4;
const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
bbox_data + start_loc_j);
if (overlap > overlap_threshold) {
overlapped_data[index] = true;
}
}
}
template <typename Dtype>
void ComputeOverlappedGPU(const int nthreads,
const Dtype* bbox_data, const int num_bboxes, const int num_classes,
const Dtype overlap_threshold, bool* overlapped_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
ComputeOverlappedKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, bbox_data, num_bboxes, num_classes,
overlap_threshold, overlapped_data);
CUDA_POST_KERNEL_CHECK;
}
template void ComputeOverlappedGPU(const int nthreads,
const float* bbox_data, const int num_bboxes, const int num_classes,
const float overlap_threshold, bool* overlapped_data);
template void ComputeOverlappedGPU(const int nthreads,
const double* bbox_data, const int num_bboxes, const int num_classes,
const double overlap_threshold, bool* overlapped_data);
template <typename Dtype>
__global__ void ComputeOverlappedByIdxKernel(const int nthreads,
const Dtype* bbox_data, const Dtype overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int j = index % num_idx;
const int i = (index / num_idx);
if (i == j) {
// Ignore same bbox.
return;
}
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = idx[i] * 4;
const int start_loc_j = idx[j] * 4;
const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
bbox_data + start_loc_j);
if (overlap > overlap_threshold) {
overlapped_data[index] = true;
}
}
}
template <typename Dtype>
void ComputeOverlappedByIdxGPU(const int nthreads,
const Dtype* bbox_data, const Dtype overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
ComputeOverlappedByIdxKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, bbox_data, overlap_threshold,
idx, num_idx, overlapped_data);
CUDA_POST_KERNEL_CHECK;
}
template void ComputeOverlappedByIdxGPU(const int nthreads,
const float* bbox_data, const float overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
template void ComputeOverlappedByIdxGPU(const int nthreads,
const double* bbox_data, const double overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
template <typename Dtype>
void ApplyNMSGPU(const Dtype* bbox_data, const Dtype* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices) {
// Keep part of detections whose scores are higher than confidence threshold.
vector<int> idx;
vector<Dtype> confidences;
for (int i = 0; i < num_bboxes; ++i) {
if (conf_data[i] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i]);
}
}
int num_remain = confidences.size();
if (num_remain == 0) {
return;
}
// Sort detections based on score.
/* thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0],
thrust::greater<Dtype>());*/
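  // Note: with the sort above disabled, idx keeps its original order, so the
  // top_k truncation below keeps the first top_k candidates rather than the
  // top_k highest-scoring ones.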
if (top_k > -1 && top_k < num_remain) {
num_remain = top_k;
}
// Compute overlap between remaining detections.
Blob<int> idx_blob(1, 1, 1, num_remain);
int* idx_data = idx_blob.mutable_cpu_data();
std::copy(idx.begin(), idx.begin() + num_remain, idx_data);
Blob<bool> overlapped(1, 1, num_remain, num_remain);
const int total_bboxes = overlapped.count();
bool* overlapped_data = overlapped.mutable_gpu_data();
ComputeOverlappedByIdxGPU<Dtype>(total_bboxes, bbox_data, nms_threshold,
idx_blob.gpu_data(), num_remain, overlapped_data);
// Do non-maximum suppression based on overlapped results.
const bool* overlapped_results = overlapped.cpu_data();
vector<int> selected_indices;
ApplyNMS(overlapped_results, num_remain, &selected_indices);
// Put back the selected information.
for (int i = 0; i < selected_indices.size(); ++i) {
indices->push_back(idx[selected_indices[i]]);
}
}
template
void ApplyNMSGPU(const float* bbox_data, const float* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
template
void ApplyNMSGPU(const double* bbox_data, const double* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
template <typename Dtype>
__global__ void GetDetectionsKernel(const int nthreads,
const Dtype* bbox_data, const Dtype* conf_data, const int image_id,
const int label, const int* indices, const bool clip_bbox,
Dtype* detection_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int det_idx = indices[index];
detection_data[index * 7] = image_id;
detection_data[index * 7 + 1] = label;
detection_data[index * 7 + 2] = conf_data[det_idx];
if (clip_bbox) {
ClipBBoxGPU(&(bbox_data[det_idx * 4]), &(detection_data[index * 7 + 3]));
} else {
for (int i = 0; i < 4; ++i) {
detection_data[index * 7 + 3 + i] = bbox_data[det_idx * 4 + i];
}
}
}
}
template <typename Dtype>
void GetDetectionsGPU(const Dtype* bbox_data, const Dtype* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<Dtype>* detection_blob) {
// Store selected indices in array.
int num_det = indices.size();
if (num_det == 0) {
return;
}
Blob<int> idx_blob(1, 1, 1, num_det);
int* idx_data = idx_blob.mutable_cpu_data();
std::copy(indices.begin(), indices.end(), idx_data);
// Prepare detection_blob.
detection_blob->Reshape(1, 1, num_det, 7);
Dtype* detection_data = detection_blob->mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
GetDetectionsKernel<Dtype><<<CAFFE_GET_BLOCKS(num_det),
CAFFE_CUDA_NUM_THREADS>>>(num_det, bbox_data, conf_data, image_id, label,
idx_blob.gpu_data(), clip_bbox, detection_data);
CUDA_POST_KERNEL_CHECK;
}
template void GetDetectionsGPU(const float* bbox_data, const float* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<float>* detection_blob);
template void GetDetectionsGPU(const double* bbox_data, const double* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<double>* detection_blob);
} // namespace caffe
|
bc0f2018069cf694fcaf1e3eabdcba0d831c36a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gputimer.h"
//#include "utils.h"
const int N= 1024; // matrix size will be NxN
const int K = 32; // Our tile size
int compare_matrices(float *gpu, float *ref, int N)
{
int result = 0;
for(int j=0; j < N; j++)
{
for(int i=0; i < N; i++)
{
if (ref[i + j*N] != gpu[i + j*N])
{result = 1;}
// printf("%d\t", (int)gpu[i + j*N]);
}
//printf("\n");
}
return result;
}
// fill a matrix with sequential numbers in the range 0..N*N-1
void fill_matrix(float *mat, int N)
{
for(int j=0; j < N * N; j++)
mat[j] = (float) j;
}
// The following functions and kernels are for your references
void
transpose_CPU(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched on a single thread
__global__ void
transpose_serial(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per row of output matrix
__global__ void
transpose_parallel_per_row(float in[], float out[])
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
for(int j=0; j < N; j++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// Write two tiled versions of transpose -- One using shared memory.
// To be launched with one thread per element, in KxK threadblocks.
// For each thread (x,y) in a tile, determine the element (i,j) of the global output matrix.
__global__ void
transpose_parallel_per_element_tiled(float in[], float out[])
{
int x = blockIdx.x * K + threadIdx.x;
int y = blockIdx.y * K + threadIdx.y;
  // define our input index and output index for this thread.
int in_idx = x + N * y;
int out_idx = y + N * x;
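  // With K (32) < N (1024) the loop below runs exactly once (i == 0), so each
  // thread copies a single element.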
for(int i = 0; i < K; i+= N)
{
out[out_idx+i] = in[in_idx +i * N];
}
}
__global__ void
transpose_parallel_per_element_tiled_shared(float in[], float out[])
{
// Same algo as above but using shared memory.
// Define our tile using K
__shared__ float tile[K][K+1];
int idx = threadIdx.x;
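  // Only threadIdx.x is used here; threads that differ only in threadIdx.y
  // repeat the same K loads/stores, so the result is correct but the work is
  // redundant.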
int x = blockIdx.x * K;
int y = blockIdx.y * K;
// Read tile into shared memory
for ( int i=0; i < K; i++)
{
tile[idx][i] = in[( y + i ) * N + ( x + idx ) ];
}
// sync the threads
__syncthreads();
// write the data out of shared memory into global memory
for ( int i=0; i < K; i++)
{
out [( x + i )* N + ( y + idx) ] = tile[i][idx];
}
}
int main(int argc, char **argv)
{
int numbytes = N * N * sizeof(float);
float *in = (float *) malloc(numbytes);
float *out = (float *) malloc(numbytes);
float *gold = (float *) malloc(numbytes);
fill_matrix(in, N);
transpose_CPU(in, gold);
float *d_in, *d_out;
hipMalloc(&d_in, numbytes);
hipMalloc(&d_out, numbytes);
hipMemcpy(d_in, in, numbytes, hipMemcpyHostToDevice);
GpuTimer timer;
timer.Start();
hipLaunchKernelGGL(( transpose_serial), dim3(1),dim3(1), 0, 0, d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i){out[i] = 0.0;}
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_serial: %g ms.\nVerifying ...%s\n",
timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
hipMemcpy(d_out, d_in, numbytes, hipMemcpyDeviceToDevice); //clean d_out
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_row), dim3(1),dim3(N), 0, 0, d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i){out[i] = 0.0;} //clean out
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_row: %g ms.\nVerifying ...%s\n",
timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
hipMemcpy(d_out, d_in, numbytes, hipMemcpyDeviceToDevice); //clean d_out
// Tiled versions
//const int K= 16;
dim3 blocks_tiled(N/K,N/K);
dim3 threads_tiled(K,K);
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element_tiled), dim3(blocks_tiled),dim3(threads_tiled), 0, 0, d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i){out[i] = 0.0;}
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n",
K, K, timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
hipMemcpy(d_out, d_in, numbytes, hipMemcpyDeviceToDevice); //clean d_out
dim3 blocks_tiled_sh(N/K,N/K);
dim3 threads_tiled_sh(K,K);
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element_tiled_shared), dim3(blocks_tiled_sh),dim3(threads_tiled_sh), 0, 0, d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i){out[i] = 0.0;}
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled_shared %dx%d: %g ms.\nVerifying ...%s\n",
K, K, timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
hipFree(d_in);
hipFree(d_out);
}
| bc0f2018069cf694fcaf1e3eabdcba0d831c36a2.cu | #include <stdio.h>
#include "gputimer.h"
//#include "utils.h"
const int N= 1024; // matrix size will be NxN
const int K = 32; // Our tile size
int compare_matrices(float *gpu, float *ref, int N)
{
int result = 0;
for(int j=0; j < N; j++)
{
for(int i=0; i < N; i++)
{
if (ref[i + j*N] != gpu[i + j*N])
{result = 1;}
// printf("%d\t", (int)gpu[i + j*N]);
}
//printf("\n");
}
return result;
}
// fill a matrix with sequential numbers in the range 0..N*N-1
void fill_matrix(float *mat, int N)
{
for(int j=0; j < N * N; j++)
mat[j] = (float) j;
}
// The following functions and kernels are for your references
void
transpose_CPU(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched on a single thread
__global__ void
transpose_serial(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per row of output matrix
__global__ void
transpose_parallel_per_row(float in[], float out[])
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
for(int j=0; j < N; j++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// Write two tiled versions of transpose -- One using shared memory.
// To be launched with one thread per element, in KxK threadblocks.
// You will determine for each thread (x,y) in tile the element (i,j) of global output matrix.
__global__ void
transpose_parallel_per_element_tiled(float in[], float out[])
{
int x = blockIdx.x * K + threadIdx.x;
int y = blockIdx.y * K + threadIdx.y;
// define our input index for this thread and output index for this thread.
int in_idx = x + N * y;
int out_idx = y + N * x;
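	// with i starting at 0 and stepping by N (N > K), the loop body runs only for
	// i == 0, so each thread copies its single element: out[out_idx] = in[in_idx]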
for(int i = 0; i < K; i+= N)
{
out[out_idx+i] = in[in_idx +i * N];
}
}
__global__ void
transpose_parallel_per_element_tiled_shared(float in[], float out[])
{
// Same algo as above but using shared memory.
// Define our tile using K
__shared__ float tile[K][K+1];
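	// the extra padding column (K+1) places successive rows in different shared-memory
	// banks, so the column-wise accesses tile[idx][i] below are free of bank conflicts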
int idx = threadIdx.x;
int x = blockIdx.x * K;
int y = blockIdx.y * K;
// Read tile into shared memory
for ( int i=0; i < K; i++)
{
tile[idx][i] = in[( y + i ) * N + ( x + idx ) ];
}
// sync the threads
__syncthreads();
// write the data out of shared memory into global memory
for ( int i=0; i < K; i++)
{
out [( x + i )* N + ( y + idx) ] = tile[i][idx];
}
}
int main(int argc, char **argv)
{
int numbytes = N * N * sizeof(float);
float *in = (float *) malloc(numbytes);
float *out = (float *) malloc(numbytes);
float *gold = (float *) malloc(numbytes);
fill_matrix(in, N);
transpose_CPU(in, gold);
float *d_in, *d_out;
cudaMalloc(&d_in, numbytes);
cudaMalloc(&d_out, numbytes);
cudaMemcpy(d_in, in, numbytes, cudaMemcpyHostToDevice);
GpuTimer timer;
timer.Start();
transpose_serial<<<1,1>>>(d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i){out[i] = 0.0;}
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_serial: %g ms.\nVerifying ...%s\n",
timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
cudaMemcpy(d_out, d_in, numbytes, cudaMemcpyDeviceToDevice); //clean d_out
timer.Start();
transpose_parallel_per_row<<<1,N>>>(d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i){out[i] = 0.0;} //clean out
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_row: %g ms.\nVerifying ...%s\n",
timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
cudaMemcpy(d_out, d_in, numbytes, cudaMemcpyDeviceToDevice); //clean d_out
// Tiled versions
//const int K= 16;
dim3 blocks_tiled(N/K,N/K);
dim3 threads_tiled(K,K);
timer.Start();
transpose_parallel_per_element_tiled<<<blocks_tiled,threads_tiled>>>(d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i){out[i] = 0.0;}
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n",
K, K, timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
cudaMemcpy(d_out, d_in, numbytes, cudaMemcpyDeviceToDevice); //clean d_out
dim3 blocks_tiled_sh(N/K,N/K);
dim3 threads_tiled_sh(K,K);
timer.Start();
transpose_parallel_per_element_tiled_shared<<<blocks_tiled_sh,threads_tiled_sh>>>(d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i){out[i] = 0.0;}
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled_shared %dx%d: %g ms.\nVerifying ...%s\n",
K, K, timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
cudaFree(d_in);
cudaFree(d_out);
}
|
e7ba309fbcc4b1a17492e7d4bd646324f78342a6.hip | // !!! This is a file automatically generated by hipify!!!
/**
* 2DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_vector_types.h>
#include "polybenchUtilFuncts.h"
#include <hip/hip_runtime.h>
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define NI 2048
#define NJ 2048
#define NK 2048
#define NUM NI*NJ
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 1024
#define DIM_THREAD_BLOCK_Y 1
#define NUM_CHUNK 1024
#define CHUNK_SIZE NI/NUM_CHUNK
#define NUM_SM 8
#define OFFSET NUM_SM*DIM_THREAD_BLOCK_X
/* Can switch DATA_TYPE between float and double */
typedef double DATA_TYPE;
void init(DATA_TYPE ** A, DATA_TYPE **B)
{
int i, j;
for (i = 0; i < NI; ++i)
{
for (j = 0; j < NJ; ++j)
{
A[i][j] = ((DATA_TYPE) i*j) / NI;
B[i][j] = A[i][j];
}
}
}
void compareResults(DATA_TYPE **B, DATA_TYPE **B_outputFromGpu)
{
int i, j, fail;
fail = 0;
// Compare a and b
for (i=1; i < (NI-1); i++)
{
for (j=1; j < (NJ-1); j++)
{
if (percentDiff(B[i][j], B_outputFromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( 0 );
}
void bandwidthtest(DATA_TYPE ** A,DATA_TYPE** B)
{
DATA_TYPE *A_gpu;
size_t pitch;
hipEvent_t start[2],stop[2];
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
hipEventCreate(&start[0]);
hipEventCreate(&start[1]);
hipEventCreate(&stop[0]);
hipEventCreate(&stop[1]);
hipMallocPitch(&A_gpu, &pitch, NJ*sizeof(DATA_TYPE),NI);
fprintf(stdout,"finish malloc pitch\n");
//cudamemcpyasync2d HtoD
hipEventRecord(start[0],0);
for (int i = 0 ;i <NUM_CHUNK;i++){
hipMemcpy2DAsync(A_gpu+CHUNK_SIZE*i,pitch,&A[0][CHUNK_SIZE*i], NJ*sizeof(DATA_TYPE),CHUNK_SIZE*sizeof(DATA_TYPE),NI,hipMemcpyHostToDevice,0);
}
fprintf(stdout,"finish copy2d htod \n");
hipDeviceSynchronize();
hipEventRecord(stop[0]);
hipEventSynchronize(stop[0]);
hipEventElapsedTime(&elapsedTimeInMs, start[0], stop[0]);
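  // bytes * 2^10 / (ms * 2^20) = bytes / (1024 * ms); the 1000 ms-per-second factor
  // is approximated by 1024, so this slightly overstates the true MB/s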
bandwidthInMBs = ((float)(1<<10) * NI*NJ*sizeof(DATA_TYPE) ) / (elapsedTimeInMs * (float)(1 << 20));
fprintf(stdout,"HtoD cudamemcpy2d Bandwidth = %.1f MB/s, Time= %.1f Ms \n", bandwidthInMBs, elapsedTimeInMs);
//cudamemcpyasync2d DtoH
hipEventRecord(start[1],0);
for (int i =0 ;i <NUM_CHUNK; i++){
hipMemcpy2DAsync(&B[0][CHUNK_SIZE*i], NJ*sizeof(DATA_TYPE),A_gpu+CHUNK_SIZE*i,pitch,CHUNK_SIZE*sizeof(DATA_TYPE),NI,hipMemcpyDeviceToHost,0);
}
hipDeviceSynchronize();
hipEventRecord(stop[1]);
hipEventSynchronize(stop[1]);
hipEventElapsedTime(&elapsedTimeInMs, start[1], stop[1]);
bandwidthInMBs = ((float)(1<<10) * NI*NJ*sizeof(DATA_TYPE) ) / (elapsedTimeInMs * (float)(1 << 20));
fprintf(stdout,"DtoH cudamemcpy2d Bandwidth = %.1f MB/s, Time= %.1f Ms \n", bandwidthInMBs, elapsedTimeInMs);
hipEventDestroy(start[0]);
hipEventDestroy(start[1]);
hipEventDestroy(stop[0]);
hipEventDestroy(stop[1]);
hipFree(A_gpu);
}
int main(int argc, char *argv[])
{
int t;
DATA_TYPE **C_outputFromGpu, **A, **B;
DATA_TYPE *co,*aa,*bb;
C_outputFromGpu=(DATA_TYPE **)malloc(sizeof(DATA_TYPE *)*NI);
hipHostMalloc((void **)&co, sizeof(DATA_TYPE) * NI * NJ, hipHostMallocPortable);
for (t=0;t<NI;t++)
C_outputFromGpu[t]=co+t*NJ;
A=(DATA_TYPE **)malloc(sizeof(DATA_TYPE *)*NI);
hipHostMalloc((void **)&aa, sizeof(DATA_TYPE) * NI * NJ, hipHostMallocPortable);
for (t=0;t<NI;t++)
A[t]=aa+t*NK;
B=(DATA_TYPE **)malloc(sizeof(DATA_TYPE *)*NK);
hipHostMalloc((void **)&bb, sizeof(DATA_TYPE) * NI * NJ, hipHostMallocPortable);
for (t=0;t<NK;t++)
B[t]=bb+t*NJ;
fprintf(stdout,"finish allocation\n");
init(A,B);
fprintf(stdout,"finish initialization\n");
GPU_argv_init();
bandwidthtest(A,C_outputFromGpu);
fprintf(stdout,"finish test\n");
compareResults(C_outputFromGpu,B);
fprintf(stdout,"finish verify\n");
return 0;
}
| e7ba309fbcc4b1a17492e7d4bd646324f78342a6.cu | /**
* 2DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector_types.h>
#include "polybenchUtilFuncts.h"
#include <cuda.h>
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define NI 2048
#define NJ 2048
#define NK 2048
#define NUM NI*NJ
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 1024
#define DIM_THREAD_BLOCK_Y 1
#define NUM_CHUNK 1024
#define CHUNK_SIZE NI/NUM_CHUNK
#define NUM_SM 8
#define OFFSET NUM_SM*DIM_THREAD_BLOCK_X
/* Can switch DATA_TYPE between float and double */
typedef double DATA_TYPE;
void init(DATA_TYPE ** A, DATA_TYPE **B)
{
int i, j;
for (i = 0; i < NI; ++i)
{
for (j = 0; j < NJ; ++j)
{
A[i][j] = ((DATA_TYPE) i*j) / NI;
B[i][j] = A[i][j];
}
}
}
void compareResults(DATA_TYPE **B, DATA_TYPE **B_outputFromGpu)
{
int i, j, fail;
fail = 0;
// Compare a and b
for (i=1; i < (NI-1); i++)
{
for (j=1; j < (NJ-1); j++)
{
if (percentDiff(B[i][j], B_outputFromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( 0 );
}
void bandwidthtest(DATA_TYPE ** A,DATA_TYPE** B)
{
DATA_TYPE *A_gpu;
size_t pitch;
cudaEvent_t start[2],stop[2];
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
cudaEventCreate(&start[0]);
cudaEventCreate(&start[1]);
cudaEventCreate(&stop[0]);
cudaEventCreate(&stop[1]);
cudaMallocPitch(&A_gpu, &pitch, NJ*sizeof(DATA_TYPE),NI);
fprintf(stdout,"finish malloc pitch\n");
//cudamemcpyasync2d HtoD
cudaEventRecord(start[0],0);
for (int i = 0 ;i <NUM_CHUNK;i++){
cudaMemcpy2DAsync(A_gpu+CHUNK_SIZE*i,pitch,&A[0][CHUNK_SIZE*i], NJ*sizeof(DATA_TYPE),CHUNK_SIZE*sizeof(DATA_TYPE),NI,cudaMemcpyHostToDevice,0);
}
fprintf(stdout,"finish copy2d htod \n");
cudaDeviceSynchronize();
cudaEventRecord(stop[0]);
cudaEventSynchronize(stop[0]);
cudaEventElapsedTime(&elapsedTimeInMs, start[0], stop[0]);
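  // bytes * 2^10 / (ms * 2^20) = bytes / (1024 * ms); the 1000 ms-per-second factor
  // is approximated by 1024, so this slightly overstates the true MB/s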
bandwidthInMBs = ((float)(1<<10) * NI*NJ*sizeof(DATA_TYPE) ) / (elapsedTimeInMs * (float)(1 << 20));
fprintf(stdout,"HtoD cudamemcpy2d Bandwidth = %.1f MB/s, Time= %.1f Ms \n", bandwidthInMBs, elapsedTimeInMs);
//cudamemcpyasync2d DtoH
cudaEventRecord(start[1],0);
for (int i =0 ;i <NUM_CHUNK; i++){
cudaMemcpy2DAsync(&B[0][CHUNK_SIZE*i], NJ*sizeof(DATA_TYPE),A_gpu+CHUNK_SIZE*i,pitch,CHUNK_SIZE*sizeof(DATA_TYPE),NI,cudaMemcpyDeviceToHost,0);
}
cudaDeviceSynchronize();
cudaEventRecord(stop[1]);
cudaEventSynchronize(stop[1]);
cudaEventElapsedTime(&elapsedTimeInMs, start[1], stop[1]);
bandwidthInMBs = ((float)(1<<10) * NI*NJ*sizeof(DATA_TYPE) ) / (elapsedTimeInMs * (float)(1 << 20));
fprintf(stdout,"DtoH cudamemcpy2d Bandwidth = %.1f MB/s, Time= %.1f Ms \n", bandwidthInMBs, elapsedTimeInMs);
cudaEventDestroy(start[0]);
cudaEventDestroy(start[1]);
cudaEventDestroy(stop[0]);
cudaEventDestroy(stop[1]);
cudaFree(A_gpu);
}
int main(int argc, char *argv[])
{
int t;
DATA_TYPE **C_outputFromGpu, **A, **B;
DATA_TYPE *co,*aa,*bb;
C_outputFromGpu=(DATA_TYPE **)malloc(sizeof(DATA_TYPE *)*NI);
cudaHostAlloc((void **)&co, sizeof(DATA_TYPE) * NI * NJ, cudaHostAllocPortable);
for (t=0;t<NI;t++)
C_outputFromGpu[t]=co+t*NJ;
A=(DATA_TYPE **)malloc(sizeof(DATA_TYPE *)*NI);
cudaHostAlloc((void **)&aa, sizeof(DATA_TYPE) * NI * NJ, cudaHostAllocPortable);
for (t=0;t<NI;t++)
A[t]=aa+t*NK;
B=(DATA_TYPE **)malloc(sizeof(DATA_TYPE *)*NK);
cudaHostAlloc((void **)&bb, sizeof(DATA_TYPE) * NI * NJ, cudaHostAllocPortable);
for (t=0;t<NK;t++)
B[t]=bb+t*NJ;
fprintf(stdout,"finish allocation\n");
init(A,B);
fprintf(stdout,"finish initialization\n");
GPU_argv_init();
bandwidthtest(A,C_outputFromGpu);
fprintf(stdout,"finish test\n");
compareResults(C_outputFromGpu,B);
fprintf(stdout,"finish verify\n");
return 0;
}
|
c279409b4b2bf1d98e68def1804088c0a0636438.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_zvel_minus_4_back;
int xdim0_update_halo_kernel2_zvel_minus_4_back_h = -1;
__constant__ int ydim0_update_halo_kernel2_zvel_minus_4_back;
int ydim0_update_halo_kernel2_zvel_minus_4_back_h = -1;
__constant__ int xdim1_update_halo_kernel2_zvel_minus_4_back;
int xdim1_update_halo_kernel2_zvel_minus_4_back_h = -1;
__constant__ int ydim1_update_halo_kernel2_zvel_minus_4_back;
int ydim1_update_halo_kernel2_zvel_minus_4_back_h = -1;
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_zvel_minus_4_back*(y)+xdim0_update_halo_kernel2_zvel_minus_4_back*ydim0_update_halo_kernel2_zvel_minus_4_back*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_zvel_minus_4_back*(y)+xdim1_update_halo_kernel2_zvel_minus_4_back*ydim1_update_halo_kernel2_zvel_minus_4_back*(z))
//user function
__device__
inline void update_halo_kernel2_zvel_minus_4_back(double *zvel0, double *zvel1, const int* fields)
{
if(fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0,0,0)] = -zvel0[OPS_ACC0(0,0,4)];
if(fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0,0,0)] = -zvel1[OPS_ACC1(0,0,4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_zvel_minus_4_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel2_zvel_minus_4_back + idx_z * 1 * xdim0_update_halo_kernel2_zvel_minus_4_back * ydim0_update_halo_kernel2_zvel_minus_4_back;
arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel2_zvel_minus_4_back + idx_z * 1 * xdim1_update_halo_kernel2_zvel_minus_4_back * ydim1_update_halo_kernel2_zvel_minus_4_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_zvel_minus_4_back(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_zvel_minus_4_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_arg args[3] = { arg0, arg1, arg2};
ops_timing_realloc(85,"update_halo_kernel2_zvel_minus_4_back");
OPS_kernels[85].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
if (xdim0 != xdim0_update_halo_kernel2_zvel_minus_4_back_h || ydim0 != ydim0_update_halo_kernel2_zvel_minus_4_back_h || xdim1 != xdim1_update_halo_kernel2_zvel_minus_4_back_h || ydim1 != ydim1_update_halo_kernel2_zvel_minus_4_back_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel2_zvel_minus_4_back, &xdim0, sizeof(int) );
xdim0_update_halo_kernel2_zvel_minus_4_back_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel2_zvel_minus_4_back, &ydim0, sizeof(int) );
ydim0_update_halo_kernel2_zvel_minus_4_back_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel2_zvel_minus_4_back, &xdim1, sizeof(int) );
xdim1_update_halo_kernel2_zvel_minus_4_back_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel2_zvel_minus_4_back, &ydim1, sizeof(int) );
ydim1_update_halo_kernel2_zvel_minus_4_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
ops_timers_core(&c1,&t1);
OPS_kernels[85].mpi_time += t1-t2;
//call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_minus_4_back), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
}
ops_timers_core(&c2,&t2);
OPS_kernels[85].time += t2-t1;
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
//Update kernel record
OPS_kernels[85].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[85].transfer += ops_compute_transfer(dim, range, &arg1);
}
| c279409b4b2bf1d98e68def1804088c0a0636438.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_zvel_minus_4_back;
int xdim0_update_halo_kernel2_zvel_minus_4_back_h = -1;
__constant__ int ydim0_update_halo_kernel2_zvel_minus_4_back;
int ydim0_update_halo_kernel2_zvel_minus_4_back_h = -1;
__constant__ int xdim1_update_halo_kernel2_zvel_minus_4_back;
int xdim1_update_halo_kernel2_zvel_minus_4_back_h = -1;
__constant__ int ydim1_update_halo_kernel2_zvel_minus_4_back;
int ydim1_update_halo_kernel2_zvel_minus_4_back_h = -1;
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_zvel_minus_4_back*(y)+xdim0_update_halo_kernel2_zvel_minus_4_back*ydim0_update_halo_kernel2_zvel_minus_4_back*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_zvel_minus_4_back*(y)+xdim1_update_halo_kernel2_zvel_minus_4_back*ydim1_update_halo_kernel2_zvel_minus_4_back*(z))
//user function
__device__
inline void update_halo_kernel2_zvel_minus_4_back(double *zvel0, double *zvel1, const int* fields)
{
if(fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0,0,0)] = -zvel0[OPS_ACC0(0,0,4)];
if(fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0,0,0)] = -zvel1[OPS_ACC1(0,0,4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_zvel_minus_4_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel2_zvel_minus_4_back + idx_z * 1 * xdim0_update_halo_kernel2_zvel_minus_4_back * ydim0_update_halo_kernel2_zvel_minus_4_back;
arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel2_zvel_minus_4_back + idx_z * 1 * xdim1_update_halo_kernel2_zvel_minus_4_back * ydim1_update_halo_kernel2_zvel_minus_4_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_zvel_minus_4_back(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_zvel_minus_4_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_arg args[3] = { arg0, arg1, arg2};
ops_timing_realloc(85,"update_halo_kernel2_zvel_minus_4_back");
OPS_kernels[85].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
if (xdim0 != xdim0_update_halo_kernel2_zvel_minus_4_back_h || ydim0 != ydim0_update_halo_kernel2_zvel_minus_4_back_h || xdim1 != xdim1_update_halo_kernel2_zvel_minus_4_back_h || ydim1 != ydim1_update_halo_kernel2_zvel_minus_4_back_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel2_zvel_minus_4_back, &xdim0, sizeof(int) );
xdim0_update_halo_kernel2_zvel_minus_4_back_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel2_zvel_minus_4_back, &ydim0, sizeof(int) );
ydim0_update_halo_kernel2_zvel_minus_4_back_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel2_zvel_minus_4_back, &xdim1, sizeof(int) );
xdim1_update_halo_kernel2_zvel_minus_4_back_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel2_zvel_minus_4_back, &ydim1, sizeof(int) );
ydim1_update_halo_kernel2_zvel_minus_4_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
ops_timers_core(&c1,&t1);
OPS_kernels[85].mpi_time += t1-t2;
//call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_zvel_minus_4_back<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
}
ops_timers_core(&c2,&t2);
OPS_kernels[85].time += t2-t1;
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
//Update kernel record
OPS_kernels[85].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[85].transfer += ops_compute_transfer(dim, range, &arg1);
}
|
b4024b730c4663564d36c862cc99c41b8f18d147.hip | // !!! This is a file automatically generated by hipify!!!
// take inversion of sample covariance matrices
// which is approximation of Fisher information
void connector::invert_fi_back(){
// initialize a handle
hipblasHandle_t handle;
hipblasCreate(&handle);
// useless arrays..
int *p_arr, *info_arr;
// temporarily store fisher info estimate
double *device_fi[N_EXPERIMENTS], *device_fi_;
hipMalloc((void**)&p_arr, N_CENTERS * N_EXPERIMENTS * sizeof(int));
hipMalloc((void**)&info_arr, N_EXPERIMENTS * sizeof(int));
// exist in global memory
hipMalloc((void**)&device_fi_,
N_CENTERS * N_CENTERS * N_EXPERIMENTS * sizeof(double));
for(int n = 0; n < N_EXPERIMENTS; n++)
device_fi[n] = device_fi_ + N_CENTERS * N_CENTERS * n;
/*
// LU decompositions
hipblasDgetrfBatched(handle, N_CENTERS, dev_fi_inv_,
N_CENTERS, p_arr, info_arr, N_EXPERIMENTS);
// inversion
hipblasDgetriBatched(handle, N_CENTERS, dev_fi_inv_, N_CENTERS, p_arr,
device_fi, N_CENTERS, info_arr, N_EXPERIMENTS);
*/
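  // with the batched LU factorization and inversion above commented out, device_fi_
  // is never written on the device, so the copy below returns uninitialized memory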
// copy back to host
hipMemcpy(fi_est, device_fi_,
N_CENTERS * N_CENTERS * N_EXPERIMENTS * sizeof(double), hipMemcpyDeviceToHost);
hipFree(p_arr);
hipFree(info_arr);
hipFree(device_fi_);
hipblasDestroy(handle);
}
| b4024b730c4663564d36c862cc99c41b8f18d147.cu | // take inversion of sample covariance matrices
// which is approximation of Fisher information
void connector::invert_fi_back(){
// initialize a handle
cublasHandle_t handle;
cublasCreate(&handle);
// useless arrays..
int *p_arr, *info_arr;
// temporarily store fisher info estimate
double *device_fi[N_EXPERIMENTS], *device_fi_;
cudaMalloc((void**)&p_arr, N_CENTERS * N_EXPERIMENTS * sizeof(int));
cudaMalloc((void**)&info_arr, N_EXPERIMENTS * sizeof(int));
// exist in global memory
cudaMalloc((void**)&device_fi_,
N_CENTERS * N_CENTERS * N_EXPERIMENTS * sizeof(double));
for(int n = 0; n < N_EXPERIMENTS; n++)
device_fi[n] = device_fi_ + N_CENTERS * N_CENTERS * n;
/*
// LU decompositions
cublasDgetrfBatched(handle, N_CENTERS, dev_fi_inv_,
N_CENTERS, p_arr, info_arr, N_EXPERIMENTS);
// inversion
cublasDgetriBatched(handle, N_CENTERS, dev_fi_inv_, N_CENTERS, p_arr,
device_fi, N_CENTERS, info_arr, N_EXPERIMENTS);
*/
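  // with the batched LU factorization and inversion above commented out, device_fi_
  // is never written on the device, so the copy below returns uninitialized memory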
// copy back to host
cudaMemcpy(fi_est, device_fi_,
N_CENTERS * N_CENTERS * N_EXPERIMENTS * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(p_arr);
cudaFree(info_arr);
cudaFree(device_fi_);
cublasDestroy(handle);
}
|
b6c5c3782173097f230730250a0c1a22963ffe5e.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by yejiongtao on 2019/5/2.
//
#include "cuda_encoder.h"
#include "galois.h"
#include "cycletimer.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include <stdio.h>
const int BYTES_PER_THREAD = 1;
const int THREADS_PER_BLOCK = 128;
char *cuda_parity_rows;
char *cuda_mult_table;
//#define DEBUG
#ifdef DEBUG
#define cudaCheckError(ans) cudaAssert((ans), __FILE__, __LINE__);
inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr, "CUDA Error: %s at %s:%d\n",
hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#else
#define cudaCheckError(ans) ans
#endif
__inline__ int updiv(int a, int b) {
return (a + b - 1) / b;
}
void init_cuda(char *parity_rows, int r, int c) {
cudaCheckError(hipMalloc(&cuda_parity_rows, r * c * sizeof(char)));
cudaCheckError(hipMemcpy(cuda_parity_rows, parity_rows,
r * c * sizeof(char), hipMemcpyHostToDevice));
}
void copy_mult_table_to_cuda() {
cudaCheckError(hipMalloc(&cuda_mult_table, FIELD_SIZE * FIELD_SIZE * sizeof(char)));
// because MULTIPLICATION_TABLE is a 2D array
for(int i = 0; i < FIELD_SIZE; i++)
cudaCheckError(hipMemcpy(cuda_mult_table + i * FIELD_SIZE, MULTIPLICATION_TABLE[i],
FIELD_SIZE * sizeof(char), hipMemcpyHostToDevice));
}
__global__ void kernel_encode(char *matrix_rows, int matrix_r, int matrix_c,
char *inputs, int input_r, int input_c,
char *outputs, int output_r, int output_c,
int offset, int byte_count, char *mult_table) {
int i_thread = blockIdx.x * blockDim.x + threadIdx.x;
for (int i_output = 0; i_output < output_r; i_output++) {
char *output_shard = outputs + i_output * output_c;
char *matrix_row = matrix_rows + i_output * matrix_c;
{
int i_input = 0;
char *input_shard = inputs + i_input * input_c;
char *mult_table_row = mult_table + (matrix_row[i_input] & 0xFF) * FIELD_SIZE;
for(int i_byte = i_thread * BYTES_PER_THREAD;
i_byte < (i_thread+1) * BYTES_PER_THREAD; i_byte++) {
if (i_byte >= input_r)
break;
output_shard[i_byte] = mult_table_row[input_shard[i_byte] & 0xFF];
}
}
for (int i_input = 1; i_input < input_r; i_input++) {
char *input_shard = inputs + i_input * input_c;
char *mult_table_row = mult_table + (matrix_row[i_input] & 0xFF) * FIELD_SIZE;
for(int i_byte = i_thread * BYTES_PER_THREAD;
i_byte < (i_thread+1) * BYTES_PER_THREAD; i_byte++) {
if (i_byte >= input_r)
break;
output_shard[i_byte] ^= mult_table_row[input_shard[i_byte] & 0xFF];
}
}
}
}
void output_input_byte_table_cuda_loop(char *matrix_rows, int matrix_r, int matrix_c,
char *inputs, int input_r, int input_c,
char *outputs, int output_r, int output_c,
int offset, int byte_count) {
int num_blocks = updiv(updiv(byte_count, BYTES_PER_THREAD), THREADS_PER_BLOCK);
char *cuda_inputs;
cudaCheckError(hipMalloc(&cuda_inputs,
(input_c * input_r + output_c * output_r) * sizeof(char)));
char *cuda_outputs = cuda_inputs + input_c * input_r;
cudaCheckError(hipMemcpy(cuda_inputs, inputs,
input_c * input_r * sizeof(char), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_encode), dim3(num_blocks), dim3(THREADS_PER_BLOCK), 0, 0,
cuda_parity_rows, matrix_r, matrix_c, cuda_inputs, input_r, input_c,
cuda_outputs, output_r, output_c, offset, byte_count, cuda_mult_table);
cudaCheckError(hipDeviceSynchronize());
cudaCheckError(hipMemcpy(outputs, cuda_outputs,
output_c * output_r * sizeof(char), hipMemcpyDeviceToHost));
hipFree(cuda_inputs);
}
| b6c5c3782173097f230730250a0c1a22963ffe5e.cu | //
// Created by yejiongtao on 2019/5/2.
//
#include "cuda_encoder.h"
#include "galois.h"
#include "cycletimer.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include <stdio.h>
const int BYTES_PER_THREAD = 1;
const int THREADS_PER_BLOCK = 128;
char *cuda_parity_rows;
char *cuda_mult_table;
//#define DEBUG
#ifdef DEBUG
#define cudaCheckError(ans) cudaAssert((ans), __FILE__, __LINE__);
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "CUDA Error: %s at %s:%d\n",
cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#else
#define cudaCheckError(ans) ans
#endif
__inline__ int updiv(int a, int b) {
return (a + b - 1) / b;
}
void init_cuda(char *parity_rows, int r, int c) {
cudaCheckError(cudaMalloc(&cuda_parity_rows, r * c * sizeof(char)));
cudaCheckError(cudaMemcpy(cuda_parity_rows, parity_rows,
r * c * sizeof(char), cudaMemcpyHostToDevice));
}
void copy_mult_table_to_cuda() {
cudaCheckError(cudaMalloc(&cuda_mult_table, FIELD_SIZE * FIELD_SIZE * sizeof(char)));
// because MULTIPLICATION_TABLE is a 2D array
for(int i = 0; i < FIELD_SIZE; i++)
cudaCheckError(cudaMemcpy(cuda_mult_table + i * FIELD_SIZE, MULTIPLICATION_TABLE[i],
FIELD_SIZE * sizeof(char), cudaMemcpyHostToDevice));
}
__global__ void kernel_encode(char *matrix_rows, int matrix_r, int matrix_c,
char *inputs, int input_r, int input_c,
char *outputs, int output_r, int output_c,
int offset, int byte_count, char *mult_table) {
int i_thread = blockIdx.x * blockDim.x + threadIdx.x;
for (int i_output = 0; i_output < output_r; i_output++) {
char *output_shard = outputs + i_output * output_c;
char *matrix_row = matrix_rows + i_output * matrix_c;
{
int i_input = 0;
char *input_shard = inputs + i_input * input_c;
char *mult_table_row = mult_table + (matrix_row[i_input] & 0xFF) * FIELD_SIZE;
for(int i_byte = i_thread * BYTES_PER_THREAD;
i_byte < (i_thread+1) * BYTES_PER_THREAD; i_byte++) {
if (i_byte >= input_r)
break;
output_shard[i_byte] = mult_table_row[input_shard[i_byte] & 0xFF];
}
}
for (int i_input = 1; i_input < input_r; i_input++) {
char *input_shard = inputs + i_input * input_c;
char *mult_table_row = mult_table + (matrix_row[i_input] & 0xFF) * FIELD_SIZE;
for(int i_byte = i_thread * BYTES_PER_THREAD;
i_byte < (i_thread+1) * BYTES_PER_THREAD; i_byte++) {
if (i_byte >= input_r)
break;
output_shard[i_byte] ^= mult_table_row[input_shard[i_byte] & 0xFF];
}
}
}
}
void output_input_byte_table_cuda_loop(char *matrix_rows, int matrix_r, int matrix_c,
char *inputs, int input_r, int input_c,
char *outputs, int output_r, int output_c,
int offset, int byte_count) {
int num_blocks = updiv(updiv(byte_count, BYTES_PER_THREAD), THREADS_PER_BLOCK);
char *cuda_inputs;
cudaCheckError(cudaMalloc(&cuda_inputs,
(input_c * input_r + output_c * output_r) * sizeof(char)));
char *cuda_outputs = cuda_inputs + input_c * input_r;
cudaCheckError(cudaMemcpy(cuda_inputs, inputs,
input_c * input_r * sizeof(char), cudaMemcpyHostToDevice));
kernel_encode<<<num_blocks, THREADS_PER_BLOCK>>>(
cuda_parity_rows, matrix_r, matrix_c, cuda_inputs, input_r, input_c,
cuda_outputs, output_r, output_c, offset, byte_count, cuda_mult_table);
cudaCheckError(cudaDeviceSynchronize());
cudaCheckError(cudaMemcpy(outputs, cuda_outputs,
output_c * output_r * sizeof(char), cudaMemcpyDeviceToHost));
cudaFree(cuda_inputs);
}
|
c9c57ff6b900da1772fdf9bb7d16006e9d07fc58.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| c9c57ff6b900da1772fdf9bb7d16006e9d07fc58.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
86977b2139dc699cb4d5e3fcf51ad6117fbab401.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <sstream>
#include <algorithm>
#include "Bosel.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <sys/stat.h>
//#define cimg_use_jpeg
#include "CImg.h"
#define SIZE 10
#define db(a) cout << #a << " = " << a << endl
#define db2(a, b) cout << #a << " = " << a << " " << #b << " = " << b << endl
inline bool existe(const std::string& name) {
struct stat buffer;
return (stat(name.c_str(), &buffer) == 0);
}
void performCPU(string filename)
{
std::clock_t start = clock();
double duration;
if (existe(filename)) {
puts("EL ARCHIVO SI EXISTE");
}
else {
puts("EL ARCHIVO NO EXISTE");
}
ImgFloat imagen(filename.c_str());
//ImgFloat imagen("lena30.jpg");
// depth, numColors, initialize
ImgFloat xGradient(imagen.width(), imagen.height(), 1, 1, 0);
ImgFloat yGradient(imagen.width(), imagen.height(), 1, 1, 0);
ImgFloat gradientA(imagen.width(), imagen.height(), 1, 1, 0);
ImgFloat gradientB(imagen.width(), imagen.height(), 1, 1, 0);
//imagen.blur(1.5);
ImgFloat R = imagen.get_channel(0);
Bosel b;
b.convolution(R, b.Gx, xGradient);
b.convolution(R, b.Gy, yGradient);
b.mergeA(gradientA, xGradient, yGradient);
b.mergeB(gradientB, xGradient, yGradient);
duration = (std::clock() - start) / (double)CLOCKS_PER_SEC;
printf("CPU for image %s takes %.2f seconds\n", filename.c_str(), duration);
(gradientA, gradientB).display("comparacin suma ABSs y SQRT");
//(xGradient, yGradient, gradient).display("Deteccin de Bordes");
cout << duration << endl;
//gradient.display();
}
__device__ void convolution(int coordinate, float* d_arr, float* gradient, int width, int len, int* mask, int* dir, int* pos)
{
float c = 0;
for (int ii = 0; ii < 3; ii++)
{
for (int jj = 0; jj < 3; jj++)
{
int x = coordinate + width * dir[ii * 3 + jj] + pos[jj];
if (x >= 0 && x < len)
c += d_arr[x] * mask[ii * 3 + jj];
}
}
gradient[coordinate] = c;
}
__global__ void deviceComputeGradient(float* d_arr, float* gradient, int width, int len, int* mask, int* dir, int* pos) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x * width + y < len)
convolution(x * width + y, d_arr, gradient, width, len, mask, dir, pos);
}
__global__ void deviceMerge(float* xGradient, float* yGradient, float* target, int width, int len) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int index = x * width + y;
if (index < len)
target[index] = abs(xGradient[index]) + abs(yGradient[index]);
}
//segunda versión (un sólo método)
__device__ void convolution2(int coordinate, float* d_arr, float* gradient, int width, int len, int* gx_mask, int* gy_mask, int* dir, int* pos)
{
float cx = 0, cy = 0;
for (int ii = 0; ii < 3; ii++)
{
for (int jj = 0; jj < 3; jj++)
{
int x = coordinate + width * dir[ii * 3 + jj] + pos[jj];
if (x >= 0 && x < len) {
cx += d_arr[x] * gx_mask[ii * 3 + jj];
cy += d_arr[x] * gy_mask[ii * 3 + jj];
}
}
}
gradient[coordinate] = abs(cx) + abs(cy);
}
__global__ void deviceComputeGradient2(float* d_arr, float* gradient, int width, int len, int* gx_mask, int* gy_mask, int* dir, int* pos) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x == 0 && y < 10)
printf("[%d %d]\n", x, y);
if (x * width + y < len)
convolution2(x * width + y, d_arr, gradient, width, len, gx_mask, gy_mask, dir, pos);
}
void performGPU(string filename)
{
ImgFloat imagen(filename.c_str());
std::clock_t startt = clock();
double duration;
ImgFloat result(imagen.width(), imagen.height(), 1, 1, 0);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// depth, numColors, initialize
int WIDTH = imagen.width();
int HEIGHT = imagen.height();
float *arr, *gradient;
float *d_arr, *d_gradient;
arr = (float*)malloc(WIDTH * HEIGHT * sizeof(float));
gradient = (float*)malloc(WIDTH * HEIGHT * sizeof(float));
hipMalloc((void**)&d_arr, WIDTH * HEIGHT * sizeof(float));
hipMalloc((void**)&d_gradient, WIDTH * HEIGHT * sizeof(float));
for (int i = 0; i < WIDTH; i++)
for (int j = 0; j < HEIGHT; j++) {
arr[i * WIDTH + j] = imagen(i, j);
gradient[i * WIDTH + j] = 0;
}
hipMemcpy(d_arr, arr, WIDTH * HEIGHT * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_gradient, gradient, WIDTH * HEIGHT * sizeof(float), hipMemcpyHostToDevice);
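	// fixed launch geometry: 1024x1024 blocks of 4x4 threads, i.e. a 4096x4096
	// thread grid, independent of the actual image dimensions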
dim3 BLOCKS = dim3(1024, 1024);
dim3 THREADS = dim3(4, 4);
/*dim3 BLOCKS(2, 2);
dim3 THREADS(2, 2);*/
int pos[3] = { -1, 0, 1 };
int dir[9] = { -1, -1, -1, 0, 0, 0, 1, 1, 1 };
int Gx[9] = {
-1, 0, 1,
-2, 0, 2,
-1, 0, 1
};
int Gy[9] = {
1, 2, 1,
0, 0, 0,
-1, -2, -1
};
int* d_pos, *d_dir, *d_Gx, *d_Gy;
hipMalloc((void**)&d_pos, 3 * sizeof(int));
hipMalloc((void**)&d_dir, 9 * sizeof(int));
hipMalloc((void**)&d_Gx, 9 * sizeof(int));
hipMalloc((void**)&d_Gy, 9 * sizeof(int));
hipMemcpy(d_pos, pos, 3 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_dir, dir, 9 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Gx, Gx, 9 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Gy, Gy, 9 * sizeof(float), hipMemcpyHostToDevice);
hipEventRecord(start);
deviceComputeGradient2 << < BLOCKS, THREADS >> > (d_arr, d_gradient, WIDTH, WIDTH * HEIGHT, d_Gx, d_Gy, d_dir, d_pos);
hipMemcpy(gradient, d_gradient, WIDTH * HEIGHT * sizeof(float), hipMemcpyDeviceToHost);
hipEventRecord(stop);
hipEventSynchronize(stop);
for (int i = 0; i < WIDTH; i++) {
for (int j = 0; j < HEIGHT; j++) {
result(i, j) = gradient[i * WIDTH + j];
}
}
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
duration = (std::clock() - startt) / (double)CLOCKS_PER_SEC;
printf("GPU for image %s takes %.2f seconds\n", filename.c_str(), duration);
printf("hipEventElapsedTime = %.2f ms\n", milliseconds);
free(arr);
hipFree(d_arr);
hipFree(d_gradient);
hipFree(d_pos);
hipFree(d_dir);
hipFree(d_Gx);
hipFree(d_Gy);
hipEventDestroy(start);
hipEventDestroy(stop);
	//acá si quieres ponemos en xgradient, ygradient
(result).display("HOLA MUNDO CUDA");
}
int main(int argc, char** argv) {
performCPU("lena_grises.bmp");
return 0;
for (int i = 1; i < 2; i++)
{
int len = 4;
int baseSize = 1024;
for (int j = 1; j <= len; j++)
{
ostringstream stream;
stream << (baseSize * j);
if (i == 0)
performCPU(stream.str() + "x" + stream.str() + ".jpg");
else
performGPU(stream.str() + "x" + stream.str() + ".jpg");
}
}
return 0;
}
| 86977b2139dc699cb4d5e3fcf51ad6117fbab401.cu | #include <stdio.h>
#include <iostream>
#include <sstream>
#include <algorithm>
#include "Bosel.h"
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <sys/stat.h>
//#define cimg_use_jpeg
#include "CImg.h"
#define SIZE 10
#define db(a) cout << #a << " = " << a << endl
#define db2(a, b) cout << #a << " = " << a << " " << #b << " = " << b << endl
inline bool existe(const std::string& name) {
struct stat buffer;
return (stat(name.c_str(), &buffer) == 0);
}
void performCPU(string filename)
{
std::clock_t start = clock();
double duration;
if (existe(filename)) {
puts("EL ARCHIVO SI EXISTE");
}
else {
puts("EL ARCHIVO NO EXISTE");
}
ImgFloat imagen(filename.c_str());
//ImgFloat imagen("lena30.jpg");
// depth, numColors, initialize
ImgFloat xGradient(imagen.width(), imagen.height(), 1, 1, 0);
ImgFloat yGradient(imagen.width(), imagen.height(), 1, 1, 0);
ImgFloat gradientA(imagen.width(), imagen.height(), 1, 1, 0);
ImgFloat gradientB(imagen.width(), imagen.height(), 1, 1, 0);
//imagen.blur(1.5);
ImgFloat R = imagen.get_channel(0);
Bosel b;
b.convolution(R, b.Gx, xGradient);
b.convolution(R, b.Gy, yGradient);
b.mergeA(gradientA, xGradient, yGradient);
b.mergeB(gradientB, xGradient, yGradient);
duration = (std::clock() - start) / (double)CLOCKS_PER_SEC;
printf("CPU for image %s takes %.2f seconds\n", filename.c_str(), duration);
(gradientA, gradientB).display("comparación suma ABSs y SQRT");
//(xGradient, yGradient, gradient).display("Detección de Bordes");
cout << duration << endl;
//gradient.display();
}
__device__ void convolution(int coordinate, float* d_arr, float* gradient, int width, int len, int* mask, int* dir, int* pos)
{
float c = 0;
for (int ii = 0; ii < 3; ii++)
{
for (int jj = 0; jj < 3; jj++)
{
int x = coordinate + width * dir[ii * 3 + jj] + pos[jj];
if (x >= 0 && x < len)
c += d_arr[x] * mask[ii * 3 + jj];
}
}
gradient[coordinate] = c;
}
__global__ void deviceComputeGradient(float* d_arr, float* gradient, int width, int len, int* mask, int* dir, int* pos) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x * width + y < len)
convolution(x * width + y, d_arr, gradient, width, len, mask, dir, pos);
}
__global__ void deviceMerge(float* xGradient, float* yGradient, float* target, int width, int len) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int index = x * width + y;
if (index < len)
target[index] = abs(xGradient[index]) + abs(yGradient[index]);
}
//segunda versión (un sólo método)
__device__ void convolution2(int coordinate, float* d_arr, float* gradient, int width, int len, int* gx_mask, int* gy_mask, int* dir, int* pos)
{
float cx = 0, cy = 0;
for (int ii = 0; ii < 3; ii++)
{
for (int jj = 0; jj < 3; jj++)
{
int x = coordinate + width * dir[ii * 3 + jj] + pos[jj];
if (x >= 0 && x < len) {
cx += d_arr[x] * gx_mask[ii * 3 + jj];
cy += d_arr[x] * gy_mask[ii * 3 + jj];
}
}
}
gradient[coordinate] = abs(cx) + abs(cy);
}
__global__ void deviceComputeGradient2(float* d_arr, float* gradient, int width, int len, int* gx_mask, int* gy_mask, int* dir, int* pos) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x == 0 && y < 10)
printf("[%d %d]\n", x, y);
if (x * width + y < len)
convolution2(x * width + y, d_arr, gradient, width, len, gx_mask, gy_mask, dir, pos);
}
void performGPU(string filename)
{
ImgFloat imagen(filename.c_str());
std::clock_t startt = clock();
double duration;
ImgFloat result(imagen.width(), imagen.height(), 1, 1, 0);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// depth, numColors, initialize
int WIDTH = imagen.width();
int HEIGHT = imagen.height();
float *arr, *gradient;
float *d_arr, *d_gradient;
arr = (float*)malloc(WIDTH * HEIGHT * sizeof(float));
gradient = (float*)malloc(WIDTH * HEIGHT * sizeof(float));
cudaMalloc((void**)&d_arr, WIDTH * HEIGHT * sizeof(float));
cudaMalloc((void**)&d_gradient, WIDTH * HEIGHT * sizeof(float));
for (int i = 0; i < WIDTH; i++)
for (int j = 0; j < HEIGHT; j++) {
arr[i * WIDTH + j] = imagen(i, j);
gradient[i * WIDTH + j] = 0;
}
cudaMemcpy(d_arr, arr, WIDTH * HEIGHT * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_gradient, gradient, WIDTH * HEIGHT * sizeof(float), cudaMemcpyHostToDevice);
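	// fixed launch geometry: 1024x1024 blocks of 4x4 threads, i.e. a 4096x4096
	// thread grid, independent of the actual image dimensions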
dim3 BLOCKS = dim3(1024, 1024);
dim3 THREADS = dim3(4, 4);
/*dim3 BLOCKS(2, 2);
dim3 THREADS(2, 2);*/
int pos[3] = { -1, 0, 1 };
int dir[9] = { -1, -1, -1, 0, 0, 0, 1, 1, 1 };
int Gx[9] = {
-1, 0, 1,
-2, 0, 2,
-1, 0, 1
};
int Gy[9] = {
1, 2, 1,
0, 0, 0,
-1, -2, -1
};
int* d_pos, *d_dir, *d_Gx, *d_Gy;
cudaMalloc((void**)&d_pos, 3 * sizeof(int));
cudaMalloc((void**)&d_dir, 9 * sizeof(int));
cudaMalloc((void**)&d_Gx, 9 * sizeof(int));
cudaMalloc((void**)&d_Gy, 9 * sizeof(int));
cudaMemcpy(d_pos, pos, 3 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_dir, dir, 9 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Gx, Gx, 9 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Gy, Gy, 9 * sizeof(float), cudaMemcpyHostToDevice);
cudaEventRecord(start);
deviceComputeGradient2 << < BLOCKS, THREADS >> > (d_arr, d_gradient, WIDTH, WIDTH * HEIGHT, d_Gx, d_Gy, d_dir, d_pos);
cudaMemcpy(gradient, d_gradient, WIDTH * HEIGHT * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
for (int i = 0; i < WIDTH; i++) {
for (int j = 0; j < HEIGHT; j++) {
result(i, j) = gradient[i * WIDTH + j];
}
}
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
duration = (std::clock() - startt) / (double)CLOCKS_PER_SEC;
printf("GPU for image %s takes %.2f seconds\n", filename.c_str(), duration);
printf("cudaEventElapsedTime = %.2f ms\n", milliseconds);
free(arr);
cudaFree(d_arr);
cudaFree(d_gradient);
cudaFree(d_pos);
cudaFree(d_dir);
cudaFree(d_Gx);
cudaFree(d_Gy);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//acá si quieres ponemos en xgradient, ygradient
(result).display("HOLA MUNDO CUDA");
}
int main(int argc, char** argv) {
performCPU("lena_grises.bmp");
return 0;
for (int i = 1; i < 2; i++)
{
int len = 4;
int baseSize = 1024;
for (int j = 1; j <= len; j++)
{
ostringstream stream;
stream << (baseSize * j);
if (i == 0)
performCPU(stream.str() + "x" + stream.str() + ".jpg");
else
performGPU(stream.str() + "x" + stream.str() + ".jpg");
}
}
return 0;
}
|
8b8033f0079c2f2469bf124664e9c33731e099fc.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by liang on 2/16/18.
//
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <hip/hip_runtime.h>
#include <gflags/gflags.h>
#include <groute/event_pool.h>
#include <groute/graphs/csr_graph.h>
#include <groute/dwl/work_source.cuh>
#include <groute/device/cta_scheduler.cuh>
#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/stopwatch.h>
#include <device_launch_parameters.h>
#include <utils/graphs/traversal.h>
#include <glog/logging.h>
#include "pr_common.h"
DECLARE_double(wl_alloc_factor);
DECLARE_uint64(wl_alloc_abs);
DECLARE_int32(max_pr_iterations);
DECLARE_double(epsilon);
namespace data_driven_ctanp_pr {
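    // push-based, data-driven PageRank: kernels drain the residual of worklist nodes
    // into their ranks and append any neighbor whose accumulated residual first
    // crosses EPSILON; the relax kernel distributes edge work via the CTA scheduler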
template<typename WorkSource,
typename WorkTarget,
typename TGraph,
template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankInit__Single__(
WorkSource work_source, WorkTarget work_target,
float EPSILON, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
for (index_t ii = 0 + tid; ii < work_source.get_size(); ii += nthreads) {
index_t node = work_source.get_work(ii);
current_ranks[node] = 1.0 - ALPHA;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (out_degree == 0) continue;
rank_t update = ((1.0 - ALPHA) * ALPHA) / out_degree;
for (index_t edge = begin_edge; edge < end_edge; ++edge) {
index_t dest = graph.edge_dest(edge);
rank_t prev = atomicAdd(residual.get_item_ptr(dest), update);
if (prev <= EPSILON && prev + update > EPSILON)
work_target.append(dest);
}
}
}
template<
typename WorkSource, typename WorkTarget,
typename TGraph, template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankKernel__Single__(
WorkSource work_source, WorkTarget work_target,
float EPSILON, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x;
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) {
groute::dev::np_local<rank_t> local_work = {0, 0, 0.0};
if (i < work_size) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res > 0) {
current_ranks[node] += res;
local_work.start = graph.begin_edge(node);
local_work.size = graph.end_edge(node) - local_work.start;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (local_work.size > 0) {
rank_t update = res * ALPHA / out_degree;
local_work.meta_data = update;
}
}
}
groute::dev::CTAWorkScheduler<rank_t>::template schedule(
local_work,
[&work_target, &graph, &residual, &EPSILON](index_t edge, rank_t update) {
index_t dest = graph.edge_dest(edge);
rank_t prev = atomicAdd(residual.get_item_ptr(dest), update);
if (prev <= EPSILON && prev + update > EPSILON) {
work_target.append(dest);
}
}
);
}
}
/*
* The per-device Page Rank problem
*/
template<typename TGraph,
template<typename> class ResidualDatum,
template<typename> class RankDatum>
struct Problem {
TGraph m_graph;
ResidualDatum<rank_t> m_residual;
RankDatum<rank_t> m_current_ranks;
Problem(const TGraph &graph, RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) :
m_graph(graph), m_residual(residual), m_current_ranks(current_ranks) {
}
template<typename WorkSource, typename WorkTarget>
void Init__Single__(const WorkSource &workSource, WorkTarget workTarget, groute::Stream &stream) const {
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, m_graph.owned_nnodes());
Marker::MarkWorkitems(m_graph.owned_nnodes(), "PageRankInit__Single__");
PageRankInit__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(workSource, workTarget, FLAGS_epsilon, m_graph, m_current_ranks, m_residual);
}
template<typename WorkSource,
typename WorkTarget>
void
Relax__Single__(const WorkSource &work_source, WorkTarget &output_worklist, groute::Stream &stream) {
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, work_source.get_size());
float EPSILON = FLAGS_epsilon;
Marker::MarkWorkitems(work_source.get_size(), "PageRankKernel__Single__");
PageRankKernel__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(work_source, output_worklist.DeviceObject(), EPSILON, m_graph, m_current_ranks, m_residual);
}
};
struct Algo {
static const char *NameLower() { return "pr"; }
static const char *Name() { return "PR"; }
template<
typename TGraphAllocator, typename ResidualDatum, typename RankDatum, typename...UnusedData>
static const std::vector<rank_t> &Gather(
TGraphAllocator &graph_allocator, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
graph_allocator.GatherDatum(current_ranks);
return current_ranks.GetHostData();
}
template<
typename ResidualDatum, typename RankDatum, typename...UnusedData>
static std::vector<rank_t> Host(
groute::graphs::host::CSRGraph &graph, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
return PageRankHost(graph);
}
static int Output(const char *file, const std::vector<rank_t> &ranks) {
return PageRankOutput(file, ranks);
}
static int CheckErrors(std::vector<rank_t> &ranks, std::vector<rank_t> ®ression) {
return PageRankCheckErrors(ranks, regression);
}
};
}
bool DataDrivenCtaNpPR() {
VLOG(0) << "DataDrivenCtaNpPR";
typedef groute::Queue<index_t> Worklist;
groute::graphs::single::NodeOutputDatum<rank_t> residual;
groute::graphs::single::NodeOutputDatum<rank_t> current_ranks;
utils::traversal::Context<data_driven_ctanp_pr::Algo> context(1);
groute::graphs::single::CSRGraphAllocator
dev_graph_allocator(context.host_graph);
context.SetDevice(0);
dev_graph_allocator.AllocateDatumObjects(residual, current_ranks);
context.SyncDevice(0); // graph allocations are on default streams, must sync device
data_driven_ctanp_pr::Problem<
groute::graphs::dev::CSRGraph,
groute::graphs::dev::GraphDatum, groute::graphs::dev::GraphDatum>
solver(
dev_graph_allocator.DeviceObject(),
current_ranks.DeviceObject(),
residual.DeviceObject());
size_t max_work_size = context.host_graph.nedges * FLAGS_wl_alloc_factor;
if (FLAGS_wl_alloc_abs > 0)
max_work_size = FLAGS_wl_alloc_abs;
groute::Stream stream = context.CreateStream(0);
Worklist wl1(max_work_size, 0, "input queue"), wl2(max_work_size, 0, "output queue");
wl1.ResetAsync(stream.cuda_stream);
wl2.ResetAsync(stream.cuda_stream);
stream.Sync();
Stopwatch sw(true);
Worklist *in_wl = &wl1, *out_wl = &wl2;
solver.Init__Single__(groute::dev::WorkSourceRange<index_t>(
dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()),
in_wl->DeviceObject(), stream);
groute::Segment<index_t> work_seg;
work_seg = in_wl->GetSeg(stream);
int iteration = 0;
while (work_seg.GetSegmentSize() > 0) {
solver.Relax__Single__(
groute::dev::WorkSourceArray<index_t>(
work_seg.GetSegmentPtr(),
work_seg.GetSegmentSize()),
*out_wl, stream);
VLOG(1) << "INPUT " << work_seg.GetSegmentSize() << " OUTPUT " << out_wl->GetCount(stream);
if (++iteration > FLAGS_max_pr_iterations) {
LOG(WARNING) << "maximum iterations reached";
break;
}
in_wl->ResetAsync(stream.cuda_stream);
std::swap(in_wl, out_wl);
work_seg = in_wl->GetSeg(stream);
}
sw.stop();
VLOG(1) << data_driven_ctanp_pr::Algo::Name() << " terminated after " << iteration << " iterations (max: "
<< FLAGS_max_pr_iterations << ")";
VLOG(0) << "EPSILON: " << FLAGS_epsilon;
VLOG(0) << data_driven_ctanp_pr::Algo::Name() << ": " << sw.ms() << " ms. <filter>";
// Gather
auto gathered_output = data_driven_ctanp_pr::Algo::Gather(dev_graph_allocator, residual, current_ranks);
if (FLAGS_output.length() != 0)
data_driven_ctanp_pr::Algo::Output(FLAGS_output.c_str(), gathered_output);
if (FLAGS_check) {
auto regression = data_driven_ctanp_pr::Algo::Host(context.host_graph, residual, current_ranks);
return data_driven_ctanp_pr::Algo::CheckErrors(gathered_output, regression) == 0;
} else {
LOG(WARNING) << "Result not checked";
return true;
}
} | 8b8033f0079c2f2469bf124664e9c33731e099fc.cu | //
// Created by liang on 2/16/18.
//
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <cuda.h>
#include <gflags/gflags.h>
#include <groute/event_pool.h>
#include <groute/graphs/csr_graph.h>
#include <groute/dwl/work_source.cuh>
#include <groute/device/cta_scheduler.cuh>
#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/stopwatch.h>
#include <device_launch_parameters.h>
#include <utils/graphs/traversal.h>
#include <glog/logging.h>
#include "pr_common.h"
DECLARE_double(wl_alloc_factor);
DECLARE_uint64(wl_alloc_abs);
DECLARE_int32(max_pr_iterations);
DECLARE_double(epsilon);
namespace data_driven_ctanp_pr {
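// Data-driven ("push"-style) PageRank with residuals: relaxing a node folds its residual into
// its rank and pushes res * ALPHA / out_degree to each neighbour; a neighbour whose residual
// crosses EPSILON is appended to the worklist for a later round. The CTA-NP variant below uses
// the CTA work scheduler to spread a node's edges across the whole thread block.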
template<typename WorkSource,
typename WorkTarget,
typename TGraph,
template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankInit__Single__(
WorkSource work_source, WorkTarget work_target,
float EPSILON, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
for (index_t ii = 0 + tid; ii < work_source.get_size(); ii += nthreads) {
index_t node = work_source.get_work(ii);
current_ranks[node] = 1.0 - ALPHA;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (out_degree == 0) continue;
rank_t update = ((1.0 - ALPHA) * ALPHA) / out_degree;
for (index_t edge = begin_edge; edge < end_edge; ++edge) {
index_t dest = graph.edge_dest(edge);
rank_t prev = atomicAdd(residual.get_item_ptr(dest), update);
if (prev <= EPSILON && prev + update > EPSILON)
work_target.append(dest);
}
}
}
template<
typename WorkSource, typename WorkTarget,
typename TGraph, template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankKernel__Single__(
WorkSource work_source, WorkTarget work_target,
float EPSILON, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x;
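        // round the work size up to a multiple of the block size so that every thread reaches the
        // CTA scheduler call below; threads with no node of their own contribute an empty np_local
        // and only help process other threads' edges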
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) {
groute::dev::np_local<rank_t> local_work = {0, 0, 0.0};
if (i < work_size) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res > 0) {
current_ranks[node] += res;
local_work.start = graph.begin_edge(node);
local_work.size = graph.end_edge(node) - local_work.start;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (local_work.size > 0) {
rank_t update = res * ALPHA / out_degree;
local_work.meta_data = update;
}
}
}
groute::dev::CTAWorkScheduler<rank_t>::template schedule(
local_work,
[&work_target, &graph, &residual, &EPSILON](index_t edge, rank_t update) {
index_t dest = graph.edge_dest(edge);
rank_t prev = atomicAdd(residual.get_item_ptr(dest), update);
if (prev <= EPSILON && prev + update > EPSILON) {
work_target.append(dest);
}
}
);
}
}
/*
* The per-device Page Rank problem
*/
template<typename TGraph,
template<typename> class ResidualDatum,
template<typename> class RankDatum>
struct Problem {
TGraph m_graph;
ResidualDatum<rank_t> m_residual;
RankDatum<rank_t> m_current_ranks;
Problem(const TGraph &graph, RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) :
m_graph(graph), m_residual(residual), m_current_ranks(current_ranks) {
}
template<typename WorkSource, typename WorkTarget>
void Init__Single__(const WorkSource &workSource, WorkTarget workTarget, groute::Stream &stream) const {
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, m_graph.owned_nnodes());
Marker::MarkWorkitems(m_graph.owned_nnodes(), "PageRankInit__Single__");
PageRankInit__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(workSource, workTarget, FLAGS_epsilon, m_graph, m_current_ranks, m_residual);
}
template<typename WorkSource,
typename WorkTarget>
void
Relax__Single__(const WorkSource &work_source, WorkTarget &output_worklist, groute::Stream &stream) {
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, work_source.get_size());
float EPSILON = FLAGS_epsilon;
Marker::MarkWorkitems(work_source.get_size(), "PageRankKernel__Single__");
PageRankKernel__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(work_source, output_worklist.DeviceObject(), EPSILON, m_graph, m_current_ranks, m_residual);
}
};
struct Algo {
static const char *NameLower() { return "pr"; }
static const char *Name() { return "PR"; }
template<
typename TGraphAllocator, typename ResidualDatum, typename RankDatum, typename...UnusedData>
static const std::vector<rank_t> &Gather(
TGraphAllocator &graph_allocator, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
graph_allocator.GatherDatum(current_ranks);
return current_ranks.GetHostData();
}
template<
typename ResidualDatum, typename RankDatum, typename...UnusedData>
static std::vector<rank_t> Host(
groute::graphs::host::CSRGraph &graph, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
return PageRankHost(graph);
}
static int Output(const char *file, const std::vector<rank_t> &ranks) {
return PageRankOutput(file, ranks);
}
static int CheckErrors(std::vector<rank_t> &ranks, std::vector<rank_t> ®ression) {
return PageRankCheckErrors(ranks, regression);
}
};
}
bool DataDrivenCtaNpPR() {
VLOG(0) << "DataDrivenCtaNpPR";
typedef groute::Queue<index_t> Worklist;
groute::graphs::single::NodeOutputDatum<rank_t> residual;
groute::graphs::single::NodeOutputDatum<rank_t> current_ranks;
utils::traversal::Context<data_driven_ctanp_pr::Algo> context(1);
groute::graphs::single::CSRGraphAllocator
dev_graph_allocator(context.host_graph);
context.SetDevice(0);
dev_graph_allocator.AllocateDatumObjects(residual, current_ranks);
context.SyncDevice(0); // graph allocations are on default streams, must sync device
data_driven_ctanp_pr::Problem<
groute::graphs::dev::CSRGraph,
groute::graphs::dev::GraphDatum, groute::graphs::dev::GraphDatum>
solver(
dev_graph_allocator.DeviceObject(),
current_ranks.DeviceObject(),
residual.DeviceObject());
size_t max_work_size = context.host_graph.nedges * FLAGS_wl_alloc_factor;
if (FLAGS_wl_alloc_abs > 0)
max_work_size = FLAGS_wl_alloc_abs;
groute::Stream stream = context.CreateStream(0);
Worklist wl1(max_work_size, 0, "input queue"), wl2(max_work_size, 0, "output queue");
wl1.ResetAsync(stream.cuda_stream);
wl2.ResetAsync(stream.cuda_stream);
stream.Sync();
Stopwatch sw(true);
Worklist *in_wl = &wl1, *out_wl = &wl2;
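    // double-buffered worklists: each iteration relaxes the nodes in in_wl, appends newly
    // activated nodes to out_wl, and then the two pointers are swapped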
solver.Init__Single__(groute::dev::WorkSourceRange<index_t>(
dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()),
in_wl->DeviceObject(), stream);
groute::Segment<index_t> work_seg;
work_seg = in_wl->GetSeg(stream);
int iteration = 0;
while (work_seg.GetSegmentSize() > 0) {
solver.Relax__Single__(
groute::dev::WorkSourceArray<index_t>(
work_seg.GetSegmentPtr(),
work_seg.GetSegmentSize()),
*out_wl, stream);
VLOG(1) << "INPUT " << work_seg.GetSegmentSize() << " OUTPUT " << out_wl->GetCount(stream);
if (++iteration > FLAGS_max_pr_iterations) {
LOG(WARNING) << "maximum iterations reached";
break;
}
in_wl->ResetAsync(stream.cuda_stream);
std::swap(in_wl, out_wl);
work_seg = in_wl->GetSeg(stream);
}
sw.stop();
VLOG(1) << data_driven_ctanp_pr::Algo::Name() << " terminated after " << iteration << " iterations (max: "
<< FLAGS_max_pr_iterations << ")";
VLOG(0) << "EPSILON: " << FLAGS_epsilon;
VLOG(0) << data_driven_ctanp_pr::Algo::Name() << ": " << sw.ms() << " ms. <filter>";
// Gather
auto gathered_output = data_driven_ctanp_pr::Algo::Gather(dev_graph_allocator, residual, current_ranks);
if (FLAGS_output.length() != 0)
data_driven_ctanp_pr::Algo::Output(FLAGS_output.c_str(), gathered_output);
if (FLAGS_check) {
auto regression = data_driven_ctanp_pr::Algo::Host(context.host_graph, residual, current_ranks);
return data_driven_ctanp_pr::Algo::CheckErrors(gathered_output, regression) == 0;
} else {
LOG(WARNING) << "Result not checked";
return true;
}
} |
9b63da59bf237bf20caa15fcb5f8e0e3237a66d3.hip | // !!! This is a file automatically generated by hipify!!!
// #include <pycuda-complex.hpp>
// #include <surface_functions.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
typedef unsigned char uchar;
texture< int, hipTextureType2D, hipReadModeElementType> tex_isFree;
texture<float, hipTextureType2D, hipReadModeElementType> tex_concentrationIn;
///////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
__global__ void main_kernel_tex( const int nWidth, const int nHeight, float hx, int *isFreeAll,
float *concentrationOut ){
int t_j = blockIdx.x*blockDim.x + threadIdx.x;
int t_i = blockIdx.y*blockDim.y + threadIdx.y;
int tid = t_j + t_i*blockDim.x*gridDim.x;
//Read neighbors occupancy
  int left_isFree  = tex2D( tex_isFree, t_j-1, t_i );
  int right_isFree = tex2D( tex_isFree, t_j+1, t_i );
  int up_isFree    = tex2D( tex_isFree, t_j, t_i+1 );
  int down_isFree  = tex2D( tex_isFree, t_j, t_i-1 );
//Set PERIODIC boundary conditions
if (t_i == 0) down_isFree = isFreeAll[ t_j + (nHeight-1)*nWidth ];
if (t_i == (nHeight-1)) up_isFree = isFreeAll[ t_j ];
if (t_j == 0) left_isFree = isFreeAll[ (nWidth-1) + (t_i)*nWidth ];
if (t_j == (nWidth-1)) right_isFree = isFreeAll[ (t_i)*nWidth ];
//Read neighbors concentration
float center_C = tex2D( tex_concentrationIn, t_j, t_i );
float left_C = tex2D( tex_concentrationIn, t_j-1, t_i );
float right_C = tex2D( tex_concentrationIn, t_j+1, t_i );
float up_C = tex2D( tex_concentrationIn, t_j, t_i+1 );
float down_C = tex2D( tex_concentrationIn, t_j, t_i-1 );
//Set PERIODIC boundary conditions
if (t_i == 0) down_C = tex2D( tex_concentrationIn, t_j, nHeight-1 );
if (t_i == (nHeight-1)) up_C = tex2D( tex_concentrationIn, t_j, 0 );
if (t_j == 0) left_C = tex2D( tex_concentrationIn, nWidth-1, t_i );
if (t_j == (nWidth-1)) right_C = tex2D( tex_concentrationIn, 0, t_i );
float newConcentration = hx*left_C + (1.f - hx)*(right_C + down_C + up_C )/3.f +
( hx*(1 - right_isFree) + (1.f-hx)*( 3 - ( left_isFree + down_isFree + up_isFree ) )/3.f )*center_C;
if ( isFreeAll[tid] ) concentrationOut[tid] = newConcentration;
}
///////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
__device__ void isBlockActive( cudaP minVal, cudaP *sum_sh, uchar *activeBlock ){
int tid_b = threadIdx.x + threadIdx.y*blockDim.x;
if ( tid_b < ( (blockDim.x+2)*(blockDim.y+2) - (blockDim.x*blockDim.y) ) ) sum_sh[tid_b] += sum_sh[ tid_b + blockDim.x*blockDim.y ];
__syncthreads();
int i = blockDim.x*blockDim.y / 2;
while ( i > 0 ){
if ( tid_b < i ) sum_sh[tid_b] = sum_sh[tid_b] + sum_sh[tid_b+i];
__syncthreads();
i /= 2;
}
  __syncthreads();
if ( tid_b == 0 ) activeBlock[0] = ( sum_sh[0] >= minVal );
// return false;
// return ( sum_sh[0] >= minVal );
}
///////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
#define idx(j, i) ( (j + 1) + (i + 1)*(blockDim.x+2))
#define idxR(j, i) ( (j + 1 + threadIdx.x) + (i + 1 + threadIdx.y)*(blockDim.x+2))
__global__ void main_kernel_shared( const int nWidth, const int nHeight, cudaP hx, cudaP minVal, uchar *isFreeAll,
cudaP *concIn, cudaP *concentrationOut, uchar *activeBlocks ){
const int t_j = blockIdx.x*blockDim.x + threadIdx.x;
const int t_i = blockIdx.y*blockDim.y + threadIdx.y;
const int tid = t_j + t_i*blockDim.x*gridDim.x;
//Read my neighbors concentration
__shared__ cudaP conc_sh[ ( %(B_WIDTH)s + 2 ) * ( %(B_HEIGHT)s + 2 ) ];
__shared__ cudaP concSum_sh[ ( %(B_WIDTH)s + 2 ) * ( %(B_HEIGHT)s + 2 ) ];
conc_sh[ idx(threadIdx.x, threadIdx.y) ] = concIn[tid] ;
concSum_sh[ idx(threadIdx.x, threadIdx.y) ] = conc_sh[ idx(threadIdx.x, threadIdx.y) ];
//Left boundary
if (t_j == 0){
conc_sh[ idx(-1, threadIdx.y) ] = concIn[ (nWidth-1) + t_i*nWidth ];
concSum_sh[ idx(-1, threadIdx.y) ] = conc_sh[ idx(-1, threadIdx.y) ];
}
else if ( threadIdx.x == 0 ){
conc_sh[ idx(-1, threadIdx.y) ] = concIn[ (t_j-1) + t_i*nWidth ];
concSum_sh[ idx(-1, threadIdx.y) ] = conc_sh[ idx(-1, threadIdx.y) ];
if ( threadIdx.y == 0 ){
conc_sh[ idx(-1, -1) ] = 0; //Left-Down corner
concSum_sh[ idx(-1, -1) ] = 0;
}
if ( threadIdx.y == blockDim.y -1 ){
conc_sh[ idx(-1, blockDim.y) ] = 0; //Left-Up corner
concSum_sh[ idx(-1, blockDim.y) ] = 0;
}
}
//Right boundary
if (t_j == nWidth-1){
conc_sh[ idx(blockDim.x, threadIdx.y) ] = concIn[ t_i*nWidth ];
concSum_sh[ idx(blockDim.x, threadIdx.y) ] = conc_sh[ idx(blockDim.x, threadIdx.y) ];
}
else if ( threadIdx.x == blockDim.x-1 ){
conc_sh[ idx(blockDim.x, threadIdx.y) ] = concIn[ (t_j+1) + t_i*nWidth ];
concSum_sh[ idx(blockDim.x, threadIdx.y) ] = conc_sh[ idx(blockDim.x, threadIdx.y) ];
if ( threadIdx.y == 0 ){
conc_sh[ idx(blockDim.x, -1) ] = 0; //Right-Down corner
concSum_sh[ idx(blockDim.x, -1) ] = 0;
}
if ( threadIdx.y == blockDim.y -1 ){
conc_sh[ idx(blockDim.x, blockDim.y) ] = 0; //Right-Up corner
concSum_sh[ idx(blockDim.x, blockDim.y) ] = 0;
}
}
//Down boundary
if (t_i == 0){
conc_sh[ idx(threadIdx.x, -1) ] = concIn[ t_j + (nHeight-1)*nWidth ];
concSum_sh[ idx(threadIdx.x, -1) ] = conc_sh[ idx(threadIdx.x, -1) ];
}
else if ( threadIdx.y == 0 ){
conc_sh[ idx(threadIdx.x, -1) ] = concIn[ t_j + (t_i-1)*nWidth ];
concSum_sh[ idx(threadIdx.x, -1) ] = conc_sh[ idx(threadIdx.x, -1) ];
}
//Up boundary
if (t_i == nHeight-1){
conc_sh[ idx( threadIdx.x, blockDim.y) ] = concIn[ t_j ];
concSum_sh[ idx( threadIdx.x, blockDim.y) ] = conc_sh[ idx( threadIdx.x, blockDim.y) ];
}
else if ( threadIdx.y == blockDim.y-1 ){
conc_sh[ idx( threadIdx.x, blockDim.y) ] = concIn[ t_j + (t_i+1)*nWidth ];
concSum_sh[ idx( threadIdx.x, blockDim.y) ] = conc_sh[ idx( threadIdx.x, blockDim.y) ];
}
__syncthreads();
//Check if the block is active
__shared__ uchar activeBlock;
isBlockActive( minVal, concSum_sh, &activeBlock ) ;
// if ( threadIdx.x == 0 and threadIdx.y == 0 ) activeBlock=(uchar)1;
__syncthreads();
if ( activeBlock == 0 ) return;
// if ( !isBlockActive(minVal, concSum_sh ) ) return;
// __shared__ uchar activeBlock;
// if ( threadIdx.x == 0 and threadIdx.y ==0 ) activeBlock = activeBlocks[blockIdx.x + blockIdx.y*gridDim.x ];
// __syncthreads();
// if ( !activeBlock ) return;
//Read my neighbors occupancy
__shared__ uchar isFree_sh[ %(B_WIDTH)s + 2 ][ %(B_HEIGHT)s + 2 ];
isFree_sh[threadIdx.x+1][threadIdx.y+1] = isFreeAll[tid];
//Left boundary
if (t_j == 0) isFree_sh[0][threadIdx.y+1] = isFreeAll[ (nWidth-1) + t_i*nWidth ];
else if ( threadIdx.x == 0 ) isFree_sh[0][threadIdx.y+1] = isFreeAll[ (t_j-1) + t_i*nWidth ];
//Right boundary
if (t_j == nWidth-1) isFree_sh[blockDim.x+1][threadIdx.y+1] = isFreeAll[ t_i*nWidth ];
else if ( threadIdx.x == blockDim.x-1 ) isFree_sh[blockDim.x+1][threadIdx.y+1] = isFreeAll[ (t_j+1) + t_i*nWidth ];
//Down boundary
if (t_i == 0) isFree_sh[threadIdx.x+1][0] = isFreeAll[ t_j + (nHeight-1)*nWidth ];
else if ( threadIdx.y == 0 ) isFree_sh[threadIdx.x+1][0] = isFreeAll[ t_j + (t_i-1)*nWidth ];
//Up boundary
if (t_i == nHeight-1) isFree_sh[threadIdx.x+1][blockDim.y+1] = isFreeAll[ t_j ];
else if ( threadIdx.y == blockDim.y-1 ) isFree_sh[threadIdx.x+1][blockDim.y+1] = isFreeAll[ t_j + (t_i+1)*nWidth ];
__syncthreads();
cudaP oneThird = 1.0/3;
// cudaP newConc = hx*( conc_sh[threadIdx.x][threadIdx.y+1] + ( 1 - isFree_sh[threadIdx.x+2][threadIdx.y+1] )*conc_sh[threadIdx.x+1][threadIdx.y+1] ) +
// oneThird*( 1 - hx )*( conc_sh[threadIdx.x+2][threadIdx.y+1] + conc_sh[threadIdx.x+1][threadIdx.y] + conc_sh[threadIdx.x+1][threadIdx.y+2] +
// conc_sh[threadIdx.x+1][threadIdx.y+1]*( 3 - ( isFree_sh[threadIdx.x][threadIdx.y+1] + isFree_sh[threadIdx.x+1][threadIdx.y] + isFree_sh[threadIdx.x+1][threadIdx.y+2] ) ) );
if ( isFree_sh[threadIdx.x+1][threadIdx.y+1] ) concentrationOut[tid] =
hx*( conc_sh[ idxR(-1, 0) ] + ( 1 - isFree_sh[threadIdx.x+2][threadIdx.y+1] )*conc_sh[ idxR(0, 0) ] ) +
oneThird*( 1 - hx )*( conc_sh[ idxR(1, 0) ] + conc_sh[ idxR(0, -1) ] + conc_sh[ idxR(0, 1) ] +
conc_sh[ idxR(0, 0) ]*( 3 - ( isFree_sh[threadIdx.x][threadIdx.y+1] + isFree_sh[threadIdx.x+1][threadIdx.y] + isFree_sh[threadIdx.x+1][threadIdx.y+2] ) ) );
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void findActivity_kernel( cudaP minVal, cudaP *concentration, uchar *activeBlocks ){
int t_j = blockIdx.x*blockDim.x + threadIdx.x;
int t_i = blockIdx.y*blockDim.y + threadIdx.y;
int tid = t_j + t_i*blockDim.x*gridDim.x;
int tid_b = threadIdx.x + threadIdx.y*blockDim.x;
__shared__ cudaP concentration_sh[ %(THREADS_PER_BLOCK)s ];
concentration_sh[tid_b] = concentration[tid];
__syncthreads();
int i = blockDim.x*blockDim.y / 2;
while ( i > 0 ){
if ( tid_b < i ) concentration_sh[tid_b] = concentration_sh[tid_b] + concentration_sh[tid_b+i];
__syncthreads();
i /= 2;
}
if (concentration_sh[0] >= minVal ){
if ( tid_b < 3 ){
// left, center and right
if ( ( blockIdx.x > 0 ) and ( blockIdx.x < gridDim.x-1 ) ) activeBlocks[ blockIdx.x + (tid_b-1) + blockIdx.y*gridDim.x ] = (uchar) 1;
// down and up
if ( ( tid_b != 1) and (blockIdx.y > 0) and ( blockIdx.y < gridDim.y-1 ) ) activeBlocks[ blockIdx.x + (blockIdx.y+tid_b-1)*gridDim.x ] = (uchar) 1;
// //right
// if (blockIdx.x < gridDim.x-1) activeBlocks[ blockIdx.x+1 + blockIdx.y*gridDim.x ] = (uchar) 1;
// //left
// if (blockIdx.x > 0) activeBlocks[ (blockIdx.x-1) + blockIdx.y*gridDim.x ] = (uchar) 1;
// if ( tid_b == 0 ){
// //up
// if (blockIdx.y < gridDim.y-1) activeBlocks[ blockIdx.x + (blockIdx.y+1)*gridDim.x ] = (uchar) 1;
// //Down
// if (blockIdx.y > 0) activeBlocks[ blockIdx.x + (blockIdx.y-1)*gridDim.x ] = (uchar) 1;
// }
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void getActivity_kernel( uchar *activeBlocks, uchar *activeThreads ){
int t_j = blockIdx.x*blockDim.x + threadIdx.x;
int t_i = blockIdx.y*blockDim.y + threadIdx.y;
int tid = t_j + t_i*blockDim.x*gridDim.x ;
int tid_b = threadIdx.x + threadIdx.y*blockDim.x;
int bid = blockIdx.x + blockIdx.y*gridDim.x;
__shared__ uchar activeBlock;
if (tid_b == 0 ) activeBlock = activeBlocks[bid];
__syncthreads();
uchar active = 0;
if ( activeBlock ) active = (uchar) 1;
activeThreads[tid] = active;
}
| 9b63da59bf237bf20caa15fcb5f8e0e3237a66d3.cu | // #include <pycuda-complex.hpp>
// #include <surface_functions.h>
#include <stdint.h>
#include <cuda.h>
typedef unsigned char uchar;
texture< int, cudaTextureType2D, cudaReadModeElementType> tex_isFree;
texture<float, cudaTextureType2D, cudaReadModeElementType> tex_concentrationIn;
///////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
__global__ void main_kernel_tex( const int nWidth, const int nHeight, float hx, int *isFreeAll,
float *concentrationOut ){
int t_j = blockIdx.x*blockDim.x + threadIdx.x;
int t_i = blockIdx.y*blockDim.y + threadIdx.y;
int tid = t_j + t_i*blockDim.x*gridDim.x;
//Read neighbors occupancy
  int left_isFree  = tex2D( tex_isFree, t_j-1, t_i );
  int right_isFree = tex2D( tex_isFree, t_j+1, t_i );
  int up_isFree    = tex2D( tex_isFree, t_j, t_i+1 );
  int down_isFree  = tex2D( tex_isFree, t_j, t_i-1 );
//Set PERIODIC boundary conditions
if (t_i == 0) down_isFree = isFreeAll[ t_j + (nHeight-1)*nWidth ];
if (t_i == (nHeight-1)) up_isFree = isFreeAll[ t_j ];
if (t_j == 0) left_isFree = isFreeAll[ (nWidth-1) + (t_i)*nWidth ];
if (t_j == (nWidth-1)) right_isFree = isFreeAll[ (t_i)*nWidth ];
//Read neighbors concentration
float center_C = tex2D( tex_concentrationIn, t_j, t_i );
float left_C = tex2D( tex_concentrationIn, t_j-1, t_i );
float right_C = tex2D( tex_concentrationIn, t_j+1, t_i );
float up_C = tex2D( tex_concentrationIn, t_j, t_i+1 );
float down_C = tex2D( tex_concentrationIn, t_j, t_i-1 );
//Set PERIODIC boundary conditions
if (t_i == 0) down_C = tex2D( tex_concentrationIn, t_j, nHeight-1 );
if (t_i == (nHeight-1)) up_C = tex2D( tex_concentrationIn, t_j, 0 );
if (t_j == 0) left_C = tex2D( tex_concentrationIn, nWidth-1, t_i );
if (t_j == (nWidth-1)) right_C = tex2D( tex_concentrationIn, 0, t_i );
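  // update rule (biased diffusion with drift toward +x): a free cell receives a fraction hx of
  // its left neighbour's concentration and (1-hx)/3 from each of the other three neighbours;
  // any share that would have flowed into an occupied (non-free) neighbour stays in the cell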
float newConcentration = hx*left_C + (1.f - hx)*(right_C + down_C + up_C )/3.f +
( hx*(1 - right_isFree) + (1.f-hx)*( 3 - ( left_isFree + down_isFree + up_isFree ) )/3.f )*center_C;
if ( isFreeAll[tid] ) concentrationOut[tid] = newConcentration;
}
///////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
__device__ void isBlockActive( cudaP minVal, cudaP *sum_sh, uchar *activeBlock ){
int tid_b = threadIdx.x + threadIdx.y*blockDim.x;
if ( tid_b < ( (blockDim.x+2)*(blockDim.y+2) - (blockDim.x*blockDim.y) ) ) sum_sh[tid_b] += sum_sh[ tid_b + blockDim.x*blockDim.y ];
__syncthreads();
int i = blockDim.x*blockDim.y / 2;
while ( i > 0 ){
if ( tid_b < i ) sum_sh[tid_b] = sum_sh[tid_b] + sum_sh[tid_b+i];
__syncthreads();
i /= 2;
}
  __syncthreads();
if ( tid_b == 0 ) activeBlock[0] = ( sum_sh[0] >= minVal );
// return false;
// return ( sum_sh[0] >= minVal );
}
///////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
#define idx(j, i) ( (j + 1) + (i + 1)*(blockDim.x+2))
#define idxR(j, i) ( (j + 1 + threadIdx.x) + (i + 1 + threadIdx.y)*(blockDim.x+2))
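// idx maps a tile-local (j,i) coordinate into shared memory laid out as a (blockDim.x+2) x
// (blockDim.y+2) tile with a one-cell halo (hence the +1 offsets); idxR does the same relative
// to the calling thread's own position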
__global__ void main_kernel_shared( const int nWidth, const int nHeight, cudaP hx, cudaP minVal, uchar *isFreeAll,
cudaP *concIn, cudaP *concentrationOut, uchar *activeBlocks ){
const int t_j = blockIdx.x*blockDim.x + threadIdx.x;
const int t_i = blockIdx.y*blockDim.y + threadIdx.y;
const int tid = t_j + t_i*blockDim.x*gridDim.x;
//Read my neighbors concentration
__shared__ cudaP conc_sh[ ( %(B_WIDTH)s + 2 ) * ( %(B_HEIGHT)s + 2 ) ];
__shared__ cudaP concSum_sh[ ( %(B_WIDTH)s + 2 ) * ( %(B_HEIGHT)s + 2 ) ];
conc_sh[ idx(threadIdx.x, threadIdx.y) ] = concIn[tid] ;
concSum_sh[ idx(threadIdx.x, threadIdx.y) ] = conc_sh[ idx(threadIdx.x, threadIdx.y) ];
//Left boundary
if (t_j == 0){
conc_sh[ idx(-1, threadIdx.y) ] = concIn[ (nWidth-1) + t_i*nWidth ];
concSum_sh[ idx(-1, threadIdx.y) ] = conc_sh[ idx(-1, threadIdx.y) ];
}
else if ( threadIdx.x == 0 ){
conc_sh[ idx(-1, threadIdx.y) ] = concIn[ (t_j-1) + t_i*nWidth ];
concSum_sh[ idx(-1, threadIdx.y) ] = conc_sh[ idx(-1, threadIdx.y) ];
if ( threadIdx.y == 0 ){
conc_sh[ idx(-1, -1) ] = 0; //Left-Down corner
concSum_sh[ idx(-1, -1) ] = 0;
}
if ( threadIdx.y == blockDim.y -1 ){
conc_sh[ idx(-1, blockDim.y) ] = 0; //Left-Up corner
concSum_sh[ idx(-1, blockDim.y) ] = 0;
}
}
//Right boundary
if (t_j == nWidth-1){
conc_sh[ idx(blockDim.x, threadIdx.y) ] = concIn[ t_i*nWidth ];
concSum_sh[ idx(blockDim.x, threadIdx.y) ] = conc_sh[ idx(blockDim.x, threadIdx.y) ];
}
else if ( threadIdx.x == blockDim.x-1 ){
conc_sh[ idx(blockDim.x, threadIdx.y) ] = concIn[ (t_j+1) + t_i*nWidth ];
concSum_sh[ idx(blockDim.x, threadIdx.y) ] = conc_sh[ idx(blockDim.x, threadIdx.y) ];
if ( threadIdx.y == 0 ){
conc_sh[ idx(blockDim.x, -1) ] = 0; //Right-Down corner
concSum_sh[ idx(blockDim.x, -1) ] = 0;
}
if ( threadIdx.y == blockDim.y -1 ){
conc_sh[ idx(blockDim.x, blockDim.y) ] = 0; //Right-Up corner
concSum_sh[ idx(blockDim.x, blockDim.y) ] = 0;
}
}
//Down boundary
if (t_i == 0){
conc_sh[ idx(threadIdx.x, -1) ] = concIn[ t_j + (nHeight-1)*nWidth ];
concSum_sh[ idx(threadIdx.x, -1) ] = conc_sh[ idx(threadIdx.x, -1) ];
}
else if ( threadIdx.y == 0 ){
conc_sh[ idx(threadIdx.x, -1) ] = concIn[ t_j + (t_i-1)*nWidth ];
concSum_sh[ idx(threadIdx.x, -1) ] = conc_sh[ idx(threadIdx.x, -1) ];
}
//Up boundary
if (t_i == nHeight-1){
conc_sh[ idx( threadIdx.x, blockDim.y) ] = concIn[ t_j ];
concSum_sh[ idx( threadIdx.x, blockDim.y) ] = conc_sh[ idx( threadIdx.x, blockDim.y) ];
}
else if ( threadIdx.y == blockDim.y-1 ){
conc_sh[ idx( threadIdx.x, blockDim.y) ] = concIn[ t_j + (t_i+1)*nWidth ];
concSum_sh[ idx( threadIdx.x, blockDim.y) ] = conc_sh[ idx( threadIdx.x, blockDim.y) ];
}
__syncthreads();
//Check if the block is active
__shared__ uchar activeBlock;
isBlockActive( minVal, concSum_sh, &activeBlock ) ;
// if ( threadIdx.x == 0 and threadIdx.y == 0 ) activeBlock=(uchar)1;
__syncthreads();
if ( activeBlock == 0 ) return;
// if ( !isBlockActive(minVal, concSum_sh ) ) return;
// __shared__ uchar activeBlock;
// if ( threadIdx.x == 0 and threadIdx.y ==0 ) activeBlock = activeBlocks[blockIdx.x + blockIdx.y*gridDim.x ];
// __syncthreads();
// if ( !activeBlock ) return;
//Read my neighbors occupancy
__shared__ uchar isFree_sh[ %(B_WIDTH)s + 2 ][ %(B_HEIGHT)s + 2 ];
isFree_sh[threadIdx.x+1][threadIdx.y+1] = isFreeAll[tid];
//Left boundary
if (t_j == 0) isFree_sh[0][threadIdx.y+1] = isFreeAll[ (nWidth-1) + t_i*nWidth ];
else if ( threadIdx.x == 0 ) isFree_sh[0][threadIdx.y+1] = isFreeAll[ (t_j-1) + t_i*nWidth ];
//Right boundary
if (t_j == nWidth-1) isFree_sh[blockDim.x+1][threadIdx.y+1] = isFreeAll[ t_i*nWidth ];
else if ( threadIdx.x == blockDim.x-1 ) isFree_sh[blockDim.x+1][threadIdx.y+1] = isFreeAll[ (t_j+1) + t_i*nWidth ];
//Down boundary
if (t_i == 0) isFree_sh[threadIdx.x+1][0] = isFreeAll[ t_j + (nHeight-1)*nWidth ];
else if ( threadIdx.y == 0 ) isFree_sh[threadIdx.x+1][0] = isFreeAll[ t_j + (t_i-1)*nWidth ];
//Up boundary
if (t_i == nHeight-1) isFree_sh[threadIdx.x+1][blockDim.y+1] = isFreeAll[ t_j ];
else if ( threadIdx.y == blockDim.y-1 ) isFree_sh[threadIdx.x+1][blockDim.y+1] = isFreeAll[ t_j + (t_i+1)*nWidth ];
__syncthreads();
cudaP oneThird = 1.0/3;
// cudaP newConc = hx*( conc_sh[threadIdx.x][threadIdx.y+1] + ( 1 - isFree_sh[threadIdx.x+2][threadIdx.y+1] )*conc_sh[threadIdx.x+1][threadIdx.y+1] ) +
// oneThird*( 1 - hx )*( conc_sh[threadIdx.x+2][threadIdx.y+1] + conc_sh[threadIdx.x+1][threadIdx.y] + conc_sh[threadIdx.x+1][threadIdx.y+2] +
// conc_sh[threadIdx.x+1][threadIdx.y+1]*( 3 - ( isFree_sh[threadIdx.x][threadIdx.y+1] + isFree_sh[threadIdx.x+1][threadIdx.y] + isFree_sh[threadIdx.x+1][threadIdx.y+2] ) ) );
if ( isFree_sh[threadIdx.x+1][threadIdx.y+1] ) concentrationOut[tid] =
hx*( conc_sh[ idxR(-1, 0) ] + ( 1 - isFree_sh[threadIdx.x+2][threadIdx.y+1] )*conc_sh[ idxR(0, 0) ] ) +
oneThird*( 1 - hx )*( conc_sh[ idxR(1, 0) ] + conc_sh[ idxR(0, -1) ] + conc_sh[ idxR(0, 1) ] +
conc_sh[ idxR(0, 0) ]*( 3 - ( isFree_sh[threadIdx.x][threadIdx.y+1] + isFree_sh[threadIdx.x+1][threadIdx.y] + isFree_sh[threadIdx.x+1][threadIdx.y+2] ) ) );
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void findActivity_kernel( cudaP minVal, cudaP *concentration, uchar *activeBlocks ){
int t_j = blockIdx.x*blockDim.x + threadIdx.x;
int t_i = blockIdx.y*blockDim.y + threadIdx.y;
int tid = t_j + t_i*blockDim.x*gridDim.x;
int tid_b = threadIdx.x + threadIdx.y*blockDim.x;
__shared__ cudaP concentration_sh[ %(THREADS_PER_BLOCK)s ];
concentration_sh[tid_b] = concentration[tid];
__syncthreads();
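  // tree reduction of the per-thread concentrations in shared memory; the halving loop assumes
  // the block size (blockDim.x * blockDim.y) is a power of two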
int i = blockDim.x*blockDim.y / 2;
while ( i > 0 ){
if ( tid_b < i ) concentration_sh[tid_b] = concentration_sh[tid_b] + concentration_sh[tid_b+i];
__syncthreads();
i /= 2;
}
if (concentration_sh[0] >= minVal ){
if ( tid_b < 3 ){
// left, center and right
if ( ( blockIdx.x > 0 ) and ( blockIdx.x < gridDim.x-1 ) ) activeBlocks[ blockIdx.x + (tid_b-1) + blockIdx.y*gridDim.x ] = (uchar) 1;
// down and up
if ( ( tid_b != 1) and (blockIdx.y > 0) and ( blockIdx.y < gridDim.y-1 ) ) activeBlocks[ blockIdx.x + (blockIdx.y+tid_b-1)*gridDim.x ] = (uchar) 1;
// //right
// if (blockIdx.x < gridDim.x-1) activeBlocks[ blockIdx.x+1 + blockIdx.y*gridDim.x ] = (uchar) 1;
// //left
// if (blockIdx.x > 0) activeBlocks[ (blockIdx.x-1) + blockIdx.y*gridDim.x ] = (uchar) 1;
// if ( tid_b == 0 ){
// //up
// if (blockIdx.y < gridDim.y-1) activeBlocks[ blockIdx.x + (blockIdx.y+1)*gridDim.x ] = (uchar) 1;
// //Down
// if (blockIdx.y > 0) activeBlocks[ blockIdx.x + (blockIdx.y-1)*gridDim.x ] = (uchar) 1;
// }
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void getActivity_kernel( uchar *activeBlocks, uchar *activeThreads ){
int t_j = blockIdx.x*blockDim.x + threadIdx.x;
int t_i = blockIdx.y*blockDim.y + threadIdx.y;
int tid = t_j + t_i*blockDim.x*gridDim.x ;
int tid_b = threadIdx.x + threadIdx.y*blockDim.x;
int bid = blockIdx.x + blockIdx.y*gridDim.x;
__shared__ uchar activeBlock;
if (tid_b == 0 ) activeBlock = activeBlocks[bid];
__syncthreads();
uchar active = 0;
if ( activeBlock ) active = (uchar) 1;
activeThreads[tid] = active;
}
|
99cfd48e1035097172e75f5c882d3eeafa5f8417.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void foo(int *ptr)
{
*ptr = 7;
}
int main(void)
{
hipLaunchKernelGGL(( foo), dim3(1),dim3(1), 0, 0, 0);
// make the host block until the device is finished with foo
hipDeviceSynchronize();
// check for error
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
return 0;
} | 99cfd48e1035097172e75f5c882d3eeafa5f8417.cu | #include <stdio.h>
#include <stdlib.h>
__global__ void foo(int *ptr)
{
*ptr = 7;
}
int main(void)
{
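  // launch foo with a null device pointer: the kernel's write through it should trigger an
  // invalid-address error, which the checks below are meant to surface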
foo<<<1,1>>>(0);
// make the host block until the device is finished with foo
cudaThreadSynchronize();
// check for error
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
return 0;
} |
3a6357c3033ba56ca9523d8bfc562aa011986f08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "treedefs.h"
#include "CudaArrayCopyUtils.h"
#include "treecodeCU.h"
#include "cudahelper.h"
#include "tictoc.h"
#include <iostream>
//: We should really be using native CUDA vectors for this.... but that requires more funny typing magic to convert the CPU data
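// Barnes-Hut style multipole acceptance criterion: the node may be treated as a single point
// mass for this whole group when the gap between the group's bounding sphere and the node's
// barycenter exceeds the node's diameter divided by the opening angle theta.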
template<our_size_t DIM, typename Float, our_size_t PPG> __device__ /*inline*/ bool passesMAC(const GroupInfo<DIM, Float, PPG>& groupInfo, const Node<DIM, Float>& nodeHere, Float theta) {
Float d = mag(groupInfo.center - nodeHere.barycenter.pos) - groupInfo.radius;
Float l = 2 * nodeHere.radius;
return d > (l / theta);
}
template<template<our_size_t, typename> class ElemType, template<our_size_t, typename> class ElemTypeArray, our_size_t DIM, typename Float>
__device__ /*inline*/ void initStack(ElemTypeArray<DIM, Float> level, our_size_t levelCt, ElemTypeArray<DIM, Float> stack, our_size_t* stackCt){
if(threadIdx.x < levelCt){
if(levelCt > level.elems || threadIdx.x >= stack.elems) printf("%d.%d %s: " SZSTR " (should be <= " SZSTR ") elements. stackCt @ %d / " SZSTR "\n",blockIdx.x, threadIdx.x, __func__,levelCt,level.elems,threadIdx.x,stack.elems);
ElemType<DIM, Float> eHere;
level.get(threadIdx.x, eHere);
stack.set(threadIdx.x, eHere);
}
if(threadIdx.x == 0){
atomicExch((cu_size_t*)stackCt,(cu_size_t)levelCt); // Don't want to have to threadfence afterwards. Just make sure it's set!
}
}
template<our_size_t DIM, typename Float>
__device__ void dumpStackChildren(NodeArray<DIM, Float> stack, const our_size_t* stackCt){
our_size_t dst = *stackCt;
for(our_size_t i = 0; i < dst; i++){
printf("(" SZSTR ") see (" SZSTR " " SZSTR ") in (%p/" SZSTR ")\n",i,stack.childStart[i],stack.childCount[i], stackCt, dst);
}
}
template<our_size_t DIM, typename T>
__device__ /*inline*/ void pushMeta(const our_size_t srcSt, const our_size_t srcCt, PointMassArray<DIM, T> stack, our_size_t* stackCt){
// This is a weird compiler bug. There's no reason this shouldn't have worked without the cast.
our_size_t dst = atomicAdd((cu_size_t*)stackCt, (cu_size_t)srcCt);
for(our_size_t i = dst, j = 0; i < dst + srcCt; i++, j++){
PointMass<DIM, T> eHere;
eHere.m = 1;
eHere.pos.x[0] = srcSt + j;
eHere.pos.x[1] = 0;
eHere.pos.x[2] = 0;
stack.set(i, eHere);
}
}
template<template<our_size_t, typename> class ElemType, template<our_size_t, typename> class ElemTypeArray, our_size_t DIM, typename Float>
__device__ /*inline*/ void pushAll(const ElemTypeArray<DIM, Float> src, const our_size_t srcCt, ElemTypeArray<DIM, Float> stack, our_size_t* stackCt){
// This is a weird compiler bug. There's no reason this shouldn't have worked without the cast.
our_size_t dst = atomicAdd((cu_size_t*)stackCt, (cu_size_t)srcCt);
if(srcCt > src.elems || dst >= stack.elems) printf("%d.%d %s: " SZSTR " (should be <= " SZSTR ") elements. stackCt @ " SZSTR " / " SZSTR "\n",blockIdx.x, threadIdx.x, __func__, srcCt,src.elems,dst,stack.elems);
for(our_size_t i = dst, j = 0; i < dst + srcCt; i++, j++){
ElemType<DIM, Float> eHere;
src.get(j, eHere);
stack.set(i, eHere);
}
}
template<our_size_t DIM, typename Float, TraverseMode Mode>
__device__ /*inline*/ InteractionType(DIM, Float, Mode) freshInteraction(){
InteractionType(DIM, Float, Mode) fresh; for(our_size_t i = 0; i < DIM; i++){
fresh.x[i] = 0.0;
}
return fresh;
}
// Needs softening
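// Forces mode computes a softened point-mass interaction, roughly m1*m2*disp / (softening + |disp|^3);
// CountOnly just tallies node vs. particle partners, and HashInteractions folds the partner's
// identity into a hash, presumably so different traversals can be compared for consistency.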
template<our_size_t DIM, typename Float, TraverseMode Mode, bool spam = false>
__device__ /*inline*/ InteractionType(DIM, Float, Mode) calc_interaction(const PointMass<DIM, Float> &m1, const InteracterType(DIM, Float, Mode) &m2, Float softening){
InteractionType(DIM, Float, Mode) interaction = freshInteraction<DIM, Float, Mode>();
our_size_t isParticle = m2.m != 0;
switch(Mode){
case Forces:{
// Reinterpret cast is evil, but necessary here. template-induced dead-code and all.
if(spam){
printf("Interacting\t%f %f %f %f vs %f %f %f %f\n",m1.m,m1.pos.x[0],m1.pos.x[1],m1.pos.x[2],m2.m,m2.pos.x[0],m2.pos.x[1],m2.pos.x[2]);
}
const PointMass<DIM, Float>& m2_inner = reinterpret_cast<const PointMass<DIM, Float>& >(m2);
Vec<DIM, Float> disp = m1.pos - m2_inner.pos;
interaction = (disp * ((m1.m * m2_inner.m) / (Float)(softening + pow((Float)mag_sq(disp),(Float)1.5)))).template castContents<InteractionElems(Mode, DIM, 2) , typename std::conditional<NonForceCondition(Mode), our_size_t, Float>::type>();
break;}
case CountOnly:
interaction.x[isParticle] = 1;
break;
case HashInteractions:{
// Type inference is failing here unless these are all explicitly separate variables
our_size_t generic = m2.pos.x[0];
our_size_t nodeSpecific1 = m2.pos.x[1];
our_size_t nodeSpecific2 = m2.pos.x[2];
our_size_t nodeSpecific = nodeSpecific1 ^ nodeSpecific2;
our_size_t e = generic ^ (!isParticle) * nodeSpecific;
interaction.x[isParticle] = e;
break;}
}
return interaction;
}
template<typename T>
__device__ /*inline*/ void swap(T &a, T &b){
T c(a); a=b; b=c;
}
template<our_size_t DIM, typename T>
__device__ /*inline*/ void dumpNodeArrayContents(const char *name, const NodeArray<DIM, T> &n){
ASSERT_DEAD_CODE;
printf("%s : %p %p %p %p " SZSTR "\n",name, n.isLeaf, n.childCount, n.childStart, n.radius, n.elems);
for(our_size_t i = 0; i < DIM; i++){
printf("%p ",n.minX.x[i]);
}printf("\n");
for(our_size_t i = 0; i < DIM; i++){
printf("%p ",n.maxX.x[i]);
}printf("\n");
for(our_size_t i = 0; i < DIM; i++){
printf("%p ",n.barycenter.pos.x[i]);
}printf("%p\n",n.barycenter.m);
printf("%s - Dump complete\n",name);
}
template<our_size_t DIM, typename Float, our_size_t TPB, our_size_t PPG, our_size_t MAX_LEVELS, our_size_t INTERACTION_THRESHOLD, TraverseMode Mode, bool spam>
__global__ void traverseTreeKernel(const our_size_t nGroups, const GroupInfoArray<DIM, Float, PPG> groupInfo,
const our_size_t startDepth, NodeArray<DIM, Float>* treeLevels, const our_size_t* treeCounts,
const our_size_t n, const ParticleArray<DIM, Float> particles, InteractionTypeArray(DIM, Float, Mode) interactions,
const Float softening, const Float theta,
our_size_t *const bfsStackCounters, const NodeArray<DIM, Float> bfsStackBuffers, const our_size_t stackCapacity) {
//this function processes groups of stars against nodes in the tree
//start at startDepth instead of root, to save a few levels
//groupInfo describes particles via offsets into the overall ParticleArray; everything is in principle tree-sorted before we start
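	// Traversal strategy: each block owns one particle group and walks the tree breadth-first with
	// two stacks (current level / next level) that are swapped at every depth. Nodes that pass the
	// MAC, and the particles of leaves that fail it, are appended to a shared-memory interaction
	// list, which is drained cooperatively whenever it exceeds INTERACTION_THRESHOLD and once more
	// at the end of the traversal.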
typedef typename std::conditional<NonForceCondition(Mode), our_size_t, Float>::type InteractionElemType;
#define BUF_MUL 128
// TPB must = blockDims.x
__shared__ our_size_t interactionCounters[1];
__shared__ InteractionElemType pointMass[BUF_MUL*INTERACTION_THRESHOLD];
__shared__ InteractionElemType pointPos[DIM * BUF_MUL*INTERACTION_THRESHOLD];
__shared__ InteractionElemType interactionBuf[TPB * InteractionElems(Mode, DIM, 2)];
if(threadIdx.x == 0 && blockDim.x != TPB){
printf("%d Launched with mismatched TPB parameters\n",blockIdx.x);
}
InteracterTypeArray(DIM, Float, Mode) interactionList; // This should be const, but we need a compile time array initializer first
interactionList.m = pointMass;
for(our_size_t j = 0; j < DIM; j++){ // this should unroll, 2 int-ops, and a store (hopefully to registers) * DIM (=3)
interactionList.pos.x[j] = pointPos + (j * BUF_MUL*INTERACTION_THRESHOLD);
}
interactionList.setCapacity(BUF_MUL*INTERACTION_THRESHOLD);
InteractionTypeArray(DIM, Float, Mode) interactionScratch; // this should unroll, 2 int-ops, and a store (hopefully to registers) * DIM (=3)
for(our_size_t j = 0; j < InteractionElems(Mode, DIM, 2); j++){
interactionScratch.x[j] = interactionBuf + (TPB * j);
}
interactionScratch.setCapacity(TPB);
for(our_size_t groupOffset = 0; groupOffset + blockIdx.x < nGroups; groupOffset += gridDim.x){//one group per block of threads
//GroupInfo has 12 elements: 10 floats - coordinates of bounding box and center of bounding sphere and its radius; 2 ints - start offset for children, number of children
GroupInfo<DIM, Float, PPG> tgInfo; // This should be const, but we need a compile time array initializer first
groupInfo.get(blockIdx.x + groupOffset,tgInfo); // This should be gridDims.x * (3 + 3 * DIM(=3)) reads from arguments (global memory?)
const our_size_t threadsPerPart = blockDim.x / tgInfo.childCount; // 1 int-op, this is probably okay to spill eventually (or use constant memory), since it's referenced rarely, and always accessed across threads
our_size_t* const pGLCt = interactionCounters;
InteracterTypeArray(DIM, Float, Mode) pGList = interactionList;
InteracterTypeArray(DIM, Float, Mode) dummyP;
initStack<PointMass,PointMassArray>(dummyP, 0, pGList, pGLCt); // This should result in one atomic write to shared memory per block
our_size_t* cLCt = bfsStackCounters + 2 * blockIdx.x;
NodeArray<DIM, Float> currentLevel = bfsStackBuffers + 2 * blockIdx.x * stackCapacity; // This should be gridDims.x * (3 + 3 * DIM(=3)) reads from arguments (global memory?), 3x that many int-ops
currentLevel.setCapacity(stackCapacity);
initStack<Node,NodeArray>(treeLevels[startDepth], treeCounts[startDepth], currentLevel, cLCt); // one atomic write to global memory per block, <2**startDepth writes, should coalesce
our_size_t* nLCt = bfsStackCounters + 2 * blockIdx.x + 1;
NodeArray<DIM, Float> nextLevel = bfsStackBuffers + (2 * blockIdx.x + 1) * stackCapacity; // This should be gridDims.x * (3 + 3 * DIM(=3)) reads from arguments (global memory?), 4x that many int-ops
nextLevel.setCapacity(stackCapacity);
__threadfence_block();
__syncthreads(); // Everyone needs the stack initialized before we can continue
const our_size_t useful_thread_ct = threadsPerPart * tgInfo.childCount;
Particle<DIM, Float> particle;
if(threadIdx.x < useful_thread_ct){
if(tgInfo.childStart + (threadIdx.x % tgInfo.childCount) >= particles.elems){
printf("Getting particle, %d < " SZSTR ", so want at " SZSTR " + (%d %% " SZSTR ") = " SZSTR "\n",threadIdx.x,useful_thread_ct,tgInfo.childStart, threadIdx.x, tgInfo.childCount, tgInfo.childStart + (threadIdx.x % tgInfo.childCount));
}
particles.get(tgInfo.childStart + (threadIdx.x % tgInfo.childCount), particle); // (nearly) Every thread should perform 7 reads from arguments
}
InteractionType(DIM, Float, Mode) interaction = freshInteraction<DIM, Float, Mode>(); // Either 2 or 3 writes of 0, hopefully to registers, since we'll do a lot of math on these
our_size_t curDepth = startDepth;
while(*cLCt != 0 ){ // Maximum of MAX_LEVELS iterations, but could (and likely will) terminate early
//while current level count !=0 (each level, nodes can either do work, or increment a counter for amount of work
// to be done at next level) [#outerloop]
if(threadIdx.x == 0){
*nLCt = 0;
}
__threadfence_block();
__syncthreads();
cu_diff_t startOfs = *cLCt;
if(spam && threadIdx.x == 0){
printf("" SZSTR "." SZSTR " has " DFSTR " @ " SZSTR "\n",blockIdx.x + groupOffset,tgInfo.childStart + (threadIdx.x % tgInfo.childCount), startOfs, curDepth);
}
while(startOfs > 0){//runs over stack for current level (whenever a parent defers work, it places it on the next-level-stack
//[#stacktraverse]
cu_diff_t toGrab = startOfs - blockDim.x + threadIdx.x; // two int-ops
if(toGrab >= 0){
Node<DIM, Float> nodeHere; // These should mostly coalesce. At most one extra?
currentLevel.get(toGrab, nodeHere); // 4 * 3*DIM reads per thread
//if(threadIdx.x == 0) printf("\t%d.%d @ " SZSTR ":\t" SZSTR " " SZSTR " vs " SZSTR " " SZSTR " with " SZSTR " " DFSTR " \n", blockIdx.x, threadIdx.x, curDepth, nodeHere.childStart, nodeHere.childCount, currentLevel.childStart[toGrab], currentLevel.childCount[toGrab], *cLCt, toGrab);
//*
if(spam){
printf("" SZSTR "." SZSTR " comparing against node @ " SZSTR "." SZSTR ":" SZSTR ".%d = %d\n",
blockIdx.x + groupOffset,
tgInfo.childStart + (threadIdx.x % tgInfo.childCount),
curDepth,nodeHere.childStart,nodeHere.childCount,nodeHere.isLeaf,passesMAC<DIM, Float, PPG>(tgInfo, nodeHere, theta));
}
if(passesMAC(tgInfo, nodeHere, theta)){ // 12ish floating-point ops
//if(threadIdx.x == 0) printf("\t%d accepted MAC\n",threadIdx.x);
//*
InteracterType(DIM, Float, Mode) nodePush;
switch(Mode){
case Forces:{
nodePush = nodeHere.barycenter.template castContents<InteractionElems(Mode, DIM, 3) , typename std::conditional<NonForceCondition(Mode), our_size_t, Float>::type>(); break;}
case CountOnly:
case HashInteractions:{
nodePush.m = 0;
nodePush.pos.x[0] = curDepth;
nodePush.pos.x[1] = nodeHere.childStart;
nodePush.pos.x[2] = nodeHere.childCount;
break;}
}
InteracterTypeArray(DIM, Float, Mode) tmpArray = nodePush.toArray(); // Four assignments of pointer values
our_size_t tmpCt = 1;
pushAll<PointMass,PointMassArray>(tmpArray, tmpCt, pGList, pGLCt); // multiplied by number of passing nodes: atomic op, a sequence DIM+1 writes to shared memory. addresses accessed should be sequential
//*/
} else {
//if(threadIdx.x == 0) printf("\t%d rejected MAC\n",threadIdx.x);
if(nodeHere.isLeaf){
//*
if(spam) printf("" SZSTR "." SZSTR " leaf contains " SZSTR ":" SZSTR "\n",blockIdx.x + groupOffset, tgInfo.childStart + (threadIdx.x % tgInfo.childCount),nodeHere.childStart,nodeHere.childCount);
if(nodeHere.childCount > 16){
printf("\t%d.%d: Adding a lot particles " SZSTR "\n",blockIdx.x,threadIdx.x,nodeHere.childCount);
}
switch(Mode){
case Forces:{
// multiplied by number of split leaves: atomic op, a sequence of DIM+1 * at most PPG writes to shared memory.
// These seem unlikely to be well-behaved
pushAll<PointMass, PointMassArray>(particles.mass + nodeHere.childStart, nodeHere.childCount, *reinterpret_cast<PointMassArray<DIM, Float>* >(&pGList), pGLCt); break;}
case CountOnly:
case HashInteractions:{
pushMeta(nodeHere.childStart, nodeHere.childCount, pGList, pGLCt);
break;}
}
//*/
} else {
// multiplied by number of split leaves: atomic op, a sequence of (4+3*DIM) * at most 2**DIM writes to shared memory.
// These seem unlikely to be well-behaved
pushAll<Node, NodeArray>(treeLevels[curDepth + 1] + nodeHere.childStart, nodeHere.childCount, nextLevel, nLCt);
}
}
//*/
}
__threadfence_block();
__syncthreads();
// if(threadIdx.x == 0) printf("\t%3d.%d All safely past toGrab\n",blockIdx.x,threadIdx.x);
//*
if(INTERACTION_THRESHOLD > 0){ // Can't diverge, compile-time constant
cu_diff_t innerStartOfs = 0;
//*
//if(threadIdx.x == 0) printf("\t%d PGLCt is " SZSTR " >? " SZSTR " (" DFSTR " > " DFSTR ")\n",threadIdx.x,*pGLCt,INTERACTION_THRESHOLD,(cu_diff_t)(*pGLCt),(cu_diff_t)INTERACTION_THRESHOLD);
// The casting here feels very strange - why is innerStartOfs implicitly casted, rather than vice versa?
for(innerStartOfs = *pGLCt; innerStartOfs >= (cu_diff_t)INTERACTION_THRESHOLD; innerStartOfs -= threadsPerPart){
cu_diff_t toGrab = innerStartOfs - threadsPerPart + (threadIdx.x / tgInfo.childCount);
if(toGrab >= 0 && threadIdx.x < useful_thread_ct){
//if(toGrab % threadsPerPart == 0) printf("\t%d interacting with " DFSTR " = " SZSTR " - " SZSTR " + (%d / %d)\n",threadIdx.x,toGrab,innerStartOfs,threadsPerPart,threadIdx.x,tgInfo.childCount);
InteracterType(DIM, Float, Mode) pHere;
pGList.get(toGrab, pHere);
// Something like 12-ish FLOPs
interaction = interaction + calc_interaction<DIM, Float, Mode, spam>(particle.mass, pHere, softening);
}
}
//if(threadIdx.x == 0) printf("\t%d through interaction loop safely\n",threadIdx.x);
// Need to update stack pointer
//*/
if(threadIdx.x == 0){
atomicExch((cu_size_t *)pGLCt, 0);// (cu_size_t)((innerStartOfs < 0) ? 0 : innerStartOfs));
}
}
//*/
//if(threadIdx.x == 0) printf("%3d.%d: Try going around again\n",blockIdx.x,threadIdx.x);
startOfs -= blockDim.x;
}
//if(threadIdx.x == 0) printf("%3d.%d Done inside: " SZSTR " (loopcount at " DFSTR ") work remaining at depth: " SZSTR "\n",blockIdx.x, threadIdx.x, *nLCt,startOfs,curDepth);
// It would be nice if this happened purely with registers.
swap<NodeArray<DIM, Float>>(currentLevel, nextLevel);
swap<our_size_t*>(cLCt, nLCt);
curDepth += 1;
}
// Process remaining interactions
//*
__threadfence_block();
__syncthreads();
if(INTERACTION_THRESHOLD > 0){ // Can't diverge, compile-time constant
cu_diff_t innerStartOfs = 0;
//*
//if(threadIdx.x == 0) printf("\t%d PGLCt is " SZSTR " >? " SZSTR " (" DFSTR " > " DFSTR ")\n",threadIdx.x,*pGLCt,INTERACTION_THRESHOLD,(cu_diff_t)(*pGLCt),(cu_diff_t)INTERACTION_THRESHOLD);
for(innerStartOfs = *pGLCt; innerStartOfs > 0; innerStartOfs -= threadsPerPart){
cu_diff_t toGrab = innerStartOfs - threadsPerPart + (threadIdx.x / tgInfo.childCount);
if(toGrab >= 0 && threadIdx.x < useful_thread_ct){
//if(toGrab % threadsPerPart == 0) printf("\t%d interacting with " DFSTR " = " SZSTR " - " SZSTR " + (%d / %d)\n",threadIdx.x,toGrab,innerStartOfs,threadsPerPart,threadIdx.x,tgInfo.childCount);
InteracterType(DIM, Float, Mode) pHere;
pGList.get(toGrab, pHere);
// Something like 16-ish FLOPs
interaction = interaction + calc_interaction<DIM, Float, Mode, spam>(particle.mass, pHere, softening);
}
}
//if(threadIdx.x == 0) printf("\t%d through final interaction loop safely\n",threadIdx.x);
//*/
if(threadIdx.x == 0){
atomicExch((cu_size_t *)pGLCt, 0);
}
}
// This needs to be done in shared memory! We should figure out how to combine with the stack scratch-space!
if(threadIdx.x < useful_thread_ct){
interactionScratch.set(threadIdx.x, interaction);
}
__threadfence_block();
__syncthreads(); // All forces have been summed and are in view
// reduce (hack-job fashion for now) if multithreading per particle in play
//*
//printf("Reducing\n");
if(threadIdx.x < tgInfo.childCount){
InteractionType(DIM, Float, Mode) accInt = freshInteraction<DIM, Float, Mode>();
for(our_size_t i = 1; i < threadsPerPart; i++){
InteractionType(DIM, Float, Mode) tmp;
interactionScratch.get(threadIdx.x + i * tgInfo.childCount, tmp);
// DIM FLOPs
accInt = accInt + tmp;
}
interactions.set(tgInfo.childStart + threadIdx.x, interaction + accInt);
}
//if(threadIdx.x == 0) printf("%3d Done reducing\n",blockIdx.x);
//*/
}
return;
}
// Something is badly wrong with template resolution if we switch to InteractionType here.
// I think the compilers are doing name-mangling differently or something
/*
template void traverseTreeCUDA<3, float, 128, 16, 16, 300000, 8, Forces>
(our_size_t, GroupInfoArray<3, float, 16>, our_size_t,
NodeArray<3, float> *, our_size_t *,
our_size_t, ParticleArray<3, float>, VecArray<3, float>, float, float, our_size_t);
*/
template<our_size_t DIM, typename Float, our_size_t threadCt, our_size_t PPG, our_size_t MAX_LEVELS, our_size_t MAX_STACK_ENTRIES, our_size_t INTERACTION_THRESHOLD, TraverseMode Mode, bool spam>
void traverseTreeCUDA(our_size_t nGroups, GroupInfoArray<DIM, Float, PPG> groupInfo, our_size_t startDepth,
NodeArray<DIM, Float> treeLevels[MAX_LEVELS], our_size_t treeCounts[MAX_LEVELS],
our_size_t n, ParticleArray<DIM, Float> particles, InteractionTypeArray(DIM, Float, Mode) interactions, Float softening, Float theta, our_size_t blockCt){
std::cout << "Traverse tree with " << blockCt << " blocks and " << threadCt << " tpb"<<std::endl;
NodeArray<DIM, Float> placeHolderLevels[MAX_LEVELS];
makeDeviceTree<DIM, Float, MAX_LEVELS>(treeLevels, placeHolderLevels, treeCounts);
NodeArray<DIM, Float>* cuTreeLevels;
ALLOC_DEBUG_MSG(MAX_LEVELS*sizeof(NodeArray<DIM, Float>) + MAX_LEVELS * sizeof(our_size_t));
gpuErrchk( (hipMalloc(&cuTreeLevels, MAX_LEVELS*sizeof(NodeArray<DIM, Float>))) );
gpuErrchk( (hipMemcpy(cuTreeLevels, placeHolderLevels, MAX_LEVELS*sizeof(NodeArray<DIM, Float>), hipMemcpyHostToDevice)) );
our_size_t* cuTreeCounts;
gpuErrchk( (hipMalloc(&cuTreeCounts, MAX_LEVELS * sizeof(our_size_t))) );
gpuErrchk( (hipMemcpy(cuTreeCounts, treeCounts, MAX_LEVELS * sizeof(our_size_t), hipMemcpyHostToDevice)) );
our_size_t biggestRow = 0;
for(our_size_t level = 0; level < MAX_LEVELS; level++){
biggestRow = (treeCounts[level] > biggestRow) ? treeCounts[level] : biggestRow;
}
std::cout << "Biggest row: " << biggestRow << std::endl;
const our_size_t stackCapacity = biggestRow;
const our_size_t blocksPerLaunch = MAX_STACK_ENTRIES / stackCapacity;
std::cout << "Allowing: " << blocksPerLaunch << " blocks per launch" << std::endl;
NodeArray<DIM, Float> bfsStackBuffers;
our_size_t * bfsStackCounters;
allocDeviceNodeArray(blocksPerLaunch * 2 * stackCapacity, bfsStackBuffers);
ALLOC_DEBUG_MSG(blocksPerLaunch * 2 * sizeof(our_size_t));
gpuErrchk( (hipMalloc(&bfsStackCounters, blocksPerLaunch * 2 * sizeof(our_size_t))) );
GroupInfoArray<DIM, Float, PPG> cuGroupInfo;
allocDeviceGroupInfoArray(nGroups, cuGroupInfo);
copyDeviceGroupInfoArray(nGroups, cuGroupInfo, groupInfo, hipMemcpyHostToDevice);
ParticleArray<DIM, Float> cuParticles;
allocDeviceParticleArray(n, cuParticles);
copyDeviceParticleArray(n, cuParticles, particles, hipMemcpyHostToDevice);
InteractionTypeArray(DIM, Float, Mode) cuInteractions;
allocDeviceVecArray(n, cuInteractions);
copyDeviceVecArray(n, cuInteractions, interactions, hipMemcpyHostToDevice);
dim3 dimGrid(blocksPerLaunch);
dim3 dimBlock(threadCt);
std::cout << "Trying to launch with " << threadCt << " / block with " << blocksPerLaunch << " blocks" << std::endl;
tic;
hipLaunchKernelGGL(( traverseTreeKernel<DIM, Float, threadCt, PPG, MAX_LEVELS, INTERACTION_THRESHOLD, Mode, spam>), dim3(dimGrid), dim3(dimBlock), 0, 0, nGroups, cuGroupInfo, startDepth, cuTreeLevels, cuTreeCounts, n, cuParticles, cuInteractions, softening, theta, bfsStackCounters, bfsStackBuffers, stackCapacity);
toc;
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
copyDeviceVecArray(n, interactions, cuInteractions, hipMemcpyDeviceToHost);
freeDeviceVecArray(cuInteractions);
freeDeviceParticleArray(cuParticles);
freeDeviceGroupInfoArray(cuGroupInfo);
freeDeviceNodeArray(bfsStackBuffers);
gpuErrchk( (hipFree(bfsStackCounters)) );
gpuErrchk( (hipFree(cuTreeCounts)) );
freeDeviceTree<DIM, Float, MAX_LEVELS>(placeHolderLevels);
gpuErrchk( (hipFree(cuTreeLevels)) );
}
//template void traverseTreeCUDA<3, float, 256, 16, 16, 300000, 16, Forces, true> (our_size_t, GroupInfoArray<3, float, 16>, our_size_t, NodeArray<3, float> *, our_size_t *, our_size_t, ParticleArray<3, float>, InteractionTypeArray(3, float, Forces), float, float, our_size_t);
template void traverseTreeCUDA<3, float, 256, 16, 16, 300000, 16, Forces, false> (our_size_t, GroupInfoArray<3, float, 16>, our_size_t, NodeArray<3, float> *, our_size_t *, our_size_t, ParticleArray<3, float>, InteractionTypeArray(3, float, Forces), float, float, our_size_t);
//template void traverseTreeCUDA<3, float, 256, 16, 16, 300000, 16, CountOnly, false> (our_size_t, GroupInfoArray<3, float, 16>, our_size_t, NodeArray<3, float> *, our_size_t *, our_size_t, ParticleArray<3, float>, InteractionTypeArray(3, float, CountOnly), float, float, our_size_t);
//template void traverseTreeCUDA<3, float, 256, 16, 16, 300000, 16, HashInteractions, false> (our_size_t, GroupInfoArray<3, float, 16>, our_size_t, NodeArray<3, float> *, our_size_t *, our_size_t, ParticleArray<3, float>, InteractionTypeArray(3, float, HashInteractions), float, float, our_size_t);
| 3a6357c3033ba56ca9523d8bfc562aa011986f08.cu | #include "treedefs.h"
#include "CudaArrayCopyUtils.h"
#include "treecodeCU.h"
#include "cudahelper.h"
#include "tictoc.h"
#include <iostream>
//: We should really be using native CUDA vectors for this.... but that requires more funny typing magic to convert the CPU data
template<our_size_t DIM, typename Float, our_size_t PPG> __device__ /*inline*/ bool passesMAC(const GroupInfo<DIM, Float, PPG>& groupInfo, const Node<DIM, Float>& nodeHere, Float theta) {
Float d = mag(groupInfo.center - nodeHere.barycenter.pos) - groupInfo.radius;
Float l = 2 * nodeHere.radius;
return d > (l / theta);
}
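// Added note (interpretation of the test above, not original commentary): this is a
// Barnes-Hut style multipole acceptance criterion, accepting a node when d > l / theta,
// with d the distance from the group's bounding sphere surface to the node barycenter
// and l = 2 * nodeHere.radius. For example, with theta = 0.5 a node is treated as a
// single far-field mass only once that surface distance exceeds four node radii.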
template<template<our_size_t, typename> class ElemType, template<our_size_t, typename> class ElemTypeArray, our_size_t DIM, typename Float>
__device__ /*inline*/ void initStack(ElemTypeArray<DIM, Float> level, our_size_t levelCt, ElemTypeArray<DIM, Float> stack, our_size_t* stackCt){
if(threadIdx.x < levelCt){
if(levelCt > level.elems || threadIdx.x >= stack.elems) printf("%d.%d %s: " SZSTR " (should be <= " SZSTR ") elements. stackCt @ %d / " SZSTR "\n",blockIdx.x, threadIdx.x, __func__,levelCt,level.elems,threadIdx.x,stack.elems);
ElemType<DIM, Float> eHere;
level.get(threadIdx.x, eHere);
stack.set(threadIdx.x, eHere);
}
if(threadIdx.x == 0){
atomicExch((cu_size_t*)stackCt,(cu_size_t)levelCt); // Don't want to have to threadfence afterwards. Just make sure it's set!
}
}
template<our_size_t DIM, typename Float>
__device__ void dumpStackChildren(NodeArray<DIM, Float> stack, const our_size_t* stackCt){
our_size_t dst = *stackCt;
for(our_size_t i = 0; i < dst; i++){
printf("(" SZSTR ") see (" SZSTR " " SZSTR ") in (%p/" SZSTR ")\n",i,stack.childStart[i],stack.childCount[i], stackCt, dst);
}
}
template<our_size_t DIM, typename T>
__device__ /*inline*/ void pushMeta(const our_size_t srcSt, const our_size_t srcCt, PointMassArray<DIM, T> stack, our_size_t* stackCt){
// This is a weird compiler bug. There's no reason this shouldn't have worked without the cast.
our_size_t dst = atomicAdd((cu_size_t*)stackCt, (cu_size_t)srcCt);
for(our_size_t i = dst, j = 0; i < dst + srcCt; i++, j++){
PointMass<DIM, T> eHere;
eHere.m = 1;
eHere.pos.x[0] = srcSt + j;
eHere.pos.x[1] = 0;
eHere.pos.x[2] = 0;
stack.set(i, eHere);
}
}
template<template<our_size_t, typename> class ElemType, template<our_size_t, typename> class ElemTypeArray, our_size_t DIM, typename Float>
__device__ /*inline*/ void pushAll(const ElemTypeArray<DIM, Float> src, const our_size_t srcCt, ElemTypeArray<DIM, Float> stack, our_size_t* stackCt){
// This is a weird compiler bug. There's no reason this shouldn't have worked without the cast.
our_size_t dst = atomicAdd((cu_size_t*)stackCt, (cu_size_t)srcCt);
if(srcCt > src.elems || dst >= stack.elems) printf("%d.%d %s: " SZSTR " (should be <= " SZSTR ") elements. stackCt @ " SZSTR " / " SZSTR "\n",blockIdx.x, threadIdx.x, __func__, srcCt,src.elems,dst,stack.elems);
for(our_size_t i = dst, j = 0; i < dst + srcCt; i++, j++){
ElemType<DIM, Float> eHere;
src.get(j, eHere);
stack.set(i, eHere);
}
}
template<our_size_t DIM, typename Float, TraverseMode Mode>
__device__ /*inline*/ InteractionType(DIM, Float, Mode) freshInteraction(){
InteractionType(DIM, Float, Mode) fresh; for(our_size_t i = 0; i < DIM; i++){
fresh.x[i] = 0.0;
}
return fresh;
}
// Needs softening
template<our_size_t DIM, typename Float, TraverseMode Mode, bool spam = false>
__device__ /*inline*/ InteractionType(DIM, Float, Mode) calc_interaction(const PointMass<DIM, Float> &m1, const InteracterType(DIM, Float, Mode) &m2, Float softening){
InteractionType(DIM, Float, Mode) interaction = freshInteraction<DIM, Float, Mode>();
our_size_t isParticle = m2.m != 0;
switch(Mode){
case Forces:{
// Reinterpret cast is evil, but necessary here. template-induced dead-code and all.
if(spam){
printf("Interacting\t%f %f %f %f vs %f %f %f %f\n",m1.m,m1.pos.x[0],m1.pos.x[1],m1.pos.x[2],m2.m,m2.pos.x[0],m2.pos.x[1],m2.pos.x[2]);
}
const PointMass<DIM, Float>& m2_inner = reinterpret_cast<const PointMass<DIM, Float>& >(m2);
Vec<DIM, Float> disp = m1.pos - m2_inner.pos;
interaction = (disp * ((m1.m * m2_inner.m) / (Float)(softening + pow((Float)mag_sq(disp),(Float)1.5)))).template castContents<InteractionElems(Mode, DIM, 2) , typename std::conditional<NonForceCondition(Mode), our_size_t, Float>::type>();
break;}
case CountOnly:
interaction.x[isParticle] = 1;
break;
case HashInteractions:{
// Type inference is failing here unless these are all explicitly separate variables
our_size_t generic = m2.pos.x[0];
our_size_t nodeSpecific1 = m2.pos.x[1];
our_size_t nodeSpecific2 = m2.pos.x[2];
our_size_t nodeSpecific = nodeSpecific1 ^ nodeSpecific2;
our_size_t e = generic ^ (!isParticle) * nodeSpecific;
interaction.x[isParticle] = e;
break;}
}
return interaction;
}
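// Added note (reading of the Forces branch above, not original commentary): the pairwise
// term computed there is
//     d * (m1 * m2) / (softening + |d|^3),   d = pos1 - pos2,
// i.e. the softening constant is added directly to |d|^3, which keeps the contribution
// finite when the two points coincide. CountOnly and HashInteractions reuse the same
// slots to accumulate interaction counts and a cheap hash instead of a force.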
template<typename T>
__device__ /*inline*/ void swap(T &a, T &b){
T c(a); a=b; b=c;
}
template<our_size_t DIM, typename T>
__device__ /*inline*/ void dumpNodeArrayContents(const char *name, const NodeArray<DIM, T> &n){
ASSERT_DEAD_CODE;
printf("%s : %p %p %p %p " SZSTR "\n",name, n.isLeaf, n.childCount, n.childStart, n.radius, n.elems);
for(our_size_t i = 0; i < DIM; i++){
printf("%p ",n.minX.x[i]);
}printf("\n");
for(our_size_t i = 0; i < DIM; i++){
printf("%p ",n.maxX.x[i]);
}printf("\n");
for(our_size_t i = 0; i < DIM; i++){
printf("%p ",n.barycenter.pos.x[i]);
}printf("%p\n",n.barycenter.m);
printf("%s - Dump complete\n",name);
}
template<our_size_t DIM, typename Float, our_size_t TPB, our_size_t PPG, our_size_t MAX_LEVELS, our_size_t INTERACTION_THRESHOLD, TraverseMode Mode, bool spam>
__global__ void traverseTreeKernel(const our_size_t nGroups, const GroupInfoArray<DIM, Float, PPG> groupInfo,
const our_size_t startDepth, NodeArray<DIM, Float>* treeLevels, const our_size_t* treeCounts,
const our_size_t n, const ParticleArray<DIM, Float> particles, InteractionTypeArray(DIM, Float, Mode) interactions,
const Float softening, const Float theta,
our_size_t *const bfsStackCounters, const NodeArray<DIM, Float> bfsStackBuffers, const our_size_t stackCapacity) {
//this function processes groups of stars against nodes in the tree
//start at startDepth instead of root, to save a few levels
//groupInfo describes particles via offsets into the overall ParticleArray; everything is in principle tree-sorted before we start
typedef typename std::conditional<NonForceCondition(Mode), our_size_t, Float>::type InteractionElemType;
#define BUF_MUL 128
// TPB must = blockDim.x
__shared__ our_size_t interactionCounters[1];
__shared__ InteractionElemType pointMass[BUF_MUL*INTERACTION_THRESHOLD];
__shared__ InteractionElemType pointPos[DIM * BUF_MUL*INTERACTION_THRESHOLD];
__shared__ InteractionElemType interactionBuf[TPB * InteractionElems(Mode, DIM, 2)];
if(threadIdx.x == 0 && blockDim.x != TPB){
printf("%d Launched with mismatched TPB parameters\n",blockIdx.x);
}
InteracterTypeArray(DIM, Float, Mode) interactionList; // This should be const, but we need a compile time array initializer first
interactionList.m = pointMass;
for(our_size_t j = 0; j < DIM; j++){ // this should unroll, 2 int-ops, and a store (hopefully to registers) * DIM (=3)
interactionList.pos.x[j] = pointPos + (j * BUF_MUL*INTERACTION_THRESHOLD);
}
interactionList.setCapacity(BUF_MUL*INTERACTION_THRESHOLD);
InteractionTypeArray(DIM, Float, Mode) interactionScratch; // this should unroll, 2 int-ops, and a store (hopefully to registers) * DIM (=3)
for(our_size_t j = 0; j < InteractionElems(Mode, DIM, 2); j++){
interactionScratch.x[j] = interactionBuf + (TPB * j);
}
interactionScratch.setCapacity(TPB);
for(our_size_t groupOffset = 0; groupOffset + blockIdx.x < nGroups; groupOffset += gridDim.x){//one group per block of threads
//GroupInfo has 12 elements: 10 floats - coordinates of bounding box and center of bounding sphere and its radius; 2 ints - start offset for children, number of children
GroupInfo<DIM, Float, PPG> tgInfo; // This should be const, but we need a compile time array initializer first
groupInfo.get(blockIdx.x + groupOffset,tgInfo); // This should be gridDims.x * (3 + 3 * DIM(=3)) reads from arguments (global memory?)
const our_size_t threadsPerPart = blockDim.x / tgInfo.childCount; // 1 int-op, this is probably okay to spill eventually (or use constant memory), since it's referenced rarely, and always accessed across threads
our_size_t* const pGLCt = interactionCounters;
InteracterTypeArray(DIM, Float, Mode) pGList = interactionList;
InteracterTypeArray(DIM, Float, Mode) dummyP;
initStack<PointMass,PointMassArray>(dummyP, 0, pGList, pGLCt); // This should result in one atomic write to shared memory per block
our_size_t* cLCt = bfsStackCounters + 2 * blockIdx.x;
NodeArray<DIM, Float> currentLevel = bfsStackBuffers + 2 * blockIdx.x * stackCapacity; // This should be gridDims.x * (3 + 3 * DIM(=3)) reads from arguments (global memory?), 3x that many int-ops
currentLevel.setCapacity(stackCapacity);
initStack<Node,NodeArray>(treeLevels[startDepth], treeCounts[startDepth], currentLevel, cLCt); // one atomic write to global memory per block, <2**startDepth writes, should coalesce
our_size_t* nLCt = bfsStackCounters + 2 * blockIdx.x + 1;
NodeArray<DIM, Float> nextLevel = bfsStackBuffers + (2 * blockIdx.x + 1) * stackCapacity; // This should be gridDims.x * (3 + 3 * DIM(=3)) reads from arguments (global memory?), 4x that many int-ops
nextLevel.setCapacity(stackCapacity);
__threadfence_block();
__syncthreads(); // Everyone needs the stack initialized before we can continue
const our_size_t useful_thread_ct = threadsPerPart * tgInfo.childCount;
Particle<DIM, Float> particle;
if(threadIdx.x < useful_thread_ct){
if(tgInfo.childStart + (threadIdx.x % tgInfo.childCount) >= particles.elems){
printf("Getting particle, %d < " SZSTR ", so want at " SZSTR " + (%d %% " SZSTR ") = " SZSTR "\n",threadIdx.x,useful_thread_ct,tgInfo.childStart, threadIdx.x, tgInfo.childCount, tgInfo.childStart + (threadIdx.x % tgInfo.childCount));
}
particles.get(tgInfo.childStart + (threadIdx.x % tgInfo.childCount), particle); // (nearly) Every thread should perform 7 reads from arguments
}
InteractionType(DIM, Float, Mode) interaction = freshInteraction<DIM, Float, Mode>(); // Either 2 or 3 writes of 0, hopefully to registers, since we'll do a lot of math on these
our_size_t curDepth = startDepth;
while(*cLCt != 0 ){ // Maximum of MAX_LEVELS iterations, but could (and likely will) terminate early
//while current level count !=0 (each level, nodes can either do work, or increment a counter for amount of work
// to be done at next level) [#outerloop]
if(threadIdx.x == 0){
*nLCt = 0;
}
__threadfence_block();
__syncthreads();
cu_diff_t startOfs = *cLCt;
if(spam && threadIdx.x == 0){
printf("" SZSTR "." SZSTR " has " DFSTR " @ " SZSTR "\n",blockIdx.x + groupOffset,tgInfo.childStart + (threadIdx.x % tgInfo.childCount), startOfs, curDepth);
}
while(startOfs > 0){//runs over stack for current level (whenever a parent defers work, it places it on the next-level-stack
//[#stacktraverse]
cu_diff_t toGrab = startOfs - blockDim.x + threadIdx.x; // two int-ops
if(toGrab >= 0){
Node<DIM, Float> nodeHere; // These should mostly coalesce. At most one extra?
currentLevel.get(toGrab, nodeHere); // 4 * 3*DIM reads per thread
//if(threadIdx.x == 0) printf("\t%d.%d @ " SZSTR ":\t" SZSTR " " SZSTR " vs " SZSTR " " SZSTR " with " SZSTR " " DFSTR " \n", blockIdx.x, threadIdx.x, curDepth, nodeHere.childStart, nodeHere.childCount, currentLevel.childStart[toGrab], currentLevel.childCount[toGrab], *cLCt, toGrab);
//*
if(spam){
printf("" SZSTR "." SZSTR " comparing against node @ " SZSTR "." SZSTR ":" SZSTR ".%d = %d\n",
blockIdx.x + groupOffset,
tgInfo.childStart + (threadIdx.x % tgInfo.childCount),
curDepth,nodeHere.childStart,nodeHere.childCount,nodeHere.isLeaf,passesMAC<DIM, Float, PPG>(tgInfo, nodeHere, theta));
}
if(passesMAC(tgInfo, nodeHere, theta)){ // 12ish floating-point ops
//if(threadIdx.x == 0) printf("\t%d accepted MAC\n",threadIdx.x);
//*
InteracterType(DIM, Float, Mode) nodePush;
switch(Mode){
case Forces:{
nodePush = nodeHere.barycenter.template castContents<InteractionElems(Mode, DIM, 3) , typename std::conditional<NonForceCondition(Mode), our_size_t, Float>::type>(); break;}
case CountOnly:
case HashInteractions:{
nodePush.m = 0;
nodePush.pos.x[0] = curDepth;
nodePush.pos.x[1] = nodeHere.childStart;
nodePush.pos.x[2] = nodeHere.childCount;
break;}
}
InteracterTypeArray(DIM, Float, Mode) tmpArray = nodePush.toArray(); // Four assignments of pointer values
our_size_t tmpCt = 1;
pushAll<PointMass,PointMassArray>(tmpArray, tmpCt, pGList, pGLCt); // multiplied by number of passing nodes: atomic op, a sequence DIM+1 writes to shared memory. addresses accessed should be sequential
//*/
} else {
//if(threadIdx.x == 0) printf("\t%d rejected MAC\n",threadIdx.x);
if(nodeHere.isLeaf){
//*
if(spam) printf("" SZSTR "." SZSTR " leaf contains " SZSTR ":" SZSTR "\n",blockIdx.x + groupOffset, tgInfo.childStart + (threadIdx.x % tgInfo.childCount),nodeHere.childStart,nodeHere.childCount);
if(nodeHere.childCount > 16){
printf("\t%d.%d: Adding a lot particles " SZSTR "\n",blockIdx.x,threadIdx.x,nodeHere.childCount);
}
switch(Mode){
case Forces:{
// multiplied by number of split leaves: atomic op, a sequence of DIM+1 * at most PPG writes to shared memory.
// These seem unlikely to be well-behaved
pushAll<PointMass, PointMassArray>(particles.mass + nodeHere.childStart, nodeHere.childCount, *reinterpret_cast<PointMassArray<DIM, Float>* >(&pGList), pGLCt); break;}
case CountOnly:
case HashInteractions:{
pushMeta(nodeHere.childStart, nodeHere.childCount, pGList, pGLCt);
break;}
}
//*/
} else {
// multiplied by number of split leaves: atomic op, a sequence of (4+3*DIM) * at most 2**DIM writes to shared memory.
// These seem unlikely to be well-behaved
pushAll<Node, NodeArray>(treeLevels[curDepth + 1] + nodeHere.childStart, nodeHere.childCount, nextLevel, nLCt);
}
}
//*/
}
__threadfence_block();
__syncthreads();
// if(threadIdx.x == 0) printf("\t%3d.%d All safely past toGrab\n",blockIdx.x,threadIdx.x);
//*
if(INTERACTION_THRESHOLD > 0){ // Can't diverge, compile-time constant
cu_diff_t innerStartOfs = 0;
//*
//if(threadIdx.x == 0) printf("\t%d PGLCt is " SZSTR " >? " SZSTR " (" DFSTR " > " DFSTR ")\n",threadIdx.x,*pGLCt,INTERACTION_THRESHOLD,(cu_diff_t)(*pGLCt),(cu_diff_t)INTERACTION_THRESHOLD);
// The casting here feels very strange - why is innerStartOfs implicitly casted, rather than vice versa?
for(innerStartOfs = *pGLCt; innerStartOfs >= (cu_diff_t)INTERACTION_THRESHOLD; innerStartOfs -= threadsPerPart){
cu_diff_t toGrab = innerStartOfs - threadsPerPart + (threadIdx.x / tgInfo.childCount);
if(toGrab >= 0 && threadIdx.x < useful_thread_ct){
//if(toGrab % threadsPerPart == 0) printf("\t%d interacting with " DFSTR " = " SZSTR " - " SZSTR " + (%d / %d)\n",threadIdx.x,toGrab,innerStartOfs,threadsPerPart,threadIdx.x,tgInfo.childCount);
InteracterType(DIM, Float, Mode) pHere;
pGList.get(toGrab, pHere);
// Something like 12-ish FLOPs
interaction = interaction + calc_interaction<DIM, Float, Mode, spam>(particle.mass, pHere, softening);
}
}
//if(threadIdx.x == 0) printf("\t%d through interaction loop safely\n",threadIdx.x);
// Need to update stack pointer
//*/
if(threadIdx.x == 0){
atomicExch((cu_size_t *)pGLCt, 0);// (cu_size_t)((innerStartOfs < 0) ? 0 : innerStartOfs));
}
}
//*/
//if(threadIdx.x == 0) printf("%3d.%d: Try going around again\n",blockIdx.x,threadIdx.x);
startOfs -= blockDim.x;
}
//if(threadIdx.x == 0) printf("%3d.%d Done inside: " SZSTR " (loopcount at " DFSTR ") work remaining at depth: " SZSTR "\n",blockIdx.x, threadIdx.x, *nLCt,startOfs,curDepth);
// It would be nice if this happened purely with registers.
swap<NodeArray<DIM, Float>>(currentLevel, nextLevel);
swap<our_size_t*>(cLCt, nLCt);
curDepth += 1;
}
// Process remaining interactions
//*
__threadfence_block();
__syncthreads();
if(INTERACTION_THRESHOLD > 0){ // Can't diverge, compile-time constant
cu_diff_t innerStartOfs = 0;
//*
//if(threadIdx.x == 0) printf("\t%d PGLCt is " SZSTR " >? " SZSTR " (" DFSTR " > " DFSTR ")\n",threadIdx.x,*pGLCt,INTERACTION_THRESHOLD,(cu_diff_t)(*pGLCt),(cu_diff_t)INTERACTION_THRESHOLD);
for(innerStartOfs = *pGLCt; innerStartOfs > 0; innerStartOfs -= threadsPerPart){
cu_diff_t toGrab = innerStartOfs - threadsPerPart + (threadIdx.x / tgInfo.childCount);
if(toGrab >= 0 && threadIdx.x < useful_thread_ct){
//if(toGrab % threadsPerPart == 0) printf("\t%d interacting with " DFSTR " = " SZSTR " - " SZSTR " + (%d / %d)\n",threadIdx.x,toGrab,innerStartOfs,threadsPerPart,threadIdx.x,tgInfo.childCount);
InteracterType(DIM, Float, Mode) pHere;
pGList.get(toGrab, pHere);
// Something like 16-ish FLOPs
interaction = interaction + calc_interaction<DIM, Float, Mode, spam>(particle.mass, pHere, softening);
}
}
//if(threadIdx.x == 0) printf("\t%d through final interaction loop safely\n",threadIdx.x);
//*/
if(threadIdx.x == 0){
atomicExch((cu_size_t *)pGLCt, 0);
}
}
// This needs to be done in shared memory! We should figure out how to combine with the stack scratch-space!
if(threadIdx.x < useful_thread_ct){
interactionScratch.set(threadIdx.x, interaction);
}
__threadfence_block();
__syncthreads(); // All forces have been summed and are in view
// reduce (hack-job fashion for now) if multithreading per particle in play
//*
//printf("Reducing\n");
if(threadIdx.x < tgInfo.childCount){
InteractionType(DIM, Float, Mode) accInt = freshInteraction<DIM, Float, Mode>();
for(our_size_t i = 1; i < threadsPerPart; i++){
InteractionType(DIM, Float, Mode) tmp;
interactionScratch.get(threadIdx.x + i * tgInfo.childCount, tmp);
// DIM FLOPs
accInt = accInt + tmp;
}
interactions.set(tgInfo.childStart + threadIdx.x, interaction + accInt);
}
//if(threadIdx.x == 0) printf("%3d Done reducing\n",blockIdx.x);
//*/
}
return;
}
// Something is badly wrong with template resolution if we switch to InteractionType here.
// I think the compilers are doing name-mangling differently or something
/*
template void traverseTreeCUDA<3, float, 128, 16, 16, 300000, 8, Forces>
(our_size_t, GroupInfoArray<3, float, 16>, our_size_t,
NodeArray<3, float> *, our_size_t *,
our_size_t, ParticleArray<3, float>, VecArray<3, float>, float, float, our_size_t);
*/
template<our_size_t DIM, typename Float, our_size_t threadCt, our_size_t PPG, our_size_t MAX_LEVELS, our_size_t MAX_STACK_ENTRIES, our_size_t INTERACTION_THRESHOLD, TraverseMode Mode, bool spam>
void traverseTreeCUDA(our_size_t nGroups, GroupInfoArray<DIM, Float, PPG> groupInfo, our_size_t startDepth,
NodeArray<DIM, Float> treeLevels[MAX_LEVELS], our_size_t treeCounts[MAX_LEVELS],
our_size_t n, ParticleArray<DIM, Float> particles, InteractionTypeArray(DIM, Float, Mode) interactions, Float softening, Float theta, our_size_t blockCt){
std::cout << "Traverse tree with " << blockCt << " blocks and " << threadCt << " tpb"<<std::endl;
NodeArray<DIM, Float> placeHolderLevels[MAX_LEVELS];
makeDeviceTree<DIM, Float, MAX_LEVELS>(treeLevels, placeHolderLevels, treeCounts);
NodeArray<DIM, Float>* cuTreeLevels;
ALLOC_DEBUG_MSG(MAX_LEVELS*sizeof(NodeArray<DIM, Float>) + MAX_LEVELS * sizeof(our_size_t));
gpuErrchk( (cudaMalloc(&cuTreeLevels, MAX_LEVELS*sizeof(NodeArray<DIM, Float>))) );
gpuErrchk( (cudaMemcpy(cuTreeLevels, placeHolderLevels, MAX_LEVELS*sizeof(NodeArray<DIM, Float>), cudaMemcpyHostToDevice)) );
our_size_t* cuTreeCounts;
gpuErrchk( (cudaMalloc(&cuTreeCounts, MAX_LEVELS * sizeof(our_size_t))) );
gpuErrchk( (cudaMemcpy(cuTreeCounts, treeCounts, MAX_LEVELS * sizeof(our_size_t), cudaMemcpyHostToDevice)) );
our_size_t biggestRow = 0;
for(our_size_t level = 0; level < MAX_LEVELS; level++){
biggestRow = (treeCounts[level] > biggestRow) ? treeCounts[level] : biggestRow;
}
std::cout << "Biggest row: " << biggestRow << std::endl;
const our_size_t stackCapacity = biggestRow;
const our_size_t blocksPerLaunch = MAX_STACK_ENTRIES / stackCapacity;
std::cout << "Allowing: " << blocksPerLaunch << " blocks per launch" << std::endl;
NodeArray<DIM, Float> bfsStackBuffers;
our_size_t * bfsStackCounters;
allocDeviceNodeArray(blocksPerLaunch * 2 * stackCapacity, bfsStackBuffers);
ALLOC_DEBUG_MSG(blocksPerLaunch * 2 * sizeof(our_size_t));
gpuErrchk( (cudaMalloc(&bfsStackCounters, blocksPerLaunch * 2 * sizeof(our_size_t))) );
GroupInfoArray<DIM, Float, PPG> cuGroupInfo;
allocDeviceGroupInfoArray(nGroups, cuGroupInfo);
copyDeviceGroupInfoArray(nGroups, cuGroupInfo, groupInfo, cudaMemcpyHostToDevice);
ParticleArray<DIM, Float> cuParticles;
allocDeviceParticleArray(n, cuParticles);
copyDeviceParticleArray(n, cuParticles, particles, cudaMemcpyHostToDevice);
InteractionTypeArray(DIM, Float, Mode) cuInteractions;
allocDeviceVecArray(n, cuInteractions);
copyDeviceVecArray(n, cuInteractions, interactions, cudaMemcpyHostToDevice);
dim3 dimGrid(blocksPerLaunch);
dim3 dimBlock(threadCt);
std::cout << "Trying to launch with " << threadCt << " / block with " << blocksPerLaunch << " blocks" << std::endl;
tic;
traverseTreeKernel<DIM, Float, threadCt, PPG, MAX_LEVELS, INTERACTION_THRESHOLD, Mode, spam><<<dimGrid, dimBlock>>>(nGroups, cuGroupInfo, startDepth, cuTreeLevels, cuTreeCounts, n, cuParticles, cuInteractions, softening, theta, bfsStackCounters, bfsStackBuffers, stackCapacity);
toc;
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
copyDeviceVecArray(n, interactions, cuInteractions, cudaMemcpyDeviceToHost);
freeDeviceVecArray(cuInteractions);
freeDeviceParticleArray(cuParticles);
freeDeviceGroupInfoArray(cuGroupInfo);
freeDeviceNodeArray(bfsStackBuffers);
gpuErrchk( (cudaFree(bfsStackCounters)) );
gpuErrchk( (cudaFree(cuTreeCounts)) );
freeDeviceTree<DIM, Float, MAX_LEVELS>(placeHolderLevels);
gpuErrchk( (cudaFree(cuTreeLevels)) );
}
//template void traverseTreeCUDA<3, float, 256, 16, 16, 300000, 16, Forces, true> (our_size_t, GroupInfoArray<3, float, 16>, our_size_t, NodeArray<3, float> *, our_size_t *, our_size_t, ParticleArray<3, float>, InteractionTypeArray(3, float, Forces), float, float, our_size_t);
template void traverseTreeCUDA<3, float, 256, 16, 16, 300000, 16, Forces, false> (our_size_t, GroupInfoArray<3, float, 16>, our_size_t, NodeArray<3, float> *, our_size_t *, our_size_t, ParticleArray<3, float>, InteractionTypeArray(3, float, Forces), float, float, our_size_t);
//template void traverseTreeCUDA<3, float, 256, 16, 16, 300000, 16, CountOnly, false> (our_size_t, GroupInfoArray<3, float, 16>, our_size_t, NodeArray<3, float> *, our_size_t *, our_size_t, ParticleArray<3, float>, InteractionTypeArray(3, float, CountOnly), float, float, our_size_t);
//template void traverseTreeCUDA<3, float, 256, 16, 16, 300000, 16, HashInteractions, false> (our_size_t, GroupInfoArray<3, float, 16>, our_size_t, NodeArray<3, float> *, our_size_t *, our_size_t, ParticleArray<3, float>, InteractionTypeArray(3, float, HashInteractions), float, float, our_size_t);
|
2ac836066d29fda3341de0433ed50c341d9e3291.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// -----------------------------------------------------------------------
// Fast CUDA Radix Sort Implementation
//
// The parallel radix sort algorithm implemented by this code is described
// in the following paper.
//
// Satish, N., Harris, M., and Garland, M. "Designing Efficient Sorting
// Algorithms for Manycore GPUs". In Proceedings of IEEE International
// Parallel & Distributed Processing Symposium 2009 (IPDPS 2009).
//
// -----------------------------------------------------------------------
#include "radixsort.h"
#include "cudpp/cudpp.h"
#include <stdio.h>
#include <assert.h>
#if (CUDART_VERSION < 2020)
#error CUDA runtime version 2.2 or later required!
#endif
namespace nvRadixSort
{
// Used for creating a mapping of kernel functions to the number of CTAs to launch for each
typedef void* KernelPointer;
int getNumCTAs(KernelPointer kernel);
void setNumCTAs(KernelPointer kernel, int numCTAs);
void computeNumCTAs(KernelPointer kernel, int smemDynamicBytes, bool bManualCoalesce);
bool bManualCoalesce = false;
bool bUsePersistentCTAs = false;
unsigned int persistentCTAThreshold[2] = { 0, 0 };
unsigned int persistentCTAThresholdFullBlocks[2] = { 0, 0 };
template <typename T>
int numCTAs(T kernel)
{
return getNumCTAs((KernelPointer)kernel);
}
template <typename T>
void numCTAs(T kernel, int numCTAs)
{
setNumCTAs((KernelPointer)kernel, numCTAs);
}
template <typename T>
void computeNumCTAs(T kernel, int smemDynamicBytes)
{
computeNumCTAs((KernelPointer)kernel, smemDynamicBytes, bManualCoalesce);
}
// In emulation mode, we need __syncthreads() inside warp-synchronous code,
// but we don't in code running on the GPU, so we define this macro to use
// in the warp-scan portion of the radix sort (see CUDPP for information
// on the warp scan algorithm).
#ifdef __DEVICE_EMULATION__
#define __SYNC __syncthreads();
#else
#define __SYNC
#endif
typedef unsigned int uint;
__global__ void emptyKernel() {}
// -----------------------------------------------------------------------------------------------
// The floatFlip and floatUnflip functions below are based on code in the web article
// "Radix Tricks" by Michael Herf (http://www.stereopsis.com/radix.html). They are used to convert
// floating point values into sortable unsigned integers (and back).
//
// Paraphrasing Michael: Binary single-precision floating point numbers have two features that
// keep them from being directly sortable. First, the sign bit is set when the value is negative,
// which means that all negative numbers are bigger than positive ones. Second, the values are
// signed-magnitude, so "more negative" floating point numbers actually look bigger to a normal
// bitwise comparison.
//
// "To fix our floating point numbers, we define the following rules:
//
// 1. Always flip the sign bit.
// 2. If the sign bit was set, flip the other bits too.
//
// To get back, we flip the sign bit always, and if the sign bit was not set, we flip the other
// bits too."
//
// This is a very inexpensive operation and it is only done on the first and last steps of the
// sort.
// -----------------------------------------------------------------------------------------------
// ================================================================================================
// Flip a float for sorting
// finds SIGN of fp number.
// if it's 1 (negative float), it flips all bits
// if it's 0 (positive float), it flips the sign only
// ================================================================================================
template <bool doFlip>
__device__ uint floatFlip(uint f)
{
if (doFlip)
{
uint mask = -int(f >> 31) | 0x80000000;
return f ^ mask;
}
else
return f;
}
// ================================================================================================
// flip a float back (invert FloatFlip)
// signed was flipped from above, so:
// if sign is 1 (negative), it flips the sign bit back
// if sign is 0 (positive), it flips all bits back
// ================================================================================================
template <bool doFlip>
__device__ uint floatUnflip(uint f)
{
if (doFlip)
{
uint mask = ((f >> 31) - 1) | 0x80000000;
return f ^ mask;
}
else
return f;
}
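// ------------------------------------------------------------------------------------
// Added illustration (not used by the sort): applying the flip rule to two concrete
// IEEE-754 bit patterns. 0x3F800000 encodes +1.0f and 0xBF800000 encodes -1.0f.
// A minimal device-side sanity check, shown as a comment so it does not alter the build:
//
//     uint a = floatFlip<true>(0xBF800000u);   // bits of -1.0f -> 0x407FFFFFu
//     uint b = floatFlip<true>(0x3F800000u);   // bits of +1.0f -> 0xBF800000u
//     // a < b as unsigned ints, so negatives now sort before positives, and
//     // floatUnflip<true>() maps both values back to their original bit patterns.
// ------------------------------------------------------------------------------------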
// ================================================================================================
// Kernel to flip all floats in an array (see floatFlip, above)
// Each thread flips four values (each 256-thread CTA flips 1024 values).
// ================================================================================================
__global__ void flipFloats(uint *values, uint numValues)
{
uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
}
// ================================================================================================
// Kernel to unflip all floats in an array (see floatUnflip, above)
// Each thread unflips four values (each 256-thread CTA unflips 1024 values).
// ================================================================================================
__global__ void unflipFloats(uint *values, uint numValues)
{
uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
}
//----------------------------------------------------------------------------
// Scans each warp in parallel ("warp-scan"), one element per thread.
// uses 2 numElements of shared memory per thread (64 = elements per warp)
//----------------------------------------------------------------------------
template<class T, int maxlevel>
__device__ T scanwarp(T val, volatile T* sData)
{
// The following is the same as 2 * RadixSort::WARP_SIZE * warpId + threadInWarp =
// 64*(threadIdx.x >> 5) + (threadIdx.x & (RadixSort::WARP_SIZE - 1))
int idx = 2 * threadIdx.x - (threadIdx.x & (RadixSort::WARP_SIZE - 1));
sData[idx] = 0;
idx += RadixSort::WARP_SIZE;
T t = sData[idx] = val; __SYNC
#ifdef __DEVICE_EMULATION__
    t = sData[idx - 1]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 2]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 4]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 8]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 16]; __SYNC
sData[idx] += t; __SYNC
#else
if (0 <= maxlevel) { sData[idx] = t = t + sData[idx - 1]; } __SYNC
if (1 <= maxlevel) { sData[idx] = t = t + sData[idx - 2]; } __SYNC
if (2 <= maxlevel) { sData[idx] = t = t + sData[idx - 4]; } __SYNC
if (3 <= maxlevel) { sData[idx] = t = t + sData[idx - 8]; } __SYNC
if (4 <= maxlevel) { sData[idx] = t = t + sData[idx -16]; } __SYNC
#endif
return sData[idx] - val; // convert inclusive -> exclusive
}
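// Added illustration (assumes a full warp of participating lanes): scanwarp returns the
// exclusive prefix sum of val across one warp. If the 32 lanes pass in 1, 2, 3, ..., 32,
// lane i gets back the sum of lanes 0..i-1, i.e. 0, 1, 3, 6, ..., 496, while sData keeps
// the inclusive sums 1, 3, 6, 10, ..., 528.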
//----------------------------------------------------------------------------
// scan4 scans 4*RadixSort::CTA_SIZE numElements in a block (4 per thread), using
// a warp-scan algorithm
//----------------------------------------------------------------------------
__device__ uint4 scan4(uint4 idata)
{
extern __shared__ uint ptr[];
uint idx = threadIdx.x;
uint4 val4 = idata;
uint sum[3];
sum[0] = val4.x;
sum[1] = val4.y + sum[0];
sum[2] = val4.z + sum[1];
uint val = val4.w + sum[2];
val = scanwarp<uint, 4>(val, ptr);
__syncthreads();
if ((idx & (RadixSort::WARP_SIZE - 1)) == RadixSort::WARP_SIZE - 1)
{
ptr[idx >> 5] = val + val4.w + sum[2];
}
__syncthreads();
#ifndef __DEVICE_EMULATION__
if (idx < RadixSort::WARP_SIZE)
#endif
{
ptr[idx] = scanwarp<uint, 2>(ptr[idx], ptr);
}
__syncthreads();
val += ptr[idx >> 5];
val4.x = val;
val4.y = val + sum[0];
val4.z = val + sum[1];
val4.w = val + sum[2];
return val4;
}
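// Added illustration: scan4 returns, per thread, the exclusive prefix sums of its four
// inputs, offset by the exclusive sum of everything held by lower-numbered threads in
// the CTA. E.g. if thread 0 holds (1,1,1,1) and thread 1 holds (2,2,2,2), thread 0
// receives (0,1,2,3) and thread 1 receives (4,6,8,10).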
//----------------------------------------------------------------------------
//
// Rank is the core of the radix sort loop. Given a predicate, it
// computes the output position for each thread in an ordering where all
// True threads come first, followed by all False threads.
//
// This version handles 4 predicates per thread; hence, "rank4".
//
//----------------------------------------------------------------------------
template <int ctasize>
__device__ uint4 rank4(uint4 preds)
{
uint4 address = scan4(preds);
__shared__ uint numtrue;
if (threadIdx.x == ctasize-1)
{
numtrue = address.w + preds.w;
}
__syncthreads();
uint4 rank;
uint idx = threadIdx.x << 2;
rank.x = (preds.x) ? address.x : numtrue + idx - address.x;
rank.y = (preds.y) ? address.y : numtrue + idx + 1 - address.y;
rank.z = (preds.z) ? address.z : numtrue + idx + 2 - address.z;
rank.w = (preds.w) ? address.w : numtrue + idx + 3 - address.w;
return rank;
}
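// Added illustration (toy CTA of two threads, eight elements): with per-thread predicates
// t0 = (1,0,1,0) and t1 = (0,1,1,0), i.e. the flat vector [1,0,1,0,0,1,1,0], scan4 yields
// [0,1,1,2,2,2,3,4] and numtrue = 4, so rank4 assigns output positions [0,4,1,5,6,2,3,7]:
// the four True elements keep their relative order in slots 0..3 and the False elements
// follow, in order, in slots 4..7.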
//----------------------------------------------------------------------------
// Uses rank to sort one bit at a time: Sorts a block according
// to bits startbit -> nbits + startbit
//
// Each thread sorts 4 elements by nbits bits
//----------------------------------------------------------------------------
template<uint nbits, uint startbit>
__device__ void radixSortBlock(uint4 &key, uint4 &value)
{
extern __shared__ uint sMem1[];
for(uint shift = startbit; shift < (startbit + nbits); ++shift)
{
uint4 lsb;
lsb.x = !((key.x >> shift) & 0x1);
lsb.y = !((key.y >> shift) & 0x1);
lsb.z = !((key.z >> shift) & 0x1);
lsb.w = !((key.w >> shift) & 0x1);
uint4 r = rank4<RadixSort::CTA_SIZE>(lsb);
// This arithmetic strides the ranks across 4 CTA_SIZE regions
sMem1[(r.x & 3) * RadixSort::CTA_SIZE + (r.x >> 2)] = key.x;
sMem1[(r.y & 3) * RadixSort::CTA_SIZE + (r.y >> 2)] = key.y;
sMem1[(r.z & 3) * RadixSort::CTA_SIZE + (r.z >> 2)] = key.z;
sMem1[(r.w & 3) * RadixSort::CTA_SIZE + (r.w >> 2)] = key.w;
__syncthreads();
// The above allows us to read without 4-way bank conflicts:
key.x = sMem1[threadIdx.x];
key.y = sMem1[threadIdx.x + RadixSort::CTA_SIZE];
key.z = sMem1[threadIdx.x + 2 * RadixSort::CTA_SIZE];
key.w = sMem1[threadIdx.x + 3 * RadixSort::CTA_SIZE];
__syncthreads();
sMem1[(r.x & 3) * RadixSort::CTA_SIZE + (r.x >> 2)] = value.x;
sMem1[(r.y & 3) * RadixSort::CTA_SIZE + (r.y >> 2)] = value.y;
sMem1[(r.z & 3) * RadixSort::CTA_SIZE + (r.z >> 2)] = value.z;
sMem1[(r.w & 3) * RadixSort::CTA_SIZE + (r.w >> 2)] = value.w;
__syncthreads();
value.x = sMem1[threadIdx.x];
value.y = sMem1[threadIdx.x + RadixSort::CTA_SIZE];
value.z = sMem1[threadIdx.x + 2 * RadixSort::CTA_SIZE];
value.w = sMem1[threadIdx.x + 3 * RadixSort::CTA_SIZE];
__syncthreads();
}
}
//----------------------------------------------------------------------------
//
// radixSortBlocks sorts all blocks of data independently in shared
// memory. Each thread block (CTA) sorts one block of 4*CTA_SIZE elements
//
// The radix sort is done in two stages. This stage calls radixSortBlock on each
// block independently, sorting on the basis of bits (startbit) -> (startbit + nbits)
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size (fullBlocks)
// differently than arrays that are not. "flip" is used to only compile in the
// float flip code when float keys are used. "loop" is used when persistent CTAs
// are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//----------------------------------------------------------------------------
template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop>
__global__ void radixSortBlocks(uint4* keysOut, uint4* valuesOut,
uint4* keysIn, uint4* valuesIn,
uint numElements, uint totalBlocks)
{
extern __shared__ uint4 sMem[];
uint4 key, value;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
uint idx = i << 2;
// handle non-full last block if array is not multiple of 1024 numElements
if (!fullBlocks && idx+3 >= numElements)
{
if (idx >= numElements)
{
key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
value = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
}
else
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysIn;
uint *values1 = (uint*)valuesIn;
key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX;
key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX;
key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX;
key.w = UINT_MAX;
value.x = (idx < numElements) ? values1[idx] : UINT_MAX;
value.y = (idx+1 < numElements) ? values1[idx+1] : UINT_MAX;
value.z = (idx+2 < numElements) ? values1[idx+2] : UINT_MAX;
value.w = UINT_MAX;
}
}
else
{
key = keysIn[i];
value = valuesIn[i];
if (flip)
{
key.x = floatFlip<flip>(key.x);
key.y = floatFlip<flip>(key.y);
key.z = floatFlip<flip>(key.z);
key.w = floatFlip<flip>(key.w);
}
}
__syncthreads();
radixSortBlock<nbits, startbit>(key, value);
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && idx+3 >= numElements)
{
if (idx < numElements)
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysOut;
uint *values1 = (uint*)valuesOut;
keys1[idx] = key.x;
values1[idx] = value.x;
if (idx + 1 < numElements)
{
keys1[idx + 1] = key.y;
values1[idx + 1] = value.y;
if (idx + 2 < numElements)
{
keys1[idx + 2] = key.z;
values1[idx + 2] = value.z;
}
}
}
}
else
{
keysOut[i] = key;
valuesOut[i] = value;
}
if (loop)
blockId += gridDim.x;
else
break;
}
}
//----------------------------------------------------------------------------
// Given an array with blocks sorted according to a 4-bit radix group, each
// block counts the number of keys that fall into each radix in the group, and
// finds the starting offset of each radix in the block. It then writes the radix
// counts to the counters array, and the starting offsets to the blockOffsets array.
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size
// (fullBlocks) differently than arrays that are not. "loop" is used when persistent
// CTAs are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//
//----------------------------------------------------------------------------
template<uint startbit, bool fullBlocks, bool loop>
__global__ void findRadixOffsets(uint2 *keys,
uint *counters,
uint *blockOffsets,
uint numElements,
uint totalBlocks)
{
extern __shared__ uint sRadix1[];
__shared__ uint sStartPointers[16];
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint2 radix2;
uint i = blockId * blockDim.x + threadIdx.x;
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && ((i + 1) << 1 ) > numElements )
{
// handle uint1 rather than uint2 for non-full blocks
uint *keys1 = (uint*)keys;
uint j = i << 1;
radix2.x = (j < numElements) ? keys1[j] : UINT_MAX;
j++;
radix2.y = (j < numElements) ? keys1[j] : UINT_MAX;
}
else
{
radix2 = keys[i];
}
sRadix1[2 * threadIdx.x] = (radix2.x >> startbit) & 0xF;
sRadix1[2 * threadIdx.x + 1] = (radix2.y >> startbit) & 0xF;
// Finds the position where the sRadix1 entries differ and stores start
// index for each radix.
if(threadIdx.x < 16)
{
sStartPointers[threadIdx.x] = 0;
}
__syncthreads();
if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) )
{
sStartPointers[sRadix1[threadIdx.x]] = threadIdx.x;
}
if(sRadix1[threadIdx.x + RadixSort::CTA_SIZE] != sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1])
{
sStartPointers[sRadix1[threadIdx.x + RadixSort::CTA_SIZE]] = threadIdx.x + RadixSort::CTA_SIZE;
}
__syncthreads();
if(threadIdx.x < 16)
{
blockOffsets[blockId*16 + threadIdx.x] = sStartPointers[threadIdx.x];
}
__syncthreads();
// Compute the sizes of each block.
if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) )
{
sStartPointers[sRadix1[threadIdx.x - 1]] =
threadIdx.x - sStartPointers[sRadix1[threadIdx.x - 1]];
}
if(sRadix1[threadIdx.x + RadixSort::CTA_SIZE] != sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1] )
{
sStartPointers[sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1]] =
threadIdx.x + RadixSort::CTA_SIZE - sStartPointers[sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1]];
}
if(threadIdx.x == RadixSort::CTA_SIZE - 1)
{
sStartPointers[sRadix1[2 * RadixSort::CTA_SIZE - 1]] =
2 * RadixSort::CTA_SIZE - sStartPointers[sRadix1[2 * RadixSort::CTA_SIZE - 1]];
}
__syncthreads();
if(threadIdx.x < 16)
{
counters[threadIdx.x * totalBlocks + blockId] =
sStartPointers[threadIdx.x];
}
if (loop)
blockId += gridDim.x;
else
break;
}
}
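// Added note (assumption about how these outputs are consumed; the scan itself is not in
// this file): counters is written radix-major, counters[radix * totalBlocks + blockId],
// so one exclusive scan over the whole array -- e.g. via the CUDPP plan this file links
// against -- gives, for every (radix, block) pair, the global offset at which reorderData
// starts writing that block's keys belonging to that radix (reorderData reads the scanned
// array back with the same radix-major indexing).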
//----------------------------------------------------------------------------
// reorderData shuffles data in the array globally after the radix offsets
// have been found. On compute version 1.1 and earlier GPUs, this code depends
// on RadixSort::CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits).
//
// On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures
// that all writes are coalesced using extra work in the kernel. On later
// GPUs coalescing rules have been relaxed, so this extra overhead hurts
// performance. On these GPUs we set manualCoalesce=false and directly store
// the results.
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size
// (fullBlocks) differently than arrays that are not. "loop" is used when persistent
// CTAs are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//----------------------------------------------------------------------------
template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop>
__global__ void reorderData(uint *outKeys,
uint *outValues,
uint2 *keys,
uint2 *values,
uint *blockOffsets,
uint *offsets,
uint *sizes,
uint numElements,
uint totalBlocks)
{
__shared__ uint2 sKeys2[RadixSort::CTA_SIZE];
__shared__ uint2 sValues2[RadixSort::CTA_SIZE];
__shared__ uint sOffsets[16];
__shared__ uint sBlockOffsets[16];
uint *sKeys1 = (uint*)sKeys2;
uint *sValues1 = (uint*)sValues2;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && (((i + 1) << 1) > numElements))
{
uint *keys1 = (uint*)keys;
uint *values1 = (uint*)values;
uint j = i << 1;
sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX;
sValues1[threadIdx.x << 1] = (j < numElements) ? values1[j] : UINT_MAX;
j++;
sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX;
sValues1[(threadIdx.x << 1) + 1] = (j < numElements) ? values1[j] : UINT_MAX;
}
else
{
sKeys2[threadIdx.x] = keys[i];
sValues2[threadIdx.x] = values[i];
}
if (!manualCoalesce)
{
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
}
__syncthreads();
uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]);
outValues[globalOffset] = sValues1[threadIdx.x];
}
radix = (sKeys1[threadIdx.x + RadixSort::CTA_SIZE] >> startbit) & 0xF;
globalOffset = sOffsets[radix] + threadIdx.x + RadixSort::CTA_SIZE - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + RadixSort::CTA_SIZE]);
outValues[globalOffset] = sValues1[threadIdx.x + RadixSort::CTA_SIZE];
}
}
else
{
__shared__ uint sSizes[16];
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId];
}
__syncthreads();
// 1 half-warp is responsible for writing out all values for 1 radix.
// Loops if there are more than 16 values to be written out.
// All start indices are rounded down to the nearest multiple of 16, and
// all end indices are rounded up to the nearest multiple of 16.
// Thus it can do extra work if the start and end indices are not multiples of 16
// This is bounded by a factor of 2 (it can do 2X more work at most).
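                // Added worked example (illustrative numbers): if sOffsets[halfWarpID] = 37 and
                // sSizes[halfWarpID] = 5, then startPos = 32 and endPos = 48, so this half-warp
                // makes a single 16-wide pass over [32, 48) and the guards below let only the
                // five lanes covering global offsets 37..41 actually store their key/value.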
const uint halfWarpID = threadIdx.x >> 4;
const uint halfWarpOffset = threadIdx.x & 0xF;
const uint leadingInvalid = sOffsets[halfWarpID] & 0xF;
uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0;
uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 -
((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF);
uint numIterations = endPos - startPos;
uint outOffset = startPos + halfWarpOffset;
uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset;
for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16)
{
if( (outOffset >= sOffsets[halfWarpID]) &&
(inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID]))
{
if(blockId < totalBlocks - 1 || outOffset < numElements)
{
outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]);
outValues[outOffset] = sValues1[inOffset];
}
}
}
}
if (loop)
{
blockId += gridDim.x;
__syncthreads();
}
else
break;
}
}
//----------------------------------------------------------------------------
// Optimization for sorts of WARP_SIZE or fewer elements
//----------------------------------------------------------------------------
template <bool flip>
__global__
void radixSortSingleWarp(uint *keys,
uint *values,
uint numElements)
{
volatile __shared__ uint sKeys[RadixSort::WARP_SIZE];
volatile __shared__ uint sValues[RadixSort::WARP_SIZE];
volatile __shared__ uint sFlags[RadixSort::WARP_SIZE];
sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]);
sValues[threadIdx.x] = values[threadIdx.x];
__SYNC // emulation only
for(uint i = 1; i < numElements; i++)
{
uint key_i = sKeys[i];
uint val_i = sValues[i];
sFlags[threadIdx.x] = 0;
if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
{
uint temp = sKeys[threadIdx.x];
uint tempval = sValues[threadIdx.x];
sFlags[threadIdx.x] = 1;
sKeys[threadIdx.x + 1] = temp;
sValues[threadIdx.x + 1] = tempval;
sFlags[threadIdx.x + 1] = 0;
}
if(sFlags[threadIdx.x] == 1 )
{
sKeys[threadIdx.x] = key_i;
sValues[threadIdx.x] = val_i;
}
__SYNC // emulation only
}
keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]);
values[threadIdx.x] = sValues[threadIdx.x];
}
//----------------------------------------------------------------------------
// Key-only Sorts
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// Optimization for sorts of WARP_SIZE or fewer elements
//----------------------------------------------------------------------------
template <bool flip>
__global__
void radixSortSingleWarpKeysOnly(uint *keys,
uint numElements)
{
volatile __shared__ uint sKeys[RadixSort::WARP_SIZE];
volatile __shared__ uint sFlags[RadixSort::WARP_SIZE];
sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]);
__SYNC // emulation only
for(uint i = 1; i < numElements; i++)
{
uint key_i = sKeys[i];
sFlags[threadIdx.x] = 0;
if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
{
uint temp = sKeys[threadIdx.x];
sFlags[threadIdx.x] = 1;
sKeys[threadIdx.x + 1] = temp;
sFlags[threadIdx.x + 1] = 0;
}
if(sFlags[threadIdx.x] == 1 )
{
sKeys[threadIdx.x] = key_i;
}
__SYNC // emulation only
}
keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]);
}
//----------------------------------------------------------------------------
// Uses rank to sort one bit at a time: Sorts a block according
// to bits startbit -> nbits + startbit
//----------------------------------------------------------------------------
template<uint nbits, uint startbit>
__device__ void radixSortBlockKeysOnly(uint4 &key)
{
extern __shared__ uint sMem1[];
for(uint shift = startbit; shift < (startbit + nbits); ++shift)
{
uint4 lsb;
lsb.x = !((key.x >> shift) & 0x1);
lsb.y = !((key.y >> shift) & 0x1);
lsb.z = !((key.z >> shift) & 0x1);
lsb.w = !((key.w >> shift) & 0x1);
uint4 r = rank4<256>(lsb);
#if 1
// This arithmetic strides the ranks across 4 CTA_SIZE regions
sMem1[(r.x & 3) * RadixSort::CTA_SIZE + (r.x >> 2)] = key.x;
sMem1[(r.y & 3) * RadixSort::CTA_SIZE + (r.y >> 2)] = key.y;
sMem1[(r.z & 3) * RadixSort::CTA_SIZE + (r.z >> 2)] = key.z;
sMem1[(r.w & 3) * RadixSort::CTA_SIZE + (r.w >> 2)] = key.w;
__syncthreads();
// The above allows us to read without 4-way bank conflicts:
key.x = sMem1[threadIdx.x];
key.y = sMem1[threadIdx.x + RadixSort::CTA_SIZE];
key.z = sMem1[threadIdx.x + 2 * RadixSort::CTA_SIZE];
key.w = sMem1[threadIdx.x + 3 * RadixSort::CTA_SIZE];
#else
sMem1[r.x] = key.x;
sMem1[r.y] = key.y;
sMem1[r.z] = key.z;
sMem1[r.w] = key.w;
__syncthreads();
// This access has 4-way bank conflicts
key = sMem[threadIdx.x];
#endif
__syncthreads();
}
}
//----------------------------------------------------------------------------
//
// radixSortBlocks sorts all blocks of data independently in shared
// memory. Each thread block (CTA) sorts one block of 4*CTA_SIZE elements
//
// The radix sort is done in two stages. This stage calls radixSortBlock on each
// block independently, sorting on the basis of bits (startbit) -> (startbit + nbits)
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size (fullBlocks)
// differently than arrays that are not. "flip" is used to only compile in the
// float flip code when float keys are used. "loop" is used when persistent CTAs
// are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//----------------------------------------------------------------------------
template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop>
__global__ void radixSortBlocksKeysOnly(uint4* keysOut, uint4* keysIn, uint numElements, uint totalBlocks)
{
extern __shared__ uint4 sMem[];
uint4 key;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
uint idx = i << 2;
// handle non-full last block if array is not multiple of 1024 numElements
if (!fullBlocks && idx+3 >= numElements)
{
if (idx >= numElements)
{
key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
}
else
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysIn;
key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX;
key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX;
key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX;
key.w = UINT_MAX;
}
}
else
{
key = keysIn[i];
if (flip)
{
key.x = floatFlip<flip>(key.x);
key.y = floatFlip<flip>(key.y);
key.z = floatFlip<flip>(key.z);
key.w = floatFlip<flip>(key.w);
}
}
__syncthreads();
radixSortBlockKeysOnly<nbits, startbit>(key);
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && idx+3 >= numElements)
{
if (idx < numElements)
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysOut;
keys1[idx] = key.x;
if (idx + 1 < numElements)
{
keys1[idx + 1] = key.y;
if (idx + 2 < numElements)
{
keys1[idx + 2] = key.z;
}
}
}
}
else
{
keysOut[i] = key;
}
if (loop)
blockId += gridDim.x;
else
break;
}
}
//----------------------------------------------------------------------------
// reorderData shuffles data in the array globally after the radix offsets
// have been found. On compute version 1.1 and earlier GPUs, this code depends
// on RadixSort::CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits).
//
// On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures
// that all writes are coalesced using extra work in the kernel. On later
// GPUs coalescing rules have been relaxed, so this extra overhead hurts
// performance. On these GPUs we set manualCoalesce=false and directly store
// the results.
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size
// (fullBlocks) differently than arrays that are not. "loop" is used when persistent
// CTAs are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//----------------------------------------------------------------------------
template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop>
__global__ void reorderDataKeysOnly(uint *outKeys,
uint2 *keys,
uint *blockOffsets,
uint *offsets,
uint *sizes,
uint numElements,
uint totalBlocks)
{
__shared__ uint2 sKeys2[RadixSort::CTA_SIZE];
__shared__ uint sOffsets[16];
__shared__ uint sBlockOffsets[16];
uint *sKeys1 = (uint*)sKeys2;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
        // handle non-full last block if the array is not a multiple of 1024 elements
if(!fullBlocks && (((i + 1) << 1) > numElements))
{
uint *keys1 = (uint*)keys;
uint j = i << 1;
sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX;
j++;
sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX;
}
else
{
sKeys2[threadIdx.x] = keys[i];
}
if (!manualCoalesce)
{
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
}
__syncthreads();
uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]);
}
radix = (sKeys1[threadIdx.x + RadixSort::CTA_SIZE] >> startbit) & 0xF;
globalOffset = sOffsets[radix] + threadIdx.x + RadixSort::CTA_SIZE - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + RadixSort::CTA_SIZE]);
}
}
else
{
__shared__ uint sSizes[16];
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId];
}
__syncthreads();
// 1 half-warp is responsible for writing out all values for 1 radix.
// Loops if there are more than 16 values to be written out.
// All start indices are rounded down to the nearest multiple of 16, and
// all end indices are rounded up to the nearest multiple of 16.
// Thus it can do extra work if the start and end indices are not multiples of 16
// This is bounded by a factor of 2 (it can do 2X more work at most).
const uint halfWarpID = threadIdx.x >> 4;
const uint halfWarpOffset = threadIdx.x & 0xF;
const uint leadingInvalid = sOffsets[halfWarpID] & 0xF;
uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0;
uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 -
((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF);
uint numIterations = endPos - startPos;
uint outOffset = startPos + halfWarpOffset;
uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset;
for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16)
{
if( (outOffset >= sOffsets[halfWarpID]) &&
(inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID]))
{
if(blockId < totalBlocks - 1 || outOffset < numElements)
{
outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]);
}
}
}
}
if (loop)
{
blockId += gridDim.x;
__syncthreads();
}
else
break;
}
}
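//----------------------------------------------------------------------------
// Worked example (added for clarity; not in the original code) of the
// half-warp rounding used in the manualCoalesce path above. Suppose a radix
// has global offset 37 and 5 elements in this block. Then
//   startPos = 37 & 0xFFFFFFF0 = 32
//   endPos   = (37 + 5) + 15 - ((37 + 5 - 1) & 0xF) = 42 + 15 - 9 = 48
// so the half-warp makes a single 16-wide pass over [32, 48). Only the lanes
// whose outOffset falls in [37, 42) (and whose inOffset lies inside the
// radix's run) actually store, so every 16-word store is aligned and
// coalesced while the extra work stays within the factor-of-2 bound noted
// above.
//----------------------------------------------------------------------------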
extern "C" void checkCudaError(const char *msg)
{
#if defined(_DEBUG) || defined(DEBUG)
hipError_t e = hipDeviceSynchronize();
if( e != hipSuccess )
{
fprintf(stderr, "CUDA Error %s : %s\n", msg, hipGetErrorString(e));
exit(EXIT_FAILURE);
}
e = hipGetLastError();
if( e != hipSuccess )
{
fprintf(stderr, "CUDA Error %s : %s\n", msg, hipGetErrorString(e));
exit(EXIT_FAILURE);
}
#endif
}
//----------------------------------------------------------------------------
// Perform one step of the radix sort. Sorts by nbits key bits per step,
// starting at startbit.
//
// Uses cudppScan() for the prefix sum of radix counters.
//----------------------------------------------------------------------------
template<uint nbits, uint startbit, bool flip, bool unflip>
void radixSortStep(uint *keys,
uint *values,
uint *tempKeys,
uint *tempValues,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements)
{
const uint eltsPerBlock = RadixSort::CTA_SIZE * 4;
const uint eltsPerBlock2 = RadixSort::CTA_SIZE * 2;
bool fullBlocks = ((numElements % eltsPerBlock) == 0);
uint numBlocks = (fullBlocks) ?
(numElements / eltsPerBlock) :
(numElements / eltsPerBlock + 1);
uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ?
(numElements / eltsPerBlock2) :
(numElements / eltsPerBlock2 + 1);
bool loop = numBlocks > 65535;
uint blocks = loop ? 65535 : numBlocks;
uint blocksFind = loop ? 65535 : numBlocks2;
uint blocksReorder = loop ? 65535 : numBlocks2;
uint threshold = fullBlocks ? persistentCTAThresholdFullBlocks[0] : persistentCTAThreshold[0];
if (bUsePersistentCTAs && (numElements >= threshold))
{
loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536);
// Run an empty kernel -- this seems to reset some of the CTA scheduling hardware
// on GT200, resulting in better scheduling and lower run times
if (startbit > 0)
{
hipLaunchKernelGGL(( emptyKernel), dim3(numCTAs(emptyKernel)), dim3(RadixSort::CTA_SIZE), 0, 0, );
}
}
blocks = numBlocks;
blocksFind = numBlocks2;
blocksReorder = numBlocks2;
if (fullBlocks)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocks = flip? numCTAs(radixSortBlocks<4, 0, true, true, true>) : numCTAs(radixSortBlocks<4, 0, true, false, true>);
}
hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, true, flip, true>)
, dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
else
{
hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, true, flip, false>)
, dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocks = flip ? numCTAs(radixSortBlocks<4, 0, false, true, true>) : numCTAs(radixSortBlocks<4, 0, false, false, true>);
}
hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, false, flip, true>)
, dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
else
{
hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, false, flip, false>)
, dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
}
checkCudaError("radixSortBlocks");
if (fullBlocks)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksFind = numCTAs(findRadixOffsets<0, true, true>);
}
hipLaunchKernelGGL(( findRadixOffsets<startbit, true, true>)
, dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
{
hipLaunchKernelGGL(( findRadixOffsets<startbit, true, false>)
, dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksFind = numCTAs(findRadixOffsets<0, false, true>);
}
hipLaunchKernelGGL(( findRadixOffsets<startbit, false, true>)
, dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
{
hipLaunchKernelGGL(( findRadixOffsets<startbit, false, false>)
, dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
}
checkCudaError("findRadixOffsets");
cudppScan(scanPlan, countersSum, counters, 16*numBlocks2);
if (fullBlocks)
{
if (bManualCoalesce)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ? numCTAs(reorderData<0, true, true, true, true>) :
numCTAs(reorderData<0, true, true, false, true>);
}
hipLaunchKernelGGL(( reorderData<startbit, true, true, unflip, true>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
else
{
hipLaunchKernelGGL(( reorderData<startbit, true, true, unflip, false>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ? numCTAs(reorderData<0, true, false, true, true>) :
numCTAs(reorderData<0, true, false, false, true>);
}
hipLaunchKernelGGL(( reorderData<startbit, true, false, unflip, true>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
else
{
hipLaunchKernelGGL(( reorderData<startbit, true, false, unflip, false>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
}
}
else
{
if (bManualCoalesce)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderData<0, false, true, true, true>) :
numCTAs(reorderData<0, false, true, false, true>);
}
hipLaunchKernelGGL(( reorderData<startbit, false, true, unflip, true>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
else
{
hipLaunchKernelGGL(( reorderData<startbit, false, true, unflip, false>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderData<0, false, false, true, true>) :
numCTAs(reorderData<0, false, false, false, true>);
}
hipLaunchKernelGGL(( reorderData<startbit, false, false, unflip, true>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
else
{
hipLaunchKernelGGL(( reorderData<startbit, false, false, unflip, false>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
}
}
checkCudaError("radixSortStep");
}
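//----------------------------------------------------------------------------
// Summary of one radixSortStep pass (added for clarity; not in the original
// code):
//   1. radixSortBlocks  : keys/values -> tempKeys/tempValues, each block
//                         locally sorted on bits [startbit, startbit + nbits)
//   2. findRadixOffsets : per-block digit counts -> counters, per-block digit
//                         start positions -> blockOffsets
//   3. cudppScan        : prefix sum of counters -> countersSum, i.e. the
//                         global output offset of every (digit, block) pair
//   4. reorderData      : scatter tempKeys/tempValues back into keys/values
//                         using countersSum and blockOffsets
// After the pass, keys/values are stably sorted on the current 4-bit digit.
//----------------------------------------------------------------------------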
//----------------------------------------------------------------------------
// Optimization for sorts of 4 * CTA_SIZE or fewer elements
//----------------------------------------------------------------------------
template <bool flip>
void radixSortSingleBlock(uint *keys,
uint *values,
uint numElements)
{
bool fullBlocks = (numElements % (RadixSort::CTA_SIZE * 4) == 0);
if (fullBlocks)
{
hipLaunchKernelGGL(( radixSortBlocks<32, 0, true, flip, false>)
, dim3(1), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)keys, (uint4*)values,
(uint4*)keys, (uint4*)values,
numElements, 1 );
}
else
{
hipLaunchKernelGGL(( radixSortBlocks<32, 0, false, flip, false>)
, dim3(1), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)keys, (uint4*)values,
(uint4*)keys, (uint4*)values,
numElements, 1 );
}
if (flip)
hipLaunchKernelGGL(( unflipFloats), dim3(1), dim3(RadixSort::CTA_SIZE), 0, 0, keys, numElements);
checkCudaError("radixSortSingleBlock");
}
//----------------------------------------------------------------------------
// Main radix sort function. Sorts in place in the keys and values arrays,
// but uses the other device arrays as temporary storage. All pointer
// parameters are device pointers.
//----------------------------------------------------------------------------
extern "C" void radixSort(uint *keys,
uint *values,
uint *tempKeys,
uint *tempValues,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements,
uint keyBits,
bool flipBits = false)
{
if(numElements <= RadixSort::WARP_SIZE)
{
if (flipBits)
hipLaunchKernelGGL(( radixSortSingleWarp<true>), dim3(1), dim3(numElements), 0, 0, keys, values, numElements);
else
hipLaunchKernelGGL(( radixSortSingleWarp<false>), dim3(1), dim3(numElements), 0, 0, keys, values, numElements);
checkCudaError("radixSortSingleWarp");
return;
}
if(numElements <= RadixSort::CTA_SIZE * 4)
{
if (flipBits)
radixSortSingleBlock<true>(keys, values, numElements);
else
radixSortSingleBlock<false>(keys, values, numElements);
return;
}
// flip float bits on the first pass, unflip on the last pass
if (flipBits)
{
radixSortStep<4, 0, true, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
else
{ radixSortStep<4, 0, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 4)
{
radixSortStep<4, 4, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 8)
{
radixSortStep<4, 8, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 12)
{
radixSortStep<4, 12, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 16)
{
radixSortStep<4, 16, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 20)
{
radixSortStep<4, 20, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 24)
{
radixSortStep<4, 24, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 28)
{
if (flipBits) // last pass
{
radixSortStep<4, 28, false, true>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
else
{
radixSortStep<4, 28, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
}
checkCudaError("radixSort");
}
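//----------------------------------------------------------------------------
// Illustrative host-side usage sketch (added for clarity; not part of the
// original library). It assumes a CUDPP scan plan -- an exclusive uint add
// scan over at least 16 * ceil(numElements / (2 * RadixSort::CTA_SIZE))
// elements -- has already been created elsewhere, and that
// initDeviceParameters(false) has been called. The function name is a
// placeholder and error checking is omitted.
//----------------------------------------------------------------------------
static void exampleRadixSortUsage(uint *dKeys, uint *dValues,
                                  uint numElements, CUDPPHandle scanPlan)
{
    // Scratch space: one temp copy of keys/values plus 16 counters,
    // 16 scanned counters and 16 block offsets per 2*CTA_SIZE-element block.
    uint nBlocks2 = (numElements + 2 * RadixSort::CTA_SIZE - 1) /
                    (2 * RadixSort::CTA_SIZE);
    uint *dTempKeys = 0, *dTempValues = 0;
    uint *dCounters = 0, *dCountersSum = 0, *dBlockOffsets = 0;
    hipMalloc((void**)&dTempKeys,     numElements * sizeof(uint));
    hipMalloc((void**)&dTempValues,   numElements * sizeof(uint));
    hipMalloc((void**)&dCounters,     16 * nBlocks2 * sizeof(uint));
    hipMalloc((void**)&dCountersSum,  16 * nBlocks2 * sizeof(uint));
    hipMalloc((void**)&dBlockOffsets, 16 * nBlocks2 * sizeof(uint));
    // Sort all 32 key bits; pass flipBits = true instead when the uint keys
    // are reinterpreted floats (as radixSortFloatKeys does).
    radixSort(dKeys, dValues, dTempKeys, dTempValues, dCounters,
              dCountersSum, dBlockOffsets, scanPlan, numElements, 32, false);
    hipFree(dTempKeys);
    hipFree(dTempValues);
    hipFree(dCounters);
    hipFree(dCountersSum);
    hipFree(dBlockOffsets);
}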
extern "C" void radixSortFloatKeys(float *keys,
uint *values,
float *tempKeys,
uint *tempValues,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements,
uint keyBits,
bool negativeKeys)
{
radixSort((uint*)keys, values, (uint*)tempKeys, tempValues, counters,
countersSum, blockOffsets, scanPlan, numElements, keyBits,
negativeKeys);
checkCudaError("radixSortFloatKeys");
}
//----------------------------------------------------------------------------
// Perform one step of the radix sort. Sorts by nbits key bits per step,
// starting at startbit.
//----------------------------------------------------------------------------
template<uint nbits, uint startbit, bool flip, bool unflip>
void radixSortStepKeysOnly(uint *keys,
uint *tempKeys,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements)
{
const uint eltsPerBlock = RadixSort::CTA_SIZE * 4;
const uint eltsPerBlock2 = RadixSort::CTA_SIZE * 2;
bool fullBlocks = ((numElements % eltsPerBlock) == 0);
uint numBlocks = (fullBlocks) ?
(numElements / eltsPerBlock) :
(numElements / eltsPerBlock + 1);
uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ?
(numElements / eltsPerBlock2) :
(numElements / eltsPerBlock2 + 1);
bool loop = numBlocks > 65535;
//bool loop2 = numBlocks2 > 65535;
uint blocks = loop ? 65535 : numBlocks;
uint blocksFind = loop ? 65535 : numBlocks2;
uint blocksReorder = loop ? 65535 : numBlocks2;
uint threshold = fullBlocks ? persistentCTAThresholdFullBlocks[1] : persistentCTAThreshold[1];
if (bUsePersistentCTAs && (numElements >= threshold))
{
loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536);
}
blocks = numBlocks;
blocksFind = numBlocks2;
blocksReorder = numBlocks2;
if (fullBlocks)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocks = flip ? numCTAs(radixSortBlocksKeysOnly<4, 0, true, true, true>) :
numCTAs(radixSortBlocksKeysOnly<4, 0, true, false, true>);
}
hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, true, flip, true>)
, dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)tempKeys, (uint4*)keys, numElements, numBlocks);
}
else
hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, true, flip, false>)
, dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)tempKeys, (uint4*)keys, numElements, numBlocks);
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocks = flip ? numCTAs(radixSortBlocksKeysOnly<4, 0, false, true, true>) :
numCTAs(radixSortBlocksKeysOnly<4, 0, false, false, true>);
}
hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, false, flip, true>)
, dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)tempKeys, (uint4*)keys, numElements, numBlocks);
}
else
hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, false, flip, false>)
, dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)tempKeys, (uint4*)keys, numElements, numBlocks);
}
if (fullBlocks)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksFind = numCTAs(findRadixOffsets<0, true, true>);
}
hipLaunchKernelGGL(( findRadixOffsets<startbit, true, true>)
, dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
hipLaunchKernelGGL(( findRadixOffsets<startbit, true, false>)
, dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksFind = numCTAs(findRadixOffsets<0, false, true>);
}
hipLaunchKernelGGL(( findRadixOffsets<startbit, false, true>)
, dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
hipLaunchKernelGGL(( findRadixOffsets<startbit, false, false>)
, dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
cudppScan(scanPlan, countersSum, counters, 16*numBlocks2);
if (fullBlocks)
{
if (bManualCoalesce)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderDataKeysOnly<0, true, true, true, true>) :
numCTAs(reorderDataKeysOnly<0, true, true, false, true>);
}
hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, true, unflip, true>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, true, unflip, false>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderDataKeysOnly<0, true, false, true, true>) :
numCTAs(reorderDataKeysOnly<0, true, false, false, true>);
}
hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, false, unflip, true>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, false, unflip, false>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
}
else
{
if (bManualCoalesce)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderDataKeysOnly<0, false, true, true, true>) :
numCTAs(reorderDataKeysOnly<0, false, true, false, true>);
}
hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, true, unflip, true>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, true, unflip, false>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderDataKeysOnly<0, false, false, true, true>) :
numCTAs(reorderDataKeysOnly<0, false, false, false, true>);
}
hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, false, unflip, true>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, false, unflip, false>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
}
checkCudaError("radixSortStepKeysOnly");
}
//----------------------------------------------------------------------------
// Optimization for sorts of 4 * CTA_SIZE or fewer elements
//----------------------------------------------------------------------------
template <bool flip>
void radixSortSingleBlockKeysOnly(uint *keys,
uint numElements)
{
bool fullBlocks = (numElements % (RadixSort::CTA_SIZE * 4) == 0);
if (fullBlocks)
{
hipLaunchKernelGGL(( radixSortBlocksKeysOnly<32, 0, true, flip, false>)
, dim3(1), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)keys, (uint4*)keys, numElements, 1 );
}
else
{
hipLaunchKernelGGL(( radixSortBlocksKeysOnly<32, 0, false, flip, false>)
, dim3(1), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)keys, (uint4*)keys, numElements, 1 );
}
if (flip)
hipLaunchKernelGGL(( unflipFloats), dim3(1), dim3(RadixSort::CTA_SIZE), 0, 0, keys, numElements);
checkCudaError("radixSortSingleBlock");
}
//----------------------------------------------------------------------------
// Main key-only radix sort function. Sorts in place in the keys and values
// arrays, but uses the other device arrays as temporary storage. All pointer
// parameters are device pointers. Uses cudppScan() for the prefix sum of
// radix counters.
//----------------------------------------------------------------------------
extern "C" void radixSortKeysOnly(uint *keys,
uint *tempKeys,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements,
uint keyBits,
bool flipBits = false)
{
if(numElements <= RadixSort::WARP_SIZE)
{
if (flipBits)
hipLaunchKernelGGL(( radixSortSingleWarpKeysOnly<true>), dim3(1), dim3(numElements), 0, 0, keys, numElements);
else
hipLaunchKernelGGL(( radixSortSingleWarpKeysOnly<false>), dim3(1), dim3(numElements), 0, 0, keys, numElements);
checkCudaError("radixSortSingleWarp");
return;
}
if(numElements <= RadixSort::CTA_SIZE * 4)
{
if (flipBits)
radixSortSingleBlockKeysOnly<true>(keys, numElements);
else
radixSortSingleBlockKeysOnly<false>(keys, numElements);
return;
}
// flip float bits on the first pass, unflip on the last pass
if (flipBits)
{
radixSortStepKeysOnly<4, 0, true, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
else
{
radixSortStepKeysOnly<4, 0, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 4)
{
radixSortStepKeysOnly<4, 4, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 8)
{
radixSortStepKeysOnly<4, 8, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 12)
{
radixSortStepKeysOnly<4, 12, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 16)
{
radixSortStepKeysOnly<4, 16, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 20)
{
radixSortStepKeysOnly<4, 20, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 24)
{
radixSortStepKeysOnly<4, 24, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 28)
{
if (flipBits) // last pass
{
radixSortStepKeysOnly<4, 28, false, true>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
else
{
radixSortStepKeysOnly<4, 28, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
}
checkCudaError("radixSortKeysOnly");
}
//----------------------------------------------------------------------------
// Main float key-only radix sort function. Sorts in place in the keys and values
// arrays, but uses the other device arrays as temporary storage. All pointer
// parameters are device pointers. Uses cudppScan() for the prefix sum of
// radix counters.
//----------------------------------------------------------------------------
extern "C" void radixSortFloatKeysOnly(float *keys,
float *tempKeys,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements,
uint keyBits,
bool negativeKeys)
{
radixSortKeysOnly((uint*)keys, (uint*)tempKeys, counters, countersSum, blockOffsets,
scanPlan, numElements, keyBits, negativeKeys);
checkCudaError("radixSortFloatKeys");
}
extern "C" void initDeviceParameters(bool keysOnly)
{
int deviceID = -1;
if (hipSuccess == hipGetDevice(&deviceID))
{
hipDeviceProp_t devprop;
hipGetDeviceProperties(&devprop, deviceID);
int smVersion = devprop.major * 10 + devprop.minor;
// sm_12 and later devices don't need help with coalescing in the reorderData kernel
bManualCoalesce = (smVersion < 12);
bUsePersistentCTAs = (smVersion < 20);
if (bUsePersistentCTAs)
{
// Empirically we have found on pre-Fermi GPUs that for some (usually larger) sort
// sizes it is better to use exactly as many "persistent" CTAs
// as can fill the GPU, which loop over the "blocks" of work. For smaller
// arrays it is better to use the typical CUDA approach of launching one CTA
// per block of work.
// 0-element of these two-element arrays is for key-value sorts
// 1-element is for key-only sorts
persistentCTAThreshold[0] = bManualCoalesce ? 16777216 : 524288;
persistentCTAThresholdFullBlocks[0] = bManualCoalesce ? 2097152: 524288;
persistentCTAThreshold[1] = bManualCoalesce ? 16777216 : 8388608;
persistentCTAThresholdFullBlocks[1] = bManualCoalesce ? 2097152: 0;
// create a map of function pointers to register counts for more accurate occupancy calculation
// Must pass in the dynamic shared memory used by each kernel, since the runtime doesn't know it
// Note we only insert the "loop" version of the kernels (the one with the last template param = true)
// Because those are the only ones that require persistent CTAs that maximally fill the device.
computeNumCTAs(radixSortBlocks<4, 0, false, false, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocks<4, 0, false, true, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocks<4, 0, true, false, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocks<4, 0, true, true, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocksKeysOnly<4, 0, false, false, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocksKeysOnly<4, 0, false, true, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocksKeysOnly<4, 0, true, false, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocksKeysOnly<4, 0, true, true, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(findRadixOffsets<0, false, true>, 2 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(findRadixOffsets<0, true, true>, 2 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(reorderData<0, false, false, false, true>, 0);
computeNumCTAs(reorderData<0, false, false, true, true>, 0);
computeNumCTAs(reorderData<0, false, true, false, true>, 0);
computeNumCTAs(reorderData<0, false, true, true, true>, 0);
computeNumCTAs(reorderData<0, true, false, false, true>, 0);
computeNumCTAs(reorderData<0, true, false, true, true>, 0);
computeNumCTAs(reorderData<0, true, true, false, true>, 0);
computeNumCTAs(reorderData<0, true, true, true, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, false, false, false, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, false, false, true, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, false, true, false, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, false, true, true, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, true, false, false, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, true, false, true, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, true, true, false, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, true, true, true, true>, 0);
computeNumCTAs(emptyKernel, 0);
}
}
}
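//----------------------------------------------------------------------------
// Note (added for clarity; not in the original code): the computeNumCTAs()
// calls above cache, per kernel, how many CTAs can be resident on the device
// given that kernel's register and shared memory usage. radixSortStep and
// radixSortStepKeysOnly later read these cached values through numCTAs() to
// size the persistent-CTA ("loop") launches when bUsePersistentCTAs is set.
//----------------------------------------------------------------------------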
} // namespace nvRadixSort
| 2ac836066d29fda3341de0433ed50c341d9e3291.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// -----------------------------------------------------------------------
// Fast CUDA Radix Sort Implementation
//
// The parallel radix sort algorithm implemented by this code is described
// in the following paper.
//
// Satish, N., Harris, M., and Garland, M. "Designing Efficient Sorting
// Algorithms for Manycore GPUs". In Proceedings of IEEE International
// Parallel & Distributed Processing Symposium 2009 (IPDPS 2009).
//
// -----------------------------------------------------------------------
#include "radixsort.h"
#include "cudpp/cudpp.h"
#include <stdio.h>
#include <assert.h>
#if (CUDART_VERSION < 2020)
#error CUDA runtime version 2.2 or later required!
#endif
namespace nvRadixSort
{
// Used for creating a mapping of kernel functions to the number of CTAs to launch for each
typedef void* KernelPointer;
int getNumCTAs(KernelPointer kernel);
void setNumCTAs(KernelPointer kernel, int numCTAs);
void computeNumCTAs(KernelPointer kernel, int smemDynamicBytes, bool bManualCoalesce);
bool bManualCoalesce = false;
bool bUsePersistentCTAs = false;
unsigned int persistentCTAThreshold[2] = { 0, 0 };
unsigned int persistentCTAThresholdFullBlocks[2] = { 0, 0 };
template <typename T>
int numCTAs(T kernel)
{
return getNumCTAs((KernelPointer)kernel);
}
template <typename T>
void numCTAs(T kernel, int numCTAs)
{
setNumCTAs((KernelPointer)kernel, numCTAs);
}
template <typename T>
void computeNumCTAs(T kernel, int smemDynamicBytes)
{
computeNumCTAs((KernelPointer)kernel, smemDynamicBytes, bManualCoalesce);
}
// In emulation mode, we need __syncthreads() inside warp-synchronous code,
// but we don't in code running on the GPU, so we define this macro to use
// in the warp-scan portion of the radix sort (see CUDPP for information
// on the warp scan algorithm).
#ifdef __DEVICE_EMULATION__
#define __SYNC __syncthreads();
#else
#define __SYNC
#endif
typedef unsigned int uint;
__global__ void emptyKernel() {}
// -----------------------------------------------------------------------------------------------
// The floatFlip and floatUnflip functions below are based on code in the web article
// "Radix Tricks" by Michael Herf (http://www.stereopsis.com/radix.html). They are used to convert
// floating point values into sortable unsigned integers (and back).
//
// Paraphrasing Michael: Binary single-precision floating point numbers have two features that
// keep them from being directly sortable. First, the sign bit is set when the value is negative,
// which means that all negative numbers are bigger than positive ones. Second, the values are
// signed-magnitude, so "more negative" floating point numbers actually look bigger to a normal
// bitwise comparison.
//
// "To fix our floating point numbers, we define the following rules:
//
// 1. Always flip the sign bit.
// 2. If the sign bit was set, flip the other bits too.
//
// To get back, we flip the sign bit always, and if the sign bit was not set, we flip the other
// bits too."
//
// This is a very inexpensive operation and it is only done on the first and last steps of the
// sort.
// -----------------------------------------------------------------------------------------------
// ================================================================================================
// Flip a float for sorting
// finds SIGN of fp number.
// if it's 1 (negative float), it flips all bits
// if it's 0 (positive float), it flips the sign only
// ================================================================================================
template <bool doFlip>
__device__ uint floatFlip(uint f)
{
if (doFlip)
{
uint mask = -int(f >> 31) | 0x80000000;
return f ^ mask;
}
else
return f;
}
// ================================================================================================
// flip a float back (invert FloatFlip)
// signed was flipped from above, so:
// if sign is 1 (negative), it flips the sign bit back
// if sign is 0 (positive), it flips all bits back
// ================================================================================================
template <bool doFlip>
__device__ uint floatUnflip(uint f)
{
if (doFlip)
{
uint mask = ((f >> 31) - 1) | 0x80000000;
return f ^ mask;
}
else
return f;
}
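// ================================================================================================
// Worked example (added for clarity; not in the original code):
// -1.0f has bit pattern 0xBF800000 (sign bit set)   -> floatFlip flips all bits     -> 0x407FFFFF
// +1.0f has bit pattern 0x3F800000 (sign bit clear) -> floatFlip flips only the sign -> 0xBF800000
// As unsigned integers 0x407FFFFF < 0xBF800000, matching -1.0f < +1.0f, so the flipped keys can
// be radix sorted as plain uints; floatUnflip inverts the transformation after the final pass.
// ================================================================================================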
// ================================================================================================
// Kernel to flip all floats in an array (see floatFlip, above)
// Each thread flips four values (each 256-thread CTA flips 1024 values).
// ================================================================================================
__global__ void flipFloats(uint *values, uint numValues)
{
uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
}
// ================================================================================================
// Kernel to unflip all floats in an array (see floatUnflip, above)
// Each thread unflips four values (each 256-thread CTA unflips 1024 values).
// ================================================================================================
__global__ void unflipFloats(uint *values, uint numValues)
{
uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
}
//----------------------------------------------------------------------------
// Scans each warp in parallel ("warp-scan"), one element per thread.
// uses 2 numElements of shared memory per thread (64 = elements per warp)
//----------------------------------------------------------------------------
template<class T, int maxlevel>
__device__ T scanwarp(T val, volatile T* sData)
{
// The following is the same as 2 * RadixSort::WARP_SIZE * warpId + threadInWarp =
// 64*(threadIdx.x >> 5) + (threadIdx.x & (RadixSort::WARP_SIZE - 1))
int idx = 2 * threadIdx.x - (threadIdx.x & (RadixSort::WARP_SIZE - 1));
sData[idx] = 0;
idx += RadixSort::WARP_SIZE;
T t = sData[idx] = val; __SYNC
#ifdef __DEVICE_EMULATION__
    t = sData[idx - 1]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 2]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 4]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 8]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 16]; __SYNC
sData[idx] += t; __SYNC
#else
if (0 <= maxlevel) { sData[idx] = t = t + sData[idx - 1]; } __SYNC
if (1 <= maxlevel) { sData[idx] = t = t + sData[idx - 2]; } __SYNC
if (2 <= maxlevel) { sData[idx] = t = t + sData[idx - 4]; } __SYNC
if (3 <= maxlevel) { sData[idx] = t = t + sData[idx - 8]; } __SYNC
if (4 <= maxlevel) { sData[idx] = t = t + sData[idx -16]; } __SYNC
#endif
return sData[idx] - val; // convert inclusive -> exclusive
}
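//----------------------------------------------------------------------------
// Example (added for clarity; not in the original code): if every thread in a
// warp passes val = 1 to scanwarp, thread 0 gets back 0, thread 1 gets 1, ...,
// thread 31 gets 31 -- the return value is the exclusive prefix sum of the
// per-thread inputs within the warp.
//----------------------------------------------------------------------------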
//----------------------------------------------------------------------------
// scan4 scans 4*RadixSort::CTA_SIZE elements in a block (4 per thread), using
// a warp-scan algorithm
//----------------------------------------------------------------------------
__device__ uint4 scan4(uint4 idata)
{
extern __shared__ uint ptr[];
uint idx = threadIdx.x;
uint4 val4 = idata;
uint sum[3];
sum[0] = val4.x;
sum[1] = val4.y + sum[0];
sum[2] = val4.z + sum[1];
uint val = val4.w + sum[2];
val = scanwarp<uint, 4>(val, ptr);
__syncthreads();
if ((idx & (RadixSort::WARP_SIZE - 1)) == RadixSort::WARP_SIZE - 1)
{
ptr[idx >> 5] = val + val4.w + sum[2];
}
__syncthreads();
#ifndef __DEVICE_EMULATION__
if (idx < RadixSort::WARP_SIZE)
#endif
{
ptr[idx] = scanwarp<uint, 2>(ptr[idx], ptr);
}
__syncthreads();
val += ptr[idx >> 5];
val4.x = val;
val4.y = val + sum[0];
val4.z = val + sum[1];
val4.w = val + sum[2];
return val4;
}
//----------------------------------------------------------------------------
//
// Rank is the core of the radix sort loop. Given a predicate, it
// computes the output position for each thread in an ordering where all
// True threads come first, followed by all False threads.
//
// This version handles 4 predicates per thread; hence, "rank4".
//
//----------------------------------------------------------------------------
template <int ctasize>
__device__ uint4 rank4(uint4 preds)
{
uint4 address = scan4(preds);
__shared__ uint numtrue;
if (threadIdx.x == ctasize-1)
{
numtrue = address.w + preds.w;
}
__syncthreads();
uint4 rank;
uint idx = threadIdx.x << 2;
rank.x = (preds.x) ? address.x : numtrue + idx - address.x;
rank.y = (preds.y) ? address.y : numtrue + idx + 1 - address.y;
rank.z = (preds.z) ? address.z : numtrue + idx + 2 - address.z;
rank.w = (preds.w) ? address.w : numtrue + idx + 3 - address.w;
return rank;
}
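//----------------------------------------------------------------------------
// Toy example of rank4 (added for clarity; not in the original code), shown
// for 8 elements / 2 threads: predicates 1,0,1,1, 0,1,0,0 have exclusive scan
// 0,1,1,2, 3,3,4,4 and numtrue = 4, so the formula above assigns ranks
// 0,4,1,2, 5,3,6,7. All "true" elements land in output positions 0..3 and all
// "false" elements in 4..7, each group keeping its original order -- exactly
// the stable split-by-bit that radixSortBlock needs.
//----------------------------------------------------------------------------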
//----------------------------------------------------------------------------
// Uses rank to sort one bit at a time: Sorts a block according
// to bits startbit -> nbits + startbit
//
// Each thread sorts 4 elements by nbits bits
//----------------------------------------------------------------------------
template<uint nbits, uint startbit>
__device__ void radixSortBlock(uint4 &key, uint4 &value)
{
extern __shared__ uint sMem1[];
for(uint shift = startbit; shift < (startbit + nbits); ++shift)
{
uint4 lsb;
lsb.x = !((key.x >> shift) & 0x1);
lsb.y = !((key.y >> shift) & 0x1);
lsb.z = !((key.z >> shift) & 0x1);
lsb.w = !((key.w >> shift) & 0x1);
uint4 r = rank4<RadixSort::CTA_SIZE>(lsb);
// This arithmetic strides the ranks across 4 CTA_SIZE regions
sMem1[(r.x & 3) * RadixSort::CTA_SIZE + (r.x >> 2)] = key.x;
sMem1[(r.y & 3) * RadixSort::CTA_SIZE + (r.y >> 2)] = key.y;
sMem1[(r.z & 3) * RadixSort::CTA_SIZE + (r.z >> 2)] = key.z;
sMem1[(r.w & 3) * RadixSort::CTA_SIZE + (r.w >> 2)] = key.w;
__syncthreads();
// The above allows us to read without 4-way bank conflicts:
key.x = sMem1[threadIdx.x];
key.y = sMem1[threadIdx.x + RadixSort::CTA_SIZE];
key.z = sMem1[threadIdx.x + 2 * RadixSort::CTA_SIZE];
key.w = sMem1[threadIdx.x + 3 * RadixSort::CTA_SIZE];
__syncthreads();
sMem1[(r.x & 3) * RadixSort::CTA_SIZE + (r.x >> 2)] = value.x;
sMem1[(r.y & 3) * RadixSort::CTA_SIZE + (r.y >> 2)] = value.y;
sMem1[(r.z & 3) * RadixSort::CTA_SIZE + (r.z >> 2)] = value.z;
sMem1[(r.w & 3) * RadixSort::CTA_SIZE + (r.w >> 2)] = value.w;
__syncthreads();
value.x = sMem1[threadIdx.x];
value.y = sMem1[threadIdx.x + RadixSort::CTA_SIZE];
value.z = sMem1[threadIdx.x + 2 * RadixSort::CTA_SIZE];
value.w = sMem1[threadIdx.x + 3 * RadixSort::CTA_SIZE];
__syncthreads();
}
}
//----------------------------------------------------------------------------
//
// radixSortBlocks sorts all blocks of data independently in shared
// memory. Each thread block (CTA) sorts one block of 4*CTA_SIZE elements
//
// The radix sort is done in two stages. This stage calls radixSortBlock on each
// block independently, sorting on the basis of bits (startbit) -> (startbit + nbits)
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size (fullBlocks)
// differently than arrays that are not. "flip" is used to only compile in the
// float flip code when float keys are used. "loop" is used when persistent CTAs
// are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//----------------------------------------------------------------------------
template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop>
__global__ void radixSortBlocks(uint4* keysOut, uint4* valuesOut,
uint4* keysIn, uint4* valuesIn,
uint numElements, uint totalBlocks)
{
extern __shared__ uint4 sMem[];
uint4 key, value;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
uint idx = i << 2;
        // handle non-full last block if the array is not a multiple of 1024 elements
if (!fullBlocks && idx+3 >= numElements)
{
if (idx >= numElements)
{
key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
value = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
}
else
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysIn;
uint *values1 = (uint*)valuesIn;
key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX;
key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX;
key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX;
key.w = UINT_MAX;
value.x = (idx < numElements) ? values1[idx] : UINT_MAX;
value.y = (idx+1 < numElements) ? values1[idx+1] : UINT_MAX;
value.z = (idx+2 < numElements) ? values1[idx+2] : UINT_MAX;
value.w = UINT_MAX;
}
}
else
{
key = keysIn[i];
value = valuesIn[i];
if (flip)
{
key.x = floatFlip<flip>(key.x);
key.y = floatFlip<flip>(key.y);
key.z = floatFlip<flip>(key.z);
key.w = floatFlip<flip>(key.w);
}
}
__syncthreads();
radixSortBlock<nbits, startbit>(key, value);
        // handle non-full last block if the array is not a multiple of 1024 elements
if(!fullBlocks && idx+3 >= numElements)
{
if (idx < numElements)
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysOut;
uint *values1 = (uint*)valuesOut;
keys1[idx] = key.x;
values1[idx] = value.x;
if (idx + 1 < numElements)
{
keys1[idx + 1] = key.y;
values1[idx + 1] = value.y;
if (idx + 2 < numElements)
{
keys1[idx + 2] = key.z;
values1[idx + 2] = value.z;
}
}
}
}
else
{
keysOut[i] = key;
valuesOut[i] = value;
}
if (loop)
blockId += gridDim.x;
else
break;
}
}
//----------------------------------------------------------------------------
// Given an array with blocks sorted according to a 4-bit radix group, each
// block counts the number of keys that fall into each radix in the group, and
// finds the starting offset of each radix in the block. It then writes the radix
// counts to the counters array, and the starting offsets to the blockOffsets array.
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size
// (fullBlocks) differently than arrays that are not. "loop" is used when persistent
// CTAs are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//
//----------------------------------------------------------------------------
template<uint startbit, bool fullBlocks, bool loop>
__global__ void findRadixOffsets(uint2 *keys,
uint *counters,
uint *blockOffsets,
uint numElements,
uint totalBlocks)
{
extern __shared__ uint sRadix1[];
__shared__ uint sStartPointers[16];
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint2 radix2;
uint i = blockId * blockDim.x + threadIdx.x;
        // handle non-full last block if the array is not a multiple of 1024 elements
if(!fullBlocks && ((i + 1) << 1 ) > numElements )
{
// handle uint1 rather than uint2 for non-full blocks
uint *keys1 = (uint*)keys;
uint j = i << 1;
radix2.x = (j < numElements) ? keys1[j] : UINT_MAX;
j++;
radix2.y = (j < numElements) ? keys1[j] : UINT_MAX;
}
else
{
radix2 = keys[i];
}
sRadix1[2 * threadIdx.x] = (radix2.x >> startbit) & 0xF;
sRadix1[2 * threadIdx.x + 1] = (radix2.y >> startbit) & 0xF;
// Finds the position where the sRadix1 entries differ and stores start
// index for each radix.
if(threadIdx.x < 16)
{
sStartPointers[threadIdx.x] = 0;
}
__syncthreads();
if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) )
{
sStartPointers[sRadix1[threadIdx.x]] = threadIdx.x;
}
if(sRadix1[threadIdx.x + RadixSort::CTA_SIZE] != sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1])
{
sStartPointers[sRadix1[threadIdx.x + RadixSort::CTA_SIZE]] = threadIdx.x + RadixSort::CTA_SIZE;
}
__syncthreads();
if(threadIdx.x < 16)
{
blockOffsets[blockId*16 + threadIdx.x] = sStartPointers[threadIdx.x];
}
__syncthreads();
// Compute the sizes of each block.
if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) )
{
sStartPointers[sRadix1[threadIdx.x - 1]] =
threadIdx.x - sStartPointers[sRadix1[threadIdx.x - 1]];
}
if(sRadix1[threadIdx.x + RadixSort::CTA_SIZE] != sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1] )
{
sStartPointers[sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1]] =
threadIdx.x + RadixSort::CTA_SIZE - sStartPointers[sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1]];
}
if(threadIdx.x == RadixSort::CTA_SIZE - 1)
{
sStartPointers[sRadix1[2 * RadixSort::CTA_SIZE - 1]] =
2 * RadixSort::CTA_SIZE - sStartPointers[sRadix1[2 * RadixSort::CTA_SIZE - 1]];
}
__syncthreads();
if(threadIdx.x < 16)
{
counters[threadIdx.x * totalBlocks + blockId] =
sStartPointers[threadIdx.x];
}
if (loop)
blockId += gridDim.x;
else
break;
}
}
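//----------------------------------------------------------------------------
// Layout note (added for clarity; not in the original code): counters is
// written digit-major, counters[radix * totalBlocks + blockId], so a single
// prefix sum over the whole 16 * totalBlocks array yields the global output
// position of every (digit, block) pair -- first all 0-digits of block 0,
// block 1, ..., then all 1-digits, and so on -- which is what makes the
// scatter in reorderData a stable global sort on the digit. blockOffsets is
// stored block-major (blockOffsets[blockId * 16 + radix]) because it is only
// ever read back by the CTA that owns that block.
//----------------------------------------------------------------------------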
//----------------------------------------------------------------------------
// reorderData shuffles data in the array globally after the radix offsets
// have been found. On compute version 1.1 and earlier GPUs, this code depends
// on RadixSort::CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits).
//
// On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures
// that all writes are coalesced using extra work in the kernel. On later
// GPUs coalescing rules have been relaxed, so this extra overhead hurts
// performance. On these GPUs we set manualCoalesce=false and directly store
// the results.
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size
// (fullBlocks) differently than arrays that are not. "loop" is used when persistent
// CTAs are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//----------------------------------------------------------------------------
template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop>
__global__ void reorderData(uint *outKeys,
uint *outValues,
uint2 *keys,
uint2 *values,
uint *blockOffsets,
uint *offsets,
uint *sizes,
uint numElements,
uint totalBlocks)
{
__shared__ uint2 sKeys2[RadixSort::CTA_SIZE];
__shared__ uint2 sValues2[RadixSort::CTA_SIZE];
__shared__ uint sOffsets[16];
__shared__ uint sBlockOffsets[16];
uint *sKeys1 = (uint*)sKeys2;
uint *sValues1 = (uint*)sValues2;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
        // handle non-full last block if the array is not a multiple of 1024 elements
if(!fullBlocks && (((i + 1) << 1) > numElements))
{
uint *keys1 = (uint*)keys;
uint *values1 = (uint*)values;
uint j = i << 1;
sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX;
sValues1[threadIdx.x << 1] = (j < numElements) ? values1[j] : UINT_MAX;
j++;
sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX;
sValues1[(threadIdx.x << 1) + 1] = (j < numElements) ? values1[j] : UINT_MAX;
}
else
{
sKeys2[threadIdx.x] = keys[i];
sValues2[threadIdx.x] = values[i];
}
if (!manualCoalesce)
{
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
}
__syncthreads();
uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]);
outValues[globalOffset] = sValues1[threadIdx.x];
}
radix = (sKeys1[threadIdx.x + RadixSort::CTA_SIZE] >> startbit) & 0xF;
globalOffset = sOffsets[radix] + threadIdx.x + RadixSort::CTA_SIZE - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + RadixSort::CTA_SIZE]);
outValues[globalOffset] = sValues1[threadIdx.x + RadixSort::CTA_SIZE];
}
}
else
{
__shared__ uint sSizes[16];
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId];
}
__syncthreads();
// 1 half-warp is responsible for writing out all values for 1 radix.
// Loops if there are more than 16 values to be written out.
// All start indices are rounded down to the nearest multiple of 16, and
// all end indices are rounded up to the nearest multiple of 16.
// Thus it can do extra work if the start and end indices are not multiples of 16
// This is bounded by a factor of 2 (it can do 2X more work at most).
const uint halfWarpID = threadIdx.x >> 4;
const uint halfWarpOffset = threadIdx.x & 0xF;
const uint leadingInvalid = sOffsets[halfWarpID] & 0xF;
uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0;
uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 -
((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF);
uint numIterations = endPos - startPos;
uint outOffset = startPos + halfWarpOffset;
uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset;
for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16)
{
if( (outOffset >= sOffsets[halfWarpID]) &&
(inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID]))
{
if(blockId < totalBlocks - 1 || outOffset < numElements)
{
outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]);
outValues[outOffset] = sValues1[inOffset];
}
}
}
}
if (loop)
{
blockId += gridDim.x;
__syncthreads();
}
else
break;
}
}
//----------------------------------------------------------------------------
// Optimization for sorts of WARP_SIZE or fewer elements
//----------------------------------------------------------------------------
template <bool flip>
__global__
void radixSortSingleWarp(uint *keys,
uint *values,
uint numElements)
{
volatile __shared__ uint sKeys[RadixSort::WARP_SIZE];
volatile __shared__ uint sValues[RadixSort::WARP_SIZE];
volatile __shared__ uint sFlags[RadixSort::WARP_SIZE];
sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]);
sValues[threadIdx.x] = values[threadIdx.x];
__SYNC // emulation only
for(uint i = 1; i < numElements; i++)
{
uint key_i = sKeys[i];
uint val_i = sValues[i];
sFlags[threadIdx.x] = 0;
if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
{
uint temp = sKeys[threadIdx.x];
uint tempval = sValues[threadIdx.x];
sFlags[threadIdx.x] = 1;
sKeys[threadIdx.x + 1] = temp;
sValues[threadIdx.x + 1] = tempval;
sFlags[threadIdx.x + 1] = 0;
}
if(sFlags[threadIdx.x] == 1 )
{
sKeys[threadIdx.x] = key_i;
sValues[threadIdx.x] = val_i;
}
__SYNC // emulation only
}
keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]);
values[threadIdx.x] = sValues[threadIdx.x];
}
//----------------------------------------------------------------------------
// Key-only Sorts
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// Optimization for sorts of WARP_SIZE or fewer elements
//----------------------------------------------------------------------------
template <bool flip>
__global__
void radixSortSingleWarpKeysOnly(uint *keys,
uint numElements)
{
volatile __shared__ uint sKeys[RadixSort::WARP_SIZE];
volatile __shared__ uint sFlags[RadixSort::WARP_SIZE];
sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]);
__SYNC // emulation only
for(uint i = 1; i < numElements; i++)
{
uint key_i = sKeys[i];
sFlags[threadIdx.x] = 0;
if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
{
uint temp = sKeys[threadIdx.x];
sFlags[threadIdx.x] = 1;
sKeys[threadIdx.x + 1] = temp;
sFlags[threadIdx.x + 1] = 0;
}
if(sFlags[threadIdx.x] == 1 )
{
sKeys[threadIdx.x] = key_i;
}
__SYNC // emulation only
}
keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]);
}
//----------------------------------------------------------------------------
// Uses rank to sort one bit at a time: Sorts a block according
// to bits startbit -> nbits + startbit
//----------------------------------------------------------------------------
template<uint nbits, uint startbit>
__device__ void radixSortBlockKeysOnly(uint4 &key)
{
extern __shared__ uint sMem1[];
for(uint shift = startbit; shift < (startbit + nbits); ++shift)
{
uint4 lsb;
lsb.x = !((key.x >> shift) & 0x1);
lsb.y = !((key.y >> shift) & 0x1);
lsb.z = !((key.z >> shift) & 0x1);
lsb.w = !((key.w >> shift) & 0x1);
uint4 r = rank4<256>(lsb);
#if 1
// This arithmetic strides the ranks across 4 CTA_SIZE regions
sMem1[(r.x & 3) * RadixSort::CTA_SIZE + (r.x >> 2)] = key.x;
sMem1[(r.y & 3) * RadixSort::CTA_SIZE + (r.y >> 2)] = key.y;
sMem1[(r.z & 3) * RadixSort::CTA_SIZE + (r.z >> 2)] = key.z;
sMem1[(r.w & 3) * RadixSort::CTA_SIZE + (r.w >> 2)] = key.w;
__syncthreads();
// The above allows us to read without 4-way bank conflicts:
key.x = sMem1[threadIdx.x];
key.y = sMem1[threadIdx.x + RadixSort::CTA_SIZE];
key.z = sMem1[threadIdx.x + 2 * RadixSort::CTA_SIZE];
key.w = sMem1[threadIdx.x + 3 * RadixSort::CTA_SIZE];
#else
sMem1[r.x] = key.x;
sMem1[r.y] = key.y;
sMem1[r.z] = key.z;
sMem1[r.w] = key.w;
__syncthreads();
// This access has 4-way bank conflicts
key = ((uint4*)sMem1)[threadIdx.x];
#endif
__syncthreads();
}
}
//----------------------------------------------------------------------------
//
// radixSortBlocks sorts all blocks of data independently in shared
// memory. Each thread block (CTA) sorts one block of 4*CTA_SIZE elements
//
// The radix sort is done in two stages. This stage calls radixSortBlock on each
// block independently, sorting on the basis of bits (startbit) -> (startbit + nbits)
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size (fullBlocks)
// differently than arrays that are not. "flip" is used to only compile in the
// float flip code when float keys are used. "loop" is used when persistent CTAs
// are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//----------------------------------------------------------------------------
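//----------------------------------------------------------------------------
// Illustrative sketch (hypothetical kernel, not used by the sort): the
// persistent-CTA pattern described above reduces to the loop below, in which
// each resident CTA strides over blocks of work by gridDim.x. When "loop" is
// false the launch covers every block and each CTA runs exactly once.
//----------------------------------------------------------------------------
template <bool loop>
__global__ void persistentCTAPatternSketch(uint totalBlocks)
{
    uint blockId = blockIdx.x;
    while (!loop || blockId < totalBlocks)
    {
        // ... process the block of work identified by blockId here ...
        if (loop)
            blockId += gridDim.x;  // stride by the number of resident CTAs
        else
            break;                 // one CTA per block: single iteration
    }
}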
template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop>
__global__ void radixSortBlocksKeysOnly(uint4* keysOut, uint4* keysIn, uint numElements, uint totalBlocks)
{
extern __shared__ uint4 sMem[];
uint4 key;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
uint idx = i << 2;
// handle non-full last block if array is not multiple of 1024 numElements
if (!fullBlocks && idx+3 >= numElements)
{
if (idx >= numElements)
{
key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
}
else
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysIn;
key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX;
key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX;
key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX;
key.w = UINT_MAX;
}
}
else
{
key = keysIn[i];
if (flip)
{
key.x = floatFlip<flip>(key.x);
key.y = floatFlip<flip>(key.y);
key.z = floatFlip<flip>(key.z);
key.w = floatFlip<flip>(key.w);
}
}
__syncthreads();
radixSortBlockKeysOnly<nbits, startbit>(key);
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && idx+3 >= numElements)
{
if (idx < numElements)
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysOut;
keys1[idx] = key.x;
if (idx + 1 < numElements)
{
keys1[idx + 1] = key.y;
if (idx + 2 < numElements)
{
keys1[idx + 2] = key.z;
}
}
}
}
else
{
keysOut[i] = key;
}
if (loop)
blockId += gridDim.x;
else
break;
}
}
//----------------------------------------------------------------------------
// reorderData shuffles data in the array globally after the radix offsets
// have been found. On compute version 1.1 and earlier GPUs, this code depends
// on RadixSort::CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits).
//
// On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures
// that all writes are coalesced using extra work in the kernel. On later
// GPUs coalescing rules have been relaxed, so this extra overhead hurts
// performance. On these GPUs we set manualCoalesce=false and directly store
// the results.
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size
// (fullBlocks) differently than arrays that are not. "loop" is used when persistent
// CTAs are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//----------------------------------------------------------------------------
template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop>
__global__ void reorderDataKeysOnly(uint *outKeys,
uint2 *keys,
uint *blockOffsets,
uint *offsets,
uint *sizes,
uint numElements,
uint totalBlocks)
{
__shared__ uint2 sKeys2[RadixSort::CTA_SIZE];
__shared__ uint sOffsets[16];
__shared__ uint sBlockOffsets[16];
uint *sKeys1 = (uint*)sKeys2;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && (((i + 1) << 1) > numElements))
{
uint *keys1 = (uint*)keys;
uint j = i << 1;
sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX;
j++;
sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX;
}
else
{
sKeys2[threadIdx.x] = keys[i];
}
if (!manualCoalesce)
{
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
}
__syncthreads();
uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]);
}
radix = (sKeys1[threadIdx.x + RadixSort::CTA_SIZE] >> startbit) & 0xF;
globalOffset = sOffsets[radix] + threadIdx.x + RadixSort::CTA_SIZE - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + RadixSort::CTA_SIZE]);
}
}
else
{
__shared__ uint sSizes[16];
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId];
}
__syncthreads();
// 1 half-warp is responsible for writing out all values for 1 radix.
// Loops if there are more than 16 values to be written out.
// All start indices are rounded down to the nearest multiple of 16, and
// all end indices are rounded up to the nearest multiple of 16.
// Thus it can do extra work if the start and end indices are not multiples of 16
// This is bounded by a factor of 2 (it can do 2X more work at most).
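// Worked example: if a radix's global offset is 37 and it owns 5 elements,
// startPos = 32 and endPos = 48, so the half-warp performs one aligned
// 16-wide pass over [32,48) and the guards below mask off positions 32..36
// (before the offset) and 42..47 (past the last element).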
const uint halfWarpID = threadIdx.x >> 4;
const uint halfWarpOffset = threadIdx.x & 0xF;
const uint leadingInvalid = sOffsets[halfWarpID] & 0xF;
uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0;
uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 -
((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF);
uint numIterations = endPos - startPos;
uint outOffset = startPos + halfWarpOffset;
uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset;
for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16)
{
if( (outOffset >= sOffsets[halfWarpID]) &&
(inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID]))
{
if(blockId < totalBlocks - 1 || outOffset < numElements)
{
outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]);
}
}
}
}
if (loop)
{
blockId += gridDim.x;
__syncthreads();
}
else
break;
}
}
extern "C" void checkCudaError(const char *msg)
{
#if defined(_DEBUG) || defined(DEBUG)
cudaError_t e = cudaThreadSynchronize();
if( e != cudaSuccess )
{
fprintf(stderr, "CUDA Error %s : %s\n", msg, cudaGetErrorString(e));
exit(EXIT_FAILURE);
}
e = cudaGetLastError();
if( e != cudaSuccess )
{
fprintf(stderr, "CUDA Error %s : %s\n", msg, cudaGetErrorString(e));
exit(EXIT_FAILURE);
}
#endif
}
//----------------------------------------------------------------------------
// Perform one step of the radix sort. Sorts by nbits key bits per step,
// starting at startbit.
//
// Uses cudppScan() for the prefix sum of radix counters.
//----------------------------------------------------------------------------
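// Sketch of the per-step pipeline implemented below (error checks and
// persistent-CTA tuning omitted):
//   1. radixSortBlocks  : locally sort tiles of 4*CTA_SIZE elements on the
//                         current 4-bit digit into tempKeys/tempValues
//   2. findRadixOffsets : per tile of 2*CTA_SIZE elements, count each digit
//                         and record where each digit's run begins
//   3. cudppScan        : prefix sum over the 16*numBlocks2 digit counters
//                         to produce global scatter offsets
//   4. reorderData      : scatter the locally sorted elements to their
//                         global positions in keys/values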
template<uint nbits, uint startbit, bool flip, bool unflip>
void radixSortStep(uint *keys,
uint *values,
uint *tempKeys,
uint *tempValues,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements)
{
const uint eltsPerBlock = RadixSort::CTA_SIZE * 4;
const uint eltsPerBlock2 = RadixSort::CTA_SIZE * 2;
bool fullBlocks = ((numElements % eltsPerBlock) == 0);
uint numBlocks = (fullBlocks) ?
(numElements / eltsPerBlock) :
(numElements / eltsPerBlock + 1);
uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ?
(numElements / eltsPerBlock2) :
(numElements / eltsPerBlock2 + 1);
bool loop = numBlocks > 65535;
uint blocks = loop ? 65535 : numBlocks;
uint blocksFind = loop ? 65535 : numBlocks2;
uint blocksReorder = loop ? 65535 : numBlocks2;
uint threshold = fullBlocks ? persistentCTAThresholdFullBlocks[0] : persistentCTAThreshold[0];
if (bUsePersistentCTAs && (numElements >= threshold))
{
loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536);
// Run an empty kernel -- this seems to reset some of the CTA scheduling hardware
// on GT200, resulting in better scheduling and lower run times
if (startbit > 0)
{
emptyKernel<<<numCTAs(emptyKernel), RadixSort::CTA_SIZE>>>();
}
}
blocks = numBlocks;
blocksFind = numBlocks2;
blocksReorder = numBlocks2;
if (fullBlocks)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocks = flip? numCTAs(radixSortBlocks<4, 0, true, true, true>) : numCTAs(radixSortBlocks<4, 0, true, false, true>);
}
radixSortBlocks<nbits, startbit, true, flip, true>
<<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
else
{
radixSortBlocks<nbits, startbit, true, flip, false>
<<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocks = flip ? numCTAs(radixSortBlocks<4, 0, false, true, true>) : numCTAs(radixSortBlocks<4, 0, false, false, true>);
}
radixSortBlocks<nbits, startbit, false, flip, true>
<<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
else
{
radixSortBlocks<nbits, startbit, false, flip, false>
<<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
}
checkCudaError("radixSortBlocks");
if (fullBlocks)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksFind = numCTAs(findRadixOffsets<0, true, true>);
}
findRadixOffsets<startbit, true, true>
<<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
{
findRadixOffsets<startbit, true, false>
<<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksFind = numCTAs(findRadixOffsets<0, false, true>);
}
findRadixOffsets<startbit, false, true>
<<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
{
findRadixOffsets<startbit, false, false>
<<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
}
checkCudaError("findRadixOffsets");
cudppScan(scanPlan, countersSum, counters, 16*numBlocks2);
if (fullBlocks)
{
if (bManualCoalesce)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ? numCTAs(reorderData<0, true, true, true, true>) :
numCTAs(reorderData<0, true, true, false, true>);
}
reorderData<startbit, true, true, unflip, true>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
else
{
reorderData<startbit, true, true, unflip, false>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ? numCTAs(reorderData<0, true, false, true, true>) :
numCTAs(reorderData<0, true, false, false, true>);
}
reorderData<startbit, true, false, unflip, true>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
else
{
reorderData<startbit, true, false, unflip, false>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
}
}
else
{
if (bManualCoalesce)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderData<0, false, true, true, true>) :
numCTAs(reorderData<0, false, true, false, true>);
}
reorderData<startbit, false, true, unflip, true>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
else
{
reorderData<startbit, false, true, unflip, false>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderData<0, false, false, true, true>) :
numCTAs(reorderData<0, false, false, false, true>);
}
reorderData<startbit, false, false, unflip, true>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
else
{
reorderData<startbit, false, false, unflip, false>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
}
}
checkCudaError("radixSortStep");
}
//----------------------------------------------------------------------------
// Optimization for sorts of fewer than 4 * CTA_SIZE elements
//----------------------------------------------------------------------------
template <bool flip>
void radixSortSingleBlock(uint *keys,
uint *values,
uint numElements)
{
bool fullBlocks = (numElements % (RadixSort::CTA_SIZE * 4) == 0);
if (fullBlocks)
{
radixSortBlocks<32, 0, true, flip, false>
<<<1, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)keys, (uint4*)values,
(uint4*)keys, (uint4*)values,
numElements, 1 );
}
else
{
radixSortBlocks<32, 0, false, flip, false>
<<<1, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)keys, (uint4*)values,
(uint4*)keys, (uint4*)values,
numElements, 1 );
}
if (flip)
unflipFloats<<<1, RadixSort::CTA_SIZE>>>(keys, numElements);
checkCudaError("radixSortSingleBlock");
}
//----------------------------------------------------------------------------
// Main radix sort function. Sorts in place in the keys and values arrays,
// but uses the other device arrays as temporary storage. All pointer
// parameters are device pointers.
//----------------------------------------------------------------------------
extern "C" void radixSort(uint *keys,
uint *values,
uint *tempKeys,
uint *tempValues,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements,
uint keyBits,
bool flipBits = false)
{
if(numElements <= RadixSort::WARP_SIZE)
{
if (flipBits)
radixSortSingleWarp<true><<<1, numElements>>>(keys, values, numElements);
else
radixSortSingleWarp<false><<<1, numElements>>>(keys, values, numElements);
checkCudaError("radixSortSingleWarp");
return;
}
if(numElements <= RadixSort::CTA_SIZE * 4)
{
if (flipBits)
radixSortSingleBlock<true>(keys, values, numElements);
else
radixSortSingleBlock<false>(keys, values, numElements);
return;
}
// flip float bits on the first pass, unflip on the last pass
if (flipBits)
{
radixSortStep<4, 0, true, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
else
{
radixSortStep<4, 0, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 4)
{
radixSortStep<4, 4, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 8)
{
radixSortStep<4, 8, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 12)
{
radixSortStep<4, 12, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 16)
{
radixSortStep<4, 16, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 20)
{
radixSortStep<4, 20, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 24)
{
radixSortStep<4, 24, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 28)
{
if (flipBits) // last pass
{
radixSortStep<4, 28, false, true>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
else
{
radixSortStep<4, 28, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
}
checkCudaError("radixSort");
}
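//----------------------------------------------------------------------------
// Illustrative usage sketch only (hypothetical helper, not part of the
// original interface): shows how the temporary buffers consumed by radixSort
// above are typically sized. The CUDPP scan plan is assumed to be a uint add
// scan created by the caller for at least 16 * numBlocks2 elements.
//----------------------------------------------------------------------------
extern "C" void exampleRadixSortUsage(uint *d_keys,
                                      uint *d_values,
                                      CUDPPHandle scanPlan,
                                      uint numElements)
{
    // findRadixOffsets/reorderData work on blocks of 2 * CTA_SIZE elements,
    // with 16 radix counters per block.
    uint numBlocks2 = (numElements + 2 * RadixSort::CTA_SIZE - 1) /
                      (2 * RadixSort::CTA_SIZE);
    uint *d_tempKeys = 0, *d_tempValues = 0;
    uint *d_counters = 0, *d_countersSum = 0, *d_blockOffsets = 0;
    cudaMalloc((void**)&d_tempKeys, numElements * sizeof(uint));
    cudaMalloc((void**)&d_tempValues, numElements * sizeof(uint));
    cudaMalloc((void**)&d_counters, 16 * numBlocks2 * sizeof(uint));
    cudaMalloc((void**)&d_countersSum, 16 * numBlocks2 * sizeof(uint));
    cudaMalloc((void**)&d_blockOffsets, 16 * numBlocks2 * sizeof(uint));
    // sort all 32 key bits in place; flipBits stays false for integer keys
    radixSort(d_keys, d_values, d_tempKeys, d_tempValues,
              d_counters, d_countersSum, d_blockOffsets,
              scanPlan, numElements, 32, false);
    checkCudaError("exampleRadixSortUsage");
    cudaFree(d_tempKeys);
    cudaFree(d_tempValues);
    cudaFree(d_counters);
    cudaFree(d_countersSum);
    cudaFree(d_blockOffsets);
}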
extern "C" void radixSortFloatKeys(float *keys,
uint *values,
float *tempKeys,
uint *tempValues,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements,
uint keyBits,
bool negativeKeys)
{
radixSort((uint*)keys, values, (uint*)tempKeys, tempValues, counters,
countersSum, blockOffsets, scanPlan, numElements, keyBits,
negativeKeys);
checkCudaError("radixSortFloatKeys");
}
//----------------------------------------------------------------------------
// Perform one step of the radix sort. Sorts by nbits key bits per step,
// starting at startbit.
//----------------------------------------------------------------------------
template<uint nbits, uint startbit, bool flip, bool unflip>
void radixSortStepKeysOnly(uint *keys,
uint *tempKeys,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements)
{
const uint eltsPerBlock = RadixSort::CTA_SIZE * 4;
const uint eltsPerBlock2 = RadixSort::CTA_SIZE * 2;
bool fullBlocks = ((numElements % eltsPerBlock) == 0);
uint numBlocks = (fullBlocks) ?
(numElements / eltsPerBlock) :
(numElements / eltsPerBlock + 1);
uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ?
(numElements / eltsPerBlock2) :
(numElements / eltsPerBlock2 + 1);
bool loop = numBlocks > 65535;
//bool loop2 = numBlocks2 > 65535;
uint blocks = loop ? 65535 : numBlocks;
uint blocksFind = loop ? 65535 : numBlocks2;
uint blocksReorder = loop ? 65535 : numBlocks2;
uint threshold = fullBlocks ? persistentCTAThresholdFullBlocks[1] : persistentCTAThreshold[1];
if (bUsePersistentCTAs && (numElements >= threshold))
{
loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536);
}
blocks = numBlocks;
blocksFind = numBlocks2;
blocksReorder = numBlocks2;
if (fullBlocks)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocks = flip ? numCTAs(radixSortBlocksKeysOnly<4, 0, true, true, true>) :
numCTAs(radixSortBlocksKeysOnly<4, 0, true, false, true>);
}
radixSortBlocksKeysOnly<nbits, startbit, true, flip, true>
<<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)tempKeys, (uint4*)keys, numElements, numBlocks);
}
else
radixSortBlocksKeysOnly<nbits, startbit, true, flip, false>
<<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)tempKeys, (uint4*)keys, numElements, numBlocks);
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocks = flip ? numCTAs(radixSortBlocksKeysOnly<4, 0, false, true, true>) :
numCTAs(radixSortBlocksKeysOnly<4, 0, false, false, true>);
}
radixSortBlocksKeysOnly<nbits, startbit, false, flip, true>
<<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)tempKeys, (uint4*)keys, numElements, numBlocks);
}
else
radixSortBlocksKeysOnly<nbits, startbit, false, flip, false>
<<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)tempKeys, (uint4*)keys, numElements, numBlocks);
}
if (fullBlocks)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksFind = numCTAs(findRadixOffsets<0, true, true>);
}
findRadixOffsets<startbit, true, true>
<<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
findRadixOffsets<startbit, true, false>
<<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksFind = numCTAs(findRadixOffsets<0, false, true>);
}
findRadixOffsets<startbit, false, true>
<<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
findRadixOffsets<startbit, false, false>
<<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
cudppScan(scanPlan, countersSum, counters, 16*numBlocks2);
if (fullBlocks)
{
if (bManualCoalesce)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderDataKeysOnly<0, true, true, true, true>) :
numCTAs(reorderDataKeysOnly<0, true, true, false, true>);
}
reorderDataKeysOnly<startbit, true, true, unflip, true>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
reorderDataKeysOnly<startbit, true, true, unflip, false>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderDataKeysOnly<0, true, false, true, true>) :
numCTAs(reorderDataKeysOnly<0, true, false, false, true>);
}
reorderDataKeysOnly<startbit, true, false, unflip, true>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
reorderDataKeysOnly<startbit, true, false, unflip, false>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
}
else
{
if (bManualCoalesce)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderDataKeysOnly<0, false, true, true, true>) :
numCTAs(reorderDataKeysOnly<0, false, true, false, true>);
}
reorderDataKeysOnly<startbit, false, true, unflip, true>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
reorderDataKeysOnly<startbit, false, true, unflip, false>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderDataKeysOnly<0, false, false, true, true>) :
numCTAs(reorderDataKeysOnly<0, false, false, false, true>);
}
reorderDataKeysOnly<startbit, false, false, unflip, true>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
reorderDataKeysOnly<startbit, false, false, unflip, false>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
}
checkCudaError("radixSortStepKeysOnly");
}
//----------------------------------------------------------------------------
// Optimization for sorts of fewer than 4 * CTA_SIZE elements
//----------------------------------------------------------------------------
template <bool flip>
void radixSortSingleBlockKeysOnly(uint *keys,
uint numElements)
{
bool fullBlocks = (numElements % (RadixSort::CTA_SIZE * 4) == 0);
if (fullBlocks)
{
radixSortBlocksKeysOnly<32, 0, true, flip, false>
<<<1, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)keys, (uint4*)keys, numElements, 1 );
}
else
{
radixSortBlocksKeysOnly<32, 0, false, flip, false>
<<<1, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)keys, (uint4*)keys, numElements, 1 );
}
if (flip)
unflipFloats<<<1, RadixSort::CTA_SIZE>>>(keys, numElements);
checkCudaError("radixSortSingleBlockKeysOnly");
}
//----------------------------------------------------------------------------
// Main key-only radix sort function. Sorts in place in the keys and values
// arrays, but uses the other device arrays as temporary storage. All pointer
// parameters are device pointers. Uses cudppScan() for the prefix sum of
// radix counters.
//----------------------------------------------------------------------------
extern "C" void radixSortKeysOnly(uint *keys,
uint *tempKeys,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements,
uint keyBits,
bool flipBits = false)
{
if(numElements <= RadixSort::WARP_SIZE)
{
if (flipBits)
radixSortSingleWarpKeysOnly<true><<<1, numElements>>>(keys, numElements);
else
radixSortSingleWarpKeysOnly<false><<<1, numElements>>>(keys, numElements);
checkCudaError("radixSortSingleWarpKeysOnly");
return;
}
if(numElements <= RadixSort::CTA_SIZE * 4)
{
if (flipBits)
radixSortSingleBlockKeysOnly<true>(keys, numElements);
else
radixSortSingleBlockKeysOnly<false>(keys, numElements);
return;
}
// flip float bits on the first pass, unflip on the last pass
if (flipBits)
{
radixSortStepKeysOnly<4, 0, true, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
else
{
radixSortStepKeysOnly<4, 0, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 4)
{
radixSortStepKeysOnly<4, 4, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 8)
{
radixSortStepKeysOnly<4, 8, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 12)
{
radixSortStepKeysOnly<4, 12, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 16)
{
radixSortStepKeysOnly<4, 16, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 20)
{
radixSortStepKeysOnly<4, 20, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 24)
{
radixSortStepKeysOnly<4, 24, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 28)
{
if (flipBits) // last pass
{
radixSortStepKeysOnly<4, 28, false, true>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
else
{
radixSortStepKeysOnly<4, 28, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
}
checkCudaError("radixSortKeysOnly");
}
//----------------------------------------------------------------------------
// Main float key-only radix sort function. Sorts in place in the keys and values
// arrays, but uses the other device arrays as temporary storage. All pointer
// parameters are device pointers. Uses cudppScan() for the prefix sum of
// radix counters.
//----------------------------------------------------------------------------
extern "C" void radixSortFloatKeysOnly(float *keys,
float *tempKeys,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements,
uint keyBits,
bool negativeKeys)
{
radixSortKeysOnly((uint*)keys, (uint*)tempKeys, counters, countersSum, blockOffsets,
scanPlan, numElements, keyBits, negativeKeys);
checkCudaError("radixSortFloatKeysOnly");
}
extern "C" void initDeviceParameters(bool keysOnly)
{
int deviceID = -1;
if (cudaSuccess == cudaGetDevice(&deviceID))
{
cudaDeviceProp devprop;
cudaGetDeviceProperties(&devprop, deviceID);
int smVersion = devprop.major * 10 + devprop.minor;
// sm_12 and later devices don't need help with coalescing in the reorderData kernel
bManualCoalesce = (smVersion < 12);
bUsePersistentCTAs = (smVersion < 20);
if (bUsePersistentCTAs)
{
// Empirically we have found on pre-Fermi GPUs that for some (usually larger) sort
// sizes it is better to use exactly as many "persistent" CTAs
// as can fill the GPU, which loop over the "blocks" of work. For smaller
// arrays it is better to use the typical CUDA approach of launching one CTA
// per block of work.
// 0-element of these two-element arrays is for key-value sorts
// 1-element is for key-only sorts
persistentCTAThreshold[0] = bManualCoalesce ? 16777216 : 524288;
persistentCTAThresholdFullBlocks[0] = bManualCoalesce ? 2097152: 524288;
persistentCTAThreshold[1] = bManualCoalesce ? 16777216 : 8388608;
persistentCTAThresholdFullBlocks[1] = bManualCoalesce ? 2097152: 0;
// create a map of function pointers to register counts for more accurate occupancy calculation
// Must pass in the dynamic shared memory used by each kernel, since the runtime doesn't know it
// Note we only insert the "loop" version of the kernels (the one with the last template param = true)
// Because those are the only ones that require persistent CTAs that maximally fill the device.
computeNumCTAs(radixSortBlocks<4, 0, false, false, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocks<4, 0, false, true, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocks<4, 0, true, false, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocks<4, 0, true, true, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocksKeysOnly<4, 0, false, false, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocksKeysOnly<4, 0, false, true, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocksKeysOnly<4, 0, true, false, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocksKeysOnly<4, 0, true, true, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(findRadixOffsets<0, false, true>, 2 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(findRadixOffsets<0, true, true>, 2 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(reorderData<0, false, false, false, true>, 0);
computeNumCTAs(reorderData<0, false, false, true, true>, 0);
computeNumCTAs(reorderData<0, false, true, false, true>, 0);
computeNumCTAs(reorderData<0, false, true, true, true>, 0);
computeNumCTAs(reorderData<0, true, false, false, true>, 0);
computeNumCTAs(reorderData<0, true, false, true, true>, 0);
computeNumCTAs(reorderData<0, true, true, false, true>, 0);
computeNumCTAs(reorderData<0, true, true, true, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, false, false, false, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, false, false, true, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, false, true, false, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, false, true, true, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, true, false, false, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, true, false, true, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, true, true, false, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, true, true, true, true>, 0);
computeNumCTAs(emptyKernel, 0);
}
}
}
} // namespace nvRadixSort
|
ba03f8c6dc80b415243ef29ebb011c9a07c6f436.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_conv_layer.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int_tp i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int_tp g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
#if CUDNN_VERSION_MIN(4, 0, 0)
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
#else
CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
#endif
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups CUDA_KERNEL(1, 1)();
}
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int_tp i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int_tp g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups CUDA_KERNEL(1, 1)();
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
| ba03f8c6dc80b415243ef29ebb011c9a07c6f436.cu | #ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_conv_layer.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int_tp i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int_tp g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
#if CUDNN_VERSION_MIN(4, 0, 0)
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
#else
CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
#endif
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups CUDA_KERNEL(1, 1)();
}
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int_tp i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int_tp g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups CUDA_KERNEL(1, 1)();
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
|
825954c246062273441d5dc2c497f60b50043d47.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "narrowphase_implement.h"
#include "bvh_math.cuh"
#include <gjk_math.cu>
#include <stripedModel.cu>
#include <CudaBase.h>
#define GJK_BLOCK_SIZE 64
inline __device__ float totalSpeed(float3 * vel, const uint4 & ia, const uint4 & ib)
{
return (float3_length2(vel[ia.x]) + float3_length2(vel[ia.y]) +
float3_length2(vel[ia.z]) + float3_length2(vel[ia.w]) +
float3_length2(vel[ib.x]) + float3_length2(vel[ib.y]) +
float3_length2(vel[ib.z]) + float3_length2(vel[ib.w]));
}
inline __device__ void t0Tetrahedron(TetrahedronProxy & prx,
const uint4 & v,
float3 * pos)
{
prx.p[0] = pos[v.x];
prx.p[1] = pos[v.y];
prx.p[2] = pos[v.z];
prx.p[3] = pos[v.w];
}
inline __device__ void progressTetrahedron(TetrahedronProxy & prx,
const uint4 & v,
float3 * pos,
float3 * vel,
float h)
{
prx.p[0] = float3_add(pos[v.x], scale_float3_by(vel[v.x], h));
prx.p[1] = float3_add(pos[v.y], scale_float3_by(vel[v.y], h));
prx.p[2] = float3_add(pos[v.z], scale_float3_by(vel[v.z], h));
prx.p[3] = float3_add(pos[v.w], scale_float3_by(vel[v.w], h));
}
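// Returns the velocity of the interpolated point (barycentric-weighted sum of
// the four vertex velocities, skipping near-zero weights) projected onto d.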
inline __device__ float velocityOnTetrahedronAlong(float3 * v, const uint4 & t, const BarycentricCoordinate & coord, const float3 & d)
{
float3 vot = make_float3(0.f, 0.f, 0.f);
if(coord.x > 1e-5f)
vot = float3_add(vot, scale_float3_by(v[t.x], coord.x));
if(coord.y > 1e-5f)
vot = float3_add(vot, scale_float3_by(v[t.y], coord.y));
if(coord.z > 1e-5f)
vot = float3_add(vot, scale_float3_by(v[t.z], coord.z));
if(coord.w > 1e-5f)
vot = float3_add(vot, scale_float3_by(v[t.w], coord.w));
return float3_dot(vot, d);
}
inline __device__ float velocityOnTetrahedronAlong2(float3 * v, const BarycentricCoordinate & coord, const float3 & d)
{
float3 vot = make_float3(0.f, 0.f, 0.f);
if(coord.x > 1e-5f)
vot = float3_add(vot, scale_float3_by(v[0], coord.x));
if(coord.y > 1e-5f)
vot = float3_add(vot, scale_float3_by(v[2], coord.y));
if(coord.z > 1e-5f)
vot = float3_add(vot, scale_float3_by(v[4], coord.z));
if(coord.w > 1e-5f)
vot = float3_add(vot, scale_float3_by(v[6], coord.w));
return float3_dot(vot, d);
}
__global__ void writePairInd_kernel(uint2 * dstPair,
uint2 * srcPair,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
dstPair[ind] = srcPair[ind];
}
__global__ void writePairPosAndVel_kernel(float3 * dstPos,
float3 * dstVel,
uint2 * pairs,
float3 * srcPos,
float3 * srcVel,
uint4 * indices,
uint * pointStart, uint * indexStart,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
// interleaved output layout: a00 b00 a01 b01 a02 b02 a03 b03 a10 b10 a11 b11 ...
// i.e. 8 pos/vel entries per pair: 4 vertices of tetrahedron a, 4 of tetrahedron b
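// e.g. pair 0 fills dst slots 0..7 as A.v0 B.v0 A.v1 B.v1 A.v2 B.v2 A.v3 B.v3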
const uint iPair = ind>>3;
// 512 threads 256 va 256 vb
const int isB = threadIdx.x & 1;
uint4 ia;
if(isB) ia = computePointIndex(pointStart, indexStart, indices, pairs[iPair].y);
else ia = computePointIndex(pointStart, indexStart, indices, pairs[iPair].x);
uint * tetVertices = &ia.x;
// 512 threads 64 ta 64 tb
const int iVert = (threadIdx.x >> 1) & 3;
dstPos[ind] = srcPos[tetVertices[iVert]];
dstVel[ind] = srcVel[tetVertices[iVert]];
}
__global__ void computeSeparateAxis_kernel(ContactData * dstContact,
uint2 * pairs,
float3 * pos, float3 * vel,
uint4* indices,
uint * pointStart, uint * indexStart,
uint maxInd)
{
__shared__ Simplex sS[GJK_BLOCK_SIZE];
__shared__ TetrahedronProxy sPrxA[GJK_BLOCK_SIZE];
__shared__ TetrahedronProxy sPrxB[GJK_BLOCK_SIZE];
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
const uint4 ita = computePointIndex(pointStart, indexStart, indices, pairs[ind].x);
const uint4 itb = computePointIndex(pointStart, indexStart, indices, pairs[ind].y);
progressTetrahedron(sPrxA[threadIdx.x], ita, pos, vel, 0.01667f);
progressTetrahedron(sPrxB[threadIdx.x], itb, pos, vel, 0.01667f);
ClosestPointTestContext ctc;
BarycentricCoordinate coord;
computeSeparateDistance(sS[threadIdx.x], sPrxA[threadIdx.x], sPrxB[threadIdx.x], GJK_THIN_MARGIN, ctc, dstContact[ind].separateAxis,
coord);
interpolatePointAB(sS[threadIdx.x], coord, dstContact[ind].localA, dstContact[ind].localB);
}
__global__ void computeTimeOfImpact_kernel(ContactData * dstContact,
uint2 * pairs,
float3 * pos, float3 * vel,
uint4 * indices,
uint * pointStart, uint * indexStart,
uint maxInd)
{
__shared__ Simplex sS[GJK_BLOCK_SIZE];
__shared__ TetrahedronProxy sPrxA[GJK_BLOCK_SIZE];
__shared__ TetrahedronProxy sPrxB[GJK_BLOCK_SIZE];
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
dstContact[ind].separateAxis=make_float4(0.f, 0.f, 0.f, 0.f);
dstContact[ind].timeOfImpact = 1e8f;
const uint4 ita = computePointIndex(pointStart, indexStart, indices, pairs[ind].x);
const uint4 itb = computePointIndex(pointStart, indexStart, indices, pairs[ind].y);
if(totalSpeed(vel, ita, itb) < 1e-8f) return;
t0Tetrahedron(sPrxA[threadIdx.x], ita, pos);
t0Tetrahedron(sPrxB[threadIdx.x], itb, pos);
ClosestPointTestContext ctc;
BarycentricCoordinate coord;
float4 sas;
computeSeparateDistance(sS[threadIdx.x], sPrxA[threadIdx.x], sPrxB[threadIdx.x], GJK_THIN_MARGIN, ctc, sas,
coord);
// intersected try zero margin
if(sas.w < 1.f) {
computeSeparateDistance(sS[threadIdx.x], sPrxA[threadIdx.x], sPrxB[threadIdx.x], 0.f, ctc, sas,
coord);
}
// still intersected no solution
if(sas.w < 1.f) return;
interpolatePointAB(sS[threadIdx.x], coord, dstContact[ind].localA, dstContact[ind].localB);
float3 nor = float3_normalize(float3_from_float4(sas));
float closeInSpeed = velocityOnTetrahedronAlong(vel, itb, getBarycentricCoordinate4Relativei(dstContact[ind].localB, pos, itb),
nor)
- velocityOnTetrahedronAlong(vel, ita, getBarycentricCoordinate4Relativei(dstContact[ind].localA, pos, ita),
nor);
// going apart no contact
if(closeInSpeed < 1e-8f) {
return;
}
float separateDistance = float4_length(sas);
// within thin shell margin
if(separateDistance < GJK_THIN_MARGIN2) {
dstContact[ind].timeOfImpact = 1e-9f;
dstContact[ind].separateAxis = sas;
return;
}
// use thin shell margin
separateDistance -= GJK_THIN_MARGIN2;
dstContact[ind].separateAxis = sas;
float lastDistance = separateDistance;
float toi = 0.f;
int i = 0;
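// Conservative advancement loop: each iteration advances time by the current
// separation divided by the closing speed, scaled by a factor below one
// (0.743 here) so the step stays short of the true impact time, then re-runs
// GJK at the advanced configuration. It stops when the gap is small enough,
// the pair stops approaching, penetration occurs, or the step budget is hit.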
while (i<GJK_MAX_NUM_ITERATIONS) {
// going apart
if(closeInSpeed < 1e-8f) {
dstContact[ind].timeOfImpact = 1e8f;
// for debug purpose
// dstContact[ind].separateAxis = sas;
// interpolatePointAB(sS[threadIdx.x], coord, dstContact[ind].localA, dstContact[ind].localB);
break;
}
toi += separateDistance / closeInSpeed * .743f;
// too far away
if(toi > GJK_STEPSIZE) {
dstContact[ind].timeOfImpact = toi;
// for debug purpose
// dstContact[ind].separateAxis = sas;
// interpolatePointAB(sS[threadIdx.x], coord, dstContact[ind].localA, dstContact[ind].localB);
break;
}
progressTetrahedron(sPrxA[threadIdx.x], ita, pos, vel, toi);
progressTetrahedron(sPrxB[threadIdx.x], itb, pos, vel, toi);
computeSeparateDistance(sS[threadIdx.x], sPrxA[threadIdx.x], sPrxB[threadIdx.x], GJK_THIN_MARGIN, ctc, sas,
coord);
// penetrated use result of last step
if(sas.w < 1.f) {
break;
}
// output toi and r
dstContact[ind].timeOfImpact = toi;
interpolatePointAB(sS[threadIdx.x], coord, dstContact[ind].localA, dstContact[ind].localB);
separateDistance = float4_length(sas);
// close enough use result of last step
if(separateDistance < 0.001f) {
break;
}
// going apart, no contact
if(separateDistance >= lastDistance) {
dstContact[ind].timeOfImpact = 1e8f;
break;
}
lastDistance = separateDistance;
// output sa
dstContact[ind].separateAxis = sas;
nor = float3_normalize(float3_from_float4(sas));
closeInSpeed = velocityOnTetrahedronAlong(vel, itb, getBarycentricCoordinate4Relativei(dstContact[ind].localB, pos, itb),
nor)
- velocityOnTetrahedronAlong(vel, ita, getBarycentricCoordinate4Relativei(dstContact[ind].localA, pos, ita),
nor);
i++;
}
}
__global__ void advanceTimeOfImpactIterative_kernel(ContactData * dstContact,
float3 * pos, float3 * vel,
uint maxInd)
{
__shared__ Simplex sS[GJK_BLOCK_SIZE];
__shared__ TetrahedronProxy sPrxA[GJK_BLOCK_SIZE];
__shared__ TetrahedronProxy sPrxB[GJK_BLOCK_SIZE];
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
const ContactData ct = dstContact[ind];
// already determined no contact
if(ct.separateAxis.w < 1.f || ct.timeOfImpact > GJK_STEPSIZE)
return;
float3 * ppos = & pos[ind<<3];
float3 * pvel = & vel[ind<<3];
float4 sas = ct.separateAxis;
const float3 nor = float3_normalize(float3_from_float4(sas));
float closeInSpeed = velocityOnTetrahedronAlong2(&pvel[1], getBarycentricCoordinate4Relative2(ct.localB, &ppos[1]),
nor)
- velocityOnTetrahedronAlong2(pvel, getBarycentricCoordinate4Relative2(ct.localA, ppos),
nor);
// going apart
if(closeInSpeed < 1e-8f) {
dstContact[ind].timeOfImpact = 1e8f;
return;
}
float separateDistance = float4_length(ct.separateAxis);
// within thin shell margin
if(separateDistance <= GJK_THIN_MARGIN2)
return;
// use thin shell margin
separateDistance -= GJK_THIN_MARGIN2;
const float toi = ct.timeOfImpact + separateDistance / closeInSpeed * .571f;
// too far away
if(toi > GJK_STEPSIZE) {
dstContact[ind].timeOfImpact = 1e8f;
return;
}
sPrxA[threadIdx.x].p[0] = float3_add( ppos[0], scale_float3_by(pvel[0], toi) );
sPrxB[threadIdx.x].p[0] = float3_add( ppos[1], scale_float3_by(pvel[1], toi) );
sPrxA[threadIdx.x].p[1] = float3_add( ppos[2], scale_float3_by(pvel[2], toi) );
sPrxB[threadIdx.x].p[1] = float3_add( ppos[3], scale_float3_by(pvel[3], toi) );
sPrxA[threadIdx.x].p[2] = float3_add( ppos[4], scale_float3_by(pvel[4], toi) );
sPrxB[threadIdx.x].p[2] = float3_add( ppos[5], scale_float3_by(pvel[5], toi) );
sPrxA[threadIdx.x].p[3] = float3_add( ppos[6], scale_float3_by(pvel[6], toi) );
sPrxB[threadIdx.x].p[3] = float3_add( ppos[7], scale_float3_by(pvel[7], toi) );
ClosestPointTestContext ctc;
BarycentricCoordinate coord;
computeSeparateDistance(sS[threadIdx.x], sPrxA[threadIdx.x], sPrxB[threadIdx.x], GJK_THIN_MARGIN, ctc, sas,
coord);
// penetrated use result of last step
if(sas.w < 1.f) return;
// output
interpolatePointAB(sS[threadIdx.x], coord, dstContact[ind].localA, dstContact[ind].localB);
dstContact[ind].separateAxis = sas;
dstContact[ind].timeOfImpact = toi;
}
__global__ void computeInitialSeparation_kernel(ContactData * dstContact,
float3 * pos,
uint maxInd)
{
__shared__ Simplex sS[GJK_BLOCK_SIZE];
__shared__ TetrahedronProxy sPrxA[GJK_BLOCK_SIZE];
__shared__ TetrahedronProxy sPrxB[GJK_BLOCK_SIZE];
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
dstContact[ind].separateAxis=make_float4(0.f, 0.f, 0.f, 0.f);
dstContact[ind].timeOfImpact = 1e8f;
float3 * ppos = & pos[ind<<3];
sPrxA[threadIdx.x].p[0] = ppos[0];
sPrxB[threadIdx.x].p[0] = ppos[1];
sPrxA[threadIdx.x].p[1] = ppos[2];
sPrxB[threadIdx.x].p[1] = ppos[3];
sPrxA[threadIdx.x].p[2] = ppos[4];
sPrxB[threadIdx.x].p[2] = ppos[5];
sPrxA[threadIdx.x].p[3] = ppos[6];
sPrxB[threadIdx.x].p[3] = ppos[7];
ClosestPointTestContext ctc;
BarycentricCoordinate coord;
float4 sas;
// computeSeparateDistance(sS[threadIdx.x], sPrxA[threadIdx.x], sPrxB[threadIdx.x], GJK_THIN_MARGIN, ctc, sas,
// coord);
// intersected try zero margin
// if(sas.w < 1.f) {
computeSeparateDistance(sS[threadIdx.x], sPrxA[threadIdx.x], sPrxB[threadIdx.x], 0.f, ctc,
sas,
coord);
// }
// still intersected no solution
if(sas.w < 1.f) return;
// output
interpolatePointAB(sS[threadIdx.x], coord, dstContact[ind].localA, dstContact[ind].localB);
dstContact[ind].separateAxis = sas;
dstContact[ind].timeOfImpact = 1e-9f;
}
__device__ int isValidPair(float toi, const float4 & sa)
{
if(toi >= GJK_STEPSIZE) return 0;
if(float3_length2(sa) < 1e-12f) return 0;
return 1;
}
__global__ void computeValidPairs_kernel(uint* dstCounts,
ContactData * srcContact,
uint numContacts,
uint scanBufferLength)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= scanBufferLength) return;
if(ind >= numContacts) {
dstCounts[ind] = 0;
return;
}
const ContactData cd = srcContact[ind];
dstCounts[ind] = isValidPair(cd.timeOfImpact, cd.separateAxis);
}
__global__ void squeezeContactPairs_kernel(uint2 * dstPairs, uint2 * srcPairs,
ContactData * dstContact, ContactData *srcContact,
uint * counts, uint * packLocs,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
if(!counts[ind]) return;
const uint toLoc = packLocs[ind];
dstPairs[toLoc] = srcPairs[ind];
dstContact[toLoc] = srcContact[ind];
}
__global__ void squeezeContactPosAndVel_kernel(float3 * dstPos,
float3 * srcPos,
float3 * dstVel,
float3 * srcVel,
uint2 * dstPairs,
uint2 * srcPairs,
ContactData * dstContact,
ContactData *srcContact,
uint *counts, uint * packLocs,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
const uint iPair = ind>>3;
if(!counts[iPair]) return;
uint toLoc = (packLocs[iPair]<<3) + (ind & 7);
dstPos[toLoc] = srcPos[ind];
dstVel[toLoc] = srcVel[ind];
if(ind & 7) return;
toLoc = packLocs[iPair];
dstPairs[toLoc] = srcPairs[iPair];
dstContact[toLoc] = srcContact[iPair];
}
__global__ void resetX_kernel(float3 * dst, float3 * src,
float3 * vel,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
ind <<= 3;
int i;
for(i=0; i < 8; i++) {
if(ind>= maxInd) return;
dst[ind] = src[ind];
vel[ind] = make_float3(0.f, 0.f, 0.f);
ind++;
}
}
extern "C" {
void narrowphaseComputeSeparateAxis(ContactData * dstContact,
uint2 * pairs,
float3 * pos,
float3 * vel,
uint4 * ind,
uint * pointStart, uint * indexStart,
uint numOverlappingPairs)
{
dim3 block(GJK_BLOCK_SIZE, 1, 1);
unsigned nblk = iDivUp(numOverlappingPairs, GJK_BLOCK_SIZE);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( computeSeparateAxis_kernel), dim3(grid), dim3(block) , 0, 0, dstContact, pairs, pos, vel, ind, pointStart, indexStart, numOverlappingPairs);
}
void narrowphaseComputeTimeOfImpact(ContactData * dstContact,
uint2 * pairs,
float3 * pos,
float3 * vel,
uint4 * ind,
uint * pointStart, uint * indexStart,
uint numOverlappingPairs)
{
dim3 block(GJK_BLOCK_SIZE, 1, 1);
unsigned nblk = iDivUp(numOverlappingPairs, GJK_BLOCK_SIZE);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( computeTimeOfImpact_kernel), dim3(grid), dim3(block) , 0, 0, dstContact, pairs, pos, vel, ind, pointStart, indexStart, numOverlappingPairs);
}
void narrowphase_computeInitialSeparation(ContactData * dstContact,
float3 * pos,
uint numOverlappingPairs)
{
dim3 block(GJK_BLOCK_SIZE, 1, 1);
unsigned nblk = iDivUp(numOverlappingPairs, GJK_BLOCK_SIZE);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( computeInitialSeparation_kernel), dim3(grid), dim3(block) , 0, 0, dstContact,
pos,
numOverlappingPairs);
}
void narrowphase_advanceTimeOfImpactIterative(ContactData * dstContact,
float3 * pos,
float3 * vel,
uint numOverlappingPairs)
{
dim3 block(GJK_BLOCK_SIZE, 1, 1);
unsigned nblk = iDivUp(numOverlappingPairs, GJK_BLOCK_SIZE);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( advanceTimeOfImpactIterative_kernel), dim3(grid), dim3(block) , 0, 0, dstContact,
pos,
vel,
numOverlappingPairs);
}
void narrowphaseComputeValidPairs(uint * dstCounts,
ContactData * srcContact,
uint numContacts,
uint scanBufferLength)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(scanBufferLength, 512);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( computeValidPairs_kernel), dim3(grid), dim3(block) , 0, 0, dstCounts, srcContact, numContacts, scanBufferLength);
}
void narrowphaseSqueezeContactPairs(uint2 * dstPairs, uint2 * srcPairs,
ContactData * dstContact, ContactData *srcContact,
uint * counts, uint * packLocs,
uint maxInd)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(maxInd, 512);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( squeezeContactPairs_kernel), dim3(grid), dim3(block) , 0, 0, dstPairs, srcPairs,
dstContact, srcContact,
counts, packLocs,
maxInd);
}
void narrowphaseResetX(float3 * dst, float3 *src,
float3 * vel,
uint maxInd)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(maxInd>>3, 512);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( resetX_kernel), dim3(grid), dim3(block) , 0, 0, dst, src, vel, maxInd);
}
void narrowphase_writePairPosAndVel(float3 * dstPos,
float3 * dstVel,
uint2 * pairs,
float3 * pos,
float3 * vel,
uint4 * ind,
uint * pointStart, uint * indexStart,
uint numOverlappingPairs)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(numOverlappingPairs<<3, 512);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( writePairPosAndVel_kernel), dim3(grid), dim3(block) , 0, 0, dstPos,
dstVel,
pairs,
pos,
vel,
ind,
pointStart, indexStart,
numOverlappingPairs<<3);
}
void narrowphase_writePairs(uint2 * dstPair,
uint2 * srcPair,
uint numOverlappingPairs)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(numOverlappingPairs, 512);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( writePairInd_kernel), dim3(grid), dim3(block) , 0, 0, dstPair,
srcPair,
numOverlappingPairs);
}
void narrowphase_squeezeContactPosAndVel(float3 * dstPos,
float3 * srcPos,
float3 * dstVel,
float3 * srcVel,
uint2 * dstPairs,
uint2 * srcPairs,
ContactData * dstContact,
ContactData *srcContact,
uint *counts, uint * scanResult,
uint numPairs)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(numPairs<<3, 512);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( squeezeContactPosAndVel_kernel), dim3(grid), dim3(block) , 0, 0, dstPos,
srcPos,
dstVel,
srcVel,
dstPairs,
srcPairs,
dstContact,
srcContact,
counts, scanResult,
numPairs<<3);
}
}
| 825954c246062273441d5dc2c497f60b50043d47.cu | #include "narrowphase_implement.h"
#include "bvh_math.cuh"
#include <gjk_math.cu>
#include <stripedModel.cu>
#include <CudaBase.h>
#define GJK_BLOCK_SIZE 64
inline __device__ float totalSpeed(float3 * vel, const uint4 & ia, const uint4 & ib)
{
return (float3_length2(vel[ia.x]) + float3_length2(vel[ia.y]) +
float3_length2(vel[ia.z]) + float3_length2(vel[ia.w]) +
float3_length2(vel[ib.x]) + float3_length2(vel[ib.y]) +
float3_length2(vel[ib.z]) + float3_length2(vel[ib.w]));
}
inline __device__ void t0Tetrahedron(TetrahedronProxy & prx,
const uint4 & v,
float3 * pos)
{
prx.p[0] = pos[v.x];
prx.p[1] = pos[v.y];
prx.p[2] = pos[v.z];
prx.p[3] = pos[v.w];
}
inline __device__ void progressTetrahedron(TetrahedronProxy & prx,
const uint4 & v,
float3 * pos,
float3 * vel,
float h)
{
prx.p[0] = float3_add(pos[v.x], scale_float3_by(vel[v.x], h));
prx.p[1] = float3_add(pos[v.y], scale_float3_by(vel[v.y], h));
prx.p[2] = float3_add(pos[v.z], scale_float3_by(vel[v.z], h));
prx.p[3] = float3_add(pos[v.w], scale_float3_by(vel[v.w], h));
}
inline __device__ float velocityOnTetrahedronAlong(float3 * v, const uint4 & t, const BarycentricCoordinate & coord, const float3 & d)
{
float3 vot = make_float3(0.f, 0.f, 0.f);
if(coord.x > 1e-5f)
vot = float3_add(vot, scale_float3_by(v[t.x], coord.x));
if(coord.y > 1e-5f)
vot = float3_add(vot, scale_float3_by(v[t.y], coord.y));
if(coord.z > 1e-5f)
vot = float3_add(vot, scale_float3_by(v[t.z], coord.z));
if(coord.w > 1e-5f)
vot = float3_add(vot, scale_float3_by(v[t.w], coord.w));
return float3_dot(vot, d);
}
inline __device__ float velocityOnTetrahedronAlong2(float3 * v, const BarycentricCoordinate & coord, const float3 & d)
{
float3 vot = make_float3(0.f, 0.f, 0.f);
if(coord.x > 1e-5f)
vot = float3_add(vot, scale_float3_by(v[0], coord.x));
if(coord.y > 1e-5f)
vot = float3_add(vot, scale_float3_by(v[2], coord.y));
if(coord.z > 1e-5f)
vot = float3_add(vot, scale_float3_by(v[4], coord.z));
if(coord.w > 1e-5f)
vot = float3_add(vot, scale_float3_by(v[6], coord.w));
return float3_dot(vot, d);
}
__global__ void writePairInd_kernel(uint2 * dstPair,
uint2 * srcPair,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
dstPair[ind] = srcPair[ind];
}
__global__ void writePairPosAndVel_kernel(float3 * dstPos,
float3 * dstVel,
uint2 * pairs,
float3 * srcPos,
float3 * srcVel,
uint4 * indices,
uint * pointStart, uint * indexStart,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
// a00 b00 a01 b01 a02 b02 a03 b03 a10 b10 a11 b11 ...
// 8 pos/vel per pair 4 for a 4 for b
const uint iPair = ind>>3;
// 512 threads 256 va 256 vb
const int isB = threadIdx.x & 1;
uint4 ia;
if(isB) ia = computePointIndex(pointStart, indexStart, indices, pairs[iPair].y);
else ia = computePointIndex(pointStart, indexStart, indices, pairs[iPair].x);
uint * tetVertices = &ia.x;
// 512 threads 64 ta 64 tb
const int iVert = (threadIdx.x >> 1) & 3;
dstPos[ind] = srcPos[tetVertices[iVert]];
dstVel[ind] = srcVel[tetVertices[iVert]];
}
__global__ void computeSeparateAxis_kernel(ContactData * dstContact,
uint2 * pairs,
float3 * pos, float3 * vel,
uint4* indices,
uint * pointStart, uint * indexStart,
uint maxInd)
{
__shared__ Simplex sS[GJK_BLOCK_SIZE];
__shared__ TetrahedronProxy sPrxA[GJK_BLOCK_SIZE];
__shared__ TetrahedronProxy sPrxB[GJK_BLOCK_SIZE];
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
const uint4 ita = computePointIndex(pointStart, indexStart, indices, pairs[ind].x);
const uint4 itb = computePointIndex(pointStart, indexStart, indices, pairs[ind].y);
progressTetrahedron(sPrxA[threadIdx.x], ita, pos, vel, 0.01667f);
progressTetrahedron(sPrxB[threadIdx.x], itb, pos, vel, 0.01667f);
ClosestPointTestContext ctc;
BarycentricCoordinate coord;
computeSeparateDistance(sS[threadIdx.x], sPrxA[threadIdx.x], sPrxB[threadIdx.x], GJK_THIN_MARGIN, ctc, dstContact[ind].separateAxis,
coord);
interpolatePointAB(sS[threadIdx.x], coord, dstContact[ind].localA, dstContact[ind].localB);
}
__global__ void computeTimeOfImpact_kernel(ContactData * dstContact,
uint2 * pairs,
float3 * pos, float3 * vel,
uint4 * indices,
uint * pointStart, uint * indexStart,
uint maxInd)
{
__shared__ Simplex sS[GJK_BLOCK_SIZE];
__shared__ TetrahedronProxy sPrxA[GJK_BLOCK_SIZE];
__shared__ TetrahedronProxy sPrxB[GJK_BLOCK_SIZE];
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
dstContact[ind].separateAxis=make_float4(0.f, 0.f, 0.f, 0.f);
dstContact[ind].timeOfImpact = 1e8f;
const uint4 ita = computePointIndex(pointStart, indexStart, indices, pairs[ind].x);
const uint4 itb = computePointIndex(pointStart, indexStart, indices, pairs[ind].y);
if(totalSpeed(vel, ita, itb) < 1e-8f) return;
t0Tetrahedron(sPrxA[threadIdx.x], ita, pos);
t0Tetrahedron(sPrxB[threadIdx.x], itb, pos);
ClosestPointTestContext ctc;
BarycentricCoordinate coord;
float4 sas;
computeSeparateDistance(sS[threadIdx.x], sPrxA[threadIdx.x], sPrxB[threadIdx.x], GJK_THIN_MARGIN, ctc, sas,
coord);
// intersected try zero margin
if(sas.w < 1.f) {
computeSeparateDistance(sS[threadIdx.x], sPrxA[threadIdx.x], sPrxB[threadIdx.x], 0.f, ctc, sas,
coord);
}
// still intersected no solution
if(sas.w < 1.f) return;
interpolatePointAB(sS[threadIdx.x], coord, dstContact[ind].localA, dstContact[ind].localB);
float3 nor = float3_normalize(float3_from_float4(sas));
float closeInSpeed = velocityOnTetrahedronAlong(vel, itb, getBarycentricCoordinate4Relativei(dstContact[ind].localB, pos, itb),
nor)
- velocityOnTetrahedronAlong(vel, ita, getBarycentricCoordinate4Relativei(dstContact[ind].localA, pos, ita),
nor);
// going apart no contact
if(closeInSpeed < 1e-8f) {
return;
}
float separateDistance = float4_length(sas);
// within thin shell margin
if(separateDistance < GJK_THIN_MARGIN2) {
dstContact[ind].timeOfImpact = 1e-9f;
dstContact[ind].separateAxis = sas;
return;
}
// use thin shell margin
separateDistance -= GJK_THIN_MARGIN2;
dstContact[ind].separateAxis = sas;
float lastDistance = separateDistance;
float toi = 0.f;
int i = 0;
while (i<GJK_MAX_NUM_ITERATIONS) {
// going apart
if(closeInSpeed < 1e-8f) {
dstContact[ind].timeOfImpact = 1e8f;
// for debug purpose
// dstContact[ind].separateAxis = sas;
// interpolatePointAB(sS[threadIdx.x], coord, dstContact[ind].localA, dstContact[ind].localB);
break;
}
toi += separateDistance / closeInSpeed * .743f;
// too far away
if(toi > GJK_STEPSIZE) {
dstContact[ind].timeOfImpact = toi;
// for debug purpose
// dstContact[ind].separateAxis = sas;
// interpolatePointAB(sS[threadIdx.x], coord, dstContact[ind].localA, dstContact[ind].localB);
break;
}
progressTetrahedron(sPrxA[threadIdx.x], ita, pos, vel, toi);
progressTetrahedron(sPrxB[threadIdx.x], itb, pos, vel, toi);
computeSeparateDistance(sS[threadIdx.x], sPrxA[threadIdx.x], sPrxB[threadIdx.x], GJK_THIN_MARGIN, ctc, sas,
coord);
// penetrated use result of last step
if(sas.w < 1.f) {
break;
}
// output toi and r
dstContact[ind].timeOfImpact = toi;
interpolatePointAB(sS[threadIdx.x], coord, dstContact[ind].localA, dstContact[ind].localB);
separateDistance = float4_length(sas);
// close enough use result of last step
if(separateDistance < 0.001f) {
break;
}
// going apart, no contact
if(separateDistance >= lastDistance) {
dstContact[ind].timeOfImpact = 1e8f;
break;
}
lastDistance = separateDistance;
// output sa
dstContact[ind].separateAxis = sas;
nor = float3_normalize(float3_from_float4(sas));
closeInSpeed = velocityOnTetrahedronAlong(vel, itb, getBarycentricCoordinate4Relativei(dstContact[ind].localB, pos, itb),
nor)
- velocityOnTetrahedronAlong(vel, ita, getBarycentricCoordinate4Relativei(dstContact[ind].localA, pos, ita),
nor);
i++;
}
}
__global__ void advanceTimeOfImpactIterative_kernel(ContactData * dstContact,
float3 * pos, float3 * vel,
uint maxInd)
{
__shared__ Simplex sS[GJK_BLOCK_SIZE];
__shared__ TetrahedronProxy sPrxA[GJK_BLOCK_SIZE];
__shared__ TetrahedronProxy sPrxB[GJK_BLOCK_SIZE];
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
const ContactData ct = dstContact[ind];
// already determined no contact
if(ct.separateAxis.w < 1.f || ct.timeOfImpact > GJK_STEPSIZE)
return;
float3 * ppos = & pos[ind<<3];
float3 * pvel = & vel[ind<<3];
float4 sas = ct.separateAxis;
const float3 nor = float3_normalize(float3_from_float4(sas));
float closeInSpeed = velocityOnTetrahedronAlong2(&pvel[1], getBarycentricCoordinate4Relative2(ct.localB, &ppos[1]),
nor)
- velocityOnTetrahedronAlong2(pvel, getBarycentricCoordinate4Relative2(ct.localA, ppos),
nor);
// going apart
if(closeInSpeed < 1e-8f) {
dstContact[ind].timeOfImpact = 1e8f;
return;
}
float separateDistance = float4_length(ct.separateAxis);
// within thin shell margin
if(separateDistance <= GJK_THIN_MARGIN2)
return;
// use thin shell margin
separateDistance -= GJK_THIN_MARGIN2;
const float toi = ct.timeOfImpact + separateDistance / closeInSpeed * .571f;
// too far away
if(toi > GJK_STEPSIZE) {
dstContact[ind].timeOfImpact = 1e8f;
return;
}
sPrxA[threadIdx.x].p[0] = float3_add( ppos[0], scale_float3_by(pvel[0], toi) );
sPrxB[threadIdx.x].p[0] = float3_add( ppos[1], scale_float3_by(pvel[1], toi) );
sPrxA[threadIdx.x].p[1] = float3_add( ppos[2], scale_float3_by(pvel[2], toi) );
sPrxB[threadIdx.x].p[1] = float3_add( ppos[3], scale_float3_by(pvel[3], toi) );
sPrxA[threadIdx.x].p[2] = float3_add( ppos[4], scale_float3_by(pvel[4], toi) );
sPrxB[threadIdx.x].p[2] = float3_add( ppos[5], scale_float3_by(pvel[5], toi) );
sPrxA[threadIdx.x].p[3] = float3_add( ppos[6], scale_float3_by(pvel[6], toi) );
sPrxB[threadIdx.x].p[3] = float3_add( ppos[7], scale_float3_by(pvel[7], toi) );
ClosestPointTestContext ctc;
BarycentricCoordinate coord;
computeSeparateDistance(sS[threadIdx.x], sPrxA[threadIdx.x], sPrxB[threadIdx.x], GJK_THIN_MARGIN, ctc, sas,
coord);
// penetrated use result of last step
if(sas.w < 1.f) return;
// output
interpolatePointAB(sS[threadIdx.x], coord, dstContact[ind].localA, dstContact[ind].localB);
dstContact[ind].separateAxis = sas;
dstContact[ind].timeOfImpact = toi;
}
__global__ void computeInitialSeparation_kernel(ContactData * dstContact,
float3 * pos,
uint maxInd)
{
__shared__ Simplex sS[GJK_BLOCK_SIZE];
__shared__ TetrahedronProxy sPrxA[GJK_BLOCK_SIZE];
__shared__ TetrahedronProxy sPrxB[GJK_BLOCK_SIZE];
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
dstContact[ind].separateAxis=make_float4(0.f, 0.f, 0.f, 0.f);
dstContact[ind].timeOfImpact = 1e8f;
float3 * ppos = & pos[ind<<3];
sPrxA[threadIdx.x].p[0] = ppos[0];
sPrxB[threadIdx.x].p[0] = ppos[1];
sPrxA[threadIdx.x].p[1] = ppos[2];
sPrxB[threadIdx.x].p[1] = ppos[3];
sPrxA[threadIdx.x].p[2] = ppos[4];
sPrxB[threadIdx.x].p[2] = ppos[5];
sPrxA[threadIdx.x].p[3] = ppos[6];
sPrxB[threadIdx.x].p[3] = ppos[7];
ClosestPointTestContext ctc;
BarycentricCoordinate coord;
float4 sas;
// computeSeparateDistance(sS[threadIdx.x], sPrxA[threadIdx.x], sPrxB[threadIdx.x], GJK_THIN_MARGIN, ctc, sas,
// coord);
// intersected try zero margin
// if(sas.w < 1.f) {
computeSeparateDistance(sS[threadIdx.x], sPrxA[threadIdx.x], sPrxB[threadIdx.x], 0.f, ctc,
sas,
coord);
// }
// still intersected no solution
if(sas.w < 1.f) return;
// output
interpolatePointAB(sS[threadIdx.x], coord, dstContact[ind].localA, dstContact[ind].localB);
dstContact[ind].separateAxis = sas;
dstContact[ind].timeOfImpact = 1e-9f;
}
__device__ int isValidPair(float toi, const float4 & sa)
{
if(toi >= GJK_STEPSIZE) return 0;
if(float3_length2(sa) < 1e-12f) return 0;
return 1;
}
__global__ void computeValidPairs_kernel(uint* dstCounts,
ContactData * srcContact,
uint numContacts,
uint scanBufferLength)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= scanBufferLength) return;
if(ind >= numContacts) {
dstCounts[ind] = 0;
return;
}
const ContactData cd = srcContact[ind];
dstCounts[ind] = isValidPair(cd.timeOfImpact, cd.separateAxis);
}
__global__ void squeezeContactPairs_kernel(uint2 * dstPairs, uint2 * srcPairs,
ContactData * dstContact, ContactData *srcContact,
uint * counts, uint * packLocs,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
if(!counts[ind]) return;
const uint toLoc = packLocs[ind];
dstPairs[toLoc] = srcPairs[ind];
dstContact[toLoc] = srcContact[ind];
}
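// Each contact pair owns 8 consecutive pos/vel entries (the 4 tetrahedron vertices of
// body A interleaved with the 4 of body B), so ind>>3 selects the pair and ind&7 the
// slot within it. Surviving pairs are written to the compacted slot given by packLocs;
// the pair and contact records are copied once per group, by the thread with ind&7 == 0.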
__global__ void squeezeContactPosAndVel_kernel(float3 * dstPos,
float3 * srcPos,
float3 * dstVel,
float3 * srcVel,
uint2 * dstPairs,
uint2 * srcPairs,
ContactData * dstContact,
ContactData *srcContact,
uint *counts, uint * packLocs,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
const uint iPair = ind>>3;
if(!counts[iPair]) return;
uint toLoc = (packLocs[iPair]<<3) + (ind & 7);
dstPos[toLoc] = srcPos[ind];
dstVel[toLoc] = srcVel[ind];
if(ind & 7) return;
toLoc = packLocs[iPair];
dstPairs[toLoc] = srcPairs[iPair];
dstContact[toLoc] = srcContact[iPair];
}
__global__ void resetX_kernel(float3 * dst, float3 * src,
float3 * vel,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
ind <<= 3;
int i;
for(i=0; i < 8; i++) {
if(ind>= maxInd) return;
dst[ind] = src[ind];
vel[ind] = make_float3(0.f, 0.f, 0.f);
ind++;
}
}
extern "C" {
void narrowphaseComputeSeparateAxis(ContactData * dstContact,
uint2 * pairs,
float3 * pos,
float3 * vel,
uint4 * ind,
uint * pointStart, uint * indexStart,
uint numOverlappingPairs)
{
dim3 block(GJK_BLOCK_SIZE, 1, 1);
unsigned nblk = iDivUp(numOverlappingPairs, GJK_BLOCK_SIZE);
dim3 grid(nblk, 1, 1);
computeSeparateAxis_kernel<<< grid, block >>>(dstContact, pairs, pos, vel, ind, pointStart, indexStart, numOverlappingPairs);
}
void narrowphaseComputeTimeOfImpact(ContactData * dstContact,
uint2 * pairs,
float3 * pos,
float3 * vel,
uint4 * ind,
uint * pointStart, uint * indexStart,
uint numOverlappingPairs)
{
dim3 block(GJK_BLOCK_SIZE, 1, 1);
unsigned nblk = iDivUp(numOverlappingPairs, GJK_BLOCK_SIZE);
dim3 grid(nblk, 1, 1);
computeTimeOfImpact_kernel<<< grid, block >>>(dstContact, pairs, pos, vel, ind, pointStart, indexStart, numOverlappingPairs);
}
void narrowphase_computeInitialSeparation(ContactData * dstContact,
float3 * pos,
uint numOverlappingPairs)
{
dim3 block(GJK_BLOCK_SIZE, 1, 1);
unsigned nblk = iDivUp(numOverlappingPairs, GJK_BLOCK_SIZE);
dim3 grid(nblk, 1, 1);
computeInitialSeparation_kernel<<< grid, block >>>(dstContact,
pos,
numOverlappingPairs);
}
void narrowphase_advanceTimeOfImpactIterative(ContactData * dstContact,
float3 * pos,
float3 * vel,
uint numOverlappingPairs)
{
dim3 block(GJK_BLOCK_SIZE, 1, 1);
unsigned nblk = iDivUp(numOverlappingPairs, GJK_BLOCK_SIZE);
dim3 grid(nblk, 1, 1);
advanceTimeOfImpactIterative_kernel<<< grid, block >>>(dstContact,
pos,
vel,
numOverlappingPairs);
}
void narrowphaseComputeValidPairs(uint * dstCounts,
ContactData * srcContact,
uint numContacts,
uint scanBufferLength)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(scanBufferLength, 512);
dim3 grid(nblk, 1, 1);
computeValidPairs_kernel<<< grid, block >>>(dstCounts, srcContact, numContacts, scanBufferLength);
}
void narrowphaseSqueezeContactPairs(uint2 * dstPairs, uint2 * srcPairs,
ContactData * dstContact, ContactData *srcContact,
uint * counts, uint * packLocs,
uint maxInd)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(maxInd, 512);
dim3 grid(nblk, 1, 1);
squeezeContactPairs_kernel<<< grid, block >>>(dstPairs, srcPairs,
dstContact, srcContact,
counts, packLocs,
maxInd);
}
void narrowphaseResetX(float3 * dst, float3 *src,
float3 * vel,
uint maxInd)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(maxInd>>3, 512);
dim3 grid(nblk, 1, 1);
resetX_kernel<<< grid, block >>>(dst, src, vel, maxInd);
}
void narrowphase_writePairPosAndVel(float3 * dstPos,
float3 * dstVel,
uint2 * pairs,
float3 * pos,
float3 * vel,
uint4 * ind,
uint * pointStart, uint * indexStart,
uint numOverlappingPairs)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(numOverlappingPairs<<3, 512);
dim3 grid(nblk, 1, 1);
writePairPosAndVel_kernel<<< grid, block >>>(dstPos,
dstVel,
pairs,
pos,
vel,
ind,
pointStart, indexStart,
numOverlappingPairs<<3);
}
void narrowphase_writePairs(uint2 * dstPair,
uint2 * srcPair,
uint numOverlappingPairs)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(numOverlappingPairs, 512);
dim3 grid(nblk, 1, 1);
writePairInd_kernel<<< grid, block >>>(dstPair,
srcPair,
numOverlappingPairs);
}
void narrowphase_squeezeContactPosAndVel(float3 * dstPos,
float3 * srcPos,
float3 * dstVel,
float3 * srcVel,
uint2 * dstPairs,
uint2 * srcPairs,
ContactData * dstContact,
ContactData *srcContact,
uint *counts, uint * scanResult,
uint numPairs)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(numPairs<<3, 512);
dim3 grid(nblk, 1, 1);
squeezeContactPosAndVel_kernel<<< grid, block >>>(dstPos,
srcPos,
dstVel,
srcVel,
dstPairs,
srcPairs,
dstContact,
srcContact,
counts, scanResult,
numPairs<<3);
}
}
|
9ab8bef34d912f166daabc4b4a73b9099e7e6013.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <glm/gtc/matrix_inverse.hpp>
//#include <stream_compaction/efficient.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define MAX_THREADS 64
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
//#define FILENAME "/d/Documents/cis565/hw3/test.txt"
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene *hst_scene;
static glm::vec3 *dev_image;
static Ray* dev_rays;
static Ray* dev_out_rays;
static Geom* dev_geoms;
static Material* dev_materials;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
const Geom* geoms = &(hst_scene->geoms)[0];
const Material* materials = &(hst_scene->materials)[0];
const int numObjects = hst_scene->geoms.size();
const int numMaterials = hst_scene->materials.size();
hipMalloc((void**)&dev_rays, pixelcount*sizeof(Ray));
hipMalloc((void**)&dev_out_rays, pixelcount*sizeof(Ray));
hipMalloc((void**)&dev_geoms, numObjects*sizeof(Geom));
hipMalloc((void**)&dev_materials, numMaterials*sizeof(Material));
hipMemcpy(dev_geoms, geoms, numObjects*sizeof(Geom), hipMemcpyHostToDevice);
hipMemcpy(dev_materials, materials, numMaterials*sizeof(Material), hipMemcpyHostToDevice);
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
hipFree(dev_image); // no-op if dev_image is null
hipFree(dev_rays);
hipFree(dev_out_rays);
hipFree(dev_geoms);
hipFree(dev_materials);
checkCUDAError("pathtraceFree");
}
__global__ void initRays(int n, int iter, Camera cam, Ray* rays, float cam_pos_dx, float cam_pos_dy){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n){
int x = index % cam.resolution.x;
int y = index / cam.resolution.x;
glm::vec3 left = glm::cross(cam.up, cam.view);
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
thrust::uniform_real_distribution<float> u01(-0.1, 0.1);
float res2x = cam.resolution.x / 2.0f;
float res2y = cam.resolution.y / 2.0f;
float magx = -(res2x - x + u01(rng))*sin(cam.fov.x) / res2x;
float magy = (res2y - y + u01(rng))*sin(cam.fov.y) / res2y;
glm::vec3 direction = cam.view + magx*left + magy*cam.up;
direction = glm::normalize(direction);
// Depth-of-field computations
// Source : https://www.cs.princeton.edu/courses/archive/fall00/cs426/lectures/raycast/sld017.htm
glm::vec3 focal_point = cam.position + cam.focal * cam.view;
float t = -(glm::dot(cam.position, direction) + glm::dot(focal_point, cam.view)) / ((glm::dot(direction,cam.view))+0.000001);
glm::vec3 intersection = cam.position + t*direction;
//rays[index].origin = cam.position;
rays[index].origin = cam.position + cam_pos_dx*left + cam_pos_dy*cam.up;
direction = intersection - rays[index].origin;
rays[index].direction = direction;
rays[index].color = glm::vec3(1.0);
rays[index].isAlive = true;
rays[index].index = index;
}
}
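// Brute-force intersection: every live ray is tested against all geometries, the nearest
// hit is kept, and scatterRay updates the ray's color and direction from that surface's
// material. Rays that miss everything or reach the maximum trace depth are colored black
// and marked dead so they can be removed by stream compaction.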
__global__ void intersect(int iter, int depth, int traceDepth, int n, Camera cam, Ray* rays, int numObjects, const Geom* geoms, const Material* materials){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
glm::vec3 normal;
glm::vec3 intersectionPoint;
float isIntersection;
bool outside;
glm::vec3 minNormal;
glm::vec3 minIntersectionPoint;
float minDist = INFINITY;
int obj_index = -1;
if (index < n){
if (!rays[index].isAlive){
return;
}
if (depth == traceDepth - 1 && rays[index].isAlive){
rays[index].color = glm::vec3(0.0);
rays[index].isAlive = false;
return;
}
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, depth);
Ray ray = rays[index];
for (int i = 0; i < numObjects; i++){
if (geoms[i].type == SPHERE){
isIntersection = sphereIntersectionTest(geoms[i], ray, intersectionPoint, normal, outside);
}
else {
isIntersection = boxIntersectionTest(geoms[i], ray, intersectionPoint, normal, outside);
}
if (isIntersection > 0 && minDist > glm::distance(ray.origin, intersectionPoint)){
minNormal = normal;
minIntersectionPoint = intersectionPoint;
minDist = glm::distance(ray.origin, intersectionPoint);
obj_index = i;
}
}
if (obj_index >= 0){
scatterRay(rays[index], minIntersectionPoint, minNormal, materials[geoms[obj_index].materialid], geoms[obj_index], rng);
}
else{
rays[index].color = glm::vec3(0.0);
rays[index].isAlive = false;
}
}
}
__global__ void updatePixels(int n, Ray* rays, glm::vec3* image){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n){
if (!rays[index].isAlive){
image[rays[index].index] += rays[index].color;
}
}
}
struct is_dead{
__host__ __device__
bool operator()(const Ray ray){
return !ray.isAlive;
}
};
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int numObjects = hst_scene->geoms.size();
const int pixelcount = cam.resolution.x * cam.resolution.y;
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// Initialize aperture for DOF
int numBlocks = (pixelcount-1) / MAX_THREADS + 1;
thrust::default_random_engine rng = makeSeededRandomEngine(iter, iter, 0);
thrust::uniform_real_distribution<float> uAp(-cam.aperture, cam.aperture);
float dx = uAp(rng);
float dy = uAp(rng);
// Initialize rays
hipLaunchKernelGGL(( initRays), dim3(numBlocks), dim3(MAX_THREADS), 0, 0, pixelcount, iter, cam, dev_rays, dx, dy);
checkCUDAError("initRays");
// Handle movement (TODO: make this faster?)
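// Objects flagged as moving are shifted along the segment from translation to moveto by a
// uniform random fraction each iteration, so the accumulated image effectively picks up motion blur.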
Geom* geoms = &(hst_scene->geoms)[0];
glm::vec3 new_translation;
glm::vec3 new_direction;
float dist;
for (int i = 0; i < numObjects; i++){
if (geoms[i].moving){
new_direction = geoms[i].moveto - geoms[i].translation;
dist = glm::length(new_direction);
new_direction = glm::normalize(new_direction);
thrust::uniform_real_distribution<float> uMove(0, dist);
new_translation = geoms[i].translation + new_direction * uMove(rng);
geoms[i].transform = utilityCore::buildTransformationMatrix(
new_translation, geoms[i].rotation, geoms[i].scale);
geoms[i].inverseTransform = glm::inverse(geoms[i].transform);
geoms[i].invTranspose = glm::inverseTranspose(geoms[i].transform);
}
}
hipMemcpy(dev_geoms, geoms, numObjects*sizeof(Geom), hipMemcpyHostToDevice);
// Path tracing
int numAlive = pixelcount;
Ray* last_ray;
for (int d = 0; d < traceDepth; d++){
numBlocks = (numAlive - 1) / MAX_THREADS + 1;
hipLaunchKernelGGL(( intersect), dim3(numBlocks), dim3(MAX_THREADS), 0, 0, iter, d, traceDepth, numAlive, cam, dev_rays, numObjects, dev_geoms, dev_materials);
hipLaunchKernelGGL(( updatePixels), dim3(numBlocks), dim3(MAX_THREADS), 0, 0, numAlive, dev_rays, dev_image);
numAlive = shared_compact(numAlive, dev_out_rays, dev_rays);
hipMemcpy(dev_rays, dev_out_rays, numAlive*sizeof(Ray), hipMemcpyDeviceToDevice);
//last_ray = thrust::remove_if(thrust::device, dev_rays, dev_rays + numAlive, is_dead());
//numAlive = last_ray - dev_rays;
if (numAlive == 0){
break;
}
}
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
/*
* Exclusive scan on idata, stores into odata, using shared memory
*/
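// Work-efficient (Blelloch) scan: each block of MAX_THREADS/2 threads loads MAX_THREADS
// elements into shared memory, the up-sweep builds partial sums in a balanced tree, the
// last element is cleared, and the down-sweep converts the tree into an exclusive prefix
// sum before the block is written back to global memory.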
__global__ void kernSharedScan(int n, int *odata, const int *idata){
extern __shared__ int temp[];
int index = threadIdx.x;
int offset = 1;
temp[2 * index] = idata[2 * index + (blockIdx.x*blockDim.x*2)];
temp[2 * index + 1] = idata[2 * index + 1 + (blockIdx.x*blockDim.x*2)];
for (int d = n >> 1; d > 0; d >>= 1){
__syncthreads();
if (index < d){
int ai = offset*(2 * index + 1) - 1;
int bi = offset*(2 * index + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (index == 0){
temp[n - 1] = 0;
}
for (int d = 1; d < n; d *= 2){
offset >>= 1;
__syncthreads();
if (index < d){
int ai = offset*(2 * index + 1) - 1;
int bi = offset*(2 * index + 2) - 1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
odata[2 * index + (blockIdx.x*blockDim.x*2)] = temp[2 * index];
odata[2 * index + 1 + (blockIdx.x*blockDim.x*2)] = temp[2 * index + 1];
}
template <typename T> __global__ void kernFindAlive(int n, int* odata, T* idata){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < n){
if (idata[index].isAlive){
odata[index] = 1;
}
else {
odata[index] = 0;
}
}
}
template <typename T> __global__ void kernScatter(int n, T* odata, T* idata, int* bools, int* scan){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < n){
if (bools[index] == 1){
odata[scan[index]] = idata[index];
}
}
}
__global__ void kernGetLastInBlocks(int n, int blockSize, int* dev_odata, int* dev_idata, int* dev_orig_idata){
// n is the number of elements expected in the output array
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < n){
dev_odata[index] = dev_idata[(index + 1)*blockSize - 1] + dev_orig_idata[(index + 1)*blockSize - 1];
}
}
__global__ void kernInPlaceIncrementBlocks(int n, int* dev_data, int* increment){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < n){
dev_data[index] = dev_data[index] + increment[blockIdx.x];
}
}
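// Multi-block exclusive scan: the input is padded out to whole blocks, each block is
// scanned independently with kernSharedScan, the total of every block is gathered and
// scanned recursively, and the scanned block totals are added back so the result is a
// single exclusive scan over the full array.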
void blockwise_scan(int n, int* dev_odata, int* dev_idata){
int nfb = (n - 1) / MAX_THREADS + 1;
int n2 = nfb * MAX_THREADS;
int n_size = n * sizeof(int); // original input array size in bytes
int fb_size = nfb*MAX_THREADS*sizeof(int); // full block size in bytes
int shared_mem_size = MAX_THREADS * sizeof(int);
int* dev_idata_fb;
int* dev_odata_fb;
//TODO: padding only needed when n is not already a multiple of MAX_THREADS
hipMalloc((void**)&dev_idata_fb, fb_size);
hipMalloc((void**)&dev_odata_fb, fb_size);
hipMemset(dev_idata_fb, 0, fb_size);
hipMemcpy(dev_idata_fb, dev_idata, n_size, hipMemcpyDeviceToDevice);
// Base case
if (nfb == 1){
hipLaunchKernelGGL(( kernSharedScan), dim3(nfb), dim3(MAX_THREADS/2), shared_mem_size, 0, MAX_THREADS, dev_odata_fb, dev_idata_fb);
hipMemcpy(dev_odata, dev_odata_fb, n_size, hipMemcpyDeviceToDevice);
hipFree(dev_idata_fb);
hipFree(dev_odata_fb);
return;
}
// Recurse
int* dev_block_increments;
int* dev_block_increments_scan;
int numBlocks = (nfb - 1) / MAX_THREADS + 1;
hipMalloc((void**)&dev_block_increments, nfb*sizeof(int));
hipMalloc((void**)&dev_block_increments_scan, nfb*sizeof(int));
hipLaunchKernelGGL(( kernSharedScan) , dim3(nfb), dim3(MAX_THREADS/2), shared_mem_size, 0, MAX_THREADS, dev_odata_fb, dev_idata_fb);
//hipDeviceSynchronize();
hipLaunchKernelGGL(( kernGetLastInBlocks), dim3(numBlocks), dim3(MAX_THREADS), 0, 0, nfb, MAX_THREADS, dev_block_increments, dev_odata_fb, dev_idata_fb);
//hipDeviceSynchronize();
blockwise_scan(nfb, dev_block_increments_scan, dev_block_increments);
hipLaunchKernelGGL(( kernInPlaceIncrementBlocks), dim3(nfb), dim3(MAX_THREADS), 0, 0, n2, dev_odata_fb, dev_block_increments_scan);
//hipDeviceSynchronize();
hipMemcpy(dev_odata, dev_odata_fb, n_size, hipMemcpyDeviceToDevice);
hipFree(dev_idata_fb);
hipFree(dev_odata_fb);
hipFree(dev_block_increments);
hipFree(dev_block_increments_scan);
}
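// Stream compaction: flag every live element, exclusive-scan the flags to get each
// survivor's destination index, then scatter. Since the scan is exclusive, the number of
// survivors equals the last scan value plus the last flag.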
template <typename T> int shared_compact(int n, T* dev_odata, T* dev_idata){
// Returns the number of elements remaining, elements after the return value in odata are undefined
// Assumes device memory
int numBlocks = (n - 1) / MAX_THREADS + 1;
int n_size = n * sizeof(int);
int n2_size = numBlocks*MAX_THREADS*sizeof(int);
int out_size = 0;
int* dev_temp;
int* dev_temp2;
int* dev_scan;
hipMalloc((void**)&dev_temp, n_size);
hipMalloc((void**)&dev_temp2, n2_size);
hipMalloc((void**)&dev_scan, n2_size);
// Compute temp (binary)
hipLaunchKernelGGL(( kernFindAlive), dim3(numBlocks), dim3(MAX_THREADS), 0, 0, n, dev_temp, dev_idata);
hipDeviceSynchronize();
// Scan on temp
blockwise_scan(n, dev_scan, dev_temp);
// Scatter on scan
hipLaunchKernelGGL(( kernScatter), dim3(numBlocks), dim3(MAX_THREADS), 0, 0, n, dev_odata, dev_idata, dev_temp, dev_scan);
// Compute outsize
int lastnum;
int lastbool;
hipMemcpy(&lastnum, dev_scan + n - 1, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(&lastbool, dev_temp + n - 1, sizeof(int), hipMemcpyDeviceToHost);
out_size = lastnum + lastbool;
hipFree(dev_temp);
hipFree(dev_temp2);
hipFree(dev_scan);
return out_size;
}
| 9ab8bef34d912f166daabc4b4a73b9099e7e6013.cu | #include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <glm/gtc/matrix_inverse.hpp>
//#include <stream_compaction/efficient.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define MAX_THREADS 64
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
//#define FILENAME "/d/Documents/cis565/hw3/test.txt"
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene *hst_scene;
static glm::vec3 *dev_image;
static Ray* dev_rays;
static Ray* dev_out_rays;
static Geom* dev_geoms;
static Material* dev_materials;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
const Geom* geoms = &(hst_scene->geoms)[0];
const Material* materials = &(hst_scene->materials)[0];
const int numObjects = hst_scene->geoms.size();
const int numMaterials = hst_scene->materials.size();
cudaMalloc((void**)&dev_rays, pixelcount*sizeof(Ray));
cudaMalloc((void**)&dev_out_rays, pixelcount*sizeof(Ray));
cudaMalloc((void**)&dev_geoms, numObjects*sizeof(Geom));
cudaMalloc((void**)&dev_materials, numMaterials*sizeof(Material));
cudaMemcpy(dev_geoms, geoms, numObjects*sizeof(Geom), cudaMemcpyHostToDevice);
cudaMemcpy(dev_materials, materials, numMaterials*sizeof(Material), cudaMemcpyHostToDevice);
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
cudaFree(dev_image); // no-op if dev_image is null
cudaFree(dev_rays);
cudaFree(dev_out_rays);
cudaFree(dev_geoms);
cudaFree(dev_materials);
checkCUDAError("pathtraceFree");
}
__global__ void initRays(int n, int iter, Camera cam, Ray* rays, float cam_pos_dx, float cam_pos_dy){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n){
int x = index % cam.resolution.x;
int y = index / cam.resolution.x;
glm::vec3 left = glm::cross(cam.up, cam.view);
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
thrust::uniform_real_distribution<float> u01(-0.1, 0.1);
float res2x = cam.resolution.x / 2.0f;
float res2y = cam.resolution.y / 2.0f;
float magx = -(res2x - x + u01(rng))*sin(cam.fov.x) / res2x;
float magy = (res2y - y + u01(rng))*sin(cam.fov.y) / res2y;
glm::vec3 direction = cam.view + magx*left + magy*cam.up;
direction = glm::normalize(direction);
// Depth-of-field computations
// Source : https://www.cs.princeton.edu/courses/archive/fall00/cs426/lectures/raycast/sld017.htm
glm::vec3 focal_point = cam.position + cam.focal * cam.view;
float t = -(glm::dot(cam.position, direction) + glm::dot(focal_point, cam.view)) / ((glm::dot(direction,cam.view))+0.000001);
glm::vec3 intersection = cam.position + t*direction;
//rays[index].origin = cam.position;
rays[index].origin = cam.position + cam_pos_dx*left + cam_pos_dy*cam.up;
direction = intersection - rays[index].origin;
rays[index].direction = direction;
rays[index].color = glm::vec3(1.0);
rays[index].isAlive = true;
rays[index].index = index;
}
}
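// Brute-force intersection: every live ray is tested against all geometries, the nearest
// hit is kept, and scatterRay updates the ray's color and direction from that surface's
// material. Rays that miss everything or reach the maximum trace depth are colored black
// and marked dead so they can be removed by stream compaction.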
__global__ void intersect(int iter, int depth, int traceDepth, int n, Camera cam, Ray* rays, int numObjects, const Geom* geoms, const Material* materials){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
glm::vec3 normal;
glm::vec3 intersectionPoint;
float isIntersection;
bool outside;
glm::vec3 minNormal;
glm::vec3 minIntersectionPoint;
float minDist = INFINITY;
int obj_index = -1;
if (index < n){
if (!rays[index].isAlive){
return;
}
if (depth == traceDepth - 1 && rays[index].isAlive){
rays[index].color = glm::vec3(0.0);
rays[index].isAlive = false;
return;
}
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, depth);
Ray ray = rays[index];
for (int i = 0; i < numObjects; i++){
if (geoms[i].type == SPHERE){
isIntersection = sphereIntersectionTest(geoms[i], ray, intersectionPoint, normal, outside);
}
else {
isIntersection = boxIntersectionTest(geoms[i], ray, intersectionPoint, normal, outside);
}
if (isIntersection > 0 && minDist > glm::distance(ray.origin, intersectionPoint)){
minNormal = normal;
minIntersectionPoint = intersectionPoint;
minDist = glm::distance(ray.origin, intersectionPoint);
obj_index = i;
}
}
if (obj_index >= 0){
scatterRay(rays[index], minIntersectionPoint, minNormal, materials[geoms[obj_index].materialid], geoms[obj_index], rng);
}
else{
rays[index].color = glm::vec3(0.0);
rays[index].isAlive = false;
}
}
}
__global__ void updatePixels(int n, Ray* rays, glm::vec3* image){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n){
if (!rays[index].isAlive){
image[rays[index].index] += rays[index].color;
}
}
}
struct is_dead{
__host__ __device__
bool operator()(const Ray ray){
return !ray.isAlive;
}
};
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int numObjects = hst_scene->geoms.size();
const int pixelcount = cam.resolution.x * cam.resolution.y;
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// Initialize aperture for DOF
int numBlocks = (pixelcount-1) / MAX_THREADS + 1;
thrust::default_random_engine rng = makeSeededRandomEngine(iter, iter, 0);
thrust::uniform_real_distribution<float> uAp(-cam.aperture, cam.aperture);
float dx = uAp(rng);
float dy = uAp(rng);
// Initialize rays
initRays<<<numBlocks, MAX_THREADS>>>(pixelcount, iter, cam, dev_rays, dx, dy);
checkCUDAError("initRays");
// Handle movement (TODO: make this faster?)
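// Objects flagged as moving are shifted along the segment from translation to moveto by a
// uniform random fraction each iteration, so the accumulated image effectively picks up motion blur.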
Geom* geoms = &(hst_scene->geoms)[0];
glm::vec3 new_translation;
glm::vec3 new_direction;
float dist;
for (int i = 0; i < numObjects; i++){
if (geoms[i].moving){
new_direction = geoms[i].moveto - geoms[i].translation;
dist = glm::length(new_direction);
new_direction = glm::normalize(new_direction);
thrust::uniform_real_distribution<float> uMove(0, dist);
new_translation = geoms[i].translation + new_direction * uMove(rng);
geoms[i].transform = utilityCore::buildTransformationMatrix(
new_translation, geoms[i].rotation, geoms[i].scale);
geoms[i].inverseTransform = glm::inverse(geoms[i].transform);
geoms[i].invTranspose = glm::inverseTranspose(geoms[i].transform);
}
}
cudaMemcpy(dev_geoms, geoms, numObjects*sizeof(Geom), cudaMemcpyHostToDevice);
// Path tracing
int numAlive = pixelcount;
Ray* last_ray;
for (int d = 0; d < traceDepth; d++){
numBlocks = (numAlive - 1) / MAX_THREADS + 1;
intersect<<<numBlocks, MAX_THREADS>>>(iter, d, traceDepth, numAlive, cam, dev_rays, numObjects, dev_geoms, dev_materials);
updatePixels<<<numBlocks, MAX_THREADS>>>(numAlive, dev_rays, dev_image);
numAlive = shared_compact(numAlive, dev_out_rays, dev_rays);
cudaMemcpy(dev_rays, dev_out_rays, numAlive*sizeof(Ray), cudaMemcpyDeviceToDevice);
//last_ray = thrust::remove_if(thrust::device, dev_rays, dev_rays + numAlive, is_dead());
//numAlive = last_ray - dev_rays;
if (numAlive == 0){
break;
}
}
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
/*
* Exclusive scan on idata, stores into odata, using shared memory
*/
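// Work-efficient (Blelloch) scan: each block of MAX_THREADS/2 threads loads MAX_THREADS
// elements into shared memory, the up-sweep builds partial sums in a balanced tree, the
// last element is cleared, and the down-sweep converts the tree into an exclusive prefix
// sum before the block is written back to global memory.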
__global__ void kernSharedScan(int n, int *odata, const int *idata){
extern __shared__ int temp[];
int index = threadIdx.x;
int offset = 1;
temp[2 * index] = idata[2 * index + (blockIdx.x*blockDim.x*2)];
temp[2 * index + 1] = idata[2 * index + 1 + (blockIdx.x*blockDim.x*2)];
for (int d = n >> 1; d > 0; d >>= 1){
__syncthreads();
if (index < d){
int ai = offset*(2 * index + 1) - 1;
int bi = offset*(2 * index + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (index == 0){
temp[n - 1] = 0;
}
for (int d = 1; d < n; d *= 2){
offset >>= 1;
__syncthreads();
if (index < d){
int ai = offset*(2 * index + 1) - 1;
int bi = offset*(2 * index + 2) - 1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
odata[2 * index + (blockIdx.x*blockDim.x*2)] = temp[2 * index];
odata[2 * index + 1 + (blockIdx.x*blockDim.x*2)] = temp[2 * index + 1];
}
template <typename T> __global__ void kernFindAlive(int n, int* odata, T* idata){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < n){
if (idata[index].isAlive){
odata[index] = 1;
}
else {
odata[index] = 0;
}
}
}
template <typename T> __global__ void kernScatter(int n, T* odata, T* idata, int* bools, int* scan){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < n){
if (bools[index] == 1){
odata[scan[index]] = idata[index];
}
}
}
__global__ void kernGetLastInBlocks(int n, int blockSize, int* dev_odata, int* dev_idata, int* dev_orig_idata){
// n is the number of elements expected in the output array
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < n){
dev_odata[index] = dev_idata[(index + 1)*blockSize - 1] + dev_orig_idata[(index + 1)*blockSize - 1];
}
}
__global__ void kernInPlaceIncrementBlocks(int n, int* dev_data, int* increment){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < n){
dev_data[index] = dev_data[index] + increment[blockIdx.x];
}
}
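// Multi-block exclusive scan: the input is padded out to whole blocks, each block is
// scanned independently with kernSharedScan, the total of every block is gathered and
// scanned recursively, and the scanned block totals are added back so the result is a
// single exclusive scan over the full array.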
void blockwise_scan(int n, int* dev_odata, int* dev_idata){
int nfb = (n - 1) / MAX_THREADS + 1;
int n2 = nfb * MAX_THREADS;
int n_size = n * sizeof(int); // original input array size in bytes
int fb_size = nfb*MAX_THREADS*sizeof(int); // full block size in bytes
int shared_mem_size = MAX_THREADS * sizeof(int);
int* dev_idata_fb;
int* dev_odata_fb;
//TODO: padding only needed when n is not already a multiple of MAX_THREADS
cudaMalloc((void**)&dev_idata_fb, fb_size);
cudaMalloc((void**)&dev_odata_fb, fb_size);
cudaMemset(dev_idata_fb, 0, fb_size);
cudaMemcpy(dev_idata_fb, dev_idata, n_size, cudaMemcpyDeviceToDevice);
// Base case
if (nfb == 1){
kernSharedScan<<<nfb, MAX_THREADS/2, shared_mem_size>>>(MAX_THREADS, dev_odata_fb, dev_idata_fb);
cudaMemcpy(dev_odata, dev_odata_fb, n_size, cudaMemcpyDeviceToDevice);
cudaFree(dev_idata_fb);
cudaFree(dev_odata_fb);
return;
}
// Recurse
int* dev_block_increments;
int* dev_block_increments_scan;
int numBlocks = (nfb - 1) / MAX_THREADS + 1;
cudaMalloc((void**)&dev_block_increments, nfb*sizeof(int));
cudaMalloc((void**)&dev_block_increments_scan, nfb*sizeof(int));
kernSharedScan <<<nfb, MAX_THREADS/2, shared_mem_size>>>(MAX_THREADS, dev_odata_fb, dev_idata_fb);
//cudaDeviceSynchronize();
kernGetLastInBlocks<<<numBlocks, MAX_THREADS>>>(nfb, MAX_THREADS, dev_block_increments, dev_odata_fb, dev_idata_fb);
//cudaDeviceSynchronize();
blockwise_scan(nfb, dev_block_increments_scan, dev_block_increments);
kernInPlaceIncrementBlocks<<<nfb, MAX_THREADS>>>(n2, dev_odata_fb, dev_block_increments_scan);
//cudaDeviceSynchronize();
cudaMemcpy(dev_odata, dev_odata_fb, n_size, cudaMemcpyDeviceToDevice);
cudaFree(dev_idata_fb);
cudaFree(dev_odata_fb);
cudaFree(dev_block_increments);
cudaFree(dev_block_increments_scan);
}
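// Stream compaction: flag every live element, exclusive-scan the flags to get each
// survivor's destination index, then scatter. Since the scan is exclusive, the number of
// survivors equals the last scan value plus the last flag.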
template <typename T> int shared_compact(int n, T* dev_odata, T* dev_idata){
// Returns the number of elements remaining, elements after the return value in odata are undefined
// Assumes device memory
int numBlocks = (n - 1) / MAX_THREADS + 1;
int n_size = n * sizeof(int);
int n2_size = numBlocks*MAX_THREADS*sizeof(int);
int out_size = 0;
int* dev_temp;
int* dev_temp2;
int* dev_scan;
cudaMalloc((void**)&dev_temp, n_size);
cudaMalloc((void**)&dev_temp2, n2_size);
cudaMalloc((void**)&dev_scan, n2_size);
// Compute temp (binary)
kernFindAlive<<<numBlocks, MAX_THREADS>>>(n, dev_temp, dev_idata);
cudaDeviceSynchronize();
// Scan on temp
blockwise_scan(n, dev_scan, dev_temp);
// Scatter on scan
kernScatter<<<numBlocks, MAX_THREADS>>>(n, dev_odata, dev_idata, dev_temp, dev_scan);
// Compute outsize
int lastnum;
int lastbool;
cudaMemcpy(&lastnum, dev_scan + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&lastbool, dev_temp + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
out_size = lastnum + lastbool;
cudaFree(dev_temp);
cudaFree(dev_temp2);
cudaFree(dev_scan);
return out_size;
}
|
f74549b8d4d29c5718348aac2b097e52eb9144b5.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <cusparse_v2.h>
#include "rocblas.h"
#include <hiprand/hiprand.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "mex.h"
#include "kcDefs.h" //see for info on anything starting with KC_
//KC_FP_TYPE can be assumed to mean "double", but originally
//this definition could also work with "float" for faster speed.
//float compatability is no longer supported in this function.
#include "kcArrayFunctions.h"
#define MAX_P 1e25
#define MIN_P 1e-25
__device__ KC_FP_TYPE positiveBound(KC_FP_TYPE a) {
//return a;
if(isinf(a))
return MAX_P;
else
return fmin(fmax(a,MIN_P),MAX_P);
}
__device__ KC_FP_TYPE h(KC_FP_TYPE z, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE sh) {
return KC_MAX(MIN_P,KC_MIN(exp(z*gamma+sh)*dt,MAX_P));
}
//one thread per particle <<< nTrials,nParticles >>>
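// Proposal and weighting step of the particle filter: each particle draws its new latent
// state from a Gaussian truncated above at the bound (1), then forms its importance weight
// lw as previous weight * prior transition density * Poisson spike likelihood / proposal
// density; lw2 is the same weight with the likelihood term left out.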
__global__ void kcMoveParticles(KC_FP_TYPE * y, KC_FP_TYPE * spe, KC_FP_TYPE * pos, KC_FP_TYPE * wt, KC_FP_TYPE * b, int * betaIdxVector, KC_FP_TYPE l_0, KC_FP_TYPE g, KC_FP_TYPE w, KC_FP_TYPE dt, KC_FP_TYPE * randN, KC_FP_TYPE sigMult, KC_FP_TYPE * log_li, KC_FP_TYPE * lw, KC_FP_TYPE * lw2, KC_FP_TYPE * ncdf, KC_FP_TYPE * posc, int * trIdx, int NT, int TT, int numParticles, int t) {
int threadNum = blockIdx.x*blockDim.x + threadIdx.x;
int tr_num = (int)threadNum / (int)numParticles;
int p_num = threadNum % numParticles;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
if(t < trLength) {
int row = trIdx[tr_num] + t;
int idx = TT*p_num + row;
int pidx = tr_num*numParticles+p_num;
KC_FP_TYPE cb = b[betaIdxVector[row]];
KC_FP_TYPE sw = sqrt(w);
KC_FP_TYPE mup = (t==0)?(l_0):(pos[idx-1]+cb);
KC_FP_TYPE mu = mup;
KC_FP_TYPE sig2 = sigMult*w;
KC_FP_TYPE sig = sqrt(sig2);
KC_FP_TYPE maxI = fmin(1.0-1e-20, fmax( normcdf((1.0-mu)/sig),1e-20 ));
pos[idx] = fmin(1.0-1e-20, normcdfinv(maxI*randN[pidx])*sig + mu);
posc[pidx] = pos[idx];
KC_FP_TYPE dpos = pos[idx]-mu;
KC_FP_TYPE log_pi_k = -log(maxI)-0.5*log(2.0*M_PI*sig2) - 0.5/sig2*(dpos*dpos);
//to be stored for each particle: ncdf, lw, lw2
ncdf[idx] = normcdf((1-mup)/sw);
KC_FP_TYPE dposp = pos[idx]-mup;
KC_FP_TYPE log_p = -0*log(maxI) -0.5*log(2*M_PI*w)- 0.5/w*(dposp*dposp);
log_li[pidx] = -h(pos[idx],g,dt,spe[row])+y[row]*(log(fmax(h(pos[idx],g,1.0,spe[row]),1e-30))+log(dt))-lgamma(y[row]+1);
KC_FP_TYPE pw = (t==0)?(log(1/(KC_FP_TYPE)numParticles) ):( log(fmax(wt[idx-1], 1e-30)) );
lw[pidx] = exp(pw+log_p+log_li[pidx]-log_pi_k);
lw2[pidx] = exp(pw+log_p -log_pi_k);
//safety checks for numerical errors
if(isnan(lw[pidx]) || isinf(lw[pidx]) || isnan(pos[idx]) || isinf(pos[idx]) || isnan(lw2[pidx]) || isinf(lw2[pidx])) {
lw[pidx] = 0;
lw2[pidx] = 0;
pos[idx] = mup;
posc[pidx] = mup;
}
}
}
}
//one thread per trial <<< nTrials,1 >>>
__global__ void kcNormalizeWeights(KC_FP_TYPE * y, KC_FP_TYPE * wt, KC_FP_TYPE * wt_p, KC_FP_TYPE * lw, KC_FP_TYPE * lw2, KC_FP_TYPE * nEff, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) {
int tr_num = blockIdx.x*blockDim.x + threadIdx.x;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
if(t < trLength) {
int row = trIdx[tr_num] + t;
//sum up and normalize weights
KC_FP_TYPE weightSum = 0;
KC_FP_TYPE weightSum2 = 0;
for(int p_num = 0; p_num < numParticles; p_num++) {
int pidx = tr_num*numParticles+p_num;
weightSum += lw[pidx];
weightSum2 += lw2[pidx];
}
KC_FP_TYPE n_eff_den = 0;
weightSum = fmax(weightSum,1e-20);
weightSum2 = fmax(weightSum2,1e-20);
for(int p_num = 0; p_num < numParticles; p_num++) {
int idx = TT*p_num + row;
int pidx = tr_num*numParticles+p_num;
wt[idx] = lw[pidx] /weightSum;
wt_p[pidx] = lw2[pidx]/weightSum2;
n_eff_den += wt[idx]*wt[idx];
cumsum[pidx] = (p_num>0)?(cumsum[pidx-1]+wt[idx]):(wt[idx]);//for resampling
}
nEff[tr_num] = 1/n_eff_den;
}
}
}
//initial calculation - probability of each spike count coming from a rate at the bound
__global__ void kcSetupLG(KC_FP_TYPE * y,KC_FP_TYPE * spe,KC_FP_TYPE * lg,KC_FP_TYPE g, KC_FP_TYPE dt,int TT) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < TT) {
lg[idx] = exp( -h(1,g, dt,spe[idx]) + y[idx]*log(fmax(h(1,g,dt,spe[idx]),1e-30)) - lgamma(y[idx]+1));
}
}
//one thread per particle <<< nTrials,nParticles >>>
// if particles look bad, resamples them from the distribution before the next step
__global__ void kcResampleParticles(KC_FP_TYPE * y, KC_FP_TYPE * pos, KC_FP_TYPE * posc, KC_FP_TYPE * wt, KC_FP_TYPE * log_li, KC_FP_TYPE * wt_p, int minEffParticles, KC_FP_TYPE * cumsum, KC_FP_TYPE * nEff, KC_FP_TYPE * randU, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * ncdf, int * trIdx, int NT, int TT, int numParticles, int t) {
int threadNum = blockIdx.x*blockDim.x + threadIdx.x;
int tr_num = (int)threadNum / (int)numParticles;
int p_num = threadNum % numParticles;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
if(t < trLength) {
int pidx = tr_num*numParticles+p_num;
int row = trIdx[tr_num] + t;
int idx = TT*p_num + row;
int pidx_new = pidx;
if(nEff[tr_num] < minEffParticles) {
int p_num_new;
for(p_num_new = 0; p_num_new < numParticles-1 && randU[pidx] > cumsum[numParticles*tr_num+p_num_new]; p_num_new++) {
//everything taken care of in loop statement
}
pidx_new = tr_num*numParticles+p_num_new;
wt[idx] = 1.0/(KC_FP_TYPE)numParticles; //weights are now uniform again
pos[idx] = posc[pidx_new];
}
KC_FP_TYPE wt_old = (t==0)?(1.0/(KC_FP_TYPE)numParticles):(wt[idx-1]);
p_cet_0[pidx] = (1.0-ncdf[idx])*wt_old;
p_cgt_0a[pidx] = exp(log_li[pidx])*wt_p[pidx]; //or pidx_new?
p_cgt_0b[pidx] = ncdf[idx]*wt_old;
}
}
}
//one thread per trial <<< nTrials,1 >>>
//move bound crossing probabilities forward in time
__global__ void kcPropogateBoundaryDensity(KC_FP_TYPE * y, KC_FP_TYPE * p_clt, KC_FP_TYPE * p_cet, KC_FP_TYPE * p_cgt, KC_FP_TYPE * p_clte, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * lg, KC_FP_TYPE * nEff, int minEffParticles, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) {
int tr_num = blockIdx.x*blockDim.x + threadIdx.x;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
if(t < trLength) {
int row = trIdx[tr_num] + t;
KC_FP_TYPE p_cet_s = 0;
KC_FP_TYPE p_cgt_sa = 0;
KC_FP_TYPE p_cgt_sb = 0;
for(int p_num = 0; p_num < numParticles; p_num++) {
int pidx = tr_num*numParticles+p_num;
//int idx = TT*p_num + row;
p_cet_s += p_cet_0[pidx];
p_cgt_sa += p_cgt_0a[pidx];
p_cgt_sb += p_cgt_0b[pidx];
//part of the resampler that must run after sampling so the parallelization stays correct; this is only used again if this is the last timestep in the trial
if(nEff[tr_num] < minEffParticles && t-1==trLength) {
cumsum[pidx] = 1/(KC_FP_TYPE)numParticles*(1+p_num);
}
}
KC_FP_TYPE p_clte_old = ((t==0)?(0):(p_clte[row-1]));
KC_FP_TYPE p_cgt_old = ((t==0)?(1):(p_cgt[row-1]));
KC_FP_TYPE p_clt_1 = lg[row]*p_clte_old;
KC_FP_TYPE p_cet_1 = lg[row]*(1.0-p_clte_old)*p_cet_s;
KC_FP_TYPE p_cgt_1 = (1.0-p_clte_old)*p_cgt_sa*p_cgt_sb;
p_cet[row] = p_cet_1/(p_clt_1+p_cet_1+p_cgt_1);
p_clte[row] = (p_cet_1+p_clt_1)/(p_clt_1+p_cet_1+p_cgt_1); //this is a little redundant, but I think it is convenient later?
p_clt[row] = p_clt_1/(p_clt_1+p_cet_1+p_cgt_1);
p_cgt[row] = p_cgt_1/(p_clt_1+p_cet_1+p_cgt_1);
p_cpr[row] = p_cgt_old*p_cet_s; //compare this index in MATLAB code
}
}
}
//Finally do that backwards sampling, <<< NT, 1 >>>
__global__ void kcBackwardsSample(KC_FP_TYPE * sample, int * crossingTimes, KC_FP_TYPE * pos, KC_FP_TYPE * wt, KC_FP_TYPE * ncdf, KC_FP_TYPE * b, int * betaIdx, KC_FP_TYPE l_0, KC_FP_TYPE w, KC_FP_TYPE g, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_clte, KC_FP_TYPE * randUp, KC_FP_TYPE * randUb, KC_FP_TYPE * wt_p, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) {
int tr_num = blockIdx.x*blockDim.x + threadIdx.x;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
int row = trIdx[tr_num] + t;
if(t == trLength-1) {
//if t=end of trial, start off the backwards sampling
crossingTimes[tr_num] = trLength;
//decide whether end trial has hit boundary
if(randUb[tr_num] < p_clte[row]) {
sample[row] = 1;
crossingTimes[tr_num] = t;
}
//else select a particle to be end of trial (cumsum holds the CDF of the distribution over particles)
else {
int p_num;
for(p_num = 0; p_num < numParticles-1 && randUp[tr_num] > cumsum[numParticles*tr_num+p_num]; p_num++) {
}
int idx = TT*p_num + row;
sample[row] = pos[idx];
}
}
else if(t < trLength-1 && t >= 0) {
//else, propgate backwards
//if previous sample had hit threshold
if(sample[row+1] >= 1) {
//if boundary already reached
if(randUb[tr_num] < p_clte[row]/(p_cpr[row+1] + p_clte[row])) {
crossingTimes[tr_num] = t;
sample[row] = 1;
}
//gets pre-crossing particle
else {
KC_FP_TYPE wtSum = 0;
int p_num;
for(p_num = 0; p_num < numParticles; p_num++) {
int idx = TT*p_num + row;
int pidx = tr_num*numParticles+p_num;
wt_p[pidx] = wt[idx]*fmax(1.0-ncdf[idx+1],1e-25);
wtSum += wt_p[pidx];
}
wtSum = fmax(wtSum,1e-30);
KC_FP_TYPE csum = wt_p[tr_num*numParticles+0]/wtSum;
for(p_num = 0; p_num < numParticles-1 && csum < randUp[tr_num]; p_num++) {
int pidx = tr_num*numParticles+p_num+1;
csum += wt_p[pidx]/wtSum;
}
int idx = TT*p_num + row;
sample[row] = pos[idx];
}
}
//else, samples a particle
else {
KC_FP_TYPE wtSum = 0;
int p_num;
for(p_num = 0; p_num < numParticles; p_num++) {
int idx = TT*p_num + row;
int pidx = tr_num*numParticles+p_num;
wt_p[pidx] = wt[idx]*exp(-0.5/w*pow( sample[row+1] - (pos[idx] + b[betaIdx[row]]),2 ));
wtSum += wt_p[pidx];
}
wtSum = fmax(wtSum,1e-30);
KC_FP_TYPE csum = wt_p[tr_num*numParticles+0]/wtSum;
for(p_num = 0; p_num < numParticles-1 && csum < randUp[tr_num]; p_num++) {
int pidx = tr_num*numParticles+p_num+1;
csum += wt_p[pidx]/wtSum;
}
int idx = TT*p_num + row;
sample[row] = pos[idx];
}
}
}
}
/*
Performs a forward sweep of the path after backwards sampling
Draws from prior for steps post-threshold crossing (for conjugate sampling of parameters)
Calculates some statistics for later sampling
trial number given by CUDA thread
*/
__global__ void kcForwardFinalPass( KC_FP_TYPE* lambda, const int * crossingTimes, const KC_FP_TYPE * randUni, const KC_FP_TYPE* b, const int * betaIndVec,const KC_FP_TYPE l_0, const KC_FP_TYPE w, const int* trIdx,const int NT, KC_FP_TYPE * beta_sum) {
int tr_num = blockIdx.x*blockDim.x+threadIdx.x;
if(tr_num < NT) {
int t_0 = trIdx[tr_num];
beta_sum[tr_num] = 0;
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
KC_FP_TYPE cb = b[betaIndVec[t_0]];
for(int t = 0; t < trLength; t++) {
if(t == crossingTimes[tr_num]) {
//samples the first value of lambda to cross the bound (truncated normal, > 1)
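//inverse-CDF draw from N(mu,w) truncated to values above 1: minS = Phi((1-mu)/sqrt(w)) is the mass below the
//bound, so mapping a uniform u through Phi^-1(minS + (1-minS)*u) yields a sample conditioned to exceed the bound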
KC_FP_TYPE mu = (t > 0)?(lambda[t_0 + t-1]+cb):l_0;
KC_FP_TYPE minS = normcdf((1-mu)/sqrt(w));
if(minS >= 1.0-1e-5) {
lambda[t_0 + t] = 1;
}
else {
lambda[t_0 + t] = mu+sqrt(w)*normcdfinv( minS + (1-minS)*randUni[t_0+t]);
}
}
else if(t > crossingTimes[tr_num]) {
lambda[t_0 + t] = lambda[t_0 + t - 1] + cb + KC_SQRT(w)*normcdfinv( randUni[t_0+t]);
}
beta_sum[tr_num] += (t>0 && t <= crossingTimes[tr_num])?(lambda[t_0 + t] - lambda[t_0 + t-1]):0; //only include lambdas up until first threshold crossing to look at drift rates
}
}
}
//single thread kernel to assemble stats of the ramps across trials for sampling beta,l_0
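//only diagonal entries of sigMat are touched here: entry (cb,cb) accumulates fmin(crossingTime,trLength-1)/w per
//trial and entry (numBetas,numBetas) accumulates 1/w, while muVec[cb] gathers beta_sum/w and muVec[numBetas]
//gathers lambda[t_0]/w, i.e. the 1/w-weighted statistics used later to sample the betas and l_0
//(the last row/column index corresponds to l_0)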
__global__ void kcAssembleSamplingStatistics(KC_FP_TYPE * sigMat, KC_FP_TYPE * muVec, const KC_FP_TYPE* lambda, const int * crossingTimes, const KC_FP_TYPE * beta_sum,const int*betaIndVec,const KC_FP_TYPE l_0, const KC_FP_TYPE w, const int* trIdx, const int NT, const int numBetas) {
int idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx == 0) {
for(int trNum = 0; trNum < NT; trNum++) {
int t_0 = trIdx[trNum];
int cb = betaIndVec[t_0];
int trLength = trIdx[trNum+1] - trIdx[trNum];
sigMat[(cb)*(numBetas+1) + cb] += fmin(1.0*crossingTimes[trNum],trLength-1.0)/w;
sigMat[(numBetas)*(numBetas+1) + numBetas] += 1.0/w;
muVec[cb] += beta_sum[trNum]/w;
muVec[numBetas] += lambda[t_0]/w;
}
}
}
//Samples a single set of latent paths from the ramping model for a set of trials given fixed parameters
//args
// 0 = new lambda (output, should be pre-allocated on GPU, same size as y)
// 1 = new auxiliary variable for threshold crossing (output, should be pre-allocated on GPU, vector of length number of trials)
// 2 = y (observations)
// 3 = trIdx (array of trial start indices into y, 0-indexed; includes a final entry equal to the length of y)
// 4 = betaIdxVector (array that gives the coherence used at each bin of y, i.e., accesses the beta value used at each timepoint. values begin at 0 instead of 1 to be consistent with C, unlike MATLAB)
// 5 = betas (the beta values)
// 6 = w (variance of diffusion process)
// 7 = l_0 (starting lambda value)
// 8 = g (absorbing boundary effective height)
// 9 = dt (bin/timestep size)
// 10 = numParticles
// 11 = minEffParticles (how many effective particles per trial to keep around)
// 12 = sigMult (used for particle proposals, proposal variance is sigMult*w)
// 13 = maxTrialLength
// 14 = beta/l_0 sampling vec param c (used as output for sampling betas, l_0)
// 15 = beta/l_0 sampling vec param p (used as output for sampling betas, l_0)
// 16 = spike history effect
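//note: nlhs/plhs are not used below; results are written in place into the GPU arrays passed as prhs[0]
//(lambda) and prhs[1] (crossing times), and the host vectors behind prhs[14]/prhs[15] are overwritten with
//the assembled beta/l_0 sampling statistics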
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
hipError_t ce;
hiprandStatus_t cre;
/*ce = hipSetDevice(KC_GPU_DEVICE);
if(ce != hipSuccess) {
mexPrintf("Error initializing device (kcParticleFilterProp.cu) ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}*/
//init data
unsigned int TT = kcGetArrayNumEl(prhs[0]);
KC_FP_TYPE * lambdaTarget = kcGetArrayData(prhs[0]);
int * auxiliaryTarget = kcGetArrayDataInt(prhs[1]);
KC_FP_TYPE * y = kcGetArrayData(prhs[2],TT);
int * trIdx = kcGetArrayDataInt(prhs[3]);
unsigned int NT = kcGetArrayNumEl(prhs[3])-1;
int * betaIdxVector = kcGetArrayDataInt(prhs[4]);
KC_FP_TYPE * b = mxGetPr(prhs[5]);
int numBetas = mxGetNumberOfElements(prhs[5]);
KC_FP_TYPE * b_gpu;
ce = hipMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas);
if(ce != hipSuccess) {
mexPrintf("Error allocating space for betas on GPU - first allocation in function (particle filter) ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
ce = hipMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,hipMemcpyHostToDevice);
if(ce != hipSuccess) {
mexPrintf("Error moving betas to GPU (particle filter) ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
KC_FP_TYPE w = mxGetScalar(prhs[6]);
KC_FP_TYPE l_0 = mxGetScalar(prhs[7]);
KC_FP_TYPE g = mxGetScalar(prhs[8]);
KC_FP_TYPE dt = mxGetScalar(prhs[9]);
int numParticles = mxGetScalar(prhs[10]);
int minEffParticles = mxGetScalar(prhs[11]);
int sigMult = mxGetScalar(prhs[12]);
int maxTrialLength = mxGetScalar(prhs[13]);
//load spike history effect
KC_FP_TYPE * spe = kcGetArrayData(prhs[16],TT);
//particle weights/probabilities of hitting the bound
KC_FP_TYPE * p_clte;
KC_FP_TYPE * p_cet;
KC_FP_TYPE * p_cgt;
KC_FP_TYPE * p_clt;
KC_FP_TYPE * p_cpr;
checkCudaErrors(hipMalloc((void**)&p_clte, TT*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&p_cet, TT*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&p_cgt, TT*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&p_clt, TT*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&p_cpr, TT*sizeof(KC_FP_TYPE)));
KC_FP_TYPE * wt;
KC_FP_TYPE * wt_p;
KC_FP_TYPE * pos;//particle positions
checkCudaErrors(hipMalloc((void**)&wt, (TT)*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&wt_p, (NT)*numParticles*sizeof(KC_FP_TYPE)));
ce = hipMalloc((void**)&pos, (TT)*numParticles*sizeof(KC_FP_TYPE));
if(ce != hipSuccess) {
mexPrintf("Error allocating pos ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
KC_FP_TYPE * log_li;
KC_FP_TYPE * posc; //for resampling
KC_FP_TYPE * lw; //unnormalized weights
KC_FP_TYPE * lw2;
KC_FP_TYPE * ncdf;
KC_FP_TYPE * p_cet_0;
KC_FP_TYPE * p_cgt_0a;
KC_FP_TYPE * p_cgt_0b;
KC_FP_TYPE * lg; //log p(y|at boundary)
KC_FP_TYPE * cumsum;
KC_FP_TYPE * beta_sum;
checkCudaErrors(hipMalloc((void**)&log_li, NT*numParticles*sizeof(KC_FP_TYPE)));
//checkCudaErrors(hipMalloc((void**)&log_lic, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&posc, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&lw, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&lw2, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&ncdf, TT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&p_cet_0, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&p_cgt_0a, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&p_cgt_0b, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&cumsum, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&beta_sum, NT*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&lg, TT*sizeof(KC_FP_TYPE)));
KC_FP_TYPE * nEff;
checkCudaErrors(hipMalloc((void**)&nEff, NT*sizeof(KC_FP_TYPE)));
int randSize = (NT*numParticles) + ((NT*numParticles)%2==0?0:1);
int randSizeS = (NT) + (NT%2==0?0:1);
int randSizeT = (TT) + (TT%2==0?0:1);
KC_FP_TYPE * randN;
KC_FP_TYPE * randNs;
KC_FP_TYPE * randTs;
ce = hipMalloc((void**)&randN, randSize *sizeof(KC_FP_TYPE));
if(ce != hipSuccess) {
mexPrintf("Error allocating randN ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
}
ce = hipMalloc((void**)&randNs, randSizeS*sizeof(KC_FP_TYPE));
if(ce != hipSuccess) {
mexPrintf("Error allocating randNs ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
}
ce = hipMalloc((void**)&randTs, randSizeT*sizeof(KC_FP_TYPE));
if(ce != hipSuccess) {
mexPrintf("Error allocating randTs ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
}
//setup the random number generator
hiprandGenerator_t curandGen = 0;
hiprandStatus_t curandStatus;
curandStatus = hiprandCreateGenerator(&curandGen, HIPRAND_RNG_PSEUDO_DEFAULT);
if(curandStatus != HIPRAND_STATUS_SUCCESS) {
char buffer [50];
sprintf(buffer, "Error initializing random number generator (%d).\n",(int)curandStatus);
mexErrMsgTxt(buffer);
}
struct timeval now;
gettimeofday(&now,NULL);
unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec);
curandStatus = hiprandSetPseudoRandomGeneratorSeed(curandGen, mySeed);
//curandStatus = hiprandSetPseudoRandomGeneratorSeed(curandGen, (unsigned int)time(NULL));
if(curandStatus != HIPRAND_STATUS_SUCCESS) {
char buffer [50];
sprintf(buffer, "Error random number seed (%d).\n",(int)curandStatus);
mexErrMsgTxt(buffer);
}
curandStatus = hiprandGenerateSeeds(curandGen);
if(curandStatus != HIPRAND_STATUS_SUCCESS) {
char buffer [50];
sprintf(buffer, "Error random number generating seed (%d).\n",(int)curandStatus);
mexErrMsgTxt(buffer);
}
//hipThreadSetLimit(hipLimitStackSize, 1024);
//setup initial particle positions
int blockSize , nBlocks;
int blockSizeT, nBlocksT;
int blockSizeN, nBlocksN;
blockSizeT = 4;
nBlocksT = TT/blockSizeT + ((TT%blockSizeT==0)?0:1);
blockSizeN = 1;
nBlocksN = NT/blockSizeN + ((NT%blockSizeN==0)?0:1);
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error before kcSetupLG ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//__global__ void kcSetupLG(KC_FP_TYPE * y,KC_FP_TYPE * spe,KC_FP_TYPE * lg,KC_FP_TYPE g, KC_FP_TYPE dt,int TT) {
hipLaunchKernelGGL(( kcSetupLG) , dim3(nBlocksT), dim3(blockSizeT) , 0, 0, y,spe,lg,g,dt,TT);
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error after kcSetupLG<<<%d,%d>>> ",nBlocksT,blockSizeT);
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
blockSize = 8;
int totalThreads = numParticles*NT;
nBlocks = totalThreads/blockSize + ((totalThreads%blockSize==0)?0:1);
//mexPrintf("Max trial length = %d, blockSizes = %d,%d, nBlocks = %d,%d\n", maxTrialLength,blockSize,blockSizeN,nBlocks,nBlocksN);
//forward pass loop
for (int ii = 0; ii < maxTrialLength;ii++) {
//move all particles forward
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN,randSize); //random sample steps for all particles
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
int currDev;
hipGetDevice(&currDev);
mexPrintf("Error synchronizing post-rand draw 1 Size=%d ii=%d, current device=%d ",randSize,ii,currDev);
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
if(cre != HIPRAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in particle propogation. Size=%d ii=%d ",randSize,ii);
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
hipLaunchKernelGGL(( kcMoveParticles) , dim3(nBlocks), dim3(blockSize) , 0, 0, y,spe,pos,wt, b_gpu,betaIdxVector,l_0,g,w,dt,randN, sigMult,log_li,lw,lw2,ncdf, posc, trIdx, NT, TT, numParticles, ii);
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
int currDev;
hipGetDevice(&currDev);
mexPrintf("Error after kcMoveParticles<<<%d,%d>>> ii=%d/%d, dev=%d ",nBlocks,blockSize,ii,maxTrialLength,currDev);
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//normalize weights
hipLaunchKernelGGL(( kcNormalizeWeights) , dim3(nBlocksN),dim3(blockSizeN) , 0, 0, y,wt,wt_p, lw, lw2, nEff, cumsum, trIdx, NT, TT, numParticles, ii);
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error after kcNormalizeWeights<<<%d,%d>>> ii=%d/%d ",nBlocksN,blockSizeN,ii,maxTrialLength);
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//check effective num particles, resample when necessary
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN, randSize);
if(cre != HIPRAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in resampler. ii=%d/%d ",ii,maxTrialLength);
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
hipLaunchKernelGGL(( kcResampleParticles) , dim3(nBlocks), dim3(blockSize) , 0, 0, y,pos,posc,wt,log_li,wt_p, minEffParticles,cumsum,nEff,randN,p_cet_0,p_cgt_0a,p_cgt_0b,ncdf,trIdx, NT, TT, numParticles, ii);
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error after kcResampleParticles<<<%d,%d>>> ii=%d/%d ",nBlocks,blockSize,ii,maxTrialLength);
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//move passage density forward
//__global__ void kcPropogateBoundaryDensity(KC_FP_TYPE * y, KC_FP_TYPE * p_clt, KC_FP_TYPE * p_cet, KC_FP_TYPE * p_cgt, KC_FP_TYPE * p_clte, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * lg, int * trIdx, KC_FP_TYPE * nEff, int minEffParticles, KC_FP_TYPE * cumsum, int t, int NT, int TT, int numParticles) {
hipLaunchKernelGGL(( kcPropogateBoundaryDensity) , dim3(nBlocksN),dim3(blockSizeN) , 0, 0, y,p_clt,p_cet,p_cgt,p_clte,p_cpr,p_cet_0,p_cgt_0a, p_cgt_0b, lg, nEff, minEffParticles, cumsum,trIdx, NT, TT, numParticles, ii);
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error after kcPropogateBoundaryDensity<<<%d,%d>>> ii=%d/%d ",nBlocksN,blockSizeN,ii,maxTrialLength);
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
}
//backwards sample the particles
for (int jj = maxTrialLength-1; jj >= 0; jj--) {
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN, randSizeS);
if(cre != HIPRAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in backwards sampler (1). jj=%d/%d ",jj,maxTrialLength);
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randNs,randSizeS);
//ce = hipDeviceSynchronize();
if(cre != HIPRAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in backwards sampler (2). jj=%d/%d ",jj,maxTrialLength);
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error synchronizing before kcBackwardsSample (post random generation) jj=%d/%d ",jj,maxTrialLength);
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
hipLaunchKernelGGL(( kcBackwardsSample) , dim3(nBlocksN),dim3(blockSizeN) , 0, 0, lambdaTarget, auxiliaryTarget, pos, wt, ncdf, b_gpu, betaIdxVector, l_0, w, g, p_cpr, p_clte, randN, randNs, wt_p, cumsum, trIdx, NT, TT, numParticles, jj);
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error after kcBackwardsSample<<<%d,%d>>> jj=%d/%d ",nBlocksN,blockSizeN,jj,maxTrialLength);
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
}
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randTs, randSizeT);
//ce = hipDeviceSynchronize();
if(cre != HIPRAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in final sampler (2). ");
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error synchronizing before kcForwardFinalPass (post random generation) ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//samples all latent variables beyond bound hit time
hipLaunchKernelGGL(( kcForwardFinalPass) , dim3(nBlocksN),dim3(blockSizeN) , 0, 0, lambdaTarget, auxiliaryTarget, randTs, b_gpu, betaIdxVector, l_0, w, trIdx, NT, beta_sum);
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error after kcForwardFinalPass ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//gets some statistics about the latent variables put together to be able to sample the drift rates
KC_FP_TYPE * sampling_c;
KC_FP_TYPE * sampling_p;
checkCudaErrors(hipMalloc((void**)&sampling_c, sizeof(KC_FP_TYPE)*(numBetas+1)));
checkCudaErrors(hipMalloc((void**)&sampling_p, sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1)));
checkCudaErrors(hipMemcpy(sampling_c,(KC_FP_TYPE*)mxGetPr(prhs[14]), sizeof(KC_FP_TYPE)*(numBetas+1),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(sampling_p,(KC_FP_TYPE*)mxGetPr(prhs[15]), sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kcAssembleSamplingStatistics), dim3(1),dim3(1), 0, 0, sampling_p, sampling_c, lambdaTarget, auxiliaryTarget, beta_sum,betaIdxVector,l_0, w, trIdx, NT, numBetas);
checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(prhs[14]),sampling_c, sizeof(KC_FP_TYPE)*(numBetas+1),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(prhs[15]),sampling_p, sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1),hipMemcpyDeviceToHost));
//free up memory
cre = hiprandDestroyGenerator(curandGen);
if(cre != HIPRAND_STATUS_SUCCESS) {
mexPrintf("Error destroying rand generator (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error synchronizing post-rand generator destruction (particleFilter) ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
checkCudaErrors(hipFree(b_gpu));
checkCudaErrors(hipFree(p_clte));
checkCudaErrors(hipFree(p_cet));
checkCudaErrors(hipFree(p_cgt));
checkCudaErrors(hipFree(p_clt));
checkCudaErrors(hipFree(p_cpr));
checkCudaErrors(hipFree(pos));
checkCudaErrors(hipFree(wt));
ce = hipFree(wt_p);
if(ce != hipSuccess) {
mexPrintf("Error freeing memory in particle filter (wt_p) ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
checkCudaErrors(hipFree(log_li));
checkCudaErrors(hipFree(posc));
checkCudaErrors(hipFree(lw));
checkCudaErrors(hipFree(lw2));
checkCudaErrors(hipFree(ncdf));
checkCudaErrors(hipFree(p_cet_0));
checkCudaErrors(hipFree(p_cgt_0a));
checkCudaErrors(hipFree(p_cgt_0b));
checkCudaErrors(hipFree(lg));
checkCudaErrors(hipFree(cumsum));
checkCudaErrors(hipFree(beta_sum));
checkCudaErrors(hipFree(sampling_c));
checkCudaErrors(hipFree(sampling_p));
checkCudaErrors(hipFree(nEff));
checkCudaErrors(hipFree(randN));
checkCudaErrors(hipFree(randNs));
checkCudaErrors(hipFree(randTs));
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error at the end ofthe particle filter ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
}
| f74549b8d4d29c5718348aac2b097e52eb9144b5.cu |
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cuda_runtime.h>
#include <cusparse_v2.h>
#include "cublas_v2.h"
#include <curand.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "mex.h"
#include "kcDefs.h" //see for info on anything starting with KC_
//KC_FP_TYPE can be assumed to mean "double", but originally
//this definition could also work with "float" for faster speed.
//float compatability is no longer supported in this function.
#include "kcArrayFunctions.h"
#define MAX_P 1e25
#define MIN_P 1e-25
__device__ KC_FP_TYPE positiveBound(KC_FP_TYPE a) {
//return a;
if(isinf(a))
return MAX_P;
else
return fmin(fmax(a,MIN_P),MAX_P);
}
__device__ KC_FP_TYPE h(KC_FP_TYPE z, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE sh) {
return KC_MAX(MIN_P,KC_MIN(exp(z*gamma+sh)*dt,MAX_P));
}
//one thread per particle <<< nTrials,nParticles >>>
__global__ void kcMoveParticles(KC_FP_TYPE * y, KC_FP_TYPE * spe, KC_FP_TYPE * pos, KC_FP_TYPE * wt, KC_FP_TYPE * b, int * betaIdxVector, KC_FP_TYPE l_0, KC_FP_TYPE g, KC_FP_TYPE w, KC_FP_TYPE dt, KC_FP_TYPE * randN, KC_FP_TYPE sigMult, KC_FP_TYPE * log_li, KC_FP_TYPE * lw, KC_FP_TYPE * lw2, KC_FP_TYPE * ncdf, KC_FP_TYPE * posc, int * trIdx, int NT, int TT, int numParticles, int t) {
int threadNum = blockIdx.x*blockDim.x + threadIdx.x;
int tr_num = (int)threadNum / (int)numParticles;
int p_num = threadNum % numParticles;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
if(t < trLength) {
int row = trIdx[tr_num] + t;
int idx = TT*p_num + row;
int pidx = tr_num*numParticles+p_num;
KC_FP_TYPE cb = b[betaIdxVector[row]];
KC_FP_TYPE sw = sqrt(w);
KC_FP_TYPE mup = (t==0)?(l_0):(pos[idx-1]+cb);
KC_FP_TYPE mu = mup;
KC_FP_TYPE sig2 = sigMult*w;
KC_FP_TYPE sig = sqrt(sig2);
KC_FP_TYPE maxI = fmin(1.0-1e-20, fmax( normcdf((1.0-mu)/sig),1e-20 ));
pos[idx] = fmin(1.0-1e-20, normcdfinv(maxI*randN[pidx])*sig + mu);
posc[pidx] = pos[idx];
KC_FP_TYPE dpos = pos[idx]-mu;
KC_FP_TYPE log_pi_k = -log(maxI)-0.5*log(2.0*M_PI*sig2) - 0.5/sig2*(dpos*dpos);
//to be stored for each particle: ncdf, lw, lw2
ncdf[idx] = normcdf((1-mup)/sw);
KC_FP_TYPE dposp = pos[idx]-mup;
KC_FP_TYPE log_p = -0*log(maxI) -0.5*log(2*M_PI*w)- 0.5/w*(dposp*dposp);
log_li[pidx] = -h(pos[idx],g,dt,spe[row])+y[row]*(log(fmax(h(pos[idx],g,1.0,spe[row]),1e-30))+log(dt))-lgamma(y[row]+1);
KC_FP_TYPE pw = (t==0)?(log(1/(KC_FP_TYPE)numParticles) ):( log(fmax(wt[idx-1], 1e-30)) );
lw[pidx] = exp(pw+log_p+log_li[pidx]-log_pi_k);
lw2[pidx] = exp(pw+log_p -log_pi_k);
//safety checks for numerical errors
if(isnan(lw[pidx]) || isinf(lw[pidx]) || isnan(pos[idx]) || isinf(pos[idx]) || isnan(lw2[pidx]) || isinf(lw2[pidx])) {
lw[pidx] = 0;
lw2[pidx] = 0;
pos[idx] = mup;
posc[pidx] = mup;
}
}
}
}
//one thread per trial <<< nTrials,1 >>>
__global__ void kcNormalizeWeights(KC_FP_TYPE * y, KC_FP_TYPE * wt, KC_FP_TYPE * wt_p, KC_FP_TYPE * lw, KC_FP_TYPE * lw2, KC_FP_TYPE * nEff, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) {
int tr_num = blockIdx.x*blockDim.x + threadIdx.x;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
if(t < trLength) {
int row = trIdx[tr_num] + t;
//sum up and normalize weights
KC_FP_TYPE weightSum = 0;
KC_FP_TYPE weightSum2 = 0;
for(int p_num = 0; p_num < numParticles; p_num++) {
int pidx = tr_num*numParticles+p_num;
weightSum += lw[pidx];
weightSum2 += lw2[pidx];
}
KC_FP_TYPE n_eff_den = 0;
weightSum = fmax(weightSum,1e-20);
weightSum2 = fmax(weightSum2,1e-20);
for(int p_num = 0; p_num < numParticles; p_num++) {
int idx = TT*p_num + row;
int pidx = tr_num*numParticles+p_num;
wt[idx] = lw[pidx] /weightSum;
wt_p[pidx] = lw2[pidx]/weightSum2;
n_eff_den += wt[idx]*wt[idx];
cumsum[pidx] = (p_num>0)?(cumsum[pidx-1]+wt[idx]):(wt[idx]);//for resampling
}
nEff[tr_num] = 1/n_eff_den;
}
}
}
//initial calculation - probability of each spike count coming from a rate at the bound
__global__ void kcSetupLG(KC_FP_TYPE * y,KC_FP_TYPE * spe,KC_FP_TYPE * lg,KC_FP_TYPE g, KC_FP_TYPE dt,int TT) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < TT) {
lg[idx] = exp( -h(1,g, dt,spe[idx]) + y[idx]*log(fmax(h(1,g,dt,spe[idx]),1e-30)) - lgamma(y[idx]+1));
}
}
//one thread per particle <<< nTrials,nParticles >>>
// if particles look bad, resamples them from the distribution before the next step
__global__ void kcResampleParticles(KC_FP_TYPE * y, KC_FP_TYPE * pos, KC_FP_TYPE * posc, KC_FP_TYPE * wt, KC_FP_TYPE * log_li, KC_FP_TYPE * wt_p, int minEffParticles, KC_FP_TYPE * cumsum, KC_FP_TYPE * nEff, KC_FP_TYPE * randU, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * ncdf, int * trIdx, int NT, int TT, int numParticles, int t) {
int threadNum = blockIdx.x*blockDim.x + threadIdx.x;
int tr_num = (int)threadNum / (int)numParticles;
int p_num = threadNum % numParticles;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
if(t < trLength) {
int pidx = tr_num*numParticles+p_num;
int row = trIdx[tr_num] + t;
int idx = TT*p_num + row;
int pidx_new = pidx;
if(nEff[tr_num] < minEffParticles) {
int p_num_new;
for(p_num_new = 0; p_num_new < numParticles-1 && randU[pidx] > cumsum[numParticles*tr_num+p_num_new]; p_num_new++) {
//everything taken care of in loop statement
}
pidx_new = tr_num*numParticles+p_num_new;
wt[idx] = 1.0/(KC_FP_TYPE)numParticles; //weights are now uniform again
pos[idx] = posc[pidx_new];
}
KC_FP_TYPE wt_old = (t==0)?(1.0/(KC_FP_TYPE)numParticles):(wt[idx-1]);
p_cet_0[pidx] = (1.0-ncdf[idx])*wt_old;
p_cgt_0a[pidx] = exp(log_li[pidx])*wt_p[pidx]; //or pidx_new?
p_cgt_0b[pidx] = ncdf[idx]*wt_old;
}
}
}
//one thread per trial <<< nTrials,1 >>>
//move bound crossing probabilities forward in time
__global__ void kcPropogateBoundaryDensity(KC_FP_TYPE * y, KC_FP_TYPE * p_clt, KC_FP_TYPE * p_cet, KC_FP_TYPE * p_cgt, KC_FP_TYPE * p_clte, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * lg, KC_FP_TYPE * nEff, int minEffParticles, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) {
int tr_num = blockIdx.x*blockDim.x + threadIdx.x;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
if(t < trLength) {
int row = trIdx[tr_num] + t;
KC_FP_TYPE p_cet_s = 0;
KC_FP_TYPE p_cgt_sa = 0;
KC_FP_TYPE p_cgt_sb = 0;
for(int p_num = 0; p_num < numParticles; p_num++) {
int pidx = tr_num*numParticles+p_num;
//int idx = TT*p_num + row;
p_cet_s += p_cet_0[pidx];
p_cgt_sa += p_cgt_0a[pidx];
p_cgt_sb += p_cgt_0b[pidx];
//finishes part of the resampler that must run post-sampling so the parallelization stays correct; this is only used again if this is the last timestep in the trial
if(nEff[tr_num] < minEffParticles && t==trLength-1) {
cumsum[pidx] = 1/(KC_FP_TYPE)numParticles*(1+p_num);
}
}
KC_FP_TYPE p_clte_old = ((t==0)?(0):(p_clte[row-1]));
KC_FP_TYPE p_cgt_old = ((t==0)?(1):(p_cgt[row-1]));
KC_FP_TYPE p_clt_1 = lg[row]*p_clte_old;
KC_FP_TYPE p_cet_1 = lg[row]*(1.0-p_clte_old)*p_cet_s;
KC_FP_TYPE p_cgt_1 = (1.0-p_clte_old)*p_cgt_sa*p_cgt_sb;
p_cet[row] = p_cet_1/(p_clt_1+p_cet_1+p_cgt_1);
p_clte[row] = (p_cet_1+p_clt_1)/(p_clt_1+p_cet_1+p_cgt_1); //this is a little redundant, but I think it is convenient later?
p_clt[row] = p_clt_1/(p_clt_1+p_cet_1+p_cgt_1);
p_cgt[row] = p_cgt_1/(p_clt_1+p_cet_1+p_cgt_1);
p_cpr[row] = p_cgt_old*p_cet_s; //compare this index in MATLAB code
}
}
}
//Finally do that backwards sampling, <<< NT, 1 >>>
__global__ void kcBackwardsSample(KC_FP_TYPE * sample, int * crossingTimes, KC_FP_TYPE * pos, KC_FP_TYPE * wt, KC_FP_TYPE * ncdf, KC_FP_TYPE * b, int * betaIdx, KC_FP_TYPE l_0, KC_FP_TYPE w, KC_FP_TYPE g, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_clte, KC_FP_TYPE * randUp, KC_FP_TYPE * randUb, KC_FP_TYPE * wt_p, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) {
int tr_num = blockIdx.x*blockDim.x + threadIdx.x;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
int row = trIdx[tr_num] + t;
if(t == trLength-1) {
//if t=end of trial, start off the backwards sampling
crossingTimes[tr_num] = trLength;
//decide whether end trial has hit boundary
if(randUb[tr_num] < p_clte[row]) {
sample[row] = 1;
crossingTimes[tr_num] = t;
}
//else select a particle to be end of trial (cumsum holds the CDF of the distribution over particles)
else {
int p_num;
for(p_num = 0; p_num < numParticles-1 && randUp[tr_num] > cumsum[numParticles*tr_num+p_num]; p_num++) {
}
int idx = TT*p_num + row;
sample[row] = pos[idx];
}
}
else if(t < trLength-1 && t >= 0) {
//else, propagate backwards
//if previous sample had hit threshold
if(sample[row+1] >= 1) {
//if boundary already reached
if(randUb[tr_num] < p_clte[row]/(p_cpr[row+1] + p_clte[row])) {
crossingTimes[tr_num] = t;
sample[row] = 1;
}
//gets pre-crossing particle
else {
KC_FP_TYPE wtSum = 0;
int p_num;
for(p_num = 0; p_num < numParticles; p_num++) {
int idx = TT*p_num + row;
int pidx = tr_num*numParticles+p_num;
wt_p[pidx] = wt[idx]*fmax(1.0-ncdf[idx+1],1e-25);
wtSum += wt_p[pidx];
}
wtSum = fmax(wtSum,1e-30);
KC_FP_TYPE csum = wt_p[tr_num*numParticles+0]/wtSum;
for(p_num = 0; p_num < numParticles-1 && csum < randUp[tr_num]; p_num++) {
int pidx = tr_num*numParticles+p_num+1;
csum += wt_p[pidx]/wtSum;
}
int idx = TT*p_num + row;
sample[row] = pos[idx];
}
}
//else, samples a particle
else {
KC_FP_TYPE wtSum = 0;
int p_num;
for(p_num = 0; p_num < numParticles; p_num++) {
int idx = TT*p_num + row;
int pidx = tr_num*numParticles+p_num;
wt_p[pidx] = wt[idx]*exp(-0.5/w*pow( sample[row+1] - (pos[idx] + b[betaIdx[row]]),2 ));
wtSum += wt_p[pidx];
}
wtSum = fmax(wtSum,1e-30);
KC_FP_TYPE csum = wt_p[tr_num*numParticles+0]/wtSum;
for(p_num = 0; p_num < numParticles-1 && csum < randUp[tr_num]; p_num++) {
int pidx = tr_num*numParticles+p_num+1;
csum += wt_p[pidx]/wtSum;
}
int idx = TT*p_num + row;
sample[row] = pos[idx];
}
}
}
}
/*
Performs a forward sweep of the path after backwards sampling
Draws from prior for steps post-threshold crossing (for conjugate sampling of parameters)
Calculates some statistics for later sampling
trial number given by CUDA thread
*/
__global__ void kcForwardFinalPass( KC_FP_TYPE* lambda, const int * crossingTimes, const KC_FP_TYPE * randUni, const KC_FP_TYPE* b, const int * betaIndVec,const KC_FP_TYPE l_0, const KC_FP_TYPE w, const int* trIdx,const int NT, KC_FP_TYPE * beta_sum) {
int tr_num = blockIdx.x*blockDim.x+threadIdx.x;
if(tr_num < NT) {
int t_0 = trIdx[tr_num];
beta_sum[tr_num] = 0;
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
KC_FP_TYPE cb = b[betaIndVec[t_0]];
for(int t = 0; t < trLength; t++) {
if(t == crossingTimes[tr_num]) {
//samples the first value of lambda to cross the bound (truncated normal, > 1)
KC_FP_TYPE mu = (t > 0)?(lambda[t_0 + t-1]+cb):l_0;
KC_FP_TYPE minS = normcdf((1-mu)/sqrt(w));
if(minS >= 1.0-1e-5) {
lambda[t_0 + t] = 1;
}
else {
lambda[t_0 + t] = mu+sqrt(w)*normcdfinv( minS + (1-minS)*randUni[t_0+t]);
}
}
else if(t > crossingTimes[tr_num]) {
lambda[t_0 + t] = lambda[t_0 + t - 1] + cb + KC_SQRT(w)*normcdfinv( randUni[t_0+t]);
}
beta_sum[tr_num] += (t>0 && t <= crossingTimes[tr_num])?(lambda[t_0 + t] - lambda[t_0 + t-1]):0; //only include lambdas up until first threshold crossing to look at drift rates
}
}
}
//single thread kernel to assemble stats of the ramps across trials for sampling beta,l_0
__global__ void kcAssembleSamplingStatistics(KC_FP_TYPE * sigMat, KC_FP_TYPE * muVec, const KC_FP_TYPE* lambda, const int * crossingTimes, const KC_FP_TYPE * beta_sum,const int*betaIndVec,const KC_FP_TYPE l_0, const KC_FP_TYPE w, const int* trIdx, const int NT, const int numBetas) {
int idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx == 0) {
for(int trNum = 0; trNum < NT; trNum++) {
int t_0 = trIdx[trNum];
int cb = betaIndVec[t_0];
int trLength = trIdx[trNum+1] - trIdx[trNum];
sigMat[(cb)*(numBetas+1) + cb] += fmin(1.0*crossingTimes[trNum],trLength-1.0)/w;
sigMat[(numBetas)*(numBetas+1) + numBetas] += 1.0/w;
muVec[cb] += beta_sum[trNum]/w;
muVec[numBetas] += lambda[t_0]/w;
}
}
}
//Samples a single set of latent paths from the ramping model for a set of trials given fixed parameters
//args
// 0 = new lambda (output, should be pre-allocated on GPU, same size as y)
// 1 = new auxiliary variable for threshold crossing (output, should be pre-allocated on GPU, vector of length number of trials)
// 2 = y (observations)
// 3 = trIdx (array of trial start indices into y, 0-indexed; includes a final entry equal to the length of y)
// 4 = betaIdxVector (array that gives the coherence used at each bin of y, i.e., accesses the beta value used at each timepoint. values begin at 0 instead of 1 to be consistent with C, unlike MATLAB)
// 5 = betas (the beta values)
// 6 = w (variance of diffusion process)
// 7 = l_0 (starting lambda value)
// 8 = g (absorbing boundary effective height)
// 9 = dt (bin/timestep size)
// 10 = numParticles
// 11 = minEffParticles (how many effective particles per trial to keep around)
// 12 = sigMult (used for particle proposals, proposal variance is sigMult*w)
// 13 = maxTrialLength
// 14 = beta/l_0 sampling vec param c (used as output for sampling betas, l_0)
// 15 = beta/l_0 sampling vec param p (used as output for sampling betas, l_0)
// 16 = spike history effect
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
cudaError_t ce;
curandStatus_t cre;
/*ce = cudaSetDevice(KC_GPU_DEVICE);
if(ce != cudaSuccess) {
mexPrintf("Error initializing device (kcParticleFilterProp.cu) ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}*/
//init data
unsigned int TT = kcGetArrayNumEl(prhs[0]);
KC_FP_TYPE * lambdaTarget = kcGetArrayData(prhs[0]);
int * auxiliaryTarget = kcGetArrayDataInt(prhs[1]);
KC_FP_TYPE * y = kcGetArrayData(prhs[2],TT);
int * trIdx = kcGetArrayDataInt(prhs[3]);
unsigned int NT = kcGetArrayNumEl(prhs[3])-1;
int * betaIdxVector = kcGetArrayDataInt(prhs[4]);
KC_FP_TYPE * b = mxGetPr(prhs[5]);
int numBetas = mxGetNumberOfElements(prhs[5]);
KC_FP_TYPE * b_gpu;
ce = cudaMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas);
if(ce != cudaSuccess) {
mexPrintf("Error allocating space for betas on GPU - first allocation in function (particle filter) ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
ce = cudaMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,cudaMemcpyHostToDevice);
if(ce != cudaSuccess) {
mexPrintf("Error moving betas to GPU (particle filter) ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
KC_FP_TYPE w = mxGetScalar(prhs[6]);
KC_FP_TYPE l_0 = mxGetScalar(prhs[7]);
KC_FP_TYPE g = mxGetScalar(prhs[8]);
KC_FP_TYPE dt = mxGetScalar(prhs[9]);
int numParticles = mxGetScalar(prhs[10]);
int minEffParticles = mxGetScalar(prhs[11]);
int sigMult = mxGetScalar(prhs[12]);
int maxTrialLength = mxGetScalar(prhs[13]);
//load spike history effect
KC_FP_TYPE * spe = kcGetArrayData(prhs[16],TT);
//particle weights/probabilities of hitting the bound
KC_FP_TYPE * p_clte;
KC_FP_TYPE * p_cet;
KC_FP_TYPE * p_cgt;
KC_FP_TYPE * p_clt;
KC_FP_TYPE * p_cpr;
checkCudaErrors(cudaMalloc((void**)&p_clte, TT*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&p_cet, TT*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&p_cgt, TT*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&p_clt, TT*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&p_cpr, TT*sizeof(KC_FP_TYPE)));
KC_FP_TYPE * wt;
KC_FP_TYPE * wt_p;
KC_FP_TYPE * pos;//particle positions
checkCudaErrors(cudaMalloc((void**)&wt, (TT)*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&wt_p, (NT)*numParticles*sizeof(KC_FP_TYPE)));
ce = cudaMalloc((void**)&pos, (TT)*numParticles*sizeof(KC_FP_TYPE));
if(ce != cudaSuccess) {
mexPrintf("Error allocating pos ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
KC_FP_TYPE * log_li;
KC_FP_TYPE * posc; //for resampling
KC_FP_TYPE * lw; //unnormalized weights
KC_FP_TYPE * lw2;
KC_FP_TYPE * ncdf;
KC_FP_TYPE * p_cet_0;
KC_FP_TYPE * p_cgt_0a;
KC_FP_TYPE * p_cgt_0b;
KC_FP_TYPE * lg; //log p(y|at boundary)
KC_FP_TYPE * cumsum;
KC_FP_TYPE * beta_sum;
checkCudaErrors(cudaMalloc((void**)&log_li, NT*numParticles*sizeof(KC_FP_TYPE)));
//checkCudaErrors(cudaMalloc((void**)&log_lic, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&posc, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&lw, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&lw2, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&ncdf, TT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&p_cet_0, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&p_cgt_0a, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&p_cgt_0b, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&cumsum, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&beta_sum, NT*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&lg, TT*sizeof(KC_FP_TYPE)));
KC_FP_TYPE * nEff;
checkCudaErrors(cudaMalloc((void**)&nEff, NT*sizeof(KC_FP_TYPE)));
int randSize = (NT*numParticles) + ((NT*numParticles)%2==0?0:1);
int randSizeS = (NT) + (NT%2==0?0:1);
int randSizeT = (TT) + (TT%2==0?0:1);
KC_FP_TYPE * randN;
KC_FP_TYPE * randNs;
KC_FP_TYPE * randTs;
ce = cudaMalloc((void**)&randN, randSize *sizeof(KC_FP_TYPE));
if(ce != cudaSuccess) {
mexPrintf("Error allocating randN ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
}
ce = cudaMalloc((void**)&randNs, randSizeS*sizeof(KC_FP_TYPE));
if(ce != cudaSuccess) {
mexPrintf("Error allocating randNs ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
}
ce = cudaMalloc((void**)&randTs, randSizeT*sizeof(KC_FP_TYPE));
if(ce != cudaSuccess) {
mexPrintf("Error allocating randTs ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
}
//setup the random number generator
curandGenerator_t curandGen = 0;
curandStatus_t curandStatus;
curandStatus = curandCreateGenerator(&curandGen, CURAND_RNG_PSEUDO_DEFAULT);
if(curandStatus != CURAND_STATUS_SUCCESS) {
char buffer [50];
sprintf(buffer, "Error initializing random number generator (%d).\n",(int)curandStatus);
mexErrMsgTxt(buffer);
}
struct timeval now;
gettimeofday(&now,NULL);
unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec);
curandStatus = curandSetPseudoRandomGeneratorSeed(curandGen, mySeed);
//curandStatus = curandSetPseudoRandomGeneratorSeed(curandGen, (unsigned int)time(NULL));
if(curandStatus != CURAND_STATUS_SUCCESS) {
char buffer [50];
sprintf(buffer, "Error random number seed (%d).\n",(int)curandStatus);
mexErrMsgTxt(buffer);
}
curandStatus = curandGenerateSeeds(curandGen);
if(curandStatus != CURAND_STATUS_SUCCESS) {
char buffer [50];
sprintf(buffer, "Error random number generating seed (%d).\n",(int)curandStatus);
mexErrMsgTxt(buffer);
}
//cudaThreadSetLimit(cudaLimitStackSize, 1024);
//setup initial particle positions
int blockSize , nBlocks;
int blockSizeT, nBlocksT;
int blockSizeN, nBlocksN;
blockSizeT = 4;
nBlocksT = TT/blockSizeT + ((TT%blockSizeT==0)?0:1);
blockSizeN = 1;
nBlocksN = NT/blockSizeN + ((NT%blockSizeN==0)?0:1);
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error before kcSetupLG ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//__global__ void kcSetupLG(KC_FP_TYPE * y,KC_FP_TYPE * spe,KC_FP_TYPE * lg,KC_FP_TYPE g, KC_FP_TYPE dt,int TT) {
kcSetupLG <<< nBlocksT, blockSizeT >>> (y,spe,lg,g,dt,TT);
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error after kcSetupLG<<<%d,%d>>> ",nBlocksT,blockSizeT);
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
blockSize = 8;
int totalThreads = numParticles*NT;
nBlocks = totalThreads/blockSize + ((totalThreads%blockSize==0)?0:1);
//mexPrintf("Max trial length = %d, blockSizes = %d,%d, nBlocks = %d,%d\n", maxTrialLength,blockSize,blockSizeN,nBlocks,nBlocksN);
//forward pass loop
for (int ii = 0; ii < maxTrialLength;ii++) {
//move all particles forward
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN,randSize); //random sample steps for all particles
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
int currDev;
cudaGetDevice(&currDev);
mexPrintf("Error synchronizing post-rand draw 1 Size=%d ii=%d, current device=%d ",randSize,ii,currDev);
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
if(cre != CURAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in particle propogation. Size=%d ii=%d ",randSize,ii);
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
kcMoveParticles <<< nBlocks, blockSize >>> (y,spe,pos,wt, b_gpu,betaIdxVector,l_0,g,w,dt,randN, sigMult,log_li,lw,lw2,ncdf, posc, trIdx, NT, TT, numParticles, ii);
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
int currDev;
cudaGetDevice(&currDev);
mexPrintf("Error after kcMoveParticles<<<%d,%d>>> ii=%d/%d, dev=%d ",nBlocks,blockSize,ii,maxTrialLength,currDev);
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//normalize weights
kcNormalizeWeights <<< nBlocksN,blockSizeN >>> (y,wt,wt_p, lw, lw2, nEff, cumsum, trIdx, NT, TT, numParticles, ii);
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error after kcNormalizeWeights<<<%d,%d>>> ii=%d/%d ",nBlocksN,blockSizeN,ii,maxTrialLength);
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//check effective num particles, resample when necessary
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN, randSize);
if(cre != CURAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in resampler. ii=%d/%d ",ii,maxTrialLength);
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
kcResampleParticles <<< nBlocks, blockSize >>> (y,pos,posc,wt,log_li,wt_p, minEffParticles,cumsum,nEff,randN,p_cet_0,p_cgt_0a,p_cgt_0b,ncdf,trIdx, NT, TT, numParticles, ii);
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error after kcResampleParticles<<<%d,%d>>> ii=%d/%d ",nBlocks,blockSize,ii,maxTrialLength);
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//move passage density forward
//__global__ void kcPropogateBoundaryDensity(KC_FP_TYPE * y, KC_FP_TYPE * p_clt, KC_FP_TYPE * p_cet, KC_FP_TYPE * p_cgt, KC_FP_TYPE * p_clte, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * lg, int * trIdx, KC_FP_TYPE * nEff, int minEffParticles, KC_FP_TYPE * cumsum, int t, int NT, int TT, int numParticles) {
kcPropogateBoundaryDensity <<< nBlocksN,blockSizeN >>> (y,p_clt,p_cet,p_cgt,p_clte,p_cpr,p_cet_0,p_cgt_0a, p_cgt_0b, lg, nEff, minEffParticles, cumsum,trIdx, NT, TT, numParticles, ii);
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error after kcPropogateBoundaryDensity<<<%d,%d>>> ii=%d/%d ",nBlocksN,blockSizeN,ii,maxTrialLength);
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
}
//backwards sample the particles
for (int jj = maxTrialLength-1; jj >= 0; jj--) {
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN, randSizeS);
if(cre != CURAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in backwards sampler (1). jj=%d/%d ",jj,maxTrialLength);
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randNs,randSizeS);
//ce = cudaDeviceSynchronize();
if(cre != CURAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in backwards sampler (2). jj=%d/%d ",jj,maxTrialLength);
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error synchronizing before kcBackwardsSample (post random generation) jj=%d/%d ",jj,maxTrialLength);
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
kcBackwardsSample <<< nBlocksN,blockSizeN >>> (lambdaTarget, auxiliaryTarget, pos, wt, ncdf, b_gpu, betaIdxVector, l_0, w, g, p_cpr, p_clte, randN, randNs, wt_p, cumsum, trIdx, NT, TT, numParticles, jj);
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error after kcBackwardsSample<<<%d,%d>>> jj=%d/%d ",nBlocksN,blockSizeN,jj,maxTrialLength);
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
}
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randTs, randSizeT);
//ce = cudaDeviceSynchronize();
if(cre != CURAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in final sampler (2). ");
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error synchronizing before kcForwardFinalPass (post random generation) ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//samples all latent variables beyond bound hit time
kcForwardFinalPass <<< nBlocksN,blockSizeN >>> (lambdaTarget, auxiliaryTarget, randTs, b_gpu, betaIdxVector, l_0, w, trIdx, NT, beta_sum);
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error after kcForwardFinalPass ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//gets some statistics about the latent variables put together to be able to sample the drift rates
KC_FP_TYPE * sampling_c;
KC_FP_TYPE * sampling_p;
checkCudaErrors(cudaMalloc((void**)&sampling_c, sizeof(KC_FP_TYPE)*(numBetas+1)));
checkCudaErrors(cudaMalloc((void**)&sampling_p, sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1)));
checkCudaErrors(cudaMemcpy(sampling_c,(KC_FP_TYPE*)mxGetPr(prhs[14]), sizeof(KC_FP_TYPE)*(numBetas+1),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(sampling_p,(KC_FP_TYPE*)mxGetPr(prhs[15]), sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1),cudaMemcpyHostToDevice));
kcAssembleSamplingStatistics<<<1,1>>>(sampling_p, sampling_c, lambdaTarget, auxiliaryTarget, beta_sum,betaIdxVector,l_0, w, trIdx, NT, numBetas);
checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(prhs[14]),sampling_c, sizeof(KC_FP_TYPE)*(numBetas+1),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(prhs[15]),sampling_p, sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1),cudaMemcpyDeviceToHost));
//free up memory
cre = curandDestroyGenerator(curandGen);
if(cre != CURAND_STATUS_SUCCESS) {
mexPrintf("Error destroying rand generator (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error synchronizing post-rand generator destruction (particleFilter) ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
checkCudaErrors(cudaFree(b_gpu));
checkCudaErrors(cudaFree(p_clte));
checkCudaErrors(cudaFree(p_cet));
checkCudaErrors(cudaFree(p_cgt));
checkCudaErrors(cudaFree(p_clt));
checkCudaErrors(cudaFree(p_cpr));
checkCudaErrors(cudaFree(pos));
checkCudaErrors(cudaFree(wt));
ce = cudaFree(wt_p);
if(ce != cudaSuccess) {
mexPrintf("Error freeing memory in particle filter (wt_p) ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
checkCudaErrors(cudaFree(log_li));
checkCudaErrors(cudaFree(posc));
checkCudaErrors(cudaFree(lw));
checkCudaErrors(cudaFree(lw2));
checkCudaErrors(cudaFree(ncdf));
checkCudaErrors(cudaFree(p_cet_0));
checkCudaErrors(cudaFree(p_cgt_0a));
checkCudaErrors(cudaFree(p_cgt_0b));
checkCudaErrors(cudaFree(lg));
checkCudaErrors(cudaFree(cumsum));
checkCudaErrors(cudaFree(beta_sum));
checkCudaErrors(cudaFree(sampling_c));
checkCudaErrors(cudaFree(sampling_p));
checkCudaErrors(cudaFree(nEff));
checkCudaErrors(cudaFree(randN));
checkCudaErrors(cudaFree(randNs));
checkCudaErrors(cudaFree(randTs));
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error at the end ofthe particle filter ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
}
|
11ae1e09e72c9abb4aba61206fa5c6a5c788fe1e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <cmath>
#include <ctime>
#include <locale.h>
#include <iostream>
#include <iomanip>
#include <omp.h>
#include <ctype.h>
#include <algorithm>
#include <vector>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include <hip/hip_runtime.h>
#include <cusparse_v2.h>
#include <rocblas.h>
#include <cublas_api.h>
#include <cusolverDn.h>
#include <helper_cuda.h>
#include <helper_cuda_drvapi.h>
#include <helper_functions.h>
#include <helper_cusolver.h>
#include <device_launch_parameters.h>
#include "GPU.h"
#define SK_Nev 1.e-6
#define STEP_LIMIT 100000
#define APPROX 1.e-15
#define eps_for_b 1.e-6
//#define MAXITER 200000
//#define MAXRESIDUE 1.e-10
void Debuger_for_matr(double * input, int rows, int columns, double* checking);
void GPU_mult(double* vec, int size, int *nnz, double* diag, int gpu_amount, double **d_A, int **d_B, int ** d_C, double* rezult, int maximumThreads);
int* split(int gpu_amount, double* A, int* B, int* C, int size, int non_zero, double **d_A, int ** d_B, int **d_C);
//void Debuger(double* input, int size);
void Debuger(double * input, int size, double* checking);
void printMatrix(int m, int n, const double*A, int lda, const char* name);
void GPU_mult_for_little_gradient(double* vec, int size, int* nnz, double* diag, int gpu_amount, double** d_val, int** d_col, int** d_row, double* rezult);
void show_eigen_value(double*input, int lanc_count);
//double* GPU_stab_bi_gradient_solver_with_preconditioner(double *val, int *col, int *row, double *right, double *diag, int nnz, int size);
/*ret_val << <1, 1 >> > (converge_temp,i,tmp5);
checkCudaErrors(hipMemcpy(tmp1, tmp5, sizeof(double), hipMemcpyDeviceToHost));
ret_val << <1, 1 >> > (eigen_values_gpu,int(*tmp1),tmp6);
element << <1, 1 >> > (converge_eig_val, *CONVERGE_AMOUNT, *tmp6);*/
//__global__ void rand_vec(double* input,int size)
//{
// int i = rand() % size;
// input[i] = 1;
//}
__global__ void copy_v(double* input, int size,double*where)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < size)
where[i] = input[i];
}
__global__ void add_to_converge_values(double* converge_temp,int i,double * eigen_values_gpu,double *converge_eig_val,int * CONVERGE_AMOUNT, double * tmp5)
{
*tmp5 = converge_temp[i];
converge_eig_val[(*CONVERGE_AMOUNT)+i] = eigen_values_gpu[int(converge_temp[i])];
}
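//convergence test for the Lanczos Ritz pairs: a Ritz value is accepted when the residual bound
//|beta_m * (last component of its eigenvector of the tridiagonal matrix)| falls below eps; the indices of
//converged values are recorded in converge_val_number and counted in converge_temp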
__global__ void proverb(double* eigvecT, double *beta_q, int amount_ev,double eps,int * converge_amount,double * converge_val_number,double * converge_temp)
{
/*int i = blockDim.x*blockIdx.x + threadIdx.x;*/
/*if (i < amount_ev)*/
for(int i=0;i<amount_ev;i++)
if (abs(eigvecT[amount_ev*i + (amount_ev - 1)] * (*beta_q)) <= eps)
{
converge_val_number[int(*converge_temp)] = i;
*converge_temp += 1;
//*converge_amount += 1;
}
}
__global__ void vec_mul_number(double* A, double value, int size, double* res)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < size)
{
res[i] = A[i] * (value);
}
}
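//assembles the dense (row-major, lanc_amount x lanc_amount) symmetric tridiagonal Lanczos matrix:
//a[] is placed on the diagonal and b[] on both off-diagonals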
__global__ void connect_diag_matr(double* matrix, double * a, double * b, int lanc_amount)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < lanc_amount)
{
matrix[i*lanc_amount + i] = a[i];
if (i != lanc_amount - 1)
{
matrix[i*lanc_amount+i+1] = b[i];
matrix[i*lanc_amount + i + lanc_amount] = b[i];
}
}
}
//__global__ void correlation(double* matrix, double *input_vector, int size, int count)
//{
//
//}
__global__ void reverse_for_eigen_values_lanc(double* input, int size)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < size)
{
input[i] = 1 / input[i];
}
}
__global__ void matr_add(double* main, int count, double *arr,int size)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < size)
main[count*size + i] = arr[i];
}
__global__ void return_vec(double* main, int count, int size,double * arr)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < size)
arr[i] = main[count*size + i];
}
__global__ void element(double* A, int i, double res)
{
A[i] = res;
}
__global__ void ret_val(double* A, int i, double *res)
{
*res = A[i];
}
__global__ void all_zero(int size, double* res)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < size)
{
res[i] = 0;
}
}
__global__ void vector_addition(double *input, int size,double* result)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < size)
{
result[i] += input[i];
}
}
__global__ void not_full_scalar(double* A, double* B, int size, double* res)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < size)
{
res[i] = A[i] * B[i];
}
}
__global__ void diag_revers(double* diag, double* res, int size)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < size)
{
res[i] = 1/diag[i];
}
}
//void Debuger(double* input, int size,int checking)
//{
// double * test = new double[size];
// checkCudaErrors(hipMemcpy(test, input, sizeof(double)*(size), hipMemcpyDeviceToHost));
// for (int i = 0; i<size; i++)
// {
// cout <<endl<< "test[" << i << "] = " << test[i];
// }
// cout << endl;
// delete test;
//}
void Debuger(double * input, int size, double* checking)
{
checkCudaErrors(hipMemcpy(checking, input, sizeof(double)*(size), hipMemcpyDeviceToHost));
}
void Debuger_for_matr(double * input, int rows, int columns, double* checking)
{
checkCudaErrors(hipMemcpy(checking, input, sizeof(double)*(rows)*(columns), hipMemcpyDeviceToHost));
for (int i = 0; i < columns; i++)
{
cout << "Vector " << "[" << i << "]" << endl;
for (int j = 0; j < rows; j++)
{
cout << checking[i*rows + j] << " ";
}
cout << endl;
}
}
void show_eigen_value(double*input,int lanc_count)
{
double* checking = new double[lanc_count];
checkCudaErrors(hipMemcpy(checking, input, sizeof(double)*(lanc_count), hipMemcpyDeviceToHost));
cout << endl << "Eigenvalues: " << endl;
for (int i = 0; i < lanc_count; i++)
cout << "Val ["<<i<<"] = "<<std::setprecision(17)<< checking[i] << endl;
cout << endl;
}
inline double dot_product(double* A, double* B, int size)
{
double rezult = 0;
for (int i = 0; i < size; i++)
{
rezult += (A[i] * B[i]);
}
return rezult;
}
inline void vector_on_number(double* A, double value, int size, double* res)
{
for (int i = 0; i < size; i++)
res[i] = A[i] * value;
}
inline void sum_vector(double* A, double* B, int size, double* res)
{
for (int i = 0; i < size; i++)
{
res[i] = A[i] + B[i];
}
}
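//"raznost" is Russian for "difference": element-wise res = A - B on the host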
inline void raznost_vector(double* A, double* B, int size, double* res)
{
for (int i = 0; i < size; i++)
{
res[i] = A[i] - B[i];
}
}
int return_string(int number, int* C)
{
int i = 0;
while (C[i] <= number)
i++;
return i;
}
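//splits the CSR matrix across gpu_amount devices by blocks of nonzeros: each device owns roughly mod
//consecutive nonzeros (the last device takes the remainder), and its local copy is padded with explicit
//zero diagonal entries for the rows it does not own, so every chunk stays a size x size CSR matrix that
//can be multiplied against the full input vector; returns each device's local (padded) nonzero count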
int* split(int gpu_amount, double* val, int* col, int* row, int size, int non_zero, double **d_val, int ** d_col, int **d_row)
{
int mod = non_zero / gpu_amount; //nonzeros handled by each of the first gpu_amount-1 devices
int rest = non_zero - mod*(gpu_amount - 1); //remaining nonzeros assigned to the last device
int first_position;
int last_position;
int first_string;
int last_string;
double *val_;
int *col_;
int *row_;
int *temp = new int[gpu_amount];
int nsize;
#if CHECKER
cout << endl << "CSR:" << endl;
for (int i = 0; i < non_zero; i++)
{
cout << val[i] << " ";
}
cout << endl;
for (int i = 0; i < non_zero; i++)
{
cout << col[i] << " ";
}
cout << endl;
for (int i = 0; i < size + 1; i++)
{
cout << row[i] << " ";
}
cout << endl;
#endif
for (int number = 0; number < gpu_amount; number++)
{
if (number == gpu_amount - 1)
{
int in1 = 0;
int in2 = 0;
first_position = number*mod;// n
last_position = non_zero - 1;//k
first_string = return_string(number*mod, row) - 1; //i
last_string = return_string(non_zero - 1, row) - 1;//j
nsize = rest + first_string + size - 1 - last_string;
val_ = new double[nsize]; // definition
for (int i = 0; i < nsize; i++)
{
if (i < first_string)
{
val_[i] = 0;
}
else
{
val_[i] = val[first_position + in1];
in1++;
}
}
//memcpy(&A_[first_string],&A[first_position],sizeof(double)*(rest));
col_ = new int[nsize];
for (int i = 0; i < nsize; i++)
{
if (i < first_string)
{
col_[i] = i;
}
else
{
col_[i] = col[first_position + in2];
in2++;
}
}
//memcpy(&B_[first_string], &B[first_position], sizeof(double)*(rest));
row_ = new int[size + 1];
for (int i = 0; i < first_string; i++) //0123..C..000
row_[i] = i;
for (int count = first_string; count <= last_string; count++)
{
row_[count] = row[count] - first_position + first_string;
if (row[count] - first_position < 0) row_[count] = first_string;
}
row_[size] = nsize;
}
else
{
int in1 = 0;
int in2 = 0;
first_position = number*mod;// n
last_position = (number + 1)*mod - 1;//k
first_string = return_string(number*mod, row) - 1; //i
last_string = return_string((number + 1)*mod - 1, row) - 1;//j
nsize = mod + first_string + size - 1 - last_string;
val_ = new double[nsize]; // definition
for (int i = 0; i < nsize; i++)
{
if ((i < first_string) || (i > first_string + mod - 1))
{
val_[i] = 0;
}
else
{
val_[i] = val[first_position + in1];
in1++;
}
}
//memcpy(&A_[first_string], &A[first_position], sizeof(double)*(mod));
col_ = new int[nsize];
int inn = 1;
for (int i = 0; i < nsize; i++)
{
if (i < first_string)
{
col_[i] = i;
}
else if (i < first_string + mod)
{
col_[i] = col[first_position + in2];
in2++;
}
else
{
col_[i] = last_string + inn;
inn++;
}
}
//memcpy(&B_[first_string], &B[first_position], sizeof(double)*(mod));
row_ = new int[size + 1];
for (int i = 0; i < first_string; i++) //0123..C..000
row_[i] = i;
for (int count = first_string; count <= last_string; count++)
{
row_[count] = row[count] - first_position + first_string;
if (row[count] - first_position < 0) row_[count] = first_string;
}
int l = 1;
for (int i = last_string + 1; i < size; i++) //0123..C..n..
{
row_[i] = first_string + last_position - first_position + l;
l++;
}
row_[size] = nsize;
}
#if CHECKER
cout << endl << "Device: " << number << " n: " << first_position << " k: " << last_position << " i: " << first_string << " j: " << last_string << endl;
cout << endl;
for (int i = 0; i < nsize; i++)
{
cout << val_[i] << " ";
}
cout << endl;
for (int i = 0; i < nsize; i++)
{
cout << col_[i] << " ";
}
cout << endl;
for (int i = 0; i < size + 1; i++)
{
cout << row_[i] << " ";
}
cout << endl;
#endif
temp[number] = nsize;
checkCudaErrors(hipSetDevice(number));
checkCudaErrors(hipMalloc((void **)&d_val[number], sizeof(double)*nsize));
checkCudaErrors(hipMalloc((void **)&d_col[number], sizeof(int)*nsize));
checkCudaErrors(hipMalloc((void **)&d_row[number], sizeof(int)*(size + 1)));
checkCudaErrors(hipMemcpy(d_val[number], val_, sizeof(double)*nsize, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_col[number], col_, sizeof(int)*nsize, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_row[number], row_, sizeof(int)*(size + 1), hipMemcpyHostToDevice));
delete[] val_;
delete[] col_;
delete[] row_;
}
return temp;
}
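/*
 * GPU_mult: distributed sparse matrix-vector product rezult = A*vec used by the BiCGSTAB
 * and Lanczos routines. Each device multiplies its padded CSR slice by vec with
 * cusparseDcsrmv (alpha = 1, beta = 0); the partial results are staged through host
 * buffers (pipe), summed on device 0 with vector_addition, and the separately stored
 * diagonal contribution diag[i]*vec[i] is added at the end. Note: vec, diag and rezult
 * are device pointers on GPU 0 here, and the per-device csrmv reads vec directly, which
 * presumably relies on unified addressing / peer access between the devices.
 */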
void GPU_mult(double* vec, int size, int* nnz, double* diag, int gpu_amount, double** d_val, int** d_col, int** d_row, double* rezult, int maximumThreads)
{
double **pipe = new double*[gpu_amount];
for (int i = 0; i < gpu_amount; i++)
{
pipe[i] = new double[size];
}
//size == vec.size()
checkCudaErrors(hipSetDevice(0));
double *temp_rez;
double *vec_temp;
double * checking = new double[size];
double** rez_p = new double *[gpu_amount];
//double** rez_h = new double *[gpu_amount];
checkCudaErrors(hipMalloc((void**)&temp_rez,sizeof(double)*size));
checkCudaErrors(hipMalloc((void**)&vec_temp, sizeof(double)*size));
//checkCudaErrors(hipMemset(temp_rez, 0.0, size));
	hipLaunchKernelGGL(all_zero, dim3(10000), dim3(maximumThreads), 0, 0, size, temp_rez);
//Debuger(temp_rez, size, checking);
//checkCudaErrors(hipMemset(vec_temp, 0.0, size));
all_zero << <10000, maximumThreads>> > (size, vec_temp);
//Debuger(vec_temp, size, checking);
double *one = new double;
*one = 1.0;
double *zero = new double;
*zero = 0.0;
//double *x_d;
omp_set_num_threads(gpu_amount);
double dtime = omp_get_wtime();
#pragma omp parallel for// private(rez_p)
for (int number = 0; number < gpu_amount; number++)
{
hipsparseHandle_t handle = NULL;
hipsparseMatDescr_t Adescr = NULL;
checkCudaErrors(hipSetDevice(number));
//checkCudaErrors(hipMalloc((void **)&x_d, sizeof(double)*size));
//checkCudaErrors(hipMalloc((void**)&tempnam,sizeof(double)*size));
//checkCudaErrors(hipMemcpy(x_d, vec, sizeof(double)*size, hipMemcpyHostToDevice));
checkCudaErrors(hipsparseCreate(&handle));
checkCudaErrors(hipsparseCreateMatDescr(&Adescr));
checkCudaErrors(hipMalloc((void **)&rez_p[number], sizeof(double)*size));
checkCudaErrors(hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
size, size, nnz[number], one,
Adescr,
d_val[number],
d_row[number], d_col[number],
vec, zero,
rez_p[number]));
checkCudaErrors(hipMemcpy(pipe[number], rez_p[number], sizeof(double)*size, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(rez_p[number]));
//checkCudaErrors(hipFree(x_d));
checkCudaErrors(hipsparseDestroy(handle));
checkCudaErrors(hipsparseDestroyMatDescr(Adescr));
}
// trouble !!!
checkCudaErrors(hipSetDevice(0));
for (int i = 0; i < gpu_amount; i++)
{
checkCudaErrors(hipMemcpy(vec_temp, pipe[i], sizeof(double)*size, hipMemcpyHostToDevice));
//Debuger(vec_temp, size, checking);
//Debuger(temp_rez, size, checking);
vector_addition << <10000, maximumThreads >> > (vec_temp, size, temp_rez);
//Debuger(temp_rez,size,checking);
hipDeviceSynchronize();
}
/*for (int i = 0; i < size; i++)
{
for (int j = 0; j < gpu_amount; j++)
{
rezult[i] += pipe[j][i];
}
}*/
//vector_addition << <10000, maximumThreads >> > (pipe,size,gpu_amount,rezult);
/*for (int i = 0; i < size; i++)
{
rezult[i] += diag[i] * vec[i];
}*/
hipblasHandle_t cublasHandle = NULL;
checkCudaErrors(hipblasCreate(&cublasHandle));
checkCudaErrors(hipSetDevice(0));
not_full_scalar << <10000,maximumThreads >> > (diag,vec,size,vec_temp);
//Debuger(vec_temp, size, checking);
//hipDeviceSynchronize();
vector_addition << <10000, maximumThreads >> > (vec_temp, size, temp_rez);
//Debuger(temp_rez, size, checking);
//hipDeviceSynchronize();
checkCudaErrors(hipblasDcopy(cublasHandle, size, temp_rez, 1, rezult, 1));
checkCudaErrors(hipFree(temp_rez));
// Debuger(temp_rez, size, checking);
checkCudaErrors(hipFree(vec_temp));
//Debuger(temp_rez, size, checking);
//checkCudaErrors(hipFree(rez_p));
/*for (int i = 0; i < gpu_amount; i++)
{
delete pipe[i];
}
delete[] pipe;*/
// delete vec_temp;
delete zero;
//delete x_d;
delete one;
//delete temp_rez;
	delete[] checking;
delete[] pipe;
delete[] rez_p;
}
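/*
 * GPU_mult_for_little_gradient: same distributed SpMV as GPU_mult, but with host-side
 * vectors: vec, diag and rezult live in host memory, the input vector is copied to every
 * device, and the partial products plus the diagonal term are accumulated on the CPU.
 * rezult is accumulated with +=, so the caller is expected to zero it beforehand.
 */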
void GPU_mult_for_little_gradient(double* vec, int size, int* nnz, double* diag, int gpu_amount, double** d_val, int** d_col, int** d_row, double* rezult)
{
double **pipe = new double*[gpu_amount];
for (int i = 0; i < gpu_amount; i++)
{
pipe[i] = new double[size];
}
//size == vec.size()
//double *temp_rez;
double** rez_p = new double *[gpu_amount];
//checkCudaErrors(hipMallocManaged((void**)&temp_rez, sizeof(double)*size));
double *one = new double;
*one = 1.0;
double *zero = new double;
*zero = 0.0;
double *x_d;
omp_set_num_threads(gpu_amount);
double dtime = omp_get_wtime();
#pragma omp parallel for// private(rez_p)
for (int number = 0; number < gpu_amount; number++)
{
hipsparseHandle_t handle = NULL;
hipsparseMatDescr_t Adescr = NULL;
checkCudaErrors(hipSetDevice(number));
checkCudaErrors(hipMalloc((void **)&x_d, sizeof(double)*size));
//checkCudaErrors(hipMalloc((void**)&tempnam,sizeof(double)*size));
checkCudaErrors(hipMemcpy(x_d, vec, sizeof(double)*size, hipMemcpyHostToDevice));
checkCudaErrors(hipsparseCreate(&handle));
checkCudaErrors(hipsparseCreateMatDescr(&Adescr));
checkCudaErrors(hipMalloc((void **)&rez_p[number], sizeof(double)*size));
checkCudaErrors(hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
size, size, nnz[number], one,
Adescr,
d_val[number],
d_row[number], d_col[number],
x_d, zero,
rez_p[number]));
checkCudaErrors(hipMemcpy(pipe[number], rez_p[number], sizeof(double)*size, hipMemcpyDeviceToHost));
//vector_addition << <10000, maximumThreads >> > (rez_p[number], size, number, temp_rez);
checkCudaErrors(hipFree(rez_p[number]));
checkCudaErrors(hipFree(x_d));
checkCudaErrors(hipsparseDestroy(handle));
checkCudaErrors(hipsparseDestroyMatDescr(Adescr));
}
for (int i = 0; i < size; i++)
{
for (int j = 0; j < gpu_amount; j++)
{
rezult[i] += pipe[j][i];
}
}
//vector_addition << <10000, maximumThreads >> > (pipe,size,gpu_amount,rezult);
for (int i = 0; i < size; i++)
{
rezult[i] += diag[i] * vec[i];
}
for (int i = 0; i < gpu_amount; i++)
{
	delete[] pipe[i];
}
delete[] pipe;
delete zero;
delete one;
delete[] rez_p;
}
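/*
 * GPU_gradient_solver: unpreconditioned conjugate gradient for A*x = right with the SpMV
 * offloaded to the GPUs and the vector updates done on the CPU. Classic CG recurrences:
 *   a_k     = (r_k, r_k) / (A z_k, z_k)
 *   x_{k+1} = x_k + a_k z_k
 *   r_{k+1} = r_k - a_k A z_k
 *   b_k     = (r_{k+1}, r_{k+1}) / (r_k, r_k)
 *   z_{k+1} = r_{k+1} + b_k z_k
 * The loop stops when ||r_k|| / ||right|| < APPROX or STEP_LIMIT is reached; the returned
 * vector is new[]-allocated and owned by the caller.
 */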
double* gpu_solver::GPU_gradient_solver(double *val, int *col, int *row, double *right, double *diag, int nnz, int size)
{
int gpu;
checkCudaErrors(hipGetDeviceCount(&gpu));
double ** d_val = new double *[gpu];
int ** d_col = new int *[gpu];
int ** d_row = new int *[gpu];
int *temp = new int[gpu];
double* r0 = new double[size];
double* x0 = new double[size];
double* x_k = new double[size];
double* z0 = new double[size];
double* z_k = new double[size];
double* r_k = new double[size];
double* ch = new double[size];
double* cont = new double[size];
double* testing = new double[size];
//*r0 = right;// x0 ={0...}
memcpy(r0, right, sizeof(double)*(size));
memcpy(z0, r0, sizeof(double)*(size));
memcpy(r_k, r0, sizeof(double)*(size));
double a_k;
double b_k;
double r0_to_r0;
double right_to_right = sqrt(dot_product(right, right, size));
double rk_to_rk;
double checking;
bool fg = true;
int step = 0;
double gpu_time = 0;
clock_t int1 = clock();
for (int i = 0; i < size; i++)
{
x0[i] = 0;
}
temp = split(gpu, val, col, row, size, nnz, d_val, d_col, d_row);
clock_t int2 = clock();
#if CHECKER
cout << "SPLIT TIME: " << double(int2 - int1) / 1000.0 << endl;
#endif
do
{
if (!fg)
{
memcpy(r0, r_k, sizeof(double)*(size));
memcpy(x0, x_k, sizeof(double)*(size));
memcpy(z0, z_k, sizeof(double)*(size));
}
r0_to_r0 = dot_product(r0, r0, size);
clock_t gpu_time1 = clock();
memset(ch,0,sizeof(double)*(size));
GPU_mult_for_little_gradient(z0, size, temp, diag, gpu, d_val, d_col, d_row, ch);
clock_t gpu_time2 = clock();
gpu_time += double(gpu_time2 - gpu_time1);
a_k = r0_to_r0 / dot_product(ch, z0, size);
vector_on_number(z0, a_k, size, cont);
if (step == 640)
cout << "640" << endl;
sum_vector(x0, cont, size, x_k);
vector_on_number(ch, a_k, size, cont);
raznost_vector(r0, cont, size, r_k);
rk_to_rk = dot_product(r_k, r_k, size);
b_k = rk_to_rk / r0_to_r0;
vector_on_number(z0, b_k, size, cont);
sum_vector(r_k, cont, size, z_k);
fg = false;
step++;
checking = sqrt(rk_to_rk) / right_to_right;
//cout << endl<<"Checking" << checking << endl;
} while ((checking >= APPROX) && (step < STEP_LIMIT));
//cout <<endl<< "GPU TIME: " << gpu_time / 1000.0 << endl;
cout << "NEVAZKA: " << checking << endl;
	memset(ch, 0, sizeof(double)*(size)); // clear the accumulator before the verification SpMV
	GPU_mult_for_little_gradient(x_k, size, temp, diag, gpu, d_val, d_col, d_row, ch);
raznost_vector(ch, right, size, testing);
double verify = sqrt(dot_product(testing, testing, size));
cout << endl << "VERIFICATION: " << verify << endl;
cout << endl << "Step = " << step << endl;
for (int number = 0; number < gpu; number++)
{
checkCudaErrors(hipSetDevice(number));
checkCudaErrors(hipFree(d_val[number]));
checkCudaErrors(hipFree(d_col[number]));
checkCudaErrors(hipFree(d_row[number]));
}
delete[] temp;
delete[] d_val;
delete[] d_col;
delete[] d_row;
delete[] ch;
delete[] cont;
delete[] x0;
delete[] r0;
//delete[] z0;
//delete[] z_k;
//delete[] r_k;
//delete[] testing;
return x_k;
}
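/*
 * GPU_stab_bi_gradient_solver: BiCGSTAB for A*x = right with x0 = 0. The SpMV goes through
 * the multi-GPU GPU_mult; dot products, axpy and copies run through cuBLAS on device 0.
 * One iteration of the standard scheme:
 *   rho_k = (r0, r)                    beta  = (rho_k / rho_{k-1}) * (alpha / omega)
 *   p     = r + beta*(p - omega*v)     v     = A*p
 *   alpha = rho_k / (r0, v)            h     = x + alpha*p
 *   s     = r - alpha*v                t     = A*s
 *   omega = (t, s) / (t, t)            x     = h + omega*s
 *   r     = s - omega*t
 * The loop exits when ||r|| / ||r0|| <= APPROX or after STEP_LIMIT iterations.
 */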
double* gpu_solver::GPU_stab_bi_gradient_solver(double *val, int *col, int *row, double *right, double *diag, int nnz, int size)
{
//Count amount of devices
int gpu;
checkCudaErrors(hipGetDeviceCount(&gpu));
//double *test = new double[size];
//Arrays for devices
double ** d_val = new double *[gpu];
int ** d_col = new int *[gpu];
int ** d_row = new int *[gpu];
	//Array with per-device array sizes
int *temp = new int[gpu];
temp = split(gpu, val, col, row, size, nnz, d_val, d_col, d_row);
//int step = 0;
bool flag = true;
double *minus = new double;
double *zero = new double;
double *one = new double;
*minus = -1.0;
*zero = 0.0;
*one = 1.0;
//Initialization of diag
double* final_result = new double[size];
//Initialization of all variables
checkCudaErrors(hipSetDevice(0));
hipblasHandle_t cublasHandle = NULL;
hipsparseHandle_t cusparseHandle = NULL;
checkCudaErrors(hipsparseCreate(&cusparseHandle));
hipsparseMatDescr_t matDescr = NULL;
checkCudaErrors(hipsparseCreateMatDescr(&matDescr));
checkCudaErrors(hipblasCreate(&cublasHandle));
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, 0));
double *x0;
checkCudaErrors(hipMalloc((void **)&x0, sizeof(double)*(size)));
checkCudaErrors(hipMemset(x0, 0.0, sizeof(double)*(size)));
double *r0, *rT;
double * diag_gpu;
double * h;
//double * right_part_gpu;
checkCudaErrors(hipMalloc((void **)&r0, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&rT, sizeof(double)*(size)));
//checkCudaErrors(hipMalloc((void **)&right_part_gpu, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&diag_gpu, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&h, sizeof(double)*(size)));
checkCudaErrors(hipMemcpy(diag_gpu, diag, sizeof(double)*(size), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(r0, right, sizeof(double)*(size), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(rT, right, sizeof(double)*(size), hipMemcpyHostToDevice));
//checkCudaErrors(hipMemcpy(right_part_gpu,right, sizeof(double)*(size), hipMemcpyHostToDevice));
double *rho0 = new double;
double *alpha0 = new double;
double *omega0 = new double;
*rho0 = 1.0;
*alpha0 = 1.0;
*omega0 = 1.0;
double *minus_one = new double;
*minus_one = -1.0;
double *nu0, *p0;
int step = 0;
checkCudaErrors(hipMalloc((void **)&nu0, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&p0, sizeof(double)*(size)));
checkCudaErrors(hipMemset(nu0, 0.0, sizeof(double)*(size)));
checkCudaErrors(hipMemset(p0, 0.0, sizeof(double)*(size)));
double *rhoK = new double;
double *alphaK = new double;
double *omegaK = new double;
double *betaK = new double;
double *pK, *nuK, *sK, *tK, *xK, *rK;
checkCudaErrors(hipMalloc((void **)&pK, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&nuK, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&sK, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&tK, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&xK, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&rK, sizeof(double)*(size)));
double *temp1, *temp2, *temp3;
checkCudaErrors(hipMalloc((void **)&temp1, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&temp2, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&temp3, sizeof(double)*(size)));
//double * NegOmega=new double;
double * temp_var_1=new double;
double * temp_var_2 = new double;
double * checking=new double[size];
//*NegOmega = -(*omega0);
//1
do
{
hipblasDdot(cublasHandle, size, r0, 1, rT, 1, rhoK);
//2
*betaK = (*rhoK / *rho0) * (*alpha0 / *omega0);
//cout <<"OUT: "<< *betaK;
//3
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (nu0, -(*omega0), size, temp1);
hipblasDaxpy(cublasHandle, size, one, p0, 1, temp1, 1);
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (temp1, *betaK, size, temp1);
hipblasDaxpy(cublasHandle, size, one, rT, 1, temp1, 1);
hipblasDcopy(cublasHandle, size, temp1, 1, pK, 1);
Debuger(pK, size,checking);
//4
GPU_mult(pK, size, temp, diag_gpu, gpu, d_val, d_col, d_row, nuK, deviceProp.maxThreadsPerBlock);
Debuger(nuK, size,checking);
//5
hipblasDdot(cublasHandle, size, r0, 1, nuK, 1, temp_var_1);
*alphaK = (*rhoK) / (*temp_var_1);
//6
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (pK, *alphaK, size, temp1);
hipDeviceSynchronize();
hipblasDaxpy(cublasHandle, size, one, x0, 1, temp1, 1);
hipblasDcopy(cublasHandle, size, temp1, 1, h, 1);
Debuger(h, size,checking);
//7
//hipblasDaxpy(cublasHandle, size, minus_one, h, 1, xK, 1);
//8
Debuger(nuK,size,checking);
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (nuK, -(*alphaK), size, temp1);
hipDeviceSynchronize();
Debuger(rT, size, checking);
Debuger(temp1, size, checking);
hipblasDaxpy(cublasHandle, size, one, rT, 1, temp1, 1);
Debuger(temp1, size, checking);
hipblasDcopy(cublasHandle, size, temp1, 1, sK, 1);
Debuger(sK, size,checking);
//9
GPU_mult(sK, size, temp, diag_gpu, gpu, d_val, d_col, d_row, tK, deviceProp.maxThreadsPerBlock);
//Debuger(xK, size,checking);
Debuger(tK,size,checking);
//10
hipblasDdot(cublasHandle, size, tK, 1, sK, 1, temp_var_1);
//Debuger(temp_var_1,size,checking);
hipblasDdot(cublasHandle, size, tK, 1, tK, 1, temp_var_2);
//Debuger(temp_var_2, size, checking);
*omegaK = *temp_var_1 / *temp_var_2;
//11
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (sK, *omegaK, size, temp1);
hipDeviceSynchronize();
hipblasDaxpy(cublasHandle, size, one, h, 1, temp1, 1);
hipblasDcopy(cublasHandle, size, temp1, 1, xK, 1);
Debuger(xK,size,checking);
//12
/*hipblasDaxpy(cublasHandle, size, minus_one, xK, 1, x0, 1);
hipblasDnrm2(cublasHandle, size, x0, 1, temp_var_1);*/
//13
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (tK, -(*omegaK), size, temp1);
hipblasDaxpy(cublasHandle, size, one, sK, 1, temp1, 1);
hipblasDcopy(cublasHandle, size, temp1, 1, rK, 1);
Debuger(rK, size, checking);
hipblasDnrm2(cublasHandle, size, rK, 1, temp_var_1);
hipblasDnrm2(cublasHandle, size, r0,1,temp_var_2);
//if(step%20==0)
//cout <<"NEVAZKA = "<< *temp_var_1/ *temp_var_2 << endl;
if (*temp_var_1 / *temp_var_2<= APPROX)
{
cout <<endl<< "NEVAZKA = " << *temp_var_1/ *temp_var_2 << endl;
checkCudaErrors(hipMemcpy(final_result,xK, sizeof(double)*size, hipMemcpyDefault));
break;
}
hipblasDcopy(cublasHandle, size, rK, 1, rT, 1);
hipblasDcopy(cublasHandle, size, xK, 1, x0, 1);
hipblasDcopy(cublasHandle, size, pK, 1, p0, 1);
hipblasDcopy(cublasHandle, size, nuK, 1, nu0, 1);
*rho0 = *rhoK;
*omega0 = *omegaK;
*alpha0 = *alphaK;
step++;
//cout <<"Step = "<< step << endl;
} while (step<=STEP_LIMIT);
//Verification
GPU_mult(xK, size, temp, diag_gpu, gpu, d_val, d_col, d_row, temp1, deviceProp.maxThreadsPerBlock);
hipblasDaxpy(cublasHandle,size,minus_one,r0,1,temp1,1);
hipblasDnrm2(cublasHandle, size, temp1, 1, temp_var_1);
cout <<endl<< "VERIFICATION: " << *temp_var_1 << endl;
checkCudaErrors(hipFree(r0));
checkCudaErrors(hipFree(rK));
checkCudaErrors(hipFree(x0));
checkCudaErrors(hipFree(xK));
checkCudaErrors(hipFree(pK));
checkCudaErrors(hipFree(p0));
//checkCudaErrors(hipFree(right_part_gpu));
checkCudaErrors(hipFree(nuK));
checkCudaErrors(hipFree(nu0));
checkCudaErrors(hipFree(temp1));
checkCudaErrors(hipFree(temp2));
checkCudaErrors(hipFree(temp3));
checkCudaErrors(hipFree(sK));
checkCudaErrors(hipFree(tK));
checkCudaErrors(hipFree(h));
checkCudaErrors(hipblasDestroy(cublasHandle));
checkCudaErrors(hipsparseDestroy(cusparseHandle));
cout <<endl<< "STEPS: = " << step << endl;
delete temp_var_1;
delete temp_var_2;
return final_result;
}
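/*
 * GPU_stab_bi_gradient_solver_with_preconditioner: the same BiCGSTAB loop as above with a
 * Jacobi (diagonal) preconditioner K = diag(A). temp2 holds 1/diag, so the preconditioned
 * directions y = K^{-1}*p and z = K^{-1}*s are element-wise products (not_full_scalar),
 * and omega is formed from K^{-1}*t and K^{-1}*s. This is the solver that the Lanczos
 * routine below calls for every A*x = M*q_i system.
 */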
double* gpu_solver::GPU_stab_bi_gradient_solver_with_preconditioner(double *val, int *col, int *row, double *right, double *diag, int nnz, int size)
{
//Count amount of devices
int gpu;
checkCudaErrors(hipGetDeviceCount(&gpu));
double *test = new double[size];
//Arrays for devices
double ** d_val = new double *[gpu];
int ** d_col = new int *[gpu];
int ** d_row = new int *[gpu];
	//Array with per-device array sizes
int *temp = new int[gpu];
temp = split(gpu, val, col, row, size, nnz, d_val, d_col, d_row);
//int step = 0;
bool flag = true;
double *minus = new double;
double *zero = new double;
double *one = new double;
*minus = -1.0;
*zero = 0.0;
*one = 1.0;
//Initialization of diag
double* final_result = new double[size];
//Initialization of all variables
checkCudaErrors(hipSetDevice(0));
hipblasHandle_t cublasHandle = NULL;
hipsparseHandle_t cusparseHandle = NULL;
checkCudaErrors(hipsparseCreate(&cusparseHandle));
hipsparseMatDescr_t matDescr = NULL;
checkCudaErrors(hipsparseCreateMatDescr(&matDescr));
checkCudaErrors(hipblasCreate(&cublasHandle));
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, 0));
double *x0;
checkCudaErrors(hipMalloc((void **)&x0, sizeof(double)*(size)));
checkCudaErrors(hipMemset(x0, 0, sizeof(double)*(size)));
double *r0, *rT;
double * diag_gpu;
double * h;
checkCudaErrors(hipMalloc((void **)&r0, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&rT, sizeof(double)*(size)));
//checkCudaErrors(hipMalloc((void **)&right_part_gpu, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&diag_gpu, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&h, sizeof(double)*(size)));
checkCudaErrors(hipMemcpy(diag_gpu, diag, sizeof(double)*(size), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(r0, right, sizeof(double)*(size), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(rT, right, sizeof(double)*(size), hipMemcpyHostToDevice));
double *rho0 = new double;
double *alpha0 = new double;
double *omega0 = new double;
*rho0 = 1.0;
*alpha0 = 1.0;
*omega0 = 1.0;
double *minus_one = new double;
*minus_one = -1.0;
double *nu0, *p0;
int step = 0;
checkCudaErrors(hipMalloc((void **)&nu0, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&p0, sizeof(double)*(size)));
checkCudaErrors(hipMemset(nu0, 0, sizeof(double)*(size)));
checkCudaErrors(hipMemset(p0, 0, sizeof(double)*(size)));
double *rhoK = new double;
double *alphaK = new double;
double *omegaK = new double;
double *betaK = new double;
double *pK, *nuK, *sK, *tK, *xK, *rK;
checkCudaErrors(hipMalloc((void **)&pK, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&nuK, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&sK, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&tK, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&xK, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&rK, sizeof(double)*(size)));
double *temp1, *temp2, *temp3,*temp4,*temp5;
checkCudaErrors(hipMalloc((void **)&temp1, sizeof(double)*(size))); // for many
checkCudaErrors(hipMalloc((void **)&temp2, sizeof(double)*(size))); //K^(-1)
checkCudaErrors(hipMalloc((void **)&temp3, sizeof(double)*(size))); //z
checkCudaErrors(hipMalloc((void **)&temp4, sizeof(double)*(size))); //ddot tk*K^-1
checkCudaErrors(hipMalloc((void **)&temp5, sizeof(double)*(size))); //ddot sk*^(-1)
double * temp_var_1 = new double;
double * temp_var_2 = new double;
diag_revers << <10000, deviceProp.maxThreadsPerBlock >> > (diag_gpu, temp2, size);
double *checking = new double[size];
do
{
//1
hipblasDdot(cublasHandle, size, r0, 1, rT, 1, rhoK);
//2
*betaK = (*rhoK / *rho0) * (*alpha0 / *omega0);
//cout <<"OUT: "<< *betaK;
//3
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (nu0, -(*omega0), size, temp1);
Debuger(temp1, size, checking);
hipblasDaxpy(cublasHandle, size, one, p0, 1, temp1, 1);
Debuger(temp1, size, checking);
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (temp1, *betaK, size, temp1);
hipblasDaxpy(cublasHandle, size, one, rT, 1, temp1, 1);
hipblasDcopy(cublasHandle, size, temp1, 1, pK, 1);
Debuger(pK, size, checking);
//4
not_full_scalar << <10000, deviceProp.maxThreadsPerBlock >> > (temp2,pK,size,temp3);
//Debuger(temp3, size,checking);
Debuger(temp3, size, checking);
//5
GPU_mult(temp3, size, temp, diag_gpu, gpu, d_val, d_col, d_row, nuK, deviceProp.maxThreadsPerBlock);
Debuger(nuK, size,checking);
Debuger(nuK, size, checking);
//6
hipblasDdot(cublasHandle, size, r0, 1, nuK, 1, temp_var_1);
*alphaK = (*rhoK) / (*temp_var_1);
//7
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (temp3, *alphaK, size, temp1);
Debuger(temp1, size, checking);
hipblasDaxpy(cublasHandle, size, one, x0, 1, temp1, 1);
hipblasDcopy(cublasHandle, size, temp1, 1, h, 1);
Debuger(h, size, checking);
//7
//hipblasDaxpy(cublasHandle, size, minus_one, h, 1, xK, 1);
//9
//Debuger(temp1, size);
//Debuger(temp1, size, checking);
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (nuK, -(*alphaK), size, temp1);
//Debuger(temp1, size);
Debuger(temp1, size, checking);
Debuger(rT, size, checking);
hipblasDaxpy(cublasHandle, size, one, rT, 1, temp1, 1);
//Debuger(temp1, size);
//Debuger(sK, size, checking);
Debuger(temp1,size,checking);
hipblasDcopy(cublasHandle, size, temp1, 1, sK, 1);
Debuger(sK, size,checking);
//10
Debuger(sK, size,checking);
not_full_scalar << <10000, deviceProp.maxThreadsPerBlock >> > (temp2, sK, size, temp3);
Debuger(temp3, size,checking);
//11
GPU_mult(temp3, size, temp, diag_gpu, gpu, d_val, d_col, d_row, tK, deviceProp.maxThreadsPerBlock);
Debuger(tK, size,checking);
//12
not_full_scalar << <10000, deviceProp.maxThreadsPerBlock >> > (temp2, tK, size, temp4);
not_full_scalar << <10000, deviceProp.maxThreadsPerBlock >> > (temp2, sK, size, temp5);
Debuger(temp4, size, checking);
Debuger(temp5, size, checking);
hipblasDdot(cublasHandle, size, temp4, 1, temp5, 1, temp_var_1);
hipblasDdot(cublasHandle, size, temp4, 1, temp4, 1, temp_var_2);
*omegaK = *temp_var_1 / *temp_var_2;
//13
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock>> > (temp3, *omegaK, size, temp1);
Debuger(temp1, size, checking);
hipblasDaxpy(cublasHandle, size, one, h, 1, temp1, 1);
Debuger(temp1, size, checking);
hipblasDcopy(cublasHandle, size, temp1, 1, xK, 1);
Debuger(xK, size,checking);
//12
/*hipblasDaxpy(cublasHandle, size, minus_one, xK, 1, x0, 1);
hipblasDnrm2(cublasHandle, size, x0, 1, temp_var_1);*/
//15
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock>> > (tK, -(*omegaK), size, temp1);
Debuger(temp1, size, checking);
hipblasDaxpy(cublasHandle, size, one, sK, 1, temp1, 1);
Debuger(temp1, size,checking);
hipblasDcopy(cublasHandle, size, temp1, 1, rK, 1);
Debuger(rK, size,checking);
hipblasDnrm2(cublasHandle, size, rK, 1, temp_var_1);
hipblasDnrm2(cublasHandle, size, r0, 1, temp_var_2);
//if(step%20==0)
//cout <<"NEVAZKA = "<< *temp_var_1 / *temp_var_2 << endl;
//if (*temp_var_1/ *temp_var_2 <= APPROX || *omegaK<=APPROX)
if (*temp_var_1 / *temp_var_2 <= APPROX )
{
//cout << endl << "NEVAZKA = " << *temp_var_1/ *temp_var_2<< endl;
checkCudaErrors(hipMemcpy(final_result, xK, sizeof(double)*size, hipMemcpyDefault));
break;
}
hipblasDcopy(cublasHandle, size, rK, 1, rT, 1);
hipblasDcopy(cublasHandle, size, xK, 1, x0, 1);
hipblasDcopy(cublasHandle, size, pK, 1, p0, 1);
hipblasDcopy(cublasHandle, size, nuK, 1, nu0, 1);
*rho0 = *rhoK;
*omega0 = *omegaK;
*alpha0 = *alphaK;
step++;
//cout <<"Step = "<< step << endl;
} while (step <= STEP_LIMIT);
//Verification
GPU_mult(xK, size, temp, diag_gpu, gpu, d_val, d_col, d_row, temp1, deviceProp.maxThreadsPerBlock);
hipblasDaxpy(cublasHandle, size,minus_one, r0, 1, temp1, 1);
hipblasDnrm2(cublasHandle, size, temp1, 1, temp_var_1);
//cout << endl << "VERIFICATION: " << *temp_var_1 << endl;
checkCudaErrors(hipFree(r0));
checkCudaErrors(hipFree(rK));
checkCudaErrors(hipFree(x0));
checkCudaErrors(hipFree(xK));
checkCudaErrors(hipFree(pK));
checkCudaErrors(hipFree(p0));
//checkCudaErrors(hipFree(right_part_gpu));
checkCudaErrors(hipFree(nuK));
checkCudaErrors(hipFree(nu0));
checkCudaErrors(hipFree(temp1));
checkCudaErrors(hipFree(temp2));
checkCudaErrors(hipFree(temp3));
checkCudaErrors(hipFree(sK));
checkCudaErrors(hipFree(temp4));
checkCudaErrors(hipFree(temp5));
checkCudaErrors(hipFree(tK));
checkCudaErrors(hipFree(h));
checkCudaErrors(hipblasDestroy(cublasHandle));
checkCudaErrors(hipsparseDestroy(cusparseHandle));
//cout << endl << "STEPS: = " << step << endl;
delete temp_var_1;
delete temp_var_2;
return final_result;
}
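/*
 * matrix_eigenvalues: Lanczos iteration for the generalized eigenproblem A*x = lambda*M*x
 * (A given by val/col/row/diag, M by val2/col2/row2/diag2). Each Lanczos step applies the
 * operator A^{-1}*M: w = M*q_i is formed with GPU_mult and A*x = w is solved with the
 * preconditioned BiCGSTAB above. The Lanczos basis is kept M-orthonormal
 * (alpha_i = (M*q_i, x), beta_i = sqrt(x^T*M*x)), with full reorthogonalization against
 * previous Lanczos vectors and already-converged eigenvectors. The tridiagonal matrix T
 * assembled from alpha/beta is diagonalized with cusolverDnDsyevd; a Ritz pair is accepted
 * when |beta_q * s_last| <= eps_for_b (see the proverb kernel), and since the operator is
 * A^{-1}*M the reported eigenvalues are the reciprocals of the Ritz values. The outer loop
 * restarts with a start vector orthogonalized against the converged eigenvectors until
 * amount_ev values have converged, or at most 10 restarts.
 */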
void gpu_solver::matrix_eigenvalues(double *val, int *col, int *row, double *diag, int non_zero, int size, double * eigen_values, double** eigen_vectors,double * b, double *val2, int *col2, int *row2, double *diag2, int non_zero2, int size2, int amount_ev)
{
int gpu;
checkCudaErrors(hipGetDeviceCount(&gpu));
double ** d_val = new double *[gpu];
int ** d_col = new int *[gpu];
int ** d_row = new int *[gpu];
double ** d_val2 = new double *[gpu];
int ** d_col2 = new int *[gpu];
int ** d_row2 = new int *[gpu];
	//Array with per-device array sizes
int *temp = new int[gpu];
int *temp_M = new int[gpu];
temp = split(gpu, val, col, row, size, non_zero, d_val, d_col, d_row);
temp_M = split(gpu, val2, col2, row2, size2, non_zero2, d_val2, d_col2, d_row2);
checkCudaErrors(hipSetDevice(0));
hipblasHandle_t cublasHandle = NULL;
hipsparseHandle_t cusparseHandle = NULL;
checkCudaErrors(hipsparseCreate(&cusparseHandle));
hipsparseMatDescr_t matDescr = NULL;
checkCudaErrors(hipsparseCreateMatDescr(&matDescr));
checkCudaErrors(hipblasCreate(&cublasHandle));
hipDeviceProp_t deviceProp;
double *diag_gpu;
checkCudaErrors(hipMalloc((void **)&diag_gpu, sizeof(double)*(size)));
checkCudaErrors(hipMemcpy(diag_gpu, diag, sizeof(double)*(size), hipMemcpyHostToDevice));
double *diag_gpu2;
checkCudaErrors(hipMalloc((void **)&diag_gpu2, sizeof(double)*(size2)));
checkCudaErrors(hipMemcpy(diag_gpu2, diag2, sizeof(double)*(size2), hipMemcpyHostToDevice));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, 0));
	double* nu_vec;
double * alpha_vec;
double * beta_vec;
	double* w_vec; // current Lanczos vector q_i
double* alpha_j=new double;
double* t=new double;
double * beta_new;
double * checking = new double[size];
double * matrix_w;
double * multi_temp;
double * matr_dense;
double * eigen_values_gpu;
double * eigenvectors_gpu;
int * CONVERGE_AMOUNT_CPU = new int;
*CONVERGE_AMOUNT_CPU = 0;
int * CONVERGE_AMOUNT;
checkCudaErrors(hipMalloc((void **)&CONVERGE_AMOUNT, sizeof(int)));
checkCudaErrors(hipMemset(CONVERGE_AMOUNT, 0, sizeof(int)));
//*CONVERGE_AMOUNT = 0; //current converge
double * converge_eig_vec; //result converge vectors
double * converge_eig_val; //result converge values
double *converge_eig_val_numb_T; //temp for ind of converge values in array
double *converge_temp ;
//*converge_temp = 0;
checkCudaErrors(hipMalloc((void **)&converge_temp, sizeof(double)));
checkCudaErrors(hipMemset(converge_temp, 0.0, sizeof(double)));
double* temp_ev; //temp
checkCudaErrors(hipMalloc((void **)&temp_ev, sizeof(double)*(amount_ev)));
checkCudaErrors(hipMalloc((void **)&converge_eig_val_numb_T, sizeof(double)*(amount_ev)));
checkCudaErrors(hipMalloc((void **)&converge_eig_vec, sizeof(double)*(size)*size));
checkCudaErrors(hipMalloc((void **)&converge_eig_val, sizeof(double)*(amount_ev)));
checkCudaErrors(hipMalloc((void **)&eigenvectors_gpu, sizeof(double)*(amount_ev)*(size)));
checkCudaErrors(hipMalloc((void **)&eigen_values_gpu, sizeof(double)*(amount_ev)));
checkCudaErrors(hipMalloc((void **)&matr_dense, sizeof(double)*(amount_ev)*amount_ev));
checkCudaErrors(hipMemset(matr_dense, 0.0, sizeof(double)*(amount_ev)*(amount_ev)));
checkCudaErrors(hipMalloc((void **)&matrix_w, sizeof(double)*(size)*(amount_ev)));
checkCudaErrors(hipMalloc((void **)&multi_temp, sizeof(double)*(size)*(amount_ev)));
checkCudaErrors(hipMemset(matrix_w, 0.0, sizeof(double)*(size*amount_ev)));
checkCudaErrors(hipMalloc((void **)&nu_vec, sizeof(double)*(size)));
checkCudaErrors(hipSetDevice(0));
checkCudaErrors(hipMemset(nu_vec, 0, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&alpha_vec, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&beta_new, sizeof(double)));
checkCudaErrors(hipMemset(alpha_vec, 0, sizeof(double)*(size)));
checkCudaErrors(hipMemset(beta_new, 0.0, sizeof(double)));
checkCudaErrors(hipMalloc((void **)&beta_vec, sizeof(double)*(amount_ev-1)));
checkCudaErrors(hipMemset(beta_vec, 0.0, sizeof(double)*(amount_ev-1)));
checkCudaErrors(hipMalloc((void **)&w_vec, sizeof(double)*(size))); // myvector
double *right_gpu;
double * right_gpu_input_once;
checkCudaErrors(hipMalloc((void **)&right_gpu_input_once, sizeof(double)*(size)));
checkCudaErrors(hipMemcpy(right_gpu_input_once, b, sizeof(double)*(size), hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **)&right_gpu, sizeof(double)*(size)));
checkCudaErrors(hipMemcpy(right_gpu, b, sizeof(double)*(size), hipMemcpyHostToDevice));
//checkCudaErrors(hipMemcpy(w_vec, b, sizeof(double)*(size), hipMemcpyHostToDevice));
double *temp1_b;
checkCudaErrors(hipMalloc((void **)&temp1_b, sizeof(double)*(size)));
//checkCudaErrors(hipMemset(w_vec, 2.0, sizeof(double)*(size)));
// matr_add << <10000, deviceProp.maxThreadsPerBlock >> > (matrix_w, 0, w_vec, size);
double *tmp5;
checkCudaErrors(hipMalloc((void **)&tmp5, sizeof(double)));
double *tmp6;
checkCudaErrors(hipMalloc((void **)&tmp6, sizeof(double)));
double * temp2;
checkCudaErrors(hipMalloc((void **)&temp2, sizeof(double)*(size)));
double * temp3;
checkCudaErrors(hipMalloc((void **)&temp3, sizeof(double)*(size)));
double * temp6;
checkCudaErrors(hipMalloc((void **)&temp6, sizeof(double)*(size)));
double *tmp1=new double;
double *tmp_2 = new double;
double *tmp3 = new double;
int* tmp_int = new int;
bool main_flag = false;
double * zero_f = new double;
*zero_f = 0;
double*tmp2 = new double;
double * one = new double;
*one = 1;
double *b_i = new double;
double * b_i_old = new double;
*b_i_old = 0;
bool exit = false;
double *x_temp = new double[size];
double * temp1; //right_gpu /aka first vector
checkCudaErrors(hipMalloc((void **)&temp1, sizeof(double)*(size)));
double *x_temp_tld = new double[size]; //x^~
int step = 0;
double * last_beta = new double;
bool first_flag = false;
checkCudaErrors(hipMalloc((void **)&x_temp, sizeof(double)*(size)));
checkCudaErrors(hipMalloc((void **)&x_temp_tld, sizeof(double)*(size)));
double * minus_one = new double;
*minus_one = -1.0;
while (*CONVERGE_AMOUNT_CPU != amount_ev)
{
cout << endl << "THE " << step << " STAGE IS RINNING!" << endl;
//checkCudaErrors(hipDeviceSynchronize());
if (first_flag != false)
{
bool flag_cor;
	//Gram-Schmidt procedure
for (int j = 0; j < *CONVERGE_AMOUNT_CPU; j++)
{
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (converge_eig_vec, j, size, temp1);
checkCudaErrors(hipblasDdot(cublasHandle, size, temp1, 1, right_gpu_input_once, 1, tmp1));
checkCudaErrors(hipblasDdot(cublasHandle, size, temp1, 1, temp1, 1, tmp2));
*tmp1 = (-1)*(*tmp1) / (*tmp2);
checkCudaErrors(hipblasDaxpy(cublasHandle, size, tmp1, temp1, 1, right_gpu, 1));
Debuger(temp1,size,checking);
}
flag_cor = true;
for (int j = 0; j < *CONVERGE_AMOUNT_CPU; j++)
{
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (converge_eig_vec, j, size, temp1);
checkCudaErrors(hipblasDdot(cublasHandle, size, temp1, 1, right_gpu, 1, tmp1));
if (abs(*tmp1) > SK_Nev)
{
flag_cor = false;
}
//cout << "Checking: " << abs(*tmp1) << endl;
}
if (flag_cor == false)
{
cout <<endl<< "BAD G_SH" << endl;
}
else { cout<<endl << "CORRECT G_SH" << endl; }
// } while (flag_cor != true);
// matr_add << <10000, deviceProp.maxThreadsPerBlock >> > (matrix_w, 0, right_gpu, size);
}
// all_zero << <10000, deviceProp.maxThreadsPerBlock >> > (size, temp1);
/* Debuger(right_gpu, size, checking);
Debuger(temp1, size, checking);*/
GPU_mult(right_gpu, size2, temp_M, diag_gpu2, gpu, d_val2, d_col2, d_row2, temp1, deviceProp.maxThreadsPerBlock);
hipblasDdot(cublasHandle, size, temp1, 1, right_gpu, 1, tmp1);
*tmp_2 = sqrt(*tmp1);
Debuger(temp1, size, checking);
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (right_gpu, 1 / (*tmp_2), size, w_vec); //first vector of Q matrix
Debuger(w_vec, size, checking);
matr_add << <10000, deviceProp.maxThreadsPerBlock >> > (matrix_w, 0, w_vec, size);
Debuger(matrix_w, size, checking);
//x^-
double * proc_temp = new double[size];
*one = 1;
bool flag_cor = true;
for (int i = 0; i < amount_ev; i++) //
{
// if(exit==false)
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (matrix_w, i, size, w_vec);
Debuger(w_vec, size, checking);
GPU_mult(w_vec, size2, temp_M, diag_gpu2, gpu, d_val2, d_col2, d_row2, temp1, deviceProp.maxThreadsPerBlock); // M_xi
matr_add << <10000, deviceProp.maxThreadsPerBlock >> > (multi_temp, i, temp1, size);
Debuger(temp1, size, checking);
checkCudaErrors(hipMemcpy(proc_temp, temp1, sizeof(double)*size, hipMemcpyDeviceToHost));
proc_temp = GPU_stab_bi_gradient_solver_with_preconditioner(val, col, row, proc_temp, diag, non_zero, size); // x^-
checkCudaErrors(hipMemcpy(x_temp, proc_temp, sizeof(double)*(size), hipMemcpyHostToDevice));
hipblasDdot(cublasHandle, size, temp1, 1, x_temp, 1, tmp1); //alpha
element << <1, 1 >> > (alpha_vec, i, *tmp1);//alpha_i
// if (i != amount_ev - 1)
double * checking1 = new double[size*(i + 1)];
*tmp1 = -*tmp1; //-alpha_i
Debuger(temp1, size, checking);
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (w_vec, (*tmp1), size, temp1);
checkCudaErrors(hipblasDaxpy(cublasHandle, size, one, temp1, 1, x_temp, 1));//end
if (i != 0)
{
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (matrix_w, i - 1, size, temp2);
// (*b_i_old) = -(*b_i_old);
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (temp2, -(*b_i_old), size, temp2); //
Debuger(temp2, size, checking);
checkCudaErrors(hipblasDaxpy(cublasHandle, size, one, temp2, 1, x_temp, 1)); //x_temp = x^~i
copy_v << <1000, deviceProp.maxThreadsPerBlock >> > (x_temp, size, temp6);
//Debuger(temp2, size, checking);
	for (int s = 0; s < i; s++) //full reorthogonalization (Gram-Schmidt)
{
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (multi_temp, s, size, temp3);
checkCudaErrors(hipblasDdot(cublasHandle, size, temp3, 1, x_temp, 1, tmp3));
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (matrix_w, s, size, temp3);
(*tmp3) = -*tmp3;
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (temp3, *tmp3, size, temp3);
checkCudaErrors(hipblasDaxpy(cublasHandle, size, one, temp3, 1, x_temp, 1));
}
for (int j = 0; j < *CONVERGE_AMOUNT_CPU; j++)
{
//temp3
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (converge_eig_vec, j, size, temp3);
GPU_mult(temp3, size, temp_M, diag_gpu2, gpu, d_val2, d_col2, d_row2, temp2, deviceProp.maxThreadsPerBlock);
checkCudaErrors(hipblasDdot(cublasHandle, size, temp6, 1, temp2, 1, tmp1));
*tmp1 = -*tmp1;
checkCudaErrors(hipblasDaxpy(cublasHandle, size, tmp1, temp2, 1, x_temp, 1));
}
}
GPU_mult(x_temp, size2, temp_M, diag_gpu2, gpu, d_val2, d_col2, d_row2, temp1, deviceProp.maxThreadsPerBlock);
checkCudaErrors(hipblasDdot(cublasHandle, size, temp1, 1, x_temp, 1, tmp1));
Debuger(temp1, size, checking);
Debuger(x_temp, size, checking);
if (i != amount_ev - 1)
{
*b_i_old = sqrt(*tmp1);
element << <1, 1 >> > (beta_vec, i, *b_i_old);
//Debuger(x_temp, size, checking);
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (x_temp, 1 / (*b_i_old), size, x_temp); //
Debuger(x_temp, size, checking);
flag_cor = true;
for (int j = 0; j < i /*+ 1*/; j++)
{
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (multi_temp, j, size, temp1);
checkCudaErrors(hipblasDdot(cublasHandle, size, temp1, 1, x_temp, 1, tmp1));
if (abs(*tmp1) > SK_Nev)
{
flag_cor = false;
cout << endl << "FUck in Lanc " << j;
}
// cout << "Checking: " << abs(*tmp1) << endl;
}
for (int j = 0; j < *CONVERGE_AMOUNT_CPU /*+ 1*/; j++)
{
	return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (converge_eig_vec, j, size, temp1);
GPU_mult(temp1, size2, temp_M, diag_gpu2, gpu, d_val2, d_col2, d_row2, temp1, deviceProp.maxThreadsPerBlock);
checkCudaErrors(hipblasDdot(cublasHandle, size, temp1, 1, x_temp, 1, tmp1));
if (abs(*tmp1) > SK_Nev)
{
flag_cor = false;
cout << endl << "FUck in eigenvec " << j;
}
// cout << "Checking: " << abs(*tmp1) << endl;
}
if (flag_cor == true)
cout <<endl<< "CHECKING COMPLETED AT STAGE " << i << endl;
else
{
cout <<endl<< "CHECKING is not COMPLETED AT STAGE " << i << endl;
// exit = true;
}
// if (exit == false)
matr_add << <10000, deviceProp.maxThreadsPerBlock >> > (matrix_w, i + 1, x_temp, size);
}
else {
*last_beta = sqrt(*tmp1);
}
}
all_zero << <10000, deviceProp.maxThreadsPerBlock >> > (amount_ev*amount_ev, matr_dense);
hipsolverDnHandle_t cusolverH = NULL;
int info_gpu = 0;
checkCudaErrors(hipsolverDnCreate(&cusolverH));
int lwork = 0;
const int lda = amount_ev /*i + 1*/;
connect_diag_matr << <10000, deviceProp.maxThreadsPerBlock >> > (matr_dense, alpha_vec, beta_vec,amount_ev);
double* checking_mat = new double[(amount_ev*amount_ev)];
// cout << "MATRIX T: " << endl;
// Debuger_for_matr(matr_dense, amount_ev, amount_ev, checking_mat);
// cout << endl;
hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_VECTOR; // compute eigenvalues and eigenvectors.
hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_LOWER;
checkCudaErrors(hipsolverDnDsyevd_bufferSize( //allocated memory for buffer
cusolverH,
jobz,
uplo,
amount_ev, /*i + 1,*/
matr_dense,
lda,
eigen_values_gpu,
&lwork
));
int *devInfo = NULL;
checkCudaErrors(hipMalloc((void**)&devInfo, sizeof(int)));
double *d_work = NULL;
checkCudaErrors(hipMalloc((void**)&d_work, sizeof(double)*lwork));
checkCudaErrors(hipsolverDnDsyevd( //solver
cusolverH,
jobz,
uplo,
amount_ev,// i + 1,
matr_dense, //eigenvectors for T
lda,
eigen_values_gpu, //eigenvalues for T
d_work,
lwork,
devInfo
));
checkCudaErrors(hipDeviceSynchronize());
ret_val << <1, 1 >> > (beta_vec,amount_ev-2,tmp5);
double * a = new double[amount_ev];
Debuger(beta_vec,amount_ev-1,a);
double * ch3 = new double;
double* checking_matr = new double[(amount_ev*amount_ev)];
cout << endl << "TEMPORARY EIGEN VALUES:" << endl;
double*ch_v = new double[amount_ev];
Debuger(eigen_values_gpu, amount_ev, ch_v);
for (int i = 0; i < amount_ev; i++)
cout << "Value: " << 1/ch_v[i] << endl;
cout << endl << "TEMPORARY EIGEN VECTOR:" << endl;
Debuger_for_matr(matr_dense, amount_ev, amount_ev, checking_matr);
Debuger(tmp5, 1, ch3);
cout << endl<<"BETA_q=" << *last_beta << endl;
element << <1, 1 >> > (tmp5, 0, *last_beta);
proverb << <1, 1 >> > (matr_dense,tmp5,amount_ev, eps_for_b,CONVERGE_AMOUNT, converge_eig_val_numb_T, converge_temp);
//converge_eig_val_numb_T - array of places of converged current values in eigen_values_gpu
//converge_temp - number of converged values on current stage
//checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(tmp3, converge_temp, sizeof(double), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(CONVERGE_AMOUNT_CPU, CONVERGE_AMOUNT, sizeof(int), hipMemcpyDeviceToHost));
cout <<endl<<"Converged values "<< *tmp3 << endl;
for (int i = 0; i < *tmp3; i++)
{
add_to_converge_values << <1, 1 >> > (converge_eig_val_numb_T, i, eigen_values_gpu,converge_eig_val, CONVERGE_AMOUNT,tmp5);
checkCudaErrors(hipMemcpy(tmp1, tmp5, sizeof(double), hipMemcpyDeviceToHost));
/*ret_val << <1, 1 >> > (converge_temp,i,tmp5);
checkCudaErrors(hipMemcpy(tmp1, tmp5, sizeof(double), hipMemcpyDeviceToHost));
ret_val << <1, 1 >> > (eigen_values_gpu,int(*tmp1),tmp6);
element << <1, 1 >> > (converge_eig_val, *CONVERGE_AMOUNT, *tmp6);*/
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (matr_dense, int(*tmp1), amount_ev, temp_ev);
checkCudaErrors(hipblasDgemv(cublasHandle,
HIPBLAS_OP_N,
size,
amount_ev,
one,
matrix_w,
size,
temp_ev,
1,
zero_f,
temp3,
1));
matr_add << <10000, deviceProp.maxThreadsPerBlock >> > (converge_eig_vec, *CONVERGE_AMOUNT_CPU+i, temp3, size);
// first_flag = false;
/* cout << "MATR Q: " << endl;
double* checking_matr = new double[(size*amount_ev)];
Debuger_for_matr(matrix_w,size ,amount_ev, checking_matr);
cout << endl;*/
/*double * ch = new double[size];
Debuger(temp3, size, ch);
for(int i=0;i<size;i++)
cout << ch[i] << " " << endl;*/
}
//All zero
// *converge_temp = 0;
*b_i_old=0;
exit = false;
all_zero << <10000, deviceProp.maxThreadsPerBlock >> > (size*amount_ev, matrix_w); //clear lanc vectors matr
all_zero << <10000, deviceProp.maxThreadsPerBlock >> > (amount_ev*amount_ev, matr_dense); // clear matr T or eigen vectors matr
all_zero << <10000, deviceProp.maxThreadsPerBlock >> > (size*amount_ev, multi_temp); // clear Mx matr
all_zero << <10000, deviceProp.maxThreadsPerBlock >> > (amount_ev-1, beta_vec);
all_zero << <10000, deviceProp.maxThreadsPerBlock >> > (amount_ev, alpha_vec);
double * ch = new double[amount_ev];
Debuger(converge_eig_val, amount_ev, ch);
for(int i=0;i<*tmp3;i++)
cout << "E_Values on iteration: " << 1 / ch[i] << endl;
checkCudaErrors(hipMemset(converge_temp, 0.0, sizeof(double)));
*CONVERGE_AMOUNT_CPU += *tmp3;
checkCudaErrors(hipMemcpy(CONVERGE_AMOUNT, CONVERGE_AMOUNT_CPU, sizeof(int), hipMemcpyHostToDevice));
// checkCudaErrors(hipMemcpy(CONVERGE_AMOUNT_CPU, CONVERGE_AMOUNT, sizeof(int), hipMemcpyDeviceToHost));
checkCudaErrors(hipDeviceSynchronize());
step++;
if(*tmp3==0)
{
cout << endl << "NO NEW CONVERGED EIGEN VALUES" << endl;
}
else
{ first_flag = true;}
if (step == 10)
break;
// *CONVERGE_AMOUNT_CPU = amount_ev;
}
cout << "STEP: "<<step << endl;
reverse_for_eigen_values_lanc << <10000, deviceProp.maxThreadsPerBlock >> > (converge_eig_val, amount_ev);
checkCudaErrors(hipMemcpy(eigen_values, converge_eig_val, sizeof(double)*(amount_ev), hipMemcpyDeviceToHost));
//HIPBLAS_OP_N - no N or n
//HIPBLAS_OP_T - yes T or t
cout << "+++++++++FINAL SOLUTION ++++++++" << endl << endl << endl;
double* checking_matr = new double[amount_ev*size];
Debuger_for_matr(converge_eig_vec, size, amount_ev, checking_matr);
double * one_f = new double;
cout << "PROVERB ON RESULT:" << endl;
for (int j = 0; j < amount_ev; j++)
{
*one_f = -eigen_values[j];
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (converge_eig_vec, j, size, temp3);
GPU_mult(temp3, size2, temp_M, diag_gpu2, gpu, d_val2, d_col2, d_row2, temp1_b, deviceProp.maxThreadsPerBlock);
GPU_mult(temp3, size, temp, diag_gpu, gpu, d_val, d_col, d_row, temp2, deviceProp.maxThreadsPerBlock);
checkCudaErrors(hipblasDaxpy(cublasHandle, size, one_f, temp1_b, 1, temp2, 1));//end
checkCudaErrors(hipblasDnrm2(cublasHandle,size,temp2,1,tmp1));
cout << "Nevazka:[" << j << "]= " << *tmp1<<endl;
}
// *one_f = 0.098569;//1.786052;
// cout << endl << "Mathcad check:" << endl;
// cout << "For eigenvalue: " << *one_f << endl;
// *one_f = -*one_f;
// double * test = new double[size];
// double * test_gpu;
// test[0] = -1;//-0.127;
// test[1] = -0.898;//0,006265;
// test[2] = -0.372;//-1;
// test[3] =0;//0.074;
// test[4] =0;//0;
// test[5] =0;//0;
// test[6] =0;//0;
// test[7] =0;//0;
// test[8] =0;//0;
// test[9] = 0.368;//0.569;
// checkCudaErrors(hipMalloc((void **)&test_gpu, sizeof(double)*(size)));
// checkCudaErrors(hipMemcpy(test_gpu, test, sizeof(double)*(size), hipMemcpyHostToDevice));
//// return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (test_gpu, j, size, temp3);
// GPU_mult(test_gpu, size2, temp_M, diag_gpu2, gpu, d_val2, d_col2, d_row2, temp1_b, deviceProp.maxThreadsPerBlock);
// GPU_mult(test_gpu, size, temp, diag_gpu, gpu, d_val, d_col, d_row, temp2, deviceProp.maxThreadsPerBlock);
// checkCudaErrors(hipblasDaxpy(cublasHandle, size, one_f, temp1_b, 1, temp2, 1));//end
// checkCudaErrors(hipblasDnrm2(cublasHandle, size, temp2, 1, tmp1));
// cout << "Nevazka:= " << *tmp1 << endl;
}
void printMatrix(int m, int n, const double*A, int lda, const char* name)
{
for (int row = 0; row < m; row++) {
for (int col = 0; col < n; col++) {
double Areg = A[row + col*lda];
printf("%s(%d,%d) = %f\n", name, row + 1, col + 1, Areg);
}
}
}
| 11ae1e09e72c9abb4aba61206fa5c6a5c788fe1e.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <cmath>
#include <ctime>
#include <locale.h>
#include <iostream>
#include <iomanip>
#include <omp.h>
#include <ctype.h>
#include <algorithm>
#include <vector>
#include <assert.h>
#include <cuda_runtime.h>
#include <cuda_device_runtime_api.h>
#include <cuda.h>
#include <cusparse_v2.h>
#include <cublas_v2.h>
#include <cublas_api.h>
#include <cusolverDn.h>
#include <helper_cuda.h>
#include <helper_cuda_drvapi.h>
#include <helper_functions.h>
#include <helper_cusolver.h>
#include <device_launch_parameters.h>
#include "GPU.h"
#define SK_Nev 1.e-6
#define STEP_LIMIT 100000
#define APPROX 1.e-15
#define eps_for_b 1.e-6
//#define MAXITER 200000
//#define MAXRESIDUE 1.e-10
void Debuger_for_matr(double * input, int rows, int columns, double* checking);
void GPU_mult(double* vec, int size, int *nnz, double* diag, int gpu_amount, double **d_A, int **d_B, int ** d_C, double* rezult, int maximumThreads);
int* split(int gpu_amount, double* A, int* B, int* C, int size, int non_zero, double **d_A, int ** d_B, int **d_C);
//void Debuger(double* input, int size);
void Debuger(double * input, int size, double* checking);
void printMatrix(int m, int n, const double*A, int lda, const char* name);
void GPU_mult_for_little_gradient(double* vec, int size, int* nnz, double* diag, int gpu_amount, double** d_val, int** d_col, int** d_row, double* rezult);
void show_eigen_value(double*input, int lanc_count);
//double* GPU_stab_bi_gradient_solver_with_preconditioner(double *val, int *col, int *row, double *right, double *diag, int nnz, int size);
/*ret_val << <1, 1 >> > (converge_temp,i,tmp5);
checkCudaErrors(cudaMemcpy(tmp1, tmp5, sizeof(double), cudaMemcpyDeviceToHost));
ret_val << <1, 1 >> > (eigen_values_gpu,int(*tmp1),tmp6);
element << <1, 1 >> > (converge_eig_val, *CONVERGE_AMOUNT, *tmp6);*/
//__global__ void rand_vec(double* input,int size)
//{
// int i = rand() % size;
// input[i] = 1;
//}
__global__ void copy_v(double* input, int size,double*where)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < size)
where[i] = input[i];
}
__global__ void add_to_converge_values(double* converge_temp,int i,double * eigen_values_gpu,double *converge_eig_val,int * CONVERGE_AMOUNT, double * tmp5)
{
*tmp5 = converge_temp[i];
converge_eig_val[(*CONVERGE_AMOUNT)+i] = eigen_values_gpu[int(converge_temp[i])];
}
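// Convergence test for the Ritz pairs: the i-th Ritz value is accepted when
// |beta_q * s_{m,i}| <= eps, where s_{m,i} is the last component of the i-th eigenvector
// of the tridiagonal matrix T and beta_q is the leftover beta of the final Lanczos step.
// Indices of the accepted values go to converge_val_number and are counted in
// converge_temp; the loop runs serially in a single thread, which is adequate for the
// small amount_ev used here.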
__global__ void proverb(double* eigvecT, double *beta_q, int amount_ev,double eps,int * converge_amount,double * converge_val_number,double * converge_temp)
{
/*int i = blockDim.x*blockIdx.x + threadIdx.x;*/
/*if (i < amount_ev)*/
for(int i=0;i<amount_ev;i++)
if (abs(eigvecT[amount_ev*i + (amount_ev - 1)] * (*beta_q)) <= eps)
{
converge_val_number[int(*converge_temp)] = i;
*converge_temp += 1;
//*converge_amount += 1;
}
}
__global__ void vec_mul_number(double* A, double value, int size, double* res)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < size)
{
res[i] = A[i] * (value);
}
}
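// Assembles the dense symmetric tridiagonal Lanczos matrix T (lanc_amount x lanc_amount):
// a[i] on the main diagonal, b[i] on the first sub- and super-diagonal. The dense layout
// is what cusolverDnDsyevd expects later on.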
__global__ void connect_diag_matr(double* matrix, double * a, double * b, int lanc_amount)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < lanc_amount)
{
matrix[i*lanc_amount + i] = a[i];
if (i != lanc_amount - 1)
{
matrix[i*lanc_amount+i+1] = b[i];
matrix[i*lanc_amount + i + lanc_amount] = b[i];
}
}
}
//__global__ void correlation(double* matrix, double *input_vector, int size, int count)
//{
//
//}
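// The Lanczos recurrence runs on the operator A^{-1}*M, so its Ritz values approximate
// 1/lambda; this kernel maps them back to the eigenvalues lambda of A*x = lambda*M*x.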
__global__ void reverse_for_eigen_values_lanc(double* input, int size)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < size)
{
input[i] = 1 / input[i];
}
}
__global__ void matr_add(double* main, int count, double *arr,int size)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < size)
main[count*size + i] = arr[i];
}
__global__ void return_vec(double* main, int count, int size,double * arr)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < size)
arr[i] = main[count*size + i];
}
__global__ void element(double* A, int i, double res)
{
A[i] = res;
}
__global__ void ret_val(double* A, int i, double *res)
{
*res = A[i];
}
__global__ void all_zero(int size, double* res)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < size)
{
res[i] = 0;
}
}
__global__ void vector_addition(double *input, int size,double* result)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < size)
{
result[i] += input[i];
}
}
__global__ void not_full_scalar(double* A, double* B, int size, double* res)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < size)
{
res[i] = A[i] * B[i];
}
}
__global__ void diag_revers(double* diag, double* res, int size)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < size)
{
res[i] = 1/diag[i];
}
}
//void Debuger(double* input, int size,int checking)
//{
// double * test = new double[size];
// checkCudaErrors(cudaMemcpy(test, input, sizeof(double)*(size), cudaMemcpyDeviceToHost));
// for (int i = 0; i<size; i++)
// {
// cout <<endl<< "test[" << i << "] = " << test[i];
// }
// cout << endl;
// delete test;
//}
void Debuger(double * input, int size, double* checking)
{
checkCudaErrors(cudaMemcpy(checking, input, sizeof(double)*(size), cudaMemcpyDeviceToHost));
}
void Debuger_for_matr(double * input, int rows, int columns, double* checking)
{
checkCudaErrors(cudaMemcpy(checking, input, sizeof(double)*(rows)*(columns), cudaMemcpyDeviceToHost));
for (int i = 0; i < columns; i++)
{
cout << "Vector " << "[" << i << "]" << endl;
for (int j = 0; j < rows; j++)
{
cout << checking[i*rows + j] << " ";
}
cout << endl;
}
}
void show_eigen_value(double*input,int lanc_count)
{
double* checking = new double[lanc_count];
checkCudaErrors(cudaMemcpy(checking, input, sizeof(double)*(lanc_count), cudaMemcpyDeviceToHost));
cout << endl << "Eigenvalues: " << endl;
for (int i = 0; i < lanc_count; i++)
cout << "Val ["<<i<<"] = "<<std::setprecision(17)<< checking[i] << endl;
cout << endl;
}
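// Host-side BLAS-1 helpers used by the plain conjugate-gradient solver:
// dot product, scaling by a scalar, and element-wise addition/subtraction of dense vectors.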
inline double dot_product(double* A, double* B, int size)
{
double rezult = 0;
for (int i = 0; i < size; i++)
{
rezult += (A[i] * B[i]);
}
return rezult;
}
inline void vector_on_number(double* A, double value, int size, double* res)
{
for (int i = 0; i < size; i++)
res[i] = A[i] * value;
}
inline void sum_vector(double* A, double* B, int size, double* res)
{
for (int i = 0; i < size; i++)
{
res[i] = A[i] + B[i];
}
}
inline void raznost_vector(double* A, double* B, int size, double* res)
{
for (int i = 0; i < size; i++)
{
res[i] = A[i] - B[i];
}
}
int return_string(int number, int* C)
{
int i = 0;
while (C[i] <= number)
i++;
return i;
}
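/*
 * split (CUDA build): cuts the CSR matrix into contiguous nonzero ranges, one padded
 * size-by-size slice per device, so that every device can run a full-size csrmv; the main
 * diagonal stays in the separate diag array and is re-added during the multiplication.
 * Returns the per-device padded nnz counts in a new[]-allocated array.
 */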
int* split(int gpu_amount, double* val, int* col, int* row, int size, int non_zero, double **d_val, int ** d_col, int **d_row) // splits the CSR matrix across the GPUs
{
	int mod = non_zero / gpu_amount; // nonzeros that go to every GPU
	int rest = non_zero - mod*(gpu_amount - 1); // remainder that goes to the last GPU
int first_position;
int last_position;
int first_string;
int last_string;
double *val_;
int *col_;
int *row_;
int *temp = new int[gpu_amount];
int nsize;
#if CHECKER
cout << endl << "CSR:" << endl;
for (int i = 0; i < non_zero; i++)
{
cout << val[i] << " ";
}
cout << endl;
for (int i = 0; i < non_zero; i++)
{
cout << col[i] << " ";
}
cout << endl;
for (int i = 0; i < size + 1; i++)
{
cout << row[i] << " ";
}
cout << endl;
#endif
for (int number = 0; number < gpu_amount; number++)
{
if (number == gpu_amount - 1)
{
int in1 = 0;
int in2 = 0;
first_position = number*mod;// n
last_position = non_zero - 1;//k
first_string = return_string(number*mod, row) - 1; //i
last_string = return_string(non_zero - 1, row) - 1;//j
nsize = rest + first_string + size - 1 - last_string;
val_ = new double[nsize]; // definition
for (int i = 0; i < nsize; i++)
{
if (i < first_string)
{
val_[i] = 0;
}
else
{
val_[i] = val[first_position + in1];
in1++;
}
}
//memcpy(&A_[first_string],&A[first_position],sizeof(double)*(rest));
col_ = new int[nsize];
for (int i = 0; i < nsize; i++)
{
if (i < first_string)
{
col_[i] = i;
}
else
{
col_[i] = col[first_position + in2];
in2++;
}
}
//memcpy(&B_[first_string], &B[first_position], sizeof(double)*(rest));
row_ = new int[size + 1];
for (int i = 0; i < first_string; i++) //0123..C..000
row_[i] = i;
for (int count = first_string; count <= last_string; count++)
{
row_[count] = row[count] - first_position + first_string;
if (row[count] - first_position < 0) row_[count] = first_string;
}
row_[size] = nsize;
}
else
{
int in1 = 0;
int in2 = 0;
first_position = number*mod;// n
last_position = (number + 1)*mod - 1;//k
first_string = return_string(number*mod, row) - 1; //i
last_string = return_string((number + 1)*mod - 1, row) - 1;//j
nsize = mod + first_string + size - 1 - last_string;
val_ = new double[nsize]; // definition
for (int i = 0; i < nsize; i++)
{
if ((i < first_string) || (i > first_string + mod - 1))
{
val_[i] = 0;
}
else
{
val_[i] = val[first_position + in1];
in1++;
}
}
//memcpy(&A_[first_string], &A[first_position], sizeof(double)*(mod));
col_ = new int[nsize];
int inn = 1;
for (int i = 0; i < nsize; i++)
{
if (i < first_string)
{
col_[i] = i;
}
else if (i < first_string + mod)
{
col_[i] = col[first_position + in2];
in2++;
}
else
{
col_[i] = last_string + inn;
inn++;
}
}
//memcpy(&B_[first_string], &B[first_position], sizeof(double)*(mod));
row_ = new int[size + 1];
for (int i = 0; i < first_string; i++) //0123..C..000
row_[i] = i;
for (int count = first_string; count <= last_string; count++)
{
row_[count] = row[count] - first_position + first_string;
if (row[count] - first_position < 0) row_[count] = first_string;
}
int l = 1;
for (int i = last_string + 1; i < size; i++) //0123..C..n..
{
row_[i] = first_string + last_position - first_position + l;
l++;
}
row_[size] = nsize;
}
#if CHECKER
cout << endl << "Device: " << number << " n: " << first_position << " k: " << last_position << " i: " << first_string << " j: " << last_string << endl;
cout << endl;
for (int i = 0; i < nsize; i++)
{
cout << val_[i] << " ";
}
cout << endl;
for (int i = 0; i < nsize; i++)
{
cout << col_[i] << " ";
}
cout << endl;
for (int i = 0; i < size + 1; i++)
{
cout << row_[i] << " ";
}
cout << endl;
#endif
temp[number] = nsize;
checkCudaErrors(cudaSetDevice(number));
checkCudaErrors(cudaMalloc((void **)&d_val[number], sizeof(double)*nsize));
checkCudaErrors(cudaMalloc((void **)&d_col[number], sizeof(int)*nsize));
checkCudaErrors(cudaMalloc((void **)&d_row[number], sizeof(int)*(size + 1)));
checkCudaErrors(cudaMemcpy(d_val[number], val_, sizeof(double)*nsize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_col[number], col_, sizeof(int)*nsize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_row[number], row_, sizeof(int)*(size + 1), cudaMemcpyHostToDevice));
delete[] val_;
delete[] col_;
delete[] row_;
}
return temp;
}
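/*
 * GPU_mult (CUDA build): distributed SpMV rezult = A*vec. Each device runs cusparseDcsrmv
 * on its padded slice, the partial results are staged through host buffers and summed on
 * device 0, and the diagonal term diag[i]*vec[i] is added at the end. vec, diag and rezult
 * are expected to be device pointers on GPU 0.
 */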
void GPU_mult(double* vec, int size, int* nnz, double* diag, int gpu_amount, double** d_val, int** d_col, int** d_row, double* rezult, int maximumThreads)
{
double **pipe = new double*[gpu_amount];
for (int i = 0; i < gpu_amount; i++)
{
pipe[i] = new double[size];
}
//size == vec.size()
checkCudaErrors(cudaSetDevice(0));
double *temp_rez;
double *vec_temp;
double * checking = new double[size];
double** rez_p = new double *[gpu_amount];
//double** rez_h = new double *[gpu_amount];
checkCudaErrors(cudaMalloc((void**)&temp_rez,sizeof(double)*size));
checkCudaErrors(cudaMalloc((void**)&vec_temp, sizeof(double)*size));
//checkCudaErrors(cudaMemset(temp_rez, 0.0, size));
all_zero <<<10000, maximumThreads>>> (size,temp_rez);
//Debuger(temp_rez, size, checking);
//checkCudaErrors(cudaMemset(vec_temp, 0.0, size));
all_zero << <10000, maximumThreads>> > (size, vec_temp);
//Debuger(vec_temp, size, checking);
double *one = new double;
*one = 1.0;
double *zero = new double;
*zero = 0.0;
//double *x_d;
omp_set_num_threads(gpu_amount);
double dtime = omp_get_wtime();
#pragma omp parallel for// private(rez_p)
for (int number = 0; number < gpu_amount; number++)
{
cusparseHandle_t handle = NULL;
cusparseMatDescr_t Adescr = NULL;
checkCudaErrors(cudaSetDevice(number));
//checkCudaErrors(cudaMalloc((void **)&x_d, sizeof(double)*size));
//checkCudaErrors(cudaMalloc((void**)&tempnam,sizeof(double)*size));
//checkCudaErrors(cudaMemcpy(x_d, vec, sizeof(double)*size, cudaMemcpyHostToDevice));
checkCudaErrors(cusparseCreate(&handle));
checkCudaErrors(cusparseCreateMatDescr(&Adescr));
checkCudaErrors(cudaMalloc((void **)&rez_p[number], sizeof(double)*size));
checkCudaErrors(cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
size, size, nnz[number], one,
Adescr,
d_val[number],
d_row[number], d_col[number],
vec, zero,
rez_p[number]));
checkCudaErrors(cudaMemcpy(pipe[number], rez_p[number], sizeof(double)*size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(rez_p[number]));
//checkCudaErrors(cudaFree(x_d));
checkCudaErrors(cusparseDestroy(handle));
checkCudaErrors(cusparseDestroyMatDescr(Adescr));
}
// accumulate the per-GPU partial results into temp_rez on device 0
checkCudaErrors(cudaSetDevice(0));
for (int i = 0; i < gpu_amount; i++)
{
checkCudaErrors(cudaMemcpy(vec_temp, pipe[i], sizeof(double)*size, cudaMemcpyHostToDevice));
//Debuger(vec_temp, size, checking);
//Debuger(temp_rez, size, checking);
vector_addition << <10000, maximumThreads >> > (vec_temp, size, temp_rez);
//Debuger(temp_rez,size,checking);
cudaDeviceSynchronize();
}
/*for (int i = 0; i < size; i++)
{
for (int j = 0; j < gpu_amount; j++)
{
rezult[i] += pipe[j][i];
}
}*/
//vector_addition << <10000, maximumThreads >> > (pipe,size,gpu_amount,rezult);
/*for (int i = 0; i < size; i++)
{
rezult[i] += diag[i] * vec[i];
}*/
cublasHandle_t cublasHandle = NULL;
checkCudaErrors(cublasCreate(&cublasHandle));
checkCudaErrors(cudaSetDevice(0));
not_full_scalar << <10000,maximumThreads >> > (diag,vec,size,vec_temp);
//Debuger(vec_temp, size, checking);
//cudaDeviceSynchronize();
vector_addition << <10000, maximumThreads >> > (vec_temp, size, temp_rez);
//Debuger(temp_rez, size, checking);
//cudaDeviceSynchronize();
checkCudaErrors(cublasDcopy(cublasHandle, size, temp_rez, 1, rezult, 1));
checkCudaErrors(cudaFree(temp_rez));
// Debuger(temp_rez, size, checking);
checkCudaErrors(cudaFree(vec_temp));
checkCudaErrors(cublasDestroy(cublasHandle));
//Debuger(temp_rez, size, checking);
//checkCudaErrors(cudaFree(rez_p));
/*for (int i = 0; i < gpu_amount; i++)
{
delete pipe[i];
}
delete[] pipe;*/
// delete vec_temp;
delete zero;
//delete x_d;
delete one;
//delete temp_rez;
delete[] checking;
delete[] pipe;
delete[] rez_p;
}
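// GPU_mult_for_little_gradient: same distributed SpMV as GPU_mult, but for host-side vectors:
// vec is copied to every GPU, the per-GPU partial products are copied back to the host, and the
// accumulation (including the diagonal term) is done on the CPU into rezult.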
void GPU_mult_for_little_gradient(double* vec, int size, int* nnz, double* diag, int gpu_amount, double** d_val, int** d_col, int** d_row, double* rezult)
{
double **pipe = new double*[gpu_amount];
for (int i = 0; i < gpu_amount; i++)
{
pipe[i] = new double[size];
}
//size == vec.size()
//double *temp_rez;
double** rez_p = new double *[gpu_amount];
//checkCudaErrors(cudaMallocManaged((void**)&temp_rez, sizeof(double)*size));
double *one = new double;
*one = 1.0;
double *zero = new double;
*zero = 0.0;
double *x_d;
omp_set_num_threads(gpu_amount);
double dtime = omp_get_wtime();
#pragma omp parallel for// private(rez_p)
for (int number = 0; number < gpu_amount; number++)
{
cusparseHandle_t handle = NULL;
cusparseMatDescr_t Adescr = NULL;
checkCudaErrors(cudaSetDevice(number));
checkCudaErrors(cudaMalloc((void **)&x_d, sizeof(double)*size));
//checkCudaErrors(cudaMalloc((void**)&tempnam,sizeof(double)*size));
checkCudaErrors(cudaMemcpy(x_d, vec, sizeof(double)*size, cudaMemcpyHostToDevice));
checkCudaErrors(cusparseCreate(&handle));
checkCudaErrors(cusparseCreateMatDescr(&Adescr));
checkCudaErrors(cudaMalloc((void **)&rez_p[number], sizeof(double)*size));
checkCudaErrors(cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
size, size, nnz[number], one,
Adescr,
d_val[number],
d_row[number], d_col[number],
x_d, zero,
rez_p[number]));
checkCudaErrors(cudaMemcpy(pipe[number], rez_p[number], sizeof(double)*size, cudaMemcpyDeviceToHost));
//vector_addition << <10000, maximumThreads >> > (rez_p[number], size, number, temp_rez);
checkCudaErrors(cudaFree(rez_p[number]));
checkCudaErrors(cudaFree(x_d));
checkCudaErrors(cusparseDestroy(handle));
checkCudaErrors(cusparseDestroyMatDescr(Adescr));
}
for (int i = 0; i < size; i++)
{
for (int j = 0; j < gpu_amount; j++)
{
rezult[i] += pipe[j][i];
}
}
//vector_addition << <10000, maximumThreads >> > (pipe,size,gpu_amount,rezult);
for (int i = 0; i < size; i++)
{
rezult[i] += diag[i] * vec[i];
}
for (int i = 0; i < gpu_amount; i++)
{
delete[] pipe[i];
}
delete[] pipe;
delete zero;
delete one;
delete[] rez_p;
}
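// GPU_gradient_solver: conjugate gradient iteration for A*x = right, with the matrix-vector
// products done by GPU_mult_for_little_gradient and the vector updates done on the host:
// a_k = (r_k,r_k)/(A z_k, z_k); x_{k+1} = x_k + a_k z_k; r_{k+1} = r_k - a_k A z_k;
// b_k = (r_{k+1},r_{k+1})/(r_k,r_k); z_{k+1} = r_{k+1} + b_k z_k.
// The loop stops when ||r_k||/||right|| < APPROX or STEP_LIMIT iterations are reached.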
double* gpu_solver::GPU_gradient_solver(double *val, int *col, int *row, double *right, double *diag, int nnz, int size)
{
int gpu;
checkCudaErrors(cudaGetDeviceCount(&gpu));
double ** d_val = new double *[gpu];
int ** d_col = new int *[gpu];
int ** d_row = new int *[gpu];
int *temp = new int[gpu];
double* r0 = new double[size];
double* x0 = new double[size];
double* x_k = new double[size];
double* z0 = new double[size];
double* z_k = new double[size];
double* r_k = new double[size];
double* ch = new double[size];
double* cont = new double[size];
double* testing = new double[size];
//*r0 = right;// x0 ={0...}
memcpy(r0, right, sizeof(double)*(size));
memcpy(z0, r0, sizeof(double)*(size));
memcpy(r_k, r0, sizeof(double)*(size));
double a_k;
double b_k;
double r0_to_r0;
double right_to_right = sqrt(dot_product(right, right, size));
double rk_to_rk;
double checking;
bool fg = true;
int step = 0;
double gpu_time = 0;
clock_t int1 = clock();
for (int i = 0; i < size; i++)
{
x0[i] = 0;
}
temp = split(gpu, val, col, row, size, nnz, d_val, d_col, d_row);
clock_t int2 = clock();
#if CHECKER
cout << "SPLIT TIME: " << double(int2 - int1) / 1000.0 << endl;
#endif
do
{
if (!fg)
{
memcpy(r0, r_k, sizeof(double)*(size));
memcpy(x0, x_k, sizeof(double)*(size));
memcpy(z0, z_k, sizeof(double)*(size));
}
r0_to_r0 = dot_product(r0, r0, size);
clock_t gpu_time1 = clock();
memset(ch,0,sizeof(double)*(size));
GPU_mult_for_little_gradient(z0, size, temp, diag, gpu, d_val, d_col, d_row, ch);
clock_t gpu_time2 = clock();
gpu_time += double(gpu_time2 - gpu_time1);
a_k = r0_to_r0 / dot_product(ch, z0, size);
vector_on_number(z0, a_k, size, cont);
if (step == 640)
cout << "640" << endl;
sum_vector(x0, cont, size, x_k);
vector_on_number(ch, a_k, size, cont);
raznost_vector(r0, cont, size, r_k);
rk_to_rk = dot_product(r_k, r_k, size);
b_k = rk_to_rk / r0_to_r0;
vector_on_number(z0, b_k, size, cont);
sum_vector(r_k, cont, size, z_k);
fg = false;
step++;
checking = sqrt(rk_to_rk) / right_to_right;
//cout << endl<<"Checking" << checking << endl;
} while ((checking >= APPROX) && (step < STEP_LIMIT));
//cout <<endl<< "GPU TIME: " << gpu_time / 1000.0 << endl;
cout << "NEVAZKA: " << checking << endl;
GPU_mult_for_little_gradient(x_k, size, temp, diag, gpu, d_val, d_col, d_row, ch);
raznost_vector(ch, right, size, testing);
double verify = sqrt(dot_product(testing, testing, size));
cout << endl << "VERIFICATION: " << verify << endl;
cout << endl << "Step = " << step << endl;
for (int number = 0; number < gpu; number++)
{
checkCudaErrors(cudaSetDevice(number));
checkCudaErrors(cudaFree(d_val[number]));
checkCudaErrors(cudaFree(d_col[number]));
checkCudaErrors(cudaFree(d_row[number]));
}
delete[] temp;
delete[] d_val;
delete[] d_col;
delete[] d_row;
delete[] ch;
delete[] cont;
delete[] x0;
delete[] r0;
//delete[] z0;
//delete[] z_k;
//delete[] r_k;
//delete[] testing;
return x_k;
}
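// GPU_stab_bi_gradient_solver: unpreconditioned BiCGSTAB for A*x = right, run on device 0 with
// cuBLAS for the vector work and GPU_mult for the matrix-vector products. The numbered comments
// in the loop follow the standard recurrence:
// rho_k = (r0,r); beta = (rho_k/rho_{k-1})*(alpha/omega); p = r + beta*(p - omega*v);
// v = A*p; alpha = rho_k/(r0,v); h = x + alpha*p; s = r - alpha*v;
// t = A*s; omega = (t,s)/(t,t); x = h + omega*s; r = s - omega*t.
// Convergence test: ||r_k||/||r_0|| <= APPROX.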
double* gpu_solver::GPU_stab_bi_gradient_solver(double *val, int *col, int *row, double *right, double *diag, int nnz, int size)
{
//Count amount of devices
int gpu;
checkCudaErrors(cudaGetDeviceCount(&gpu));
//double *test = new double[size];
//Arrays for devices
double ** d_val = new double *[gpu];
int ** d_col = new int *[gpu];
int ** d_row = new int *[gpu];
//Array with devicearray's sizes
int *temp = new int[gpu];
temp = split(gpu, val, col, row, size, nnz, d_val, d_col, d_row);
//int step = 0;
bool flag = true;
double *minus = new double;
double *zero = new double;
double *one = new double;
*minus = -1.0;
*zero = 0.0;
*one = 1.0;
//Initialization of diag
double* final_result = new double[size];
//Initialization of all variables
checkCudaErrors(cudaSetDevice(0));
cublasHandle_t cublasHandle = NULL;
cusparseHandle_t cusparseHandle = NULL;
checkCudaErrors(cusparseCreate(&cusparseHandle));
cusparseMatDescr_t matDescr = NULL;
checkCudaErrors(cusparseCreateMatDescr(&matDescr));
checkCudaErrors(cublasCreate(&cublasHandle));
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, 0));
double *x0;
checkCudaErrors(cudaMalloc((void **)&x0, sizeof(double)*(size)));
checkCudaErrors(cudaMemset(x0, 0.0, sizeof(double)*(size)));
double *r0, *rT;
double * diag_gpu;
double * h;
//double * right_part_gpu;
checkCudaErrors(cudaMalloc((void **)&r0, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&rT, sizeof(double)*(size)));
//checkCudaErrors(cudaMalloc((void **)&right_part_gpu, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&diag_gpu, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&h, sizeof(double)*(size)));
checkCudaErrors(cudaMemcpy(diag_gpu, diag, sizeof(double)*(size), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(r0, right, sizeof(double)*(size), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(rT, right, sizeof(double)*(size), cudaMemcpyHostToDevice));
//checkCudaErrors(cudaMemcpy(right_part_gpu,right, sizeof(double)*(size), cudaMemcpyHostToDevice));
double *rho0 = new double;
double *alpha0 = new double;
double *omega0 = new double;
*rho0 = 1.0;
*alpha0 = 1.0;
*omega0 = 1.0;
double *minus_one = new double;
*minus_one = -1.0;
double *nu0, *p0;
int step = 0;
checkCudaErrors(cudaMalloc((void **)&nu0, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&p0, sizeof(double)*(size)));
checkCudaErrors(cudaMemset(nu0, 0.0, sizeof(double)*(size)));
checkCudaErrors(cudaMemset(p0, 0.0, sizeof(double)*(size)));
double *rhoK = new double;
double *alphaK = new double;
double *omegaK = new double;
double *betaK = new double;
double *pK, *nuK, *sK, *tK, *xK, *rK;
checkCudaErrors(cudaMalloc((void **)&pK, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&nuK, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&sK, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&tK, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&xK, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&rK, sizeof(double)*(size)));
double *temp1, *temp2, *temp3;
checkCudaErrors(cudaMalloc((void **)&temp1, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&temp2, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&temp3, sizeof(double)*(size)));
//double * NegOmega=new double;
double * temp_var_1=new double;
double * temp_var_2 = new double;
double * checking=new double[size];
//*NegOmega = -(*omega0);
//1
do
{
cublasDdot(cublasHandle, size, r0, 1, rT, 1, rhoK);
//2
*betaK = (*rhoK / *rho0) * (*alpha0 / *omega0);
//cout <<"OUT: "<< *betaK;
//3
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (nu0, -(*omega0), size, temp1);
cublasDaxpy(cublasHandle, size, one, p0, 1, temp1, 1);
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (temp1, *betaK, size, temp1);
cublasDaxpy(cublasHandle, size, one, rT, 1, temp1, 1);
cublasDcopy(cublasHandle, size, temp1, 1, pK, 1);
Debuger(pK, size,checking);
//4
GPU_mult(pK, size, temp, diag_gpu, gpu, d_val, d_col, d_row, nuK, deviceProp.maxThreadsPerBlock);
Debuger(nuK, size,checking);
//5
cublasDdot(cublasHandle, size, r0, 1, nuK, 1, temp_var_1);
*alphaK = (*rhoK) / (*temp_var_1);
//6
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (pK, *alphaK, size, temp1);
cudaDeviceSynchronize();
cublasDaxpy(cublasHandle, size, one, x0, 1, temp1, 1);
cublasDcopy(cublasHandle, size, temp1, 1, h, 1);
Debuger(h, size,checking);
//7
//cublasDaxpy(cublasHandle, size, minus_one, h, 1, xK, 1);
//8
Debuger(nuK,size,checking);
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (nuK, -(*alphaK), size, temp1);
cudaDeviceSynchronize();
Debuger(rT, size, checking);
Debuger(temp1, size, checking);
cublasDaxpy(cublasHandle, size, one, rT, 1, temp1, 1);
Debuger(temp1, size, checking);
cublasDcopy(cublasHandle, size, temp1, 1, sK, 1);
Debuger(sK, size,checking);
//9
GPU_mult(sK, size, temp, diag_gpu, gpu, d_val, d_col, d_row, tK, deviceProp.maxThreadsPerBlock);
//Debuger(xK, size,checking);
Debuger(tK,size,checking);
//10
cublasDdot(cublasHandle, size, tK, 1, sK, 1, temp_var_1);
//Debuger(temp_var_1,size,checking);
cublasDdot(cublasHandle, size, tK, 1, tK, 1, temp_var_2);
//Debuger(temp_var_2, size, checking);
*omegaK = *temp_var_1 / *temp_var_2;
//11
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (sK, *omegaK, size, temp1);
cudaDeviceSynchronize();
cublasDaxpy(cublasHandle, size, one, h, 1, temp1, 1);
cublasDcopy(cublasHandle, size, temp1, 1, xK, 1);
Debuger(xK,size,checking);
//12
/*cublasDaxpy(cublasHandle, size, minus_one, xK, 1, x0, 1);
cublasDnrm2(cublasHandle, size, x0, 1, temp_var_1);*/
//13
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (tK, -(*omegaK), size, temp1);
cublasDaxpy(cublasHandle, size, one, sK, 1, temp1, 1);
cublasDcopy(cublasHandle, size, temp1, 1, rK, 1);
Debuger(rK, size, checking);
cublasDnrm2(cublasHandle, size, rK, 1, temp_var_1);
cublasDnrm2(cublasHandle, size, r0,1,temp_var_2);
//if(step%20==0)
//cout <<"NEVAZKA = "<< *temp_var_1/ *temp_var_2 << endl;
if (*temp_var_1 / *temp_var_2<= APPROX)
{
cout <<endl<< "NEVAZKA = " << *temp_var_1/ *temp_var_2 << endl;
checkCudaErrors(cudaMemcpy(final_result,xK, sizeof(double)*size, cudaMemcpyDefault));
break;
}
cublasDcopy(cublasHandle, size, rK, 1, rT, 1);
cublasDcopy(cublasHandle, size, xK, 1, x0, 1);
cublasDcopy(cublasHandle, size, pK, 1, p0, 1);
cublasDcopy(cublasHandle, size, nuK, 1, nu0, 1);
*rho0 = *rhoK;
*omega0 = *omegaK;
*alpha0 = *alphaK;
step++;
//cout <<"Step = "<< step << endl;
} while (step<=STEP_LIMIT);
//Verification
GPU_mult(xK, size, temp, diag_gpu, gpu, d_val, d_col, d_row, temp1, deviceProp.maxThreadsPerBlock);
cublasDaxpy(cublasHandle,size,minus_one,r0,1,temp1,1);
cublasDnrm2(cublasHandle, size, temp1, 1, temp_var_1);
cout <<endl<< "VERIFICATION: " << *temp_var_1 << endl;
checkCudaErrors(cudaFree(r0));
checkCudaErrors(cudaFree(rK));
checkCudaErrors(cudaFree(x0));
checkCudaErrors(cudaFree(xK));
checkCudaErrors(cudaFree(pK));
checkCudaErrors(cudaFree(p0));
//checkCudaErrors(cudaFree(right_part_gpu));
checkCudaErrors(cudaFree(nuK));
checkCudaErrors(cudaFree(nu0));
checkCudaErrors(cudaFree(temp1));
checkCudaErrors(cudaFree(temp2));
checkCudaErrors(cudaFree(temp3));
checkCudaErrors(cudaFree(sK));
checkCudaErrors(cudaFree(tK));
checkCudaErrors(cudaFree(h));
checkCudaErrors(cublasDestroy(cublasHandle));
checkCudaErrors(cusparseDestroy(cusparseHandle));
cout <<endl<< "STEPS: = " << step << endl;
delete temp_var_1;
delete temp_var_2;
return final_result;
}
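// GPU_stab_bi_gradient_solver_with_preconditioner: the same BiCGSTAB loop as above with a
// Jacobi (diagonal) preconditioner. temp2 holds 1/diag(A) (built by diag_revers); the
// preconditioned directions y = K^{-1}*p and z = K^{-1}*s (stored in temp3) are what get
// multiplied by A and used in the updates of h, x and omega.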
double* gpu_solver::GPU_stab_bi_gradient_solver_with_preconditioner(double *val, int *col, int *row, double *right, double *diag, int nnz, int size)
{
//Count amount of devices
int gpu;
checkCudaErrors(cudaGetDeviceCount(&gpu));
double *test = new double[size];
//Arrays for devices
double ** d_val = new double *[gpu];
int ** d_col = new int *[gpu];
int ** d_row = new int *[gpu];
//Array with devicearray's sizes
int *temp = new int[gpu];
temp = split(gpu, val, col, row, size, nnz, d_val, d_col, d_row);
//int step = 0;
bool flag = true;
double *minus = new double;
double *zero = new double;
double *one = new double;
*minus = -1.0;
*zero = 0.0;
*one = 1.0;
//Initialization of diag
double* final_result = new double[size];
//Initialization of all variables
checkCudaErrors(cudaSetDevice(0));
cublasHandle_t cublasHandle = NULL;
cusparseHandle_t cusparseHandle = NULL;
checkCudaErrors(cusparseCreate(&cusparseHandle));
cusparseMatDescr_t matDescr = NULL;
checkCudaErrors(cusparseCreateMatDescr(&matDescr));
checkCudaErrors(cublasCreate(&cublasHandle));
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, 0));
double *x0;
checkCudaErrors(cudaMalloc((void **)&x0, sizeof(double)*(size)));
checkCudaErrors(cudaMemset(x0, 0, sizeof(double)*(size)));
double *r0, *rT;
double * diag_gpu;
double * h;
checkCudaErrors(cudaMalloc((void **)&r0, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&rT, sizeof(double)*(size)));
//checkCudaErrors(cudaMalloc((void **)&right_part_gpu, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&diag_gpu, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&h, sizeof(double)*(size)));
checkCudaErrors(cudaMemcpy(diag_gpu, diag, sizeof(double)*(size), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(r0, right, sizeof(double)*(size), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(rT, right, sizeof(double)*(size), cudaMemcpyHostToDevice));
double *rho0 = new double;
double *alpha0 = new double;
double *omega0 = new double;
*rho0 = 1.0;
*alpha0 = 1.0;
*omega0 = 1.0;
double *minus_one = new double;
*minus_one = -1.0;
double *nu0, *p0;
int step = 0;
checkCudaErrors(cudaMalloc((void **)&nu0, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&p0, sizeof(double)*(size)));
checkCudaErrors(cudaMemset(nu0, 0, sizeof(double)*(size)));
checkCudaErrors(cudaMemset(p0, 0, sizeof(double)*(size)));
double *rhoK = new double;
double *alphaK = new double;
double *omegaK = new double;
double *betaK = new double;
double *pK, *nuK, *sK, *tK, *xK, *rK;
checkCudaErrors(cudaMalloc((void **)&pK, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&nuK, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&sK, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&tK, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&xK, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&rK, sizeof(double)*(size)));
double *temp1, *temp2, *temp3,*temp4,*temp5;
checkCudaErrors(cudaMalloc((void **)&temp1, sizeof(double)*(size))); // for many
checkCudaErrors(cudaMalloc((void **)&temp2, sizeof(double)*(size))); //K^(-1)
checkCudaErrors(cudaMalloc((void **)&temp3, sizeof(double)*(size))); //z
checkCudaErrors(cudaMalloc((void **)&temp4, sizeof(double)*(size))); //ddot tk*K^-1
checkCudaErrors(cudaMalloc((void **)&temp5, sizeof(double)*(size))); //ddot sk*^(-1)
double * temp_var_1 = new double;
double * temp_var_2 = new double;
diag_revers << <10000, deviceProp.maxThreadsPerBlock >> > (diag_gpu, temp2, size);
double *checking = new double[size];
do
{
//1
cublasDdot(cublasHandle, size, r0, 1, rT, 1, rhoK);
//2
*betaK = (*rhoK / *rho0) * (*alpha0 / *omega0);
//cout <<"OUT: "<< *betaK;
//3
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (nu0, -(*omega0), size, temp1);
Debuger(temp1, size, checking);
cublasDaxpy(cublasHandle, size, one, p0, 1, temp1, 1);
Debuger(temp1, size, checking);
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (temp1, *betaK, size, temp1);
cublasDaxpy(cublasHandle, size, one, rT, 1, temp1, 1);
cublasDcopy(cublasHandle, size, temp1, 1, pK, 1);
Debuger(pK, size, checking);
//4
not_full_scalar << <10000, deviceProp.maxThreadsPerBlock >> > (temp2,pK,size,temp3);
//Debuger(temp3, size,checking);
Debuger(temp3, size, checking);
//5
GPU_mult(temp3, size, temp, diag_gpu, gpu, d_val, d_col, d_row, nuK, deviceProp.maxThreadsPerBlock);
Debuger(nuK, size,checking);
Debuger(nuK, size, checking);
//6
cublasDdot(cublasHandle, size, r0, 1, nuK, 1, temp_var_1);
*alphaK = (*rhoK) / (*temp_var_1);
//7
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (temp3, *alphaK, size, temp1);
Debuger(temp1, size, checking);
cublasDaxpy(cublasHandle, size, one, x0, 1, temp1, 1);
cublasDcopy(cublasHandle, size, temp1, 1, h, 1);
Debuger(h, size, checking);
//7
//cublasDaxpy(cublasHandle, size, minus_one, h, 1, xK, 1);
//9
//Debuger(temp1, size);
//Debuger(temp1, size, checking);
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (nuK, -(*alphaK), size, temp1);
//Debuger(temp1, size);
Debuger(temp1, size, checking);
Debuger(rT, size, checking);
cublasDaxpy(cublasHandle, size, one, rT, 1, temp1, 1);
//Debuger(temp1, size);
//Debuger(sK, size, checking);
Debuger(temp1,size,checking);
cublasDcopy(cublasHandle, size, temp1, 1, sK, 1);
Debuger(sK, size,checking);
//10
Debuger(sK, size,checking);
not_full_scalar << <10000, deviceProp.maxThreadsPerBlock >> > (temp2, sK, size, temp3);
Debuger(temp3, size,checking);
//11
GPU_mult(temp3, size, temp, diag_gpu, gpu, d_val, d_col, d_row, tK, deviceProp.maxThreadsPerBlock);
Debuger(tK, size,checking);
//12
not_full_scalar << <10000, deviceProp.maxThreadsPerBlock >> > (temp2, tK, size, temp4);
not_full_scalar << <10000, deviceProp.maxThreadsPerBlock >> > (temp2, sK, size, temp5);
Debuger(temp4, size, checking);
Debuger(temp5, size, checking);
cublasDdot(cublasHandle, size, temp4, 1, temp5, 1, temp_var_1);
cublasDdot(cublasHandle, size, temp4, 1, temp4, 1, temp_var_2);
*omegaK = *temp_var_1 / *temp_var_2;
//13
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock>> > (temp3, *omegaK, size, temp1);
Debuger(temp1, size, checking);
cublasDaxpy(cublasHandle, size, one, h, 1, temp1, 1);
Debuger(temp1, size, checking);
cublasDcopy(cublasHandle, size, temp1, 1, xK, 1);
Debuger(xK, size,checking);
//12
/*cublasDaxpy(cublasHandle, size, minus_one, xK, 1, x0, 1);
cublasDnrm2(cublasHandle, size, x0, 1, temp_var_1);*/
//15
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock>> > (tK, -(*omegaK), size, temp1);
Debuger(temp1, size, checking);
cublasDaxpy(cublasHandle, size, one, sK, 1, temp1, 1);
Debuger(temp1, size,checking);
cublasDcopy(cublasHandle, size, temp1, 1, rK, 1);
Debuger(rK, size,checking);
cublasDnrm2(cublasHandle, size, rK, 1, temp_var_1);
cublasDnrm2(cublasHandle, size, r0, 1, temp_var_2);
//if(step%20==0)
//cout <<"NEVAZKA = "<< *temp_var_1 / *temp_var_2 << endl;
//if (*temp_var_1/ *temp_var_2 <= APPROX || *omegaK<=APPROX)
if (*temp_var_1 / *temp_var_2 <= APPROX )
{
//cout << endl << "NEVAZKA = " << *temp_var_1/ *temp_var_2<< endl;
checkCudaErrors(cudaMemcpy(final_result, xK, sizeof(double)*size, cudaMemcpyDefault));
break;
}
cublasDcopy(cublasHandle, size, rK, 1, rT, 1);
cublasDcopy(cublasHandle, size, xK, 1, x0, 1);
cublasDcopy(cublasHandle, size, pK, 1, p0, 1);
cublasDcopy(cublasHandle, size, nuK, 1, nu0, 1);
*rho0 = *rhoK;
*omega0 = *omegaK;
*alpha0 = *alphaK;
step++;
//cout <<"Step = "<< step << endl;
} while (step <= STEP_LIMIT);
//Verification
GPU_mult(xK, size, temp, diag_gpu, gpu, d_val, d_col, d_row, temp1, deviceProp.maxThreadsPerBlock);
cublasDaxpy(cublasHandle, size,minus_one, r0, 1, temp1, 1);
cublasDnrm2(cublasHandle, size, temp1, 1, temp_var_1);
//cout << endl << "VERIFICATION: " << *temp_var_1 << endl;
checkCudaErrors(cudaFree(r0));
checkCudaErrors(cudaFree(rK));
checkCudaErrors(cudaFree(x0));
checkCudaErrors(cudaFree(xK));
checkCudaErrors(cudaFree(pK));
checkCudaErrors(cudaFree(p0));
//checkCudaErrors(cudaFree(right_part_gpu));
checkCudaErrors(cudaFree(nuK));
checkCudaErrors(cudaFree(nu0));
checkCudaErrors(cudaFree(temp1));
checkCudaErrors(cudaFree(temp2));
checkCudaErrors(cudaFree(temp3));
checkCudaErrors(cudaFree(sK));
checkCudaErrors(cudaFree(temp4));
checkCudaErrors(cudaFree(temp5));
checkCudaErrors(cudaFree(tK));
checkCudaErrors(cudaFree(h));
checkCudaErrors(cublasDestroy(cublasHandle));
checkCudaErrors(cusparseDestroy(cusparseHandle));
//cout << endl << "STEPS: = " << step << endl;
delete temp_var_1;
delete temp_var_2;
return final_result;
}
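// matrix_eigenvalues: computes amount_ev eigenpairs of the generalized problem K*x = lambda*M*x
// (K given by val/col/row/diag, M by val2/col2/row2/diag2) with a restarted Lanczos iteration on
// K^{-1}*M using M-orthogonal Lanczos vectors. Each application of K^{-1} is an inner solve by
// GPU_stab_bi_gradient_solver_with_preconditioner; the projected tridiagonal matrix T
// (alpha_vec/beta_vec) is diagonalized with cusolverDnDsyevd; Ritz pairs flagged as converged by
// the proverb kernel (using last_beta and eps_for_b) are stored, deflated from the next start
// vector, and the process restarts until amount_ev pairs have converged. The reported
// eigenvalues are the reciprocals of the converged Ritz values (reverse_for_eigen_values_lanc).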
void gpu_solver::matrix_eigenvalues(double *val, int *col, int *row, double *diag, int non_zero, int size, double * eigen_values, double** eigen_vectors,double * b, double *val2, int *col2, int *row2, double *diag2, int non_zero2, int size2, int amount_ev)
{
int gpu;
checkCudaErrors(cudaGetDeviceCount(&gpu));
double ** d_val = new double *[gpu];
int ** d_col = new int *[gpu];
int ** d_row = new int *[gpu];
double ** d_val2 = new double *[gpu];
int ** d_col2 = new int *[gpu];
int ** d_row2 = new int *[gpu];
//Array with devicearray's sizes
int *temp = new int[gpu];
int *temp_M = new int[gpu];
temp = split(gpu, val, col, row, size, non_zero, d_val, d_col, d_row);
temp_M = split(gpu, val2, col2, row2, size2, non_zero2, d_val2, d_col2, d_row2);
checkCudaErrors(cudaSetDevice(0));
cublasHandle_t cublasHandle = NULL;
cusparseHandle_t cusparseHandle = NULL;
checkCudaErrors(cusparseCreate(&cusparseHandle));
cusparseMatDescr_t matDescr = NULL;
checkCudaErrors(cusparseCreateMatDescr(&matDescr));
checkCudaErrors(cublasCreate(&cublasHandle));
cudaDeviceProp deviceProp;
double *diag_gpu;
checkCudaErrors(cudaMalloc((void **)&diag_gpu, sizeof(double)*(size)));
checkCudaErrors(cudaMemcpy(diag_gpu, diag, sizeof(double)*(size), cudaMemcpyHostToDevice));
double *diag_gpu2;
checkCudaErrors(cudaMalloc((void **)&diag_gpu2, sizeof(double)*(size2)));
checkCudaErrors(cudaMemcpy(diag_gpu2, diag2, sizeof(double)*(size2), cudaMemcpyHostToDevice));
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, 0));
double* nu_vec;
double * alpha_vec;
double * beta_vec;
double* w_vec ; // myvector
double* alpha_j=new double;
double* t=new double;
double * beta_new;
double * checking = new double[size];
double * matrix_w;
double * multi_temp;
double * matr_dense;
double * eigen_values_gpu;
double * eigenvectors_gpu;
int * CONVERGE_AMOUNT_CPU = new int;
*CONVERGE_AMOUNT_CPU = 0;
int * CONVERGE_AMOUNT;
checkCudaErrors(cudaMalloc((void **)&CONVERGE_AMOUNT, sizeof(int)));
checkCudaErrors(cudaMemset(CONVERGE_AMOUNT, 0, sizeof(int)));
//*CONVERGE_AMOUNT = 0; //current converge
double * converge_eig_vec; //result converge vectors
double * converge_eig_val; //result converge values
double *converge_eig_val_numb_T; //temp for ind of converge values in array
double *converge_temp ;
//*converge_temp = 0;
checkCudaErrors(cudaMalloc((void **)&converge_temp, sizeof(double)));
checkCudaErrors(cudaMemset(converge_temp, 0.0, sizeof(double)));
double* temp_ev; //temp
checkCudaErrors(cudaMalloc((void **)&temp_ev, sizeof(double)*(amount_ev)));
checkCudaErrors(cudaMalloc((void **)&converge_eig_val_numb_T, sizeof(double)*(amount_ev)));
checkCudaErrors(cudaMalloc((void **)&converge_eig_vec, sizeof(double)*(size)*size));
checkCudaErrors(cudaMalloc((void **)&converge_eig_val, sizeof(double)*(amount_ev)));
checkCudaErrors(cudaMalloc((void **)&eigenvectors_gpu, sizeof(double)*(amount_ev)*(size)));
checkCudaErrors(cudaMalloc((void **)&eigen_values_gpu, sizeof(double)*(amount_ev)));
checkCudaErrors(cudaMalloc((void **)&matr_dense, sizeof(double)*(amount_ev)*amount_ev));
checkCudaErrors(cudaMemset(matr_dense, 0.0, sizeof(double)*(amount_ev)*(amount_ev)));
checkCudaErrors(cudaMalloc((void **)&matrix_w, sizeof(double)*(size)*(amount_ev)));
checkCudaErrors(cudaMalloc((void **)&multi_temp, sizeof(double)*(size)*(amount_ev)));
checkCudaErrors(cudaMemset(matrix_w, 0.0, sizeof(double)*(size*amount_ev)));
checkCudaErrors(cudaMalloc((void **)&nu_vec, sizeof(double)*(size)));
checkCudaErrors(cudaSetDevice(0));
checkCudaErrors(cudaMemset(nu_vec, 0, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&alpha_vec, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&beta_new, sizeof(double)));
checkCudaErrors(cudaMemset(alpha_vec, 0, sizeof(double)*(size)));
checkCudaErrors(cudaMemset(beta_new, 0.0, sizeof(double)));
checkCudaErrors(cudaMalloc((void **)&beta_vec, sizeof(double)*(amount_ev-1)));
checkCudaErrors(cudaMemset(beta_vec, 0.0, sizeof(double)*(amount_ev-1)));
checkCudaErrors(cudaMalloc((void **)&w_vec, sizeof(double)*(size))); // myvector
double *right_gpu;
double * right_gpu_input_once;
checkCudaErrors(cudaMalloc((void **)&right_gpu_input_once, sizeof(double)*(size)));
checkCudaErrors(cudaMemcpy(right_gpu_input_once, b, sizeof(double)*(size), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **)&right_gpu, sizeof(double)*(size)));
checkCudaErrors(cudaMemcpy(right_gpu, b, sizeof(double)*(size), cudaMemcpyHostToDevice));
//checkCudaErrors(cudaMemcpy(w_vec, b, sizeof(double)*(size), cudaMemcpyHostToDevice));
double *temp1_b;
checkCudaErrors(cudaMalloc((void **)&temp1_b, sizeof(double)*(size)));
//checkCudaErrors(cudaMemset(w_vec, 2.0, sizeof(double)*(size)));
// matr_add << <10000, deviceProp.maxThreadsPerBlock >> > (matrix_w, 0, w_vec, size);
double *tmp5;
checkCudaErrors(cudaMalloc((void **)&tmp5, sizeof(double)));
double *tmp6;
checkCudaErrors(cudaMalloc((void **)&tmp6, sizeof(double)));
double * temp2;
checkCudaErrors(cudaMalloc((void **)&temp2, sizeof(double)*(size)));
double * temp3;
checkCudaErrors(cudaMalloc((void **)&temp3, sizeof(double)*(size)));
double * temp6;
checkCudaErrors(cudaMalloc((void **)&temp6, sizeof(double)*(size)));
double *tmp1=new double;
double *tmp_2 = new double;
double *tmp3 = new double;
int* tmp_int = new int;
bool main_flag = false;
double * zero_f = new double;
*zero_f = 0;
double*tmp2 = new double;
double * one = new double;
*one = 1;
double *b_i = new double;
double * b_i_old = new double;
*b_i_old = 0;
bool exit = false;
double *x_temp; // device work vector, allocated with cudaMalloc below
double * temp1; //right_gpu /aka first vector
checkCudaErrors(cudaMalloc((void **)&temp1, sizeof(double)*(size)));
double *x_temp_tld; //x^~ (device work vector, allocated with cudaMalloc below)
int step = 0;
double * last_beta = new double;
bool first_flag = false;
checkCudaErrors(cudaMalloc((void **)&x_temp, sizeof(double)*(size)));
checkCudaErrors(cudaMalloc((void **)&x_temp_tld, sizeof(double)*(size)));
double * minus_one = new double;
*minus_one = -1.0;
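// Outer restart loop: build a Lanczos basis, do the Rayleigh-Ritz step, extract the converged
// Ritz pairs, deflate them from the start vector, and repeat until amount_ev eigenpairs have
// converged (hard stop after 10 restarts).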
while (*CONVERGE_AMOUNT_CPU != amount_ev)
{
cout << endl << "THE " << step << " STAGE IS RINNING!" << endl;
//checkCudaErrors(cudaDeviceSynchronize());
if (first_flag != false)
{
bool flag_cor;
//Gram-Schmidt procedure: deflate already converged eigenvectors from the new start vector
for (int j = 0; j < *CONVERGE_AMOUNT_CPU; j++)
{
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (converge_eig_vec, j, size, temp1);
checkCudaErrors(cublasDdot(cublasHandle, size, temp1, 1, right_gpu_input_once, 1, tmp1));
checkCudaErrors(cublasDdot(cublasHandle, size, temp1, 1, temp1, 1, tmp2));
*tmp1 = (-1)*(*tmp1) / (*tmp2);
checkCudaErrors(cublasDaxpy(cublasHandle, size, tmp1, temp1, 1, right_gpu, 1));
Debuger(temp1,size,checking);
}
flag_cor = true;
for (int j = 0; j < *CONVERGE_AMOUNT_CPU; j++)
{
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (converge_eig_vec, j, size, temp1);
checkCudaErrors(cublasDdot(cublasHandle, size, temp1, 1, right_gpu, 1, tmp1));
if (abs(*tmp1) > SK_Nev)
{
flag_cor = false;
}
//cout << "Checking: " << abs(*tmp1) << endl;
}
if (flag_cor == false)
{
cout <<endl<< "BAD G_SH" << endl;
}
else { cout<<endl << "CORRECT G_SH" << endl; }
// } while (flag_cor != true);
// matr_add << <10000, deviceProp.maxThreadsPerBlock >> > (matrix_w, 0, right_gpu, size);
}
// all_zero << <10000, deviceProp.maxThreadsPerBlock >> > (size, temp1);
/* Debuger(right_gpu, size, checking);
Debuger(temp1, size, checking);*/
GPU_mult(right_gpu, size2, temp_M, diag_gpu2, gpu, d_val2, d_col2, d_row2, temp1, deviceProp.maxThreadsPerBlock);
cublasDdot(cublasHandle, size, temp1, 1, right_gpu, 1, tmp1);
*tmp_2 = sqrt(*tmp1);
Debuger(temp1, size, checking);
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (right_gpu, 1 / (*tmp_2), size, w_vec); //first vector of Q matrix
Debuger(w_vec, size, checking);
matr_add << <10000, deviceProp.maxThreadsPerBlock >> > (matrix_w, 0, w_vec, size);
Debuger(matrix_w, size, checking);
//x^-
double * proc_temp = new double[size];
*one = 1;
bool flag_cor = true;
for (int i = 0; i < amount_ev; i++) //Lanczos stage: build the M-orthonormal basis and the projections alpha_i, beta_i
{
// if(exit==false)
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (matrix_w, i, size, w_vec);
Debuger(w_vec, size, checking);
GPU_mult(w_vec, size2, temp_M, diag_gpu2, gpu, d_val2, d_col2, d_row2, temp1, deviceProp.maxThreadsPerBlock); // M_xi
matr_add << <10000, deviceProp.maxThreadsPerBlock >> > (multi_temp, i, temp1, size);
Debuger(temp1, size, checking);
checkCudaErrors(cudaMemcpy(proc_temp, temp1, sizeof(double)*size, cudaMemcpyDeviceToHost));
proc_temp = GPU_stab_bi_gradient_solver_with_preconditioner(val, col, row, proc_temp, diag, non_zero, size); // x^-
checkCudaErrors(cudaMemcpy(x_temp, proc_temp, sizeof(double)*(size), cudaMemcpyHostToDevice));
cublasDdot(cublasHandle, size, temp1, 1, x_temp, 1, tmp1); //alpha
element << <1, 1 >> > (alpha_vec, i, *tmp1);//alpha_i
// if (i != amount_ev - 1)
double * checking1 = new double[size*(i + 1)];
*tmp1 = -*tmp1; //-alpha_i
Debuger(temp1, size, checking);
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (w_vec, (*tmp1), size, temp1);
checkCudaErrors(cublasDaxpy(cublasHandle, size, one, temp1, 1, x_temp, 1));//end
if (i != 0)
{
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (matrix_w, i - 1, size, temp2);
// (*b_i_old) = -(*b_i_old);
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (temp2, -(*b_i_old), size, temp2); //
Debuger(temp2, size, checking);
checkCudaErrors(cublasDaxpy(cublasHandle, size, one, temp2, 1, x_temp, 1)); //x_temp = x^~i
copy_v << <1000, deviceProp.maxThreadsPerBlock >> > (x_temp, size, temp6);
//Debuger(temp2, size, checking);
for (int s = 0; s < i; s++) //full reorthogonalization against previous Lanczos vectors (Gram-Schmidt)
{
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (multi_temp, s, size, temp3);
checkCudaErrors(cublasDdot(cublasHandle, size, temp3, 1, x_temp, 1, tmp3));
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (matrix_w, s, size, temp3);
(*tmp3) = -*tmp3;
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (temp3, *tmp3, size, temp3);
checkCudaErrors(cublasDaxpy(cublasHandle, size, one, temp3, 1, x_temp, 1));
}
for (int j = 0; j < *CONVERGE_AMOUNT_CPU; j++)
{
//temp3
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (converge_eig_vec, j, size, temp3);
GPU_mult(temp3, size, temp_M, diag_gpu2, gpu, d_val2, d_col2, d_row2, temp2, deviceProp.maxThreadsPerBlock);
checkCudaErrors(cublasDdot(cublasHandle, size, temp6, 1, temp2, 1, tmp1));
*tmp1 = -*tmp1;
checkCudaErrors(cublasDaxpy(cublasHandle, size, tmp1, temp2, 1, x_temp, 1));
}
}
GPU_mult(x_temp, size2, temp_M, diag_gpu2, gpu, d_val2, d_col2, d_row2, temp1, deviceProp.maxThreadsPerBlock);
checkCudaErrors(cublasDdot(cublasHandle, size, temp1, 1, x_temp, 1, tmp1));
Debuger(temp1, size, checking);
Debuger(x_temp, size, checking);
if (i != amount_ev - 1)
{
*b_i_old = sqrt(*tmp1);
element << <1, 1 >> > (beta_vec, i, *b_i_old);
//Debuger(x_temp, size, checking);
vec_mul_number << <10000, deviceProp.maxThreadsPerBlock >> > (x_temp, 1 / (*b_i_old), size, x_temp); //
Debuger(x_temp, size, checking);
flag_cor = true;
for (int j = 0; j < i /*+ 1*/; j++)
{
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (multi_temp, j, size, temp1);
checkCudaErrors(cublasDdot(cublasHandle, size, temp1, 1, x_temp, 1, tmp1));
if (abs(*tmp1) > SK_Nev)
{
flag_cor = false;
cout << endl << "FUck in Lanc " << j;
}
// cout << "Checking: " << abs(*tmp1) << endl;
}
for (int j = 0; j < *CONVERGE_AMOUNT_CPU /*+ 1*/; j++)
{
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (converge_eig_vec, j, size, temp1);
GPU_mult(temp1, size2, temp_M, diag_gpu2, gpu, d_val2, d_col2, d_row2, temp1, deviceProp.maxThreadsPerBlock);
checkCudaErrors(cublasDdot(cublasHandle, size, temp1, 1, x_temp, 1, tmp1));
if (abs(*tmp1) > SK_Nev)
{
flag_cor = false;
cout << endl << "FUck in eigenvec " << j;
}
// cout << "Checking: " << abs(*tmp1) << endl;
}
if (flag_cor == true)
cout <<endl<< "CHECKING COMPLETED AT STAGE " << i << endl;
else
{
cout <<endl<< "CHECKING is not COMPLETED AT STAGE " << i << endl;
// exit = true;
}
// if (exit == false)
matr_add << <10000, deviceProp.maxThreadsPerBlock >> > (matrix_w, i + 1, x_temp, size);
}
else {
*last_beta = sqrt(*tmp1);
}
}
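// Rayleigh-Ritz step: assemble the tridiagonal matrix T from alpha_vec (diagonal) and beta_vec
// (off-diagonals) with connect_diag_matr, then compute its full eigendecomposition on device 0
// with cusolverDnDsyevd (matr_dense is overwritten by the eigenvectors of T and
// eigen_values_gpu receives its eigenvalues).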
all_zero << <10000, deviceProp.maxThreadsPerBlock >> > (amount_ev*amount_ev, matr_dense);
cusolverDnHandle_t cusolverH = NULL;
int info_gpu = 0;
checkCudaErrors(cusolverDnCreate(&cusolverH));
int lwork = 0;
const int lda = amount_ev /*i + 1*/;
connect_diag_matr << <10000, deviceProp.maxThreadsPerBlock >> > (matr_dense, alpha_vec, beta_vec,amount_ev);
double* checking_mat = new double[(amount_ev*amount_ev)];
// cout << "MATRIX T: " << endl;
// Debuger_for_matr(matr_dense, amount_ev, amount_ev, checking_mat);
// cout << endl;
cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR; // compute eigenvalues and eigenvectors.
cublasFillMode_t uplo = CUBLAS_FILL_MODE_LOWER;
checkCudaErrors(cusolverDnDsyevd_bufferSize( //allocated memory for buffer
cusolverH,
jobz,
uplo,
amount_ev, /*i + 1,*/
matr_dense,
lda,
eigen_values_gpu,
&lwork
));
int *devInfo = NULL;
checkCudaErrors(cudaMalloc((void**)&devInfo, sizeof(int)));
double *d_work = NULL;
checkCudaErrors(cudaMalloc((void**)&d_work, sizeof(double)*lwork));
checkCudaErrors(cusolverDnDsyevd( //solver
cusolverH,
jobz,
uplo,
amount_ev,// i + 1,
matr_dense, //eigenvectors for T
lda,
eigen_values_gpu, //eigenvalues for T
d_work,
lwork,
devInfo
));
checkCudaErrors(cudaDeviceSynchronize());
ret_val << <1, 1 >> > (beta_vec,amount_ev-2,tmp5);
double * a = new double[amount_ev];
Debuger(beta_vec,amount_ev-1,a);
double * ch3 = new double;
double* checking_matr = new double[(amount_ev*amount_ev)];
cout << endl << "TEMPORARY EIGEN VALUES:" << endl;
double*ch_v = new double[amount_ev];
Debuger(eigen_values_gpu, amount_ev, ch_v);
for (int i = 0; i < amount_ev; i++)
cout << "Value: " << 1/ch_v[i] << endl;
cout << endl << "TEMPORARY EIGEN VECTOR:" << endl;
Debuger_for_matr(matr_dense, amount_ev, amount_ev, checking_matr);
Debuger(tmp5, 1, ch3);
cout << endl<<"BETA_q=" << *last_beta << endl;
element << <1, 1 >> > (tmp5, 0, *last_beta);
proverb << <1, 1 >> > (matr_dense,tmp5,amount_ev, eps_for_b,CONVERGE_AMOUNT, converge_eig_val_numb_T, converge_temp);
//converge_eig_val_numb_T - array of places of converged current values in eigen_values_gpu
//converge_temp - number of converged values on current stage
//checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(tmp3, converge_temp, sizeof(double), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(CONVERGE_AMOUNT_CPU, CONVERGE_AMOUNT, sizeof(int), cudaMemcpyDeviceToHost));
cout <<endl<<"Converged values "<< *tmp3 << endl;
for (int i = 0; i < *tmp3; i++)
{
add_to_converge_values << <1, 1 >> > (converge_eig_val_numb_T, i, eigen_values_gpu,converge_eig_val, CONVERGE_AMOUNT,tmp5);
checkCudaErrors(cudaMemcpy(tmp1, tmp5, sizeof(double), cudaMemcpyDeviceToHost));
/*ret_val << <1, 1 >> > (converge_temp,i,tmp5);
checkCudaErrors(cudaMemcpy(tmp1, tmp5, sizeof(double), cudaMemcpyDeviceToHost));
ret_val << <1, 1 >> > (eigen_values_gpu,int(*tmp1),tmp6);
element << <1, 1 >> > (converge_eig_val, *CONVERGE_AMOUNT, *tmp6);*/
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (matr_dense, int(*tmp1), amount_ev, temp_ev);
checkCudaErrors(cublasDgemv(cublasHandle,
CUBLAS_OP_N,
size,
amount_ev,
one,
matrix_w,
size,
temp_ev,
1,
zero_f,
temp3,
1));
matr_add << <10000, deviceProp.maxThreadsPerBlock >> > (converge_eig_vec, *CONVERGE_AMOUNT_CPU+i, temp3, size);
// first_flag = false;
/* cout << "MATR Q: " << endl;
double* checking_matr = new double[(size*amount_ev)];
Debuger_for_matr(matrix_w,size ,amount_ev, checking_matr);
cout << endl;*/
/*double * ch = new double[size];
Debuger(temp3, size, ch);
for(int i=0;i<size;i++)
cout << ch[i] << " " << endl;*/
}
//All zero
// *converge_temp = 0;
*b_i_old=0;
exit = false;
all_zero << <10000, deviceProp.maxThreadsPerBlock >> > (size*amount_ev, matrix_w); //clear lanc vectors matr
all_zero << <10000, deviceProp.maxThreadsPerBlock >> > (amount_ev*amount_ev, matr_dense); // clear matr T or eigen vectors matr
all_zero << <10000, deviceProp.maxThreadsPerBlock >> > (size*amount_ev, multi_temp); // clear Mx matr
all_zero << <10000, deviceProp.maxThreadsPerBlock >> > (amount_ev-1, beta_vec);
all_zero << <10000, deviceProp.maxThreadsPerBlock >> > (amount_ev, alpha_vec);
double * ch = new double[amount_ev];
Debuger(converge_eig_val, amount_ev, ch);
for(int i=0;i<*tmp3;i++)
cout << "E_Values on iteration: " << 1 / ch[i] << endl;
checkCudaErrors(cudaMemset(converge_temp, 0.0, sizeof(double)));
*CONVERGE_AMOUNT_CPU += *tmp3;
checkCudaErrors(cudaMemcpy(CONVERGE_AMOUNT, CONVERGE_AMOUNT_CPU, sizeof(int), cudaMemcpyHostToDevice));
// checkCudaErrors(cudaMemcpy(CONVERGE_AMOUNT_CPU, CONVERGE_AMOUNT, sizeof(int), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaDeviceSynchronize());
step++;
if(*tmp3==0)
{
cout << endl << "NO NEW CONVERGED EIGEN VALUES" << endl;
}
else
{ first_flag = true;}
if (step == 10)
break;
// *CONVERGE_AMOUNT_CPU = amount_ev;
}
cout << "STEP: "<<step << endl;
reverse_for_eigen_values_lanc << <10000, deviceProp.maxThreadsPerBlock >> > (converge_eig_val, amount_ev);
checkCudaErrors(cudaMemcpy(eigen_values, converge_eig_val, sizeof(double)*(amount_ev), cudaMemcpyDeviceToHost));
//CUBLAS_OP_N - non-transpose ('N' or 'n')
//CUBLAS_OP_T - transpose ('T' or 't')
cout << "+++++++++FINAL SOLUTION ++++++++" << endl << endl << endl;
double* checking_matr = new double[amount_ev*size];
Debuger_for_matr(converge_eig_vec, size, amount_ev, checking_matr);
double * one_f = new double;
cout << "PROVERB ON RESULT:" << endl;
for (int j = 0; j < amount_ev; j++)
{
*one_f = -eigen_values[j];
return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (converge_eig_vec, j, size, temp3);
GPU_mult(temp3, size2, temp_M, diag_gpu2, gpu, d_val2, d_col2, d_row2, temp1_b, deviceProp.maxThreadsPerBlock);
GPU_mult(temp3, size, temp, diag_gpu, gpu, d_val, d_col, d_row, temp2, deviceProp.maxThreadsPerBlock);
checkCudaErrors(cublasDaxpy(cublasHandle, size, one_f, temp1_b, 1, temp2, 1));//end
checkCudaErrors(cublasDnrm2(cublasHandle,size,temp2,1,tmp1));
cout << "Nevazka:[" << j << "]= " << *tmp1<<endl;
}
// *one_f = 0.098569;//1.786052;
// cout << endl << "Mathcad check:" << endl;
// cout << "For eigenvalue: " << *one_f << endl;
// *one_f = -*one_f;
// double * test = new double[size];
// double * test_gpu;
// test[0] = -1;//-0.127;
// test[1] = -0.898;//0,006265;
// test[2] = -0.372;//-1;
// test[3] =0;//0.074;
// test[4] =0;//0;
// test[5] =0;//0;
// test[6] =0;//0;
// test[7] =0;//0;
// test[8] =0;//0;
// test[9] = 0.368;//0.569;
// checkCudaErrors(cudaMalloc((void **)&test_gpu, sizeof(double)*(size)));
// checkCudaErrors(cudaMemcpy(test_gpu, test, sizeof(double)*(size), cudaMemcpyHostToDevice));
//// return_vec << <10000, deviceProp.maxThreadsPerBlock >> > (test_gpu, j, size, temp3);
// GPU_mult(test_gpu, size2, temp_M, diag_gpu2, gpu, d_val2, d_col2, d_row2, temp1_b, deviceProp.maxThreadsPerBlock);
// GPU_mult(test_gpu, size, temp, diag_gpu, gpu, d_val, d_col, d_row, temp2, deviceProp.maxThreadsPerBlock);
// checkCudaErrors(cublasDaxpy(cublasHandle, size, one_f, temp1_b, 1, temp2, 1));//end
// checkCudaErrors(cublasDnrm2(cublasHandle, size, temp2, 1, tmp1));
// cout << "Nevazka:= " << *tmp1 << endl;
}
void printMatrix(int m, int n, const double*A, int lda, const char* name)
{
for (int row = 0; row < m; row++) {
for (int col = 0; col < n; col++) {
double Areg = A[row + col*lda];
printf("%s(%d,%d) = %f\n", name, row + 1, col + 1, Areg);
}
}
}
|
f79ec98b058d5ef722ddeb50875b6abc9f16297a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Unauthorized copying of this file, via any medium is strictly prohibited
* Proprietary and confidential
*/
#include <cudnn.h>
#include <cudnn/impl/cudnn_common_def.h>
#include <cudnn/impl/cudnn_deref.h>
#include <cudnn/impl/cudnn_handle.h>
#include <cudnn/impl/cudnn_tensor_descriptor.h>
#include <cudnn/impl/kernel/cudnn_tensor_descriptor.cuh>
#include <algorithm>
namespace cudnn {
namespace impl {
void AddTensor(CuHandle& handle,
const void* alpha,
const CuTensorDescriptor& a_desc,
const void* a,
const void* beta,
const CuTensorDescriptor& c_desc,
void* c) {
cudnnDataType_t in_data_type;
in_data_type = a_desc.GetDataType();
if (DNN_DATA_FLOAT == in_data_type) {
const float* in_a = reinterpret_cast<const float*>(a);
const float* in_alpha = reinterpret_cast<const float*>(alpha);
const float* in_beta = reinterpret_cast<const float*>(beta);
float* out_c = reinterpret_cast<float*>(c);
LaunchAddTensorKernel(in_alpha, a_desc, in_a, in_beta, c_desc, out_c);
} else if (DNN_DATA_HALF == in_data_type) {
/** to do half addtensor*/
} else if (DNN_DATA_INT8 == in_data_type) {
/** to do int8 addtensor*/
} else {
}
}
void TransformTensor(CuHandle& handle,
const void* alpha,
const CuTensorDescriptor& x_desc,
const void* x,
const void* beta,
const CuTensorDescriptor& y_desc,
void* y) {
cudnnDataType_t in_data_type;
in_data_type = x_desc.GetDataType();
if (DNN_DATA_FLOAT == in_data_type) {
const float* in_x = reinterpret_cast<const float*>(x);
const float* in_alpha = reinterpret_cast<const float*>(alpha);
const float* in_beta = reinterpret_cast<const float*>(beta);
float* out_y = reinterpret_cast<float*>(y);
LaunchTransformTensorKernel(in_alpha, x_desc, in_x, in_beta, y_desc, out_y);
} else if (DNN_DATA_HALF == in_data_type) {
/** to do half transformTensor */
} else if (DNN_DATA_INT8 == in_data_type || DNN_DATA_INT8x4 == in_data_type) {
const char* in_x = reinterpret_cast<const char*>(x);
const float* in_alpha = reinterpret_cast<const float*>(alpha);
const float* in_beta = reinterpret_cast<const float*>(beta);
char* out_y = reinterpret_cast<char*>(y);
LaunchTransformTensorKernel(in_alpha, x_desc, in_x, in_beta, y_desc, out_y);
} else {
}
}
// AddTensor support 2d and 3d, NCHW NCDHW and NHWC format
template <typename T1, typename T2>
void LaunchAddTensorKernel(const T2* alpha,
const CuTensorDescriptor& a_desc,
const T1* a,
const T2* beta,
const CuTensorDescriptor& c_desc,
T1* c) {
if (a_desc.GetN() == c_desc.GetN() && a_desc.GetH() == c_desc.GetH() && a_desc.GetW() == c_desc.GetW()) {
int image_size = 0;
image_size = a_desc.GetStride(1); // only support batch_size is first dimension
int n_out = c_desc.GetN();
int threads_total_x = n_out * image_size;
int threads_perblk_x = ::min(threads_total_x, kMaxThreadNbPerBlock);
int nblocks_x = (threads_total_x + threads_perblk_x - 1) / threads_perblk_x;
dim3 gridSize(nblocks_x, 1, 1);
dim3 blockSize(threads_perblk_x, 1, 1);
hipLaunchKernelGGL(( kernel::CudnnAddTensorEQDimKernel), dim3(gridSize), dim3(blockSize), 0, 0,
a, *alpha, *beta, c, threads_total_x);
} else {
int c_batch = c_desc.GetN();
int a_chls_stride = a_desc.GetStride(2);
int c_chls_stride = c_desc.GetStride(2);
int c_chls = c_desc.GetDim(2);
int c_image_size = c_desc.GetStride(1); // c*h*w
int a_feature_size = 1;
int c_feature_size = 1;
for (int idx = 3; idx <= c_desc.GetNbDims(); idx++) {
a_feature_size = a_feature_size * a_desc.GetDim(idx);
}
for (int idx = 3; idx <= c_desc.GetNbDims(); idx++) {
c_feature_size = c_feature_size * c_desc.GetDim(idx);
}
int threads_total_x = c_feature_size;
int threads_perblk_x = ::min(threads_total_x, kMaxThreadNbPerBlock);
int nblocks_x = (threads_total_x + threads_perblk_x - 1) / threads_perblk_x;
dim3 gridSize(nblocks_x, c_batch, 1);
dim3 blockSize(threads_perblk_x, 1, 1);
hipLaunchKernelGGL(( kernel::CudnnAddTensorNotEQDimKernel), dim3(gridSize), dim3(blockSize), 0, 0,
a, a_chls_stride, a_feature_size, *alpha, *beta, c, c_chls, c_chls_stride, c_image_size);
}
}
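// LaunchTransformTensorKernel copies x into y with an optional layout change, scaled by
// alpha/beta. Three cases are dispatched below: NCHW_VECT_C -> scalar layout,
// scalar layout -> NCHW_VECT_C, and a generic strided copy that covers NCHW <-> NHWC.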
template <class T>
void LaunchTransformTensorKernel(const float* alpha,
const CuTensorDescriptor& x_desc,
const T* x,
const float* beta,
const CuTensorDescriptor& y_desc,
T* y) {
int in_n, in_c, in_w, in_h, in_stride_n, in_stride_c, in_stride_h, in_stride_w;
int out_n, out_c, out_w, out_h, out_stride_n, out_stride_c, out_stride_h, out_stride_w;
cudnnDataType_t in_data_type, out_data_type;
x_desc.Get(&in_data_type,
&in_n,
&in_c,
&in_h,
&in_w,
&in_stride_n,
&in_stride_c,
&in_stride_h,
&in_stride_w);
y_desc.Get(&out_data_type,
&out_n,
&out_c,
&out_h,
&out_w,
&out_stride_n,
&out_stride_c,
&out_stride_h,
&out_stride_w);
cudnnTensorFormat_t x_format = x_desc.GetTensorFormat();
cudnnTensorFormat_t y_format = y_desc.GetTensorFormat();
if (x_format == DNN_TENSOR_NCHW_VECT_C && y_format != DNN_TENSOR_NCHW_VECT_C) {
int feature_num = in_h * in_w * in_c;
int quad_hw = in_h * in_w << 2;
int threads_x;
int blocks_x = in_n;
int blocks_y;
if (feature_num <= kMaxThreadNbPerBlock) {
threads_x = feature_num;
blocks_y = 1;
} else {
threads_x = kMaxThreadNbPerBlock;
blocks_y = (feature_num % kMaxThreadNbPerBlock == 0)
? feature_num / kMaxThreadNbPerBlock
: feature_num / kMaxThreadNbPerBlock + 1;
}
dim3 gridSize(blocks_x, blocks_y, 1);
dim3 blockSize(threads_x, 1, 1);
hipLaunchKernelGGL(( kernel::CudnnTransformVecToScaKernel), dim3(gridSize), dim3(blockSize), 0, 0,
x, *alpha, *beta, y, out_stride_n, out_stride_c, out_stride_w, quad_hw);
} else if (x_format != DNN_TENSOR_NCHW_VECT_C && y_format == DNN_TENSOR_NCHW_VECT_C) {
int feature_num = in_h * in_w * in_c;
int quad_hw = in_h * in_w << 2;
int threads_x;
int blocks_x = in_n;
int blocks_y;
if (feature_num <= kMaxThreadNbPerBlock) {
threads_x = feature_num;
blocks_y = 1;
} else {
threads_x = kMaxThreadNbPerBlock;
blocks_y = (feature_num % kMaxThreadNbPerBlock == 0)
? feature_num / kMaxThreadNbPerBlock
: feature_num / kMaxThreadNbPerBlock + 1;
}
dim3 gridSize(blocks_x, blocks_y, 1);
dim3 blockSize(threads_x, 1, 1);
hipLaunchKernelGGL(( kernel::CudnnTransformScaToVecKernel), dim3(gridSize), dim3(blockSize), 0, 0,
x, *alpha, *beta, y, in_stride_n, in_stride_c, in_stride_w, quad_hw);
} else {
int muti_hw = in_h * in_w;
int threads_x;
int blocks_x = in_n;
int blocks_y = in_c;
int blocks_z;
if (muti_hw <= kMaxThreadNbPerBlock) {
threads_x = muti_hw;
blocks_z = 1;
} else {
threads_x = kMaxThreadNbPerBlock;
blocks_z = (muti_hw % kMaxThreadNbPerBlock == 0) ? muti_hw / kMaxThreadNbPerBlock
: muti_hw / kMaxThreadNbPerBlock + 1;
}
dim3 gridSize(blocks_x, blocks_y, blocks_z);
dim3 blockSize(threads_x, 1, 1);
hipLaunchKernelGGL(( kernel::CudnnTransformTensorKernel), dim3(gridSize), dim3(blockSize), 0, 0, x,
*alpha,
*beta,
y,
out_stride_n,
out_stride_c,
out_stride_h,
out_stride_w,
in_stride_n,
in_stride_c,
in_stride_h,
in_stride_w,
in_w,
in_h);
}
}
} // namespace impl
} // namespace cudnn
| f79ec98b058d5ef722ddeb50875b6abc9f16297a.cu | /*
* Unauthorized copying of this file, via any medium is strictly prohibited
* Proprietary and confidential
*/
#include <cudnn.h>
#include <cudnn/impl/cudnn_common_def.h>
#include <cudnn/impl/cudnn_deref.h>
#include <cudnn/impl/cudnn_handle.h>
#include <cudnn/impl/cudnn_tensor_descriptor.h>
#include <cudnn/impl/kernel/cudnn_tensor_descriptor.cuh>
#include <algorithm>
namespace cudnn {
namespace impl {
void AddTensor(CuHandle& handle,
const void* alpha,
const CuTensorDescriptor& a_desc,
const void* a,
const void* beta,
const CuTensorDescriptor& c_desc,
void* c) {
cudnnDataType_t in_data_type;
in_data_type = a_desc.GetDataType();
if (DNN_DATA_FLOAT == in_data_type) {
const float* in_a = reinterpret_cast<const float*>(a);
const float* in_alpha = reinterpret_cast<const float*>(alpha);
const float* in_beta = reinterpret_cast<const float*>(beta);
float* out_c = reinterpret_cast<float*>(c);
LaunchAddTensorKernel(in_alpha, a_desc, in_a, in_beta, c_desc, out_c);
} else if (DNN_DATA_HALF == in_data_type) {
/** to do half addtensor*/
} else if (DNN_DATA_INT8 == in_data_type) {
/** to do int8 addtensor*/
} else {
}
}
void TransformTensor(CuHandle& handle,
const void* alpha,
const CuTensorDescriptor& x_desc,
const void* x,
const void* beta,
const CuTensorDescriptor& y_desc,
void* y) {
cudnnDataType_t in_data_type;
in_data_type = x_desc.GetDataType();
if (DNN_DATA_FLOAT == in_data_type) {
const float* in_x = reinterpret_cast<const float*>(x);
const float* in_alpha = reinterpret_cast<const float*>(alpha);
const float* in_beta = reinterpret_cast<const float*>(beta);
float* out_y = reinterpret_cast<float*>(y);
LaunchTransformTensorKernel(in_alpha, x_desc, in_x, in_beta, y_desc, out_y);
} else if (DNN_DATA_HALF == in_data_type) {
/** to do half transformTensor */
} else if (DNN_DATA_INT8 == in_data_type || DNN_DATA_INT8x4 == in_data_type) {
const char* in_x = reinterpret_cast<const char*>(x);
const float* in_alpha = reinterpret_cast<const float*>(alpha);
const float* in_beta = reinterpret_cast<const float*>(beta);
char* out_y = reinterpret_cast<char*>(y);
LaunchTransformTensorKernel(in_alpha, x_desc, in_x, in_beta, y_desc, out_y);
} else {
}
}
// AddTensor support 2d and 3d, NCHW NCDHW and NHWC format
template <typename T1, typename T2>
void LaunchAddTensorKernel(const T2* alpha,
const CuTensorDescriptor& a_desc,
const T1* a,
const T2* beta,
const CuTensorDescriptor& c_desc,
T1* c) {
if (a_desc.GetN() == c_desc.GetN() && a_desc.GetH() == c_desc.GetH() && a_desc.GetW() == c_desc.GetW()) {
int image_size = 0;
image_size = a_desc.GetStride(1); // only support batch_size is first dimension
int n_out = c_desc.GetN();
int threads_total_x = n_out * image_size;
int threads_perblk_x = std::min(threads_total_x, kMaxThreadNbPerBlock);
int nblocks_x = (threads_total_x + threads_perblk_x - 1) / threads_perblk_x;
dim3 gridSize(nblocks_x, 1, 1);
dim3 blockSize(threads_perblk_x, 1, 1);
kernel::CudnnAddTensorEQDimKernel<<<gridSize, blockSize>>>(
a, *alpha, *beta, c, threads_total_x);
} else {
int c_batch = c_desc.GetN();
int a_chls_stride = a_desc.GetStride(2);
int c_chls_stride = c_desc.GetStride(2);
int c_chls = c_desc.GetDim(2);
int c_image_size = c_desc.GetStride(1); // c*h*w
int a_feature_size = 1;
int c_feature_size = 1;
for (int idx = 3; idx <= c_desc.GetNbDims(); idx++) {
a_feature_size = a_feature_size * a_desc.GetDim(idx);
}
for (int idx = 3; idx <= c_desc.GetNbDims(); idx++) {
c_feature_size = c_feature_size * c_desc.GetDim(idx);
}
int threads_total_x = c_feature_size;
int threads_perblk_x = std::min(threads_total_x, kMaxThreadNbPerBlock);
int nblocks_x = (threads_total_x + threads_perblk_x - 1) / threads_perblk_x;
dim3 gridSize(nblocks_x, c_batch, 1);
dim3 blockSize(threads_perblk_x, 1, 1);
kernel::CudnnAddTensorNotEQDimKernel<<<gridSize, blockSize>>>(
a, a_chls_stride, a_feature_size, *alpha, *beta, c, c_chls, c_chls_stride, c_image_size);
}
}
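// LaunchTransformTensorKernel copies x into y with an optional layout change, scaled by
// alpha/beta. Three cases are dispatched below: NCHW_VECT_C -> scalar layout,
// scalar layout -> NCHW_VECT_C, and a generic strided copy that covers NCHW <-> NHWC.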
template <class T>
void LaunchTransformTensorKernel(const float* alpha,
const CuTensorDescriptor& x_desc,
const T* x,
const float* beta,
const CuTensorDescriptor& y_desc,
T* y) {
int in_n, in_c, in_w, in_h, in_stride_n, in_stride_c, in_stride_h, in_stride_w;
int out_n, out_c, out_w, out_h, out_stride_n, out_stride_c, out_stride_h, out_stride_w;
cudnnDataType_t in_data_type, out_data_type;
x_desc.Get(&in_data_type,
&in_n,
&in_c,
&in_h,
&in_w,
&in_stride_n,
&in_stride_c,
&in_stride_h,
&in_stride_w);
y_desc.Get(&out_data_type,
&out_n,
&out_c,
&out_h,
&out_w,
&out_stride_n,
&out_stride_c,
&out_stride_h,
&out_stride_w);
cudnnTensorFormat_t x_format = x_desc.GetTensorFormat();
cudnnTensorFormat_t y_format = y_desc.GetTensorFormat();
if (x_format == DNN_TENSOR_NCHW_VECT_C && y_format != DNN_TENSOR_NCHW_VECT_C) {
int feature_num = in_h * in_w * in_c;
int quad_hw = in_h * in_w << 2;
int threads_x;
int blocks_x = in_n;
int blocks_y;
if (feature_num <= kMaxThreadNbPerBlock) {
threads_x = feature_num;
blocks_y = 1;
} else {
threads_x = kMaxThreadNbPerBlock;
blocks_y = (feature_num % kMaxThreadNbPerBlock == 0)
? feature_num / kMaxThreadNbPerBlock
: feature_num / kMaxThreadNbPerBlock + 1;
}
dim3 gridSize(blocks_x, blocks_y, 1);
dim3 blockSize(threads_x, 1, 1);
kernel::CudnnTransformVecToScaKernel<<<gridSize, blockSize>>>(
x, *alpha, *beta, y, out_stride_n, out_stride_c, out_stride_w, quad_hw);
} else if (x_format != DNN_TENSOR_NCHW_VECT_C && y_format == DNN_TENSOR_NCHW_VECT_C) {
int feature_num = in_h * in_w * in_c;
int quad_hw = in_h * in_w << 2;
int threads_x;
int blocks_x = in_n;
int blocks_y;
if (feature_num <= kMaxThreadNbPerBlock) {
threads_x = feature_num;
blocks_y = 1;
} else {
threads_x = kMaxThreadNbPerBlock;
blocks_y = (feature_num % kMaxThreadNbPerBlock == 0)
? feature_num / kMaxThreadNbPerBlock
: feature_num / kMaxThreadNbPerBlock + 1;
}
dim3 gridSize(blocks_x, blocks_y, 1);
dim3 blockSize(threads_x, 1, 1);
kernel::CudnnTransformScaToVecKernel<<<gridSize, blockSize>>>(
x, *alpha, *beta, y, in_stride_n, in_stride_c, in_stride_w, quad_hw);
} else {
int muti_hw = in_h * in_w;
int threads_x;
int blocks_x = in_n;
int blocks_y = in_c;
int blocks_z;
if (muti_hw <= kMaxThreadNbPerBlock) {
threads_x = muti_hw;
blocks_z = 1;
} else {
threads_x = kMaxThreadNbPerBlock;
blocks_z = (muti_hw % kMaxThreadNbPerBlock == 0) ? muti_hw / kMaxThreadNbPerBlock
: muti_hw / kMaxThreadNbPerBlock + 1;
}
dim3 gridSize(blocks_x, blocks_y, blocks_z);
dim3 blockSize(threads_x, 1, 1);
kernel::CudnnTransformTensorKernel<<<gridSize, blockSize>>>(x,
*alpha,
*beta,
y,
out_stride_n,
out_stride_c,
out_stride_h,
out_stride_w,
in_stride_n,
in_stride_c,
in_stride_h,
in_stride_w,
in_w,
in_h);
}
}
} // namespace impl
} // namespace cudnn
|
167b9ae2e419503456de9f929ce7716252dd4a7c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../image_headers/canny.cuh"
#include "../image_headers/threshold.cuh"
#include "../image_headers/convolution.cuh"
#include "../image_headers/gradient.cuh"
#include "../image_headers/suppression.cuh"
#include "../image_headers/image_utils.cuh"
#include "../image_headers/hystersis.cuh"
#include <vector>
#include <iostream>
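// Canny edge-detection pipeline: Gaussian blur, Sobel derivatives I_x/I_y, gradient magnitude
// and direction, non-maximum suppression, thresholding, and hysteresis. Every stage is launched
// as a 1D grid of 256-thread blocks covering the width*height pixels.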
void canny(unsigned char* image, unsigned char* output, float* theta, float* gradient, float* I_x, float* I_y, size_t width, size_t height) {
float gaussian_blur_kernel[9] = {0.0625, 0.125, 0.0625, 0.125, 0.25, 0.125, 0.0625, 0.125, 0.0625};
float k_x[9] = {-1, 0, 1, -2, 0, 2, -1, 0, 1};
float k_y[9] = {1, 2, 1, 0, 0, 0, -1, -2 , -1};
float *dgaussian, *dKx;
float *dKy;
hipMalloc((void **)&dgaussian, 9 * sizeof(float));
hipMalloc((void **)&dKx, 9 * sizeof(float));
hipMalloc((void **)&dKy, 9 * sizeof(float));
hipMemcpy(dgaussian, gaussian_blur_kernel, 9 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dKx, k_x, 9 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dKy, k_y, 9 * sizeof(float), hipMemcpyHostToDevice);
int size = width * height;
int threads_per_block = 256;
int num_blocks = (size - 1) / threads_per_block + 1;
hipLaunchKernelGGL(( convolve_kernel), dim3(num_blocks), dim3(threads_per_block), 0, 0, image, output, width, height, dgaussian, 3);
hipLaunchKernelGGL(( convolve_kernel2), dim3(num_blocks), dim3(threads_per_block), 0, 0, image, I_x, width, height, dKx, 3);
hipLaunchKernelGGL(( convolve_kernel2), dim3(num_blocks), dim3(threads_per_block), 0, 0, image, I_y, width, height, dKy, 3);
hipLaunchKernelGGL(( gradient_kernel), dim3(num_blocks), dim3(threads_per_block), 0, 0, I_x, I_y, gradient, width);
hipLaunchKernelGGL(( angle_kernel), dim3(num_blocks), dim3(threads_per_block), 0, 0, I_x, I_y, theta, width);
hipLaunchKernelGGL(( suppression_kernel), dim3(num_blocks), dim3(threads_per_block), 0, 0, image, output, width, height, gradient, theta);
hipLaunchKernelGGL(( threshold_kernel), dim3(num_blocks), dim3(threads_per_block), 0, 0, output, image, width, height, 50, 200, 255, 25);
    hipLaunchKernelGGL(( hystersis_kernel), dim3(num_blocks), dim3(threads_per_block), 0, 0, image, output, width, height, 50, 200, 255, 25);
    // free the device copies of the convolution kernels allocated above
    hipFree(dgaussian);
    hipFree(dKx);
    hipFree(dKy);
} | 167b9ae2e419503456de9f929ce7716252dd4a7c.cu | #include "../image_headers/canny.cuh"
#include "../image_headers/threshold.cuh"
#include "../image_headers/convolution.cuh"
#include "../image_headers/gradient.cuh"
#include "../image_headers/suppression.cuh"
#include "../image_headers/image_utils.cuh"
#include "../image_headers/hystersis.cuh"
#include <vector>
#include <iostream>
void canny(unsigned char* image, unsigned char* output, float* theta, float* gradient, float* I_x, float* I_y, size_t width, size_t height) {
float gaussian_blur_kernel[9] = {0.0625, 0.125, 0.0625, 0.125, 0.25, 0.125, 0.0625, 0.125, 0.0625};
float k_x[9] = {-1, 0, 1, -2, 0, 2, -1, 0, 1};
float k_y[9] = {1, 2, 1, 0, 0, 0, -1, -2 , -1};
float *dgaussian, *dKx;
float *dKy;
cudaMalloc((void **)&dgaussian, 9 * sizeof(float));
cudaMalloc((void **)&dKx, 9 * sizeof(float));
cudaMalloc((void **)&dKy, 9 * sizeof(float));
cudaMemcpy(dgaussian, gaussian_blur_kernel, 9 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dKx, k_x, 9 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dKy, k_y, 9 * sizeof(float), cudaMemcpyHostToDevice);
int size = width * height;
int threads_per_block = 256;
int num_blocks = (size - 1) / threads_per_block + 1;
convolve_kernel<<<num_blocks, threads_per_block>>>(image, output, width, height, dgaussian, 3);
convolve_kernel2<<<num_blocks, threads_per_block>>>(image, I_x, width, height, dKx, 3);
convolve_kernel2<<<num_blocks, threads_per_block>>>(image, I_y, width, height, dKy, 3);
gradient_kernel<<<num_blocks, threads_per_block>>>(I_x, I_y, gradient, width);
angle_kernel<<<num_blocks, threads_per_block>>>(I_x, I_y, theta, width);
suppression_kernel<<<num_blocks, threads_per_block>>>(image, output, width, height, gradient, theta);
threshold_kernel<<<num_blocks, threads_per_block>>>(output, image, width, height, 50, 200, 255, 25);
    hystersis_kernel<<<num_blocks, threads_per_block>>>(image, output, width, height, 50, 200, 255, 25);
    // free the device copies of the convolution kernels allocated above
    cudaFree(dgaussian);
    cudaFree(dKx);
    cudaFree(dKy);
} |
99d8e8076b5be59d19a2d7a8ef31115a7f461644.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//**********************************************************************
// *
// University Of North Carolina Charlotte *
// *
//Program: Vector adder                                               *
//Description: This program is for testing GPU performance with one *
// stencil. *
// *
// *
//File Name: pb2c_gpu.cu *
//File Version: 1.0 *
//Baseline: Homework_0 *
// *
//Course: ECGR6090- Heterogeneous Computing *
// *
//Programmed by: Roy Liu *
//Under Supervision of: Dr. Hamed Tabkhi                             *
// *
//Input file: No *
// *
//Output:Time of program running *
//**********************************************************************
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<cuda_runtime.h>
#define N 10000
#define RADIUS 8
#define BLOCK_SIZE 128
__global__ void stencil_1d(int *in, int *out)
{
__shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
int gindex = threadIdx.x + blockDim.x * blockIdx.x;
int lindex = threadIdx.x + RADIUS;
// Reads input elements into shared memory
temp[lindex] = in[gindex];
if (threadIdx.x < RADIUS)
{
temp[lindex - RADIUS] = in[gindex - RADIUS];
temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
}
__syncthreads();
// Applies the stencil
int result = 0;
for (int offset = -RADIUS; offset <= RADIUS; offset++)
result += temp[lindex + offset];
// Stores the result
out[gindex] = result;
}
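// Illustrative sketch: a plain CPU reference for the same padded stencil, useful for
// spot-checking the GPU result. It assumes the input buffer holds n + 2*RADIUS values,
// as allocated in main() below; the function name is illustrative and nothing here calls it.
static void stencil_1d_reference(const int *padded_in, int *out, int n)
{
    for (int i = 0; i < n; i++)
    {
        int result = 0;
        for (int offset = 0; offset <= 2 * RADIUS; offset++)
            result += padded_in[i + offset]; // window centered on padded_in[i + RADIUS]
        out[i] = result;
    }
}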
void random_ints(int* r, int n);
int main()
{
//for counting run time
struct timeval start, end;
float timer;
gettimeofday(&start, NULL);
int*in, *d_in, *out, *d_out;
int n;
n = N;
int size = (n+2*RADIUS)*sizeof(int);
// data initializing
in = (int *)malloc(size); random_ints(in, n);
out = (int *)malloc(size);
//for (int i=0;i<n;i++) printf("%d\n",a[i]);//for testing
hipMalloc((void**)&d_in, size);
hipMalloc((void**)&d_out, size);
// CPU TO GPU
hipMemcpy(d_in, in, size, hipMemcpyHostToDevice);
    // Define launch configuration: n/BLOCK_SIZE blocks, BLOCK_SIZE threads per block
dim3 dimGrid(n/BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE); //each block has X threads
// kernel
    hipLaunchKernelGGL(( stencil_1d), dim3(dimGrid), dim3(dimBlock), 0, 0, d_in + RADIUS, d_out); // skip the left halo padding so in[gindex - RADIUS] stays in bounds
hipMemcpy(out, d_out, size, hipMemcpyDeviceToHost);
// cleanup
free(in);
free(out);
hipFree(d_in);
hipFree(d_out);
gettimeofday(&end, NULL);
timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
printf("Data number is: %d\nBlocksize is: %d\nRadius is: %d\nRunning time is: %f ms\n", n,BLOCK_SIZE,RADIUS,timer/1000);
return 0;
}
//**********************************************************************
// Function Name: random_ints *
// Description: - Generate random integer *
// Input : None *
// Output : Random integer *
// Return: None *
//**********************************************************************
void random_ints(int* r, int n)
{
int i;
for (i=0; i < n+2*RADIUS; ++i)
{
r[i] = rand()/2;
}
}
| 99d8e8076b5be59d19a2d7a8ef31115a7f461644.cu | //**********************************************************************
// *
// University Of North Carolina Charlotte *
// *
//Program: Vector adder                                               *
//Description: This program is for testing GPU performance with one *
// stencil. *
// *
// *
//File Name: pb2c_gpu.cu *
//File Version: 1.0 *
//Baseline: Homework_0 *
// *
//Course: ECGR6090- Heterogeneous Computing *
// *
//Programmed by: Roy Liu *
//Under Supervision of: Dr. Hamed Tabkhi                             *
// *
//Input file: No *
// *
//Output:Time of program running *
//**********************************************************************
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<cuda_runtime.h>
#define N 10000
#define RADIUS 8
#define BLOCK_SIZE 128
__global__ void stencil_1d(int *in, int *out)
{
__shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
int gindex = threadIdx.x + blockDim.x * blockIdx.x;
int lindex = threadIdx.x + RADIUS;
// Reads input elements into shared memory
temp[lindex] = in[gindex];
if (threadIdx.x < RADIUS)
{
temp[lindex - RADIUS] = in[gindex - RADIUS];
temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
}
__syncthreads();
// Applies the stencil
int result = 0;
for (int offset = -RADIUS; offset <= RADIUS; offset++)
result += temp[lindex + offset];
// Stores the result
out[gindex] = result;
}
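// Note: each block stages BLOCK_SIZE + 2 * RADIUS values in shared memory -- its own
// BLOCK_SIZE elements plus a RADIUS-wide halo on either side. main() below allocates
// n + 2 * RADIUS ints and launches the kernel with the input pointer advanced by RADIUS,
// so these halo reads stay inside the padded buffer filled by random_ints().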
void random_ints(int* r, int n);
int main()
{
//for counting run time
struct timeval start, end;
float timer;
gettimeofday(&start, NULL);
int*in, *d_in, *out, *d_out;
int n;
n = N;
int size = (n+2*RADIUS)*sizeof(int);
// data initializing
in = (int *)malloc(size); random_ints(in, n);
out = (int *)malloc(size);
//for (int i=0;i<n;i++) printf("%d\n",a[i]);//for testing
cudaMalloc((void**)&d_in, size);
cudaMalloc((void**)&d_out, size);
// CPU TO GPU
cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);
    // Define launch configuration: n/BLOCK_SIZE blocks, BLOCK_SIZE threads per block
dim3 dimGrid(n/BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE); //each block has X threads
// kernel
    stencil_1d<<<dimGrid, dimBlock>>>(d_in + RADIUS, d_out); // skip the left halo padding so in[gindex - RADIUS] stays in bounds
cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
// cleanup
free(in);
free(out);
cudaFree(d_in);
cudaFree(d_out);
gettimeofday(&end, NULL);
timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
printf("Data number is: %d\nBlocksize is: %d\nRadius is: %d\nRunning time is: %f ms\n", n,BLOCK_SIZE,RADIUS,timer/1000);
return 0;
}
//**********************************************************************
// Function Name: random_ints *
// Description: - Generate random integer *
// Input : None *
// Output : Random integer *
// Return: None *
//**********************************************************************
void random_ints(int* r, int n)
{
int i;
for (i=0; i < n+2*RADIUS; ++i)
{
r[i] = rand()/2;
}
}
|
906c6879fd92d5f395d785dc50d8d08a931a4624.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
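// Illustrative sketch of the NTSC weighting described in the comment block above,
// factored into a small helper. The name ntsc_intensity is illustrative only; the
// kernel below keeps its own inline computation and does not call it.
__host__ __device__ inline unsigned char ntsc_intensity(uchar4 rgba)
{
  // I = .299f * R + .587f * G + .114f * B; the alpha channel (.w) is ignored
  return (unsigned char)(.299f * rgba.x + .587f * rgba.y + .114f * rgba.z);
}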
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
  //the launcher below spreads gridDim.x over numRows and gridDim.y over numCols,
  //so the x index walks image rows and the y index walks image columns
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  int col = blockIdx.y * blockDim.y + threadIdx.y;
  if (row < numRows && col < numCols){
    int index = row * numCols + col; //row-major layout: numCols pixels per row
    greyImage[index] = rgbaImage[index].x * 0.299f + rgbaImage[index].y * 0.587f + rgbaImage[index].z * 0.114f;
  }
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(16, 16, 1); //TODO
unsigned int gridX = ( numRows % blockSize.x) == 0 ? ( numRows / blockSize.x ) : ( numRows / blockSize.x + 1);
unsigned int gridY = ( numCols % blockSize.y) == 0 ? ( numCols / blockSize.y ) : ( numCols / blockSize.y + 1);
const dim3 gridSize( gridX, gridY, 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 906c6879fd92d5f395d785dc50d8d08a931a4624.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
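// Illustrative sketch: a serial CPU version of the same conversion, handy for
// spot-checking the GPU output. It assumes the usual row-major layout with numCols
// pixels per row; the function name is illustrative and nothing in this file calls it.
inline void rgba_to_greyscale_cpu(const uchar4 *rgba, unsigned char *grey,
                                  int numRows, int numCols)
{
  for (int r = 0; r < numRows; ++r)
    for (int c = 0; c < numCols; ++c)
    {
      const uchar4 p = rgba[r * numCols + c];
      grey[r * numCols + c] = (unsigned char)(.299f * p.x + .587f * p.y + .114f * p.z);
    }
}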
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
  //the launcher below spreads gridDim.x over numRows and gridDim.y over numCols,
  //so the x index walks image rows and the y index walks image columns
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  int col = blockIdx.y * blockDim.y + threadIdx.y;
  if (row < numRows && col < numCols){
    int index = row * numCols + col; //row-major layout: numCols pixels per row
    greyImage[index] = rgbaImage[index].x * 0.299f + rgbaImage[index].y * 0.587f + rgbaImage[index].z * 0.114f;
  }
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(16, 16, 1); //TODO
unsigned int gridX = ( numRows % blockSize.x) == 0 ? ( numRows / blockSize.x ) : ( numRows / blockSize.x + 1);
unsigned int gridY = ( numCols % blockSize.y) == 0 ? ( numCols / blockSize.y ) : ( numCols / blockSize.y + 1);
const dim3 gridSize( gridX, gridY, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
4573327715dfb1f7076db3908915046af269078d.hip | // !!! This is a file automatically generated by hipify!!!
#include "VolumeVisualization.h"
#include <cinder/Log.h>
#include "Utils.h"
#include "MarchingCubes.h"
#include "helper_matrixmath.h"
#include "TrilinearInterpolation.h"
#include <ObjectLoader.h>
using namespace Eigen;
ar3d::VolumeVisualizationParams::VolumeVisualizationParams()
: mode_(Mode::Volume)
, slicePosition_(0)
, sliceAxis_(0)
, rangeMin_(0)
, rangeMax_(1)
, stepSize_(0.02)
, showGridCells_(false)
, gridCellColor_(100, 150, 150)
{
//default light, may be reconstructed later
directionalLightDir_ = ar::utils::toGLM(Vector3d(0.2f, -1.6f, -0.4f).normalized());
directionalLightColor_ = cinder::vec4(1, 1, 1, 1);
ambientLightColor_ = cinder::vec4(0.25f, 0.25f, 0.25f, 1.0f);
showNormals_ = false;
}
void ar3d::VolumeVisualizationParams::addParams(const cinder::params::InterfaceGlRef& params)
{
this->params_ = params;
std::vector<std::string> visualizationModeEnums = { "slice", "raytracing", "marching cubes", "high resolution" };
params->addParam("SdfVisualizationMode", visualizationModeEnums, reinterpret_cast<int*>(&mode_)).group("Rendering").label("SDF Rendering").updateFn([this]() {this->updateVisualizationMode(); });
std::vector<std::string> sliceAxisEnums = { "free", "X", "Y", "Z" };
params->addParam("SdfVisualizationSliceAxis", sliceAxisEnums, &sliceAxis_).group("Rendering").label("Slice Axis");
params->addParam("SdfVisualizationLightDir", &directionalLightDir_).group("Rendering").label("Light direction");
params->addParam("SdfVisualizationLightColor", &directionalLightColor_).group("Rendering").label("Light color");
params->addParam("SdfVisualizationAmbientColor", &ambientLightColor_).group("Rendering").label("Ambient color");
params->addParam("SdfVisualizationShowNormals", &showNormals_).group("Rendering").label("Show normals");
params->addParam("SdfVisualizationStepSize", &stepSize_).min(0.01f).max(1).step(0.01).group("Rendering").label("Step size");
params->addParam("SdfVisualizationGridCells", &showGridCells_).group("Rendering").label("Show grid cells");
updateVisualizationMode();
}
void ar3d::VolumeVisualizationParams::load(const cinder::JsonTree& parent)
{
mode_ = ToMode(parent.getValueForKey("Mode"));
directionalLightDir_.x = parent.getChild("LightDirection").getValueAtIndex<float>(0);
directionalLightDir_.y = parent.getChild("LightDirection").getValueAtIndex<float>(1);
directionalLightDir_.z = parent.getChild("LightDirection").getValueAtIndex<float>(2);
directionalLightColor_.r = parent.getChild("LightColor").getValueAtIndex<float>(0);
directionalLightColor_.g = parent.getChild("LightColor").getValueAtIndex<float>(1);
directionalLightColor_.b = parent.getChild("LightColor").getValueAtIndex<float>(2);
ambientLightColor_.r = parent.getChild("AmbientColor").getValueAtIndex<float>(0);
ambientLightColor_.g = parent.getChild("AmbientColor").getValueAtIndex<float>(1);
ambientLightColor_.b = parent.getChild("AmbientColor").getValueAtIndex<float>(2);
showNormals_ = parent.getValueForKey<bool>("ShowNormals");
stepSize_ = parent.getValueForKey<double>("StepSize");
if (parent.hasChild("ShowGridCells")) showGridCells_ = parent.getValueForKey<bool>("ShowGridCells");
updateVisualizationMode();
}
void ar3d::VolumeVisualizationParams::save(cinder::JsonTree& parent) const
{
parent.addChild(cinder::JsonTree("Mode", FromMode(mode_)));
parent.addChild(cinder::JsonTree::makeArray("LightDirection")
.addChild(cinder::JsonTree("", directionalLightDir_.x))
.addChild(cinder::JsonTree("", directionalLightDir_.y))
.addChild(cinder::JsonTree("", directionalLightDir_.z)));
parent.addChild(cinder::JsonTree::makeArray("LightColor")
.addChild(cinder::JsonTree("", directionalLightColor_.r))
.addChild(cinder::JsonTree("", directionalLightColor_.g))
.addChild(cinder::JsonTree("", directionalLightColor_.b)));
parent.addChild(cinder::JsonTree::makeArray("AmbientColor")
.addChild(cinder::JsonTree("", ambientLightColor_.r))
.addChild(cinder::JsonTree("", ambientLightColor_.g))
.addChild(cinder::JsonTree("", ambientLightColor_.b)));
parent.addChild(cinder::JsonTree("ShowNormals", showNormals_));
parent.addChild(cinder::JsonTree("StepSize", stepSize_));
parent.addChild(cinder::JsonTree("ShowGridCells", showGridCells_));
}
void ar3d::VolumeVisualizationParams::updateVisualizationMode()
{
if (mode_ == Mode::Slice)
{
params_->setOptions("SdfVisualizationSliceAxis", "visible=true");
params_->setOptions("SdfVisualizationLightDir", "visible=false");
params_->setOptions("SdfVisualizationLightColor", "visible=false");
params_->setOptions("SdfVisualizationAmbientColor", "visible=false");
params_->setOptions("SdfVisualizationShowNormals", "visible=false");
params_->setOptions("SdfVisualizationStepSize", "visible=false");
}
else
{
params_->setOptions("SdfVisualizationSliceAxis", "visible=false");
params_->setOptions("SdfVisualizationLightDir", "visible=true");
params_->setOptions("SdfVisualizationLightColor", "visible=true");
params_->setOptions("SdfVisualizationAmbientColor", "visible=true");
params_->setOptions("SdfVisualizationShowNormals", "visible=true");
params_->setOptions("SdfVisualizationStepSize", "visible=true");
}
}
ar3d::VolumeVisualization::VolumeVisualization(const cinder::app::WindowRef& window,
const cinder::Camera* cam, VolumeVisualizationParams* params)
: volumeValid_(false)
, cam_(cam)
, window_(window)
, cellValid_(false)
, params_(params)
{
if (window) {
//connect input
window->getSignalMouseWheel().connect(900, [this](cinder::app::MouseEvent& event) {this->mouseWheel(event); });
//load resources
reloadResources();
}
//else: running in window-less mode (only for export)
}
ar3d::VolumeVisualization::~VolumeVisualization()
{
if (sdfData_)
sdfData_->deleteTexture();
//This fails because when the destructor is called, the OpenGL-context is already destroyed.
//if (cellVertexBuffer_)
// CUMAT_SAFE_CALL(hipGLUnregisterBufferObject(cellVertexBuffer_->getId()));
}
void ar3d::VolumeVisualization::setInput(const SoftBodyGrid3D::Input& input)
{
std::lock_guard<std::mutex> guard(inputMutex_);
input_ = input;
positions_ = input_.referencePositions_.deepClone();
advectedSdf_ = nullptr;
volumeValid_ = false;
mcBatch_ = nullptr;
highResBatch_ = nullptr;
highResMesh_ = nullptr;
cellValid_ = false;
cellBatch_ = nullptr;
}
void ar3d::VolumeVisualization::setHighResInputMesh(const std::string & file)
{
if (file.empty()) {
//no high resolution mesh
highResMesh_ = nullptr;
modifiedHighResMesh_ = nullptr;
}
else {
//load mesh
highResMesh_ = ObjectLoader::loadCustomObj(file);
highResBatch_ = nullptr;
if (highResMesh_->hasTexCoords0()) {
//use original texture coordinates
modifiedHighResMesh_ = cinder::TriMesh::create(*highResMesh_);
}
else {
//use generated texture coordinates (the original vertex position)
//to provide a mapping back to the undeformed mesh.
//If the original mesh has texture coordinates, write original positions into texCoords1,
//else, write them into tex coords 0
auto format = cinder::TriMesh::formatFromSource(*highResMesh_).normals();
if (highResMesh_->hasTexCoords0()) format = format.texCoords0(2).texCoords1(3);
else format = format.texCoords0(3);
auto mesh = cinder::TriMesh::create(*highResMesh_, format);
size_t n = mesh->getNumVertices();
if (highResMesh_->hasTexCoords0()) {
mesh->getBufferTexCoords1().clear();
for (size_t i = 0; i < n; ++i) {
mesh->appendTexCoord1(mesh->getPositions<3>()[i]);
}
}
else {
mesh->getBufferTexCoords0().clear();
for (size_t i = 0; i < n; ++i) {
mesh->appendTexCoord0(mesh->getPositions<3>()[i]);
}
}
modifiedHighResMesh_ = mesh;
}
}
}
bool ar3d::VolumeVisualization::hasHighResMesh() const
{
return modifiedHighResMesh_ != nullptr && highResMesh_!=nullptr;
}
void ar3d::VolumeVisualization::update(const SoftBodyGrid3D::State& state)
{
positions_ = input_.referencePositions_ + state.displacements_;
advectedSdf_ = state.advectedSDF_;
gridDisplacements_ = state.gridDisplacements_;
volumeValid_ = false;
mcBatch_ = nullptr;
cellValid_ = false;
highResBatch_ = nullptr;
real3 centerOfMass;
positions_.segment<1>(input_.centerOfMassIndex_).eval().copyToHost(¢erOfMass);
CI_LOG_I("center of mass: " << centerOfMass);
}
bool ar3d::VolumeVisualization::needsTransferFunction() const
{
return params_->mode_ == VolumeVisualizationParams::Mode::Slice;
}
void ar3d::VolumeVisualization::setTransferFunction(cinder::gl::Texture1dRef transferFunction,
double rangeMin, double rangeMax)
{
this->transferFunctionTexture_ = transferFunction;
params_->rangeMin_ = rangeMin;
params_->rangeMax_ = rangeMax;
}
void ar3d::VolumeVisualization::draw()
{
if (!volumeValid_)
{
//update volume / create textures
std::lock_guard<std::mutex> guard(inputMutex_);
if (input_.grid_ == nullptr) return; //no input yet
WorldGridRealDataPtr currentSdf = advectedSdf_ != nullptr ? advectedSdf_ : input_.referenceSdf_;
//invalidate volume
if (sdfData_ == nullptr || currentSdf->getGrid()->getSize() != sdfData_->getGrid()->getSize())
{
//allocate new texture
if (sdfData_ != nullptr) sdfData_->deleteTexture();
sdfData_ = std::make_shared<WorldGridData<real>>(currentSdf->getGrid());
} else
{
//just update grid
sdfData_->getGrid()->setOffset(currentSdf->getGrid()->getOffset());
}
//update data
if (currentSdf->hasHostMemory())
{
sdfData_->setHostMemory(currentSdf->getHostMemory());
sdfData_->invalidateTexture();
sdfTexture_ = sdfData_->getTexture(WorldGridData<real>::DataSource::HOST);
} else
{
//sdfData_->setDeviceMemory(currentSdf->getDeviceMemory());
//sdfData_->invalidateTexture();
//sdfTexture_ = sdfData_->getTexture(WorldGridData<float>::DataSource::DEVICE);
sdfData_->setDeviceMemory(currentSdf->getDeviceMemory());
sdfData_->copyDeviceToHost();
sdfData_->invalidateTexture();
sdfTexture_ = sdfData_->getTexture(WorldGridData<real>::DataSource::HOST);
}
volumeValid_ = true;
}
//actual drawing
if (params_->showGridCells_)
drawCells();
switch (params_->mode_)
{
case VolumeVisualizationParams::Mode::Slice: drawSlice(); break;
case VolumeVisualizationParams::Mode::Volume: drawSurface(); break;
case VolumeVisualizationParams::Mode::MCSurface: drawMarchingCubes(); break;
case VolumeVisualizationParams::Mode::HighRes: drawHighResMesh(); break;
default: throw std::exception("Unknown visualization mode");
}
}
void ar3d::VolumeVisualization::reloadResources()
{
try {
sliceShader_ = cinder::gl::GlslProg::create(
cinder::app::loadAsset("shaders/VolumeVisualizationSlice.vert"),
cinder::app::loadAsset("shaders/VolumeVisualizationSlice.frag"));
sliceBatch_ = cinder::gl::Batch::create(cinder::geom::Rect(), sliceShader_);
CI_LOG_I("slice shader (re)loaded");
surfaceShader_ = cinder::gl::GlslProg::create(
cinder::app::loadAsset("shaders/VolumeVisualizationSurface.vert"),
cinder::app::loadAsset("shaders/VolumeVisualizationSurface.frag"));
surfaceBatch_ = cinder::gl::Batch::create(cinder::geom::Rect(cinder::Rectf(-1,-1, +1,+1)), surfaceShader_);
CI_LOG_I("surface shader (re)loaded");
cinder::gl::GlslProg::Format mcShaderFormat;
mcShaderFormat.vertex(cinder::app::loadAsset("shaders/VolumeVisualizationMC.vert"));
mcShaderFormat.geometry(cinder::app::loadAsset("shaders/VolumeVisualizationMC.geom"));
mcShaderFormat.fragment(cinder::app::loadAsset("shaders/VolumeVisualizationMC.frag"));
mcShaderFormat.attrib(cinder::geom::CUSTOM_0, "in_NodeIndices");
mcShaderFormat.attrib(cinder::geom::CUSTOM_1, "in_InterpWeight");
mcShader_ = cinder::gl::GlslProg::create(mcShaderFormat);
mcBatch_ = nullptr;
CI_LOG_I("marching cubes shader (re)loaded");
} catch (const cinder::gl::GlslProgExc& ex)
{
CI_LOG_EXCEPTION("Unable to load shaders", ex);
}
}
void ar3d::VolumeVisualization::saveSdf(const std::string & filename)
{
std::ofstream o(filename, std::ofstream::binary);
sdfData_->save(o);
o.close();
}
void ar3d::VolumeVisualization::saveMCMesh(const std::string & filename)
{
updateMarchingCubes();
//old: save .obj
//ObjectLoader::saveCustomObj(mcMesh_, filename);
//new: save .ply
std::string path = filename;
std::string::size_type i = path.rfind('.', path.length());
if (i != std::string::npos) {
path.replace(i + 1, 3, "ply");
}
ObjectLoader::saveMeshPly(mcMesh_, path);
}
void ar3d::VolumeVisualization::saveHighResultMesh(const std::string & filename, bool includeNormals, bool includeOriginalPositions)
{
if (hasHighResMesh()) {
updateHighResMesh();
auto format = ci::TriMesh::formatFromSource(*modifiedHighResMesh_);
if (!includeNormals) format.mNormalsDims = 0;
if (!includeOriginalPositions && format.mTexCoords1Dims > 0) format.mTexCoords1Dims = 0;
else if (!includeOriginalPositions && format.mTexCoords1Dims == 0) format.mTexCoords0Dims = 0;
ci::TriMeshRef copy = ci::TriMesh::create(*modifiedHighResMesh_, format);
//old: save .obj
//ObjectLoader::saveCustomObj(copy, filename);
//new: save .ply
std::string path = filename;
std::string::size_type i = path.rfind('.', path.length());
if (i != std::string::npos) {
path.replace(i + 1, 3, "ply");
}
ObjectLoader::saveMeshPly(copy, path);
}
}
void ar3d::VolumeVisualization::drawSlice()
{
double maxSlicePos = (sdfData_->getGrid()->getSize().cast<double>()).norm() * 0.5;
params_->slicePosition_ = ::max(-maxSlicePos, ::min(maxSlicePos, params_->slicePosition_));
using namespace ar::utils;
Vector3d eyePos = toEigen(cam_->getEyePoint());
Vector3d gridCenter = (sdfData_->getGrid()->getOffset().cast<double>() + (Vector3i(1, 1, 1) + sdfData_->getGrid()->getSize()).cast<double>() * 0.5) * sdfData_->getGrid()->getVoxelSize();
Vector3d focusPoint;
Vector3d planeNormal;
switch (params_->sliceAxis_)
{
case 0: //free
planeNormal = toEigen(-cam_->getViewDirection()).normalized();
focusPoint = gridCenter + params_->slicePosition_ * sdfData_->getGrid()->getVoxelSize() * (gridCenter - eyePos).normalized();
break;
case 1: //X
planeNormal = Vector3d(1, 0, 0);
focusPoint = gridCenter + params_->slicePosition_ * sdfData_->getGrid()->getVoxelSize() * planeNormal;
break;
case 2: //Y
planeNormal = Vector3d(0, 1, 0);
focusPoint = gridCenter + params_->slicePosition_ * sdfData_->getGrid()->getVoxelSize() * planeNormal;
break;
case 3: //Z
planeNormal = Vector3d(0, 0, 1);
focusPoint = gridCenter + params_->slicePosition_ * sdfData_->getGrid()->getVoxelSize() * planeNormal;
break;
}
double size = sdfData_->getGrid()->getSize().cast<double>().sum() * sdfData_->getGrid()->getVoxelSize(); //estimate of the size of the plane
{
//we want to draw the slice through 'focusPoint' with normal 'planeNormal'
cinder::gl::ScopedMatrices m;
cinder::gl::ScopedDepthTest dt(true);
cinder::gl::ScopedDepthWrite dw(false);
Quaterniond rot = Quaterniond::FromTwoVectors(Vector3d(0, 0, 1), planeNormal);
cinder::gl::translate(toGLM(focusPoint));
cinder::gl::rotate(toGLM(rot));
cinder::gl::scale(float(size), float(size), float(size));
cinder::gl::ScopedTextureBind t0(transferFunctionTexture_, 0);
sliceShader_->uniform("tfTex", 0);
cinder::gl::ScopedTextureBind t1(sdfTexture_, 1);
sliceShader_->uniform("volTex", 1);
sliceShader_->uniform("tfMin", static_cast<float>(params_->rangeMin_));
sliceShader_->uniform("tfMax", static_cast<float>(params_->rangeMax_));
sliceShader_->uniform("boxMin", toGLM(((sdfData_->getGrid()->getOffset().cast<double>() + Vector3d(0.5, 0.5, 0.5)) * sdfData_->getGrid()->getVoxelSize()).eval()));
sliceShader_->uniform("boxSize", toGLM((sdfData_->getGrid()->getSize().cast<double>() * sdfData_->getGrid()->getVoxelSize()).eval()));
sliceBatch_->draw();
}
}
void ar3d::VolumeVisualization::drawSurface()
{
using namespace ar::utils;
cinder::gl::ScopedMatrices m;
cinder::gl::ScopedDepthTest dt(true);
cinder::gl::ScopedDepthWrite dw(true);
cinder::gl::ScopedTextureBind t1(sdfTexture_, 0);
surfaceShader_->uniform("volTex", 0);
surfaceShader_->uniform("boxMin", toGLM(((sdfData_->getGrid()->getOffset().cast<double>() - Vector3d(0.5, 0.5, 0.5)) * sdfData_->getGrid()->getVoxelSize()).eval()));
surfaceShader_->uniform("boxSize", toGLM((sdfData_->getGrid()->getSize().cast<double>() * sdfData_->getGrid()->getVoxelSize()).eval()));
surfaceShader_->uniform("stepSize", static_cast<float>(sdfData_->getGrid()->getVoxelSize() * params_->stepSize_));
surfaceShader_->uniform("directionalLightDir", params_->directionalLightDir_);
surfaceShader_->uniform("directionalLightColor", params_->directionalLightColor_);
surfaceShader_->uniform("ambientLightColor", params_->ambientLightColor_);
surfaceShader_->uniform("showNormals", params_->showNormals_);
surfaceBatch_->draw();
}
void ar3d::VolumeVisualization::updateMarchingCubes()
{
if (mcBatch_ == nullptr) {
std::vector<int> indexBuffer;
std::vector<MarchingCubes::Vertex> vertexBuffer;
MarchingCubes::polygonizeGrid(input_.referenceSdf_, input_.posToIndex_, indexBuffer, vertexBuffer);
mcMesh_ = cinder::TriMesh::create(
cinder::TriMesh::Format()
.positions(3)
.normals()
);
int maxIndex = 0;
for (const auto& v : vertexBuffer)
maxIndex = ::max(maxIndex, ::max(v.indexA, v.indexB));
if (maxIndex >= positions_.size())
{
CI_LOG_E("max index (" << maxIndex << ") >= positions.size() (" << positions_.size() << ")");
return;
}
std::vector<real3> positions(positions_.size());
positions_.copyToHost(&positions[0]);
for (const auto& v : vertexBuffer)
{
real3 pos = (1 - v.weight) * positions[v.indexA] + v.weight * positions[v.indexB];
mcMesh_->appendPosition(glm::vec3(pos.x, pos.y, pos.z));
}
for (size_t i = 0; i < indexBuffer.size() / 3; ++i)
mcMesh_->appendTriangle(indexBuffer[3 * i], indexBuffer[3 * i + 1], indexBuffer[3 * i + 2]);
mcMesh_->recalculateNormals();
if (window_!=nullptr)
mcBatch_ = cinder::gl::Batch::create(*mcMesh_, cinder::gl::getStockShader(cinder::gl::ShaderDef().lambert()));
}
}
void ar3d::VolumeVisualization::drawMarchingCubes()
{
#if 1
updateMarchingCubes();
ci::gl::BatchRef batch = mcBatch_;
if (batch) batch->draw();
#endif
//TODO: nothing is displayed. Why??
#if 0
if (mcBatch_ == nullptr)
{
//create new marching cubes mesh
std::vector<int> indexBuffer;
std::vector<MarchingCubes::Vertex> vertexBuffer;
MarchingCubes::polygonizeGrid(input_.referenceSdf_, input_.posToIndex_, indexBuffer, vertexBuffer);
std::vector<std::pair<cinder::geom::BufferLayout, cinder::gl::VboRef>> vertexVbos;
std::vector<int> vertexIndices(2*vertexBuffer.size());
for (size_t i=0; i<vertexBuffer.size(); ++i)
{
vertexIndices[2 * i + 0] = vertexBuffer[i].indexA;
vertexIndices[2 * i + 1] = vertexBuffer[i].indexB;
}
std::vector<float> vertexWeights(vertexBuffer.size());
for (size_t i = 0; i < vertexBuffer.size(); ++i)
vertexWeights[i] = static_cast<float>(vertexBuffer[i].weight);
cinder::geom::BufferLayout layout1, layout2;
layout1.append(cinder::geom::Attrib::CUSTOM_0, cinder::geom::INTEGER, 2, 2 * sizeof(int), 0);
layout2.append(cinder::geom::Attrib::CUSTOM_1, cinder::geom::FLOAT, 1, sizeof(float), 0);
vertexVbos.emplace_back(layout1, cinder::gl::Vbo::create(GL_ARRAY_BUFFER, vertexIndices, GL_STATIC_DRAW));
vertexVbos.emplace_back(layout2, cinder::gl::Vbo::create(GL_ARRAY_BUFFER, vertexWeights, GL_STATIC_DRAW));
cinder::gl::VboRef indexVbo = cinder::gl::Vbo::create(GL_ELEMENT_ARRAY_BUFFER, indexBuffer, GL_STATIC_DRAW);
cinder::gl::VboMeshRef mesh = cinder::gl::VboMesh::create(vertexBuffer.size(), GL_TRIANGLES, vertexVbos, indexBuffer.size(), GL_INT, indexVbo);
mcBatch_ = cinder::gl::Batch::create(mesh, mcShader_);
CI_LOG_D("Marching cubes mesh created, " << vertexBuffer.size() << " vertices, " << indexBuffer.size() << " indices");
//allocate position buffer
if (mcPositionBuffer_ != nullptr) {
CUMAT_SAFE_CALL(hipGLUnregisterBufferObject(mcPositionBuffer_->getId()));
mcPositionBuffer_.reset();
CI_LOG_D("position buffer deleted");
}
mcPositionBuffer_ = cinder::gl::BufferObj::create(GL_SHADER_STORAGE_BUFFER, sizeof(real3) * positions_.size(), nullptr, GL_DYNAMIC_DRAW);
CUMAT_SAFE_CALL(hipGLRegisterBufferObject(mcPositionBuffer_->getId()));
CI_LOG_D("position buffer allocated of size " << sizeof(real3) << " * " << positions_.size());
mcValid_ = false;
}
if (!stateValid_)
{
//write position buffer
void* mem;
CUMAT_SAFE_CALL(hipDeviceSynchronize());
CUMAT_SAFE_CALL(hipGLMapBufferObject__(&mem, mcPositionBuffer_->getId()));
CUMAT_SAFE_CALL(hipMemcpy(mem, positions_.data(), sizeof(real3) * positions_.size(), hipMemcpyDeviceToDevice));
CUMAT_SAFE_CALL(hipDeviceSynchronize());
CUMAT_SAFE_CALL(hipGLUnmapBufferObject(mcPositionBuffer_->getId()));
CI_LOG_D("position buffer updated, " << positions_.size() << " entries written");
std::vector<real3> testData(positions_.size());
positions_.copyToHost(&testData[0]);
for (size_t i = 0; i < positions_.size(); ++i) cinder::app::console() << " " << testData[i].x << " " << testData[i].y << " " << testData[i].z << std::endl;
mcValid_ = true;
}
//draw
glBindBuffer(GL_SHADER_STORAGE_BUFFER, mcPositionBuffer_->getId());
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 3, mcPositionBuffer_->getId());
cinder::gl::ScopedFaceCulling c(false);
mcBatch_->draw();
glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
#endif
}
void ar3d::VolumeVisualization::updateHighResMesh()
{
if (!hasHighResMesh()) return;
if (highResBatch_ == nullptr) {
//create deformed mesh
if (gridDisplacements_.size() > 0) {
//1. copy grid displacements to host
WorldGridData<real3> gridDisplacements(input_.grid_);
gridDisplacements.setDeviceMemory(gridDisplacements_);
gridDisplacements.copyDeviceToHost();
//2. update vertex positions
const size_t n = modifiedHighResMesh_->getNumVertices();
for (size_t i = 0; i < n; ++i) {
glm::vec3 cellPos = highResMesh_->getTexCoords1<3>()[i];
//interpolate deformation
real3 xyz; int3 ijk; long double tmp;
xyz.x = modf(cellPos.x, &tmp); ijk.x = int(tmp);
xyz.y = modf(cellPos.y, &tmp); ijk.y = int(tmp);
xyz.z = modf(cellPos.z, &tmp); ijk.z = int(tmp);
real3 corners[] = {
gridDisplacements.getHost(ijk.x + 0, ijk.y + 0, ijk.z + 0),
gridDisplacements.getHost(ijk.x + 1, ijk.y + 0, ijk.z + 0),
gridDisplacements.getHost(ijk.x + 0, ijk.y + 1, ijk.z + 0),
gridDisplacements.getHost(ijk.x + 1, ijk.y + 1, ijk.z + 0),
gridDisplacements.getHost(ijk.x + 0, ijk.y + 0, ijk.z + 1),
gridDisplacements.getHost(ijk.x + 1, ijk.y + 0, ijk.z + 1),
gridDisplacements.getHost(ijk.x + 0, ijk.y + 1, ijk.z + 1),
gridDisplacements.getHost(ijk.x + 1, ijk.y + 1, ijk.z + 1)
};
real3 displacement = ar3d::trilinear(xyz, corners);
//update position
modifiedHighResMesh_->getPositions<3>()[i] =
highResMesh_->getPositions<3>()[i] +
glm::vec3(displacement.x, displacement.y, displacement.z);
}
}
//2. update normals
modifiedHighResMesh_->recalculateNormals();
//3. create batch
if (window_) {
if (modifiedHighResMesh_->hasColors())
highResBatch_ = cinder::gl::Batch::create(*modifiedHighResMesh_, cinder::gl::getStockShader(cinder::gl::ShaderDef().lambert().color()));
else
highResBatch_ = cinder::gl::Batch::create(*modifiedHighResMesh_, cinder::gl::getStockShader(cinder::gl::ShaderDef().lambert()));
}
}
}
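// Illustrative sketch of the standard trilinear blend that ar3d::trilinear is assumed to
// perform in updateHighResMesh() above: xyz holds the fractional coordinates inside the
// cell and the corner ordering matches the array built there ((0,0,0), (1,0,0), (0,1,0),
// (1,1,0), then the same four corners at z+1). The helper name is illustrative and
// nothing in this file calls it.
namespace ar3d {
static real3 trilinearSketch(const real3& xyz, const real3 corners[8])
{
    //blend along x on the four cell edges, then along y, then along z
    real3 c00 = (1 - xyz.x) * corners[0] + xyz.x * corners[1];
    real3 c10 = (1 - xyz.x) * corners[2] + xyz.x * corners[3];
    real3 c01 = (1 - xyz.x) * corners[4] + xyz.x * corners[5];
    real3 c11 = (1 - xyz.x) * corners[6] + xyz.x * corners[7];
    real3 c0 = (1 - xyz.y) * c00 + xyz.y * c10;
    real3 c1 = (1 - xyz.y) * c01 + xyz.y * c11;
    return (1 - xyz.z) * c0 + xyz.z * c1;
}
}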
void ar3d::VolumeVisualization::drawHighResMesh()
{
if (!hasHighResMesh()) {
//no high resolution mesh available, fallback to marching cubes
drawMarchingCubes();
return;
}
//update mesh
updateHighResMesh();
//draw it
highResBatch_->draw();
}
void ar3d::VolumeVisualization::drawCells()
{
    if (input_.numActiveCells_ == 0 || (input_.cellSdfs_.size() == 0 && input_.mapping_.size() == 0)) return;
if (cellBatch_ == nullptr)
{
//create index buffer
int numCells = input_.numActiveCells_;
std::vector<real8> sdfHost(input_.cellSdfs_.size()); input_.cellSdfs_.copyToHost(&sdfHost[0]);
std::vector<int4> indexHost(input_.mapping_.size()); input_.mapping_.copyToHost(&indexHost[0]);
std::vector<unsigned> indexBuffer;
static const int CubeEdges[] = {
0,1, 0,2, 1,3, 2,3, 4,5, 4,6, 5,7, 6,7, 0,4, 1,5, 2,6, 3,7
};
for (int i=0; i<numCells; ++i)
{
real8 sdf = sdfHost[i];
int c = 0;
if (sdf.first.x < 0) c |= 1;
if (sdf.first.y < 0) c |= 2;
if (sdf.first.z < 0) c |= 4;
if (sdf.first.w < 0) c |= 8;
if (sdf.second.x < 0) c |= 16;
if (sdf.second.y < 0) c |= 32;
if (sdf.second.z < 0) c |= 64;
if (sdf.second.w < 0) c |= 128;
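            // c is now an 8-bit mask of the corner signs (bit set when that corner's SDF
            // value is negative, i.e. inside the surface); 0 or 255 means all eight corners
            // lie on the same side, so the zero level set does not cross this cell and it
            // contributes no wireframe edges.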
if (c == 0 || c == 255) continue; //no boundary
//create cube
int4 index = indexHost[i];
int cellIndices[] = {
index.x, index.x + 1,
index.y, index.y + 1,
index.z, index.z + 1,
index.w, index.w + 1
};
for (int j=0; j<12; ++j)
{
indexBuffer.push_back(cellIndices[CubeEdges[2*j]]);
indexBuffer.push_back(cellIndices[CubeEdges[2*j+1]]);
}
}
//allocate vertex buffer
if (cellVertexBuffer_)
CUMAT_SAFE_CALL(hipGLUnregisterBufferObject(cellVertexBuffer_->getId()));
cellVertexBuffer_ = cinder::gl::Vbo::create(GL_ARRAY_BUFFER, input_.numActiveNodes_ * sizeof(float3), nullptr, GL_DYNAMIC_DRAW);
CUMAT_SAFE_CALL(hipGLRegisterBufferObject(cellVertexBuffer_->getId()));
cellValid_ = false;
//create vbo and batch
cinder::gl::VboRef indexBufferSolidSurface = cinder::gl::Vbo::create(GL_ELEMENT_ARRAY_BUFFER, indexBuffer, GL_STATIC_DRAW);
cinder::geom::BufferLayout layout;
layout.append(cinder::geom::Attrib::POSITION, cinder::geom::DataType::FLOAT, 3, sizeof(float3), 0);
cinder::gl::VboMeshRef vbo = cinder::gl::VboMesh::create(
input_.numActiveNodes_, GL_LINES, std::vector<std::pair<cinder::geom::BufferLayout, cinder::gl::VboRef>>({ std::make_pair(layout, cellVertexBuffer_) }),
static_cast<uint32_t>(indexBuffer.size()), GL_UNSIGNED_INT, indexBufferSolidSurface);
auto shader = cinder::gl::ShaderDef().color();
cellBatch_ = cinder::gl::Batch::create(vbo, cinder::gl::getStockShader(shader));
}
if (!cellValid_)
{
//update vertex buffer
cuMat::Matrix<float3, cuMat::Dynamic, 1, 1, cuMat::RowMajor> posFloat = positions_.cast<float3>();
void* dst;
CUMAT_SAFE_CALL(hipGLMapBufferObject__(&dst, cellVertexBuffer_->getId()));
CUMAT_SAFE_CALL(hipDeviceSynchronize());
CUMAT_SAFE_CALL(hipMemcpy(dst, posFloat.data(), sizeof(float3)*input_.numActiveNodes_, hipMemcpyDeviceToDevice));
CUMAT_SAFE_CALL(hipDeviceSynchronize());
CUMAT_SAFE_CALL(hipGLUnmapBufferObject(cellVertexBuffer_->getId()));
cellValid_ = true;
}
{
//draw
cinder::gl::ScopedColor col(params_->gridCellColor_);
cinder::gl::ScopedFaceCulling c(false);
cellBatch_->draw();
}
}
void ar3d::VolumeVisualization::mouseWheel(cinder::app::MouseEvent& event)
{
if (event.isHandled()) return; //already handled
if (params_->mode_ != VolumeVisualizationParams::Mode::Slice) return; //only needed for slice rendering
if (!event.isControlDown()) return; //control must be pressed
//move slice
params_->slicePosition_ += event.getWheelIncrement() * 0.5;
event.setHandled();
}
| 4573327715dfb1f7076db3908915046af269078d.cu | #include "VolumeVisualization.h"
#include <cinder/Log.h>
#include "Utils.h"
#include "MarchingCubes.h"
#include "helper_matrixmath.h"
#include "TrilinearInterpolation.h"
#include <ObjectLoader.h>
using namespace Eigen;
ar3d::VolumeVisualizationParams::VolumeVisualizationParams()
: mode_(Mode::Volume)
, slicePosition_(0)
, sliceAxis_(0)
, rangeMin_(0)
, rangeMax_(1)
, stepSize_(0.02)
, showGridCells_(false)
, gridCellColor_(100, 150, 150)
{
//default light, may be reconstructed later
directionalLightDir_ = ar::utils::toGLM(Vector3d(0.2f, -1.6f, -0.4f).normalized());
directionalLightColor_ = cinder::vec4(1, 1, 1, 1);
ambientLightColor_ = cinder::vec4(0.25f, 0.25f, 0.25f, 1.0f);
showNormals_ = false;
}
void ar3d::VolumeVisualizationParams::addParams(const cinder::params::InterfaceGlRef& params)
{
this->params_ = params;
std::vector<std::string> visualizationModeEnums = { "slice", "raytracing", "marching cubes", "high resolution" };
params->addParam("SdfVisualizationMode", visualizationModeEnums, reinterpret_cast<int*>(&mode_)).group("Rendering").label("SDF Rendering").updateFn([this]() {this->updateVisualizationMode(); });
std::vector<std::string> sliceAxisEnums = { "free", "X", "Y", "Z" };
params->addParam("SdfVisualizationSliceAxis", sliceAxisEnums, &sliceAxis_).group("Rendering").label("Slice Axis");
params->addParam("SdfVisualizationLightDir", &directionalLightDir_).group("Rendering").label("Light direction");
params->addParam("SdfVisualizationLightColor", &directionalLightColor_).group("Rendering").label("Light color");
params->addParam("SdfVisualizationAmbientColor", &ambientLightColor_).group("Rendering").label("Ambient color");
params->addParam("SdfVisualizationShowNormals", &showNormals_).group("Rendering").label("Show normals");
params->addParam("SdfVisualizationStepSize", &stepSize_).min(0.01f).max(1).step(0.01).group("Rendering").label("Step size");
params->addParam("SdfVisualizationGridCells", &showGridCells_).group("Rendering").label("Show grid cells");
updateVisualizationMode();
}
void ar3d::VolumeVisualizationParams::load(const cinder::JsonTree& parent)
{
mode_ = ToMode(parent.getValueForKey("Mode"));
directionalLightDir_.x = parent.getChild("LightDirection").getValueAtIndex<float>(0);
directionalLightDir_.y = parent.getChild("LightDirection").getValueAtIndex<float>(1);
directionalLightDir_.z = parent.getChild("LightDirection").getValueAtIndex<float>(2);
directionalLightColor_.r = parent.getChild("LightColor").getValueAtIndex<float>(0);
directionalLightColor_.g = parent.getChild("LightColor").getValueAtIndex<float>(1);
directionalLightColor_.b = parent.getChild("LightColor").getValueAtIndex<float>(2);
ambientLightColor_.r = parent.getChild("AmbientColor").getValueAtIndex<float>(0);
ambientLightColor_.g = parent.getChild("AmbientColor").getValueAtIndex<float>(1);
ambientLightColor_.b = parent.getChild("AmbientColor").getValueAtIndex<float>(2);
showNormals_ = parent.getValueForKey<bool>("ShowNormals");
stepSize_ = parent.getValueForKey<double>("StepSize");
if (parent.hasChild("ShowGridCells")) showGridCells_ = parent.getValueForKey<bool>("ShowGridCells");
updateVisualizationMode();
}
void ar3d::VolumeVisualizationParams::save(cinder::JsonTree& parent) const
{
parent.addChild(cinder::JsonTree("Mode", FromMode(mode_)));
parent.addChild(cinder::JsonTree::makeArray("LightDirection")
.addChild(cinder::JsonTree("", directionalLightDir_.x))
.addChild(cinder::JsonTree("", directionalLightDir_.y))
.addChild(cinder::JsonTree("", directionalLightDir_.z)));
parent.addChild(cinder::JsonTree::makeArray("LightColor")
.addChild(cinder::JsonTree("", directionalLightColor_.r))
.addChild(cinder::JsonTree("", directionalLightColor_.g))
.addChild(cinder::JsonTree("", directionalLightColor_.b)));
parent.addChild(cinder::JsonTree::makeArray("AmbientColor")
.addChild(cinder::JsonTree("", ambientLightColor_.r))
.addChild(cinder::JsonTree("", ambientLightColor_.g))
.addChild(cinder::JsonTree("", ambientLightColor_.b)));
parent.addChild(cinder::JsonTree("ShowNormals", showNormals_));
parent.addChild(cinder::JsonTree("StepSize", stepSize_));
parent.addChild(cinder::JsonTree("ShowGridCells", showGridCells_));
}
void ar3d::VolumeVisualizationParams::updateVisualizationMode()
{
if (mode_ == Mode::Slice)
{
params_->setOptions("SdfVisualizationSliceAxis", "visible=true");
params_->setOptions("SdfVisualizationLightDir", "visible=false");
params_->setOptions("SdfVisualizationLightColor", "visible=false");
params_->setOptions("SdfVisualizationAmbientColor", "visible=false");
params_->setOptions("SdfVisualizationShowNormals", "visible=false");
params_->setOptions("SdfVisualizationStepSize", "visible=false");
}
else
{
params_->setOptions("SdfVisualizationSliceAxis", "visible=false");
params_->setOptions("SdfVisualizationLightDir", "visible=true");
params_->setOptions("SdfVisualizationLightColor", "visible=true");
params_->setOptions("SdfVisualizationAmbientColor", "visible=true");
params_->setOptions("SdfVisualizationShowNormals", "visible=true");
params_->setOptions("SdfVisualizationStepSize", "visible=true");
}
}
ar3d::VolumeVisualization::VolumeVisualization(const cinder::app::WindowRef& window,
const cinder::Camera* cam, VolumeVisualizationParams* params)
: volumeValid_(false)
, cam_(cam)
, window_(window)
, cellValid_(false)
, params_(params)
{
if (window) {
//connect input
window->getSignalMouseWheel().connect(900, [this](cinder::app::MouseEvent& event) {this->mouseWheel(event); });
//load resources
reloadResources();
}
//else: running in window-less mode (only for export)
}
ar3d::VolumeVisualization::~VolumeVisualization()
{
if (sdfData_)
sdfData_->deleteTexture();
//This fails because when the destructor is called, the OpenGL-context is already destroyed.
//if (cellVertexBuffer_)
// CUMAT_SAFE_CALL(cudaGLUnregisterBufferObject(cellVertexBuffer_->getId()));
}
void ar3d::VolumeVisualization::setInput(const SoftBodyGrid3D::Input& input)
{
std::lock_guard<std::mutex> guard(inputMutex_);
input_ = input;
positions_ = input_.referencePositions_.deepClone();
advectedSdf_ = nullptr;
volumeValid_ = false;
mcBatch_ = nullptr;
highResBatch_ = nullptr;
highResMesh_ = nullptr;
cellValid_ = false;
cellBatch_ = nullptr;
}
void ar3d::VolumeVisualization::setHighResInputMesh(const std::string & file)
{
if (file.empty()) {
//no high resolution mesh
highResMesh_ = nullptr;
modifiedHighResMesh_ = nullptr;
}
else {
//load mesh
highResMesh_ = ObjectLoader::loadCustomObj(file);
highResBatch_ = nullptr;
if (highResMesh_->hasTexCoords0()) {
//use original texture coordinates
modifiedHighResMesh_ = cinder::TriMesh::create(*highResMesh_);
}
else {
//use generated texture coordinates (the original vertex position)
//to provide a mapping back to the undeformed mesh.
//If the original mesh has texture coordinates, write original positions into texCoords1,
//else, write them into tex coords 0
auto format = cinder::TriMesh::formatFromSource(*highResMesh_).normals();
if (highResMesh_->hasTexCoords0()) format = format.texCoords0(2).texCoords1(3);
else format = format.texCoords0(3);
auto mesh = cinder::TriMesh::create(*highResMesh_, format);
size_t n = mesh->getNumVertices();
if (highResMesh_->hasTexCoords0()) {
mesh->getBufferTexCoords1().clear();
for (size_t i = 0; i < n; ++i) {
mesh->appendTexCoord1(mesh->getPositions<3>()[i]);
}
}
else {
mesh->getBufferTexCoords0().clear();
for (size_t i = 0; i < n; ++i) {
mesh->appendTexCoord0(mesh->getPositions<3>()[i]);
}
}
modifiedHighResMesh_ = mesh;
}
}
}
bool ar3d::VolumeVisualization::hasHighResMesh() const
{
return modifiedHighResMesh_ != nullptr && highResMesh_!=nullptr;
}
void ar3d::VolumeVisualization::update(const SoftBodyGrid3D::State& state)
{
positions_ = input_.referencePositions_ + state.displacements_;
advectedSdf_ = state.advectedSDF_;
gridDisplacements_ = state.gridDisplacements_;
volumeValid_ = false;
mcBatch_ = nullptr;
cellValid_ = false;
highResBatch_ = nullptr;
real3 centerOfMass;
positions_.segment<1>(input_.centerOfMassIndex_).eval().copyToHost(¢erOfMass);
CI_LOG_I("center of mass: " << centerOfMass);
}
bool ar3d::VolumeVisualization::needsTransferFunction() const
{
return params_->mode_ == VolumeVisualizationParams::Mode::Slice;
}
void ar3d::VolumeVisualization::setTransferFunction(cinder::gl::Texture1dRef transferFunction,
double rangeMin, double rangeMax)
{
this->transferFunctionTexture_ = transferFunction;
params_->rangeMin_ = rangeMin;
params_->rangeMax_ = rangeMax;
}
void ar3d::VolumeVisualization::draw()
{
if (!volumeValid_)
{
//update volume / create textures
std::lock_guard<std::mutex> guard(inputMutex_);
if (input_.grid_ == nullptr) return; //no input yet
WorldGridRealDataPtr currentSdf = advectedSdf_ != nullptr ? advectedSdf_ : input_.referenceSdf_;
//invalidate volume
if (sdfData_ == nullptr || currentSdf->getGrid()->getSize() != sdfData_->getGrid()->getSize())
{
//allocate new texture
if (sdfData_ != nullptr) sdfData_->deleteTexture();
sdfData_ = std::make_shared<WorldGridData<real>>(currentSdf->getGrid());
} else
{
//just update grid
sdfData_->getGrid()->setOffset(currentSdf->getGrid()->getOffset());
}
//update data
if (currentSdf->hasHostMemory())
{
sdfData_->setHostMemory(currentSdf->getHostMemory());
sdfData_->invalidateTexture();
sdfTexture_ = sdfData_->getTexture(WorldGridData<real>::DataSource::HOST);
} else
{
//sdfData_->setDeviceMemory(currentSdf->getDeviceMemory());
//sdfData_->invalidateTexture();
//sdfTexture_ = sdfData_->getTexture(WorldGridData<float>::DataSource::DEVICE);
sdfData_->setDeviceMemory(currentSdf->getDeviceMemory());
sdfData_->copyDeviceToHost();
sdfData_->invalidateTexture();
sdfTexture_ = sdfData_->getTexture(WorldGridData<real>::DataSource::HOST);
}
volumeValid_ = true;
}
//actual drawing
if (params_->showGridCells_)
drawCells();
switch (params_->mode_)
{
case VolumeVisualizationParams::Mode::Slice: drawSlice(); break;
case VolumeVisualizationParams::Mode::Volume: drawSurface(); break;
case VolumeVisualizationParams::Mode::MCSurface: drawMarchingCubes(); break;
case VolumeVisualizationParams::Mode::HighRes: drawHighResMesh(); break;
default: throw std::exception("Unknown visualization mode");
}
}
void ar3d::VolumeVisualization::reloadResources()
{
try {
sliceShader_ = cinder::gl::GlslProg::create(
cinder::app::loadAsset("shaders/VolumeVisualizationSlice.vert"),
cinder::app::loadAsset("shaders/VolumeVisualizationSlice.frag"));
sliceBatch_ = cinder::gl::Batch::create(cinder::geom::Rect(), sliceShader_);
CI_LOG_I("slice shader (re)loaded");
surfaceShader_ = cinder::gl::GlslProg::create(
cinder::app::loadAsset("shaders/VolumeVisualizationSurface.vert"),
cinder::app::loadAsset("shaders/VolumeVisualizationSurface.frag"));
surfaceBatch_ = cinder::gl::Batch::create(cinder::geom::Rect(cinder::Rectf(-1,-1, +1,+1)), surfaceShader_);
CI_LOG_I("surface shader (re)loaded");
cinder::gl::GlslProg::Format mcShaderFormat;
mcShaderFormat.vertex(cinder::app::loadAsset("shaders/VolumeVisualizationMC.vert"));
mcShaderFormat.geometry(cinder::app::loadAsset("shaders/VolumeVisualizationMC.geom"));
mcShaderFormat.fragment(cinder::app::loadAsset("shaders/VolumeVisualizationMC.frag"));
mcShaderFormat.attrib(cinder::geom::CUSTOM_0, "in_NodeIndices");
mcShaderFormat.attrib(cinder::geom::CUSTOM_1, "in_InterpWeight");
mcShader_ = cinder::gl::GlslProg::create(mcShaderFormat);
mcBatch_ = nullptr;
CI_LOG_I("marching cubes shader (re)loaded");
} catch (const cinder::gl::GlslProgExc& ex)
{
CI_LOG_EXCEPTION("Unable to load shaders", ex);
}
}
void ar3d::VolumeVisualization::saveSdf(const std::string & filename)
{
std::ofstream o(filename, std::ofstream::binary);
sdfData_->save(o);
o.close();
}
void ar3d::VolumeVisualization::saveMCMesh(const std::string & filename)
{
updateMarchingCubes();
//old: save .obj
//ObjectLoader::saveCustomObj(mcMesh_, filename);
//new: save .ply
std::string path = filename;
std::string::size_type i = path.rfind('.', path.length());
if (i != std::string::npos) {
path.replace(i + 1, 3, "ply");
}
ObjectLoader::saveMeshPly(mcMesh_, path);
}
void ar3d::VolumeVisualization::saveHighResultMesh(const std::string & filename, bool includeNormals, bool includeOriginalPositions)
{
if (hasHighResMesh()) {
updateHighResMesh();
auto format = ci::TriMesh::formatFromSource(*modifiedHighResMesh_);
if (!includeNormals) format.mNormalsDims = 0;
if (!includeOriginalPositions && format.mTexCoords1Dims > 0) format.mTexCoords1Dims = 0;
else if (!includeOriginalPositions && format.mTexCoords1Dims == 0) format.mTexCoords0Dims = 0;
ci::TriMeshRef copy = ci::TriMesh::create(*modifiedHighResMesh_, format);
//old: save .obj
//ObjectLoader::saveCustomObj(copy, filename);
//new: save .ply
std::string path = filename;
std::string::size_type i = path.rfind('.', path.length());
if (i != std::string::npos) {
path.replace(i + 1, 3, "ply");
}
ObjectLoader::saveMeshPly(copy, path);
}
}
void ar3d::VolumeVisualization::drawSlice()
{
double maxSlicePos = (sdfData_->getGrid()->getSize().cast<double>()).norm() * 0.5;
params_->slicePosition_ = std::max(-maxSlicePos, std::min(maxSlicePos, params_->slicePosition_));
using namespace ar::utils;
Vector3d eyePos = toEigen(cam_->getEyePoint());
Vector3d gridCenter = (sdfData_->getGrid()->getOffset().cast<double>() + (Vector3i(1, 1, 1) + sdfData_->getGrid()->getSize()).cast<double>() * 0.5) * sdfData_->getGrid()->getVoxelSize();
Vector3d focusPoint;
Vector3d planeNormal;
switch (params_->sliceAxis_)
{
case 0: //free
planeNormal = toEigen(-cam_->getViewDirection()).normalized();
focusPoint = gridCenter + params_->slicePosition_ * sdfData_->getGrid()->getVoxelSize() * (gridCenter - eyePos).normalized();
break;
case 1: //X
planeNormal = Vector3d(1, 0, 0);
focusPoint = gridCenter + params_->slicePosition_ * sdfData_->getGrid()->getVoxelSize() * planeNormal;
break;
case 2: //Y
planeNormal = Vector3d(0, 1, 0);
focusPoint = gridCenter + params_->slicePosition_ * sdfData_->getGrid()->getVoxelSize() * planeNormal;
break;
case 3: //Z
planeNormal = Vector3d(0, 0, 1);
focusPoint = gridCenter + params_->slicePosition_ * sdfData_->getGrid()->getVoxelSize() * planeNormal;
break;
}
double size = sdfData_->getGrid()->getSize().cast<double>().sum() * sdfData_->getGrid()->getVoxelSize(); //estimate of the size of the plane
{
//we want to draw the slice through 'focusPoint' with normal 'planeNormal'
cinder::gl::ScopedMatrices m;
cinder::gl::ScopedDepthTest dt(true);
cinder::gl::ScopedDepthWrite dw(false);
Quaterniond rot = Quaterniond::FromTwoVectors(Vector3d(0, 0, 1), planeNormal);
cinder::gl::translate(toGLM(focusPoint));
cinder::gl::rotate(toGLM(rot));
cinder::gl::scale(float(size), float(size), float(size));
cinder::gl::ScopedTextureBind t0(transferFunctionTexture_, 0);
sliceShader_->uniform("tfTex", 0);
cinder::gl::ScopedTextureBind t1(sdfTexture_, 1);
sliceShader_->uniform("volTex", 1);
sliceShader_->uniform("tfMin", static_cast<float>(params_->rangeMin_));
sliceShader_->uniform("tfMax", static_cast<float>(params_->rangeMax_));
sliceShader_->uniform("boxMin", toGLM(((sdfData_->getGrid()->getOffset().cast<double>() + Vector3d(0.5, 0.5, 0.5)) * sdfData_->getGrid()->getVoxelSize()).eval()));
sliceShader_->uniform("boxSize", toGLM((sdfData_->getGrid()->getSize().cast<double>() * sdfData_->getGrid()->getVoxelSize()).eval()));
sliceBatch_->draw();
}
}
void ar3d::VolumeVisualization::drawSurface()
{
using namespace ar::utils;
cinder::gl::ScopedMatrices m;
cinder::gl::ScopedDepthTest dt(true);
cinder::gl::ScopedDepthWrite dw(true);
cinder::gl::ScopedTextureBind t1(sdfTexture_, 0);
surfaceShader_->uniform("volTex", 0);
surfaceShader_->uniform("boxMin", toGLM(((sdfData_->getGrid()->getOffset().cast<double>() - Vector3d(0.5, 0.5, 0.5)) * sdfData_->getGrid()->getVoxelSize()).eval()));
surfaceShader_->uniform("boxSize", toGLM((sdfData_->getGrid()->getSize().cast<double>() * sdfData_->getGrid()->getVoxelSize()).eval()));
surfaceShader_->uniform("stepSize", static_cast<float>(sdfData_->getGrid()->getVoxelSize() * params_->stepSize_));
surfaceShader_->uniform("directionalLightDir", params_->directionalLightDir_);
surfaceShader_->uniform("directionalLightColor", params_->directionalLightColor_);
surfaceShader_->uniform("ambientLightColor", params_->ambientLightColor_);
surfaceShader_->uniform("showNormals", params_->showNormals_);
surfaceBatch_->draw();
}
void ar3d::VolumeVisualization::updateMarchingCubes()
{
if (mcBatch_ == nullptr) {
std::vector<int> indexBuffer;
std::vector<MarchingCubes::Vertex> vertexBuffer;
MarchingCubes::polygonizeGrid(input_.referenceSdf_, input_.posToIndex_, indexBuffer, vertexBuffer);
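// polygonizeGrid emits vertices as interpolation records (two node indices plus
// a weight); the actual vertex positions are blended below from the current
// simulation node positions.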
mcMesh_ = cinder::TriMesh::create(
cinder::TriMesh::Format()
.positions(3)
.normals()
);
int maxIndex = 0;
for (const auto& v : vertexBuffer)
maxIndex = std::max(maxIndex, std::max(v.indexA, v.indexB));
if (maxIndex >= positions_.size())
{
CI_LOG_E("max index (" << maxIndex << ") >= positions.size() (" << positions_.size() << ")");
return;
}
std::vector<real3> positions(positions_.size());
positions_.copyToHost(&positions[0]);
for (const auto& v : vertexBuffer)
{
real3 pos = (1 - v.weight) * positions[v.indexA] + v.weight * positions[v.indexB];
mcMesh_->appendPosition(glm::vec3(pos.x, pos.y, pos.z));
}
for (size_t i = 0; i < indexBuffer.size() / 3; ++i)
mcMesh_->appendTriangle(indexBuffer[3 * i], indexBuffer[3 * i + 1], indexBuffer[3 * i + 2]);
mcMesh_->recalculateNormals();
if (window_!=nullptr)
mcBatch_ = cinder::gl::Batch::create(*mcMesh_, cinder::gl::getStockShader(cinder::gl::ShaderDef().lambert()));
}
}
void ar3d::VolumeVisualization::drawMarchingCubes()
{
#if 1
updateMarchingCubes();
ci::gl::BatchRef batch = mcBatch_;
if (batch) batch->draw();
#endif
//TODO: nothing is displayed. Why??
#if 0
if (mcBatch_ == nullptr)
{
//create new marching cubes mesh
std::vector<int> indexBuffer;
std::vector<MarchingCubes::Vertex> vertexBuffer;
MarchingCubes::polygonizeGrid(input_.referenceSdf_, input_.posToIndex_, indexBuffer, vertexBuffer);
std::vector<std::pair<cinder::geom::BufferLayout, cinder::gl::VboRef>> vertexVbos;
std::vector<int> vertexIndices(2*vertexBuffer.size());
for (size_t i=0; i<vertexBuffer.size(); ++i)
{
vertexIndices[2 * i + 0] = vertexBuffer[i].indexA;
vertexIndices[2 * i + 1] = vertexBuffer[i].indexB;
}
std::vector<float> vertexWeights(vertexBuffer.size());
for (size_t i = 0; i < vertexBuffer.size(); ++i)
vertexWeights[i] = static_cast<float>(vertexBuffer[i].weight);
cinder::geom::BufferLayout layout1, layout2;
layout1.append(cinder::geom::Attrib::CUSTOM_0, cinder::geom::INTEGER, 2, 2 * sizeof(int), 0);
layout2.append(cinder::geom::Attrib::CUSTOM_1, cinder::geom::FLOAT, 1, sizeof(float), 0);
vertexVbos.emplace_back(layout1, cinder::gl::Vbo::create(GL_ARRAY_BUFFER, vertexIndices, GL_STATIC_DRAW));
vertexVbos.emplace_back(layout2, cinder::gl::Vbo::create(GL_ARRAY_BUFFER, vertexWeights, GL_STATIC_DRAW));
cinder::gl::VboRef indexVbo = cinder::gl::Vbo::create(GL_ELEMENT_ARRAY_BUFFER, indexBuffer, GL_STATIC_DRAW);
cinder::gl::VboMeshRef mesh = cinder::gl::VboMesh::create(vertexBuffer.size(), GL_TRIANGLES, vertexVbos, indexBuffer.size(), GL_INT, indexVbo);
mcBatch_ = cinder::gl::Batch::create(mesh, mcShader_);
CI_LOG_D("Marching cubes mesh created, " << vertexBuffer.size() << " vertices, " << indexBuffer.size() << " indices");
//allocate position buffer
if (mcPositionBuffer_ != nullptr) {
CUMAT_SAFE_CALL(cudaGLUnregisterBufferObject(mcPositionBuffer_->getId()));
mcPositionBuffer_.reset();
CI_LOG_D("position buffer deleted");
}
mcPositionBuffer_ = cinder::gl::BufferObj::create(GL_SHADER_STORAGE_BUFFER, sizeof(real3) * positions_.size(), nullptr, GL_DYNAMIC_DRAW);
CUMAT_SAFE_CALL(cudaGLRegisterBufferObject(mcPositionBuffer_->getId()));
CI_LOG_D("position buffer allocated of size " << sizeof(real3) << " * " << positions_.size());
mcValid_ = false;
}
if (!stateValid_)
{
//write position buffer
void* mem;
CUMAT_SAFE_CALL(cudaDeviceSynchronize());
CUMAT_SAFE_CALL(cudaGLMapBufferObject(&mem, mcPositionBuffer_->getId()));
CUMAT_SAFE_CALL(cudaMemcpy(mem, positions_.data(), sizeof(real3) * positions_.size(), cudaMemcpyDeviceToDevice));
CUMAT_SAFE_CALL(cudaDeviceSynchronize());
CUMAT_SAFE_CALL(cudaGLUnmapBufferObject(mcPositionBuffer_->getId()));
CI_LOG_D("position buffer updated, " << positions_.size() << " entries written");
std::vector<real3> testData(positions_.size());
positions_.copyToHost(&testData[0]);
for (size_t i = 0; i < positions_.size(); ++i) cinder::app::console() << " " << testData[i].x << " " << testData[i].y << " " << testData[i].z << std::endl;
mcValid_ = true;
}
//draw
glBindBuffer(GL_SHADER_STORAGE_BUFFER, mcPositionBuffer_->getId());
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 3, mcPositionBuffer_->getId());
cinder::gl::ScopedFaceCulling c(false);
mcBatch_->draw();
glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
#endif
}
void ar3d::VolumeVisualization::updateHighResMesh()
{
if (!hasHighResMesh()) return;
if (highResBatch_ == nullptr) {
//create deformed mesh
if (gridDisplacements_.size() > 0) {
//1. copy grid displacements to host
WorldGridData<real3> gridDisplacements(input_.grid_);
gridDisplacements.setDeviceMemory(gridDisplacements_);
gridDisplacements.copyDeviceToHost();
//2. update vertex positions
const size_t n = modifiedHighResMesh_->getNumVertices();
for (size_t i = 0; i < n; ++i) {
glm::vec3 cellPos = highResMesh_->getTexCoords1<3>()[i];
//interpolate deformation
real3 xyz; int3 ijk; long double tmp;
xyz.x = modf(cellPos.x, &tmp); ijk.x = int(tmp);
xyz.y = modf(cellPos.y, &tmp); ijk.y = int(tmp);
xyz.z = modf(cellPos.z, &tmp); ijk.z = int(tmp);
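// Split the cell-space coordinate into integer cell index (ijk) and fractional
// part (xyz), then trilinearly interpolate the displacement from the 8 corners.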
real3 corners[] = {
gridDisplacements.getHost(ijk.x + 0, ijk.y + 0, ijk.z + 0),
gridDisplacements.getHost(ijk.x + 1, ijk.y + 0, ijk.z + 0),
gridDisplacements.getHost(ijk.x + 0, ijk.y + 1, ijk.z + 0),
gridDisplacements.getHost(ijk.x + 1, ijk.y + 1, ijk.z + 0),
gridDisplacements.getHost(ijk.x + 0, ijk.y + 0, ijk.z + 1),
gridDisplacements.getHost(ijk.x + 1, ijk.y + 0, ijk.z + 1),
gridDisplacements.getHost(ijk.x + 0, ijk.y + 1, ijk.z + 1),
gridDisplacements.getHost(ijk.x + 1, ijk.y + 1, ijk.z + 1)
};
real3 displacement = ar3d::trilinear(xyz, corners);
//update position
modifiedHighResMesh_->getPositions<3>()[i] =
highResMesh_->getPositions<3>()[i] +
glm::vec3(displacement.x, displacement.y, displacement.z);
}
}
//3. update normals
modifiedHighResMesh_->recalculateNormals();
//4. create batch
if (window_) {
if (modifiedHighResMesh_->hasColors())
highResBatch_ = cinder::gl::Batch::create(*modifiedHighResMesh_, cinder::gl::getStockShader(cinder::gl::ShaderDef().lambert().color()));
else
highResBatch_ = cinder::gl::Batch::create(*modifiedHighResMesh_, cinder::gl::getStockShader(cinder::gl::ShaderDef().lambert()));
}
}
}
void ar3d::VolumeVisualization::drawHighResMesh()
{
if (!hasHighResMesh()) {
//no high resolution mesh available, fallback to marching cubes
drawMarchingCubes();
return;
}
//update mesh
updateHighResMesh();
//draw it
highResBatch_->draw();
}
void ar3d::VolumeVisualization::drawCells()
{
if (input_.numActiveCells_ == 0 || (input_.cellSdfs_.size() == 0 && input_.mapping_.size() == 0)) return;
if (cellBatch_ == nullptr)
{
//create index buffer
int numCells = input_.numActiveCells_;
std::vector<real8> sdfHost(input_.cellSdfs_.size()); input_.cellSdfs_.copyToHost(&sdfHost[0]);
std::vector<int4> indexHost(input_.mapping_.size()); input_.mapping_.copyToHost(&indexHost[0]);
std::vector<unsigned> indexBuffer;
static const int CubeEdges[] = {
0,1, 0,2, 1,3, 2,3, 4,5, 4,6, 5,7, 6,7, 0,4, 1,5, 2,6, 3,7
};
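// A cell lies on the boundary when its eight SDF corner signs are mixed
// (bitmask not 0 or 255); each such cell contributes its 12 cube edges as lines.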
for (int i=0; i<numCells; ++i)
{
real8 sdf = sdfHost[i];
int c = 0;
if (sdf.first.x < 0) c |= 1;
if (sdf.first.y < 0) c |= 2;
if (sdf.first.z < 0) c |= 4;
if (sdf.first.w < 0) c |= 8;
if (sdf.second.x < 0) c |= 16;
if (sdf.second.y < 0) c |= 32;
if (sdf.second.z < 0) c |= 64;
if (sdf.second.w < 0) c |= 128;
if (c == 0 || c == 255) continue; //no boundary
//create cube
int4 index = indexHost[i];
int cellIndices[] = {
index.x, index.x + 1,
index.y, index.y + 1,
index.z, index.z + 1,
index.w, index.w + 1
};
for (int j=0; j<12; ++j)
{
indexBuffer.push_back(cellIndices[CubeEdges[2*j]]);
indexBuffer.push_back(cellIndices[CubeEdges[2*j+1]]);
}
}
//allocate vertex buffer
if (cellVertexBuffer_)
CUMAT_SAFE_CALL(cudaGLUnregisterBufferObject(cellVertexBuffer_->getId()));
cellVertexBuffer_ = cinder::gl::Vbo::create(GL_ARRAY_BUFFER, input_.numActiveNodes_ * sizeof(float3), nullptr, GL_DYNAMIC_DRAW);
CUMAT_SAFE_CALL(cudaGLRegisterBufferObject(cellVertexBuffer_->getId()));
cellValid_ = false;
//create vbo and batch
cinder::gl::VboRef indexBufferSolidSurface = cinder::gl::Vbo::create(GL_ELEMENT_ARRAY_BUFFER, indexBuffer, GL_STATIC_DRAW);
cinder::geom::BufferLayout layout;
layout.append(cinder::geom::Attrib::POSITION, cinder::geom::DataType::FLOAT, 3, sizeof(float3), 0);
cinder::gl::VboMeshRef vbo = cinder::gl::VboMesh::create(
input_.numActiveNodes_, GL_LINES, std::vector<std::pair<cinder::geom::BufferLayout, cinder::gl::VboRef>>({ std::make_pair(layout, cellVertexBuffer_) }),
static_cast<uint32_t>(indexBuffer.size()), GL_UNSIGNED_INT, indexBufferSolidSurface);
auto shader = cinder::gl::ShaderDef().color();
cellBatch_ = cinder::gl::Batch::create(vbo, cinder::gl::getStockShader(shader));
}
if (!cellValid_)
{
//update vertex buffer
cuMat::Matrix<float3, cuMat::Dynamic, 1, 1, cuMat::RowMajor> posFloat = positions_.cast<float3>();
void* dst;
CUMAT_SAFE_CALL(cudaGLMapBufferObject(&dst, cellVertexBuffer_->getId()));
CUMAT_SAFE_CALL(cudaDeviceSynchronize());
CUMAT_SAFE_CALL(cudaMemcpy(dst, posFloat.data(), sizeof(float3)*input_.numActiveNodes_, cudaMemcpyDeviceToDevice));
CUMAT_SAFE_CALL(cudaDeviceSynchronize());
CUMAT_SAFE_CALL(cudaGLUnmapBufferObject(cellVertexBuffer_->getId()));
cellValid_ = true;
}
{
//draw
cinder::gl::ScopedColor col(params_->gridCellColor_);
cinder::gl::ScopedFaceCulling c(false);
cellBatch_->draw();
}
}
void ar3d::VolumeVisualization::mouseWheel(cinder::app::MouseEvent& event)
{
if (event.isHandled()) return; //already handled
if (params_->mode_ != VolumeVisualizationParams::Mode::Slice) return; //only needed for slice rendering
if (!event.isControlDown()) return; //control must be pressed
//move slice
params_->slicePosition_ += event.getWheelIncrement() * 0.5;
event.setHandled();
}
|
d99bc8e6aed5fc8c0fb81ada55b494029f0c8638.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2022 Institute of Parallel and Distributed Systems, Shanghai Jiao Tong University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cstdio>
#include <hipcub/hipcub.hpp>
#include "../common.h"
#include "../constant.h"
#include "../device.h"
#include "../logging.h"
#include "../profiler.h"
#include "../timer.h"
#include "cuda_function.h"
#include "cuda_utils.h"
namespace samgraph {
namespace common {
namespace cuda {
namespace {
__global__ void sample_khop1(const IdType *indptr, const IdType *indices,
const IdType *input, const size_t num_input,
const size_t fanout, IdType *tmp_src,
IdType *tmp_dst, hiprandState_t *random_states,
size_t num_random_states) {
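// Grid-stride loop over num_input * fanout (node, slot) tasks: each task draws
// one neighbor of its node uniformly at random, or writes kEmptyKey if the node
// has no outgoing edges.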
size_t num_task = num_input * fanout;
size_t thread_id = threadIdx.x + blockDim.x * blockIdx.x;
size_t task_span = blockDim.x * gridDim.x;
assert(thread_id < num_random_states);
// cache the hiprand state
hiprandState_t local_state = random_states[thread_id];
for (size_t task_idx = thread_id; task_idx < num_task;
task_idx += task_span) {
const IdType rid = input[task_idx / fanout];
const IdType off = indptr[rid];
const IdType len = indptr[rid + 1] - indptr[rid];
if (len == 0) {
tmp_src[task_idx] = Constant::kEmptyKey;
tmp_dst[task_idx] = Constant::kEmptyKey;
} else {
size_t k = hiprand(&local_state) % len;
tmp_src[task_idx] = rid;
tmp_dst[task_idx] = indices[off + k];
}
}
// restore the state
random_states[thread_id] = local_state;
}
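// count_edge flags, for each entry of the sorted COO list, whether it is the
// last copy of a unique non-empty edge; an exclusive prefix sum over these
// flags later yields each edge's slot in the compacted output.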
__global__ void count_edge(IdType *src, IdType *dst, size_t *item_prefix,
size_t num_task) {
size_t thread_id = threadIdx.x + blockDim.x * blockIdx.x;
size_t task_span = blockDim.x * gridDim.x;
for (size_t task_idx = thread_id; task_idx < num_task;
task_idx += task_span) {
if (task_idx < (num_task - 1)) {
// when the thread is the last thread to get the value, it should be
// the one responsible for copying the edge to the output array
item_prefix[task_idx] = (src[task_idx] != src[task_idx + 1] ||
dst[task_idx] != dst[task_idx + 1]) &&
src[task_idx] != Constant::kEmptyKey;
} else {
item_prefix[task_idx] = src[task_idx] != Constant::kEmptyKey;
}
}
if (thread_id == 0) {
item_prefix[num_task] = 0;
}
}
__global__ void compact_edge(IdType *tmp_src, IdType *tmp_dst, IdType *out_src,
IdType *out_dst, size_t *item_prefix,
size_t num_task, size_t *num_out) {
size_t thread_id = threadIdx.x + blockDim.x * blockIdx.x;
size_t task_span = blockDim.x * gridDim.x;
for (size_t task_idx = thread_id; task_idx < num_task;
task_idx += task_span) {
bool cond;
if (task_idx < (num_task - 1)) {
cond = (tmp_src[task_idx] != tmp_src[task_idx + 1] ||
tmp_dst[task_idx] != tmp_dst[task_idx + 1]) &&
tmp_src[task_idx] != Constant::kEmptyKey;
} else {
cond = tmp_src[task_idx] != Constant::kEmptyKey;
}
if (cond) {
out_src[item_prefix[task_idx]] = tmp_src[task_idx];
out_dst[item_prefix[task_idx]] = tmp_dst[task_idx];
}
// out_src[item_prefix[task_idx]] = tmp_src[task_idx];
// out_dst[item_prefix[task_idx]] = tmp_dst[task_idx];
}
if (thread_id == 0) {
*num_out = item_prefix[num_task];
}
}
} // namespace
void GPUSampleKHop1(const IdType *indptr, const IdType *indices,
const IdType *input, const size_t num_input,
const size_t fanout, IdType *out_src, IdType *out_dst,
size_t *num_out, Context ctx, StreamHandle stream,
GPURandomStates *random_states, uint64_t task_key) {
LOG(DEBUG) << "GPUSample: begin with num_input " << num_input
<< " and fanout " << fanout;
Timer t0;
auto sampler_device = Device::Get(ctx);
auto cu_stream = static_cast<hipStream_t>(stream);
auto num_sample = num_input * fanout;
IdType *tmp_src = static_cast<IdType *>(
sampler_device->AllocWorkspace(ctx, sizeof(IdType) * num_sample));
IdType *tmp_dst = static_cast<IdType *>(
sampler_device->AllocWorkspace(ctx, sizeof(IdType) * num_sample));
LOG(DEBUG) << "GPUSample: cuda tmp_src malloc "
<< ToReadableSize(num_sample * sizeof(IdType));
LOG(DEBUG) << "GPUSample: cuda tmp_dst malloc "
<< ToReadableSize(num_sample * sizeof(IdType));
// 1. Sampling
size_t num_threads = Min(num_sample, Constant::kKHop1MaxThreads);
const dim3 grid(
RoundUpDiv(num_threads, static_cast<size_t>(Constant::kCudaBlockSize)));
const dim3 block(Constant::kCudaBlockSize);
hipLaunchKernelGGL(( sample_khop1), dim3(grid), dim3(block), 0, cu_stream,
indptr, indices, input, num_input, fanout, tmp_src, tmp_dst,
random_states->GetStates(), random_states->NumStates());
sampler_device->StreamSync(ctx, stream);
double sample_time = t0.Passed();
LOG(DEBUG) << "GPUSample: kernel sampling, time cost: " << sample_time;
// 2. Remove duplication.
// 2.1 COO pair sorting
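// cub's two-pass convention: the first SortPairs call (null temp storage) only
// reports temp_storage_bytes; the second call performs the actual radix sort of
// the edge list keyed on the source IDs.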
Timer t1;
size_t temp_storage_bytes = 0;
CUDA_CALL(hipcub::DeviceRadixSort::SortPairs(
nullptr, temp_storage_bytes, tmp_src, tmp_src, tmp_dst, tmp_dst,
num_sample, 0, sizeof(IdType) * 8, cu_stream));
sampler_device->StreamSync(ctx, stream);
void *d_temp_storage =
sampler_device->AllocWorkspace(ctx, temp_storage_bytes);
CUDA_CALL(hipcub::DeviceRadixSort::SortPairs(
d_temp_storage, temp_storage_bytes, tmp_src, tmp_src, tmp_dst, tmp_dst,
num_sample, 0, sizeof(IdType) * 8, cu_stream));
sampler_device->StreamSync(ctx, stream);
sampler_device->FreeWorkspace(ctx, d_temp_storage);
double sort_coo_time = t1.Passed();
LOG(DEBUG) << "GPUSample: sort the temporary results, time cost: "
<< sort_coo_time;
// 2.2 Count edges
Timer t2;
size_t *item_prefix = static_cast<size_t *>(
sampler_device->AllocWorkspace(ctx, sizeof(size_t) * (num_sample + 1)));
// num_sample + 1 entries: count_edge also writes item_prefix[num_sample]
LOG(DEBUG) << "GPUSample: cuda prefix_num malloc "
<< ToReadableSize(sizeof(size_t) * (num_sample + 1));
hipLaunchKernelGGL(( count_edge), dim3(grid), dim3(block), 0, cu_stream, tmp_src, tmp_dst, item_prefix,
num_sample);
sampler_device->StreamSync(ctx, stream);
temp_storage_bytes = 0;
CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(nullptr, temp_storage_bytes,
item_prefix, item_prefix,
num_sample + 1, cu_stream));
sampler_device->StreamSync(ctx, stream);
d_temp_storage = sampler_device->AllocWorkspace(ctx, temp_storage_bytes);
LOG(DEBUG) << "GPUSample: cuda temp_storage for ExclusiveSum malloc "
<< ToReadableSize(temp_storage_bytes);
CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
item_prefix, item_prefix,
num_sample + 1, cu_stream));
sampler_device->StreamSync(ctx, stream);
sampler_device->FreeWorkspace(ctx, d_temp_storage);
double count_edge_time = t2.Passed();
LOG(DEBUG) << "GPUSample: Count Edge time cost: " << count_edge_time;
// 2.3 Compact edges
Timer t3;
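// Scatter every surviving edge to the slot given by the exclusive prefix sum;
// thread 0 additionally stores the total number of unique edges in *num_out.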
hipLaunchKernelGGL(( compact_edge), dim3(grid), dim3(block), 0, cu_stream,
tmp_src, tmp_dst, out_src, out_dst, item_prefix, num_sample, num_out);
sampler_device->StreamSync(ctx, stream);
double compact_edge_time = t3.Passed();
LOG(DEBUG) << "GPUSample: compact_edge time cost: " << compact_edge_time;
sampler_device->FreeWorkspace(ctx, item_prefix);
sampler_device->FreeWorkspace(ctx, tmp_src);
sampler_device->FreeWorkspace(ctx, tmp_dst);
Profiler::Get().LogStepAdd(task_key, kLogL3KHopSampleCooTime, sample_time);
Profiler::Get().LogStepAdd(task_key, kLogL3KHopSampleSortCooTime,
sort_coo_time);
Profiler::Get().LogStepAdd(task_key, kLogL3KHopSampleCountEdgeTime,
count_edge_time);
Profiler::Get().LogStepAdd(task_key, kLogL3KHopSampleCompactEdgesTime,
compact_edge_time);
double total_time = t0.Passed();
LOG(DEBUG) << "GPUSample: succeed total time cost: " << total_time;
}
} // namespace cuda
} // namespace common
} // namespace samgraph
| d99bc8e6aed5fc8c0fb81ada55b494029f0c8638.cu | /*
* Copyright 2022 Institute of Parallel and Distributed Systems, Shanghai Jiao Tong University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <curand.h>
#include <curand_kernel.h>
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cstdio>
#include <cub/cub.cuh>
#include "../common.h"
#include "../constant.h"
#include "../device.h"
#include "../logging.h"
#include "../profiler.h"
#include "../timer.h"
#include "cuda_function.h"
#include "cuda_utils.h"
namespace samgraph {
namespace common {
namespace cuda {
namespace {
__global__ void sample_khop1(const IdType *indptr, const IdType *indices,
const IdType *input, const size_t num_input,
const size_t fanout, IdType *tmp_src,
IdType *tmp_dst, curandState *random_states,
size_t num_random_states) {
size_t num_task = num_input * fanout;
size_t thread_id = threadIdx.x + blockDim.x * blockIdx.x;
size_t task_span = blockDim.x * gridDim.x;
assert(thread_id < num_random_states);
// cache the curand state
curandState local_state = random_states[thread_id];
for (size_t task_idx = thread_id; task_idx < num_task;
task_idx += task_span) {
const IdType rid = input[task_idx / fanout];
const IdType off = indptr[rid];
const IdType len = indptr[rid + 1] - indptr[rid];
if (len == 0) {
tmp_src[task_idx] = Constant::kEmptyKey;
tmp_dst[task_idx] = Constant::kEmptyKey;
} else {
size_t k = curand(&local_state) % len;
tmp_src[task_idx] = rid;
tmp_dst[task_idx] = indices[off + k];
}
}
// restore the state
random_states[thread_id] = local_state;
}
__global__ void count_edge(IdType *src, IdType *dst, size_t *item_prefix,
size_t num_task) {
size_t thread_id = threadIdx.x + blockDim.x * blockIdx.x;
size_t task_span = blockDim.x * gridDim.x;
for (size_t task_idx = thread_id; task_idx < num_task;
task_idx += task_span) {
if (task_idx < (num_task - 1)) {
// when the thread is the last thread to get the value, it should be
// the one responsible for copying the edge to the output array
item_prefix[task_idx] = (src[task_idx] != src[task_idx + 1] ||
dst[task_idx] != dst[task_idx + 1]) &&
src[task_idx] != Constant::kEmptyKey;
} else {
item_prefix[task_idx] = src[task_idx] != Constant::kEmptyKey;
}
}
if (thread_id == 0) {
item_prefix[num_task] = 0;
}
}
__global__ void compact_edge(IdType *tmp_src, IdType *tmp_dst, IdType *out_src,
IdType *out_dst, size_t *item_prefix,
size_t num_task, size_t *num_out) {
size_t thread_id = threadIdx.x + blockDim.x * blockIdx.x;
size_t task_span = blockDim.x * gridDim.x;
for (size_t task_idx = thread_id; task_idx < num_task;
task_idx += task_span) {
bool cond;
if (task_idx < (num_task - 1)) {
cond = (tmp_src[task_idx] != tmp_src[task_idx + 1] ||
tmp_dst[task_idx] != tmp_dst[task_idx + 1]) &&
tmp_src[task_idx] != Constant::kEmptyKey;
} else {
cond = tmp_src[task_idx] != Constant::kEmptyKey;
}
if (cond) {
out_src[item_prefix[task_idx]] = tmp_src[task_idx];
out_dst[item_prefix[task_idx]] = tmp_dst[task_idx];
}
// out_src[item_prefix[task_idx]] = tmp_src[task_idx];
// out_dst[item_prefix[task_idx]] = tmp_dst[task_idx];
}
if (thread_id == 0) {
*num_out = item_prefix[num_task];
}
}
} // namespace
void GPUSampleKHop1(const IdType *indptr, const IdType *indices,
const IdType *input, const size_t num_input,
const size_t fanout, IdType *out_src, IdType *out_dst,
size_t *num_out, Context ctx, StreamHandle stream,
GPURandomStates *random_states, uint64_t task_key) {
LOG(DEBUG) << "GPUSample: begin with num_input " << num_input
<< " and fanout " << fanout;
Timer t0;
auto sampler_device = Device::Get(ctx);
auto cu_stream = static_cast<cudaStream_t>(stream);
auto num_sample = num_input * fanout;
IdType *tmp_src = static_cast<IdType *>(
sampler_device->AllocWorkspace(ctx, sizeof(IdType) * num_sample));
IdType *tmp_dst = static_cast<IdType *>(
sampler_device->AllocWorkspace(ctx, sizeof(IdType) * num_sample));
LOG(DEBUG) << "GPUSample: cuda tmp_src malloc "
<< ToReadableSize(num_sample * sizeof(IdType));
LOG(DEBUG) << "GPUSample: cuda tmp_dst malloc "
<< ToReadableSize(num_sample * sizeof(IdType));
// 1. Sampling
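// Launch geometry: the thread count is capped at kKHop1MaxThreads, and the
// grid-stride loop inside sample_khop1 covers any remaining tasks.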
size_t num_threads = Min(num_sample, Constant::kKHop1MaxThreads);
const dim3 grid(
RoundUpDiv(num_threads, static_cast<size_t>(Constant::kCudaBlockSize)));
const dim3 block(Constant::kCudaBlockSize);
sample_khop1<<<grid, block, 0, cu_stream>>>(
indptr, indices, input, num_input, fanout, tmp_src, tmp_dst,
random_states->GetStates(), random_states->NumStates());
sampler_device->StreamSync(ctx, stream);
double sample_time = t0.Passed();
LOG(DEBUG) << "GPUSample: kernel sampling, time cost: " << sample_time;
// 2. Remove duplication.
// 2.1 COO pair sorting
Timer t1;
size_t temp_storage_bytes = 0;
CUDA_CALL(cub::DeviceRadixSort::SortPairs(
nullptr, temp_storage_bytes, tmp_src, tmp_src, tmp_dst, tmp_dst,
num_sample, 0, sizeof(IdType) * 8, cu_stream));
sampler_device->StreamSync(ctx, stream);
void *d_temp_storage =
sampler_device->AllocWorkspace(ctx, temp_storage_bytes);
CUDA_CALL(cub::DeviceRadixSort::SortPairs(
d_temp_storage, temp_storage_bytes, tmp_src, tmp_src, tmp_dst, tmp_dst,
num_sample, 0, sizeof(IdType) * 8, cu_stream));
sampler_device->StreamSync(ctx, stream);
sampler_device->FreeWorkspace(ctx, d_temp_storage);
double sort_coo_time = t1.Passed();
LOG(DEBUG) << "GPUSample: sort the temporary results, time cost: "
<< sort_coo_time;
// 2.2 Count edges
Timer t2;
size_t *item_prefix = static_cast<size_t *>(
sampler_device->AllocWorkspace(ctx, sizeof(size_t) * (num_sample + 1)));
// num_sample + 1 entries: count_edge also writes item_prefix[num_sample]
LOG(DEBUG) << "GPUSample: cuda prefix_num malloc "
<< ToReadableSize(sizeof(size_t) * (num_sample + 1));
count_edge<<<grid, block, 0, cu_stream>>>(tmp_src, tmp_dst, item_prefix,
num_sample);
sampler_device->StreamSync(ctx, stream);
temp_storage_bytes = 0;
CUDA_CALL(cub::DeviceScan::ExclusiveSum(nullptr, temp_storage_bytes,
item_prefix, item_prefix,
num_sample + 1, cu_stream));
sampler_device->StreamSync(ctx, stream);
d_temp_storage = sampler_device->AllocWorkspace(ctx, temp_storage_bytes);
LOG(DEBUG) << "GPUSample: cuda temp_storage for ExclusiveSum malloc "
<< ToReadableSize(temp_storage_bytes);
CUDA_CALL(cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
item_prefix, item_prefix,
num_sample + 1, cu_stream));
sampler_device->StreamSync(ctx, stream);
sampler_device->FreeWorkspace(ctx, d_temp_storage);
double count_edge_time = t2.Passed();
LOG(DEBUG) << "GPUSample: Count Edge time cost: " << count_edge_time;
// 2.3 Compact edges
Timer t3;
compact_edge<<<grid, block, 0, cu_stream>>>(
tmp_src, tmp_dst, out_src, out_dst, item_prefix, num_sample, num_out);
sampler_device->StreamSync(ctx, stream);
double compact_edge_time = t3.Passed();
LOG(DEBUG) << "GPUSample: compact_edge time cost: " << compact_edge_time;
sampler_device->FreeWorkspace(ctx, item_prefix);
sampler_device->FreeWorkspace(ctx, tmp_src);
sampler_device->FreeWorkspace(ctx, tmp_dst);
Profiler::Get().LogStepAdd(task_key, kLogL3KHopSampleCooTime, sample_time);
Profiler::Get().LogStepAdd(task_key, kLogL3KHopSampleSortCooTime,
sort_coo_time);
Profiler::Get().LogStepAdd(task_key, kLogL3KHopSampleCountEdgeTime,
count_edge_time);
Profiler::Get().LogStepAdd(task_key, kLogL3KHopSampleCompactEdgesTime,
compact_edge_time);
double total_time = t0.Passed();
LOG(DEBUG) << "GPUSample: succeed total time cost: " << total_time;
}
} // namespace cuda
} // namespace common
} // namespace samgraph
|
aa79d8b62d2808d15f1766b3276731089c9f024f.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/hip/ForeachFunctors.cuh>
#include <ATen/native/hip/ForeachMinMaxFunctors.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_foreach_add_native.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_min_native.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_pow_native.h>
#include <ATen/ops/_foreach_sub_native.h>
#include <ATen/ops/empty_like_native.h>
#endif
namespace at::native {
template <typename T, template <class> class Op>
std::vector<Tensor> foreach_binary_op(
TensorList tensors,
const Scalar& scalar) {
std::vector<std::vector<at::Tensor>> tensor_lists;
std::vector<at::Tensor> vec_res;
vec_res.reserve(tensors.size());
for (const auto& t : tensors) {
vec_res.emplace_back(at::native::empty_like(t));
}
tensor_lists.emplace_back(tensors.vec());
tensor_lists.emplace_back(std::move(vec_res));
using opmath_t = at::opmath_type<T>;
multi_tensor_apply<2>(
tensor_lists,
BinaryOpScalarFunctor<
T,
/* depth */ 2,
/* r_args_depth */ 1,
/* res_arg_index */ 1>(),
Op<opmath_t>(),
scalar.to<opmath_t>());
return tensor_lists[1];
}
template <typename T, template <class> class Op>
void foreach_binary_op_(TensorList tensors, const Scalar& scalar) {
std::vector<std::vector<at::Tensor>> tensor_lists;
tensor_lists.emplace_back(tensors.vec());
using opmath_t = at::opmath_type<T>;
multi_tensor_apply<1>(
tensor_lists,
BinaryOpScalarFunctor<
T,
/* depth */ 1,
/* r_args_depth */ 1,
/* res_arg_index */ 0>(),
Op<opmath_t>(),
scalar.to<opmath_t>());
increment_version(tensors);
}
template <template <class> class Op>
std::vector<Tensor> all_types_complex_bool_half_bfloat16(
TensorList tensors,
const Scalar& scalar) {
return AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool,
kHalf,
kBFloat16,
tensors[0].scalar_type(),
"foreach_binary_op_scalar_cuda",
[&]() { return foreach_binary_op<scalar_t, Op>(tensors, scalar); });
}
template <template <class> class Op>
void all_types_complex_bool_half_bfloat16_(
TensorList tensors,
const Scalar& scalar) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool,
kHalf,
kBFloat16,
tensors[0].scalar_type(),
"foreach_binary_op_scalar_cuda_",
[&]() { foreach_binary_op_<scalar_t, Op>(tensors, scalar); });
}
template <template <class> class Op>
std::vector<Tensor> all_types_half_bfloat16(
TensorList tensors,
const Scalar& scalar) {
return AT_DISPATCH_ALL_TYPES_AND2(
kHalf,
kBFloat16,
tensors[0].scalar_type(),
"foreach_binary_op_scalar_cuda",
[&]() { return foreach_binary_op<scalar_t, Op>(tensors, scalar); });
}
template <template <class> class Op>
void all_types_half_bfloat16_(TensorList tensors, const Scalar& scalar) {
AT_DISPATCH_ALL_TYPES_AND2(
kHalf,
kBFloat16,
tensors[0].scalar_type(),
"foreach_binary_op_scalar_cuda_",
[&]() { foreach_binary_op_<scalar_t, Op>(tensors, scalar); });
}
template <template <class> class Op>
std::vector<Tensor> all_types_complex_half_bfloat16(
TensorList tensors,
const Scalar& scalar) {
return AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
kHalf,
kBFloat16,
tensors[0].scalar_type(),
"foreach_binary_op_scalar_cuda",
[&]() { return foreach_binary_op<scalar_t, Op>(tensors, scalar); });
}
template <template <class> class Op>
void all_types_complex_half_bfloat16_(
TensorList tensors,
const Scalar& scalar) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
kHalf,
kBFloat16,
tensors[0].scalar_type(),
"foreach_binary_op_scalar_cuda_",
[&]() { foreach_binary_op_<scalar_t, Op>(tensors, scalar); });
}
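// Generates the out-of-place and in-place entry points for one scalar binary
// op: both validate the tensor list, fall back to the unfused "slow" kernels
// when the fast route is not applicable, and otherwise dispatch to the fused
// multi_tensor_apply path selected by FUNCTION.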
#define FOREACH_BINARY_OP_SCALAR(FUNCTION, NAME, OP, DIVISION_OP) \
void foreach_tensor_##NAME##_scalar_kernel_cuda_( \
TensorList tensors, const Scalar& scalar) { \
check_foreach_api_restrictions(tensors); \
if (!can_use_fast_route(tensors, scalar, DIVISION_OP)) { \
return at::native::foreach_tensor_##NAME##_scalar_kernel_slow_( \
tensors, scalar); \
} \
\
FUNCTION##_<OP>(tensors, scalar); \
} \
\
std::vector<Tensor> foreach_tensor_##NAME##_scalar_kernel_cuda( \
TensorList tensors, const Scalar& scalar) { \
check_foreach_api_restrictions(tensors); \
if (!can_use_fast_route(tensors, scalar, DIVISION_OP)) { \
return at::native::foreach_tensor_##NAME##_scalar_kernel_slow( \
tensors, scalar); \
} \
\
return FUNCTION<OP>(tensors, scalar); \
}
FOREACH_BINARY_OP_SCALAR(
all_types_complex_bool_half_bfloat16,
add,
std::plus,
/*div_op*/ false);
FOREACH_BINARY_OP_SCALAR(
all_types_complex_bool_half_bfloat16,
mul,
std::multiplies,
/*div_op*/ false);
// See [Why is foreach_pow's division_op=true?]
FOREACH_BINARY_OP_SCALAR(
all_types_complex_half_bfloat16,
pow,
power_functor,
/*div_op*/ true);
std::vector<Tensor> foreach_scalar_pow_list_kernel_cuda(
const Scalar& scalar,
TensorList exponent) {
check_foreach_api_restrictions(exponent);
if (!can_use_fast_route(exponent)) {
return at::native::foreach_scalar_pow_list_kernel_slow(scalar, exponent);
}
return all_types_complex_half_bfloat16<reverse_power_functor>(
exponent, scalar);
}
// In the case of division, integer inputs will result in float.
// Currently multi tensor apply can only return result of the same type as
// input.
FOREACH_BINARY_OP_SCALAR(
all_types_complex_bool_half_bfloat16,
div,
std::divides,
/*div_op*/ true);
// In the case of subtraction, we don't allow the scalar to be a boolean,
// following the torch.sub logic
void foreach_tensor_sub_scalar_kernel_cuda_(
TensorList tensors,
const Scalar& scalar) {
check_foreach_api_restrictions(tensors);
at::native::sub_check(tensors[0], scalar);
if (!can_use_fast_route(tensors, scalar)) {
return at::native::foreach_tensor_sub_scalar_kernel_slow_(tensors, scalar);
}
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool,
kHalf,
kBFloat16,
tensors[0].scalar_type(),
"foreach_binary_op_scalar_cuda_",
[&]() { foreach_binary_op_<scalar_t, std::minus>(tensors, scalar); });
}
std::vector<Tensor> foreach_tensor_sub_scalar_kernel_cuda(
TensorList tensors,
const Scalar& scalar) {
check_foreach_api_restrictions(tensors);
at::native::sub_check(tensors[0], scalar);
if (!can_use_fast_route(tensors, scalar)) {
return at::native::foreach_tensor_sub_scalar_kernel_slow(tensors, scalar);
}
return AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool,
kHalf,
kBFloat16,
tensors[0].scalar_type(),
"foreach_binary_op_scalar_cuda",
[&]() {
return foreach_binary_op<scalar_t, std::minus>(tensors, scalar);
});
}
FOREACH_BINARY_OP_SCALAR(all_types_half_bfloat16, clamp_max, minimum, false);
FOREACH_BINARY_OP_SCALAR(all_types_half_bfloat16, clamp_min, maximum, false);
} // namespace at::native
| aa79d8b62d2808d15f1766b3276731089c9f024f.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/ForeachFunctors.cuh>
#include <ATen/native/cuda/ForeachMinMaxFunctors.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_foreach_add_native.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_min_native.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_pow_native.h>
#include <ATen/ops/_foreach_sub_native.h>
#include <ATen/ops/empty_like_native.h>
#endif
namespace at::native {
template <typename T, template <class> class Op>
std::vector<Tensor> foreach_binary_op(
TensorList tensors,
const Scalar& scalar) {
std::vector<std::vector<at::Tensor>> tensor_lists;
std::vector<at::Tensor> vec_res;
vec_res.reserve(tensors.size());
for (const auto& t : tensors) {
vec_res.emplace_back(at::native::empty_like(t));
}
tensor_lists.emplace_back(tensors.vec());
tensor_lists.emplace_back(std::move(vec_res));
using opmath_t = at::opmath_type<T>;
multi_tensor_apply<2>(
tensor_lists,
BinaryOpScalarFunctor<
T,
/* depth */ 2,
/* r_args_depth */ 1,
/* res_arg_index */ 1>(),
Op<opmath_t>(),
scalar.to<opmath_t>());
return tensor_lists[1];
}
template <typename T, template <class> class Op>
void foreach_binary_op_(TensorList tensors, const Scalar& scalar) {
std::vector<std::vector<at::Tensor>> tensor_lists;
tensor_lists.emplace_back(tensors.vec());
using opmath_t = at::opmath_type<T>;
multi_tensor_apply<1>(
tensor_lists,
BinaryOpScalarFunctor<
T,
/* depth */ 1,
/* r_args_depth */ 1,
/* res_arg_index */ 0>(),
Op<opmath_t>(),
scalar.to<opmath_t>());
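// Bump each tensor's version counter so autograd sees the in-place update.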
increment_version(tensors);
}
template <template <class> class Op>
std::vector<Tensor> all_types_complex_bool_half_bfloat16(
TensorList tensors,
const Scalar& scalar) {
return AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool,
kHalf,
kBFloat16,
tensors[0].scalar_type(),
"foreach_binary_op_scalar_cuda",
[&]() { return foreach_binary_op<scalar_t, Op>(tensors, scalar); });
}
template <template <class> class Op>
void all_types_complex_bool_half_bfloat16_(
TensorList tensors,
const Scalar& scalar) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool,
kHalf,
kBFloat16,
tensors[0].scalar_type(),
"foreach_binary_op_scalar_cuda_",
[&]() { foreach_binary_op_<scalar_t, Op>(tensors, scalar); });
}
template <template <class> class Op>
std::vector<Tensor> all_types_half_bfloat16(
TensorList tensors,
const Scalar& scalar) {
return AT_DISPATCH_ALL_TYPES_AND2(
kHalf,
kBFloat16,
tensors[0].scalar_type(),
"foreach_binary_op_scalar_cuda",
[&]() { return foreach_binary_op<scalar_t, Op>(tensors, scalar); });
}
template <template <class> class Op>
void all_types_half_bfloat16_(TensorList tensors, const Scalar& scalar) {
AT_DISPATCH_ALL_TYPES_AND2(
kHalf,
kBFloat16,
tensors[0].scalar_type(),
"foreach_binary_op_scalar_cuda_",
[&]() { foreach_binary_op_<scalar_t, Op>(tensors, scalar); });
}
template <template <class> class Op>
std::vector<Tensor> all_types_complex_half_bfloat16(
TensorList tensors,
const Scalar& scalar) {
return AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
kHalf,
kBFloat16,
tensors[0].scalar_type(),
"foreach_binary_op_scalar_cuda",
[&]() { return foreach_binary_op<scalar_t, Op>(tensors, scalar); });
}
template <template <class> class Op>
void all_types_complex_half_bfloat16_(
TensorList tensors,
const Scalar& scalar) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
kHalf,
kBFloat16,
tensors[0].scalar_type(),
"foreach_binary_op_scalar_cuda_",
[&]() { foreach_binary_op_<scalar_t, Op>(tensors, scalar); });
}
#define FOREACH_BINARY_OP_SCALAR(FUNCTION, NAME, OP, DIVISION_OP) \
void foreach_tensor_##NAME##_scalar_kernel_cuda_( \
TensorList tensors, const Scalar& scalar) { \
check_foreach_api_restrictions(tensors); \
if (!can_use_fast_route(tensors, scalar, DIVISION_OP)) { \
return at::native::foreach_tensor_##NAME##_scalar_kernel_slow_( \
tensors, scalar); \
} \
\
FUNCTION##_<OP>(tensors, scalar); \
} \
\
std::vector<Tensor> foreach_tensor_##NAME##_scalar_kernel_cuda( \
TensorList tensors, const Scalar& scalar) { \
check_foreach_api_restrictions(tensors); \
if (!can_use_fast_route(tensors, scalar, DIVISION_OP)) { \
return at::native::foreach_tensor_##NAME##_scalar_kernel_slow( \
tensors, scalar); \
} \
\
return FUNCTION<OP>(tensors, scalar); \
}
FOREACH_BINARY_OP_SCALAR(
all_types_complex_bool_half_bfloat16,
add,
std::plus,
/*div_op*/ false);
FOREACH_BINARY_OP_SCALAR(
all_types_complex_bool_half_bfloat16,
mul,
std::multiplies,
/*div_op*/ false);
// See [Why is foreach_pow's division_op=true?]
FOREACH_BINARY_OP_SCALAR(
all_types_complex_half_bfloat16,
pow,
power_functor,
/*div_op*/ true);
std::vector<Tensor> foreach_scalar_pow_list_kernel_cuda(
const Scalar& scalar,
TensorList exponent) {
check_foreach_api_restrictions(exponent);
if (!can_use_fast_route(exponent)) {
return at::native::foreach_scalar_pow_list_kernel_slow(scalar, exponent);
}
return all_types_complex_half_bfloat16<reverse_power_functor>(
exponent, scalar);
}
// In the case of division, integer inputs will result in float.
// Currently multi tensor apply can only return result of the same type as
// input.
FOREACH_BINARY_OP_SCALAR(
all_types_complex_bool_half_bfloat16,
div,
std::divides,
/*div_op*/ true);
// In the case of subtraction, we don't allow the scalar to be a boolean,
// following the torch.sub logic
void foreach_tensor_sub_scalar_kernel_cuda_(
TensorList tensors,
const Scalar& scalar) {
check_foreach_api_restrictions(tensors);
at::native::sub_check(tensors[0], scalar);
if (!can_use_fast_route(tensors, scalar)) {
return at::native::foreach_tensor_sub_scalar_kernel_slow_(tensors, scalar);
}
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool,
kHalf,
kBFloat16,
tensors[0].scalar_type(),
"foreach_binary_op_scalar_cuda_",
[&]() { foreach_binary_op_<scalar_t, std::minus>(tensors, scalar); });
}
std::vector<Tensor> foreach_tensor_sub_scalar_kernel_cuda(
TensorList tensors,
const Scalar& scalar) {
check_foreach_api_restrictions(tensors);
at::native::sub_check(tensors[0], scalar);
if (!can_use_fast_route(tensors, scalar)) {
return at::native::foreach_tensor_sub_scalar_kernel_slow(tensors, scalar);
}
return AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool,
kHalf,
kBFloat16,
tensors[0].scalar_type(),
"foreach_binary_op_scalar_cuda",
[&]() {
return foreach_binary_op<scalar_t, std::minus>(tensors, scalar);
});
}
FOREACH_BINARY_OP_SCALAR(all_types_half_bfloat16, clamp_max, minimum, false);
FOREACH_BINARY_OP_SCALAR(all_types_half_bfloat16, clamp_min, maximum, false);
} // namespace at::native
|
89414d2bdda67dbd8283412162fb757caa8ff296.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/native/sparse/SparseUtils.h>
#include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh>
#include <ATen/native/sparse/hip/SparseHIPBlas.cuh>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <THH/THHTensorMathPointwise.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/system/hip/execution_policy.h>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
#ifndef __HIP_PLATFORM_HCC__
namespace {
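// Converts coalesced COO row indices into a CSR row-pointer array of length
// dim + 1 via cusparse's Xcoo2csr, as required by csrmm2 below.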
IntTensor _to_csr_int(const LongTensor& rowIndices, int64_t dim, int64_t nnz) {
IntTensor csr = at::empty({dim+1}, CUDA(kInt));
IntTensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data<int32_t>(), nnz, dim, csr.data<int32_t>());
return csr;
}
}
#endif
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensorRef, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, Scalar beta, Scalar alpha) {
#ifndef __HIP_PLATFORM_HCC__
AT_ASSERT(t.is_cuda()); // dispatch argument
AT_CHECK(r_.is_cuda(), "addmm: expected 'out' to be CUDA, but got CPU");
AT_CHECK(sparse_.is_cuda(), "addmm: expected 'mat1' to be CUDA, but got CPU");
AT_CHECK(dense.is_cuda(), "addmm: expected 'mat2' to be CUDA, but got CPU");
AT_CHECK(_check_device({sparse_, r_, t, dense}));
// TODO: This error message seems awfully opaque
AT_CHECK(sparse_._sparseDims() == 2, "addmm: 2D tensor expected, got ", sparse_._sparseDims(), "D tensor");
AT_CHECK(sparse_._denseDims() == 0, "addmm: scalar values expected, got ", sparse_._denseDims(), "D values");
AT_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
AT_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
AT_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
AT_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
LongTensor rowIndices = indices.select(0, 0);
LongTensor colIndices = indices.select(0, 1);
IntTensor csr = _to_csr_int(rowIndices, m, nnz);
IntTensor colIndicesInt = at::empty({colIndices.size(0)}, indices.type().toScalarType(kInt));
colIndicesInt.copy_(colIndices);
// No half support, so we don't have to use CUDATypeConversion
Tensor r__;
AT_DISPATCH_FLOATING_TYPES(
values.type(), "addmm_sparse_cuda", [&] {
scalar_t cast_beta = beta.to<scalar_t>();
scalar_t cast_alpha = alpha.to<scalar_t>();
if (cast_beta == 0) {
r_.zero_();
} else if (cast_beta == 1) {
if (!isSameTensor(t, r_)) {
r_.copy_(t);
}
} else {
at::mul_out(r_, t, beta);
}
/* r_ */
if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) {
r__ = r_;
} else {
// TODO: how... strange
r__ = r_.transpose(0, 1).clone();
r__.transpose_(0, 1);
}
/* dense */
Tensor dense_;
char transpose_dense;
if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) {
transpose_dense = 'n';
dense_ = dense;
} else if(dense.stride(1) == 1 && dense.stride(0) != dense.size(1)) {
transpose_dense = 't';
dense_ = dense;
} else {
transpose_dense = 't';
dense_ = dense.contiguous();
}
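// csrmm2 computes r__ = alpha * op(sparse CSR) * dense_ + beta * r__; r__ was
// made column-major above because cusparse expects column-major dense output.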
sparse::cuda::csrmm2(
'n',
transpose_dense,
m,
n,
k,
nnz,
cast_alpha,
values.data<scalar_t>(),
csr.data<int32_t>(),
colIndicesInt.data<int32_t>(),
dense_.data<scalar_t>(),
(transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)),
cast_beta,
r__.data<scalar_t>(),
r__.stride(1));
});
r_.copy_(r__);
return r_;
#else
AT_ERROR("s_addmm_out_sparse_dense_cuda: HIP not supported");
#endif
}
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
Tensor r = t.type().tensor();
s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha);
return r;
}
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
SparseTensor& hspmm_out_sparse_cuda(SparseTensor& r_, const SparseTensor& sparse_, const Tensor& dense/* , Scalar alpha */) {
#ifndef __HIP_PLATFORM_HCC__
AT_ASSERT(sparse_.is_cuda()); // dispatch argument
AT_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
AT_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
AT_CHECK(_check_device({r_, sparse_, dense}));
AT_CHECK(sparse_._sparseDims() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_._sparseDims(), "D tensor");
AT_CHECK(sparse_._denseDims() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_._denseDims(), "D values");
AT_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
AT_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
_get_sparse_impl(r_)->raw_resize_(1, 1, {m, n});
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.type());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
LongTensor spIndices = newSparse._indices();
LongTensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
_get_sparse_impl(newSparse)->_sizes_mut()[0] = nnz; // TODO: use something safer)
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
_get_sparse_impl(r_)->set_indices_and_values(indices, values);
return r_;
#else
AT_ERROR("hspmm_out_sparse_cuda: HIP not supported");
#endif
}
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
SparseTensor r = sparse.type().tensor();
hspmm_out_sparse_cuda(r, sparse, dense);
return r;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensorRef, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, SparseTensorRef sparse_, at::Scalar value) {
#ifndef __HIP_PLATFORM_HCC__
const SparseTensor& sparse = sparse_.tref;
AT_ASSERT(dense.is_cuda()); // dispatch argument
AT_CHECK(sparse.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
AT_CHECK(_check_device({sparse, r_, dense}));
AT_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
Tensor r = r_;
if (!isSameTensor(r, dense)) {
r_.resize_as_(dense);
r_.copy_(dense);
} else {
AT_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
r = r_.contiguous();
}
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
int64_t nDim = dense.dim();
int64_t nDimI = sparse._sparseDims();
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::cuda::getCurrentCUDAStreamOnDevice(curDevice);
if (sparse._denseDims() == 0) {
AT_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND_HALF(
values.type(), "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernelScalar<TensorCAddOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r_), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
} else {
AT_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND_HALF(
values.type(), "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r_), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
}
} else {
LongTensor indices1D = _newFlattenedIndices(sparse, 0).squeeze_(0).narrow(0, 0, nnz);
// FIXME: at some point we can wrap the scale into indexAdd
// NB: Purposely not inplace!
AT_DISPATCH_ALL_TYPES_AND_HALF(
values.type(), "add_out_dense_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
values = values.mul(value);
}
});
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.narrow(0, 0, nnz).reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values);
}
THCudaCheck(hipGetLastError());
return r_;
#else
AT_ERROR("add_out_dense_sparse_cuda: HIP not supported");
#endif
}
Tensor add_dense_sparse_cuda(const Tensor& t, SparseTensorRef src, Scalar alpha) {
Tensor r = t.type().tensor();
add_out_dense_sparse_cuda(r, t, src, alpha);
return r;
}
Tensor& add_dense_sparse_cuda_(Tensor& t, SparseTensorRef src, Scalar alpha) {
return add_out_dense_sparse_cuda(t, t, src, alpha);
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& s_add_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t, const SparseTensor& src, Scalar value) {
#ifndef __HIP_PLATFORM_HCC__
AT_ASSERT(t.is_cuda()); // dispatch argument
AT_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
AT_CHECK(_check_device({r_, t, src}));
AT_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
if (src._nnz() == 0) {
return raw_copy_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
AT_CHECK(_is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t._sparseDims(), " sparse dimensions while 'other' has ", src._sparseDims(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
LongTensor t_indices_ = t._indices();
Tensor t_values_ = t._values();
LongTensor s_indices_ = src._indices();
Tensor s_values_ = src._values();
AT_DISPATCH_ALL_TYPES_AND_HALF(
s_values_.type(), "s_add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
s_values_ = s_values_.mul(value);
}
});
LongTensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
r_.resize_as_(src);
_alias_into_sparse(r_, r_indices_, r_values_);
// FIXME: add some heuristic about when to call coalesce() here, so that
// tensors don't totally blow up in size by concatenation; e.g.
// r->minUnique = max(a->minUnique + b->minUnique);
// if (r->nnz / r->minUnique > COMPACTION_THRESHOLD) {
// THCSTensor_(contiguous)(r);
// r->minUnique = r->nnz;
// }
return r_;
#else
AT_ERROR("s_add_out_sparse_cuda: HIP not supported");
#endif
}
SparseTensor s_add_sparse_cuda(const SparseTensor& t, const SparseTensor& src, Scalar alpha) {
SparseTensor r = t.type().tensor();
s_add_out_sparse_cuda(r, t, src, alpha);
return r;
}
SparseTensor& s_add_sparse_cuda_(SparseTensor& t, const SparseTensor& src, Scalar alpha) {
return s_add_out_sparse_cuda(t, t, src, alpha);
}
// --------------------------------------------------------------------
// sub(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& s_sub_out_sparse_cuda(SparseTensor& r, const SparseTensor& t, const SparseTensor& src, Scalar value) {
AT_ASSERT(t.is_cuda()); // dispatch argument
AT_CHECK(src.is_cuda(), "sub: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r.is_cuda(), "sub: expected 'out' to be CUDA, but got CPU");
AT_DISPATCH_ALL_TYPES(
t.type(), "sub_sparse", [&] {
scalar_t cast_value = value.to<scalar_t>();
s_add_out_sparse_cuda(r, t, src, -cast_value);
}
);
return r;
}
SparseTensor s_sub_sparse_cuda(const SparseTensor& t, const SparseTensor& src, Scalar alpha) {
SparseTensor r = t.type().tensor();
s_sub_out_sparse_cuda(r, t, src, alpha);
return r;
}
SparseTensor& s_sub_sparse_cuda_(SparseTensor& t, const SparseTensor& src, Scalar alpha) {
return s_sub_out_sparse_cuda(t, t, src, alpha);
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& s_mul_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t_, const SparseTensor& src_) {
#ifndef __HIP_PLATFORM_HCC__
AT_ASSERT(t_.is_cuda()); // dispatch argument
AT_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
AT_CHECK(_check_device({r_, t_, src_}));
AT_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
if (src_._nnz() == 0 || t_._nnz() == 0) {
return r_.zero_();
}
// saving those because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = ::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
int64_t sparseDims = src._sparseDims();
LongTensor t_indices_ = t._indices();
Tensor t_values_ = t._values();
LongTensor s_indices_ = src._indices();
Tensor s_values_ = src._values();
LongTensor r_indices_ = t_indices_.type().tensor({sparseDims, max_nnz});
Tensor r_values_ = _new_values_with_size_of(t_values_, max_nnz).zero_();
r_.resize_as_(src);
_get_sparse_impl(r_)->set_indices_and_values(r_indices_, r_values_); // TODO: sigh
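  // Launch configuration for the value intersection kernel: the block size is
  // capped at the dense slice size (valueSize = t_values_.stride(0)) and the
  // grid is computed by getApplyGrid over valueSize.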
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::cuda::getCurrentCUDAStreamOnDevice(curDevice);
AT_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
LongTensor resultNnz = at::empty({1}, CUDA(kLong));
AT_DISPATCH_ALL_TYPES_AND_HALF(
t_values_.type(), "s_mul_out_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::valueSparseIntersectionKernel<TensorMulOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
THCudaCheck(hipGetLastError());
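        // Single-threaded pass over the indices: the resulting nnz is written
        // into resultNnz and copied back to the host below.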
hipLaunchKernelGGL(( apply::indexSparseIntersectionKernel<uint64_t, scalar_t>)
, dim3(1), dim3(1), 0, stream,
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
THCudaCheck(hipGetLastError());
});
// sync! (surely there is a more idiomatic way to do this...)
LongTensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
_get_sparse_impl(r_)->set_nnz(cpu_resultNnz.accessor<int64_t, 1>()[0]);
_get_sparse_impl(r_)->set_coalesced(true);
return r_;
#else
AT_ERROR("s_mul_out_sparse_cuda: HIP not supported");
#endif
}
SparseTensor s_mul_sparse_cuda(const SparseTensor& t, const SparseTensor& src) {
SparseTensor r = t.type().tensor();
s_mul_out_sparse_cuda(r, t, src);
return r;
}
SparseTensor& s_mul_sparse_cuda_(SparseTensor& t, const SparseTensor& src) {
return s_mul_out_sparse_cuda(t, t, src);
}
}} // namespace at::native
| 89414d2bdda67dbd8283412162fb757caa8ff296.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/native/sparse/SparseUtils.h>
#include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh>
#include <ATen/native/sparse/cuda/SparseCUDABlas.cuh>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <THC/THCTensorMathPointwise.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/system/cuda/execution_policy.h>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
#ifndef __HIP_PLATFORM_HCC__
namespace {
IntTensor _to_csr_int(const LongTensor& rowIndices, int64_t dim, int64_t nnz) {
IntTensor csr = at::empty({dim+1}, CUDA(kInt));
IntTensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data<int32_t>(), nnz, dim, csr.data<int32_t>());
return csr;
}
}
#endif
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensorRef, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, Scalar beta, Scalar alpha) {
#ifndef __HIP_PLATFORM_HCC__
AT_ASSERT(t.is_cuda()); // dispatch argument
AT_CHECK(r_.is_cuda(), "addmm: expected 'out' to be CUDA, but got CPU");
AT_CHECK(sparse_.is_cuda(), "addmm: expected 'mat1' to be CUDA, but got CPU");
AT_CHECK(dense.is_cuda(), "addmm: expected 'mat2' to be CUDA, but got CPU");
AT_CHECK(_check_device({sparse_, r_, t, dense}));
// TODO: This error message seems awfully opaque
AT_CHECK(sparse_._sparseDims() == 2, "addmm: 2D tensor expected, got ", sparse_._sparseDims(), "D tensor");
AT_CHECK(sparse_._denseDims() == 0, "addmm: scalar values expected, got ", sparse_._denseDims(), "D values");
AT_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
AT_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
AT_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
AT_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
LongTensor rowIndices = indices.select(0, 0);
LongTensor colIndices = indices.select(0, 1);
IntTensor csr = _to_csr_int(rowIndices, m, nnz);
IntTensor colIndicesInt = at::empty({colIndices.size(0)}, indices.type().toScalarType(kInt));
colIndicesInt.copy_(colIndices);
// No half support, so we don't have to use CUDATypeConversion
Tensor r__;
AT_DISPATCH_FLOATING_TYPES(
values.type(), "addmm_sparse_cuda", [&] {
scalar_t cast_beta = beta.to<scalar_t>();
scalar_t cast_alpha = alpha.to<scalar_t>();
if (cast_beta == 0) {
r_.zero_();
} else if (cast_beta == 1) {
if (!isSameTensor(t, r_)) {
r_.copy_(t);
}
} else {
at::mul_out(r_, t, beta);
}
/* r_ */
if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) {
r__ = r_;
} else {
// TODO: how... strange
r__ = r_.transpose(0, 1).clone();
r__.transpose_(0, 1);
}
/* dense */
Tensor dense_;
char transpose_dense;
if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) {
transpose_dense = 'n';
dense_ = dense;
} else if(dense.stride(1) == 1 && dense.stride(0) != dense.size(1)) {
transpose_dense = 't';
dense_ = dense;
} else {
transpose_dense = 't';
dense_ = dense.contiguous();
}
sparse::cuda::csrmm2(
'n',
transpose_dense,
m,
n,
k,
nnz,
cast_alpha,
values.data<scalar_t>(),
csr.data<int32_t>(),
colIndicesInt.data<int32_t>(),
dense_.data<scalar_t>(),
(transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)),
cast_beta,
r__.data<scalar_t>(),
r__.stride(1));
});
r_.copy_(r__);
return r_;
#else
AT_ERROR("s_addmm_out_sparse_dense_cuda: HIP not supported");
#endif
}
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
Tensor r = t.type().tensor();
s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha);
return r;
}
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
SparseTensor& hspmm_out_sparse_cuda(SparseTensor& r_, const SparseTensor& sparse_, const Tensor& dense/* , Scalar alpha */) {
#ifndef __HIP_PLATFORM_HCC__
AT_ASSERT(sparse_.is_cuda()); // dispatch argument
AT_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
AT_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
AT_CHECK(_check_device({r_, sparse_, dense}));
AT_CHECK(sparse_._sparseDims() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_._sparseDims(), "D tensor");
AT_CHECK(sparse_._denseDims() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_._denseDims(), "D values");
AT_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
AT_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
_get_sparse_impl(r_)->raw_resize_(1, 1, {m, n});
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.type());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
LongTensor spIndices = newSparse._indices();
LongTensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
  _get_sparse_impl(newSparse)->_sizes_mut()[0] = nnz; // TODO: use something safer
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
_get_sparse_impl(r_)->set_indices_and_values(indices, values);
return r_;
#else
AT_ERROR("hspmm_out_sparse_cuda: HIP not supported");
#endif
}
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
SparseTensor r = sparse.type().tensor();
hspmm_out_sparse_cuda(r, sparse, dense);
return r;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensorRef, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, SparseTensorRef sparse_, at::Scalar value) {
#ifndef __HIP_PLATFORM_HCC__
const SparseTensor& sparse = sparse_.tref;
AT_ASSERT(dense.is_cuda()); // dispatch argument
AT_CHECK(sparse.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
AT_CHECK(_check_device({sparse, r_, dense}));
AT_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
Tensor r = r_;
if (!isSameTensor(r, dense)) {
r_.resize_as_(dense);
r_.copy_(dense);
} else {
AT_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
r = r_.contiguous();
}
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
int64_t nDim = dense.dim();
int64_t nDimI = sparse._sparseDims();
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStreamOnDevice(curDevice);
if (sparse._denseDims() == 0) {
AT_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND_HALF(
values.type(), "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernelScalar<TensorCAddOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r_), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
} else {
AT_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND_HALF(
values.type(), "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r_), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
}
} else {
LongTensor indices1D = _newFlattenedIndices(sparse, 0).squeeze_(0).narrow(0, 0, nnz);
// FIXME: at some point we can wrap the scale into indexAdd
// NB: Purposely not inplace!
AT_DISPATCH_ALL_TYPES_AND_HALF(
values.type(), "add_out_dense_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
values = values.mul(value);
}
});
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.narrow(0, 0, nnz).reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values);
}
THCudaCheck(cudaGetLastError());
return r_;
#else
AT_ERROR("add_out_dense_sparse_cuda: HIP not supported");
#endif
}
Tensor add_dense_sparse_cuda(const Tensor& t, SparseTensorRef src, Scalar alpha) {
Tensor r = t.type().tensor();
add_out_dense_sparse_cuda(r, t, src, alpha);
return r;
}
Tensor& add_dense_sparse_cuda_(Tensor& t, SparseTensorRef src, Scalar alpha) {
return add_out_dense_sparse_cuda(t, t, src, alpha);
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& s_add_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t, const SparseTensor& src, Scalar value) {
#ifndef __HIP_PLATFORM_HCC__
AT_ASSERT(t.is_cuda()); // dispatch argument
AT_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
AT_CHECK(_check_device({r_, t, src}));
AT_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
if (src._nnz() == 0) {
return raw_copy_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
AT_CHECK(_is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t._sparseDims(), " sparse dimensions while 'other' has ", src._sparseDims(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
LongTensor t_indices_ = t._indices();
Tensor t_values_ = t._values();
LongTensor s_indices_ = src._indices();
Tensor s_values_ = src._values();
AT_DISPATCH_ALL_TYPES_AND_HALF(
s_values_.type(), "s_add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
s_values_ = s_values_.mul(value);
}
});
LongTensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
r_.resize_as_(src);
_alias_into_sparse(r_, r_indices_, r_values_);
// FIXME: add some heuristic about when to call coalesce() here, so that
// tensors don't totally blow up in size by concatenation; e.g.
// r->minUnique = max(a->minUnique + b->minUnique);
// if (r->nnz / r->minUnique > COMPACTION_THRESHOLD) {
// THCSTensor_(contiguous)(r);
// r->minUnique = r->nnz;
// }
return r_;
#else
AT_ERROR("s_add_out_sparse_cuda: HIP not supported");
#endif
}
SparseTensor s_add_sparse_cuda(const SparseTensor& t, const SparseTensor& src, Scalar alpha) {
SparseTensor r = t.type().tensor();
s_add_out_sparse_cuda(r, t, src, alpha);
return r;
}
SparseTensor& s_add_sparse_cuda_(SparseTensor& t, const SparseTensor& src, Scalar alpha) {
return s_add_out_sparse_cuda(t, t, src, alpha);
}
// --------------------------------------------------------------------
// sub(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& s_sub_out_sparse_cuda(SparseTensor& r, const SparseTensor& t, const SparseTensor& src, Scalar value) {
AT_ASSERT(t.is_cuda()); // dispatch argument
AT_CHECK(src.is_cuda(), "sub: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r.is_cuda(), "sub: expected 'out' to be CUDA, but got CPU");
AT_DISPATCH_ALL_TYPES(
t.type(), "sub_sparse", [&] {
scalar_t cast_value = value.to<scalar_t>();
s_add_out_sparse_cuda(r, t, src, -cast_value);
}
);
return r;
}
SparseTensor s_sub_sparse_cuda(const SparseTensor& t, const SparseTensor& src, Scalar alpha) {
SparseTensor r = t.type().tensor();
s_sub_out_sparse_cuda(r, t, src, alpha);
return r;
}
SparseTensor& s_sub_sparse_cuda_(SparseTensor& t, const SparseTensor& src, Scalar alpha) {
return s_sub_out_sparse_cuda(t, t, src, alpha);
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& s_mul_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t_, const SparseTensor& src_) {
#ifndef __HIP_PLATFORM_HCC__
AT_ASSERT(t_.is_cuda()); // dispatch argument
AT_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
AT_CHECK(_check_device({r_, t_, src_}));
AT_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
if (src_._nnz() == 0 || t_._nnz() == 0) {
return r_.zero_();
}
// saving those because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = std::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
int64_t sparseDims = src._sparseDims();
LongTensor t_indices_ = t._indices();
Tensor t_values_ = t._values();
LongTensor s_indices_ = src._indices();
Tensor s_values_ = src._values();
LongTensor r_indices_ = t_indices_.type().tensor({sparseDims, max_nnz});
Tensor r_values_ = _new_values_with_size_of(t_values_, max_nnz).zero_();
r_.resize_as_(src);
_get_sparse_impl(r_)->set_indices_and_values(r_indices_, r_values_); // TODO: sigh
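  // Launch configuration for the value intersection kernel: the block size is
  // capped at the dense slice size (valueSize = t_values_.stride(0)) and the
  // grid is computed by getApplyGrid over valueSize.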
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStreamOnDevice(curDevice);
AT_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
LongTensor resultNnz = at::empty({1}, CUDA(kLong));
AT_DISPATCH_ALL_TYPES_AND_HALF(
t_values_.type(), "s_mul_out_sparse_cuda", [&] {
apply::valueSparseIntersectionKernel<TensorMulOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
THCudaCheck(cudaGetLastError());
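        // Single-threaded pass over the indices: the resulting nnz is written
        // into resultNnz and copied back to the host below.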
apply::indexSparseIntersectionKernel<uint64_t, scalar_t>
<<<1, 1, 0, stream>>>(
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
THCudaCheck(cudaGetLastError());
});
// sync! (surely there is a more idiomatic way to do this...)
LongTensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
_get_sparse_impl(r_)->set_nnz(cpu_resultNnz.accessor<int64_t, 1>()[0]);
_get_sparse_impl(r_)->set_coalesced(true);
return r_;
#else
AT_ERROR("s_mul_out_sparse_cuda: HIP not supported");
#endif
}
SparseTensor s_mul_sparse_cuda(const SparseTensor& t, const SparseTensor& src) {
SparseTensor r = t.type().tensor();
s_mul_out_sparse_cuda(r, t, src);
return r;
}
SparseTensor& s_mul_sparse_cuda_(SparseTensor& t, const SparseTensor& src) {
return s_mul_out_sparse_cuda(t, t, src);
}
}} // namespace at::native
|
fe36f70bb9079a7282c3ede72d4c5f6453c46b25.hip | // !!! This is a file automatically generated by hipify!!!
// Vinnie Zhang
// Parallel Computing - Lab 3
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
// kernel function (called from host code, executed on device) --> must return void
__global__ void getmaxcu(unsigned int* numbers_device, unsigned int* result_device, int array_size){
// numbers_device and result_device (first two params) point to device memory
// 1D grid of 1D blocks of 1D threads --> threads form blocks form grid
int i = blockIdx.x * blockDim.x + threadIdx.x;
// blockDim.x used for threads per block
if (i < array_size){
// we don't want to exceed array size
        // reads the word located at the first param's address in global/shared memory,
        // computes the max of that word and the second param, and stores the result
        // back to memory at the same address (first param)
        // (the 3 operations form one atomic transaction; atomicMax returns the old
        // value, while the running maximum ends up stored at the first param's address)
atomicMax((int*)result_device, (int)numbers_device[i]);
}
}
// this is a less efficient way to retrieve max of array??
// __global__ void getmaxcu(unsigned int* numbers_device, unsigned int array_size)
// {
// int i = blockIdx.x * blockDim.x + threadIdx.x;
// for (i = 0; i < size; ++i)
// {
// if (numbers_device[i] > numbers_device[0])
// numbers_device[0] = numbers_device[i];
// }
// }
int main(int argc, char *argv[])
{
unsigned int array_size; // size of the array
unsigned int i; // loop index
unsigned int * numbers; // pointer to the array
unsigned int * final;
final = (unsigned int*)malloc(sizeof(unsigned int)); // allocate space for host copies
    final[0] = 0; // initial value of the result; the maximum value itself (not an index) will be stored here
// given to us in sequential code file
if(argc !=2)
{
printf("usage: maxseq num\n");
printf("num = size of the array\n");
exit(1);
}
array_size = atol(argv[1]); // converts string to a long int
numbers = (unsigned int *)malloc(array_size * sizeof(unsigned int));
if( !numbers ) {
printf("Unable to allocate mem for an array of size %u\n", array_size);
exit(1);
}
srand(time(NULL)); // setting a seed for the random number generator
// Fill-up the array with random numbers from 0 to size-1
for(i = 0; i < array_size; i++){
numbers[i] = rand() % array_size;
// printf("%d", numbers[i]);
// printf("\n");
}
// this is where the parallelizing comes in
// we're going to allocate and then copy over memory
unsigned int * numbers_device;
unsigned int * result_device;
    // allocate device memory and copy the input array and the initial result value over to the device
hipMalloc((void **)&numbers_device, array_size*sizeof(unsigned int)); // allocating space for device copies in global memory
hipMemcpy(numbers_device, numbers, array_size*sizeof(unsigned int), hipMemcpyHostToDevice); // copy input to device
hipMalloc((void **)&result_device, sizeof(unsigned int)); // allocating space for device copies in global memory
    hipMemcpy(result_device, final, sizeof(unsigned int), hipMemcpyHostToDevice); // copy the initial result value to the device
    // setting up the launch configuration
    int thread_num = 1024; // cims servers allow for this amount
    int block_num = (array_size + thread_num - 1) / thread_num; // enough blocks to cover the whole array
    // call from host code to device code (aka kernel launch!!)
hipLaunchKernelGGL(( getmaxcu), dim3(block_num), dim3(thread_num), 0, 0, numbers_device, result_device, array_size);
// this is where we copy the result back to host (from device)
hipMemcpy(final, result_device, sizeof(unsigned int), hipMemcpyDeviceToHost);
// cleaning and freeing up the device memory!!!
free(numbers);
hipFree(numbers_device);
hipFree(result_device);
printf("The maximum number in the array is: %u\n", final[0]); // print statement, retrieving max value in array
exit(0);
} | fe36f70bb9079a7282c3ede72d4c5f6453c46b25.cu | // Vinnie Zhang
// Parallel Computing - Lab 3
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
// kernel function (called from host code, executed on device) --> must return void
__global__ void getmaxcu(unsigned int* numbers_device, unsigned int* result_device, int array_size){
// numbers_device and result_device (first two params) point to device memory
// 1D grid of 1D blocks of 1D threads --> threads form blocks form grid
int i = blockIdx.x * blockDim.x + threadIdx.x;
// blockDim.x used for threads per block
if (i < array_size){
// we don't want to exceed array size
        // reads the word located at the first param's address in global/shared memory,
        // computes the max of that word and the second param, and stores the result
        // back to memory at the same address (first param)
        // (the 3 operations form one atomic transaction; atomicMax returns the old
        // value, while the running maximum ends up stored at the first param's address)
atomicMax((int*)result_device, (int)numbers_device[i]);
}
}
// this is a less efficient way to retrieve max of array??
// __global__ void getmaxcu(unsigned int* numbers_device, unsigned int array_size)
// {
// int i = blockIdx.x * blockDim.x + threadIdx.x;
// for (i = 0; i < size; ++i)
// {
// if (numbers_device[i] > numbers_device[0])
// numbers_device[0] = numbers_device[i];
// }
// }
int main(int argc, char *argv[])
{
unsigned int array_size; // size of the array
unsigned int i; // loop index
unsigned int * numbers; // pointer to the array
unsigned int * final;
final = (unsigned int*)malloc(sizeof(unsigned int)); // allocate space for host copies
    final[0] = 0; // initial value of the result; the maximum value itself (not an index) will be stored here
// given to us in sequential code file
if(argc !=2)
{
printf("usage: maxseq num\n");
printf("num = size of the array\n");
exit(1);
}
array_size = atol(argv[1]); // converts string to a long int
numbers = (unsigned int *)malloc(array_size * sizeof(unsigned int));
if( !numbers ) {
printf("Unable to allocate mem for an array of size %u\n", array_size);
exit(1);
}
srand(time(NULL)); // setting a seed for the random number generator
// Fill-up the array with random numbers from 0 to size-1
for(i = 0; i < array_size; i++){
numbers[i] = rand() % array_size;
// printf("%d", numbers[i]);
// printf("\n");
}
// this is where the parallelizing comes in
// we're going to allocate and then copy over memory
unsigned int * numbers_device;
unsigned int * result_device;
    // allocate device memory and copy the input array and the initial result value over to the device
cudaMalloc((void **)&numbers_device, array_size*sizeof(unsigned int)); // allocating space for device copies in global memory
cudaMemcpy(numbers_device, numbers, array_size*sizeof(unsigned int), cudaMemcpyHostToDevice); // copy input to device
cudaMalloc((void **)&result_device, sizeof(unsigned int)); // allocating space for device copies in global memory
    cudaMemcpy(result_device, final, sizeof(unsigned int), cudaMemcpyHostToDevice); // copy the initial result value to the device
    // setting up the launch configuration
    int thread_num = 1024; // cims servers allow for this amount
    int block_num = (array_size + thread_num - 1) / thread_num; // enough blocks to cover the whole array
    // call from host code to device code (aka kernel launch!!)
getmaxcu<<<block_num, thread_num>>>(numbers_device, result_device, array_size);
// this is where we copy the result back to host (from device)
cudaMemcpy(final, result_device, sizeof(unsigned int), cudaMemcpyDeviceToHost);
// cleaning and freeing up the device memory!!!
free(numbers);
cudaFree(numbers_device);
cudaFree(result_device);
printf("The maximum number in the array is: %u\n", final[0]); // print statement, retrieving max value in array
exit(0);
} |
78385ac227b780870a8fa853710ca1c641425ad0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <openacc.h>
#define IPMACC_MAX1(A) (A)
#define IPMACC_MAX2(A,B) (A>B?A:B)
#define IPMACC_MAX3(A,B,C) (A>B?(A>C?A:(B>C?B:C)):(B>C?C:B))
#ifdef __cplusplus
#include "openacc_container.h"
#endif
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include "../../common/polybenchUtilFuncts.h"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 1
#define N 4096
typedef float DATA_TYPE;
void init_array(DATA_TYPE* A, DATA_TYPE* x1, DATA_TYPE* x2, DATA_TYPE* y1, DATA_TYPE* y2, DATA_TYPE* x1_gpu, DATA_TYPE* x2_gpu)
{
int i, j;
for (i = 0; i < N; i++) {
x1 [i] = ((DATA_TYPE)i) / N;
x2 [i] = ((DATA_TYPE)i + 1) / N;
x1_gpu [i] = x1 [i];
x2_gpu [i] = x2 [i];
y1 [i] = ((DATA_TYPE)i + 3) / N;
y2 [i] = ((DATA_TYPE)i + 4) / N;
for (j = 0; j < N; j++) {
A [i * N + j] = ((DATA_TYPE)i * j) / N;
}
}
}
void CPU__runMvt(DATA_TYPE* a, DATA_TYPE* x1, DATA_TYPE* x2, DATA_TYPE* y1, DATA_TYPE* y2)
{
int i, j;
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
x1 [i] = x1 [i] + a [i * N + j] * y1 [j];
}
}
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
x2 [i] = x2 [i] + a [j * N + i] * y2 [j];
}
}
}
__global__ void __generated_kernel_region_0(DATA_TYPE * a,DATA_TYPE * y1,DATA_TYPE * x1);
__global__ void __generated_kernel_region_1(DATA_TYPE * a,DATA_TYPE * x2,DATA_TYPE * y2);
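// GPU version: each of the two loop nests below is offloaded as a generated
// kernel; the arrays are copied to the device before and copied back after
// each kernel region.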
void GPU__runMvt(DATA_TYPE* a, DATA_TYPE* x1, DATA_TYPE* x2, DATA_TYPE* y1, DATA_TYPE* y2)
{
int i;
ipmacc_prompt((char*)"IPMACC: memory allocation a\n");
acc_present_or_create((void*)a,(16777215+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory allocation x1\n");
acc_present_or_create((void*)x1,(4095+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory allocation y1\n");
acc_present_or_create((void*)y1,(4095+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyin a\n");
acc_pcopyin((void*)a,(16777215+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyin x1\n");
acc_pcopyin((void*)x1,(4095+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyin y1\n");
acc_pcopyin((void*)y1,(4095+0)*sizeof(DATA_TYPE ));
{
/* kernel call statement [0, -1]*/
{
if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 0 > gridDim: %d\tblockDim: %d\n",(((abs((int)((N))-(0+0)))/(1)))/256+(((((abs((int)((N))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL((
__generated_kernel_region_0), dim3((((abs((int)((N))-(0+0)))/(1)))/256+(((((abs((int)((N))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0,
(DATA_TYPE *)acc_deviceptr((void*)a),
(DATA_TYPE *)acc_deviceptr((void*)y1),
(DATA_TYPE *)acc_deviceptr((void*)x1));
}
/* kernel call statement*/
if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n");
{
hipError_t err=hipDeviceSynchronize();
if(err!=hipSuccess){
printf("Kernel Launch Error! error code (%d)\n",err);
assert(0&&"Launch Failure!\n");}
}
}
ipmacc_prompt((char*)"IPMACC: memory copyout a\n");
acc_copyout_and_keep((void*)a,(16777215+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyout x1\n");
acc_copyout_and_keep((void*)x1,(4095+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyout y1\n");
acc_copyout_and_keep((void*)y1,(4095+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory allocation a\n");
acc_present_or_create((void*)a,(16777215+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory allocation x2\n");
acc_present_or_create((void*)x2,(4095+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory allocation y2\n");
acc_present_or_create((void*)y2,(4095+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyin a\n");
acc_pcopyin((void*)a,(16777215+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyin x2\n");
acc_pcopyin((void*)x2,(4095+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyin y2\n");
acc_pcopyin((void*)y2,(4095+0)*sizeof(DATA_TYPE ));
{
/* kernel call statement [1, -1]*/
{
if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 1 > gridDim: %d\tblockDim: %d\n",(((abs((int)((N))-(0+0)))/(1)))/256+(((((abs((int)((N))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL((
__generated_kernel_region_1), dim3((((abs((int)((N))-(0+0)))/(1)))/256+(((((abs((int)((N))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0,
(DATA_TYPE *)acc_deviceptr((void*)a),
(DATA_TYPE *)acc_deviceptr((void*)x2),
(DATA_TYPE *)acc_deviceptr((void*)y2));
}
/* kernel call statement*/
if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n");
{
hipError_t err=hipDeviceSynchronize();
if(err!=hipSuccess){
printf("Kernel Launch Error! error code (%d)\n",err);
assert(0&&"Launch Failure!\n");}
}
}
ipmacc_prompt((char*)"IPMACC: memory copyout a\n");
acc_copyout_and_keep((void*)a,(16777215+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyout x2\n");
acc_copyout_and_keep((void*)x2,(4095+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyout y2\n");
acc_copyout_and_keep((void*)y2,(4095+0)*sizeof(DATA_TYPE ));
}
void compareResults(DATA_TYPE* x1, DATA_TYPE* x1_outputFromGpu, DATA_TYPE* x2, DATA_TYPE* x2_outputFromGpu)
{
int i, fail;
fail = 0;
for (i = 0; i < N; i++) {
if (percentDiff(x1 [i], x1_outputFromGpu [i]) > PERCENT_DIFF_ERROR_THRESHOLD) {
fail++;
}
if (percentDiff(x2 [i], x2_outputFromGpu [i]) > PERCENT_DIFF_ERROR_THRESHOLD) {
fail++;
}
}
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
int main()
{
double t_start, t_end;
DATA_TYPE* a;
DATA_TYPE* x1;
DATA_TYPE* x2;
DATA_TYPE* x1_outputFromGpu;
DATA_TYPE* x2_outputFromGpu;
DATA_TYPE* y_1;
DATA_TYPE* y_2;
a = (DATA_TYPE*)malloc(N * N * sizeof(DATA_TYPE));
x1 = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
x2 = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
x1_outputFromGpu = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
x2_outputFromGpu = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
y_1 = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
y_2 = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
fprintf(stdout, "<< Matrix Vector Product and Transpose >>\n");
init_array(a, x1, x2, y_1, y_2, x1_outputFromGpu, x2_outputFromGpu);
t_start = rtclock();
GPU__runMvt(a, x1_outputFromGpu, x2_outputFromGpu, y_1, y_2);
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
t_start = rtclock();
CPU__runMvt(a, x1, x2, y_1, y_2);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(x1, x1_outputFromGpu, x2, x2_outputFromGpu);
free(a);
free(x1);
free(x2);
free(x1_outputFromGpu);
free(x2_outputFromGpu);
free(y_1);
free(y_2);
return 0;
}
__global__ void __generated_kernel_region_0(DATA_TYPE * a,DATA_TYPE * y1,DATA_TYPE * x1){
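// Each thread handles one row i of x1 += A * y1; the loop over columns j runs
// sequentially within that thread.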
int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x;
int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y;
int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z;
int i;
{
{
{
i=0+(__kernel_getuid_x);
if( i < N)
{
int j;
for(j = 0; j < N; j++)
{
x1 [i] = x1 [i] + a [i * N + j] * y1 [j];
}
}
}
}
}
//append writeback of scalar variables
}
__global__ void __generated_kernel_region_1(DATA_TYPE * a,DATA_TYPE * x2,DATA_TYPE * y2){
int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x;
int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y;
int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z;
int i;
{
{
{
i=0+(__kernel_getuid_x);
if( i < N)
{
int j;
for(j = 0; j < N; j++)
{
x2 [i] = x2 [i] + a [j * N + i] * y2 [j];
}
}
}
}
}
//append writeback of scalar variables
}
| 78385ac227b780870a8fa853710ca1c641425ad0.cu | #include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <openacc.h>
#define IPMACC_MAX1(A) (A)
#define IPMACC_MAX2(A,B) (A>B?A:B)
#define IPMACC_MAX3(A,B,C) (A>B?(A>C?A:(B>C?B:C)):(B>C?C:B))
#ifdef __cplusplus
#include "openacc_container.h"
#endif
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include "../../common/polybenchUtilFuncts.h"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 1
#define N 4096
typedef float DATA_TYPE;
void init_array(DATA_TYPE* A, DATA_TYPE* x1, DATA_TYPE* x2, DATA_TYPE* y1, DATA_TYPE* y2, DATA_TYPE* x1_gpu, DATA_TYPE* x2_gpu)
{
int i, j;
for (i = 0; i < N; i++) {
x1 [i] = ((DATA_TYPE)i) / N;
x2 [i] = ((DATA_TYPE)i + 1) / N;
x1_gpu [i] = x1 [i];
x2_gpu [i] = x2 [i];
y1 [i] = ((DATA_TYPE)i + 3) / N;
y2 [i] = ((DATA_TYPE)i + 4) / N;
for (j = 0; j < N; j++) {
A [i * N + j] = ((DATA_TYPE)i * j) / N;
}
}
}
void CPU__runMvt(DATA_TYPE* a, DATA_TYPE* x1, DATA_TYPE* x2, DATA_TYPE* y1, DATA_TYPE* y2)
{
int i, j;
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
x1 [i] = x1 [i] + a [i * N + j] * y1 [j];
}
}
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
x2 [i] = x2 [i] + a [j * N + i] * y2 [j];
}
}
}
__global__ void __generated_kernel_region_0(DATA_TYPE * a,DATA_TYPE * y1,DATA_TYPE * x1);
__global__ void __generated_kernel_region_1(DATA_TYPE * a,DATA_TYPE * x2,DATA_TYPE * y2);
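// GPU version: each of the two loop nests below is offloaded as a generated
// kernel; the arrays are copied to the device before and copied back after
// each kernel region.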
void GPU__runMvt(DATA_TYPE* a, DATA_TYPE* x1, DATA_TYPE* x2, DATA_TYPE* y1, DATA_TYPE* y2)
{
int i;
ipmacc_prompt((char*)"IPMACC: memory allocation a\n");
acc_present_or_create((void*)a,(16777215+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory allocation x1\n");
acc_present_or_create((void*)x1,(4095+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory allocation y1\n");
acc_present_or_create((void*)y1,(4095+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyin a\n");
acc_pcopyin((void*)a,(16777215+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyin x1\n");
acc_pcopyin((void*)x1,(4095+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyin y1\n");
acc_pcopyin((void*)y1,(4095+0)*sizeof(DATA_TYPE ));
{
/* kernel call statement [0, -1]*/
{
if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 0 > gridDim: %d\tblockDim: %d\n",(((abs((int)((N))-(0+0)))/(1)))/256+(((((abs((int)((N))-(0+0)))/(1)))%(256))==0?0:1),256);
__generated_kernel_region_0<<<(((abs((int)((N))-(0+0)))/(1)))/256+(((((abs((int)((N))-(0+0)))/(1)))%(256))==0?0:1),256>>>(
(DATA_TYPE *)acc_deviceptr((void*)a),
(DATA_TYPE *)acc_deviceptr((void*)y1),
(DATA_TYPE *)acc_deviceptr((void*)x1));
}
/* kernel call statement*/
if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n");
{
cudaError err=cudaDeviceSynchronize();
if(err!=cudaSuccess){
printf("Kernel Launch Error! error code (%d)\n",err);
assert(0&&"Launch Failure!\n");}
}
}
ipmacc_prompt((char*)"IPMACC: memory copyout a\n");
acc_copyout_and_keep((void*)a,(16777215+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyout x1\n");
acc_copyout_and_keep((void*)x1,(4095+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyout y1\n");
acc_copyout_and_keep((void*)y1,(4095+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory allocation a\n");
acc_present_or_create((void*)a,(16777215+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory allocation x2\n");
acc_present_or_create((void*)x2,(4095+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory allocation y2\n");
acc_present_or_create((void*)y2,(4095+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyin a\n");
acc_pcopyin((void*)a,(16777215+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyin x2\n");
acc_pcopyin((void*)x2,(4095+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyin y2\n");
acc_pcopyin((void*)y2,(4095+0)*sizeof(DATA_TYPE ));
{
/* kernel call statement [1, -1]*/
{
if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 1 > gridDim: %d\tblockDim: %d\n",(((abs((int)((N))-(0+0)))/(1)))/256+(((((abs((int)((N))-(0+0)))/(1)))%(256))==0?0:1),256);
__generated_kernel_region_1<<<(((abs((int)((N))-(0+0)))/(1)))/256+(((((abs((int)((N))-(0+0)))/(1)))%(256))==0?0:1),256>>>(
(DATA_TYPE *)acc_deviceptr((void*)a),
(DATA_TYPE *)acc_deviceptr((void*)x2),
(DATA_TYPE *)acc_deviceptr((void*)y2));
}
/* kernel call statement*/
if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n");
{
cudaError err=cudaDeviceSynchronize();
if(err!=cudaSuccess){
printf("Kernel Launch Error! error code (%d)\n",err);
assert(0&&"Launch Failure!\n");}
}
}
ipmacc_prompt((char*)"IPMACC: memory copyout a\n");
acc_copyout_and_keep((void*)a,(16777215+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyout x2\n");
acc_copyout_and_keep((void*)x2,(4095+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyout y2\n");
acc_copyout_and_keep((void*)y2,(4095+0)*sizeof(DATA_TYPE ));
}
void compareResults(DATA_TYPE* x1, DATA_TYPE* x1_outputFromGpu, DATA_TYPE* x2, DATA_TYPE* x2_outputFromGpu)
{
int i, fail;
fail = 0;
for (i = 0; i < N; i++) {
if (percentDiff(x1 [i], x1_outputFromGpu [i]) > PERCENT_DIFF_ERROR_THRESHOLD) {
fail++;
}
if (percentDiff(x2 [i], x2_outputFromGpu [i]) > PERCENT_DIFF_ERROR_THRESHOLD) {
fail++;
}
}
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
int main()
{
double t_start, t_end;
DATA_TYPE* a;
DATA_TYPE* x1;
DATA_TYPE* x2;
DATA_TYPE* x1_outputFromGpu;
DATA_TYPE* x2_outputFromGpu;
DATA_TYPE* y_1;
DATA_TYPE* y_2;
a = (DATA_TYPE*)malloc(N * N * sizeof(DATA_TYPE));
x1 = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
x2 = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
x1_outputFromGpu = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
x2_outputFromGpu = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
y_1 = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
y_2 = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
fprintf(stdout, "<< Matrix Vector Product and Transpose >>\n");
init_array(a, x1, x2, y_1, y_2, x1_outputFromGpu, x2_outputFromGpu);
t_start = rtclock();
GPU__runMvt(a, x1_outputFromGpu, x2_outputFromGpu, y_1, y_2);
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
t_start = rtclock();
CPU__runMvt(a, x1, x2, y_1, y_2);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(x1, x1_outputFromGpu, x2, x2_outputFromGpu);
free(a);
free(x1);
free(x2);
free(x1_outputFromGpu);
free(x2_outputFromGpu);
free(y_1);
free(y_2);
return 0;
}
__global__ void __generated_kernel_region_0(DATA_TYPE * a,DATA_TYPE * y1,DATA_TYPE * x1){
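// Each thread handles one row i of x1 += A * y1; the loop over columns j runs
// sequentially within that thread.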
int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x;
int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y;
int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z;
int i;
{
{
{
i=0+(__kernel_getuid_x);
if( i < N)
{
int j;
for(j = 0; j < N; j++)
{
x1 [i] = x1 [i] + a [i * N + j] * y1 [j];
}
}
}
}
}
//append writeback of scalar variables
}
__global__ void __generated_kernel_region_1(DATA_TYPE * a,DATA_TYPE * x2,DATA_TYPE * y2){
int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x;
int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y;
int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z;
int i;
{
{
{
i=0+(__kernel_getuid_x);
if( i < N)
{
int j;
for(j = 0; j < N; j++)
{
x2 [i] = x2 [i] + a [j * N + i] * y2 [j];
}
}
}
}
}
//append writeback of scalar variables
}
|
fda8c34aa45241571251b54570b6458f4419a2f4.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
//
#include "ATen/hip/HIPContext.h"
#include "open3d/ml/impl/misc/InvertNeighborsList.cuh"
#include "open3d/ml/pytorch/TorchHelper.h"
#include "open3d/ml/pytorch/misc/InvertNeighborsListOpKernel.h"
#include "torch/script.h"
template <class TIndex, class TAttr>
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> InvertNeighborsListCUDA(
int64_t num_points,
const torch::Tensor& inp_neighbors_index,
const torch::Tensor& inp_neighbors_row_splits,
const torch::Tensor& inp_neighbors_attributes) {
auto device = inp_neighbors_index.device();
torch::Tensor neighbors_index =
torch::empty(inp_neighbors_index.sizes(),
torch::dtype(ToTorchDtype<TIndex>()).device(device));
torch::Tensor neighbors_row_splits = torch::empty(
{num_points + 1}, torch::dtype(torch::kInt64).device(device));
torch::Tensor neighbors_attributes =
torch::empty_like(inp_neighbors_attributes);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto cuda_device_props = at::cuda::getCurrentDeviceProperties();
const int texture_alignment = cuda_device_props->textureAlignment;
int num_attributes;
if (inp_neighbors_attributes.size(0) == 0) {
num_attributes = 0;
} else {
num_attributes = 1;
for (int i = 1; i < inp_neighbors_attributes.dim(); ++i)
num_attributes *= inp_neighbors_attributes.size(i);
}
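    // Two-pass call pattern: the first call below (with temp_ptr == nullptr)
    // only computes the required temp buffer size; once the buffer is
    // allocated, the second call performs the actual inversion.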
void* temp_ptr = nullptr;
size_t temp_size = 0;
// determine temp_size
open3d::ml::impl::InvertNeighborsListCUDA(
stream, temp_ptr, temp_size, texture_alignment,
inp_neighbors_index.data_ptr<TIndex>(),
num_attributes ? inp_neighbors_attributes.data_ptr<TAttr>()
: nullptr,
num_attributes,
(int64_t*)inp_neighbors_row_splits.data_ptr<int64_t>(),
inp_neighbors_row_splits.size(0) - 1,
neighbors_index.data_ptr<TIndex>(),
num_attributes ? neighbors_attributes.data_ptr<TAttr>() : nullptr,
neighbors_index.size(0),
(int64_t*)neighbors_row_splits.data_ptr<int64_t>(),
neighbors_row_splits.size(0) - 1);
auto temp_tensor = CreateTempTensor(temp_size, device, &temp_ptr);
// actually invert the list
open3d::ml::impl::InvertNeighborsListCUDA(
stream, temp_ptr, temp_size, texture_alignment,
inp_neighbors_index.data_ptr<TIndex>(),
num_attributes ? inp_neighbors_attributes.data_ptr<TAttr>()
: nullptr,
num_attributes,
(int64_t*)inp_neighbors_row_splits.data_ptr<int64_t>(),
inp_neighbors_row_splits.size(0) - 1,
neighbors_index.data_ptr<TIndex>(),
num_attributes ? neighbors_attributes.data_ptr<TAttr>() : nullptr,
neighbors_index.size(0),
(int64_t*)neighbors_row_splits.data_ptr<int64_t>(),
neighbors_row_splits.size(0) - 1);
return std::make_tuple(neighbors_index, neighbors_row_splits,
neighbors_attributes);
}
#define INSTANTIATE(TIndex, TAttr) \
template std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> \
InvertNeighborsListCUDA<TIndex, TAttr>(int64_t, const torch::Tensor&, \
const torch::Tensor&, \
const torch::Tensor&);
INSTANTIATE(int32_t, int32_t)
INSTANTIATE(int32_t, int64_t)
INSTANTIATE(int32_t, float)
INSTANTIATE(int32_t, double)
| fda8c34aa45241571251b54570b6458f4419a2f4.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
//
#include "ATen/cuda/CUDAContext.h"
#include "open3d/ml/impl/misc/InvertNeighborsList.cuh"
#include "open3d/ml/pytorch/TorchHelper.h"
#include "open3d/ml/pytorch/misc/InvertNeighborsListOpKernel.h"
#include "torch/script.h"
template <class TIndex, class TAttr>
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> InvertNeighborsListCUDA(
int64_t num_points,
const torch::Tensor& inp_neighbors_index,
const torch::Tensor& inp_neighbors_row_splits,
const torch::Tensor& inp_neighbors_attributes) {
auto device = inp_neighbors_index.device();
torch::Tensor neighbors_index =
torch::empty(inp_neighbors_index.sizes(),
torch::dtype(ToTorchDtype<TIndex>()).device(device));
torch::Tensor neighbors_row_splits = torch::empty(
{num_points + 1}, torch::dtype(torch::kInt64).device(device));
torch::Tensor neighbors_attributes =
torch::empty_like(inp_neighbors_attributes);
auto stream = at::cuda::getCurrentCUDAStream();
auto cuda_device_props = at::cuda::getCurrentDeviceProperties();
const int texture_alignment = cuda_device_props->textureAlignment;
int num_attributes;
if (inp_neighbors_attributes.size(0) == 0) {
num_attributes = 0;
} else {
num_attributes = 1;
for (int i = 1; i < inp_neighbors_attributes.dim(); ++i)
num_attributes *= inp_neighbors_attributes.size(i);
}
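    // Two-pass call pattern: the first call below (with temp_ptr == nullptr)
    // only computes the required temp buffer size; once the buffer is
    // allocated, the second call performs the actual inversion.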
void* temp_ptr = nullptr;
size_t temp_size = 0;
// determine temp_size
open3d::ml::impl::InvertNeighborsListCUDA(
stream, temp_ptr, temp_size, texture_alignment,
inp_neighbors_index.data_ptr<TIndex>(),
num_attributes ? inp_neighbors_attributes.data_ptr<TAttr>()
: nullptr,
num_attributes,
(int64_t*)inp_neighbors_row_splits.data_ptr<int64_t>(),
inp_neighbors_row_splits.size(0) - 1,
neighbors_index.data_ptr<TIndex>(),
num_attributes ? neighbors_attributes.data_ptr<TAttr>() : nullptr,
neighbors_index.size(0),
(int64_t*)neighbors_row_splits.data_ptr<int64_t>(),
neighbors_row_splits.size(0) - 1);
auto temp_tensor = CreateTempTensor(temp_size, device, &temp_ptr);
// actually invert the list
open3d::ml::impl::InvertNeighborsListCUDA(
stream, temp_ptr, temp_size, texture_alignment,
inp_neighbors_index.data_ptr<TIndex>(),
num_attributes ? inp_neighbors_attributes.data_ptr<TAttr>()
: nullptr,
num_attributes,
(int64_t*)inp_neighbors_row_splits.data_ptr<int64_t>(),
inp_neighbors_row_splits.size(0) - 1,
neighbors_index.data_ptr<TIndex>(),
num_attributes ? neighbors_attributes.data_ptr<TAttr>() : nullptr,
neighbors_index.size(0),
(int64_t*)neighbors_row_splits.data_ptr<int64_t>(),
neighbors_row_splits.size(0) - 1);
return std::make_tuple(neighbors_index, neighbors_row_splits,
neighbors_attributes);
}
#define INSTANTIATE(TIndex, TAttr) \
template std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> \
InvertNeighborsListCUDA<TIndex, TAttr>(int64_t, const torch::Tensor&, \
const torch::Tensor&, \
const torch::Tensor&);
INSTANTIATE(int32_t, int32_t)
INSTANTIATE(int32_t, int64_t)
INSTANTIATE(int32_t, float)
INSTANTIATE(int32_t, double)
|
06f1b6b0e44feef925ae4ea098bad68f39b43110.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017 H2O.ai, Inc.
* License Apache License Version 2.0 (see LICENSE for details)
*/
#include "matrix/matrix.h"
#include "matrix/matrix_dense.h"
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <iostream>
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <unistd.h>
#include "h2o4gpukmeans.h"
#include "kmeans_impl.h"
#include "kmeans_general.h"
#include <random>
#include <algorithm>
#include <vector>
#include <csignal>
#define CUDACHECK(cmd) do { \
hipError_t e = cmd; \
if( e != hipSuccess ) { \
printf("Cuda failure %s:%d '%s'\n", \
__FILE__,__LINE__,hipGetErrorString(e));\
exit(EXIT_FAILURE); \
} \
} while(0)
/**
* METHODS FOR DATA COPYING AND GENERATION
*/
template<typename T>
void random_data(int verbose, thrust::device_vector<T> &array, int m, int n) {
thrust::host_vector<T> host_array(m * n);
for (int i = 0; i < m * n; i++) {
host_array[i] = (T) rand() / (T) RAND_MAX;
}
array = host_array;
}
/**
* Copies data from srcdata to array
* @tparam T
* @param verbose Logging level
 * @param ord Column or row order of data
 * @param array Destination array
 * @param srcdata Source data
 * @param q Shard number (from 0 to n_gpu-1)
 * @param n Total number of rows in srcdata
 * @param npergpu Number of rows per GPU shard
 * @param d Number of columns (dimensions) per row
*/
template<typename T>
void copy_data(int verbose, const char ord, thrust::device_vector<T> &array, const T *srcdata,
int q, int n, int npergpu, int d) {
thrust::host_vector<T> host_array(npergpu * d);
if (ord == 'c') {
log_debug(verbose, "Copy data COL ORDER -> ROW ORDER");
int indexi, indexj;
for (int i = 0; i < npergpu * d; i++) {
indexi = i % d; // col
indexj = i / d + q * npergpu; // row (shifted by which gpu)
host_array[i] = srcdata[indexi * n + indexj];
}
} else {
log_debug(verbose, "Copy data ROW ORDER not changed");
for (int i = 0; i < npergpu * d; i++) {
host_array[i] = srcdata[q * npergpu * d + i]; // shift by which gpu
}
}
array = host_array;
}
/**
* Like copy_data but shuffles the data according to mapping from v
* @tparam T
* @param verbose
* @param v
* @param ord
* @param array
* @param srcdata
* @param q
* @param n
* @param npergpu
* @param d
*/
template<typename T>
void copy_data_shuffled(int verbose, std::vector<int> v, const char ord, thrust::device_vector<T> &array,
const T *srcdata, int q, int n, int npergpu, int d) {
thrust::host_vector<T> host_array(npergpu * d);
if (ord == 'c') {
log_debug(verbose, "Copy data shuffle COL ORDER -> ROW ORDER");
for (int i = 0; i < npergpu; i++) {
for (int j = 0; j < d; j++) {
host_array[i * d + j] = srcdata[v[q * npergpu + i] + j * n]; // shift by which gpu
}
}
} else {
log_debug(verbose, "Copy data shuffle ROW ORDER not changed");
for (int i = 0; i < npergpu; i++) {
for (int j = 0; j < d; j++) {
host_array[i * d + j] = srcdata[v[q * npergpu + i] * d + j]; // shift by which gpu
}
}
}
array = host_array;
}
template<typename T>
void copy_centroids_shuffled(int verbose, std::vector<int> v, const char ord, thrust::device_vector<T> &array,
const T *srcdata, int n, int k, int d) {
copy_data_shuffled(verbose, v, ord, array, srcdata, 0, n, k, d);
}
/**
* Copies centroids from initial training set randomly.
* @tparam T
* @param verbose
* @param seed
* @param ord
* @param array
* @param srcdata
* @param q
* @param n
* @param npergpu
* @param d
* @param k
*/
template<typename T>
void random_centroids(int verbose, int seed, const char ord,
thrust::device_vector<T> &array, const T *srcdata,
int q, int n, int npergpu, int d, int k) {
thrust::host_vector<T> host_array(k * d);
if (seed < 0) {
std::random_device rd; //Will be used to obtain a seed for the random number engine
seed = rd();
}
std::mt19937 gen(seed);
std::uniform_int_distribution<> dis(0, n - 1); // random i in range from 0..n-1 (i.e. only 1 gpu gets centroids)
if (ord == 'c') {
log_debug(verbose, "Random centroids COL ORDER -> ROW ORDER");
for (int i = 0; i < k; i++) { // rows
int reali = dis(gen); // + q*npergpu; // row sampled (called indexj above)
for (int j = 0; j < d; j++) { // cols
host_array[i * d + j] = srcdata[reali + j * n];
}
}
} else {
log_debug(verbose, "Random centroids ROW ORDER not changed");
for (int i = 0; i < k; i++) { // rows
int reali = dis(gen); // + q*npergpu ; // row sampled
for (int j = 0; j < d; j++) { // cols
host_array[i * d + j] = srcdata[reali * d + j];
}
}
}
array = host_array;
}
/**
* KMEANS METHODS FIT, PREDICT, TRANSFORM
*/
#define __HBAR__ \
"----------------------------------------------------------------------------\n"
namespace h2o4gpukmeans {
volatile std::atomic_int flaggpu(0);
inline void my_function_gpu(int sig) { // can be called asynchronously
fprintf(stderr, "Caught signal %d. Terminating shortly.\n", sig);
flaggpu = 1;
}
std::vector<int> kmeans_init(int verbose, int *final_n_gpu, int n_gputry, int gpu_idtry, int rows) {
if (rows > std::numeric_limits<int>::max()) {
fprintf(stderr, "rows > %d not implemented\n", std::numeric_limits<int>::max());
fflush(stderr);
exit(0);
}
std::signal(SIGINT, my_function_gpu);
std::signal(SIGTERM, my_function_gpu);
// no more gpus than visible gpus
int n_gpuvis;
hipGetDeviceCount(&n_gpuvis);
int n_gpu = std::min(n_gpuvis, n_gputry);
// no more than rows
n_gpu = std::min(n_gpu, rows);
if (verbose) {
std::cout << n_gpu << " gpus." << std::endl;
}
int gpu_id = gpu_idtry % n_gpuvis;
// setup GPU list to use
std::vector<int> dList(n_gpu);
for (int idx = 0; idx < n_gpu; idx++) {
int device_idx = (gpu_id + idx) % n_gpuvis;
dList[idx] = device_idx;
}
*final_n_gpu = n_gpu;
return dList;
}
template<typename T>
H2O4GPUKMeans<T>::H2O4GPUKMeans(const T *A, int k, int n, int d) {
_A = A;
_k = k;
_n = n;
_d = d;
}
template<typename T>
int kmeans_fit(int verbose, int seed, int gpu_idtry, int n_gputry,
size_t rows, size_t cols, const char ord,
int k, int max_iterations, int init_from_data,
int init_data, T threshold,
const T *srcdata, void **pred_centroids, void **pred_labels) {
log_debug(verbose, "KMeans - Start fitting");
// init random seed if use the C function rand()
if (seed >= 0) {
srand(seed);
} else {
srand(unsigned(time(NULL)));
}
// no more clusters than rows
if (k > rows) {
k = static_cast<int>(rows);
fprintf(stderr, "Number of clusters adjusted to be equal to number of rows.\n");
fflush(stderr);
}
int n_gpu;
std::vector<int> dList = kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows);
double t0t = timer<double>();
thrust::device_vector<T> *data[n_gpu];
thrust::device_vector<int> *labels[n_gpu];
thrust::device_vector<T> *d_centroids[n_gpu];
thrust::device_vector<T> *distances[n_gpu];
log_debug(verbose, "KMeans - Before allocation");
for (int q = 0; q < n_gpu; q++) {
CUDACHECK(hipSetDevice(dList[q]));
data[q] = new thrust::device_vector<T>(rows / n_gpu * cols);
labels[q] = new thrust::device_vector<int>(rows / n_gpu);
d_centroids[q] = new thrust::device_vector<T>(k * cols);
distances[q] = new thrust::device_vector<T>(rows / n_gpu);
}
if (verbose >= H2O4GPU_LOG_INFO) {
std::cout << "Number of points: " << rows << std::endl;
std::cout << "Number of dimensions: " << cols << std::endl;
std::cout << "Number of clusters: " << k << std::endl;
std::cout << "Max. number of iterations: " << max_iterations << std::endl;
std::cout << "Stopping threshold: " << threshold << std::endl;
}
std::vector<int> v(rows);
std::iota(std::begin(v), std::end(v), 0); // Fill with 0, 1, ..., rows.
if (seed >= 0) {
std::shuffle(v.begin(), v.end(), std::default_random_engine(seed));
} else {
std::random_shuffle(v.begin(), v.end());
}
// Copy the data to devices
for (int q = 0; q < n_gpu; q++) {
CUDACHECK(hipSetDevice(dList[q]));
if (verbose) { std::cout << "Copying data to device: " << dList[q] << std::endl; }
if (init_data == 0) { // random (for testing)
random_data<T>(verbose, *data[q], rows / n_gpu, cols);
} else if (init_data == 1) { // shard by row
copy_data(verbose, ord, *data[q], &srcdata[0], q, rows, rows / n_gpu, cols);
} else { // shard by randomly (without replacement) selected by row
copy_data_shuffled(verbose, v, ord, *data[q], &srcdata[0], q, rows, rows / n_gpu, cols);
}
}
// Get random points as centroids
int masterq = 0;
CUDACHECK(hipSetDevice(dList[masterq]));
copy_centroids_shuffled(verbose, v, ord, *d_centroids[masterq], &srcdata[0], rows, k, cols);
int bytecount = cols * k * sizeof(T); // all centroids
// Copy centroids to all devices
std::vector < hipStream_t * > streams;
streams.resize(n_gpu);
for (int q = 0; q < n_gpu; q++) {
if (q == masterq) continue;
CUDACHECK(hipSetDevice(dList[q]));
std::cout << "Copying centroid data to device: " << dList[q] << std::endl;
streams[q] = reinterpret_cast<hipStream_t *>(malloc(sizeof(hipStream_t)));
hipStreamCreate(streams[q]);
hipMemcpyPeerAsync(thrust::raw_pointer_cast(&(*d_centroids[q])[0]),
dList[q],
thrust::raw_pointer_cast(&(*d_centroids[masterq])[0]),
dList[masterq],
bytecount,
*(streams[q]));
}
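// The loop above broadcasts the k * cols centroid block from the master
// device to every other device with hipMemcpyPeerAsync on a per-device
// stream. Per the CUDA/HIP documentation this peer copy should not require
// peer access to be enabled explicitly (it can be staged through the host),
// although enabling it may allow a direct device-to-device transfer.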
for (int q = 0; q < n_gpu; q++) {
if (q == masterq) continue;
hipSetDevice(dList[q]);
hipStreamDestroy(*(streams[q]));
#if(DEBUGKMEANS)
thrust::host_vector<T> h_centroidq=*d_centroids[q];
for(int ii=0;ii<k*cols;ii++){ // the dimension count is 'cols' in this scope
fprintf(stderr,"q=%d initcent[%d]=%g\n",q,ii,h_centroidq[ii]); fflush(stderr);
}
#endif
}
double timetransfer = static_cast<double>(timer<double>() - t0t);
log_debug(verbose, "KMeans - Before kmeans() call");
double t0 = timer<double>();
int status = kmeans::kmeans<T>(verbose, &flaggpu, rows, cols, k, data, labels, d_centroids, distances, dList, n_gpu,
max_iterations, threshold, true);
if (status) {
fprintf(stderr, "KMeans status was %d\n", status);
fflush(stderr);
return (status);
}
double timefit = static_cast<double>(timer<double>() - t0);
if (verbose) {
std::cout << " Time fit: " << timefit << " s" << std::endl;
fprintf(stderr, "Timetransfer: %g Timefit: %g\n", timetransfer, timefit);
fflush(stderr);
}
// copy result of centroids (sitting entirely on each device) back to host
thrust::host_vector<T> *ctr = new thrust::host_vector<T>(*d_centroids[0]);
// TODO FIXME: When do delete this ctr memory?
// hipMemcpy(ctr->data().get(), centroids[0]->data().get(), sizeof(T)*k*d, hipMemcpyDeviceToHost);
*pred_centroids = ctr->data();
// copy assigned labels
thrust::host_vector<int> *h_labels = new thrust::host_vector<int>(0);
for (int q = 0; q < n_gpu; q++) {
h_labels->insert(h_labels->end(), labels[q]->begin(), labels[q]->end());
}
// The initial dataset was shuffled, we need to reshuffle the labels accordingly
// This also reshuffles the initial permutation scheme v
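// (The loop below follows permutation cycles in place: it keeps swapping
// label i with label v[i], and the matching entries of v, until v[i] == i,
// so h_labels ends up ordered like the original, unshuffled rows. The
// permutation v is consumed in the process.)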
if (init_data > 1) {
for (int i = 0; i < rows; i++) {
while (v[i] != i) {
int tmpIdx = v[v[i]];
int tmpLabel = h_labels->data()[v[i]];
h_labels->data()[v[i]] = h_labels->data()[i];
v[v[i]] = v[i];
v[i] = tmpIdx;
h_labels->data()[i] = tmpLabel;
}
}
}
*pred_labels = h_labels->data();
// debug
if (verbose >= H2O4GPU_LOG_VERBOSE) {
for (unsigned int ii = 0; ii < k; ii++) {
fprintf(stderr, "ii=%d of k=%d ", ii, k);
for (unsigned int jj = 0; jj < cols; jj++) {
fprintf(stderr, "%g ", (*ctr)[cols * ii + jj]);
}
fprintf(stderr, "\n");
fflush(stderr);
}
}
for (int q = 0; q < n_gpu; q++) {
delete (data[q]);
delete (labels[q]);
delete (d_centroids[q]);
delete (distances[q]);
}
return 0;
}
template<typename T>
int kmeans_predict(int verbose, int gpu_idtry, int n_gputry,
size_t rows, size_t cols,
const char ord, int k,
const T *srcdata, const T *centroids, void **pred_labels) {
// Print centroids
if (verbose >= H2O4GPU_LOG_VERBOSE) {
std::cout << std::endl;
for (int i = 0; i < cols * k; i++) {
std::cout << centroids[i] << " ";
if (i % cols == 1) {
std::cout << std::endl;
}
}
}
int n_gpu;
std::vector<int> dList = kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows);
thrust::device_vector<T> *d_data[n_gpu];
thrust::device_vector<int> *d_labels[n_gpu];
thrust::device_vector<T> *d_centroids[n_gpu];
thrust::device_vector<T> *pairwise_distances[n_gpu];
thrust::device_vector<T> *data_dots[n_gpu];
thrust::device_vector<T> *centroid_dots[n_gpu];
thrust::device_vector<T> *distances[n_gpu];
int *d_changes[n_gpu];
for (int q = 0; q < n_gpu; q++) {
// TODO everything from here till "distances[q]" is exactly the same as in transform
CUDACHECK(hipSetDevice(dList[q]));
kmeans::detail::labels_init();
data_dots[q] = new thrust::device_vector<T>(rows / n_gpu);
centroid_dots[q] = new thrust::device_vector<T>(k);
pairwise_distances[q] = new thrust::device_vector<T>(rows / n_gpu * k);
d_centroids[q] = new thrust::device_vector<T>(k * cols);
d_data[q] = new thrust::device_vector<T>(rows / n_gpu * cols);
copy_data(verbose, 'r', *d_centroids[q], &centroids[0], 0, k, k, cols);
copy_data(verbose, ord, *d_data[q], &srcdata[0], q, rows, rows / n_gpu, cols);
kmeans::detail::make_self_dots(rows / n_gpu, cols, *d_data[q], *data_dots[q]);
kmeans::detail::calculate_distances(verbose, q, rows / n_gpu, cols, k,
*d_data[q], *d_centroids[q], *data_dots[q],
*centroid_dots[q], *pairwise_distances[q]);
distances[q] = new thrust::device_vector<T>(rows / n_gpu);
d_labels[q] = new thrust::device_vector<int>(rows / n_gpu);
hipMalloc(&d_changes[q], sizeof(int));
kmeans::detail::relabel(rows / n_gpu, k, *pairwise_distances[q], *d_labels[q], *distances[q], d_changes[q]);
}
// Move the resulting labels into host memory from all devices
thrust::host_vector<int> *h_labels = new thrust::host_vector<int>(0);
for (int q = 0; q < n_gpu; q++) {
h_labels->insert(h_labels->end(), d_labels[q]->begin(), d_labels[q]->end());
}
*pred_labels = h_labels->data();
for (int q = 0; q < n_gpu; q++) {
safe_cuda(hipSetDevice(dList[q]));
safe_cuda(hipFree(d_changes[q]));
kmeans::detail::labels_close();
delete (d_labels[q]);
delete (pairwise_distances[q]);
delete (data_dots[q]);
delete (centroid_dots[q]);
delete (d_centroids[q]);
delete (d_data[q]);
delete (distances[q]);
}
return 0;
}
template<typename T>
int kmeans_transform(int verbose,
int gpu_idtry, int n_gputry,
size_t rows, size_t cols, const char ord, int k,
const T *srcdata, const T *centroids,
void **preds) {
// Print centroids
if (verbose >= H2O4GPU_LOG_VERBOSE) {
std::cout << std::endl;
for (int i = 0; i < cols * k; i++) {
std::cout << centroids[i] << " ";
if (i % cols == 1) {
std::cout << std::endl;
}
}
}
int n_gpu;
std::vector<int> dList = kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows);
thrust::device_vector<T> *d_data[n_gpu];
thrust::device_vector<T> *d_centroids[n_gpu];
thrust::device_vector<T> *d_pairwise_distances[n_gpu];
thrust::device_vector<T> *data_dots[n_gpu];
thrust::device_vector<T> *centroid_dots[n_gpu];
for (int q = 0; q < n_gpu; q++) {
CUDACHECK(hipSetDevice(dList[q]));
kmeans::detail::labels_init();
data_dots[q] = new thrust::device_vector<T>(rows / n_gpu);
centroid_dots[q] = new thrust::device_vector<T>(k);
d_pairwise_distances[q] = new thrust::device_vector<T>(rows / n_gpu * k);
d_centroids[q] = new thrust::device_vector<T>(k * cols);
d_data[q] = new thrust::device_vector<T>(rows / n_gpu * cols);
copy_data(verbose, 'r', *d_centroids[q], &centroids[0], 0, k, k, cols);
copy_data(verbose, ord, *d_data[q], &srcdata[0], q, rows, rows / n_gpu, cols);
kmeans::detail::make_self_dots(rows / n_gpu, cols, *d_data[q], *data_dots[q]);
kmeans::detail::calculate_distances(verbose, q, rows / n_gpu, cols, k,
*d_data[q], *d_centroids[q], *data_dots[q],
*centroid_dots[q], *d_pairwise_distances[q]);
}
// Move the resulting labels into host memory from all devices
thrust::host_vector<T> *h_pairwise_distances = new thrust::host_vector<T>(0);
for (int q = 0; q < n_gpu; q++) {
h_pairwise_distances->insert(h_pairwise_distances->end(),
d_pairwise_distances[q]->begin(),
d_pairwise_distances[q]->end());
}
*preds = h_pairwise_distances->data();
// Print centroids
if (verbose >= H2O4GPU_LOG_VERBOSE) {
std::cout << std::endl;
for (int i = 0; i < rows * k; i++) { // pairwise distances are rows x k
std::cout << h_pairwise_distances->data()[i] << " ";
if (i % k == 1) {
std::cout << std::endl;
}
}
}
for (int q = 0; q < n_gpu; q++) {
safe_cuda(hipSetDevice(dList[q]));
kmeans::detail::labels_close();
delete (d_pairwise_distances[q]);
delete (data_dots[q]);
delete (centroid_dots[q]);
delete (d_centroids[q]);
delete (d_data[q]);
}
return 0;
}
template<typename T>
int makePtr_dense(int dopredict, int verbose, int seed, int gpu_idtry, int n_gputry, size_t rows, size_t cols,
const char ord, int k, int max_iterations, int init_from_data, int init_data,
T threshold, const T *srcdata, const T *centroids,
void **pred_centroids, void **pred_labels) {
if (dopredict == 0) {
return kmeans_fit(verbose, seed, gpu_idtry, n_gputry, rows, cols,
ord, k, max_iterations, init_from_data, init_data, threshold,
srcdata, pred_centroids, pred_labels);
} else {
return kmeans_predict(verbose, gpu_idtry, n_gputry, rows, cols,
ord, k,
srcdata, centroids, pred_labels);
}
}
template int
makePtr_dense<float>(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t rows, size_t cols,
const char ord, int k, int max_iterations, int init_from_data,
int init_data, float threshold, const float *srcdata,
const float *centroids, void **pred_centroids, void **pred_labels);
template int
makePtr_dense<double>(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t rows, size_t cols,
const char ord, int k, int max_iterations, int init_from_data,
int init_data, double threshold, const double *srcdata,
const double *centroids, void **pred_centroids, void **pred_labels);
template int kmeans_fit<float>(int verbose, int seed, int gpu_idtry, int n_gputry,
size_t rows, size_t cols,
const char ord, int k, int max_iterations,
int init_from_data, int init_data, float threshold,
const float *srcdata,
void **pred_centroids, void **pred_labels);
template int kmeans_fit<double>(int verbose, int seed, int gpu_idtry, int n_gputry,
size_t rows, size_t cols,
const char ord, int k, int max_iterations,
int init_from_data, int init_data, double threshold,
const double *srcdata,
void **pred_centroids, void **pred_labels);
template int kmeans_predict<float>(int verbose, int gpu_idtry, int n_gputry,
size_t rows, size_t cols,
const char ord, int k,
const float *srcdata, const float *centroids, void **pred_labels);
template int kmeans_predict<double>(int verbose, int gpu_idtry, int n_gputry,
size_t rows, size_t cols,
const char ord, int k,
const double *srcdata, const double *centroids, void **pred_labels);
template int kmeans_transform<float>(int verbose,
int gpu_id, int n_gpu,
size_t m, size_t n, const char ord, int k,
const float *src_data, const float *centroids,
void **preds);
template int kmeans_transform<double>(int verbose,
int gpu_id, int n_gpu,
size_t m, size_t n, const char ord, int k,
const double *src_data, const double *centroids,
void **preds);
// Explicit template instantiation.
#if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE == 1
template
class H2O4GPUKMeans<double>;
#endif
#if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE == 1
template
class H2O4GPUKMeans<float>;
#endif
} // namespace h2o4gpukmeans
#ifdef __cplusplus
extern "C" {
#endif
/*
* Interface for other languages
*/
// Fit and Predict
int make_ptr_float_kmeans(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t mTrain, size_t n,
const char ord, int k, int max_iterations, int init_from_data,
int init_data, float threshold, const float *srcdata,
const float *centroids, void **pred_centroids, void **pred_labels) {
return h2o4gpukmeans::makePtr_dense<float>(dopredict, verbose, seed, gpu_id, n_gpu, mTrain, n, ord, k,
max_iterations, init_from_data, init_data, threshold,
srcdata, centroids, pred_centroids, pred_labels);
}
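/*
 * Minimal usage sketch (illustrative only; mTrain, n, k and `data` stand in
 * for the caller's row count, dimension, cluster count and row-major host
 * buffer):
 *
 *   void *centroids_out = NULL, *labels_out = NULL;
 *   make_ptr_float_kmeans(0, 0, 42, 0, 1, mTrain, n, 'r', k,
 *                         300, 0, 1, 1e-4f, data, NULL,
 *                         &centroids_out, &labels_out);
 *
 * dopredict = 0 runs kmeans_fit; the fitted centroids and labels come back
 * through the two void** out-parameters (they point into host_vector
 * allocations owned by the library, as noted in kmeans_fit above).
 */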
int make_ptr_double_kmeans(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t mTrain, size_t n,
const char ord, int k, int max_iterations, int init_from_data,
int init_data, double threshold, const double *srcdata,
const double *centroids, void **pred_centroids, void **pred_labels) {
return h2o4gpukmeans::makePtr_dense<double>(dopredict, verbose, seed, gpu_id, n_gpu, mTrain, n, ord, k,
max_iterations, init_from_data, init_data, threshold,
srcdata, centroids, pred_centroids, pred_labels);
}
// Transform
int kmeans_transform_float(int verbose,
int gpu_id, int n_gpu,
size_t m, size_t n, const char ord, int k,
const float *src_data, const float *centroids,
void **preds) {
return h2o4gpukmeans::kmeans_transform<float>(verbose, gpu_id, n_gpu, m, n, ord, k, src_data, centroids, preds);
}
int kmeans_transform_double(int verbose,
int gpu_id, int n_gpu,
size_t m, size_t n, const char ord, int k,
const double *src_data, const double *centroids,
void **preds) {
return h2o4gpukmeans::kmeans_transform<double>(verbose, gpu_id, n_gpu, m, n, ord, k, src_data, centroids, preds);
}
#ifdef __cplusplus
}
#endif
| 06f1b6b0e44feef925ae4ea098bad68f39b43110.cu | /*!
* Copyright 2017 H2O.ai, Inc.
* License Apache License Version 2.0 (see LICENSE for details)
*/
#include "matrix/matrix.h"
#include "matrix/matrix_dense.h"
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <iostream>
#include "cuda.h"
#include <cstdlib>
#include <unistd.h>
#include "h2o4gpukmeans.h"
#include "kmeans_impl.h"
#include "kmeans_general.h"
#include <random>
#include <algorithm>
#include <vector>
#include <csignal>
#define CUDACHECK(cmd) do { \
cudaError_t e = cmd; \
if( e != cudaSuccess ) { \
printf("Cuda failure %s:%d '%s'\n", \
__FILE__,__LINE__,cudaGetErrorString(e));\
exit(EXIT_FAILURE); \
} \
} while(0)
/**
* METHODS FOR DATA COPYING AND GENERATION
*/
template<typename T>
void random_data(int verbose, thrust::device_vector<T> &array, int m, int n) {
thrust::host_vector<T> host_array(m * n);
for (int i = 0; i < m * n; i++) {
host_array[i] = (T) rand() / (T) RAND_MAX;
}
array = host_array;
}
/**
* Copies data from srcdata to array
* @tparam T
* @param verbose Logging level
* @param ord Column on row order of data
* @param array Destination array
* @param srcdata Source data
* @param q Shard number (from 0 to n_gpu)
* @param n
* @param npergpu
* @param d
*/
template<typename T>
void copy_data(int verbose, const char ord, thrust::device_vector<T> &array, const T *srcdata,
int q, int n, int npergpu, int d) {
thrust::host_vector<T> host_array(npergpu * d);
if (ord == 'c') {
log_debug(verbose, "Copy data COL ORDER -> ROW ORDER");
int indexi, indexj;
for (int i = 0; i < npergpu * d; i++) {
indexi = i % d; // col
indexj = i / d + q * npergpu; // row (shifted by which gpu)
host_array[i] = srcdata[indexi * n + indexj];
}
} else {
log_debug(verbose, "Copy data ROW ORDER not changed");
for (int i = 0; i < npergpu * d; i++) {
host_array[i] = srcdata[q * npergpu * d + i]; // shift by which gpu
}
}
array = host_array;
}
/**
* Like copy_data but shuffles the data according to mapping from v
* @tparam T
* @param verbose
* @param v
* @param ord
* @param array
* @param srcdata
* @param q
* @param n
* @param npergpu
* @param d
*/
template<typename T>
void copy_data_shuffled(int verbose, std::vector<int> v, const char ord, thrust::device_vector<T> &array,
const T *srcdata, int q, int n, int npergpu, int d) {
thrust::host_vector<T> host_array(npergpu * d);
if (ord == 'c') {
log_debug(verbose, "Copy data shuffle COL ORDER -> ROW ORDER");
for (int i = 0; i < npergpu; i++) {
for (int j = 0; j < d; j++) {
host_array[i * d + j] = srcdata[v[q * npergpu + i] + j * n]; // shift by which gpu
}
}
} else {
log_debug(verbose, "Copy data shuffle ROW ORDER not changed");
for (int i = 0; i < npergpu; i++) {
for (int j = 0; j < d; j++) {
host_array[i * d + j] = srcdata[v[q * npergpu + i] * d + j]; // shift by which gpu
}
}
}
array = host_array;
}
template<typename T>
void copy_centroids_shuffled(int verbose, std::vector<int> v, const char ord, thrust::device_vector<T> &array,
const T *srcdata, int n, int k, int d) {
copy_data_shuffled(verbose, v, ord, array, srcdata, 0, n, k, d);
}
/**
* Copies centroids from initial training set randomly.
* @tparam T
* @param verbose
* @param seed
* @param ord
* @param array
* @param srcdata
* @param q
* @param n
* @param npergpu
* @param d
* @param k
*/
template<typename T>
void random_centroids(int verbose, int seed, const char ord,
thrust::device_vector<T> &array, const T *srcdata,
int q, int n, int npergpu, int d, int k) {
thrust::host_vector<T> host_array(k * d);
if (seed < 0) {
std::random_device rd; //Will be used to obtain a seed for the random number engine
seed = rd();
}
std::mt19937 gen(seed);
std::uniform_int_distribution<> dis(0, n - 1); // random i in range from 0..n-1 (i.e. only 1 gpu gets centroids)
if (ord == 'c') {
log_debug(verbose, "Random centroids COL ORDER -> ROW ORDER");
for (int i = 0; i < k; i++) { // rows
int reali = dis(gen); // + q*npergpu; // row sampled (called indexj above)
for (int j = 0; j < d; j++) { // cols
host_array[i * d + j] = srcdata[reali + j * n];
}
}
} else {
log_debug(verbose, "Random centroids ROW ORDER not changed");
for (int i = 0; i < k; i++) { // rows
int reali = dis(gen); // + q*npergpu ; // row sampled
for (int j = 0; j < d; j++) { // cols
host_array[i * d + j] = srcdata[reali * d + j];
}
}
}
array = host_array;
}
/**
* KMEANS METHODS FIT, PREDICT, TRANSFORM
*/
#define __HBAR__ \
"----------------------------------------------------------------------------\n"
namespace h2o4gpukmeans {
volatile std::atomic_int flaggpu(0);
inline void my_function_gpu(int sig) { // can be called asynchronously
fprintf(stderr, "Caught signal %d. Terminating shortly.\n", sig);
flaggpu = 1;
}
std::vector<int> kmeans_init(int verbose, int *final_n_gpu, int n_gputry, int gpu_idtry, int rows) {
if (rows > std::numeric_limits<int>::max()) {
fprintf(stderr, "rows > %d not implemented\n", std::numeric_limits<int>::max());
fflush(stderr);
exit(0);
}
std::signal(SIGINT, my_function_gpu);
std::signal(SIGTERM, my_function_gpu);
// no more gpus than visible gpus
int n_gpuvis;
cudaGetDeviceCount(&n_gpuvis);
int n_gpu = std::min(n_gpuvis, n_gputry);
// no more than rows
n_gpu = std::min(n_gpu, rows);
if (verbose) {
std::cout << n_gpu << " gpus." << std::endl;
}
int gpu_id = gpu_idtry % n_gpuvis;
// setup GPU list to use
std::vector<int> dList(n_gpu);
for (int idx = 0; idx < n_gpu; idx++) {
int device_idx = (gpu_id + idx) % n_gpuvis;
dList[idx] = device_idx;
}
*final_n_gpu = n_gpu;
return dList;
}
template<typename T>
H2O4GPUKMeans<T>::H2O4GPUKMeans(const T *A, int k, int n, int d) {
_A = A;
_k = k;
_n = n;
_d = d;
}
template<typename T>
int kmeans_fit(int verbose, int seed, int gpu_idtry, int n_gputry,
size_t rows, size_t cols, const char ord,
int k, int max_iterations, int init_from_data,
int init_data, T threshold,
const T *srcdata, void **pred_centroids, void **pred_labels) {
log_debug(verbose, "KMeans - Start fitting");
// init random seed if use the C function rand()
if (seed >= 0) {
srand(seed);
} else {
srand(unsigned(time(NULL)));
}
// no more clusters than rows
if (k > rows) {
k = static_cast<int>(rows);
fprintf(stderr, "Number of clusters adjusted to be equal to number of rows.\n");
fflush(stderr);
}
int n_gpu;
std::vector<int> dList = kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows);
double t0t = timer<double>();
thrust::device_vector<T> *data[n_gpu];
thrust::device_vector<int> *labels[n_gpu];
thrust::device_vector<T> *d_centroids[n_gpu];
thrust::device_vector<T> *distances[n_gpu];
log_debug(verbose, "KMeans - Before allocation");
for (int q = 0; q < n_gpu; q++) {
CUDACHECK(cudaSetDevice(dList[q]));
data[q] = new thrust::device_vector<T>(rows / n_gpu * cols);
labels[q] = new thrust::device_vector<int>(rows / n_gpu);
d_centroids[q] = new thrust::device_vector<T>(k * cols);
distances[q] = new thrust::device_vector<T>(rows / n_gpu);
}
if (verbose >= H2O4GPU_LOG_INFO) {
std::cout << "Number of points: " << rows << std::endl;
std::cout << "Number of dimensions: " << cols << std::endl;
std::cout << "Number of clusters: " << k << std::endl;
std::cout << "Max. number of iterations: " << max_iterations << std::endl;
std::cout << "Stopping threshold: " << threshold << std::endl;
}
std::vector<int> v(rows);
std::iota(std::begin(v), std::end(v), 0); // Fill with 0, 1, ..., rows.
if (seed >= 0) {
std::shuffle(v.begin(), v.end(), std::default_random_engine(seed));
} else {
std::random_shuffle(v.begin(), v.end());
}
// Copy the data to devices
for (int q = 0; q < n_gpu; q++) {
CUDACHECK(cudaSetDevice(dList[q]));
if (verbose) { std::cout << "Copying data to device: " << dList[q] << std::endl; }
if (init_data == 0) { // random (for testing)
random_data<T>(verbose, *data[q], rows / n_gpu, cols);
} else if (init_data == 1) { // shard by row
copy_data(verbose, ord, *data[q], &srcdata[0], q, rows, rows / n_gpu, cols);
} else { // shard by randomly (without replacement) selected by row
copy_data_shuffled(verbose, v, ord, *data[q], &srcdata[0], q, rows, rows / n_gpu, cols);
}
}
// Get random points as centroids
int masterq = 0;
CUDACHECK(cudaSetDevice(dList[masterq]));
copy_centroids_shuffled(verbose, v, ord, *d_centroids[masterq], &srcdata[0], rows, k, cols);
int bytecount = cols * k * sizeof(T); // all centroids
// Copy centroids to all devices
std::vector < cudaStream_t * > streams;
streams.resize(n_gpu);
for (int q = 0; q < n_gpu; q++) {
if (q == masterq) continue;
CUDACHECK(cudaSetDevice(dList[q]));
std::cout << "Copying centroid data to device: " << dList[q] << std::endl;
streams[q] = reinterpret_cast<cudaStream_t *>(malloc(sizeof(cudaStream_t)));
cudaStreamCreate(streams[q]);
cudaMemcpyPeerAsync(thrust::raw_pointer_cast(&(*d_centroids[q])[0]),
dList[q],
thrust::raw_pointer_cast(&(*d_centroids[masterq])[0]),
dList[masterq],
bytecount,
*(streams[q]));
}
for (int q = 0; q < n_gpu; q++) {
if (q == masterq) continue;
cudaSetDevice(dList[q]);
cudaStreamDestroy(*(streams[q]));
#if(DEBUGKMEANS)
thrust::host_vector<T> h_centroidq=*d_centroids[q];
for(int ii=0;ii<k*cols;ii++){ // the dimension count is 'cols' in this scope
fprintf(stderr,"q=%d initcent[%d]=%g\n",q,ii,h_centroidq[ii]); fflush(stderr);
}
#endif
}
double timetransfer = static_cast<double>(timer<double>() - t0t);
log_debug(verbose, "KMeans - Before kmeans() call");
double t0 = timer<double>();
int status = kmeans::kmeans<T>(verbose, &flaggpu, rows, cols, k, data, labels, d_centroids, distances, dList, n_gpu,
max_iterations, threshold, true);
if (status) {
fprintf(stderr, "KMeans status was %d\n", status);
fflush(stderr);
return (status);
}
double timefit = static_cast<double>(timer<double>() - t0);
if (verbose) {
std::cout << " Time fit: " << timefit << " s" << std::endl;
fprintf(stderr, "Timetransfer: %g Timefit: %g\n", timetransfer, timefit);
fflush(stderr);
}
// copy result of centroids (sitting entirely on each device) back to host
thrust::host_vector<T> *ctr = new thrust::host_vector<T>(*d_centroids[0]);
// TODO FIXME: When do delete this ctr memory?
// cudaMemcpy(ctr->data().get(), centroids[0]->data().get(), sizeof(T)*k*d, cudaMemcpyDeviceToHost);
*pred_centroids = ctr->data();
// copy assigned labels
thrust::host_vector<int> *h_labels = new thrust::host_vector<int>(0);
for (int q = 0; q < n_gpu; q++) {
h_labels->insert(h_labels->end(), labels[q]->begin(), labels[q]->end());
}
// The initial dataset was shuffled, we need to reshuffle the labels accordingly
// This also reshuffles the initial permutation scheme v
if (init_data > 1) {
for (int i = 0; i < rows; i++) {
while (v[i] != i) {
int tmpIdx = v[v[i]];
int tmpLabel = h_labels->data()[v[i]];
h_labels->data()[v[i]] = h_labels->data()[i];
v[v[i]] = v[i];
v[i] = tmpIdx;
h_labels->data()[i] = tmpLabel;
}
}
}
*pred_labels = h_labels->data();
// debug
if (verbose >= H2O4GPU_LOG_VERBOSE) {
for (unsigned int ii = 0; ii < k; ii++) {
fprintf(stderr, "ii=%d of k=%d ", ii, k);
for (unsigned int jj = 0; jj < cols; jj++) {
fprintf(stderr, "%g ", (*ctr)[cols * ii + jj]);
}
fprintf(stderr, "\n");
fflush(stderr);
}
}
for (int q = 0; q < n_gpu; q++) {
delete (data[q]);
delete (labels[q]);
delete (d_centroids[q]);
delete (distances[q]);
}
return 0;
}
template<typename T>
int kmeans_predict(int verbose, int gpu_idtry, int n_gputry,
size_t rows, size_t cols,
const char ord, int k,
const T *srcdata, const T *centroids, void **pred_labels) {
// Print centroids
if (verbose >= H2O4GPU_LOG_VERBOSE) {
std::cout << std::endl;
for (int i = 0; i < cols * k; i++) {
std::cout << centroids[i] << " ";
if (i % cols == 1) {
std::cout << std::endl;
}
}
}
int n_gpu;
std::vector<int> dList = kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows);
thrust::device_vector<T> *d_data[n_gpu];
thrust::device_vector<int> *d_labels[n_gpu];
thrust::device_vector<T> *d_centroids[n_gpu];
thrust::device_vector<T> *pairwise_distances[n_gpu];
thrust::device_vector<T> *data_dots[n_gpu];
thrust::device_vector<T> *centroid_dots[n_gpu];
thrust::device_vector<T> *distances[n_gpu];
int *d_changes[n_gpu];
for (int q = 0; q < n_gpu; q++) {
// TODO everything from here till "distances[q]" is exactly the same as in transform
CUDACHECK(cudaSetDevice(dList[q]));
kmeans::detail::labels_init();
data_dots[q] = new thrust::device_vector<T>(rows / n_gpu);
centroid_dots[q] = new thrust::device_vector<T>(k);
pairwise_distances[q] = new thrust::device_vector<T>(rows / n_gpu * k);
d_centroids[q] = new thrust::device_vector<T>(k * cols);
d_data[q] = new thrust::device_vector<T>(rows / n_gpu * cols);
copy_data(verbose, 'r', *d_centroids[q], &centroids[0], 0, k, k, cols);
copy_data(verbose, ord, *d_data[q], &srcdata[0], q, rows, rows / n_gpu, cols);
kmeans::detail::make_self_dots(rows / n_gpu, cols, *d_data[q], *data_dots[q]);
kmeans::detail::calculate_distances(verbose, q, rows / n_gpu, cols, k,
*d_data[q], *d_centroids[q], *data_dots[q],
*centroid_dots[q], *pairwise_distances[q]);
distances[q] = new thrust::device_vector<T>(rows / n_gpu);
d_labels[q] = new thrust::device_vector<int>(rows / n_gpu);
cudaMalloc(&d_changes[q], sizeof(int));
kmeans::detail::relabel(rows / n_gpu, k, *pairwise_distances[q], *d_labels[q], *distances[q], d_changes[q]);
}
// Move the resulting labels into host memory from all devices
thrust::host_vector<int> *h_labels = new thrust::host_vector<int>(0);
for (int q = 0; q < n_gpu; q++) {
h_labels->insert(h_labels->end(), d_labels[q]->begin(), d_labels[q]->end());
}
*pred_labels = h_labels->data();
for (int q = 0; q < n_gpu; q++) {
safe_cuda(cudaSetDevice(dList[q]));
safe_cuda(cudaFree(d_changes[q]));
kmeans::detail::labels_close();
delete (d_labels[q]);
delete (pairwise_distances[q]);
delete (data_dots[q]);
delete (centroid_dots[q]);
delete (d_centroids[q]);
delete (d_data[q]);
delete (distances[q]);
}
return 0;
}
template<typename T>
int kmeans_transform(int verbose,
int gpu_idtry, int n_gputry,
size_t rows, size_t cols, const char ord, int k,
const T *srcdata, const T *centroids,
void **preds) {
// Print centroids
if (verbose >= H2O4GPU_LOG_VERBOSE) {
std::cout << std::endl;
for (int i = 0; i < cols * k; i++) {
std::cout << centroids[i] << " ";
if (i % cols == 1) {
std::cout << std::endl;
}
}
}
int n_gpu;
std::vector<int> dList = kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows);
thrust::device_vector<T> *d_data[n_gpu];
thrust::device_vector<T> *d_centroids[n_gpu];
thrust::device_vector<T> *d_pairwise_distances[n_gpu];
thrust::device_vector<T> *data_dots[n_gpu];
thrust::device_vector<T> *centroid_dots[n_gpu];
for (int q = 0; q < n_gpu; q++) {
CUDACHECK(cudaSetDevice(dList[q]));
kmeans::detail::labels_init();
data_dots[q] = new thrust::device_vector<T>(rows / n_gpu);
centroid_dots[q] = new thrust::device_vector<T>(k);
d_pairwise_distances[q] = new thrust::device_vector<T>(rows / n_gpu * k);
d_centroids[q] = new thrust::device_vector<T>(k * cols);
d_data[q] = new thrust::device_vector<T>(rows / n_gpu * cols);
copy_data(verbose, 'r', *d_centroids[q], &centroids[0], 0, k, k, cols);
copy_data(verbose, ord, *d_data[q], &srcdata[0], q, rows, rows / n_gpu, cols);
kmeans::detail::make_self_dots(rows / n_gpu, cols, *d_data[q], *data_dots[q]);
kmeans::detail::calculate_distances(verbose, q, rows / n_gpu, cols, k,
*d_data[q], *d_centroids[q], *data_dots[q],
*centroid_dots[q], *d_pairwise_distances[q]);
}
// Move the resulting labels into host memory from all devices
thrust::host_vector<T> *h_pairwise_distances = new thrust::host_vector<T>(0);
for (int q = 0; q < n_gpu; q++) {
h_pairwise_distances->insert(h_pairwise_distances->end(),
d_pairwise_distances[q]->begin(),
d_pairwise_distances[q]->end());
}
*preds = h_pairwise_distances->data();
// Print centroids
if (verbose >= H2O4GPU_LOG_VERBOSE) {
std::cout << std::endl;
for (int i = 0; i < rows * k; i++) { // pairwise distances are rows x k
std::cout << h_pairwise_distances->data()[i] << " ";
if (i % k == 1) {
std::cout << std::endl;
}
}
}
for (int q = 0; q < n_gpu; q++) {
safe_cuda(cudaSetDevice(dList[q]));
kmeans::detail::labels_close();
delete (d_pairwise_distances[q]);
delete (data_dots[q]);
delete (centroid_dots[q]);
delete (d_centroids[q]);
delete (d_data[q]);
}
return 0;
}
template<typename T>
int makePtr_dense(int dopredict, int verbose, int seed, int gpu_idtry, int n_gputry, size_t rows, size_t cols,
const char ord, int k, int max_iterations, int init_from_data, int init_data,
T threshold, const T *srcdata, const T *centroids,
void **pred_centroids, void **pred_labels) {
if (dopredict == 0) {
return kmeans_fit(verbose, seed, gpu_idtry, n_gputry, rows, cols,
ord, k, max_iterations, init_from_data, init_data, threshold,
srcdata, pred_centroids, pred_labels);
} else {
return kmeans_predict(verbose, gpu_idtry, n_gputry, rows, cols,
ord, k,
srcdata, centroids, pred_labels);
}
}
template int
makePtr_dense<float>(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t rows, size_t cols,
const char ord, int k, int max_iterations, int init_from_data,
int init_data, float threshold, const float *srcdata,
const float *centroids, void **pred_centroids, void **pred_labels);
template int
makePtr_dense<double>(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t rows, size_t cols,
const char ord, int k, int max_iterations, int init_from_data,
int init_data, double threshold, const double *srcdata,
const double *centroids, void **pred_centroids, void **pred_labels);
template int kmeans_fit<float>(int verbose, int seed, int gpu_idtry, int n_gputry,
size_t rows, size_t cols,
const char ord, int k, int max_iterations,
int init_from_data, int init_data, float threshold,
const float *srcdata,
void **pred_centroids, void **pred_labels);
template int kmeans_fit<double>(int verbose, int seed, int gpu_idtry, int n_gputry,
size_t rows, size_t cols,
const char ord, int k, int max_iterations,
int init_from_data, int init_data, double threshold,
const double *srcdata,
void **pred_centroids, void **pred_labels);
template int kmeans_predict<float>(int verbose, int gpu_idtry, int n_gputry,
size_t rows, size_t cols,
const char ord, int k,
const float *srcdata, const float *centroids, void **pred_labels);
template int kmeans_predict<double>(int verbose, int gpu_idtry, int n_gputry,
size_t rows, size_t cols,
const char ord, int k,
const double *srcdata, const double *centroids, void **pred_labels);
template int kmeans_transform<float>(int verbose,
int gpu_id, int n_gpu,
size_t m, size_t n, const char ord, int k,
const float *src_data, const float *centroids,
void **preds);
template int kmeans_transform<double>(int verbose,
int gpu_id, int n_gpu,
size_t m, size_t n, const char ord, int k,
const double *src_data, const double *centroids,
void **preds);
// Explicit template instantiation.
#if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE == 1
template
class H2O4GPUKMeans<double>;
#endif
#if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE == 1
template
class H2O4GPUKMeans<float>;
#endif
} // namespace h2o4gpukmeans
#ifdef __cplusplus
extern "C" {
#endif
/*
* Interface for other languages
*/
// Fit and Predict
int make_ptr_float_kmeans(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t mTrain, size_t n,
const char ord, int k, int max_iterations, int init_from_data,
int init_data, float threshold, const float *srcdata,
const float *centroids, void **pred_centroids, void **pred_labels) {
return h2o4gpukmeans::makePtr_dense<float>(dopredict, verbose, seed, gpu_id, n_gpu, mTrain, n, ord, k,
max_iterations, init_from_data, init_data, threshold,
srcdata, centroids, pred_centroids, pred_labels);
}
int make_ptr_double_kmeans(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t mTrain, size_t n,
const char ord, int k, int max_iterations, int init_from_data,
int init_data, double threshold, const double *srcdata,
const double *centroids, void **pred_centroids, void **pred_labels) {
return h2o4gpukmeans::makePtr_dense<double>(dopredict, verbose, seed, gpu_id, n_gpu, mTrain, n, ord, k,
max_iterations, init_from_data, init_data, threshold,
srcdata, centroids, pred_centroids, pred_labels);
}
// Transform
int kmeans_transform_float(int verbose,
int gpu_id, int n_gpu,
size_t m, size_t n, const char ord, int k,
const float *src_data, const float *centroids,
void **preds) {
return h2o4gpukmeans::kmeans_transform<float>(verbose, gpu_id, n_gpu, m, n, ord, k, src_data, centroids, preds);
}
int kmeans_transform_double(int verbose,
int gpu_id, int n_gpu,
size_t m, size_t n, const char ord, int k,
const double *src_data, const double *centroids,
void **preds) {
return h2o4gpukmeans::kmeans_transform<double>(verbose, gpu_id, n_gpu, m, n, ord, k, src_data, centroids, preds);
}
#ifdef __cplusplus
}
#endif
|
6f5588fea982d535064f6e8026238862d464346f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <cuml/manifold/tsne.h>
#include <common/device_buffer.hpp>
#include <cuml/common/logger.hpp>
#include "distances.cuh"
#include "exact_kernels.cuh"
#include "utils.cuh"
#include "barnes_hut.cuh"
#include "exact_tsne.cuh"
namespace ML {
void TSNE_fit(const cumlHandle &handle, const float *X, float *Y, const int n,
const int p, const int dim, int n_neighbors, const float theta,
const float epssq, float perplexity,
const int perplexity_max_iter, const float perplexity_tol,
const float early_exaggeration, const int exaggeration_iter,
const float min_gain, const float pre_learning_rate,
const float post_learning_rate, const int max_iter,
const float min_grad_norm, const float pre_momentum,
const float post_momentum, const long long random_state,
int verbosity, const bool intialize_embeddings, bool barnes_hut) {
ASSERT(n > 0 && p > 0 && dim > 0 && n_neighbors > 0 && X != NULL && Y != NULL,
"Wrong input args");
ML::Logger::get().setLevel(verbosity);
if (dim > 2 and barnes_hut) {
barnes_hut = false;
CUML_LOG_WARN(
"Barnes Hut only works for dim == 2. Switching to exact solution.");
}
if (n_neighbors > n) n_neighbors = n;
if (n_neighbors > 1023) {
CUML_LOG_WARN("FAISS only supports maximum n_neighbors = 1023.");
n_neighbors = 1023;
}
// Perplexity must be less than number of datapoints
// "How to Use t-SNE Effectively" https://distill.pub/2016/misread-tsne/
if (perplexity > n) perplexity = n;
CUML_LOG_DEBUG("Data size = (%d, %d) with dim = %d perplexity = %f", n, p,
dim, perplexity);
if (perplexity < 5 or perplexity > 50)
CUML_LOG_WARN(
"Perplexity should be within ranges (5, 50). Your results might be a"
" bit strange...");
if (n_neighbors < perplexity * 3.0f)
CUML_LOG_WARN(
"# of Nearest Neighbors should be at least 3 * perplexity. Your results"
" might be a bit strange...");
auto d_alloc = handle.getDeviceAllocator();
hipStream_t stream = handle.getStream();
START_TIMER;
//---------------------------------------------------
// Get distances
CUML_LOG_DEBUG("Getting distances.");
MLCommon::device_buffer<float> distances(d_alloc, stream, n * n_neighbors);
MLCommon::device_buffer<long> indices(d_alloc, stream, n * n_neighbors);
TSNE::get_distances(X, n, p, indices.data(), distances.data(), n_neighbors,
d_alloc, stream);
//---------------------------------------------------
END_TIMER(DistancesTime);
START_TIMER;
//---------------------------------------------------
// Normalize distances
CUML_LOG_DEBUG("Now normalizing distances so exp(D) doesn't explode.");
TSNE::normalize_distances(n, distances.data(), n_neighbors, stream);
//---------------------------------------------------
END_TIMER(NormalizeTime);
START_TIMER;
//---------------------------------------------------
// Optimal perplexity
CUML_LOG_DEBUG("Searching for optimal perplexity via bisection search.");
MLCommon::device_buffer<float> P(d_alloc, stream, n * n_neighbors);
const float P_sum = TSNE::perplexity_search(
distances.data(), P.data(), perplexity, perplexity_max_iter, perplexity_tol,
n, n_neighbors, handle);
distances.release(stream);
CUML_LOG_DEBUG("Perplexity sum = %f", P_sum);
//---------------------------------------------------
END_TIMER(PerplexityTime);
START_TIMER;
//---------------------------------------------------
// Convert data to COO layout
MLCommon::Sparse::COO<float> COO_Matrix(d_alloc, stream);
TSNE::symmetrize_perplexity(P.data(), indices.data(), n, n_neighbors, P_sum,
early_exaggeration, &COO_Matrix, stream, handle);
P.release(stream);
indices.release(stream);
const int NNZ = COO_Matrix.nnz;
float *VAL = COO_Matrix.vals();
const int *COL = COO_Matrix.cols();
const int *ROW = COO_Matrix.rows();
//---------------------------------------------------
END_TIMER(SymmetrizeTime);
if (barnes_hut) {
TSNE::Barnes_Hut(VAL, COL, ROW, NNZ, handle, Y, n, theta, epssq,
early_exaggeration, exaggeration_iter, min_gain,
pre_learning_rate, post_learning_rate, max_iter,
min_grad_norm, pre_momentum, post_momentum, random_state);
} else {
TSNE::Exact_TSNE(VAL, COL, ROW, NNZ, handle, Y, n, dim, early_exaggeration,
exaggeration_iter, min_gain, pre_learning_rate,
post_learning_rate, max_iter, min_grad_norm, pre_momentum,
post_momentum, random_state, intialize_embeddings);
}
}
} // namespace ML
| 6f5588fea982d535064f6e8026238862d464346f.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <cuml/manifold/tsne.h>
#include <common/device_buffer.hpp>
#include <cuml/common/logger.hpp>
#include "distances.cuh"
#include "exact_kernels.cuh"
#include "utils.cuh"
#include "barnes_hut.cuh"
#include "exact_tsne.cuh"
namespace ML {
void TSNE_fit(const cumlHandle &handle, const float *X, float *Y, const int n,
const int p, const int dim, int n_neighbors, const float theta,
const float epssq, float perplexity,
const int perplexity_max_iter, const float perplexity_tol,
const float early_exaggeration, const int exaggeration_iter,
const float min_gain, const float pre_learning_rate,
const float post_learning_rate, const int max_iter,
const float min_grad_norm, const float pre_momentum,
const float post_momentum, const long long random_state,
int verbosity, const bool intialize_embeddings, bool barnes_hut) {
ASSERT(n > 0 && p > 0 && dim > 0 && n_neighbors > 0 && X != NULL && Y != NULL,
"Wrong input args");
ML::Logger::get().setLevel(verbosity);
if (dim > 2 and barnes_hut) {
barnes_hut = false;
CUML_LOG_WARN(
"Barnes Hut only works for dim == 2. Switching to exact solution.");
}
if (n_neighbors > n) n_neighbors = n;
if (n_neighbors > 1023) {
CUML_LOG_WARN("FAISS only supports maximum n_neighbors = 1023.");
n_neighbors = 1023;
}
// Perplexity must be less than number of datapoints
// "How to Use t-SNE Effectively" https://distill.pub/2016/misread-tsne/
if (perplexity > n) perplexity = n;
CUML_LOG_DEBUG("Data size = (%d, %d) with dim = %d perplexity = %f", n, p,
dim, perplexity);
if (perplexity < 5 or perplexity > 50)
CUML_LOG_WARN(
"Perplexity should be within ranges (5, 50). Your results might be a"
" bit strange...");
if (n_neighbors < perplexity * 3.0f)
CUML_LOG_WARN(
"# of Nearest Neighbors should be at least 3 * perplexity. Your results"
" might be a bit strange...");
auto d_alloc = handle.getDeviceAllocator();
cudaStream_t stream = handle.getStream();
START_TIMER;
//---------------------------------------------------
// Get distances
CUML_LOG_DEBUG("Getting distances.");
MLCommon::device_buffer<float> distances(d_alloc, stream, n * n_neighbors);
MLCommon::device_buffer<long> indices(d_alloc, stream, n * n_neighbors);
TSNE::get_distances(X, n, p, indices.data(), distances.data(), n_neighbors,
d_alloc, stream);
//---------------------------------------------------
END_TIMER(DistancesTime);
START_TIMER;
//---------------------------------------------------
// Normalize distances
CUML_LOG_DEBUG("Now normalizing distances so exp(D) doesn't explode.");
TSNE::normalize_distances(n, distances.data(), n_neighbors, stream);
//---------------------------------------------------
END_TIMER(NormalizeTime);
START_TIMER;
//---------------------------------------------------
// Optimal perplexity
CUML_LOG_DEBUG("Searching for optimal perplexity via bisection search.");
MLCommon::device_buffer<float> P(d_alloc, stream, n * n_neighbors);
const float P_sum = TSNE::perplexity_search(
distances.data(), P.data(), perplexity, perplexity_max_iter, perplexity_tol,
n, n_neighbors, handle);
distances.release(stream);
CUML_LOG_DEBUG("Perplexity sum = %f", P_sum);
//---------------------------------------------------
END_TIMER(PerplexityTime);
START_TIMER;
//---------------------------------------------------
// Convert data to COO layout
MLCommon::Sparse::COO<float> COO_Matrix(d_alloc, stream);
TSNE::symmetrize_perplexity(P.data(), indices.data(), n, n_neighbors, P_sum,
early_exaggeration, &COO_Matrix, stream, handle);
P.release(stream);
indices.release(stream);
const int NNZ = COO_Matrix.nnz;
float *VAL = COO_Matrix.vals();
const int *COL = COO_Matrix.cols();
const int *ROW = COO_Matrix.rows();
//---------------------------------------------------
END_TIMER(SymmetrizeTime);
if (barnes_hut) {
TSNE::Barnes_Hut(VAL, COL, ROW, NNZ, handle, Y, n, theta, epssq,
early_exaggeration, exaggeration_iter, min_gain,
pre_learning_rate, post_learning_rate, max_iter,
min_grad_norm, pre_momentum, post_momentum, random_state);
} else {
TSNE::Exact_TSNE(VAL, COL, ROW, NNZ, handle, Y, n, dim, early_exaggeration,
exaggeration_iter, min_gain, pre_learning_rate,
post_learning_rate, max_iter, min_grad_norm, pre_momentum,
post_momentum, random_state, intialize_embeddings);
}
}
} // namespace ML
|
bfa620cfbc279c2cda3dd669bd8de841b116df06.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <cstdlib>
#include <ctime>
#include "opencv2/opencv.hpp"
#include "helper_functions.h"
#include <sys/resource.h>
using namespace std;
using namespace cv;
struct gaussian g[height][width][K][3];
float w[height][width][K];
int foreground[height][width];
int size_obj_f[width*height];
int x_obj[width*height];
int y_obj[width*height];
int h_i[height][width][3];
KalmanFilter kf_obj[height*width];
int size_obj[height*width];
int num_obj = 0;
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
//
// __global__ kernel for Gaussian
//
__global__ void foreground_g(int *d_imageArray, struct gaussian *d_g, float *d_w, int *d_f, int w, int h) {
unsigned int i = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
if(!(i < h && j < w)) return; //do nothing if indices are invalid
unsigned int idx = ((i * w) + j);
int p_r = d_imageArray[idx * 3]; // frame.at<cv::Vec3b>(i, j)[0];
int p_g = d_imageArray[idx * 3 + 1]; // frame.at<cv::Vec3b>(i, j)[1];
int p_b = d_imageArray[idx * 3 + 2]; // frame.at<cv::Vec3b>(i, j)[2];
float min = -1;
int min_ind = -1;
for (int k=0; k<K; k++) {
float z_r = getZ(p_r, d_g[idx * K * 3 + k * 3] /*g[i][j][k][0]*/);
float z_g = getZ(p_g, d_g[idx * K * 3 + k * 3 + 1] /*g[i][j][k][1]*/);
float z_b = getZ(p_b, d_g[idx * K * 3 + k * 3 + 2] /*g[i][j][k][2]*/);
float z_sum = z_r + z_g + z_b;
if (min == -1 || min > z_sum) {
min = z_sum;
min_ind = k;
}
}
if (min > DEVIATION_SQ_THRESH) {
min_ind = -1;
}
for (int k=0; k<K; k++) {
if (k == min_ind) {
d_w[idx * K + k] /* w[i][j][k] */ = update_weight( d_w[idx * K + k] /* w[i][j][k] */, L_A, 1);
update_distribution(p_r, &d_g[idx * K * 3 + k * 3] /* g[i][j][k][0] */);
update_distribution(p_g, &d_g[idx * K * 3 + k * 3 + 1] /* g[i][j][k][1] */);
update_distribution(p_b, &d_g[idx * K * 3 + k * 3 + 2] /* g[i][j][k][2] */);
}else{
d_w[idx * K + k] /* w[i][j][k] */ = update_weight(d_w[idx * K + k] /* w[i][j][k] */, L_A, 0);
}
}
if (min_ind == -1) {
min = -1;
for (int k=0; k<K; k++) {
if (min == -1 || min > d_w[idx * K + k] /* w[i][j][k] */) { // replacement policy can be changed
min = d_w[idx * K + k]; // w[i][j][k];
min_ind = k;
}
}
d_g[idx * K * 3 + min_ind * 3].mean /* g[i][j][min_ind][0].mean */ = p_r;
d_g[idx * K * 3 + min_ind * 3].variance /* g[i][j][min_ind][0].variance */ = INIT_VARIANCE;
d_g[idx * K * 3 + min_ind * 3 + 1].mean /* g[i][j][min_ind][1].mean */ = p_g;
d_g[idx * K * 3 + min_ind * 3 + 1].variance /* g[i][j][min_ind][1].variance */ = INIT_VARIANCE;
d_g[idx * K * 3 + min_ind * 3 + 2].mean /* g[i][j][min_ind][2].mean */ = p_b;
d_g[idx * K * 3 + min_ind * 3 + 2].variance /* g[i][j][min_ind][2].variance */ = INIT_VARIANCE;
d_w[idx * K + min_ind] /* w[i][j][min_ind] */ = INIT_MIXPROP;
}
// renormalized weight
float sum = 0;
for (int k=0; k<K; k++) {
sum += d_w[idx * K + k]; /* w[i][j][k] */
}
for (int k=0; k<K; k++) {
d_w[idx * K + k] = d_w[idx * K + k] / sum; // w[i][j][k] = w[i][j][k] / sum;
}
if (is_background(min_ind, &d_w[idx * K] /*w[i][j]*/, &d_g[idx * K * 3] /*g[i][j]*/)){
// background
d_f[idx] = 0; // foreground[i][j] = 0;
//frame.at<cv::Vec3b>(i, j)[0] = 0;
//frame.at<cv::Vec3b>(i, j)[1] = 0;
//frame.at<cv::Vec3b>(i, j)[2] = 0;
} else {
// foreground
// change to black dot
d_f[idx] = -1; // foreground[i][j] = -1;
}
}
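/*
 * Layout note (derived from the indexing above): the per-pixel mixture state
 * is flattened as
 *   d_g[((i * w + j) * K + k) * 3 + c]   (c = 0,1,2 for the colour channels)
 *   d_w[(i * w + j) * K + k]
 * mirroring the host arrays g[i][j][k][c] and w[i][j][k]. For example, with
 * w = 320, pixel (i = 1, j = 2) and component k = 1, idx = 322 and the green
 * channel's gaussian lives at d_g[322 * K * 3 + 1 * 3 + 1].
 */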
int main(int argc, char **argv)
{
VideoCapture cap; // open the default camera
if(argc == 1) {
cap.open(0);
} else {
cap.open(argv[1]);
}
if(!cap.isOpened()) // check if we succeeded
return -1;
vector<int> compression_params;
compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
compression_params.push_back(9);
vector<Mat> frames;
cap.set(CV_CAP_PROP_FRAME_WIDTH, width);
cap.set(CV_CAP_PROP_FRAME_HEIGHT, height);
Mat frame0;
cap >> frame0;
srand(time(0));
for (int i=0; i<height; i++) {
for (int j=0; j<width; j++) {
for (int k=0; k<K; k++) {
g[i][j][k][0].mean = rand() % 256;
g[i][j][k][1].mean = rand() % 256;
g[i][j][k][2].mean = rand() % 256;
g[i][j][k][0].variance = INIT_VARIANCE;
g[i][j][k][1].variance = INIT_VARIANCE;
g[i][j][k][2].variance = INIT_VARIANCE;
w[i][j][k] = 0;
}
g[i][j][0][0].mean = frame0.at<cv::Vec3b>(i, j)[0];
g[i][j][0][1].mean = frame0.at<cv::Vec3b>(i, j)[1];
g[i][j][0][2].mean = frame0.at<cv::Vec3b>(i, j)[2];
w[i][j][0] = 1;
}
}
Mat frame;
hipStream_t stream0, stream1;
// data copy of gaussian
struct gaussian *d_g;
CUDA_CHECK_RETURN(hipMalloc((void **) &d_g, sizeof(struct gaussian) * 3 * width * height * K));
CUDA_CHECK_RETURN(hipMemcpy(d_g, g, sizeof(struct gaussian) * 3 * width * height * K, hipMemcpyHostToDevice));
// data copy of weight
float *d_w;
CUDA_CHECK_RETURN(hipMalloc((void **) &d_w, sizeof(float) * K * width * height));
CUDA_CHECK_RETURN(hipMemcpy(d_w, w, sizeof(float) * K * width * height, hipMemcpyHostToDevice));
// data allocation of foreground
int *d_f;
CUDA_CHECK_RETURN(hipMalloc((void **) &d_f, sizeof(int) * width * height));
// data allocation of image
int *d_frame;
CUDA_CHECK_RETURN(hipMalloc((void **) &d_frame, sizeof(int) * width * height * 3));
for(;;)
{
cap >> frame; // get a new frame from camera
// data copy of image
for (int i=0; i<height; i++) {
for (int j=0; j<width; j++) {
h_i[i][j][0] = frame.at<cv::Vec3b>(i, j)[0];
h_i[i][j][1] = frame.at<cv::Vec3b>(i, j)[1];
h_i[i][j][2] = frame.at<cv::Vec3b>(i, j)[2];
}
}
CUDA_CHECK_RETURN(hipMemcpy(d_frame, h_i, sizeof(int) * width * height * 3, hipMemcpyHostToDevice));
// kernel launch
// define grid and block dimensions
dim3 dimBlock(32, 32);
dim3 dimGrid(ceil(width/(double)dimBlock.x), ceil(height/(double)dimBlock.y));
// kernel launch
hipLaunchKernelGGL(( foreground_g), dim3(dimGrid), dim3(dimBlock), 0, 0, d_frame, d_g, d_w, d_f, width, height);
CUDA_CHECK_RETURN(hipDeviceSynchronize()); // Wait for the GPU launched work to complete
CUDA_CHECK_RETURN(hipGetLastError());
// data copy back
CUDA_CHECK_RETURN(hipMemcpy(foreground, d_f, sizeof(int) * width * height, hipMemcpyDeviceToHost));
int num_obj_f = 0;
for (int i=0; i<height; i++) {
for (int j=0; j<width; j++) {
if (foreground[i][j] == -1) {
num_obj_f++;
foreground[i][j] = num_obj_f;
y_obj[num_obj_f-1] = 0;
x_obj[num_obj_f-1] = 0;
size_obj_f[num_obj_f-1] = connected_component(i, j, foreground,
y_obj[num_obj_f-1], x_obj[num_obj_f-1]);
y_obj[num_obj_f-1] = y_obj[num_obj_f-1] / size_obj_f[num_obj_f-1];
x_obj[num_obj_f-1] = x_obj[num_obj_f-1] / size_obj_f[num_obj_f-1]; // average accumulated x over the component size
}
}
}
for (int k=0; k<num_obj_f; k++) {
if (size_obj_f[k] >= COMPONENT_THRESH) {
int min_dis = DISTANCE_THRESH;
int min_i = -1;
for (int i=0; i<num_obj; i++) {
int kf_p_x;
int kf_p_y;
kalman_predict(kf_obj[i], kf_p_y, kf_p_x);
if (get_distance(x_obj[k] - kf_p_x, y_obj[k] - kf_p_y) < min_dis &&
(size_obj[i] - size_obj_f[k]) * (size_obj[i] - size_obj_f[k]) < SIZE_THRESH) {
min_dis = get_distance(x_obj[k] - kf_p_x, y_obj[k] - kf_p_y);
min_i = i;
}
}
if (min_i == -1) {
size_obj[num_obj] = size_obj_f[k];
kf_obj[num_obj++] = kalman_init(y_obj[k], x_obj[k]);
min_i = num_obj-1;
}else{
size_obj[min_i] = size_obj_f[k];
kalman_update(kf_obj[min_i], y_obj[k], x_obj[k]);
}
int min_x=width, min_y=height, max_x=0, max_y=0;
for (int i=0; i<height; i++) {
for (int j=0; j<width; j++) {
if (foreground[i][j] == k+1) {
if (min_x > j) min_x = j;
if (min_y > i) min_y = i;
if (max_x < j) max_x = j;
if (max_y < i) max_y = i;
/*
int color = 256*256*256/(min_i+1);
frame.at<cv::Vec3b>(i, j)[0] = color/(256*256);
frame.at<cv::Vec3b>(i, j)[1] = color/256%256;
frame.at<cv::Vec3b>(i, j)[2] = color%256;
*/
}
}
}
for (int i=min_x; i<=max_x; i++) {
frame.at<cv::Vec3b>(min_y, i)[0] = 255;
frame.at<cv::Vec3b>(min_y, i)[1] = 255;
frame.at<cv::Vec3b>(min_y, i)[2] = 255;
frame.at<cv::Vec3b>(min_y, i)[min_i % 3] = 0;
frame.at<cv::Vec3b>(max_y, i)[0] = 255;
frame.at<cv::Vec3b>(max_y, i)[1] = 255;
frame.at<cv::Vec3b>(max_y, i)[2] = 255;
frame.at<cv::Vec3b>(max_y, i)[min_i % 3] = 0;
}
for (int i=min_y; i<=max_y; i++) {
frame.at<cv::Vec3b>(i, min_x)[0] = 255;
frame.at<cv::Vec3b>(i, min_x)[1] = 255;
frame.at<cv::Vec3b>(i, min_x)[2] = 255;
frame.at<cv::Vec3b>(i, min_x)[min_i % 3] = 0;
frame.at<cv::Vec3b>(i, max_x)[0] = 255;
frame.at<cv::Vec3b>(i, max_x)[1] = 255;
frame.at<cv::Vec3b>(i, max_x)[2] = 255;
frame.at<cv::Vec3b>(i, max_x)[min_i % 3] = 0;
}
}
}
cv::imshow("something", frame);
waitKey(1);
/*
sprintf(filename, "im%d.png", frameIndex++);
try {
imwrite(filename, frame, compression_params);
}
catch (runtime_error& ex) {
fprintf(stderr, "Exception converting image to PNG format: %s\n", ex.what());
return 1;
}
*/
}
return 0;
}
| bfa620cfbc279c2cda3dd669bd8de841b116df06.cu | #include <vector>
#include <cstdlib>
#include <ctime>
#include "opencv2/opencv.hpp"
#include "helper_functions.h"
#include <sys/resource.h>
using namespace std;
using namespace cv;
struct gaussian g[height][width][K][3];
float w[height][width][K];
int foreground[height][width];
int size_obj_f[width*height];
int x_obj[width*height];
int y_obj[width*height];
int h_i[height][width][3];
KalmanFilter kf_obj[height*width];
int size_obj[height*width];
int num_obj = 0;
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
//
// __global__ kernel for Gaussian
//
__global__ void foreground_g(int *d_imageArray, struct gaussian *d_g, float *d_w, int *d_f, int w, int h) {
unsigned int i = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
if(!(i < h && j < w)) return; //do nothing if indices are invalid
unsigned int idx = ((i * w) + j);
int p_r = d_imageArray[idx * 3]; // frame.at<cv::Vec3b>(i, j)[0];
int p_g = d_imageArray[idx * 3 + 1]; // frame.at<cv::Vec3b>(i, j)[1];
int p_b = d_imageArray[idx * 3 + 2]; // frame.at<cv::Vec3b>(i, j)[2];
float min = -1;
int min_ind = -1;
for (int k=0; k<K; k++) {
float z_r = getZ(p_r, d_g[idx * K * 3 + k * 3] /*g[i][j][k][0]*/);
float z_g = getZ(p_g, d_g[idx * K * 3 + k * 3 + 1] /*g[i][j][k][1]*/);
float z_b = getZ(p_b, d_g[idx * K * 3 + k * 3 + 2] /*g[i][j][k][2]*/);
float z_sum = z_r + z_g + z_b;
if (min == -1 || min > z_sum) {
min = z_sum;
min_ind = k;
}
}
if (min > DEVIATION_SQ_THRESH) {
min_ind = -1;
}
for (int k=0; k<K; k++) {
if (k == min_ind) {
d_w[idx * K + k] /* w[i][j][k] */ = update_weight( d_w[idx * K + k] /* w[i][j][k] */, L_A, 1);
update_distribution(p_r, &d_g[idx * K * 3 + k * 3] /* g[i][j][k][0] */);
update_distribution(p_g, &d_g[idx * K * 3 + k * 3 + 1] /* g[i][j][k][1] */);
update_distribution(p_b, &d_g[idx * K * 3 + k * 3 + 2] /* g[i][j][k][2] */);
}else{
d_w[idx * K + k] /* w[i][j][k] */ = update_weight(d_w[idx * K + k] /* w[i][j][k] */, L_A, 0);
}
}
if (min_ind == -1) {
min = -1;
for (int k=0; k<K; k++) {
if (min == -1 || min > d_w[idx * K + k] /* w[i][j][k] */) { // replacement policy can be changed
min = d_w[idx * K + k]; // w[i][j][k];
min_ind = k;
}
}
d_g[idx * K * 3 + min_ind * 3].mean /* g[i][j][min_ind][0].mean */ = p_r;
d_g[idx * K * 3 + min_ind * 3].variance /* g[i][j][min_ind][0].variance */ = INIT_VARIANCE;
d_g[idx * K * 3 + min_ind * 3 + 1].mean /* g[i][j][min_ind][1].mean */ = p_g;
d_g[idx * K * 3 + min_ind * 3 + 1].variance /* g[i][j][min_ind][1].variance */ = INIT_VARIANCE;
d_g[idx * K * 3 + min_ind * 3 + 2].mean /* g[i][j][min_ind][2].mean */ = p_b;
d_g[idx * K * 3 + min_ind * 3 + 2].variance /* g[i][j][min_ind][2].variance */ = INIT_VARIANCE;
d_w[idx * K + min_ind] /* w[i][j][min_ind] */ = INIT_MIXPROP;
}
// renormalized weight
float sum = 0;
for (int k=0; k<K; k++) {
sum += d_w[idx * K + k]; /* w[i][j][k] */
}
for (int k=0; k<K; k++) {
d_w[idx * K + k] = d_w[idx * K + k] / sum; // w[i][j][k] = w[i][j][k] / sum;
}
if (is_background(min_ind, &d_w[idx * K] /*w[i][j]*/, &d_g[idx * K * 3] /*g[i][j]*/)){
// background
d_f[idx] = 0; // foreground[i][j] = 0;
//frame.at<cv::Vec3b>(i, j)[0] = 0;
//frame.at<cv::Vec3b>(i, j)[1] = 0;
//frame.at<cv::Vec3b>(i, j)[2] = 0;
} else {
// foreground
// change to black dot
d_f[idx] = -1; // foreground[i][j] = -1;
}
}
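//
// Host-side reference of the per-pixel weight renormalization performed near
// the end of foreground_g, shown for illustration only (not called in this
// file; the name is illustrative).
//
static void renormalize_weights_host(float *weights, int k_count)
{
    float sum = 0.0f;
    for (int k = 0; k < k_count; k++) sum += weights[k];
    for (int k = 0; k < k_count; k++) weights[k] /= sum; // mixture weights now sum to 1
}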
int main(int argc, char **argv)
{
VideoCapture cap; // open the default camera
if(argc == 1) {
cap.open(0);
} else {
cap.open(argv[1]);
}
if(!cap.isOpened()) // check if we succeeded
return -1;
vector<int> compression_params;
compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
compression_params.push_back(9);
vector<Mat> frames;
cap.set(CV_CAP_PROP_FRAME_WIDTH, width);
cap.set(CV_CAP_PROP_FRAME_HEIGHT, height);
Mat frame0;
cap >> frame0;
srand(time(0));
for (int i=0; i<height; i++) {
for (int j=0; j<width; j++) {
for (int k=0; k<K; k++) {
g[i][j][k][0].mean = rand() % 256;
g[i][j][k][1].mean = rand() % 256;
g[i][j][k][2].mean = rand() % 256;
g[i][j][k][0].variance = INIT_VARIANCE;
g[i][j][k][1].variance = INIT_VARIANCE;
g[i][j][k][2].variance = INIT_VARIANCE;
w[i][j][k] = 0;
}
g[i][j][0][0].mean = frame0.at<cv::Vec3b>(i, j)[0];
g[i][j][0][1].mean = frame0.at<cv::Vec3b>(i, j)[1];
g[i][j][0][2].mean = frame0.at<cv::Vec3b>(i, j)[2];
w[i][j][0] = 1;
}
}
Mat frame;
cudaStream_t stream0, stream1;
// data copy of gaussian
struct gaussian *d_g;
CUDA_CHECK_RETURN(cudaMalloc((void **) &d_g, sizeof(struct gaussian) * 3 * width * height * K));
CUDA_CHECK_RETURN(cudaMemcpy(d_g, g, sizeof(struct gaussian) * 3 * width * height * K, cudaMemcpyHostToDevice));
// data copy of weight
float *d_w;
CUDA_CHECK_RETURN(cudaMalloc((void **) &d_w, sizeof(float) * K * width * height));
CUDA_CHECK_RETURN(cudaMemcpy(d_w, w, sizeof(float) * K * width * height, cudaMemcpyHostToDevice));
// data allocation of foreground
int *d_f;
CUDA_CHECK_RETURN(cudaMalloc((void **) &d_f, sizeof(int) * width * height));
// data allocation of image
int *d_frame;
CUDA_CHECK_RETURN(cudaMalloc((void **) &d_frame, sizeof(int) * width * height * 3));
for(;;)
{
cap >> frame; // get a new frame from camera
// data copy of image
for (int i=0; i<height; i++) {
for (int j=0; j<width; j++) {
h_i[i][j][0] = frame.at<cv::Vec3b>(i, j)[0];
h_i[i][j][1] = frame.at<cv::Vec3b>(i, j)[1];
h_i[i][j][2] = frame.at<cv::Vec3b>(i, j)[2];
}
}
CUDA_CHECK_RETURN(cudaMemcpy(d_frame, h_i, sizeof(int) * width * height * 3, cudaMemcpyHostToDevice));
// kernel launch
// define grid and block dimensions
dim3 dimBlock(32, 32);
dim3 dimGrid(ceil(width/(double)dimBlock.x), ceil(height/(double)dimBlock.y));
// kernel launch
foreground_g<<<dimGrid, dimBlock>>>(d_frame, d_g, d_w, d_f, width, height);
CUDA_CHECK_RETURN(cudaDeviceSynchronize()); // Wait for the GPU launched work to complete
CUDA_CHECK_RETURN(cudaGetLastError());
// data copy back
CUDA_CHECK_RETURN(cudaMemcpy(foreground, d_f, sizeof(int) * width * height, cudaMemcpyDeviceToHost));
int num_obj_f = 0;
for (int i=0; i<height; i++) {
for (int j=0; j<width; j++) {
if (foreground[i][j] == -1) {
num_obj_f++;
foreground[i][j] = num_obj_f;
y_obj[num_obj_f-1] = 0;
x_obj[num_obj_f-1] = 0;
size_obj_f[num_obj_f-1] = connected_component(i, j, foreground,
y_obj[num_obj_f-1], x_obj[num_obj_f-1]);
y_obj[num_obj_f-1] = y_obj[num_obj_f-1] / size_obj_f[num_obj_f-1];
x_obj[num_obj_f-1] = x_obj[num_obj_f-1] / size_obj_f[num_obj_f-1];
}
}
}
for (int k=0; k<num_obj_f; k++) {
if (size_obj_f[k] >= COMPONENT_THRESH) {
int min_dis = DISTANCE_THRESH;
int min_i = -1;
for (int i=0; i<num_obj; i++) {
int kf_p_x;
int kf_p_y;
kalman_predict(kf_obj[i], kf_p_y, kf_p_x);
if (get_distance(x_obj[k] - kf_p_x, y_obj[k] - kf_p_y) < min_dis &&
(size_obj[i] - size_obj_f[k]) * (size_obj[i] - size_obj_f[k]) < SIZE_THRESH) {
min_dis = get_distance(x_obj[k] - kf_p_x, y_obj[k] - kf_p_y);
min_i = i;
}
}
if (min_i == -1) {
size_obj[num_obj] = size_obj_f[k];
kf_obj[num_obj++] = kalman_init(y_obj[k], x_obj[k]);
min_i = num_obj-1;
}else{
size_obj[min_i] = size_obj_f[k];
kalman_update(kf_obj[min_i], y_obj[k], x_obj[k]);
}
int min_x=width, min_y=height, max_x=0, max_y=0;
for (int i=0; i<height; i++) {
for (int j=0; j<width; j++) {
if (foreground[i][j] == k+1) {
if (min_x > j) min_x = j;
if (min_y > i) min_y = i;
if (max_x < j) max_x = j;
if (max_y < i) max_y = i;
/*
int color = 256*256*256/(min_i+1);
frame.at<cv::Vec3b>(i, j)[0] = color/(256*256);
frame.at<cv::Vec3b>(i, j)[1] = color/256%256;
frame.at<cv::Vec3b>(i, j)[2] = color%256;
*/
}
}
}
for (int i=min_x; i<=max_x; i++) {
frame.at<cv::Vec3b>(min_y, i)[0] = 255;
frame.at<cv::Vec3b>(min_y, i)[1] = 255;
frame.at<cv::Vec3b>(min_y, i)[2] = 255;
frame.at<cv::Vec3b>(min_y, i)[min_i % 3] = 0;
frame.at<cv::Vec3b>(max_y, i)[0] = 255;
frame.at<cv::Vec3b>(max_y, i)[1] = 255;
frame.at<cv::Vec3b>(max_y, i)[2] = 255;
frame.at<cv::Vec3b>(max_y, i)[min_i % 3] = 0;
}
for (int i=min_y; i<=max_y; i++) {
frame.at<cv::Vec3b>(i, min_x)[0] = 255;
frame.at<cv::Vec3b>(i, min_x)[1] = 255;
frame.at<cv::Vec3b>(i, min_x)[2] = 255;
frame.at<cv::Vec3b>(i, min_x)[min_i % 3] = 0;
frame.at<cv::Vec3b>(i, max_x)[0] = 255;
frame.at<cv::Vec3b>(i, max_x)[1] = 255;
frame.at<cv::Vec3b>(i, max_x)[2] = 255;
frame.at<cv::Vec3b>(i, max_x)[min_i % 3] = 0;
}
}
}
cv::imshow("something", frame);
waitKey(1);
/*
sprintf(filename, "im%d.png", frameIndex++);
try {
imwrite(filename, frame, compression_params);
}
catch (runtime_error& ex) {
fprintf(stderr, "Exception converting image to PNG format: %s\n", ex.what());
return 1;
}
*/
}
return 0;
}
|
b188ea694c2e0fc327931cbd9a46a30fcabea893.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <errno.h>
#include "misc.h"
#include "eigen_cpu.h"
#include "database_cpu.h"
#include "eigen_gpu.h"
#include "database_gpu.h"
#include "load_save_image.h"
void display_menu_cpu(struct DatasetCPU *dataset_cpu);
void display_menu_gpu(struct DatasetGPU *dataset_gpu);
int get_user_choice();
void get_user_string(char **s);
int main(int argc, char **argv)
{
struct DatasetCPU *dataset_cpu = NULL;
struct DatasetGPU *dataset_gpu = NULL;
int use_gpu = 1;
if (argc == 2 && !strcmp(argv[1], "-cpu"))
use_gpu = 0;
int action = 0;
char name[100] = "";
char *path = NULL;
char *dataset_name = NULL;
int tmp;
do {
if (use_gpu)
display_menu_gpu(dataset_gpu);
else
display_menu_cpu(dataset_cpu);
action = get_user_choice();
switch(action) {
case 1:
printf("\nEnter path to a repo containing images: ");
get_user_string(&path);
printf("\nEnter a name for the database: ");
get_user_string(&dataset_name);
if (use_gpu) {
if (dataset_gpu != NULL)
free_dataset_gpu(dataset_gpu);
dataset_gpu = create_dataset_and_compute_all_gpu(path, dataset_name);
if (dataset_gpu)
printf("\nDone! Press any key to continue");
} else {
if (dataset_cpu != NULL)
free_dataset_cpu(dataset_cpu);
dataset_cpu = create_dataset_and_compute_all_cpu(path, dataset_name);
if (dataset_cpu)
printf("\nDone! Press any key to continue");
}
break;
case 2:
printf("\nEnter path to a .dat file: ");
get_user_string(&path);
if (use_gpu) {
if (dataset_gpu != NULL)
free_dataset_gpu(dataset_gpu);
dataset_gpu = load_dataset_gpu(path);
if (dataset_gpu)
printf("\nDone! Press any key to continue");
} else {
if (dataset_cpu != NULL)
free_dataset_cpu(dataset_cpu);
dataset_cpu = load_dataset_cpu(path);
if (dataset_cpu)
printf("\nDone! Press any key to continue");
}
break;
case 3:
if (use_gpu) {
if (dataset_gpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
printf("Enter path to a repo in which %s.dat will be saved: ", dataset_gpu->name);
} else {
if (dataset_cpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
printf("Enter path to a repo in which %s.dat will be saved: ", dataset_cpu->name);
}
get_user_string(&path);
if (use_gpu) {
path = (char *)realloc(path, (strlen(path) + strlen(dataset_gpu->name) + 6) * sizeof(char));
TEST_MALLOC(path);
strcat(path, "/");
strcat(path, dataset_gpu->name);
strcat(path, ".dat");
save_dataset_to_disk_gpu(dataset_gpu, path);
printf("\nDone! Press any key to continue");
} else {
path = (char *)realloc(path, (strlen(path) + strlen(dataset_cpu->name) + 6) * sizeof(char));
TEST_MALLOC(path);
strcat(path, "/");
strcat(path, dataset_cpu->name);
strcat(path, ".dat");
save_dataset_to_disk_cpu(dataset_cpu, path);
printf("\nDone! Press any key to continue");
}
break;
case 4:
if (use_gpu) {
if (dataset_gpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
} else {
if (dataset_cpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
}
printf("Enter path to a repo containing new face(s) or path to a single face: ");
get_user_string(&path);
if (use_gpu)
tmp = add_faces_and_compute_coordinates_gpu(dataset_gpu, path);
else
tmp = add_faces_and_compute_coordinates_cpu(dataset_cpu, path);
if (tmp)
printf("\nDone! Press any key to continue");
break;
case 5:
if (use_gpu) {
if (dataset_gpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
} else {
if (dataset_cpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
}
printf("Enter path to a face to identify: ");
get_user_string(&path);
if (use_gpu)
identify_face_gpu(dataset_gpu, path);
else
identify_face_cpu(dataset_cpu, path);
printf("\nDone! Press any key to continue");
break;
case 6:
if (use_gpu) {
if (dataset_gpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
} else {
if (dataset_cpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
}
if (use_gpu) {
for (int i = 0; i < dataset_gpu->num_eigenfaces; i++) {
sprintf(name, "eigen/Eigenface %d.png", i);
save_image_to_disk_gpu(dataset_gpu->d_eigenfaces + dataset_gpu->w * dataset_gpu->h * i, dataset_gpu->w, dataset_gpu->h, name);
}
} else {
for (int i = 0; i < dataset_cpu->num_eigenfaces; i++) {
sprintf(name, "eigen/Eigenface %d.png", i);
save_image_to_disk_cpu(dataset_cpu->eigenfaces[i], name);
}
}
printf("\nDone! Press any key to continue");
break;
case 7:
if (use_gpu) {
if (dataset_gpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
} else {
if (dataset_cpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
}
if (use_gpu) {
for (int i = 0; i < dataset_gpu->num_faces; i++)
save_reconstructed_face_to_disk_gpu(dataset_gpu, dataset_gpu->faces[i], dataset_gpu->num_eigenfaces);
} else {
for (int i = 0; i < dataset_cpu->num_faces; i++)
save_reconstructed_face_to_disk_cpu(dataset_cpu, dataset_cpu->faces[i], dataset_cpu->num_eigenfaces);
}
printf("\nDone! Press any key to continue");
break;
case 8:
printf("Good bye!\n");
break;
default:
break;
}
getchar();
} while (action != 8);
if (use_gpu)
free_dataset_gpu(dataset_gpu);
else
free_dataset_cpu(dataset_cpu);
free(dataset_name);
free(path);
return EXIT_SUCCESS;
}
void display_menu_cpu(struct DatasetCPU *dataset_cpu)
{
system("clear");
printf("////////////////////////////////////////////////////////////////////////////////\n");
printf("/// FaceIdX - CPU version ///\n");
printf("////////////////////////////////////////////////////////////////////////////////\n\n\n");
printf("Current database: ");
if (dataset_cpu == NULL) {
printf(KRED "None");
} else {
printf(KGRN "%s\n\n", dataset_cpu->name);
printf(KNRM "Number of original images: ");
printf(KWHT "%d\n", dataset_cpu->num_original_images);
printf(KNRM "Number of eigenfaces: ");
printf(KWHT "%d\n", dataset_cpu->num_eigenfaces);
printf(KNRM "Number of faces: ");
printf(KWHT "%d\n", dataset_cpu->num_faces);
printf(KNRM "Number of new faces: ");
printf(KWHT "%d\n", dataset_cpu->num_new_faces);
}
printf(KNRM "\n\n===== MENU =====\n\n");
printf("1. Create database\n");
printf("2. Load database\n");
printf("3. Save database to disk\n");
printf("4. Add face(s) to database\n");
printf("5. Identify face\n");
printf("6. Export eigenfaces\n");
printf("7. Reconstruct faces\n");
printf("8. Exit\n");
printf("\nYour choice: ");
}
void display_menu_gpu(struct DatasetGPU *dataset_gpu)
{
system("clear");
printf("////////////////////////////////////////////////////////////////////////////////\n");
printf("/// FaceIdX - GPU version ///\n");
printf("////////////////////////////////////////////////////////////////////////////////\n\n\n");
printf("Current database: ");
if (dataset_gpu == NULL) {
printf(KRED "None");
} else {
printf(KGRN "%s\n\n", dataset_gpu->name);
printf(KNRM "Number of original images: ");
printf(KWHT "%d\n", dataset_gpu->num_original_images);
printf(KNRM "Number of eigenfaces: ");
printf(KWHT "%d\n", dataset_gpu->num_eigenfaces);
printf(KNRM "Number of faces: ");
printf(KWHT "%d\n", dataset_gpu->num_faces);
printf(KNRM "Number of new faces: ");
printf(KWHT "%d\n", dataset_gpu->num_new_faces);
}
printf(KNRM "\n\n===== MENU =====\n\n");
printf("1. Create database\n");
printf("2. Load database\n");
printf("3. Save database to disk\n");
printf("4. Add face(s) to database\n");
printf("5. Identify face\n");
printf("6. Export eigenfaces\n");
printf("7. Reconstruct faces\n");
printf("8. Exit\n");
printf("\nYour choice: ");
}
int get_user_choice()
{
size_t len = 0;
int char_read;
char *user_command = NULL;
char_read = getline(&user_command, &len, stdin);
if (char_read == -1) {
PRINT("BUG", "Unexpected error.");
return 0;
}
user_command[char_read - 1] = '\0';
char *p;
int tmp = strtol(user_command, &p, 10);
if (*p != '\0' || (tmp == 0 && errno != 0)) {
PRINT("WARN", "Invalid choice!\n");
return 0;
} else if (tmp < 1 || tmp > 8) {
PRINT("WARN", "Invalid choice!\n");
tmp = 0;
}
free(user_command);
return tmp;
}
void get_user_string(char **s)
{
size_t len = 0;
int char_read;
char_read = getline(s, &len, stdin);
if (char_read == -1) {
PRINT("BUG", "Unexpected error.");
return;
}
(*s)[char_read - 1] = '\0';
}
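//
// A bounded alternative to the realloc/strcat path construction in case 3 of
// the menu loop, shown as a sketch only (not called in this program; the name
// build_dataset_path is illustrative). It produces the same "<dir>/<name>.dat"
// layout in a single snprintf call.
//
static char *build_dataset_path(const char *dir, const char *name)
{
    size_t len = strlen(dir) + strlen(name) + 6; // '/' + ".dat" + '\0'
    char *out = (char *)malloc(len);
    if (out != NULL)
        snprintf(out, len, "%s/%s.dat", dir, name);
    return out; // caller frees
}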
| b188ea694c2e0fc327931cbd9a46a30fcabea893.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <errno.h>
#include "misc.h"
#include "eigen_cpu.h"
#include "database_cpu.h"
#include "eigen_gpu.h"
#include "database_gpu.h"
#include "load_save_image.h"
void display_menu_cpu(struct DatasetCPU *dataset_cpu);
void display_menu_gpu(struct DatasetGPU *dataset_gpu);
int get_user_choice();
void get_user_string(char **s);
int main(int argc, char **argv)
{
struct DatasetCPU *dataset_cpu = NULL;
struct DatasetGPU *dataset_gpu = NULL;
int use_gpu = 1;
if (argc == 2 && !strcmp(argv[1], "-cpu"))
use_gpu = 0;
int action = 0;
char name[100] = "";
char *path = NULL;
char *dataset_name = NULL;
int tmp;
do {
if (use_gpu)
display_menu_gpu(dataset_gpu);
else
display_menu_cpu(dataset_cpu);
action = get_user_choice();
switch(action) {
case 1:
printf("\nEnter path to a repo containing images: ");
get_user_string(&path);
printf("\nEnter a name for the database: ");
get_user_string(&dataset_name);
if (use_gpu) {
if (dataset_gpu != NULL)
free_dataset_gpu(dataset_gpu);
dataset_gpu = create_dataset_and_compute_all_gpu(path, dataset_name);
if (dataset_gpu)
printf("\nDone! Press any key to continue");
} else {
if (dataset_cpu != NULL)
free_dataset_cpu(dataset_cpu);
dataset_cpu = create_dataset_and_compute_all_cpu(path, dataset_name);
if (dataset_cpu)
printf("\nDone! Press any key to continue");
}
break;
case 2:
printf("\nEnter path to a .dat file: ");
get_user_string(&path);
if (use_gpu) {
if (dataset_gpu != NULL)
free_dataset_gpu(dataset_gpu);
dataset_gpu = load_dataset_gpu(path);
if (dataset_gpu)
printf("\nDone! Press any key to continue");
} else {
if (dataset_cpu != NULL)
free_dataset_cpu(dataset_cpu);
dataset_cpu = load_dataset_cpu(path);
if (dataset_cpu)
printf("\nDone! Press any key to continue");
}
break;
case 3:
if (use_gpu) {
if (dataset_gpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
printf("Enter path to a repo in which %s.dat will be saved: ", dataset_gpu->name);
} else {
if (dataset_cpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
printf("Enter path to a repo in which %s.dat will be saved: ", dataset_cpu->name);
}
get_user_string(&path);
if (use_gpu) {
path = (char *)realloc(path, (strlen(path) + strlen(dataset_gpu->name) + 6) * sizeof(char));
TEST_MALLOC(path);
strcat(path, "/");
strcat(path, dataset_gpu->name);
strcat(path, ".dat");
save_dataset_to_disk_gpu(dataset_gpu, path);
printf("\nDone! Press any key to continue");
} else {
path = (char *)realloc(path, (strlen(path) + strlen(dataset_cpu->name) + 6) * sizeof(char));
TEST_MALLOC(path);
strcat(path, "/");
strcat(path, dataset_cpu->name);
strcat(path, ".dat");
save_dataset_to_disk_cpu(dataset_cpu, path);
printf("\nDone! Press any key to continue");
}
break;
case 4:
if (use_gpu) {
if (dataset_gpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
} else {
if (dataset_cpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
}
printf("Enter path to a repo containing new face(s) or path to a single face: ");
get_user_string(&path);
if (use_gpu)
tmp = add_faces_and_compute_coordinates_gpu(dataset_gpu, path);
else
tmp = add_faces_and_compute_coordinates_cpu(dataset_cpu, path);
if (tmp)
printf("\nDone! Press any key to continue");
break;
case 5:
if (use_gpu) {
if (dataset_gpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
} else {
if (dataset_cpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
}
printf("Enter path to a face to identify: ");
get_user_string(&path);
if (use_gpu)
identify_face_gpu(dataset_gpu, path);
else
identify_face_cpu(dataset_cpu, path);
printf("\nDone! Press any key to continue");
break;
case 6:
if (use_gpu) {
if (dataset_gpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
} else {
if (dataset_cpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
}
if (use_gpu) {
for (int i = 0; i < dataset_gpu->num_eigenfaces; i++) {
sprintf(name, "eigen/Eigenface %d.png", i);
save_image_to_disk_gpu(dataset_gpu->d_eigenfaces + dataset_gpu->w * dataset_gpu->h * i, dataset_gpu->w, dataset_gpu->h, name);
}
} else {
for (int i = 0; i < dataset_cpu->num_eigenfaces; i++) {
sprintf(name, "eigen/Eigenface %d.png", i);
save_image_to_disk_cpu(dataset_cpu->eigenfaces[i], name);
}
}
printf("\nDone! Press any key to continue");
break;
case 7:
if (use_gpu) {
if (dataset_gpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
} else {
if (dataset_cpu == NULL) {
PRINT("WARN", "No database is currently loaded!\n");
break;
}
}
if (use_gpu) {
for (int i = 0; i < dataset_gpu->num_faces; i++)
save_reconstructed_face_to_disk_gpu(dataset_gpu, dataset_gpu->faces[i], dataset_gpu->num_eigenfaces);
} else {
for (int i = 0; i < dataset_cpu->num_faces; i++)
save_reconstructed_face_to_disk_cpu(dataset_cpu, dataset_cpu->faces[i], dataset_cpu->num_eigenfaces);
}
printf("\nDone! Press any key to continue");
break;
case 8:
printf("Good bye!\n");
break;
default:
break;
}
getchar();
} while (action != 8);
if (use_gpu)
free_dataset_gpu(dataset_gpu);
else
free_dataset_cpu(dataset_cpu);
free(dataset_name);
free(path);
return EXIT_SUCCESS;
}
void display_menu_cpu(struct DatasetCPU *dataset_cpu)
{
system("clear");
printf("////////////////////////////////////////////////////////////////////////////////\n");
printf("/// FaceIdX - CPU version ///\n");
printf("////////////////////////////////////////////////////////////////////////////////\n\n\n");
printf("Current database: ");
if (dataset_cpu == NULL) {
printf(KRED "None");
} else {
printf(KGRN "%s\n\n", dataset_cpu->name);
printf(KNRM "Number of original images: ");
printf(KWHT "%d\n", dataset_cpu->num_original_images);
printf(KNRM "Number of eigenfaces: ");
printf(KWHT "%d\n", dataset_cpu->num_eigenfaces);
printf(KNRM "Number of faces: ");
printf(KWHT "%d\n", dataset_cpu->num_faces);
printf(KNRM "Number of new faces: ");
printf(KWHT "%d\n", dataset_cpu->num_new_faces);
}
printf(KNRM "\n\n===== MENU =====\n\n");
printf("1. Create database\n");
printf("2. Load database\n");
printf("3. Save database to disk\n");
printf("4. Add face(s) to database\n");
printf("5. Identify face\n");
printf("6. Export eigenfaces\n");
printf("7. Reconstruct faces\n");
printf("8. Exit\n");
printf("\nYour choice: ");
}
void display_menu_gpu(struct DatasetGPU *dataset_gpu)
{
system("clear");
printf("////////////////////////////////////////////////////////////////////////////////\n");
printf("/// FaceIdX - GPU version ///\n");
printf("////////////////////////////////////////////////////////////////////////////////\n\n\n");
printf("Current database: ");
if (dataset_gpu == NULL) {
printf(KRED "None");
} else {
printf(KGRN "%s\n\n", dataset_gpu->name);
printf(KNRM "Number of original images: ");
printf(KWHT "%d\n", dataset_gpu->num_original_images);
printf(KNRM "Number of eigenfaces: ");
printf(KWHT "%d\n", dataset_gpu->num_eigenfaces);
printf(KNRM "Number of faces: ");
printf(KWHT "%d\n", dataset_gpu->num_faces);
printf(KNRM "Number of new faces: ");
printf(KWHT "%d\n", dataset_gpu->num_new_faces);
}
printf(KNRM "\n\n===== MENU =====\n\n");
printf("1. Create database\n");
printf("2. Load database\n");
printf("3. Save database to disk\n");
printf("4. Add face(s) to database\n");
printf("5. Identify face\n");
printf("6. Export eigenfaces\n");
printf("7. Reconstruct faces\n");
printf("8. Exit\n");
printf("\nYour choice: ");
}
int get_user_choice()
{
size_t len = 0;
int char_read;
char *user_command = NULL;
char_read = getline(&user_command, &len, stdin);
if (char_read == -1) {
PRINT("BUG", "Unexpected error.");
return 0;
}
user_command[char_read - 1] = '\0';
char *p;
int tmp = strtol(user_command, &p, 10);
if (*p != '\0' || (tmp == 0 && errno != 0)) {
PRINT("WARN", "Invalid choice!\n");
return 0;
} else if (tmp < 1 || tmp > 8) {
PRINT("WARN", "Invalid choice!\n");
tmp = 0;
}
free(user_command);
return tmp;
}
void get_user_string(char **s)
{
size_t len = 0;
int char_read;
char_read = getline(s, &len, stdin);
if (char_read == -1) {
PRINT("BUG", "Unexpected error.");
return;
}
(*s)[char_read - 1] = '\0';
}
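//
// get_user_string above always overwrites the last character read, which
// assumes the line ends in '\n'. A slightly more defensive sketch (not called
// in this program; the name is illustrative) strips the newline only when one
// is actually present.
//
static void get_user_string_safe(char **s)
{
    size_t len = 0;
    ssize_t char_read = getline(s, &len, stdin);
    if (char_read <= 0) {
        PRINT("BUG", "Unexpected error.");
        return;
    }
    if ((*s)[char_read - 1] == '\n')
        (*s)[char_read - 1] = '\0';
}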
|
2ffcc55cccbdaa0ecbf4dbce5d2d05f339b48060.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
namespace {
// reorder operation after all2all in forward propagation
template <typename TypeEmbeddingComp>
__global__ void forward_reorder_kernel(int batch_size_per_gpu, int slot_num, int embedding_vec_size,
int gpu_num, const TypeEmbeddingComp *input,
TypeEmbeddingComp *output) {
  // blockDim.x = embedding_vec_size;  // each thread handles one element of the embedding vector
  // gridDim.x = batch_size / gpu_num = samples_per_gpu;  // each block handles one sample on each GPU
  // Each thread needs to process slot_num slots
int tid = threadIdx.x;
int bid = blockIdx.x;
int sample_id = bid; // sample_id on the current GPU
if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) {
int dst_offset =
sample_id * slot_num * embedding_vec_size; // offset for the first slot of one sample
int dst_stride = embedding_vec_size; // stride from slot to slot
for (int slot_id = 0; slot_id < slot_num; slot_id++) {
int gpu_id = slot_id % gpu_num;
int offset_pre = 0; // offset in previous gpus
for (int id = 0; id < gpu_id; id++) {
int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0);
int stride = batch_size_per_gpu * slot_num_per_gpu;
offset_pre += stride;
}
int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0);
int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu
int src_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size;
int dst_addr = dst_offset + dst_stride * slot_id;
output[dst_addr + tid] = input[src_addr + tid];
}
}
}
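// Host-side restatement of the source index computed per (sample, slot) inside
// forward_reorder_kernel, in units of embedding vectors (multiply by
// embedding_vec_size for the element offset). Reference sketch only; the name
// reorder_src_index is illustrative and the function is not used elsewhere.
inline int reorder_src_index(int sample_id, int slot_id, int slot_num, int gpu_num,
                             int batch_size_per_gpu) {
  int gpu_id = slot_id % gpu_num; // slots are distributed round-robin over GPUs
  int offset_pre = 0;             // vectors owned by lower-numbered GPUs
  for (int id = 0; id < gpu_id; id++) {
    int slots_on_id = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0);
    offset_pre += batch_size_per_gpu * slots_on_id;
  }
  int slots_here = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0);
  int offset_cur = sample_id * slots_here; // vectors of earlier samples on this GPU
  return offset_pre + offset_cur + slot_id / gpu_num;
}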
// reorder operation after all2all in forward propagation
__global__ void forward_reorder_align2_kernel(int batch_size_per_gpu, int slot_num,
int embedding_vec_size, int gpu_num,
const __half *input, __half *output) {
  // blockDim.x = embedding_vec_size;  // each thread handles one element of the embedding vector
  // gridDim.x = batch_size / gpu_num = samples_per_gpu;  // each block handles one sample on each GPU
  // Each thread needs to process slot_num slots
int tid = threadIdx.x;
int bid = blockIdx.x;
int sample_id = bid; // sample_id on the current GPU
if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) {
const __half2 *input2 = reinterpret_cast<const __half2 *>(input);
__half2 *output2 = reinterpret_cast<__half2 *>(output);
int dst_offset =
sample_id * slot_num * embedding_vec_size; // offset for the first slot of one sample
int dst_stride = embedding_vec_size; // stride from slot to slot
for (int slot_id = 0; slot_id < slot_num; slot_id++) {
int gpu_id = slot_id % gpu_num;
int offset_pre = 0; // offset in previous gpus
for (int id = 0; id < gpu_id; id++) {
int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0);
int stride = batch_size_per_gpu * slot_num_per_gpu;
offset_pre += stride;
}
int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0);
int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu
int src_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size;
int dst_addr = dst_offset + dst_stride * slot_id;
output2[dst_addr + tid] = input2[src_addr + tid];
}
}
}
template <typename TypeEmbeddingComp>
void do_forward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
size_t total_gpu_count, const TypeEmbeddingComp *input,
TypeEmbeddingComp *output, hipStream_t stream) {
const size_t grid_size = batch_size_per_gpu;
const size_t block_size = embedding_vec_size;
hipLaunchKernelGGL(( forward_reorder_kernel), dim3(grid_size), dim3(block_size), 0, stream,
batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output);
}
void do_forward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
size_t total_gpu_count, const __half *input, __half *output,
hipStream_t stream) {
const size_t grid_size = batch_size_per_gpu;
if (embedding_vec_size % 2 == 0) {
const size_t block_size = embedding_vec_size / 2;
hipLaunchKernelGGL(( forward_reorder_align2_kernel), dim3(grid_size), dim3(block_size), 0, stream,
batch_size_per_gpu, slot_num, embedding_vec_size / 2, total_gpu_count, input, output);
} else {
const size_t block_size = embedding_vec_size;
hipLaunchKernelGGL(( forward_reorder_kernel), dim3(grid_size), dim3(block_size), 0, stream,
batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output);
}
}
} // namespace
/**
 * reorder the sequence of data after the all2all operation in forward propagation
* @param batch_size_per_gpu batch size per GPU
* @param slot_num the number of localized slots
* @param embedding_vec_size embedding vector size.
* @param src_tensors the source tensors before reorder
* @param dst_tensors the destination tensors after reorder
 * @param resource_manager resource manager holding all GPUs' device resources;
 * device switching is handled internally via CudaDeviceContext.
*/
template <typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::forward_reorder(size_t batch_size_per_gpu, size_t slot_num,
size_t embedding_vec_size,
const Tensors2<TypeEmbeddingComp> &src_tensors,
Tensors2<TypeEmbeddingComp> &dst_tensors,
const ResourceManager &resource_manager) {
CudaDeviceContext context;
size_t local_gpu_count = resource_manager.get_local_gpu_count();
size_t total_gpu_count = resource_manager.get_global_gpu_count();
for (size_t id = 0; id < local_gpu_count; id++) {
const auto &local_gpu = resource_manager.get_local_gpu(id);
context.set_device(local_gpu->get_device_id());
do_forward_reorder(batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count,
src_tensors[id].get_ptr(), dst_tensors[id].get_ptr(),
local_gpu->get_stream());
}
}
template void SparseEmbeddingFunctors::forward_reorder<float>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
const Tensors2<float> &src_tensors, Tensors2<float> &dst_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::forward_reorder<__half>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
const Tensors2<__half> &src_tensors, Tensors2<__half> &dst_tensors,
const ResourceManager &resource_manager);
} // namespace HugeCTR | 2ffcc55cccbdaa0ecbf4dbce5d2d05f339b48060.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
namespace {
// reorder operation after all2all in forward propagation
template <typename TypeEmbeddingComp>
__global__ void forward_reorder_kernel(int batch_size_per_gpu, int slot_num, int embedding_vec_size,
int gpu_num, const TypeEmbeddingComp *input,
TypeEmbeddingComp *output) {
  // blockDim.x = embedding_vec_size;  // each thread handles one element of the embedding vector
  // gridDim.x = batch_size / gpu_num = samples_per_gpu;  // each block handles one sample on each GPU
  // Each thread needs to process slot_num slots
int tid = threadIdx.x;
int bid = blockIdx.x;
int sample_id = bid; // sample_id on the current GPU
if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) {
int dst_offset =
sample_id * slot_num * embedding_vec_size; // offset for the first slot of one sample
int dst_stride = embedding_vec_size; // stride from slot to slot
for (int slot_id = 0; slot_id < slot_num; slot_id++) {
int gpu_id = slot_id % gpu_num;
int offset_pre = 0; // offset in previous gpus
for (int id = 0; id < gpu_id; id++) {
int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0);
int stride = batch_size_per_gpu * slot_num_per_gpu;
offset_pre += stride;
}
int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0);
int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu
int src_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size;
int dst_addr = dst_offset + dst_stride * slot_id;
output[dst_addr + tid] = input[src_addr + tid];
}
}
}
// reorder operation after all2all in forward propagation
__global__ void forward_reorder_align2_kernel(int batch_size_per_gpu, int slot_num,
int embedding_vec_size, int gpu_num,
const __half *input, __half *output) {
// blockDim.x = embedding_vec_size; // each thread corresponding to one element of embedding
// vector gridDim.x = batch_size / gpu_num = samples_per_gpu; // each block corresponding to one
// sample on each GPU Each thread needs to process slot_num slots
int tid = threadIdx.x;
int bid = blockIdx.x;
int sample_id = bid; // sample_id on the current GPU
if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) {
const __half2 *input2 = reinterpret_cast<const __half2 *>(input);
__half2 *output2 = reinterpret_cast<__half2 *>(output);
int dst_offset =
sample_id * slot_num * embedding_vec_size; // offset for the first slot of one sample
int dst_stride = embedding_vec_size; // stride from slot to slot
for (int slot_id = 0; slot_id < slot_num; slot_id++) {
int gpu_id = slot_id % gpu_num;
int offset_pre = 0; // offset in previous gpus
for (int id = 0; id < gpu_id; id++) {
int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0);
int stride = batch_size_per_gpu * slot_num_per_gpu;
offset_pre += stride;
}
int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0);
int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu
int src_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size;
int dst_addr = dst_offset + dst_stride * slot_id;
output2[dst_addr + tid] = input2[src_addr + tid];
}
}
}
template <typename TypeEmbeddingComp>
void do_forward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
size_t total_gpu_count, const TypeEmbeddingComp *input,
TypeEmbeddingComp *output, cudaStream_t stream) {
const size_t grid_size = batch_size_per_gpu;
const size_t block_size = embedding_vec_size;
forward_reorder_kernel<<<grid_size, block_size, 0, stream>>>(
batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output);
}
void do_forward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
size_t total_gpu_count, const __half *input, __half *output,
cudaStream_t stream) {
const size_t grid_size = batch_size_per_gpu;
if (embedding_vec_size % 2 == 0) {
const size_t block_size = embedding_vec_size / 2;
forward_reorder_align2_kernel<<<grid_size, block_size, 0, stream>>>(
batch_size_per_gpu, slot_num, embedding_vec_size / 2, total_gpu_count, input, output);
} else {
const size_t block_size = embedding_vec_size;
forward_reorder_kernel<<<grid_size, block_size, 0, stream>>>(
batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output);
}
}
} // namespace
/**
 * reorder the sequence of data after the all2all operation in forward propagation
* @param batch_size_per_gpu batch size per GPU
* @param slot_num the number of localized slots
* @param embedding_vec_size embedding vector size.
* @param src_tensors the source tensors before reorder
* @param dst_tensors the destination tensors after reorder
 * @param resource_manager resource manager holding all GPUs' device resources;
 * device switching is handled internally via CudaDeviceContext.
*/
template <typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::forward_reorder(size_t batch_size_per_gpu, size_t slot_num,
size_t embedding_vec_size,
const Tensors2<TypeEmbeddingComp> &src_tensors,
Tensors2<TypeEmbeddingComp> &dst_tensors,
const ResourceManager &resource_manager) {
CudaDeviceContext context;
size_t local_gpu_count = resource_manager.get_local_gpu_count();
size_t total_gpu_count = resource_manager.get_global_gpu_count();
for (size_t id = 0; id < local_gpu_count; id++) {
const auto &local_gpu = resource_manager.get_local_gpu(id);
context.set_device(local_gpu->get_device_id());
do_forward_reorder(batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count,
src_tensors[id].get_ptr(), dst_tensors[id].get_ptr(),
local_gpu->get_stream());
}
}
template void SparseEmbeddingFunctors::forward_reorder<float>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
const Tensors2<float> &src_tensors, Tensors2<float> &dst_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::forward_reorder<__half>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
const Tensors2<__half> &src_tensors, Tensors2<__half> &dst_tensors,
const ResourceManager &resource_manager);
} // namespace HugeCTR |
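// do_forward_reorder uses embedding_vec_size (or half of it on the __half2
// path) directly as the CUDA block size, so it implicitly assumes the value
// fits within the 1024 threads-per-block limit. A small sketch making that
// assumption explicit (not called in this file; the name is illustrative):
namespace {
inline bool reorder_block_dim_ok(size_t embedding_vec_size) {
  return embedding_vec_size > 0 && embedding_vec_size <= 1024; // max blockDim.x
}
} // namespace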
55d22d2aac155a7608086d4b5df7a880df3243f4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "solvePDEKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *hPop = NULL;
hipMalloc(&hPop, XSIZE*YSIZE);
float *T_Last = NULL;
hipMalloc(&T_Last, XSIZE*YSIZE);
float *T_New = NULL;
hipMalloc(&T_New, XSIZE*YSIZE);
float *T_Surface = NULL;
hipMalloc(&T_Surface, XSIZE*YSIZE);
float Tw = 1;
float lamda = 1;
float pho = 1;
float ce = 1;
int ny = 1;
float dy = 1;
int nx = 1;
float dx = 1;
float tau = 1;
int tnpts = 1;
int tstep = 1;
float Vcast = 1;
int Section = 1;
float *ccml = NULL;
hipMalloc(&ccml, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
    hipFree(0);
    hipLaunchKernelGGL((solvePDEKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, hPop, T_Last, T_New, T_Surface, Tw, lamda, pho, ce, ny, dy, nx, dx, tau, tnpts, tstep, Vcast, Section, ccml);
    hipDeviceSynchronize();
    for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL((solvePDEKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, hPop, T_Last, T_New, T_Surface, Tw, lamda, pho, ce, ny, dy, nx, dx, tau, tnpts, tstep, Vcast, Section, ccml);
    }
    auto start = steady_clock::now();
    for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL((solvePDEKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, hPop, T_Last, T_New, T_Surface, Tw, lamda, pho, ce, ny, dy, nx, dx, tau, tnpts, tstep, Vcast, Section, ccml);
    }
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 55d22d2aac155a7608086d4b5df7a880df3243f4.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "solvePDEKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *hPop = NULL;
cudaMalloc(&hPop, XSIZE*YSIZE);
float *T_Last = NULL;
cudaMalloc(&T_Last, XSIZE*YSIZE);
float *T_New = NULL;
cudaMalloc(&T_New, XSIZE*YSIZE);
float *T_Surface = NULL;
cudaMalloc(&T_Surface, XSIZE*YSIZE);
float Tw = 1;
float lamda = 1;
float pho = 1;
float ce = 1;
int ny = 1;
float dy = 1;
int nx = 1;
float dx = 1;
float tau = 1;
int tnpts = 1;
int tstep = 1;
float Vcast = 1;
int Section = 1;
float *ccml = NULL;
cudaMalloc(&ccml, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
solvePDEKernel<<<gridBlock,threadBlock>>>(hPop,T_Last,T_New,T_Surface,Tw,lamda,pho,ce,ny,dy,nx,dx,tau,tnpts,tstep,Vcast,Section,ccml);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
solvePDEKernel<<<gridBlock,threadBlock>>>(hPop,T_Last,T_New,T_Surface,Tw,lamda,pho,ce,ny,dy,nx,dx,tau,tnpts,tstep,Vcast,Section,ccml);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
solvePDEKernel<<<gridBlock,threadBlock>>>(hPop,T_Last,T_New,T_Surface,Tw,lamda,pho,ce,ny,dy,nx,dx,tau,tnpts,tstep,Vcast,Section,ccml);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
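/* Note on the timing loop in main above: kernel launches are asynchronous, so
   the second steady_clock::now() can fire before the 1000 queued launches have
   finished on the device. A sketch of a variant that times completed GPU work
   (shown as a comment only, not applied to the code above):
       auto start = steady_clock::now();
       for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
         solvePDEKernel<<<gridBlock,threadBlock>>>(hPop,T_Last,T_New,T_Surface,Tw,lamda,pho,ce,ny,dy,nx,dx,tau,tnpts,tstep,Vcast,Section,ccml);
       }
       cudaDeviceSynchronize();   // drain the launch queue before stopping the timer
       auto end = steady_clock::now();
*/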
7886a0dec936adff7d43852ec5b6d974dd5da302.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void add(int *a, int *b, int *c)
{
/* insert correct index so that each element is calculated by a different thread */
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
/* experiment with different values of N */
/* how large can you make it? */
#define N 32
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof( int );
/* allocate space for device copies of a, b, c */
hipMalloc( (void **) &d_a, size );
hipMalloc( (void **) &d_b, size );
hipMalloc( (void **) &d_c, size );
/* allocate space for host copies of a, b, c and setup input values */
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( size );
for( int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
}
/* copy inputs to device */
hipMemcpy( d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy( d_b, b, size, hipMemcpyHostToDevice );
hipMemset( d_c, 0, size );
/* launch the kernel on the GPU */
/* insert the correct launch parameters to use 1 block and N threads */
hipLaunchKernelGGL(( add), dim3(1), dim3(N), 0, 0, d_a, d_b, d_c );
/* copy result back to host */
hipMemcpy( c, d_c, size, hipMemcpyDeviceToHost );
for( int i = 0; i < N; i++ )
{
printf("c[%d] = %d\n",i,c[i]);
} /* end for */
/* clean up */
free(a);
free(b);
free(c);
hipFree( d_a );
hipFree( d_b );
hipFree( d_c );
return 0;
} /* end main */
| 7886a0dec936adff7d43852ec5b6d974dd5da302.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void add(int *a, int *b, int *c)
{
/* insert correct index so that each element is calculated by a different thread */
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
/* experiment with different values of N */
/* how large can you make it? */
#define N 32
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof( int );
/* allocate space for device copies of a, b, c */
cudaMalloc( (void **) &d_a, size );
cudaMalloc( (void **) &d_b, size );
cudaMalloc( (void **) &d_c, size );
/* allocate space for host copies of a, b, c and setup input values */
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( size );
for( int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
}
/* copy inputs to device */
cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice );
cudaMemset( d_c, 0, size );
/* launch the kernel on the GPU */
/* insert the correct launch parameters to use 1 block and N threads */
add<<< 1, N>>>( d_a, d_b, d_c );
/* copy result back to host */
cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost );
for( int i = 0; i < N; i++ )
{
printf("c[%d] = %d\n",i,c[i]);
} /* end for */
/* clean up */
free(a);
free(b);
free(c);
cudaFree( d_a );
cudaFree( d_b );
cudaFree( d_c );
return 0;
} /* end main */
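/* With a single block, N in the exercise above is capped by the
   1024 threads-per-block limit. A grid-wide index lets the same element-wise
   add scale to much larger arrays; the kernel below is a sketch and is not
   referenced elsewhere in this file. */
__global__ void add_many(int *a, int *b, int *c, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x; /* one global index per thread */
    if (i < n) /* guard the final partial block */
        c[i] = a[i] + b[i];
}
/* example launch for n elements: add_many<<< (n + 255) / 256, 256 >>>( d_a, d_b, d_c, n ); */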
|
c994f39039362b4df3b95368dc74647650cec569.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "testSparse.h"
#define TREEINDX(xx,yy) ((xx) + (yy)*length - (yy)*((yy)-1)/2)
#define SHACCESS(vec,ind) vec[ind / BLOCKDIM][ind % BLOCKDIM]
#define SHSTORE(sname) sname[NSYMS/BLOCKDIM][BLOCKDIM+1]
//#define SHACCESS(vec,ind) vec[ind % BLOCKDIM][ind / BLOCKDIM]
//#define SHSTORE(sname) sname[BLOCKDIM][BLOCKDIM+1]
//#define SHACCESS(vec,ind) vec[ind]
//#define SHSTORE(sname) sname[NSYMS]
__device__ void findvmax(float SHSTORE(scores), int n, float *outv, int *outi) {
__shared__ float locv[NSYMS/BLOCKDIM];
__shared__ int loci[NSYMS/BLOCKDIM];
float newv;
int newi;
float mymax = SHACCESS(scores, threadIdx.x);
int myi = threadIdx.x;
for (int i = threadIdx.x + blockDim.x; i < n; i += blockDim.x) {
newv = SHACCESS(scores, i);
if (newv > mymax) {
mymax = newv;
myi = i;
}
}
for (int i = 1; i < 32; i *= 2) {
newv = __shfl_down(mymax, i);
newi = __shfl_down(myi, i);
if (newv > mymax) {
mymax = newv;
myi = newi;
}
}
if (threadIdx.x % BLOCKDIM == 0) {
locv[threadIdx.x / BLOCKDIM] = mymax;
loci[threadIdx.x / BLOCKDIM] = myi;
}
__syncthreads();
if (threadIdx.x < blockDim.x/BLOCKDIM) {
mymax = locv[threadIdx.x];
myi = loci[threadIdx.x];
for (int i = 1; i < blockDim.x/BLOCKDIM; i *= 2) {
newv = __shfl_down(mymax, i);
newi = __shfl_down(myi, i);
if (newv > mymax) {
mymax = newv;
myi = newi;
}
}
}
if (threadIdx.x < 1) {
outv[0] = mymax;
outi[0] = myi;
}
__syncthreads();
}
__device__ void findvmax2(float SHSTORE(scores), int n, float *outv, int *outi) {
__shared__ float locv[BLOCKDIM];
__shared__ int loci[BLOCKDIM];
float mymax = 0;
int myi = 0;
if (threadIdx.x < BLOCKDIM) {
for (int i = threadIdx.x; i < n; i += BLOCKDIM) {
if (SHACCESS(scores,i) > mymax) {
mymax = SHACCESS(scores,i);
myi = i;
}
}
locv[threadIdx.x] = mymax;
loci[threadIdx.x] = myi;
}
__syncthreads();
if (threadIdx.x < BLOCKDIM) {
mymax = locv[threadIdx.x];
myi = loci[threadIdx.x];
for (int i = 1; i < 32; i *= 2) {
float newv = __shfl_down(mymax, i);
int newi = __shfl_down(myi, i);
if (newv > mymax) {
mymax = newv;
myi = newi;
}
}
}
if (threadIdx.x < 1) {
outv[0] = mymax;
outi[0] = myi;
}
__syncthreads();
}
__global__ void __testvmax2(float *vec, int n, float *pkvmax, int *pkimax, int nreps) {
for (int irep = 0; irep < nreps; irep++) {
float maxv = vec[threadIdx.x];
int maxi = threadIdx.x;
for (int i = threadIdx.x + 32; i < n; i += 32) {
if (vec[i] > maxv) {
maxv = vec[i];
maxi = i;
}
}
__syncthreads();
for (int i = 1; i < 32; i *= 2) {
float newv = __shfl_down(maxv, i);
int newi = __shfl_down(maxi, i);
if (newv > maxv) {
maxv = newv;
maxi = newi;
}
}
if (threadIdx.x < 1) {
pkvmax[0] = maxv;
pkimax[0] = maxi;
}
}
__syncthreads();
}
__global__ void __testvmax(float *vec, int n, float *pkvmax, int *pkimax, int nreps) {
__shared__ float SHSTORE(vals);
for (int i = threadIdx.x; i < n; i += blockDim.x) {
SHACCESS(vals, i) = vec[i];
}
__syncthreads();
for (int i = 0; i < nreps; i++) {
findvmax(vals, n, pkvmax, pkimax);
}
}
void testvmax(float *vec, float *cvec, int n, int nreps, int iter) {
float vmax, kvmax, *pkvmax;
int imax, kimax, *pkimax;
hipMalloc((void**) &pkvmax, sizeof(float));
hipMalloc((void**) &pkimax, sizeof(int));
// __testvmax<<<1,320>>>(vec, n, pkvmax, pkimax, nreps);
hipLaunchKernelGGL(( __testvmax), dim3(1),dim3(32), 0, 0, vec, n, pkvmax, pkimax, nreps);
hipDeviceSynchronize();
if (iter == 0) {
hipMemcpy(&kvmax, pkvmax, sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&kimax, pkimax, sizeof(int), hipMemcpyDeviceToHost);
vmax = 0;
for (int i = 0; i < n; i++) {
if (cvec[i] > vmax) {
vmax = cvec[i];
imax = i;
}
}
if (imax == kimax && vmax == kvmax) {
printf("\nMax computed correctly\n");
} else {
printf("\nMax=%f(%i), kernel=%f(%i)\n", vmax, imax, kvmax, kimax);
}
}
hipFree(pkvmax);
hipFree(pkimax);
}
__global__ void viterbi(int *lrules, int *rrules, float *bivals, int *bip, int n,
int *unirules, float *univals, int *unip,
float **allbitrees, float **allunitrees, int *lengths,
int **parsetrees, int **nsyms, float **nscores) {
__shared__ float maxval[1];
__shared__ int imaxi[1];
__shared__ int left[1];
__shared__ float SHSTORE(lvals);
__shared__ float SHSTORE(rvals);
__shared__ float SHSTORE(lscores);
__shared__ float SHSTORE(rscores);
float *bitree = allbitrees[blockIdx.x]; // Each block processes one tree, so gather all the data
float *unitree = allunitrees[blockIdx.x]; // for this tree.
int length = lengths[blockIdx.x];
int *parsetree = parsetrees[blockIdx.x];
int *nsym = nsyms[blockIdx.x];
float *nscore = nscores[blockIdx.x];
for (int k = threadIdx.x; k < n; k += blockDim.x) {
SHACCESS(lvals, k) = bitree[k + NSYMS*TREEINDX(0, length-1)]; // Load root node scores
}
__syncthreads();
findvmax(lvals, n, maxval, imaxi); // Find the best score
if (threadIdx.x < 1) { // Initialize root node props
parsetree[0] = 2*length-1;
nsym[0] = imaxi[0];
nscore[0] = maxval[0];
left[0] = 0; // Number of input symbols processed so far/ x coord
}
__syncthreads();
for (int i = 0; i < 2*length-3; i++) {
int x = left[0]; // x coord of current node
int y = (parsetree[i]-i-1)/2; // y coord of current node
int here = nsym[i]; // this node's best symbol
float lmax = 0; // score of best rule, tallied over left children
float rmax = 0; // score of best rule, tallied over right children
int hmax = 0; // height of best left child
int symleft = -1; // best left symbol
int symright = -1; // best right symbol
// float thisc = nscore[i];
for (int k = threadIdx.x; k < n; k += blockDim.x) {
SHACCESS(lvals, k) = unitree[k + NSYMS*TREEINDX(x,y)]; // load pre-unary-rule scores for this node
SHACCESS(lscores, k) = 0;
}
__syncthreads();
    for (int k = unip[here] + threadIdx.x; k < unip[here+1]; k += blockDim.x) { // Compute pre-unary rule scores given root symbol
float tscore = SHACCESS(lvals,unirules[k]) * univals[k];
SHACCESS(lscores, unirules[k]) = max(SHACCESS(lscores, unirules[k]), tscore);
}
__syncthreads();
findvmax(lscores, n, maxval, imaxi); // Find best pre-unary symbol
here = imaxi[0];
// thisc = maxvals[0];
for (int j = 0; j < y; j++) { // Loop over splits for this node, if its not a leaf
for (int k = threadIdx.x; k < n; k += blockDim.x) {
SHACCESS(lvals, k) = bitree[k + NSYMS*TREEINDX(x,j)]; // Load left and right node score for this split
SHACCESS(rvals, k) = bitree[k + NSYMS*TREEINDX(x+1+j, y-1-j)];
SHACCESS(lscores, k) = 0;
SHACCESS(rscores, k) = 0;
}
__syncthreads();
      for (int k = bip[here] + threadIdx.x; k < bip[here+1]; k += blockDim.x) { // Compute left and right scores given root symbol
float tscore = SHACCESS(lvals,lrules[k]) * SHACCESS(rvals,rrules[k]) * bivals[k];
SHACCESS(lscores, lrules[k]) = max(SHACCESS(lscores, lrules[k]), tscore);
SHACCESS(rscores, rrules[k]) = max(SHACCESS(rscores, rrules[k]), tscore);
}
__syncthreads();
findvmax(lscores, n, maxval, imaxi); // Find the best left score and its height
if (threadIdx.x < 1 && maxval[0] > lmax) {
lmax = maxval[0];
hmax = j;
symleft = imaxi[0];
}
__syncthreads();
findvmax(rscores, n, maxval, imaxi); // Find the best (hopefully matching) right score
if (threadIdx.x < 1 && maxval[0] > rmax) {
rmax = maxval[0];
symright = imaxi[0];
}
__syncthreads();
}
if (threadIdx.x < 1) { // Update data with one thread
if (y > 0) { // If we're not at a leaf, process children
int nexti = i+2*hmax+2; // Tree is prefix order, left child is always the next node
parsetree[i+1] = nexti; // parsetree[x] points to next sibling of x, or just beyond own subtree for right siblings
nsym[i+1] = symleft;
nscore[i+1] = lmax;
parsetree[nexti] = parsetree[i]; // Right sibling points just beyond own subtree (same as beyond parent's subtree)
nsym[nexti] = symright;
nscore[nexti] = rmax;
} else { // We processed a leaf, so advance our leaf pointer
left[0] = left[0] + 1;
}
}
__syncthreads();
}
}
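// Host-side mirror of the TREEINDX macro above, for reference only (not used
// in this file; the name is illustrative). Chart cell (x, y) holds the span of
// y + 1 words starting at word x; packed row y begins after
// length + (length - 1) + ... + (length - y + 1) earlier cells.
inline int tree_index_host(int x, int y, int length) {
  return x + y * length - y * (y - 1) / 2;
}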
| c994f39039362b4df3b95368dc74647650cec569.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include "testSparse.h"
#define TREEINDX(xx,yy) ((xx) + (yy)*length - (yy)*((yy)-1)/2)
#define SHACCESS(vec,ind) vec[ind / BLOCKDIM][ind % BLOCKDIM]
#define SHSTORE(sname) sname[NSYMS/BLOCKDIM][BLOCKDIM+1]
//#define SHACCESS(vec,ind) vec[ind % BLOCKDIM][ind / BLOCKDIM]
//#define SHSTORE(sname) sname[BLOCKDIM][BLOCKDIM+1]
//#define SHACCESS(vec,ind) vec[ind]
//#define SHSTORE(sname) sname[NSYMS]
__device__ void findvmax(float SHSTORE(scores), int n, float *outv, int *outi) {
__shared__ float locv[NSYMS/BLOCKDIM];
__shared__ int loci[NSYMS/BLOCKDIM];
float newv;
int newi;
float mymax = SHACCESS(scores, threadIdx.x);
int myi = threadIdx.x;
for (int i = threadIdx.x + blockDim.x; i < n; i += blockDim.x) {
newv = SHACCESS(scores, i);
if (newv > mymax) {
mymax = newv;
myi = i;
}
}
for (int i = 1; i < 32; i *= 2) {
newv = __shfl_down(mymax, i);
newi = __shfl_down(myi, i);
if (newv > mymax) {
mymax = newv;
myi = newi;
}
}
if (threadIdx.x % BLOCKDIM == 0) {
locv[threadIdx.x / BLOCKDIM] = mymax;
loci[threadIdx.x / BLOCKDIM] = myi;
}
__syncthreads();
if (threadIdx.x < blockDim.x/BLOCKDIM) {
mymax = locv[threadIdx.x];
myi = loci[threadIdx.x];
for (int i = 1; i < blockDim.x/BLOCKDIM; i *= 2) {
newv = __shfl_down(mymax, i);
newi = __shfl_down(myi, i);
if (newv > mymax) {
mymax = newv;
myi = newi;
}
}
}
if (threadIdx.x < 1) {
outv[0] = mymax;
outi[0] = myi;
}
__syncthreads();
}
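// Minimal warp-wide max over a single float, isolating the shuffle ladder used
// in findvmax above. Reference sketch only (not called in this file; the name
// warp_max is illustrative). Newer CUDA releases prefer the _sync shuffle
// variants, e.g. __shfl_down_sync(0xffffffff, v, i).
__device__ inline float warp_max(float v) {
  for (int i = 1; i < 32; i *= 2) {
    float other = __shfl_down(v, i); // pull the value held i lanes higher
    if (other > v) v = other;        // lane 0 ends up holding the max of the warp
  }
  return v;
}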
__device__ void findvmax2(float SHSTORE(scores), int n, float *outv, int *outi) {
__shared__ float locv[BLOCKDIM];
__shared__ int loci[BLOCKDIM];
float mymax = 0;
int myi = 0;
if (threadIdx.x < BLOCKDIM) {
for (int i = threadIdx.x; i < n; i += BLOCKDIM) {
if (SHACCESS(scores,i) > mymax) {
mymax = SHACCESS(scores,i);
myi = i;
}
}
locv[threadIdx.x] = mymax;
loci[threadIdx.x] = myi;
}
__syncthreads();
if (threadIdx.x < BLOCKDIM) {
mymax = locv[threadIdx.x];
myi = loci[threadIdx.x];
for (int i = 1; i < 32; i *= 2) {
float newv = __shfl_down(mymax, i);
int newi = __shfl_down(myi, i);
if (newv > mymax) {
mymax = newv;
myi = newi;
}
}
}
if (threadIdx.x < 1) {
outv[0] = mymax;
outi[0] = myi;
}
__syncthreads();
}
__global__ void __testvmax2(float *vec, int n, float *pkvmax, int *pkimax, int nreps) {
for (int irep = 0; irep < nreps; irep++) {
float maxv = vec[threadIdx.x];
int maxi = threadIdx.x;
for (int i = threadIdx.x + 32; i < n; i += 32) {
if (vec[i] > maxv) {
maxv = vec[i];
maxi = i;
}
}
__syncthreads();
for (int i = 1; i < 32; i *= 2) {
float newv = __shfl_down(maxv, i);
int newi = __shfl_down(maxi, i);
if (newv > maxv) {
maxv = newv;
maxi = newi;
}
}
if (threadIdx.x < 1) {
pkvmax[0] = maxv;
pkimax[0] = maxi;
}
}
__syncthreads();
}
__global__ void __testvmax(float *vec, int n, float *pkvmax, int *pkimax, int nreps) {
__shared__ float SHSTORE(vals);
for (int i = threadIdx.x; i < n; i += blockDim.x) {
SHACCESS(vals, i) = vec[i];
}
__syncthreads();
for (int i = 0; i < nreps; i++) {
findvmax(vals, n, pkvmax, pkimax);
}
}
void testvmax(float *vec, float *cvec, int n, int nreps, int iter) {
float vmax, kvmax, *pkvmax;
int imax, kimax, *pkimax;
cudaMalloc((void**) &pkvmax, sizeof(float));
cudaMalloc((void**) &pkimax, sizeof(int));
// __testvmax<<<1,320>>>(vec, n, pkvmax, pkimax, nreps);
__testvmax<<<1,32>>>(vec, n, pkvmax, pkimax, nreps);
cudaDeviceSynchronize();
if (iter == 0) {
cudaMemcpy(&kvmax, pkvmax, sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&kimax, pkimax, sizeof(int), cudaMemcpyDeviceToHost);
vmax = 0;
for (int i = 0; i < n; i++) {
if (cvec[i] > vmax) {
vmax = cvec[i];
imax = i;
}
}
if (imax == kimax && vmax == kvmax) {
printf("\nMax computed correctly\n");
} else {
printf("\nMax=%f(%i), kernel=%f(%i)\n", vmax, imax, kvmax, kimax);
}
}
cudaFree(pkvmax);
cudaFree(pkimax);
}
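// viterbi: one thread block per tree. Starting from the best-scoring root symbol, the kernel walks the parse
// tree in prefix order and, at each node, re-derives the best unary rule and the best split / child symbols
// from the stored chart scores (unitree/bitree), filling in parsetree, nsym and nscore along the way.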
__global__ void viterbi(int *lrules, int *rrules, float *bivals, int *bip, int n,
int *unirules, float *univals, int *unip,
float **allbitrees, float **allunitrees, int *lengths,
int **parsetrees, int **nsyms, float **nscores) {
__shared__ float maxval[1];
__shared__ int imaxi[1];
__shared__ int left[1];
__shared__ float SHSTORE(lvals);
__shared__ float SHSTORE(rvals);
__shared__ float SHSTORE(lscores);
__shared__ float SHSTORE(rscores);
float *bitree = allbitrees[blockIdx.x]; // Each block processes one tree, so gather all the data
float *unitree = allunitrees[blockIdx.x]; // for this tree.
int length = lengths[blockIdx.x];
int *parsetree = parsetrees[blockIdx.x];
int *nsym = nsyms[blockIdx.x];
float *nscore = nscores[blockIdx.x];
for (int k = threadIdx.x; k < n; k += blockDim.x) {
SHACCESS(lvals, k) = bitree[k + NSYMS*TREEINDX(0, length-1)]; // Load root node scores
}
__syncthreads();
findvmax(lvals, n, maxval, imaxi); // Find the best score
if (threadIdx.x < 1) { // Initialize root node props
parsetree[0] = 2*length-1;
nsym[0] = imaxi[0];
nscore[0] = maxval[0];
left[0] = 0; // Number of input symbols processed so far/ x coord
}
__syncthreads();
for (int i = 0; i < 2*length-3; i++) {
int x = left[0]; // x coord of current node
int y = (parsetree[i]-i-1)/2; // y coord of current node
int here = nsym[i]; // this node's best symbol
float lmax = 0; // score of best rule, tallied over left children
float rmax = 0; // score of best rule, tallied over right children
int hmax = 0; // height of best left child
int symleft = -1; // best left symbol
int symright = -1; // best right symbol
// float thisc = nscore[i];
for (int k = threadIdx.x; k < n; k += blockDim.x) {
SHACCESS(lvals, k) = unitree[k + NSYMS*TREEINDX(x,y)]; // load pre-unary-rule scores for this node
SHACCESS(lscores, k) = 0;
}
__syncthreads();
for (int k = unip[here] + threadIdx.x; k < unip[here+1]; k += blockDim.x) { // Compute pre-unary rule scores given root symbol
float tscore = SHACCESS(lvals,unirules[k]) * univals[k];
SHACCESS(lscores, unirules[k]) = max(SHACCESS(lscores, unirules[k]), tscore);
}
__syncthreads();
findvmax(lscores, n, maxval, imaxi); // Find best pre-unary symbol
here = imaxi[0];
// thisc = maxvals[0];
for (int j = 0; j < y; j++) { // Loop over splits for this node, if it's not a leaf
for (int k = threadIdx.x; k < n; k += blockDim.x) {
SHACCESS(lvals, k) = bitree[k + NSYMS*TREEINDX(x,j)]; // Load left and right node score for this split
SHACCESS(rvals, k) = bitree[k + NSYMS*TREEINDX(x+1+j, y-1-j)];
SHACCESS(lscores, k) = 0;
SHACCESS(rscores, k) = 0;
}
__syncthreads();
for (int k = bip[here] + threadIdx.x; k < bip[here+1]; k += blockDim.x) { // Compute left and right scores given root symbol
float tscore = SHACCESS(lvals,lrules[k]) * SHACCESS(rvals,rrules[k]) * bivals[k];
SHACCESS(lscores, lrules[k]) = max(SHACCESS(lscores, lrules[k]), tscore);
SHACCESS(rscores, rrules[k]) = max(SHACCESS(rscores, rrules[k]), tscore);
}
__syncthreads();
findvmax(lscores, n, maxval, imaxi); // Find the best left score and its height
if (threadIdx.x < 1 && maxval[0] > lmax) {
lmax = maxval[0];
hmax = j;
symleft = imaxi[0];
}
__syncthreads();
findvmax(rscores, n, maxval, imaxi); // Find the best (hopefully matching) right score
if (threadIdx.x < 1 && maxval[0] > rmax) {
rmax = maxval[0];
symright = imaxi[0];
}
__syncthreads();
}
if (threadIdx.x < 1) { // Update data with one thread
if (y > 0) { // If we're not at a leaf, process children
int nexti = i+2*hmax+2; // Tree is prefix order, left child is always the next node
parsetree[i+1] = nexti; // parsetree[x] points to next sibling of x, or just beyond own subtree for right siblings
nsym[i+1] = symleft;
nscore[i+1] = lmax;
parsetree[nexti] = parsetree[i]; // Right sibling points just beyond own subtree (same as beyond parent's subtree)
nsym[nexti] = symright;
nscore[nexti] = rmax;
} else { // We processed a leaf, so advance our leaf pointer
left[0] = left[0] + 1;
}
}
__syncthreads();
}
}
|
07713797c4fab1a55f037a47d882327e0ad4142b.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 256, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
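// Tile configuration: 128x256x64 threadblock tile, 64x64x64 warp tile, 8x8x16 int8 tensor-op MMA instruction.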
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 07713797c4fab1a55f037a47d882327e0ad4142b.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 256, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
c598da502d5efda8329db7361270eb703e8294b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__
void matAddKernel(float *A, float *B, float *C, int n){
int i = threadIdx.x + blockDim.x * blockIdx.x, j;
if(i < n){
for(j = 0; j < n; j++){
C[i+j*n] = A[i+j*n] + B[i+j*n];
}
}
}
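// Note: each thread owns one value of i and loops over all j, so the launch below only needs enough threads
// to cover one dimension (ceil(n/256) blocks of 256 threads) of the n x n matrices.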
void matAdd(float* A, float* B, float* C, int n){
int size = n*n*sizeof(float);
float *d_A, *d_B, *d_C;
hipMalloc((void **) &d_A, size);
hipMemcpy(d_A,A,size,hipMemcpyHostToDevice);
hipMalloc((void **) &d_B, size);
hipMemcpy(d_B,B,size,hipMemcpyHostToDevice);
hipMalloc((void **) &d_C, size);
hipLaunchKernelGGL(( matAddKernel), dim3(ceil(n/256.0)), dim3(256), 0, 0, d_A,d_B,d_C,n);
hipMemcpy(C,d_C,size,hipMemcpyDeviceToHost);
hipFree(d_A); hipFree(d_B); hipFree(d_C);
}
int main(){
int n,i,j;
float *h_A,*h_B,*h_C;
scanf("%d", &n);
h_A = (float*) malloc(n*n*sizeof(float));
h_B = (float*) malloc(n*n*sizeof(float));
h_C = (float*) malloc(n*n*sizeof(float));
for(i = 0; i < n; i++){
//scanf("%f", &h_A[i]);
for(j = 0; j < n; j++)
h_A[i*n+j] = 1;
}
for(i = 0; i < n; i++){
//scanf("%f", &h_B[i]);
for(j = 0; j < n; j++)
h_B[i*n+j] = 1;
}
matAdd(h_A,h_B,h_C,n);
for(i = 0; i < n; i++){
for(j = 0; j < n; j++){
printf("%f ", h_C[i*n+j]);
}
printf("\n");
}
printf("\n");
return 0;
}
| c598da502d5efda8329db7361270eb703e8294b8.cu | #include <stdio.h>
__global__
void matAddKernel(float *A, float *B, float *C, int n){
int i = threadIdx.x + blockDim.x * blockIdx.x, j;
if(i < n){
for(j = 0; j < n; j++){
C[i+j*n] = A[i+j*n] + B[i+j*n];
}
}
}
void matAdd(float* A, float* B, float* C, int n){
int size = n*n*sizeof(float);
float *d_A, *d_B, *d_C;
cudaMalloc((void **) &d_A, size);
cudaMemcpy(d_A,A,size,cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_B, size);
cudaMemcpy(d_B,B,size,cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_C, size);
matAddKernel<<<ceil(n/256.0), 256>>>(d_A,d_B,d_C,n);
cudaMemcpy(C,d_C,size,cudaMemcpyDeviceToHost);
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
}
int main(){
int n,i,j;
float *h_A,*h_B,*h_C;
scanf("%d", &n);
h_A = (float*) malloc(n*n*sizeof(float));
h_B = (float*) malloc(n*n*sizeof(float));
h_C = (float*) malloc(n*n*sizeof(float));
for(i = 0; i < n; i++){
//scanf("%f", &h_A[i]);
for(j = 0; j < n; j++)
h_A[i*n+j] = 1;
}
for(i = 0; i < n; i++){
//scanf("%f", &h_B[i]);
for(j = 0; j < n; j++)
h_B[i*n+j] = 1;
}
matAdd(h_A,h_B,h_C,n);
for(i = 0; i < n; i++){
for(j = 0; j < n; j++){
printf("%f ", h_C[i*n+j]);
}
printf("\n");
}
printf("\n");
return 0;
}
|
Floyd-sh.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2019 Microsoft Corporation. All rights reserved.
*
* Please refer to the Microsoft end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
* Floyd-Warshall with shared memory optimization
*/
#include "hip/hip_runtime.h"
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include "cuda/inc/Floyd.cuh"
#include "inc/test.h"
void Floyd_Warshall_Shared(const std::shared_ptr<int[]>& matrix, const std::shared_ptr<int[]>& path,
const unsigned size, float* time)
{
hipEvent_t start, stop;
const int memSize = sizeof(int)*size*size;
// Initialize CUDA GPU Timers
hipEventCreate(&start);
hipEventCreate(&stop);
// Start CUDA Timer
hipEventRecord(start, nullptr);
// custom deleter as stateless lambda function
const auto deleter = [&](int* ptr) { hipFree(ptr); };
// Allocate GPU device arrays
std::unique_ptr<int[], decltype(deleter)> matrixOnGPU(new int[memSize], deleter);
hipMallocManaged(reinterpret_cast<void **>(&matrixOnGPU), memSize);
std::unique_ptr<int[], decltype(deleter)> pathOnGPU(new int[memSize], deleter);
hipMallocManaged(reinterpret_cast<void **>(&pathOnGPU), memSize);
// Copy the host data into device arrays
hipMemcpy(matrixOnGPU.get(), matrix.get(), memSize, hipMemcpyHostToDevice);
hipMemcpy(pathOnGPU.get(), path.get(), memSize, hipMemcpyHostToDevice);
// It is very important to synchronize between GPU and CPU data transfers
hipDeviceSynchronize();
// dimension
dim3 thread_per_block(SH_TILE_HEIGHT, SH_TILE_WIDTH);
dim3 num_block(static_cast<unsigned int>(ceil(1.0 * size / thread_per_block.x)),
static_cast<unsigned int>(ceil(1.0 * size / thread_per_block.y)));
// run kernel
for (unsigned int k = 0; k < size; ++k)
hipLaunchKernelGGL(( cudaKernel_shared) , dim3(num_block), dim3(thread_per_block) , 0, 0, matrixOnGPU.get(), pathOnGPU.get(), size, k);
// get result back
hipMemcpy(matrix.get(), matrixOnGPU.get(), memSize, hipMemcpyDeviceToHost);
hipMemcpy(path.get(), pathOnGPU.get(), memSize, hipMemcpyDeviceToHost);
// Stop CUDA Timer
hipEventRecord(stop, nullptr);
//Synchronize GPU with CPU
hipEventSynchronize(stop);
// Read the elapsed time and release memory
hipEventElapsedTime(time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
// Clean up
hipFree(matrixOnGPU.get());
hipFree(pathOnGPU.get());
hipDeviceReset();
}
__global__ void cudaKernel_shared(int *matrix, int* path, int size, int k)
{
//define shared memory arrays
__shared__ int cost_i_k[SH_TILE_HEIGHT];
__shared__ int cost_k_j[SH_TILE_WIDTH];
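// cost_i_k caches matrix[i][k] for this block's rows and cost_k_j caches matrix[k][j] for this block's
// columns; the loading scheme below relies on blockDim being (SH_TILE_HEIGHT, SH_TILE_WIDTH), which matches
// the thread_per_block launch configuration above.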
// compute indexes
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
// calculate Floyd Warshall algorithm
if (i < size && j < size) {
const int cost_i_j = matrix[i*size + j];
if (i % SH_TILE_HEIGHT == 0) {
cost_k_j[j % SH_TILE_WIDTH] = matrix[k*size + j];
}
if (j % SH_TILE_WIDTH == 0) {
cost_i_k[i % SH_TILE_HEIGHT] = matrix[i*size + k];
}
__syncthreads();
if (cost_i_k[i % SH_TILE_HEIGHT] != INF && cost_k_j[j % SH_TILE_WIDTH] != INF) {
const int sum = cost_i_k[i % SH_TILE_HEIGHT] + cost_k_j[j % SH_TILE_WIDTH];
if (cost_i_j == INF || sum < cost_i_j) {
matrix[i*size + j] = sum;
path[i*size + j] = path[k*size + j];
}
}
}
}
| Floyd-sh.cu | /**
* Copyright 2019 Microsoft Corporation. All rights reserved.
*
* Please refer to the Microsoft end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
* Floyd-Warshall with shared memory optimization
*/
#include "cuda.h"
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include "cuda/inc/Floyd.cuh"
#include "inc/test.h"
void Floyd_Warshall_Shared(const std::shared_ptr<int[]>& matrix, const std::shared_ptr<int[]>& path,
const unsigned size, float* time)
{
cudaEvent_t start, stop;
const int memSize = sizeof(int)*size*size;
// Initialize CUDA GPU Timers
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Start CUDA Timer
cudaEventRecord(start, nullptr);
// custom deleter as stateless lambda function
const auto deleter = [&](int* ptr) { cudaFree(ptr); };
// Allocate GPU device arrays
std::unique_ptr<int[], decltype(deleter)> matrixOnGPU(new int[memSize], deleter);
cudaMallocManaged(reinterpret_cast<void **>(&matrixOnGPU), memSize);
std::unique_ptr<int[], decltype(deleter)> pathOnGPU(new int[memSize], deleter);
cudaMallocManaged(reinterpret_cast<void **>(&pathOnGPU), memSize);
// Copy the host data into device arrays
cudaMemcpy(matrixOnGPU.get(), matrix.get(), memSize, cudaMemcpyHostToDevice);
cudaMemcpy(pathOnGPU.get(), path.get(), memSize, cudaMemcpyHostToDevice);
// It is very important to synchronize between GPU and CPU data transfers
cudaDeviceSynchronize();
// dimension
dim3 thread_per_block(SH_TILE_HEIGHT, SH_TILE_WIDTH);
dim3 num_block(static_cast<unsigned int>(ceil(1.0 * size / thread_per_block.x)),
static_cast<unsigned int>(ceil(1.0 * size / thread_per_block.y)));
// run kernel
for (unsigned int k = 0; k < size; ++k)
cudaKernel_shared <<< num_block, thread_per_block >>> (matrixOnGPU.get(), pathOnGPU.get(), size, k);
// get result back
cudaMemcpy(matrix.get(), matrixOnGPU.get(), memSize, cudaMemcpyDeviceToHost);
cudaMemcpy(path.get(), pathOnGPU.get(), memSize, cudaMemcpyDeviceToHost);
// Stop CUDA Timer
cudaEventRecord(stop, nullptr);
//Synchronize GPU with CPU
cudaEventSynchronize(stop);
// Read the elapsed time and release memory
cudaEventElapsedTime(time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Clean up
cudaFree(matrixOnGPU.get());
cudaFree(pathOnGPU.get());
cudaDeviceReset();
}
__global__ void cudaKernel_shared(int *matrix, int* path, int size, int k)
{
//define shared memory arrays
__shared__ int cost_i_k[SH_TILE_HEIGHT];
__shared__ int cost_k_j[SH_TILE_WIDTH];
// compute indexes
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
// calculate Floyd Warshall algorithm
if (i < size && j < size) {
const int cost_i_j = matrix[i*size + j];
if (i % SH_TILE_HEIGHT == 0) {
cost_k_j[j % SH_TILE_WIDTH] = matrix[k*size + j];
}
if (j % SH_TILE_WIDTH == 0) {
cost_i_k[i % SH_TILE_HEIGHT] = matrix[i*size + k];
}
__syncthreads();
if (cost_i_k[i % SH_TILE_HEIGHT] != INF && cost_k_j[j % SH_TILE_WIDTH] != INF) {
const int sum = cost_i_k[i % SH_TILE_HEIGHT] + cost_k_j[j % SH_TILE_WIDTH];
if (cost_i_j == INF || sum < cost_i_j) {
matrix[i*size + j] = sum;
path[i*size + j] = path[k*size + j];
}
}
}
}
|
7e42159df92e25227f5a6e04c203182b8712bfd9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#include <stdio.h>
#define Size 122
#define N 100
#define thread_size 10
// thread_index calculation
__global__ void thread_index_test(int *a) {
//int id = threadIdx.x;
int id = threadIdx.x + blockDim.x * blockIdx.x;
if (id < Size) {
a[id] = threadIdx.x;
}
}
int main(void) {
int A[N][N] = { { 1,2 },{ 3,4 } };
int B[N][N] = { { 5,6 },{ 7,8 } };
int C[N][N] = { { 0,0 },{ 0,0 } };
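// A, B and C above are declared but not used by this test; only h_a/d_a below are exercised.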
int *h_a;
int *d_a;
h_a = (int *)malloc(sizeof(int)*Size);
hipMalloc((void **)&d_a, sizeof(int)*Size);
for (int i = 0; i < Size; i++) {
h_a[i] = -1;
}
dim3 grid(Size, Size);
hipMemcpy(d_a, h_a, sizeof(int)*Size, hipMemcpyHostToDevice);
// calling the kernel...
thread_index_test << <(Size + thread_size - 1) / thread_size, thread_size >> >(d_a);
hipMemcpy(h_a, d_a, sizeof(int)*Size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
for (int i = 0; i < Size; i++) {
printf("%d = %d\n", i, h_a[i]);
}
hipFree(d_a);
free(h_a);
return 0;
}
| 7e42159df92e25227f5a6e04c203182b8712bfd9.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#include <stdio.h>
#define Size 122
#define N 100
#define thread_size 10
// thread_index calculation
__global__ void thread_index_test(int *a) {
//int id = threadIdx.x;
int id = threadIdx.x + blockDim.x * blockIdx.x;
if (id < Size) {
a[id] = threadIdx.x;
}
}
int main(void) {
int A[N][N] = { { 1,2 },{ 3,4 } };
int B[N][N] = { { 5,6 },{ 7,8 } };
int C[N][N] = { { 0,0 },{ 0,0 } };
int *h_a;
int *d_a;
h_a = (int *)malloc(sizeof(int)*Size);
cudaMalloc((void **)&d_a, sizeof(int)*Size);
for (int i = 0; i < Size; i++) {
h_a[i] = -1;
}
dim3 grid(Size, Size);
cudaMemcpy(d_a, h_a, sizeof(int)*Size, cudaMemcpyHostToDevice);
// calling the kernel...
thread_index_test << <(Size + thread_size - 1) / thread_size, thread_size >> >(d_a);
cudaMemcpy(h_a, d_a, sizeof(int)*Size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
for (int i = 0; i < Size; i++) {
printf("%d = %d\n", i, h_a[i]);
}
cudaFree(d_a);
free(h_a);
return 0;
}
|
c73604d85c5cf852acf4a216a801090c7e575aa3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void getNearest(int b,int n,float * xyz,int m,float * xyz2,float * result,int * result_i){
const int batch=512;
__shared__ float buf[batch*3];
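// xyz2 is staged into shared memory in tiles of batch points; each thread then scans its query points with a
// grid-stride loop over j, and the inner distance loop is manually unrolled by a factor of 4.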
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*3+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz[(i*n+j)*3+0];
float y1=xyz[(i*n+j)*3+1];
float z1=xyz[(i*n+j)*3+2];
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&3);
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
#include <cstdio>
#include <time.h>
static double get_time(){
timespec tp;
clock_gettime(CLOCK_MONOTONIC,&tp);
return tp.tv_sec+tp.tv_nsec*1e-9;
}
void getNearestLauncher(int b,int n,float * xyz,int m,float * xyz2,float * result,int * result_i){
float * xyz_g;
float * xyz2_g;
float * result_g;
int * result_i_g;
double t0=get_time();
hipMalloc(&xyz_g,b*n*3*4);
hipMalloc(&xyz2_g,b*m*3*4);
hipMalloc(&result_g,b*n*4);
hipMalloc(&result_i_g,b*n*4);
hipMemcpy(xyz_g,xyz,b*n*3*4,hipMemcpyHostToDevice);
hipMemcpy(xyz2_g,xyz2,b*m*3*4,hipMemcpyHostToDevice);
double t1=get_time();
hipLaunchKernelGGL(( getNearest), dim3(dim3(32,16,1)),dim3(512), 0, 0, b,n,xyz_g,m,xyz2_g,result_g,result_i_g);
hipDeviceSynchronize();
double t2=get_time();
hipMemcpy(result,result_g,b*n*4,hipMemcpyDeviceToHost);
hipMemcpy(result_i,result_i_g,b*n*4,hipMemcpyDeviceToHost);
hipFree(xyz_g);
hipFree(xyz2_g);
hipFree(result_g);
hipFree(result_i_g);
double t3=get_time();
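// Printed times: total, allocation + host-to-device copies, kernel, device-to-host copies + cleanup.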
printf("time %f %f %f %f\n",t3-t0,t1-t0,t2-t1,t3-t2);
}
void getNearestLauncher2(int b,int n,float * xyz,int m,float * xyz2,float * result,int * result_i,float * result2,int * result2_i){
float * xyz_g;
float * xyz2_g;
float * result_g;
int * result_i_g;
float * result2_g;
int * result2_i_g;
double t0=get_time();
hipMalloc(&xyz_g,b*n*3*4);
hipMalloc(&xyz2_g,b*m*3*4);
hipMalloc(&result_g,b*n*4);
hipMalloc(&result_i_g,b*n*4);
hipMalloc(&result2_g,b*n*4);
hipMalloc(&result2_i_g,b*n*4);
hipMemcpy(xyz_g,xyz,b*n*3*4,hipMemcpyHostToDevice);
hipMemcpy(xyz2_g,xyz2,b*m*3*4,hipMemcpyHostToDevice);
double t1=get_time();
hipLaunchKernelGGL(( getNearest), dim3(dim3(32,16,1)),dim3(512), 0, 0, b,n,xyz_g,m,xyz2_g,result_g,result_i_g);
hipLaunchKernelGGL(( getNearest), dim3(dim3(32,16,1)),dim3(512), 0, 0, b,m,xyz2_g,n,xyz_g,result2_g,result2_i_g);
hipDeviceSynchronize();
double t2=get_time();
hipMemcpy(result,result_g,b*n*4,hipMemcpyDeviceToHost);
hipMemcpy(result_i,result_i_g,b*n*4,hipMemcpyDeviceToHost);
hipMemcpy(result2,result2_g,b*m*4,hipMemcpyDeviceToHost);
hipMemcpy(result2_i,result2_i_g,b*m*4,hipMemcpyDeviceToHost);
hipFree(xyz_g);
hipFree(xyz2_g);
hipFree(result_g);
hipFree(result_i_g);
hipFree(result2_g);
hipFree(result2_i_g);
double t3=get_time();
printf("time %f %f %f %f\n",t3-t0,t1-t0,t2-t1,t3-t2);
}
| c73604d85c5cf852acf4a216a801090c7e575aa3.cu | __global__ void getNearest(int b,int n,float * xyz,int m,float * xyz2,float * result,int * result_i){
const int batch=512;
__shared__ float buf[batch*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*3+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz[(i*n+j)*3+0];
float y1=xyz[(i*n+j)*3+1];
float z1=xyz[(i*n+j)*3+2];
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&3);
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
#include <cstdio>
#include <time.h>
static double get_time(){
timespec tp;
clock_gettime(CLOCK_MONOTONIC,&tp);
return tp.tv_sec+tp.tv_nsec*1e-9;
}
void getNearestLauncher(int b,int n,float * xyz,int m,float * xyz2,float * result,int * result_i){
float * xyz_g;
float * xyz2_g;
float * result_g;
int * result_i_g;
double t0=get_time();
cudaMalloc(&xyz_g,b*n*3*4);
cudaMalloc(&xyz2_g,b*m*3*4);
cudaMalloc(&result_g,b*n*4);
cudaMalloc(&result_i_g,b*n*4);
cudaMemcpy(xyz_g,xyz,b*n*3*4,cudaMemcpyHostToDevice);
cudaMemcpy(xyz2_g,xyz2,b*m*3*4,cudaMemcpyHostToDevice);
double t1=get_time();
getNearest<<<dim3(32,16,1),512>>>(b,n,xyz_g,m,xyz2_g,result_g,result_i_g);
cudaDeviceSynchronize();
double t2=get_time();
cudaMemcpy(result,result_g,b*n*4,cudaMemcpyDeviceToHost);
cudaMemcpy(result_i,result_i_g,b*n*4,cudaMemcpyDeviceToHost);
cudaFree(xyz_g);
cudaFree(xyz2_g);
cudaFree(result_g);
cudaFree(result_i_g);
double t3=get_time();
printf("time %f %f %f %f\n",t3-t0,t1-t0,t2-t1,t3-t2);
}
void getNearestLauncher2(int b,int n,float * xyz,int m,float * xyz2,float * result,int * result_i,float * result2,int * result2_i){
float * xyz_g;
float * xyz2_g;
float * result_g;
int * result_i_g;
float * result2_g;
int * result2_i_g;
double t0=get_time();
cudaMalloc(&xyz_g,b*n*3*4);
cudaMalloc(&xyz2_g,b*m*3*4);
cudaMalloc(&result_g,b*n*4);
cudaMalloc(&result_i_g,b*n*4);
cudaMalloc(&result2_g,b*n*4);
cudaMalloc(&result2_i_g,b*n*4);
cudaMemcpy(xyz_g,xyz,b*n*3*4,cudaMemcpyHostToDevice);
cudaMemcpy(xyz2_g,xyz2,b*m*3*4,cudaMemcpyHostToDevice);
double t1=get_time();
getNearest<<<dim3(32,16,1),512>>>(b,n,xyz_g,m,xyz2_g,result_g,result_i_g);
getNearest<<<dim3(32,16,1),512>>>(b,m,xyz2_g,n,xyz_g,result2_g,result2_i_g);
cudaDeviceSynchronize();
double t2=get_time();
cudaMemcpy(result,result_g,b*n*4,cudaMemcpyDeviceToHost);
cudaMemcpy(result_i,result_i_g,b*n*4,cudaMemcpyDeviceToHost);
cudaMemcpy(result2,result2_g,b*m*4,cudaMemcpyDeviceToHost);
cudaMemcpy(result2_i,result2_i_g,b*m*4,cudaMemcpyDeviceToHost);
cudaFree(xyz_g);
cudaFree(xyz2_g);
cudaFree(result_g);
cudaFree(result_i_g);
cudaFree(result2_g);
cudaFree(result2_i_g);
double t3=get_time();
printf("time %f %f %f %f\n",t3-t0,t1-t0,t2-t1,t3-t2);
}
|
ac66ab57e7ff0717c6314ff622bc6a20e020bef0.hip | // !!! This is a file automatically generated by hipify!!!
/*
*
* pageableMemcpyHtoD16Blocking.cu
*
* Microdemo that illustrates the importance of correct
* synchronization. Identical to pageableMemcpyHtoD16.cu
* except the event synchronize has been removed.
*
* A pair of pinned staging buffers are allocated, and after the first
* staging buffer has been filled, the GPU pulls from one while the
* CPU fills the other. CUDA events are used for synchronization.
*
* This implementation uses the SSE-optimized memcpy of memcpy16.cpp,
* so for simplicity, it requires host pointers to be 16-byte aligned.
*
* Build with: nvcc -I ../chLib <options> pageableMemcpyHtoD16Blocking.cu memcpy16.cpp
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include "chError.h"
#include "chTimer.h"
#define STAGING_BUFFER_SIZE 1048576
void *g_hostBuffers[2];
hipEvent_t g_events[2];
// these are already defined on some platforms - make our
// own definitions that will work.
#undef min
#undef max
#define min(a,b) ((a)<(b)?(a):(b))
#define max(a,b) ((b)<(a)?(a):(b))
extern bool memcpy16( void *_dst, const void *_src, size_t N );
void
chMemcpyHtoD( void *device, const void *host, size_t N )
{
hipError_t status;
char *dst = (char *) device;
const char *src = (const char *) host;
int stagingIndex = 0;
while ( N ) {
size_t thisCopySize = min( N, STAGING_BUFFER_SIZE );
cuda(EventSynchronize( g_events[stagingIndex] ) );
memcpy16( g_hostBuffers[stagingIndex], src, thisCopySize );
cuda(MemcpyAsync( dst, g_hostBuffers[stagingIndex], thisCopySize,
hipMemcpyHostToDevice, NULL ) );
cuda(EventRecord( g_events[1-stagingIndex], NULL ) );
dst += thisCopySize;
src += thisCopySize;
N -= thisCopySize;
stagingIndex = 1 - stagingIndex;
}
Error:
return;
}
bool
TestMemcpy( int *dstDevice, int *srcHost, const int *srcOriginal,
size_t dstOffset, size_t srcOffset, size_t numInts )
{
chMemcpyHtoD( dstDevice+dstOffset, srcOriginal+srcOffset, numInts*sizeof(int) );
hipMemcpy( srcHost, dstDevice+dstOffset, numInts*sizeof(int), hipMemcpyDeviceToHost );
for ( size_t i = 0; i < numInts; i++ ) {
if ( srcHost[i] != srcOriginal[srcOffset+i] ) {
return false;
}
}
return true;
}
int
main( int argc, char *argv[] )
{
hipError_t status;
int *deviceInt = 0;
int *hostInt = 0;
const size_t numInts = 32*1048576;
const int cIterations = 10;
int *testVector = 0;
printf( "Pageable memcpy (16-byte aligned)... " ); fflush( stdout );
chTimerTimestamp start, stop;
cuda(HostAlloc( &g_hostBuffers[0], STAGING_BUFFER_SIZE, hipHostMallocDefault ) );
cuda(HostAlloc( &g_hostBuffers[1], STAGING_BUFFER_SIZE, hipHostMallocDefault ) );
cuda(EventCreate( &g_events[0], hipEventBlockingSync ) );
cuda(EventRecord( g_events[0], 0 ) ); // so it is signaled on first synchronize
cuda(EventCreate( &g_events[1], hipEventBlockingSync ) );
cuda(EventRecord( g_events[1], 0 ) ); // so it is signaled on first synchronize
cuda(Malloc( &deviceInt, numInts*sizeof(int) ) );
cuda(HostAlloc( &hostInt, numInts*sizeof(int), 0 ) );
testVector = (int *) malloc( numInts*sizeof(int) );
if ( ! testVector ) {
printf( "malloc() failed\n" );
return 1;
}
for ( size_t i = 0; i < numInts; i++ ) {
testVector[i] = rand();
}
if ( ! TestMemcpy( deviceInt, hostInt, testVector, 0, 0, numInts ) ) {
goto Error;
}
for ( int i = 0; i < cIterations; i++ ) {
size_t numInts4 = numInts / 4;
size_t dstOffset = rand() % (numInts4-1);
size_t srcOffset = rand() % (numInts4-1);
size_t intsThisIteration = 1 + rand() % (numInts4-max(dstOffset,srcOffset)-1);
dstOffset *= 4;
srcOffset *= 4;
intsThisIteration *= 4;
if ( ! TestMemcpy( deviceInt, hostInt, testVector, dstOffset, srcOffset, intsThisIteration ) ) {
TestMemcpy( deviceInt, hostInt, testVector, dstOffset, srcOffset, intsThisIteration );
goto Error;
}
}
chTimerGetTime( &start );
for ( int i = 0; i < cIterations; i++ ) {
chMemcpyHtoD( deviceInt, testVector, numInts*sizeof(int) ) ;
}
cuda(DeviceSynchronize() );
chTimerGetTime( &stop );
{
double MBytes = cIterations*numInts*sizeof(int) / 1048576.0;
double MBpers = MBytes / chTimerElapsedTime( &start, &stop );
printf( "%.2f MB/s\n", MBpers );
}
hipFree( deviceInt );
hipHostFree( hostInt );
return 0;
Error:
printf( "Error\n" );
return 1;
}
| ac66ab57e7ff0717c6314ff622bc6a20e020bef0.cu | /*
*
* pageableMemcpyHtoD16Blocking.cu
*
* Microdemo that illustrates the importance of correct
* synchronization. Identical to pageableMemcpyHtoD16.cu
* except the event synchronize has been removed.
*
* A pair of pinned staging buffers are allocated, and after the first
* staging buffer has been filled, the GPU pulls from one while the
* CPU fills the other. CUDA events are used for synchronization.
*
* This implementation uses the SSE-optimized memcpy of memcpy16.cpp,
* so for simplicity, it requires host pointers to be 16-byte aligned.
*
* Build with: nvcc -I ../chLib <options> pageableMemcpyHtoD16Blocking.cu memcpy16.cpp
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include "chError.h"
#include "chTimer.h"
#define STAGING_BUFFER_SIZE 1048576
void *g_hostBuffers[2];
cudaEvent_t g_events[2];
// these are already defined on some platforms - make our
// own definitions that will work.
#undef min
#undef max
#define min(a,b) ((a)<(b)?(a):(b))
#define max(a,b) ((b)<(a)?(a):(b))
extern bool memcpy16( void *_dst, const void *_src, size_t N );
void
chMemcpyHtoD( void *device, const void *host, size_t N )
{
cudaError_t status;
char *dst = (char *) device;
const char *src = (const char *) host;
int stagingIndex = 0;
while ( N ) {
size_t thisCopySize = min( N, STAGING_BUFFER_SIZE );
cuda(EventSynchronize( g_events[stagingIndex] ) );
memcpy16( g_hostBuffers[stagingIndex], src, thisCopySize );
cuda(MemcpyAsync( dst, g_hostBuffers[stagingIndex], thisCopySize,
cudaMemcpyHostToDevice, NULL ) );
cuda(EventRecord( g_events[1-stagingIndex], NULL ) );
dst += thisCopySize;
src += thisCopySize;
N -= thisCopySize;
stagingIndex = 1 - stagingIndex;
}
Error:
return;
}
bool
TestMemcpy( int *dstDevice, int *srcHost, const int *srcOriginal,
size_t dstOffset, size_t srcOffset, size_t numInts )
{
chMemcpyHtoD( dstDevice+dstOffset, srcOriginal+srcOffset, numInts*sizeof(int) );
cudaMemcpy( srcHost, dstDevice+dstOffset, numInts*sizeof(int), cudaMemcpyDeviceToHost );
for ( size_t i = 0; i < numInts; i++ ) {
if ( srcHost[i] != srcOriginal[srcOffset+i] ) {
return false;
}
}
return true;
}
int
main( int argc, char *argv[] )
{
cudaError_t status;
int *deviceInt = 0;
int *hostInt = 0;
const size_t numInts = 32*1048576;
const int cIterations = 10;
int *testVector = 0;
printf( "Pageable memcpy (16-byte aligned)... " ); fflush( stdout );
chTimerTimestamp start, stop;
cuda(HostAlloc( &g_hostBuffers[0], STAGING_BUFFER_SIZE, cudaHostAllocDefault ) );
cuda(HostAlloc( &g_hostBuffers[1], STAGING_BUFFER_SIZE, cudaHostAllocDefault ) );
cuda(EventCreate( &g_events[0], cudaEventBlockingSync ) );
cuda(EventRecord( g_events[0], 0 ) ); // so it is signaled on first synchronize
cuda(EventCreate( &g_events[1], cudaEventBlockingSync ) );
cuda(EventRecord( g_events[1], 0 ) ); // so it is signaled on first synchronize
cuda(Malloc( &deviceInt, numInts*sizeof(int) ) );
cuda(HostAlloc( &hostInt, numInts*sizeof(int), 0 ) );
testVector = (int *) malloc( numInts*sizeof(int) );
if ( ! testVector ) {
printf( "malloc() failed\n" );
return 1;
}
for ( size_t i = 0; i < numInts; i++ ) {
testVector[i] = rand();
}
if ( ! TestMemcpy( deviceInt, hostInt, testVector, 0, 0, numInts ) ) {
goto Error;
}
for ( int i = 0; i < cIterations; i++ ) {
size_t numInts4 = numInts / 4;
size_t dstOffset = rand() % (numInts4-1);
size_t srcOffset = rand() % (numInts4-1);
size_t intsThisIteration = 1 + rand() % (numInts4-max(dstOffset,srcOffset)-1);
dstOffset *= 4;
srcOffset *= 4;
intsThisIteration *= 4;
if ( ! TestMemcpy( deviceInt, hostInt, testVector, dstOffset, srcOffset, intsThisIteration ) ) {
TestMemcpy( deviceInt, hostInt, testVector, dstOffset, srcOffset, intsThisIteration );
goto Error;
}
}
chTimerGetTime( &start );
for ( int i = 0; i < cIterations; i++ ) {
chMemcpyHtoD( deviceInt, testVector, numInts*sizeof(int) ) ;
}
cuda(DeviceSynchronize() );
chTimerGetTime( &stop );
{
double MBytes = cIterations*numInts*sizeof(int) / 1048576.0;
double MBpers = MBytes / chTimerElapsedTime( &start, &stop );
printf( "%.2f MB/s\n", MBpers );
}
cudaFree( deviceInt );
cudaFreeHost( hostInt );
return 0;
Error:
printf( "Error\n" );
return 1;
}
|
74c92a524da844daef25b0aedc67db9a0ce4112c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "edge_contractions_woc_thrust.h"
#include <gpuMST.h>
#include "ECLgraph.h"
#include "rama_utils.h"
struct is_positive_edge
{
__host__ __device__
bool operator()(const thrust::tuple<int,int,float> t)
{
return thrust::get<2>(t) > 0;
}
};
struct edge_index_in_same_cc_func
{
const int* cc_labels;
const int* row_ids;
const int* col_ids;
__host__ __device__
bool operator()(const int edge_index)
{
const int i = row_ids[edge_index];
const int j = col_ids[edge_index];
return cc_labels[i] == cc_labels[j];
}
};
struct edge_in_diff_cc_func
{
const int* cc_labels;
__host__ __device__
bool operator()(const thrust::tuple<int, int>& t)
{
const int i = thrust::get<0>(t);
const int j = thrust::get<1>(t);
return cc_labels[i] != cc_labels[j];
}
};
void frontier::restrict_to_indices(const thrust::device_vector<int>& indices_to_keep)
{
int num_to_keep = indices_to_keep.size();
thrust::device_vector<int> new_nodes(num_to_keep);
thrust::device_vector<int> new_parent_nodes(num_to_keep);
thrust::device_vector<int> new_rep_edges(num_to_keep);
thrust::device_vector<int> new_bottleneck_indices(num_to_keep);
thrust::device_vector<float> new_bottleneck_values(num_to_keep);
auto first_input = thrust::make_zip_iterator(thrust::make_tuple(nodes.begin(), parent_nodes.begin(), rep_edges.begin(), bottleneck_indices.begin(), bottleneck_values.begin()));
auto first_output = thrust::make_zip_iterator(thrust::make_tuple(new_nodes.begin(), new_parent_nodes.begin(), new_rep_edges.begin(), new_bottleneck_indices.begin(), new_bottleneck_values.begin()));
thrust::gather(indices_to_keep.begin(), indices_to_keep.begin() + num_to_keep, first_input, first_output);
thrust::swap(new_nodes, nodes);
thrust::swap(new_parent_nodes, parent_nodes);
thrust::swap(new_rep_edges, rep_edges);
thrust::swap(new_bottleneck_indices, bottleneck_indices);
thrust::swap(new_bottleneck_values, bottleneck_values);
}
void frontier::filter_by_rep_edges(const thrust::device_vector<int>& rep_edges_to_remove)
{
assert(thrust::is_sorted(rep_edges_to_remove.begin(), rep_edges_to_remove.end()));
thrust::device_vector<int> rep_edges_sorted = rep_edges;
thrust::device_vector<int> rep_edges_sorting_order(rep_edges_sorted.size());
thrust::sequence(rep_edges_sorting_order.begin(), rep_edges_sorting_order.end());
thrust::sort_by_key(rep_edges_sorted.begin(), rep_edges_sorted.end(), rep_edges_sorting_order.begin());
thrust::device_vector<int> indices_to_keep(nodes.size());
auto last_to_keep = thrust::set_difference_by_key(rep_edges_sorted.begin(), rep_edges_sorted.end(), rep_edges_to_remove.begin(), rep_edges_to_remove.end(),
rep_edges_sorting_order.begin(), thrust::make_constant_iterator<int>(0),
thrust::make_discard_iterator(), indices_to_keep.begin());
indices_to_keep.resize(std::distance(indices_to_keep.begin(), last_to_keep.second));
restrict_to_indices(indices_to_keep);
}
void frontier::filter_by_mst_edges(const thrust::device_vector<int>& mst_edges_to_keep)
{
assert(thrust::is_sorted(mst_edges_to_keep.begin(), mst_edges_to_keep.end()));
thrust::device_vector<int> bottleneck_indices_sorted = bottleneck_indices;
thrust::device_vector<int> bottleneck_indices_sorting_order(bottleneck_indices_sorted.size());
thrust::sequence(bottleneck_indices_sorting_order.begin(), bottleneck_indices_sorting_order.end());
thrust::sort_by_key(bottleneck_indices_sorted.begin(), bottleneck_indices_sorted.end(), bottleneck_indices_sorting_order.begin());
thrust::device_vector<int> indices_to_keep(bottleneck_indices_sorted.size());
auto last_to_keep = thrust::set_intersection_by_key(bottleneck_indices_sorted.begin(), bottleneck_indices_sorted.end(),
mst_edges_to_keep.begin(), mst_edges_to_keep.end(),
bottleneck_indices_sorting_order.begin(),
thrust::make_discard_iterator(), indices_to_keep.begin());
indices_to_keep.resize(std::distance(indices_to_keep.begin(), last_to_keep.second));
restrict_to_indices(indices_to_keep);
}
void frontier::reassign_mst_indices(const thrust::device_vector<int>& valid_mst_indices, const int prev_mst_size)
{
map_old_values_consec(bottleneck_indices, valid_mst_indices, prev_mst_size);
}
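// filter_by_cc: recompute the connected components of the current MST (the attractive-edge forest) and keep
// only the repulsive edges whose endpoints fall in the same component, since only those can still close a
// conflicted cycle. The row/column frontiers are then re-seeded from the surviving repulsive edges.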
int edge_contractions_woc_thrust::filter_by_cc()
{
assert(cc_labels.size() == num_nodes);
thrust::device_vector<int> mst_row_offsets = compute_offsets(mst_row_ids, num_nodes - 1);
computeCC_gpu(num_nodes, mst_row_ids.size(),
thrust::raw_pointer_cast(mst_row_offsets.data()),
thrust::raw_pointer_cast(mst_col_ids.data()),
thrust::raw_pointer_cast(cc_labels.data()),
get_cuda_device());
edge_in_diff_cc_func edge_in_diff_cc({thrust::raw_pointer_cast(cc_labels.data())});
// thrust::device_vector<int> rep_invalid_indices(rep_row_ids.size());
// thrust::sequence(rep_invalid_indices.begin(), rep_invalid_indices.end());
// auto last_invalid = thrust::remove_if(rep_invalid_indices.begin(), rep_invalid_indices.end(), edge_in_same_cc);
// const int num_invalid_rep = std::distance(rep_invalid_indices.begin(), last_invalid);
// rep_invalid_indices.resize(num_invalid_rep);
// row_frontier.filter_by_rep_edges(rep_invalid_indices);
// col_frontier.filter_by_rep_edges(rep_invalid_indices);
// return rep_row_ids.size() - num_invalid_rep;
auto first_rep = thrust::make_zip_iterator(thrust::make_tuple(rep_row_ids.begin(), rep_col_ids.begin()));
auto last_rep = thrust::make_zip_iterator(thrust::make_tuple(rep_row_ids.end(), rep_col_ids.end()));
auto last_invalid = thrust::remove_if(first_rep, last_rep, edge_in_diff_cc);
int num_rep_edges = std::distance(first_rep, last_invalid);
rep_row_ids.resize(num_rep_edges);
rep_col_ids.resize(num_rep_edges);
// Re-initialize the frontiers. Another (probably faster) possibility would be to filter out only the found
// conflicted cycles and keep going; however, that requires keeping track of all predecessors, which needs more memory.
row_frontier = frontier(rep_row_ids);
col_frontier = frontier(rep_col_ids);
return num_rep_edges;
}
struct recompute_degree_func
{
__host__ __device__
void operator()(thrust::tuple<const int, int&> t)
{
const int parent_node = thrust::get<0>(t);
if (parent_node != -1)
{
int& degree = thrust::get<1>(t);
degree--;
}
}
};
__global__ void expand_cuda(const int num_vertices,
const int* const __restrict__ row_offsets,
const int* const __restrict__ col_ids,
const float* const __restrict__ costs,
const int* const __restrict__ v_frontier,
const int* const __restrict__ v_frontier_offsets,
const int* const __restrict__ v_rep_edges,
const int* const __restrict__ v_parent_nodes,
const int* const __restrict__ v_bottleneck_edge_index,
const float* const __restrict__ v_bottleneck_edge_value,
int* __restrict__ expanded_frontier,
int* __restrict__ expanded_rep_edges,
int* __restrict__ expanded_parent_nodes,
int* __restrict__ expanded_bottleneck_edge_index,
float* __restrict__ expanded_bottleneck_edge_value)
{
const int start_index = blockIdx.x * blockDim.x + threadIdx.x;
const int num_threads = blockDim.x * gridDim.x;
for (int idx = start_index; idx < num_vertices; idx += num_threads)
{
const int src = v_frontier[idx];
const int src_parent = v_parent_nodes[idx];
const int src_rep_edge = v_rep_edges[idx];
const int prev_bottleneck_index = v_bottleneck_edge_index[idx];
const float prev_bottleneck_value = v_bottleneck_edge_value[idx];
int output_offset = v_frontier_offsets[idx];
for (int input_offset = row_offsets[src]; input_offset != row_offsets[src + 1]; ++input_offset)
{
const int dst = col_ids[input_offset];
if (dst != src_parent)
{
expanded_frontier[output_offset] = dst;
expanded_rep_edges[output_offset] = src_rep_edge;
expanded_parent_nodes[output_offset] = src;
const float cost = costs[input_offset];
if (cost < prev_bottleneck_value)
{
expanded_bottleneck_edge_index[output_offset] = input_offset;
expanded_bottleneck_edge_value[output_offset] = cost;
}
else
{
expanded_bottleneck_edge_index[output_offset] = prev_bottleneck_index;
expanded_bottleneck_edge_value[output_offset] = prev_bottleneck_value;
}
++output_offset;
}
}
}
}
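// expand_cuda grows every frontier path by one hop: each frontier vertex visits its neighbors except its
// parent, carrying along the originating repulsive edge and the smallest-cost (bottleneck) MST edge seen on
// the path so far. When a row-side and a column-side path later meet at the same (vertex, repulsive edge)
// pair, reduce_intersecting_paths below keeps the smaller of the two bottlenecks and sums the occurrence
// counts, so a count greater than one marks a conflicted cycle.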
struct reduce_intersecting_paths
{
__host__ __device__
thrust::tuple<int, float, int>
operator()(const thrust::tuple<int, float, int>& t1,
const thrust::tuple<int, float, int>& t2)
{
const float val1 = thrust::get<1>(t1);
const float val2 = thrust::get<1>(t2);
const int count = thrust::get<2>(t1) + thrust::get<2>(t2);
if (val1 < val2)
return thrust::make_tuple(thrust::get<0>(t1), val1, count);
else
return thrust::make_tuple(thrust::get<0>(t2), val2, count);
}
};
struct single_occurence
{
__host__ __device__
bool operator()(const thrust::tuple<int, int, int>& t)
{
return thrust::get<2>(t) == 1;
}
};
struct is_row_unique_frontier
{
__host__ __device__
bool operator()(const thrust::tuple<int, int, int, int, float, int, bool>& t)
{
return thrust::get<5>(t) == 1 && thrust::get<6>(t);
}
};
struct is_col_unique_frontier
{
__host__ __device__
bool operator()(const thrust::tuple<int, int, int, int, float, int, bool>& t)
{
return thrust::get<5>(t) == 1 && !thrust::get<6>(t);
}
};
// void remove_frontiers(thrust::device_vector<int>& v_frontier, thrust::device_vector<int>& v_rep_edges,
// thrust::device_vector<int>& v_parent_nodes, thrust::device_vector<int>& v_bottleneck_edge_index, thrust::device_vector<float>& v_bottleneck_edge_value,
// const thrust::device_vector<int>& rep_edges_to_remove)
// {
// assert(thrust::is_sorted(rep_edges_to_remove.begin(), rep_edges_to_remove.end()));
// auto first_val_wo_rep = thrust::make_zip_iterator(thrust::make_tuple(v_frontier.begin(), v_parent_nodes.begin(), v_bottleneck_edge_index.begin(), v_bottleneck_edge_value.begin()));
// thrust::sort_by_key(v_rep_edges.begin(), v_rep_edges.end(), first_val_wo_rep);
// auto second_val_dummy = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_constant_iterator<int>(0), thrust::make_constant_iterator<int>(0), thrust::make_constant_iterator<int>(0), thrust::make_constant_iterator<int>(0)));
// thrust::device_vector<int> v_frontier_valid(v_frontier.size);
// thrust::device_vector<int> v_rep_edges_valid(v_frontier.size);
// thrust::device_vector<int> v_parent_nodes_valid(v_frontier.size);
// thrust::device_vector<int> v_bottleneck_edge_index_valid(v_frontier.size);
// thrust::device_vector<float> v_bottleneck_edge_value_valid(v_frontier.size);
// auto first_val_valid = thrust::make_zip_iterator(thrust::make_tuple(v_frontier_valid.begin(), v_parent_nodes_valid.begin(), v_bottleneck_edge_index_valid.begin(), v_bottleneck_edge_value_valid.begin()));
// auto last_valid = thrust::set_difference_by_key(v_rep_edges.begin(), v_rep_edges.end(), rep_edges_to_remove.begin(), rep_edges_to_remove.end(),
// first_val_wo_rep, second_val_dummy, v_rep_edges_valid.begin(), first_val_valid);
// thrust::swap(v_frontier_valid, v_frontier);
// thrust::swap(v_rep_edges_valid, v_rep_edges);
// thrust::swap(v_parent_nodes_valid, v_parent_nodes);
// thrust::swap(v_bottleneck_edge_index_valid, v_bottleneck_edge_index);
// thrust::swap(v_bottleneck_edge_value_valid, v_bottleneck_edge_value);
// }
bool edge_contractions_woc_thrust::filter_cycles()
{
// The elements in (v_frontier_row, v_rep_edges_row) which match (v_frontier_col, v_rep_edges_col) correspond to conflicted cycles.
// For these elements, find the best bottleneck edge index by comparing v_bottleneck_edge_index_row with v_bottleneck_edge_index_col
// based on the corresponding values, and remove that attractive edge from the MST.
thrust::device_vector<int>& v_frontier_row = row_frontier.get_nodes();
thrust::device_vector<int>& v_parent_nodes_row = row_frontier.get_parent_nodes();
thrust::device_vector<int>& v_rep_edges_row = row_frontier.get_rep_edges();
thrust::device_vector<int>& v_bottleneck_edge_index_row = row_frontier.get_bottleneck_indices();
thrust::device_vector<float>& v_bottleneck_edge_value_row = row_frontier.get_bottleneck_values();
// Prepare by sorting both arrays:
auto first_row_key = thrust::make_zip_iterator(thrust::make_tuple(v_frontier_row.begin(), v_rep_edges_row.begin()));
auto last_row_key = thrust::make_zip_iterator(thrust::make_tuple(v_frontier_row.end(), v_rep_edges_row.end()));
auto first_row_val = thrust::make_zip_iterator(thrust::make_tuple(v_parent_nodes_row.begin(), v_bottleneck_edge_index_row.begin(), v_bottleneck_edge_value_row.begin()));
thrust::sort_by_key(first_row_key, last_row_key, first_row_val);
thrust::device_vector<int>& v_frontier_col = col_frontier.get_nodes();
thrust::device_vector<int>& v_parent_nodes_col = col_frontier.get_parent_nodes();
thrust::device_vector<int>& v_rep_edges_col = col_frontier.get_rep_edges();
thrust::device_vector<int>& v_bottleneck_edge_index_col = col_frontier.get_bottleneck_indices();
thrust::device_vector<float>& v_bottleneck_edge_value_col = col_frontier.get_bottleneck_values();
auto first_col_key = thrust::make_zip_iterator(thrust::make_tuple(v_frontier_col.begin(), v_rep_edges_col.begin()));
auto last_col_key = thrust::make_zip_iterator(thrust::make_tuple(v_frontier_col.end(), v_rep_edges_col.end()));
auto first_col_val = thrust::make_zip_iterator(thrust::make_tuple(v_parent_nodes_col.begin(), v_bottleneck_edge_index_col.begin(), v_bottleneck_edge_value_col.begin()));
thrust::sort_by_key(first_col_key, last_col_key, first_col_val);
// Merge and search for duplicates
thrust::device_vector<int> v_frontier_merged(v_frontier_row.size() + v_frontier_col.size());
thrust::device_vector<int> v_rep_edges_merged(v_frontier_row.size() + v_frontier_col.size());
thrust::device_vector<int> v_bottleneck_index_merged(v_frontier_row.size() + v_frontier_col.size());
thrust::device_vector<float> v_bottleneck_value_merged(v_frontier_row.size() + v_frontier_col.size());
auto first_row_val_merge = thrust::make_zip_iterator(thrust::make_tuple(v_bottleneck_edge_index_row.begin(), v_bottleneck_edge_value_row.begin()));
auto first_col_val_merge = thrust::make_zip_iterator(thrust::make_tuple(v_bottleneck_edge_index_col.begin(), v_bottleneck_edge_value_col.begin()));
auto first_merged_key = thrust::make_zip_iterator(thrust::make_tuple(v_frontier_merged.begin(), v_rep_edges_merged.begin()));
auto first_merged_val = thrust::make_zip_iterator(thrust::make_tuple(v_bottleneck_index_merged.begin(), v_bottleneck_value_merged.begin()));
auto last_merged = thrust::merge_by_key(first_row_key, last_row_key, first_col_key, last_col_key,
first_row_val_merge, first_col_val_merge, first_merged_key, first_merged_val);
assert(std::distance(first_merged_key, last_merged.first) == v_frontier_merged.size());
auto first_merged_val_with_count = thrust::make_zip_iterator(thrust::make_tuple(v_bottleneck_index_merged.begin(), v_bottleneck_value_merged.begin(), thrust::make_constant_iterator<int>(1)));
thrust::device_vector<int> v_rep_edges_reduced(v_frontier_row.size() + v_frontier_col.size());
thrust::device_vector<int> v_bottleneck_index_reduced(v_frontier_row.size() + v_frontier_col.size());
thrust::device_vector<int> num_occ(v_frontier_row.size() + v_frontier_col.size());
auto reduced_key_first = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_discard_iterator(), v_rep_edges_reduced.begin()));
auto reduced_val_first = thrust::make_zip_iterator(thrust::make_tuple(v_bottleneck_index_reduced.begin(), thrust::make_discard_iterator(), num_occ.begin()));
thrust::equal_to<thrust::tuple<int, int>> binary_pred_comp;
auto last_reduce = thrust::reduce_by_key(first_merged_key, last_merged.first, first_merged_val_with_count, reduced_key_first, reduced_val_first, binary_pred_comp, reduce_intersecting_paths());
int num_reduced = std::distance(reduced_key_first, last_reduce.first);
v_rep_edges_reduced.resize(num_reduced);
v_bottleneck_index_reduced.resize(num_reduced);
num_occ.resize(num_reduced);
// Find bottleneck edges and repulsive edges to remove.
thrust::device_vector<int> mst_edges_to_remove = v_bottleneck_index_reduced;
thrust::device_vector<int> rep_edges_to_remove = v_rep_edges_reduced;
auto first_mst_remove = thrust::make_zip_iterator(thrust::make_tuple(mst_edges_to_remove.begin(), v_rep_edges_reduced.begin(), num_occ.begin()));
auto last_mst_remove = thrust::make_zip_iterator(thrust::make_tuple(mst_edges_to_remove.end(), v_rep_edges_reduced.end(), num_occ.end()));
auto last_mst_remove_valid = thrust::remove_if(first_mst_remove, last_mst_remove, single_occurence());
int num_directed_edges_to_remove = std::distance(first_mst_remove, last_mst_remove_valid);
if (num_directed_edges_to_remove == 0)
return false;
mst_edges_to_remove.resize(num_directed_edges_to_remove);
rep_edges_to_remove.resize(num_directed_edges_to_remove);
thrust::sort(mst_edges_to_remove.begin(), mst_edges_to_remove.end());
thrust::sort(rep_edges_to_remove.begin(), rep_edges_to_remove.end());
auto last_mst_unique = thrust::unique(mst_edges_to_remove.begin(), mst_edges_to_remove.end());
mst_edges_to_remove.resize(std::distance(mst_edges_to_remove.begin(), last_mst_unique));
// Now remove the bottleneck edges (in both directions) from mst.
// For this first find the edges which need to be removed and make them undirected.
thrust::device_vector<int> mst_i_to_remove(mst_edges_to_remove.size());
thrust::device_vector<int> mst_j_to_remove(mst_edges_to_remove.size());
thrust::gather(mst_edges_to_remove.begin(), mst_edges_to_remove.end(), mst_row_ids.begin(), mst_i_to_remove.begin());
thrust::gather(mst_edges_to_remove.begin(), mst_edges_to_remove.end(), mst_col_ids.begin(), mst_j_to_remove.begin());
std::tie(mst_i_to_remove, mst_j_to_remove) = to_undirected(mst_i_to_remove, mst_j_to_remove);
coo_sorting(mst_i_to_remove, mst_j_to_remove);
auto first_mst = thrust::make_zip_iterator(thrust::make_tuple(mst_row_ids.begin(), mst_col_ids.begin()));
auto last_mst = thrust::make_zip_iterator(thrust::make_tuple(mst_row_ids.end(), mst_col_ids.end()));
auto first_mst_val = thrust::make_zip_iterator(thrust::make_tuple(mst_data.begin(), thrust::make_counting_iterator<int>(0)));
auto val2_dummy = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_constant_iterator<float>(0), thrust::make_counting_iterator<int>(0)));
auto first_mst_to_remove = thrust::make_zip_iterator(thrust::make_tuple(mst_i_to_remove.begin(), mst_j_to_remove.begin()));
auto last_mst_to_remove = thrust::make_zip_iterator(thrust::make_tuple(mst_i_to_remove.end(), mst_j_to_remove.end()));
thrust::device_vector<int> mst_row_ids_valid(mst_row_ids.size());
thrust::device_vector<int> mst_col_ids_valid(mst_col_ids.size());
thrust::device_vector<float> mst_data_valid(mst_data.size());
thrust::device_vector<int> mst_valid_indices(mst_row_ids.size());
auto first_mst_valid_key = thrust::make_zip_iterator(thrust::make_tuple(mst_row_ids_valid.begin(), mst_col_ids_valid.begin()));
auto first_mst_valid_val = thrust::make_zip_iterator(thrust::make_tuple(mst_data_valid.begin(), mst_valid_indices.begin()));
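// Keep only the MST edges that are not marked for removal; set_difference_by_key also carries along
// the edge costs and the original edge indices of the surviving edges.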
auto last_to_keep = thrust::set_difference_by_key(first_mst, last_mst, first_mst_to_remove, last_mst_to_remove,
first_mst_val, val2_dummy,
first_mst_valid_key, first_mst_valid_val);
int num_valid_mst_edges = std::distance(first_mst_valid_key, last_to_keep.first);
mst_row_ids_valid.resize(num_valid_mst_edges);
mst_col_ids_valid.resize(num_valid_mst_edges);
mst_data_valid.resize(num_valid_mst_edges);
mst_valid_indices.resize(num_valid_mst_edges);
// Since the MST has changed, the old MST indices stored in bottleneck_edge_index would have to be mapped to the new ones and invalid entries removed.
// Since we re-initialize the frontiers anyway, there is no need to filter them out here.
// row_frontier.filter_by_mst_edges(mst_valid_indices);
// col_frontier.filter_by_mst_edges(mst_valid_indices);
// row_frontier.reassign_mst_indices(mst_valid_indices, mst_row_ids.size());
// col_frontier.reassign_mst_indices(mst_valid_indices, mst_row_ids.size());
thrust::swap(mst_row_ids_valid, mst_row_ids);
thrust::swap(mst_col_ids_valid, mst_col_ids);
thrust::swap(mst_data_valid, mst_data);
return true;
// Remove the intersected path from frontier (would be done by CC):
// remove_frontiers(v_frontier_row, v_rep_edges_row, v_parent_nodes_row, v_bottleneck_edge_index_row, v_bottleneck_edge_value_row, rep_edges_to_remove);
// remove_frontiers(v_frontier_col, v_rep_edges_col, v_parent_nodes_col, v_bottleneck_edge_index_col, v_bottleneck_edge_value_col, rep_edges_to_remove);
}
void edge_contractions_woc_thrust::expand_frontier(frontier& f)
{
const thrust::device_vector<int>& v_frontier = f.get_nodes();
const thrust::device_vector<int>& v_parent_nodes = f.get_parent_nodes();
const thrust::device_vector<int>& v_rep_edges = f.get_rep_edges();
const thrust::device_vector<int>& v_bottleneck_edge_index = f.get_bottleneck_indices();
const thrust::device_vector<float>& v_bottleneck_edge_value = f.get_bottleneck_values();
assert(v_frontier.size() == v_rep_edges.size());
assert(v_frontier.size() == v_parent_nodes.size()); //parent node = -1 corresponds to seeds.
assert(v_frontier.size() == v_bottleneck_edge_index.size());
assert(v_frontier.size() == v_bottleneck_edge_value.size());
const thrust::device_vector<int> mst_row_offsets = compute_offsets(mst_row_ids, num_nodes - 1);
const thrust::device_vector<int> mst_node_degrees = offsets_to_degrees(mst_row_offsets);
assert(mst_node_degrees.size() == num_nodes);
thrust::device_vector<int> v_frontier_num_neighbours(v_frontier.size());
thrust::gather(v_frontier.begin(), v_frontier.end(), mst_node_degrees.begin(), v_frontier_num_neighbours.begin());
auto first = thrust::make_zip_iterator(thrust::make_tuple(v_parent_nodes.begin(), v_frontier_num_neighbours.begin()));
auto last = thrust::make_zip_iterator(thrust::make_tuple(v_parent_nodes.end(), v_frontier_num_neighbours.end()));
thrust::for_each(first, last, recompute_degree_func());
thrust::device_vector<int> v_frontier_offsets = degrees_to_offsets(v_frontier_num_neighbours);
const int num_expansions = v_frontier_offsets[v_frontier_offsets.size() - 1];
thrust::device_vector<int> expanded_frontier(num_expansions);
thrust::device_vector<int> expanded_rep_edges(num_expansions);
thrust::device_vector<int> expanded_parent_nodes(num_expansions);
thrust::device_vector<int> expanded_bottleneck_edge_index(num_expansions);
thrust::device_vector<float> expanded_bottleneck_edge_value(num_expansions);
const int threadCount = 256;
const int blockCount = ceil(v_frontier.size() / (float) threadCount);
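// One thread per frontier vertex: expand_cuda writes each vertex's MST neighbours (except its parent)
// into the output slots given by v_frontier_offsets and propagates the bottleneck edge along the path.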
hipLaunchKernelGGL(( expand_cuda), dim3(blockCount), dim3(threadCount), 0, 0, v_frontier.size(),
thrust::raw_pointer_cast(mst_row_offsets.data()),
thrust::raw_pointer_cast(mst_col_ids.data()),
thrust::raw_pointer_cast(mst_data.data()),
thrust::raw_pointer_cast(v_frontier.data()),
thrust::raw_pointer_cast(v_frontier_offsets.data()),
thrust::raw_pointer_cast(v_rep_edges.data()),
thrust::raw_pointer_cast(v_parent_nodes.data()),
thrust::raw_pointer_cast(v_bottleneck_edge_index.data()),
thrust::raw_pointer_cast(v_bottleneck_edge_value.data()),
thrust::raw_pointer_cast(expanded_frontier.data()),
thrust::raw_pointer_cast(expanded_rep_edges.data()),
thrust::raw_pointer_cast(expanded_parent_nodes.data()),
thrust::raw_pointer_cast(expanded_bottleneck_edge_index.data()),
thrust::raw_pointer_cast(expanded_bottleneck_edge_value.data()));
f = frontier(std::move(expanded_frontier),
std::move(expanded_parent_nodes),
std::move(expanded_rep_edges),
std::move(expanded_bottleneck_edge_index),
std::move(expanded_bottleneck_edge_value));
}
edge_contractions_woc_thrust::edge_contractions_woc_thrust(const dCOO& A) : num_nodes(A.max_dim())
{
cc_labels = thrust::device_vector<int>(num_nodes);
// 1. Partition into positive and negative edges:
assert(A.is_directed());
const thrust::device_vector<int> row_ids = A.get_row_ids();
const thrust::device_vector<int> col_ids = A.get_col_ids();
const thrust::device_vector<float> data = A.get_data();
auto first = thrust::make_zip_iterator(thrust::make_tuple(row_ids.begin(), col_ids.begin(), data.begin()));
auto last = thrust::make_zip_iterator(thrust::make_tuple(row_ids.end(), col_ids.end(), data.end()));
thrust::device_vector<int> pos_row_ids(row_ids.size());
thrust::device_vector<int> pos_col_ids(col_ids.size());
thrust::device_vector<float> pos_data(data.size());
auto first_pos = thrust::make_zip_iterator(thrust::make_tuple(pos_row_ids.begin(), pos_col_ids.begin(), pos_data.begin()));
rep_row_ids = thrust::device_vector<int>(row_ids.size());
rep_col_ids = thrust::device_vector<int>(col_ids.size());
auto first_rep = thrust::make_zip_iterator(thrust::make_tuple(rep_row_ids.begin(), rep_col_ids.begin(), thrust::make_discard_iterator()));
auto ends = thrust::partition_copy(first, last, first_pos, first_rep, is_positive_edge());
const int num_positive = std::distance(first_pos, ends.first);
if (num_positive == 0)
return;
pos_row_ids.resize(num_positive);
pos_col_ids.resize(num_positive);
pos_data.resize(num_positive);
const int num_negative = std::distance(first_rep, ends.second);
rep_row_ids.resize(num_negative);
rep_col_ids.resize(num_negative);
// 2. Compute maximum spanning tree in attractive edges.
std::tie(mst_row_ids, mst_col_ids, mst_data) = MST_boruvka::maximum_spanning_tree(pos_row_ids, pos_col_ids, pos_data);
std::tie(mst_row_ids, mst_col_ids, mst_data) = to_undirected(mst_row_ids, mst_col_ids, mst_data);
coo_sorting(mst_row_ids, mst_col_ids, mst_data);
}
std::tuple<thrust::device_vector<int>, int> edge_contractions_woc_thrust::find_contraction_mapping()
{
MEASURE_CUMULATIVE_FUNCTION_EXECUTION_TIME;
if (mst_row_ids.size() == 0)
return {thrust::device_vector<int>(0), 0};
std::cout<<"# MST edges "<<mst_row_ids.size()<<", # Repulsive edges "<<rep_row_ids.size()<<"\n";
row_frontier = frontier(rep_row_ids);
col_frontier = frontier(rep_col_ids);
int num_rep_valid = filter_by_cc();
int itr = 0;
while(num_rep_valid > 0 && mst_row_ids.size() > 0)
{
// filter odd length conflicted cycles:
std::cout<<"Conflicted cycle removal MST, Iteration: "<<itr<<", # MST edges "<<mst_row_ids.size()<<", # Repulsive edges "<<num_rep_valid<<"\n";
expand_frontier(row_frontier);
bool any_removed = filter_cycles();
if (any_removed)
num_rep_valid = filter_by_cc();
if (num_rep_valid == 0 || mst_row_ids.size() == 0)
break;
// filter even length conflicted cycles:
std::cout<<"Conflicted cycle removal MST, Iteration: "<<itr<<", # MST edges "<<mst_row_ids.size()<<", # Repulsive edges "<<num_rep_valid<<"\n";
expand_frontier(col_frontier);
any_removed = filter_cycles();
if (any_removed)
num_rep_valid = filter_by_cc();
itr++;
}
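// No conflicted cycles remain; relabel the connected components of the remaining MST to a contiguous range [0, nr_ccs).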
thrust::device_vector<int> node_mapping = compress_label_sequence(cc_labels, cc_labels.size() - 1);
int nr_ccs = *thrust::max_element(node_mapping.begin(), node_mapping.end()) + 1;
std::cout<<"Found conflict-free contraction mapping with: "<<nr_ccs<<" connected components\n";
assert(nr_ccs < num_nodes);
return {node_mapping, mst_row_ids.size()};
} | 74c92a524da844daef25b0aedc67db9a0ce4112c.cu | #include "edge_contractions_woc_thrust.h"
#include <gpuMST.h>
#include "ECLgraph.h"
#include "rama_utils.h"
struct is_positive_edge
{
__host__ __device__
bool operator()(const thrust::tuple<int,int,float> t)
{
return thrust::get<2>(t) > 0;
}
};
struct edge_index_in_same_cc_func
{
const int* cc_labels;
const int* row_ids;
const int* col_ids;
__host__ __device__
bool operator()(const int edge_index)
{
const int i = row_ids[edge_index];
const int j = col_ids[edge_index];
return cc_labels[i] == cc_labels[j];
}
};
struct edge_in_diff_cc_func
{
const int* cc_labels;
__host__ __device__
bool operator()(const thrust::tuple<int, int>& t)
{
const int i = thrust::get<0>(t);
const int j = thrust::get<1>(t);
return cc_labels[i] != cc_labels[j];
}
};
void frontier::restrict_to_indices(const thrust::device_vector<int>& indices_to_keep)
{
int num_to_keep = indices_to_keep.size();
thrust::device_vector<int> new_nodes(num_to_keep);
thrust::device_vector<int> new_parent_nodes(num_to_keep);
thrust::device_vector<int> new_rep_edges(num_to_keep);
thrust::device_vector<int> new_bottleneck_indices(num_to_keep);
thrust::device_vector<float> new_bottleneck_values(num_to_keep);
auto first_input = thrust::make_zip_iterator(thrust::make_tuple(nodes.begin(), parent_nodes.begin(), rep_edges.begin(), bottleneck_indices.begin(), bottleneck_values.begin()));
auto first_output = thrust::make_zip_iterator(thrust::make_tuple(new_nodes.begin(), new_parent_nodes.begin(), new_rep_edges.begin(), new_bottleneck_indices.begin(), new_bottleneck_values.begin()));
thrust::gather(indices_to_keep.begin(), indices_to_keep.begin() + num_to_keep, first_input, first_output);
thrust::swap(new_nodes, nodes);
thrust::swap(new_parent_nodes, parent_nodes);
thrust::swap(new_rep_edges, rep_edges);
thrust::swap(new_bottleneck_indices, bottleneck_indices);
thrust::swap(new_bottleneck_values, bottleneck_values);
}
void frontier::filter_by_rep_edges(const thrust::device_vector<int>& rep_edges_to_remove)
{
assert(thrust::is_sorted(rep_edges_to_remove.begin(), rep_edges_to_remove.end()));
thrust::device_vector<int> rep_edges_sorted = rep_edges;
thrust::device_vector<int> rep_edges_sorting_order(rep_edges_sorted.size());
thrust::sequence(rep_edges_sorting_order.begin(), rep_edges_sorting_order.end());
thrust::sort_by_key(rep_edges_sorted.begin(), rep_edges_sorted.end(), rep_edges_sorting_order.begin());
thrust::device_vector<int> indices_to_keep(nodes.size());
auto last_to_keep = thrust::set_difference_by_key(rep_edges_sorted.begin(), rep_edges_sorted.end(), rep_edges_to_remove.begin(), rep_edges_to_remove.end(),
rep_edges_sorting_order.begin(), thrust::make_constant_iterator<int>(0),
thrust::make_discard_iterator(), indices_to_keep.begin());
indices_to_keep.resize(std::distance(indices_to_keep.begin(), last_to_keep.second));
restrict_to_indices(indices_to_keep);
}
void frontier::filter_by_mst_edges(const thrust::device_vector<int>& mst_edges_to_keep)
{
assert(thrust::is_sorted(mst_edges_to_keep.begin(), mst_edges_to_keep.end()));
thrust::device_vector<int> bottleneck_indices_sorted = bottleneck_indices;
thrust::device_vector<int> bottleneck_indices_sorting_order(bottleneck_indices_sorted.size());
thrust::sequence(bottleneck_indices_sorting_order.begin(), bottleneck_indices_sorting_order.end());
thrust::sort_by_key(bottleneck_indices_sorted.begin(), bottleneck_indices_sorted.end(), bottleneck_indices_sorting_order.begin());
thrust::device_vector<int> indices_to_keep(bottleneck_indices_sorted.size());
auto last_to_keep = thrust::set_intersection_by_key(bottleneck_indices_sorted.begin(), bottleneck_indices_sorted.end(),
mst_edges_to_keep.begin(), mst_edges_to_keep.end(),
bottleneck_indices_sorting_order.begin(),
thrust::make_discard_iterator(), indices_to_keep.begin());
indices_to_keep.resize(std::distance(indices_to_keep.begin(), last_to_keep.second));
restrict_to_indices(indices_to_keep);
}
void frontier::reassign_mst_indices(const thrust::device_vector<int>& valid_mst_indices, const int prev_mst_size)
{
map_old_values_consec(bottleneck_indices, valid_mst_indices, prev_mst_size);
}
int edge_contractions_woc_thrust::filter_by_cc()
{
assert(cc_labels.size() == num_nodes);
thrust::device_vector<int> mst_row_offsets = compute_offsets(mst_row_ids, num_nodes - 1);
computeCC_gpu(num_nodes, mst_row_ids.size(),
thrust::raw_pointer_cast(mst_row_offsets.data()),
thrust::raw_pointer_cast(mst_col_ids.data()),
thrust::raw_pointer_cast(cc_labels.data()),
get_cuda_device());
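// After connected components on the MST forest, cc_labels[i] holds the component id of node i.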
edge_in_diff_cc_func edge_in_diff_cc({thrust::raw_pointer_cast(cc_labels.data())});
// thrust::device_vector<int> rep_invalid_indices(rep_row_ids.size());
// thrust::sequence(rep_invalid_indices.begin(), rep_invalid_indices.end());
// auto last_invalid = thrust::remove_if(rep_invalid_indices.begin(), rep_invalid_indices.end(), edge_in_same_cc);
// const int num_invalid_rep = std::distance(rep_invalid_indices.begin(), last_invalid);
// rep_invalid_indices.resize(num_invalid_rep);
// row_frontier.filter_by_rep_edges(rep_invalid_indices);
// col_frontier.filter_by_rep_edges(rep_invalid_indices);
// return rep_row_ids.size() - num_invalid_rep;
auto first_rep = thrust::make_zip_iterator(thrust::make_tuple(rep_row_ids.begin(), rep_col_ids.begin()));
auto last_rep = thrust::make_zip_iterator(thrust::make_tuple(rep_row_ids.end(), rep_col_ids.end()));
auto last_invalid = thrust::remove_if(first_rep, last_rep, edge_in_diff_cc);
int num_rep_edges = std::distance(first_rep, last_invalid);
rep_row_ids.resize(num_rep_edges);
rep_col_ids.resize(num_rep_edges);
// Re-initialize the frontiers. Another (probably faster) possibility would be to only filter out the found conflicted cycles and keep going;
// however, then we would need to keep track of all predecessors, which requires more memory.
row_frontier = frontier(rep_row_ids);
col_frontier = frontier(rep_col_ids);
return num_rep_edges;
}
struct recompute_degree_func
{
__host__ __device__
void operator()(thrust::tuple<const int, int&> t)
{
const int parent_node = thrust::get<0>(t);
if (parent_node != -1)
{
int& degree = thrust::get<1>(t);
degree--;
}
}
};
__global__ void expand_cuda(const int num_vertices,
const int* const __restrict__ row_offsets,
const int* const __restrict__ col_ids,
const float* const __restrict__ costs,
const int* const __restrict__ v_frontier,
const int* const __restrict__ v_frontier_offsets,
const int* const __restrict__ v_rep_edges,
const int* const __restrict__ v_parent_nodes,
const int* const __restrict__ v_bottleneck_edge_index,
const float* const __restrict__ v_bottleneck_edge_value,
int* __restrict__ expanded_frontier,
int* __restrict__ expanded_rep_edges,
int* __restrict__ expanded_parent_nodes,
int* __restrict__ expanded_bottleneck_edge_index,
float* __restrict__ expanded_bottleneck_edge_value)
{
const int start_index = blockIdx.x * blockDim.x + threadIdx.x;
const int num_threads = blockDim.x * gridDim.x;
for (int idx = start_index; idx < num_vertices; idx += num_threads)
{
const int src = v_frontier[idx];
const int src_parent = v_parent_nodes[idx];
const int src_rep_edge = v_rep_edges[idx];
const int prev_bottleneck_index = v_bottleneck_edge_index[idx];
const float prev_bottleneck_value = v_bottleneck_edge_value[idx];
int output_offset = v_frontier_offsets[idx];
for (int input_offset = row_offsets[src]; input_offset != row_offsets[src + 1]; ++input_offset)
{
const int dst = col_ids[input_offset];
if (dst != src_parent)
{
expanded_frontier[output_offset] = dst;
expanded_rep_edges[output_offset] = src_rep_edge;
expanded_parent_nodes[output_offset] = src;
const float cost = costs[input_offset];
if (cost < prev_bottleneck_value)
{
expanded_bottleneck_edge_index[output_offset] = input_offset;
expanded_bottleneck_edge_value[output_offset] = cost;
}
else
{
expanded_bottleneck_edge_index[output_offset] = prev_bottleneck_index;
expanded_bottleneck_edge_value[output_offset] = prev_bottleneck_value;
}
++output_offset;
}
}
}
}
struct reduce_intersecting_paths
{
__host__ __device__
thrust::tuple<int, float, int>
operator()(const thrust::tuple<int, float, int>& t1,
const thrust::tuple<int, float, int>& t2)
{
const float val1 = thrust::get<1>(t1);
const float val2 = thrust::get<1>(t2);
const int count = thrust::get<2>(t1) + thrust::get<2>(t2);
if (val1 < val2)
return thrust::make_tuple(thrust::get<0>(t1), val1, count);
else
return thrust::make_tuple(thrust::get<0>(t2), val2, count);
}
};
struct single_occurence
{
__host__ __device__
bool operator()(const thrust::tuple<int, int, int>& t)
{
return thrust::get<2>(t) == 1;
}
};
struct is_row_unique_frontier
{
__host__ __device__
bool operator()(const thrust::tuple<int, int, int, int, float, int, bool>& t)
{
return thrust::get<5>(t) == 1 && thrust::get<6>(t);
}
};
struct is_col_unique_frontier
{
__host__ __device__
bool operator()(const thrust::tuple<int, int, int, int, float, int, bool>& t)
{
return thrust::get<5>(t) == 1 && !thrust::get<6>(t);
}
};
// void remove_frontiers(thrust::device_vector<int>& v_frontier, thrust::device_vector<int>& v_rep_edges,
// thrust::device_vector<int>& v_parent_nodes, thrust::device_vector<int>& v_bottleneck_edge_index, thrust::device_vector<float>& v_bottleneck_edge_value,
// const thrust::device_vector<int>& rep_edges_to_remove)
// {
// assert(thrust::is_sorted(rep_edges_to_remove.begin(), rep_edges_to_remove.end()));
// auto first_val_wo_rep = thrust::make_zip_iterator(thrust::make_tuple(v_frontier.begin(), v_parent_nodes.begin(), v_bottleneck_edge_index.begin(), v_bottleneck_edge_value.begin()));
// thrust::sort_by_key(v_rep_edges.begin(), v_rep_edges.end(), first_val_wo_rep);
// auto second_val_dummy = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_constant_iterator<int>(0), thrust::make_constant_iterator<int>(0), thrust::make_constant_iterator<int>(0), thrust::make_constant_iterator<int>(0)));
// thrust::device_vector<int> v_frontier_valid(v_frontier.size);
// thrust::device_vector<int> v_rep_edges_valid(v_frontier.size);
// thrust::device_vector<int> v_parent_nodes_valid(v_frontier.size);
// thrust::device_vector<int> v_bottleneck_edge_index_valid(v_frontier.size);
// thrust::device_vector<float> v_bottleneck_edge_value_valid(v_frontier.size);
// auto first_val_valid = thrust::make_zip_iterator(thrust::make_tuple(v_frontier_valid.begin(), v_parent_nodes_valid.begin(), v_bottleneck_edge_index_valid.begin(), v_bottleneck_edge_value_valid.begin()));
// auto last_valid = thrust::set_difference_by_key(v_rep_edges.begin(), v_rep_edges.end(), rep_edges_to_remove.begin(), rep_edges_to_remove.end(),
// first_val_wo_rep, second_val_dummy, v_rep_edges_valid.begin(), first_val_valid);
// thrust::swap(v_frontier_valid, v_frontier);
// thrust::swap(v_rep_edges_valid, v_rep_edges);
// thrust::swap(v_parent_nodes_valid, v_parent_nodes);
// thrust::swap(v_bottleneck_edge_index_valid, v_bottleneck_edge_index);
// thrust::swap(v_bottleneck_edge_value_valid, v_bottleneck_edge_value);
// }
bool edge_contractions_woc_thrust::filter_cycles()
{
// The elements in (v_frontier_row, v_rep_edges_row) which match (v_frontier_col, v_rep_edges_col) correspond to conflicted cycles.
// For these elements, find the best bottleneck edge index by comparing v_bottleneck_edge_index_row with v_bottleneck_edge_index_col
// based on the corresponding values, and remove that attractive edge from the MST.
thrust::device_vector<int>& v_frontier_row = row_frontier.get_nodes();
thrust::device_vector<int>& v_parent_nodes_row = row_frontier.get_parent_nodes();
thrust::device_vector<int>& v_rep_edges_row = row_frontier.get_rep_edges();
thrust::device_vector<int>& v_bottleneck_edge_index_row = row_frontier.get_bottleneck_indices();
thrust::device_vector<float>& v_bottleneck_edge_value_row = row_frontier.get_bottleneck_values();
// Prepare by sorting both arrays:
auto first_row_key = thrust::make_zip_iterator(thrust::make_tuple(v_frontier_row.begin(), v_rep_edges_row.begin()));
auto last_row_key = thrust::make_zip_iterator(thrust::make_tuple(v_frontier_row.end(), v_rep_edges_row.end()));
auto first_row_val = thrust::make_zip_iterator(thrust::make_tuple(v_parent_nodes_row.begin(), v_bottleneck_edge_index_row.begin(), v_bottleneck_edge_value_row.begin()));
thrust::sort_by_key(first_row_key, last_row_key, first_row_val);
thrust::device_vector<int>& v_frontier_col = col_frontier.get_nodes();
thrust::device_vector<int>& v_parent_nodes_col = col_frontier.get_parent_nodes();
thrust::device_vector<int>& v_rep_edges_col = col_frontier.get_rep_edges();
thrust::device_vector<int>& v_bottleneck_edge_index_col = col_frontier.get_bottleneck_indices();
thrust::device_vector<float>& v_bottleneck_edge_value_col = col_frontier.get_bottleneck_values();
auto first_col_key = thrust::make_zip_iterator(thrust::make_tuple(v_frontier_col.begin(), v_rep_edges_col.begin()));
auto last_col_key = thrust::make_zip_iterator(thrust::make_tuple(v_frontier_col.end(), v_rep_edges_col.end()));
auto first_col_val = thrust::make_zip_iterator(thrust::make_tuple(v_parent_nodes_col.begin(), v_bottleneck_edge_index_col.begin(), v_bottleneck_edge_value_col.begin()));
thrust::sort_by_key(first_col_key, last_col_key, first_col_val);
// Merge and search for duplicates
thrust::device_vector<int> v_frontier_merged(v_frontier_row.size() + v_frontier_col.size());
thrust::device_vector<int> v_rep_edges_merged(v_frontier_row.size() + v_frontier_col.size());
thrust::device_vector<int> v_bottleneck_index_merged(v_frontier_row.size() + v_frontier_col.size());
thrust::device_vector<float> v_bottleneck_value_merged(v_frontier_row.size() + v_frontier_col.size());
auto first_row_val_merge = thrust::make_zip_iterator(thrust::make_tuple(v_bottleneck_edge_index_row.begin(), v_bottleneck_edge_value_row.begin()));
auto first_col_val_merge = thrust::make_zip_iterator(thrust::make_tuple(v_bottleneck_edge_index_col.begin(), v_bottleneck_edge_value_col.begin()));
auto first_merged_key = thrust::make_zip_iterator(thrust::make_tuple(v_frontier_merged.begin(), v_rep_edges_merged.begin()));
auto first_merged_val = thrust::make_zip_iterator(thrust::make_tuple(v_bottleneck_index_merged.begin(), v_bottleneck_value_merged.begin()));
auto last_merged = thrust::merge_by_key(first_row_key, last_row_key, first_col_key, last_col_key,
first_row_val_merge, first_col_val_merge, first_merged_key, first_merged_val);
assert(std::distance(first_merged_key, last_merged.first) == v_frontier_merged.size());
auto first_merged_val_with_count = thrust::make_zip_iterator(thrust::make_tuple(v_bottleneck_index_merged.begin(), v_bottleneck_value_merged.begin(), thrust::make_constant_iterator<int>(1)));
thrust::device_vector<int> v_rep_edges_reduced(v_frontier_row.size() + v_frontier_col.size());
thrust::device_vector<int> v_bottleneck_index_reduced(v_frontier_row.size() + v_frontier_col.size());
thrust::device_vector<int> num_occ(v_frontier_row.size() + v_frontier_col.size());
auto reduced_key_first = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_discard_iterator(), v_rep_edges_reduced.begin()));
auto reduced_val_first = thrust::make_zip_iterator(thrust::make_tuple(v_bottleneck_index_reduced.begin(), thrust::make_discard_iterator(), num_occ.begin()));
thrust::equal_to<thrust::tuple<int, int>> binary_pred_comp;
auto last_reduce = thrust::reduce_by_key(first_merged_key, last_merged.first, first_merged_val_with_count, reduced_key_first, reduced_val_first, binary_pred_comp, reduce_intersecting_paths());
int num_reduced = std::distance(reduced_key_first, last_reduce.first);
v_rep_edges_reduced.resize(num_reduced);
v_bottleneck_index_reduced.resize(num_reduced);
num_occ.resize(num_reduced);
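// A (node, rep_edge) key occurring more than once can only come from the row and the column frontier meeting
// at the same MST node, i.e. the two search trees of that repulsive edge close a conflicted cycle.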
// Find bottleneck edges and repulsive edges to remove.
thrust::device_vector<int> mst_edges_to_remove = v_bottleneck_index_reduced;
thrust::device_vector<int> rep_edges_to_remove = v_rep_edges_reduced;
auto first_mst_remove = thrust::make_zip_iterator(thrust::make_tuple(mst_edges_to_remove.begin(), v_rep_edges_reduced.begin(), num_occ.begin()));
auto last_mst_remove = thrust::make_zip_iterator(thrust::make_tuple(mst_edges_to_remove.end(), v_rep_edges_reduced.begin(), num_occ.end()));
auto last_mst_remove_valid = thrust::remove_if(first_mst_remove, last_mst_remove, single_occurence());
int num_directed_edges_to_remove = std::distance(first_mst_remove, last_mst_remove_valid);
if (num_directed_edges_to_remove == 0)
return false;
mst_edges_to_remove.resize(num_directed_edges_to_remove);
rep_edges_to_remove.resize(num_directed_edges_to_remove);
thrust::sort(mst_edges_to_remove.begin(), mst_edges_to_remove.end());
thrust::sort(rep_edges_to_remove.begin(), rep_edges_to_remove.end());
auto last_mst_unique = thrust::unique(mst_edges_to_remove.begin(), mst_edges_to_remove.end());
mst_edges_to_remove.resize(std::distance(mst_edges_to_remove.begin(), last_mst_unique));
// Now remove the bottleneck edges (in both directions) from mst.
// For this first find the edges which need to be removed and make them undirected.
thrust::device_vector<int> mst_i_to_remove(mst_edges_to_remove.size());
thrust::device_vector<int> mst_j_to_remove(mst_edges_to_remove.size());
thrust::gather(mst_edges_to_remove.begin(), mst_edges_to_remove.end(), mst_row_ids.begin(), mst_i_to_remove.begin());
thrust::gather(mst_edges_to_remove.begin(), mst_edges_to_remove.end(), mst_col_ids.begin(), mst_j_to_remove.begin());
std::tie(mst_i_to_remove, mst_j_to_remove) = to_undirected(mst_i_to_remove, mst_j_to_remove);
coo_sorting(mst_i_to_remove, mst_j_to_remove);
auto first_mst = thrust::make_zip_iterator(thrust::make_tuple(mst_row_ids.begin(), mst_col_ids.begin()));
auto last_mst = thrust::make_zip_iterator(thrust::make_tuple(mst_row_ids.end(), mst_col_ids.end()));
auto first_mst_val = thrust::make_zip_iterator(thrust::make_tuple(mst_data.begin(), thrust::make_counting_iterator<int>(0)));
auto val2_dummy = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_constant_iterator<float>(0), thrust::make_counting_iterator<int>(0)));
auto first_mst_to_remove = thrust::make_zip_iterator(thrust::make_tuple(mst_i_to_remove.begin(), mst_j_to_remove.begin()));
auto last_mst_to_remove = thrust::make_zip_iterator(thrust::make_tuple(mst_i_to_remove.end(), mst_j_to_remove.end()));
thrust::device_vector<int> mst_row_ids_valid(mst_row_ids.size());
thrust::device_vector<int> mst_col_ids_valid(mst_col_ids.size());
thrust::device_vector<float> mst_data_valid(mst_data.size());
thrust::device_vector<int> mst_valid_indices(mst_row_ids.size());
auto first_mst_valid_key = thrust::make_zip_iterator(thrust::make_tuple(mst_row_ids_valid.begin(), mst_col_ids_valid.begin()));
auto first_mst_valid_val = thrust::make_zip_iterator(thrust::make_tuple(mst_data_valid.begin(), mst_valid_indices.begin()));
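// Keep only the MST edges that are not marked for removal; set_difference_by_key also carries along
// the edge costs and the original edge indices of the surviving edges.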
auto last_to_keep = thrust::set_difference_by_key(first_mst, last_mst, first_mst_to_remove, last_mst_to_remove,
first_mst_val, val2_dummy,
first_mst_valid_key, first_mst_valid_val);
int num_valid_mst_edges = std::distance(first_mst_valid_key, last_to_keep.first);
mst_row_ids_valid.resize(num_valid_mst_edges);
mst_col_ids_valid.resize(num_valid_mst_edges);
mst_data_valid.resize(num_valid_mst_edges);
mst_valid_indices.resize(num_valid_mst_edges);
// Since the MST has changed, the old MST indices stored in bottleneck_edge_index would have to be mapped to the new ones and invalid entries removed.
// Since we re-initialize the frontiers anyway, there is no need to filter them out here.
// row_frontier.filter_by_mst_edges(mst_valid_indices);
// col_frontier.filter_by_mst_edges(mst_valid_indices);
// row_frontier.reassign_mst_indices(mst_valid_indices, mst_row_ids.size());
// col_frontier.reassign_mst_indices(mst_valid_indices, mst_row_ids.size());
thrust::swap(mst_row_ids_valid, mst_row_ids);
thrust::swap(mst_col_ids_valid, mst_col_ids);
thrust::swap(mst_data_valid, mst_data);
return true;
// Remove the intersected path from frontier (would be done by CC):
// remove_frontiers(v_frontier_row, v_rep_edges_row, v_parent_nodes_row, v_bottleneck_edge_index_row, v_bottleneck_edge_value_row, rep_edges_to_remove);
// remove_frontiers(v_frontier_col, v_rep_edges_col, v_parent_nodes_col, v_bottleneck_edge_index_col, v_bottleneck_edge_value_col, rep_edges_to_remove);
}
void edge_contractions_woc_thrust::expand_frontier(frontier& f)
{
const thrust::device_vector<int>& v_frontier = f.get_nodes();
const thrust::device_vector<int>& v_parent_nodes = f.get_parent_nodes();
const thrust::device_vector<int>& v_rep_edges = f.get_rep_edges();
const thrust::device_vector<int>& v_bottleneck_edge_index = f.get_bottleneck_indices();
const thrust::device_vector<float>& v_bottleneck_edge_value = f.get_bottleneck_values();
assert(v_frontier.size() == v_rep_edges.size());
assert(v_frontier.size() == v_parent_nodes.size()); //parent node = -1 corresponds to seeds.
assert(v_frontier.size() == v_bottleneck_edge_index.size());
assert(v_frontier.size() == v_bottleneck_edge_value.size());
const thrust::device_vector<int> mst_row_offsets = compute_offsets(mst_row_ids, num_nodes - 1);
const thrust::device_vector<int> mst_node_degrees = offsets_to_degrees(mst_row_offsets);
assert(mst_node_degrees.size() == num_nodes);
thrust::device_vector<int> v_frontier_num_neighbours(v_frontier.size());
thrust::gather(v_frontier.begin(), v_frontier.end(), mst_node_degrees.begin(), v_frontier_num_neighbours.begin());
auto first = thrust::make_zip_iterator(thrust::make_tuple(v_parent_nodes.begin(), v_frontier_num_neighbours.begin()));
auto last = thrust::make_zip_iterator(thrust::make_tuple(v_parent_nodes.end(), v_frontier_num_neighbours.end()));
thrust::for_each(first, last, recompute_degree_func());
thrust::device_vector<int> v_frontier_offsets = degrees_to_offsets(v_frontier_num_neighbours);
const int num_expansions = v_frontier_offsets[v_frontier_offsets.size() - 1];
thrust::device_vector<int> expanded_frontier(num_expansions);
thrust::device_vector<int> expanded_rep_edges(num_expansions);
thrust::device_vector<int> expanded_parent_nodes(num_expansions);
thrust::device_vector<int> expanded_bottleneck_edge_index(num_expansions);
thrust::device_vector<float> expanded_bottleneck_edge_value(num_expansions);
const int threadCount = 256;
const int blockCount = ceil(v_frontier.size() / (float) threadCount);
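// One thread per frontier vertex: expand_cuda writes each vertex's MST neighbours (except its parent)
// into the output slots given by v_frontier_offsets and propagates the bottleneck edge along the path.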
expand_cuda<<<blockCount, threadCount>>>(v_frontier.size(),
thrust::raw_pointer_cast(mst_row_offsets.data()),
thrust::raw_pointer_cast(mst_col_ids.data()),
thrust::raw_pointer_cast(mst_data.data()),
thrust::raw_pointer_cast(v_frontier.data()),
thrust::raw_pointer_cast(v_frontier_offsets.data()),
thrust::raw_pointer_cast(v_rep_edges.data()),
thrust::raw_pointer_cast(v_parent_nodes.data()),
thrust::raw_pointer_cast(v_bottleneck_edge_index.data()),
thrust::raw_pointer_cast(v_bottleneck_edge_value.data()),
thrust::raw_pointer_cast(expanded_frontier.data()),
thrust::raw_pointer_cast(expanded_rep_edges.data()),
thrust::raw_pointer_cast(expanded_parent_nodes.data()),
thrust::raw_pointer_cast(expanded_bottleneck_edge_index.data()),
thrust::raw_pointer_cast(expanded_bottleneck_edge_value.data()));
f = frontier(std::move(expanded_frontier),
std::move(expanded_parent_nodes),
std::move(expanded_rep_edges),
std::move(expanded_bottleneck_edge_index),
std::move(expanded_bottleneck_edge_value));
}
edge_contractions_woc_thrust::edge_contractions_woc_thrust(const dCOO& A) : num_nodes(A.max_dim())
{
cc_labels = thrust::device_vector<int>(num_nodes);
// 1. Partition into positive and negative edges:
assert(A.is_directed());
const thrust::device_vector<int> row_ids = A.get_row_ids();
const thrust::device_vector<int> col_ids = A.get_col_ids();
const thrust::device_vector<float> data = A.get_data();
auto first = thrust::make_zip_iterator(thrust::make_tuple(row_ids.begin(), col_ids.begin(), data.begin()));
auto last = thrust::make_zip_iterator(thrust::make_tuple(row_ids.end(), col_ids.end(), data.end()));
thrust::device_vector<int> pos_row_ids(row_ids.size());
thrust::device_vector<int> pos_col_ids(col_ids.size());
thrust::device_vector<float> pos_data(data.size());
auto first_pos = thrust::make_zip_iterator(thrust::make_tuple(pos_row_ids.begin(), pos_col_ids.begin(), pos_data.begin()));
rep_row_ids = thrust::device_vector<int>(row_ids.size());
rep_col_ids = thrust::device_vector<int>(col_ids.size());
auto first_rep = thrust::make_zip_iterator(thrust::make_tuple(rep_row_ids.begin(), rep_col_ids.begin(), thrust::make_discard_iterator()));
auto ends = thrust::partition_copy(first, last, first_pos, first_rep, is_positive_edge());
const int num_positive = std::distance(first_pos, ends.first);
if (num_positive == 0)
return;
pos_row_ids.resize(num_positive);
pos_col_ids.resize(num_positive);
pos_data.resize(num_positive);
const int num_negative = std::distance(first_rep, ends.second);
rep_row_ids.resize(num_negative);
rep_col_ids.resize(num_negative);
// 2. Compute maximum spanning tree in attractive edges.
std::tie(mst_row_ids, mst_col_ids, mst_data) = MST_boruvka::maximum_spanning_tree(pos_row_ids, pos_col_ids, pos_data);
std::tie(mst_row_ids, mst_col_ids, mst_data) = to_undirected(mst_row_ids, mst_col_ids, mst_data);
coo_sorting(mst_row_ids, mst_col_ids, mst_data);
}
std::tuple<thrust::device_vector<int>, int> edge_contractions_woc_thrust::find_contraction_mapping()
{
MEASURE_CUMULATIVE_FUNCTION_EXECUTION_TIME;
if (mst_row_ids.size() == 0)
return {thrust::device_vector<int>(0), 0};
std::cout<<"# MST edges "<<mst_row_ids.size()<<", # Repulsive edges "<<rep_row_ids.size()<<"\n";
row_frontier = frontier(rep_row_ids);
col_frontier = frontier(rep_col_ids);
int num_rep_valid = filter_by_cc();
int itr = 0;
while(num_rep_valid > 0 && mst_row_ids.size() > 0)
{
// filter odd length conflicted cycles:
std::cout<<"Conflicted cycle removal MST, Iteration: "<<itr<<", # MST edges "<<mst_row_ids.size()<<", # Repulsive edges "<<num_rep_valid<<"\n";
expand_frontier(row_frontier);
bool any_removed = filter_cycles();
if (any_removed)
num_rep_valid = filter_by_cc();
if (num_rep_valid == 0 || mst_row_ids.size() == 0)
break;
// filter even length conflicted cycles:
std::cout<<"Conflicted cycle removal MST, Iteration: "<<itr<<", # MST edges "<<mst_row_ids.size()<<", # Repulsive edges "<<num_rep_valid<<"\n";
expand_frontier(col_frontier);
any_removed = filter_cycles();
if (any_removed)
num_rep_valid = filter_by_cc();
itr++;
}
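// No conflicted cycles remain; relabel the connected components of the remaining MST to a contiguous range [0, nr_ccs).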
thrust::device_vector<int> node_mapping = compress_label_sequence(cc_labels, cc_labels.size() - 1);
int nr_ccs = *thrust::max_element(node_mapping.begin(), node_mapping.end()) + 1;
std::cout<<"Found conflict-free contraction mapping with: "<<nr_ccs<<" connected components\n";
assert(nr_ccs < num_nodes);
return {node_mapping, mst_row_ids.size()};
} |
45215b37a3684eb5a85f27c64e97ecddbd4302be.hip | // !!! This is a file automatically generated by hipify!!!
/* Includes, system */
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define N 1024
/* DEVICE CODE */
__global__ void suma_2_enteros(int *d1, int *d2, int *sum){
int idBloque = blockIdx.y * gridDim.x + blockIdx.x;
int idThread = idBloque * blockDim.z * blockDim.y * blockDim.x + threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
sum[idThread] = d1[idThread] + d2[idThread];
}
/* HOST CODE*/
int main(int argc, char** argv)
{
int DeviceCount = 0,i;
int *h_d1,*h_d2,*h_sum;
int *d_d1,*d_d2,*d_sum;
dim3 dimGrid(8,2);
dim3 dimBlock(8,4,2);
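/* Launch configuration: 8*2 = 16 blocks of 8*4*2 = 64 threads = 1024 threads, one per element of the N = 1024 arrays */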
h_d1 = (int*)malloc(N * sizeof(h_d1[0]));
h_d2 = (int*)malloc(N * sizeof(h_d2[0]));
h_sum = (int*)malloc(N * sizeof(h_sum[0]));
for (i=0;i<N;i++){h_d1[i]=i;h_d2[i]=10*i;h_sum[i]=0;}
/* Initialize CUDA */
if (hipInit(0) != 0){
printf("ERROR de inicializacion\n");
exit(0);
}
hipGetDeviceCount(&DeviceCount);
if (DeviceCount == 0){
printf("ERROR ningun dispositivo soporta CUDA\n");
exit(0);
}
hipMalloc((void**)&d_d1,N*sizeof(d_d1));hipMemset(d_d1,0,N*sizeof(d_d1));
hipMalloc((void**)&d_d2,N*sizeof(d_d2));hipMemset(d_d2,0,N*sizeof(d_d2));
hipMalloc((void**)&d_sum,N*sizeof(d_sum));hipMemset(d_sum,0,N*sizeof(d_sum));
hipMemcpy(d_d1,h_d1,N*sizeof(h_d1[0]),hipMemcpyHostToDevice);
hipMemcpy(d_d2,h_d2,N*sizeof(h_d2[0]),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( suma_2_enteros), dim3(dimGrid),dim3(dimBlock), 0, 0, d_d1,d_d2,d_sum);
hipMemcpy(h_sum,d_sum,N*sizeof(h_sum[0]),hipMemcpyDeviceToHost);
for (i=510;i<520;i++) printf("Resultado: %d \n",h_sum[i]);
hipFree(d_d1);hipFree(d_d2);hipFree(d_sum);
}
| 45215b37a3684eb5a85f27c64e97ecddbd4302be.cu |
/* Includes, system */
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 1024
/* DEVICE CODE */
__global__ void suma_2_enteros(int *d1, int *d2, int *sum){
int idBloque = blockIdx.y * gridDim.x + blockIdx.x;
int idThread = idBloque * blockDim.z * blockDim.y * blockDim.x + threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
sum[idThread] = d1[idThread] + d2[idThread];
}
/* HOST CODE*/
int main(int argc, char** argv)
{
int DeviceCount = 0,i;
int *h_d1,*h_d2,*h_sum;
int *d_d1,*d_d2,*d_sum;
dim3 dimGrid(8,2);
dim3 dimBlock(8,4,2);
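/* Launch configuration: 8*2 = 16 blocks of 8*4*2 = 64 threads = 1024 threads, one per element of the N = 1024 arrays */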
h_d1 = (int*)malloc(N * sizeof(h_d1[0]));
h_d2 = (int*)malloc(N * sizeof(h_d2[0]));
h_sum = (int*)malloc(N * sizeof(h_sum[0]));
for (i=0;i<N;i++){h_d1[i]=i;h_d2[i]=10*i;h_sum[i]=0;}
/* Initialize CUDA */
if (cuInit(0) != 0){
printf("ERROR de inicializacion\n");
exit(0);
}
cuDeviceGetCount(&DeviceCount);
if (DeviceCount == 0){
printf("ERROR ningun dispositivo soporta CUDA\n");
exit(0);
}
cudaMalloc((void**)&d_d1,N*sizeof(d_d1));cudaMemset(d_d1,0,N*sizeof(d_d1));
cudaMalloc((void**)&d_d2,N*sizeof(d_d2));cudaMemset(d_d2,0,N*sizeof(d_d2));
cudaMalloc((void**)&d_sum,N*sizeof(d_sum));cudaMemset(d_sum,0,N*sizeof(d_sum));
cudaMemcpy(d_d1,h_d1,N*sizeof(h_d1[0]),cudaMemcpyHostToDevice);
cudaMemcpy(d_d2,h_d2,N*sizeof(h_d2[0]),cudaMemcpyHostToDevice);
suma_2_enteros<<<dimGrid,dimBlock>>>(d_d1,d_d2,d_sum);
cudaMemcpy(h_sum,d_sum,N*sizeof(h_sum[0]),cudaMemcpyDeviceToHost);
for (i=510;i<520;i++) printf("Resultado: %d \n",h_sum[i]);
cudaFree(d_d1);cudaFree(d_d2);cudaFree(d_sum);
}
|
416d51afbf8d6af43b48e3bbede58af492d82aad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <tests/utilities/cudf_test_fixtures.h>
#include <hash/concurrent_unordered_map.cuh>
#include <groupby/aggregation_operations.hpp>
#include <cudf.h>
#include <thrust/device_vector.h>
#include <rmm/thrust_rmm_allocator.h>
#include <gtest/gtest.h>
#include <iostream>
#include <vector>
#include <unordered_map>
#include <random>
#include <cstdlib>
// This is necessary to do a parametrized typed-test over multiple template arguments
template <typename Key, typename Value, template <typename> typename Aggregation_Operator>
struct KeyValueTypes
{
using key_type = Key;
using value_type = Value;
using op_type = Aggregation_Operator<value_type>;
};
// A new instance of this class will be created for each *TEST(MapTest, ...)
// Put all repeated stuff for each test here
template <class T>
struct MapTest : public GdfTest
{
using key_type = typename T::key_type;
using value_type = typename T::value_type;
using op_type = typename T::op_type;
using map_type = concurrent_unordered_map<key_type, value_type, std::numeric_limits<key_type>::max()>;
using pair_type = thrust::pair<key_type, value_type>;
std::unique_ptr<map_type> the_map;
const key_type unused_key = std::numeric_limits<key_type>::max();
const value_type unused_value = op_type::IDENTITY;
const int size;
const int THREAD_BLOCK_SIZE{256};
std::vector<thrust::pair<key_type,value_type>> pairs;
rmm::device_vector<pair_type> d_pairs;
std::unordered_map<key_type, value_type> expected_values;
MapTest(const int hash_table_size = 10000)
: size(hash_table_size), the_map(new map_type(hash_table_size, op_type::IDENTITY))
{
}
pair_type * create_input(const int num_unique_keys, const int num_values_per_key, const int ratio = 2, const int max_key = RAND_MAX, const int max_value = RAND_MAX, bool shuffle = false)
{
const int TOTAL_PAIRS = num_unique_keys * num_values_per_key;
this->the_map.reset(new map_type(ratio*TOTAL_PAIRS, unused_value));
pairs.reserve(TOTAL_PAIRS);
// Always use the same seed so the random sequence is the same each time
std::srand(0);
for(int i = 0; i < num_unique_keys; ++i )
{
// Create random key
key_type current_key = std::rand() % max_key;
// Don't use unused_key
while(current_key == this->unused_key)
{
current_key = std::rand();
}
// For the current key, generate random values
for(int j = 0; j < num_values_per_key; ++j)
{
value_type current_value = std::rand() % max_value;
// Don't use unused_value
while(current_value == this->unused_value)
{
current_value = std::rand();
}
// Store current key and value
pairs.push_back(std::make_pair(current_key, current_value));
// Use an STL map to keep track of the expected aggregated value for each key
auto found = expected_values.find(current_key);
// Key doesn't exist yet, insert it
if(found == expected_values.end())
{
expected_values.insert(std::make_pair(current_key,current_value));
}
// Key exists, update the value with the operator
else
{
op_type op;
value_type new_value = op(found->second, current_value);
found->second = new_value;
}
}
}
if(shuffle == true)
std::random_shuffle(pairs.begin(), pairs.end());
d_pairs = pairs;
return thrust::raw_pointer_cast(d_pairs.data());
}
void check_answer(){
for(auto const &k : this->expected_values)
{
key_type test_key = k.first;
value_type expected_value = k.second;
auto found = this->the_map->find(test_key);
ASSERT_NE(this->the_map->end(), found);
value_type test_value = found->second;
EXPECT_EQ(expected_value, test_value) << "Key is: " << test_key;
}
}
~MapTest(){
}
};
// Google Test can only do a parameterized typed-test over a single type, so we have
// to nest multiple types inside of the KeyValueTypes struct above
// KeyValueTypes<type1, type2> implies key_type = type1, value_type = type2
// This list is the types across which Google Test will run our tests
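// For example, KeyValueTypes<int, float, max_op> instantiates MapTest with int keys, float values and max aggregation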
typedef ::testing::Types< KeyValueTypes<int, int, max_op>,
KeyValueTypes<int, float, max_op>,
KeyValueTypes<int, double, max_op>,
KeyValueTypes<int, long long int, max_op>,
KeyValueTypes<int, unsigned long long int, max_op>,
KeyValueTypes<unsigned long long int, int, max_op>,
KeyValueTypes<unsigned long long int, float, max_op>,
KeyValueTypes<unsigned long long int, double, max_op>,
KeyValueTypes<unsigned long long int, long long int, max_op>,
KeyValueTypes<unsigned long long int, unsigned long long int, max_op>,
KeyValueTypes<int, int, min_op>,
KeyValueTypes<int, float, min_op>,
KeyValueTypes<int, double, min_op>,
KeyValueTypes<int, long long int, min_op>,
KeyValueTypes<int, unsigned long long int, min_op>,
KeyValueTypes<unsigned long long int, int, min_op>,
KeyValueTypes<unsigned long long int, float, min_op>,
KeyValueTypes<unsigned long long int, double, min_op>,
KeyValueTypes<unsigned long long int, long long int, min_op>,
KeyValueTypes<unsigned long long int, unsigned long long int, min_op>
> Implementations;
TYPED_TEST_CASE(MapTest, Implementations);
TYPED_TEST(MapTest, InitialState)
{
using key_type = typename TypeParam::key_type;
using value_type = typename TypeParam::value_type;
auto begin = this->the_map->begin();
auto end = this->the_map->end();
EXPECT_NE(begin,end);
}
TYPED_TEST(MapTest, CheckUnusedValues){
EXPECT_EQ(this->the_map->get_unused_key(), this->unused_key);
auto begin = this->the_map->begin();
EXPECT_EQ(begin->first, this->unused_key);
EXPECT_EQ(begin->second, this->unused_value);
}
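// Builds the hash table on the device: every thread inserts pairs in a grid-stride loop, and duplicate keys
// are combined with the given aggregation operator, mirroring the sequentially computed expected_values.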
template<typename map_type, typename Aggregation_Operator>
__global__ void build_table(map_type * const the_map,
const typename map_type::value_type * const input_pairs,
const typename map_type::size_type input_size,
Aggregation_Operator op)
{
using size_type = typename map_type::size_type;
size_type i = threadIdx.x + blockIdx.x * blockDim.x;
while( i < input_size ){
the_map->insert(input_pairs[i], op);
i += blockDim.x * gridDim.x;
}
}
TYPED_TEST(MapTest, AggregationTestDeviceAllSame)
{
using value_type = typename TypeParam::value_type;
using pair_type = typename MapTest<TypeParam>::pair_type;
using op_type = typename MapTest<TypeParam>::op_type;
pair_type * d_pairs = this->create_input(1, 1<<20);
const dim3 grid_size ((this->d_pairs.size() + this->THREAD_BLOCK_SIZE -1) / this->THREAD_BLOCK_SIZE,1,1);
const dim3 block_size (this->THREAD_BLOCK_SIZE, 1, 1);
hipDeviceSynchronize();
hipLaunchKernelGGL(( build_table), dim3(grid_size), dim3(block_size), 0, 0, (this->the_map).get(), d_pairs, this->d_pairs.size(), op_type());
hipDeviceSynchronize();
this->check_answer();
}
TYPED_TEST(MapTest, AggregationTestDeviceAllUnique)
{
using value_type = typename TypeParam::value_type;
using pair_type = typename MapTest<TypeParam>::pair_type;
using op_type = typename MapTest<TypeParam>::op_type;
pair_type * d_pairs = this->create_input(1<<18, 1);
const dim3 grid_size ((this->d_pairs.size() + this->THREAD_BLOCK_SIZE -1) / this->THREAD_BLOCK_SIZE,1,1);
const dim3 block_size (this->THREAD_BLOCK_SIZE, 1, 1);
hipDeviceSynchronize();
hipLaunchKernelGGL(( build_table), dim3(grid_size), dim3(block_size), 0, 0, (this->the_map).get(), d_pairs, this->d_pairs.size(), op_type());
hipDeviceSynchronize();
this->check_answer();
}
TYPED_TEST(MapTest, AggregationTestDeviceWarpSame)
{
using value_type = typename TypeParam::value_type;
using pair_type = typename MapTest<TypeParam>::pair_type;
using op_type = typename MapTest<TypeParam>::op_type;
pair_type * d_pairs = this->create_input(1<<15, 32);
const dim3 grid_size ((this->d_pairs.size() + this->THREAD_BLOCK_SIZE -1) / this->THREAD_BLOCK_SIZE,1,1);
const dim3 block_size (this->THREAD_BLOCK_SIZE, 1, 1);
hipDeviceSynchronize();
hipLaunchKernelGGL(( build_table), dim3(grid_size), dim3(block_size), 0, 0, (this->the_map).get(), d_pairs, this->d_pairs.size(), op_type());
hipDeviceSynchronize();
this->check_answer();
}
TYPED_TEST(MapTest, AggregationTestDeviceBlockSame)
{
using value_type = typename TypeParam::value_type;
using pair_type = typename MapTest<TypeParam>::pair_type;
using op_type = typename MapTest<TypeParam>::op_type;
pair_type * d_pairs = this->create_input(1<<12, this->THREAD_BLOCK_SIZE);
const dim3 grid_size ((this->d_pairs.size() + this->THREAD_BLOCK_SIZE -1) / this->THREAD_BLOCK_SIZE,1,1);
const dim3 block_size (this->THREAD_BLOCK_SIZE, 1, 1);
hipDeviceSynchronize();
hipLaunchKernelGGL(( build_table), dim3(grid_size), dim3(block_size), 0, 0, (this->the_map).get(), d_pairs, this->d_pairs.size(), op_type());
hipDeviceSynchronize();
this->check_answer();
}
int main(int argc, char * argv[]){
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
| 416d51afbf8d6af43b48e3bbede58af492d82aad.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <tests/utilities/cudf_test_fixtures.h>
#include <hash/concurrent_unordered_map.cuh>
#include <groupby/aggregation_operations.hpp>
#include <cudf.h>
#include <thrust/device_vector.h>
#include <rmm/thrust_rmm_allocator.h>
#include <gtest/gtest.h>
#include <iostream>
#include <vector>
#include <unordered_map>
#include <random>
#include <cstdlib>
// This is necessary to do a parametrized typed-test over multiple template arguments
template <typename Key, typename Value, template <typename> typename Aggregation_Operator>
struct KeyValueTypes
{
using key_type = Key;
using value_type = Value;
using op_type = Aggregation_Operator<value_type>;
};
// A new instance of this class will be created for each *TEST(MapTest, ...)
// Put all repeated stuff for each test here
template <class T>
struct MapTest : public GdfTest
{
using key_type = typename T::key_type;
using value_type = typename T::value_type;
using op_type = typename T::op_type;
using map_type = concurrent_unordered_map<key_type, value_type, std::numeric_limits<key_type>::max()>;
using pair_type = thrust::pair<key_type, value_type>;
std::unique_ptr<map_type> the_map;
const key_type unused_key = std::numeric_limits<key_type>::max();
const value_type unused_value = op_type::IDENTITY;
const int size;
const int THREAD_BLOCK_SIZE{256};
std::vector<thrust::pair<key_type,value_type>> pairs;
rmm::device_vector<pair_type> d_pairs;
std::unordered_map<key_type, value_type> expected_values;
MapTest(const int hash_table_size = 10000)
: size(hash_table_size), the_map(new map_type(hash_table_size, op_type::IDENTITY))
{
}
pair_type * create_input(const int num_unique_keys, const int num_values_per_key, const int ratio = 2, const int max_key = RAND_MAX, const int max_value = RAND_MAX, bool shuffle = false)
{
const int TOTAL_PAIRS = num_unique_keys * num_values_per_key;
this->the_map.reset(new map_type(ratio*TOTAL_PAIRS, unused_value));
pairs.reserve(TOTAL_PAIRS);
// Always use the same seed so the random sequence is the same each time
std::srand(0);
for(int i = 0; i < num_unique_keys; ++i )
{
// Create random key
key_type current_key = std::rand() % max_key;
// Don't use unused_key
while(current_key == this->unused_key)
{
current_key = std::rand();
}
// For the current key, generate random values
for(int j = 0; j < num_values_per_key; ++j)
{
value_type current_value = std::rand() % max_value;
// Don't use unused_value
while(current_value == this->unused_value)
{
current_value = std::rand();
}
// Store current key and value
pairs.push_back(std::make_pair(current_key, current_value));
// Use an STL map to keep track of the expected aggregated value for each key
auto found = expected_values.find(current_key);
// Key doesn't exist yet, insert it
if(found == expected_values.end())
{
expected_values.insert(std::make_pair(current_key,current_value));
}
// Key exists, update the value with the operator
else
{
op_type op;
value_type new_value = op(found->second, current_value);
found->second = new_value;
}
}
}
if(shuffle == true)
std::random_shuffle(pairs.begin(), pairs.end());
d_pairs = pairs;
return thrust::raw_pointer_cast(d_pairs.data());
}
void check_answer(){
for(auto const &k : this->expected_values)
{
key_type test_key = k.first;
value_type expected_value = k.second;
auto found = this->the_map->find(test_key);
ASSERT_NE(this->the_map->end(), found);
value_type test_value = found->second;
EXPECT_EQ(expected_value, test_value) << "Key is: " << test_key;
}
}
~MapTest(){
}
};
// Google Test can only do a parameterized typed-test over a single type, so we have
// to nest multiple types inside of the KeyValueTypes struct above
// KeyValueTypes<type1, type2> implies key_type = type1, value_type = type2
// This list is the types across which Google Test will run our tests
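// For example, KeyValueTypes<int, float, max_op> instantiates MapTest with int keys, float values and max aggregation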
typedef ::testing::Types< KeyValueTypes<int, int, max_op>,
KeyValueTypes<int, float, max_op>,
KeyValueTypes<int, double, max_op>,
KeyValueTypes<int, long long int, max_op>,
KeyValueTypes<int, unsigned long long int, max_op>,
KeyValueTypes<unsigned long long int, int, max_op>,
KeyValueTypes<unsigned long long int, float, max_op>,
KeyValueTypes<unsigned long long int, double, max_op>,
KeyValueTypes<unsigned long long int, long long int, max_op>,
KeyValueTypes<unsigned long long int, unsigned long long int, max_op>,
KeyValueTypes<int, int, min_op>,
KeyValueTypes<int, float, min_op>,
KeyValueTypes<int, double, min_op>,
KeyValueTypes<int, long long int, min_op>,
KeyValueTypes<int, unsigned long long int, min_op>,
KeyValueTypes<unsigned long long int, int, min_op>,
KeyValueTypes<unsigned long long int, float, min_op>,
KeyValueTypes<unsigned long long int, double, min_op>,
KeyValueTypes<unsigned long long int, long long int, min_op>,
KeyValueTypes<unsigned long long int, unsigned long long int, min_op>
> Implementations;
TYPED_TEST_CASE(MapTest, Implementations);
TYPED_TEST(MapTest, InitialState)
{
using key_type = typename TypeParam::key_type;
using value_type = typename TypeParam::value_type;
auto begin = this->the_map->begin();
auto end = this->the_map->end();
EXPECT_NE(begin,end);
}
TYPED_TEST(MapTest, CheckUnusedValues){
EXPECT_EQ(this->the_map->get_unused_key(), this->unused_key);
auto begin = this->the_map->begin();
EXPECT_EQ(begin->first, this->unused_key);
EXPECT_EQ(begin->second, this->unused_value);
}
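// Builds the hash table on the device: every thread inserts pairs in a grid-stride loop, and duplicate keys
// are combined with the given aggregation operator, mirroring the sequentially computed expected_values.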
template<typename map_type, typename Aggregation_Operator>
__global__ void build_table(map_type * const the_map,
const typename map_type::value_type * const input_pairs,
const typename map_type::size_type input_size,
Aggregation_Operator op)
{
using size_type = typename map_type::size_type;
size_type i = threadIdx.x + blockIdx.x * blockDim.x;
while( i < input_size ){
the_map->insert(input_pairs[i], op);
i += blockDim.x * gridDim.x;
}
}
TYPED_TEST(MapTest, AggregationTestDeviceAllSame)
{
using value_type = typename TypeParam::value_type;
using pair_type = typename MapTest<TypeParam>::pair_type;
using op_type = typename MapTest<TypeParam>::op_type;
pair_type * d_pairs = this->create_input(1, 1<<20);
const dim3 grid_size ((this->d_pairs.size() + this->THREAD_BLOCK_SIZE -1) / this->THREAD_BLOCK_SIZE,1,1);
const dim3 block_size (this->THREAD_BLOCK_SIZE, 1, 1);
cudaDeviceSynchronize();
build_table<<<grid_size, block_size>>>((this->the_map).get(), d_pairs, this->d_pairs.size(), op_type());
cudaDeviceSynchronize();
this->check_answer();
}
TYPED_TEST(MapTest, AggregationTestDeviceAllUnique)
{
using value_type = typename TypeParam::value_type;
using pair_type = typename MapTest<TypeParam>::pair_type;
using op_type = typename MapTest<TypeParam>::op_type;
pair_type * d_pairs = this->create_input(1<<18, 1);
const dim3 grid_size ((this->d_pairs.size() + this->THREAD_BLOCK_SIZE -1) / this->THREAD_BLOCK_SIZE,1,1);
const dim3 block_size (this->THREAD_BLOCK_SIZE, 1, 1);
cudaDeviceSynchronize();
build_table<<<grid_size, block_size>>>((this->the_map).get(), d_pairs, this->d_pairs.size(), op_type());
cudaDeviceSynchronize();
this->check_answer();
}
TYPED_TEST(MapTest, AggregationTestDeviceWarpSame)
{
using value_type = typename TypeParam::value_type;
using pair_type = typename MapTest<TypeParam>::pair_type;
using op_type = typename MapTest<TypeParam>::op_type;
pair_type * d_pairs = this->create_input(1<<15, 32);
const dim3 grid_size ((this->d_pairs.size() + this->THREAD_BLOCK_SIZE -1) / this->THREAD_BLOCK_SIZE,1,1);
const dim3 block_size (this->THREAD_BLOCK_SIZE, 1, 1);
cudaDeviceSynchronize();
build_table<<<grid_size, block_size>>>((this->the_map).get(), d_pairs, this->d_pairs.size(), op_type());
cudaDeviceSynchronize();
this->check_answer();
}
TYPED_TEST(MapTest, AggregationTestDeviceBlockSame)
{
using value_type = typename TypeParam::value_type;
using pair_type = typename MapTest<TypeParam>::pair_type;
using op_type = typename MapTest<TypeParam>::op_type;
pair_type * d_pairs = this->create_input(1<<12, this->THREAD_BLOCK_SIZE);
const dim3 grid_size ((this->d_pairs.size() + this->THREAD_BLOCK_SIZE -1) / this->THREAD_BLOCK_SIZE,1,1);
const dim3 block_size (this->THREAD_BLOCK_SIZE, 1, 1);
cudaDeviceSynchronize();
build_table<<<grid_size, block_size>>>((this->the_map).get(), d_pairs, this->d_pairs.size(), op_type());
cudaDeviceSynchronize();
this->check_answer();
}
int main(int argc, char * argv[]){
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
|
aefe113a73f947b63a6e5829f8d33e517669d400.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void copy_buffer_util_kernel( const float4 * __restrict input_buf, float4 * __restrict output_buf, int elem_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
if (elem_id < elem_count)
output_buf[elem_id] = input_buf[elem_id];
} | aefe113a73f947b63a6e5829f8d33e517669d400.cu | #include "includes.h"
__global__ void copy_buffer_util_kernel( const float4 * __restrict input_buf, float4 * __restrict output_buf, int elem_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
if (elem_id < elem_count)
output_buf[elem_id] = input_buf[elem_id];
} |
b38c513d4486bc190394801fcca52cbecd65ba25.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// auction.cu
#ifndef MAIN_AUCTION
#define MAIN_AUCTION
#include <iostream>
#include <string>
#include <fstream>
// #include <chrono>
#include <stdio.h>
#include <stdlib.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
// --
// Define constants
#ifndef __RUN_VARS
#define __RUN_VARS
#define MAX_NODES 20000 // Dimension of problem
#define AUCTION_MAX_EPS 1.0 // Larger values mean solution is more approximate
#define AUCTION_MIN_EPS 1.0
#define AUCTION_FACTOR 0.0
#define NUM_RUNS 10
// Uncomment to run dense version
// #define DENSE
#endif
#include "topdot.cpp"
#ifdef DENSE
#include "auction_kernel_dense.cu"
#else
#include "auction_kernel_csr.cu"
#endif
int load_data(float *raw_data) {
std::ifstream input_file("graph", std::ios_base::in);
std::cerr << "load_data: start" << std::endl;
int i = 0;
float val;
while(input_file >> val) {
raw_data[i] = val;
i++;
if(i > MAX_NODES * MAX_NODES) {
std::cerr << "load_data: ERROR -- data file too large" << std::endl;
return -1;
}
}
std::cerr << "load_data: finish" << std::endl;
return (int)sqrt(i);
}
extern "C" {
int run_auction(
int num_nodes,
int num_edges,
float* h_data, // data
int* h_offsets, // offsets for items
int* h_columns,
int* h_person2item, // results
float auction_max_eps,
float auction_min_eps,
float auction_factor,
int num_runs,
int verbose
)
{
// --
// CUDA options
dim3 threadsPerBlock(512, 1, 1);
dim3 blocksPerGrid(ceil(num_nodes / (double) threadsPerBlock.x), 1, 1);
// --
// Declare variables
float* d_data;
int* d_offsets;
int* d_columns;
int* d_person2item;
int* d_item2person;
float* d_bids;
float* d_prices;
int* d_bidders; // unused
int* d_sbids;
int h_numAssign;
int* d_numAssign = 0;
float* d_rand;
// --
// Allocate device memory
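// Note: d_bids, d_bidders and d_rand are dense num_nodes x num_nodes buffers,
// so device memory grows as O(n^2) even for the CSR build of the auction.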
hipMalloc((void **)&d_data, num_edges * sizeof(float));
hipMalloc((void **)&d_columns, num_edges * sizeof(int));
hipMalloc((void **)&d_offsets, (num_nodes + 1) * sizeof(int));
hipMalloc((void **)&d_person2item, num_nodes * sizeof(int));
hipMalloc((void **)&d_item2person, num_nodes * sizeof(int));
hipMalloc((void **)&d_bids, num_nodes * num_nodes * sizeof(float));
hipMalloc((void **)&d_prices, num_nodes * sizeof(float));
hipMalloc((void **)&d_bidders, num_nodes * num_nodes * sizeof(int)); // unused
hipMalloc((void **)&d_sbids, num_nodes * sizeof(int));
hipMalloc((void **)&d_numAssign, 1 * sizeof(int)) ;
hipMalloc((void **)&d_rand, num_nodes * num_nodes * sizeof(float)) ;
// --
// Copy from host to device
hipMemcpy(d_data, h_data, num_edges * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_columns, h_columns, num_edges * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_offsets, h_offsets, (num_nodes + 1) * sizeof(int), hipMemcpyHostToDevice);
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(gen, 123);
hiprandGenerateUniform(gen, d_rand, num_nodes * num_nodes);
for(int run_num = 0; run_num < num_runs; run_num++) {
hipMemset(d_prices, 0.0, num_nodes * sizeof(float));
// Start timer
hipEvent_t start, stop;
float milliseconds = 0;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
float auction_eps = auction_max_eps;
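// Epsilon scaling: solve with a coarse epsilon and tighten it by auction_factor
// each pass. With the defaults defined above (max == min, factor == 0) the loop
// body executes exactly once per run.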
while(auction_eps >= auction_min_eps) {
h_numAssign = 0;
hipMemset(d_bidders, 0, num_nodes * num_nodes * sizeof(int)); // unused
hipMemset(d_person2item, -1, num_nodes * sizeof(int));
hipMemset(d_item2person, -1, num_nodes * sizeof(int));
hipMemset(d_numAssign, 0, 1 * sizeof(int));
hipDeviceSynchronize();
int counter = 0;
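// One auction round per iteration: run_bidding lets each unassigned person bid
// for its preferred item, run_assignment awards items to their best bidders and
// raises prices (kernel bodies live in auction_kernel_csr.cu / _dense.cu).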
while(h_numAssign < num_nodes){
counter += 1;
hipMemset(d_bids, 0, num_nodes * num_nodes * sizeof(float));
hipMemset(d_sbids, 0, num_nodes * sizeof(int));
hipDeviceSynchronize();
hipLaunchKernelGGL(( run_bidding), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
num_nodes,
d_data,
d_offsets,
d_columns,
d_person2item,
d_bids,
d_bidders,
d_sbids,
d_prices,
auction_eps,
d_rand
);
hipLaunchKernelGGL(( run_assignment), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
num_nodes,
d_person2item,
d_item2person,
d_bids,
d_bidders,
d_sbids,
d_prices,
d_numAssign
);
hipDeviceSynchronize();
hipMemcpy(&h_numAssign, d_numAssign, sizeof(int) * 1, hipMemcpyDeviceToHost);
// std::cerr << "h_numAssign=" << h_numAssign << std::endl;
}
if(verbose) {
std::cerr << "counter=" << counter << std::endl;
}
auction_eps *= auction_factor;
}
hipDeviceSynchronize();
// Stop timer
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
if(verbose) {
std::cerr <<
"run_num=" << run_num <<
" | h_numAssign=" << h_numAssign <<
" | milliseconds=" << milliseconds << std::endl;
}
hipDeviceSynchronize();
}
// Read out results
hipMemcpy(h_person2item, d_person2item, sizeof(int) * num_nodes, hipMemcpyDeviceToHost);
hipFree(d_data);
hipFree(d_columns);
hipFree(d_offsets);
hipFree(d_person2item);
hipFree(d_item2person);
hipFree(d_bids);
hipFree(d_prices);
hipFree(d_bidders);
hipFree(d_sbids);
hipFree(d_numAssign);
hipFree(d_rand);
return 0;
} // end run_auction
int run_auction_python(
int num_nodes,
int num_edges,
float* h_data, // data
int* h_offsets, // offsets for items
int* h_columns,
int* h_person2item, // results
float auction_max_eps,
float auction_min_eps,
float auction_factor,
int num_runs,
int verbose
) {
hipEvent_t auction_start, auction_stop;
float milliseconds = 0;
hipEventCreate(&auction_start);
hipEventCreate(&auction_stop);
hipEventRecord(auction_start, 0);
run_auction(
num_nodes,
num_edges,
h_data, // data
h_offsets, // offsets for items
h_columns,
h_person2item, // results
auction_max_eps,
auction_min_eps,
auction_factor,
num_runs,
0
);
hipEventRecord(auction_stop, 0);
hipEventSynchronize(auction_stop);
hipEventElapsedTime(&milliseconds, auction_start, auction_stop);
hipEventDestroy(auction_start);
hipEventDestroy(auction_stop);
if(verbose > 0) {
std::cerr << "run_auction " << milliseconds << std::endl;
}
return 0;
}
int dot_auction(
int num_nodes,
int *Ap, int *Aj, double *Ax,
int *Bp, int *Bj, double *Bx,
int k,
int *h_person2item,
int verbose
) {
// std::chrono::high_resolution_clock::time_point topdot_start = std::chrono::high_resolution_clock::now();
int* h_columns = (int *)malloc(sizeof(int) * num_nodes * k);
double* h_data_d = (double *)malloc(sizeof(double) * num_nodes * k);
float* h_data = (float *)malloc(sizeof(float) * num_nodes * k);
int* h_offsets = (int *)malloc(sizeof(int) * (num_nodes + 1));
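// _topdot (topdot.cpp, not shown here) appears to compute, per row, the k
// largest entries of A*B^T, writing their column indices and values into
// h_columns / h_data_d; h_offsets then becomes a uniform CSR row pointer
// with exactly k entries per row.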
_topdot(num_nodes, num_nodes, Ap, Aj, Ax, Bp, Bj, Bx, k, -1, h_columns, h_data_d);
h_offsets[0] = 0;
for(int i = 1; i < num_nodes + 1; i++) {
h_offsets[i] = i * k;
}
for(int i = 0; i < num_nodes * k; i++) {
h_data[i] = (float)h_data_d[i];
if(verbose > 1) {
std::cerr << h_columns[i] << ":" << h_data[i] << " ";
if((i + 1) % k == 0) {
std::cerr << std::endl;
}
}
}
free(h_data_d);
// // Stop timer
// std::chrono::high_resolution_clock::time_point topdot_stop = std::chrono::high_resolution_clock::now();
// std::chrono::duration<double> time_span =
// std::chrono::duration_cast<std::chrono::duration<double>>(topdot_stop - topdot_start);
// if(verbose > 0) {
// std::cerr << "topdot " << 1000 * time_span.count() << std::endl;
// }
// --
// Auction algorithm
hipEvent_t auction_start, auction_stop;
float milliseconds = 0;
hipEventCreate(&auction_start);
hipEventCreate(&auction_stop);
hipEventRecord(auction_start, 0);
run_auction(
(int)num_nodes,
(int)num_nodes * k,
h_data,
h_offsets,
h_columns,
h_person2item,
(float)1.0,
(float)1.0,
(float)0.0,
(int)1, // 1 run
(int)0 // not verbose
);
hipEventRecord(auction_stop, 0);
hipEventSynchronize(auction_stop);
hipEventElapsedTime(&milliseconds, auction_start, auction_stop);
hipEventDestroy(auction_start);
hipEventDestroy(auction_stop);
if(verbose > 0) {
std::cerr << "run_auction " << milliseconds << std::endl;
}
free(h_columns);
free(h_data);
free(h_offsets);
return 0;
} // end dot_auction
} // end extern
void init_device() {
// Init devices
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
if(deviceCount<1){
printf("There is no device detected.\n");
exit(1);
}
int device=0;
hipDeviceProp_t deviceProp;
for (device = 0; device < deviceCount; ++device) {
if(hipGetDeviceProperties(&deviceProp, device) == hipSuccess) {
if(deviceProp.major >= 1) {
break;
}
}
}
if(device == deviceCount) {
printf("There is no device supporting CUDA.\n");
exit(1);
}
hipSetDevice(device);
}
int main(int argc, char **argv)
{
#ifdef DENSE
std::cerr << "auction_kernel_dense.cu" << std::endl;
#else
std::cerr << "auction_kernel_csr.cu" << std::endl;
#endif
init_device();
// Load data
float* raw_data = (float *)malloc(sizeof(float) * MAX_NODES * MAX_NODES);
int num_nodes = load_data(raw_data);
int num_edges = num_nodes * num_nodes;
if(num_nodes <= 0) {
return 1;
}
float* h_data = (float *)realloc(raw_data, sizeof(float) * num_nodes * num_nodes);
// Dense
int* h_offsets = (int *)malloc(sizeof(int) * (num_nodes + 1));
h_offsets[0] = 0;
for(int i = 1; i < num_nodes + 1; i++) {
h_offsets[i] = i * num_nodes;
}
int* h_columns = (int *)malloc(sizeof(int) * num_edges);
for(int i = 0; i < num_edges; i++) {
h_columns[i] = i % num_nodes;
}
int* h_person2item = (int *)malloc(sizeof(int) * num_nodes);
int verbose = 1;
run_auction(
num_nodes,
num_edges,
h_data,
h_offsets,
h_columns,
h_person2item,
AUCTION_MAX_EPS,
AUCTION_MIN_EPS,
AUCTION_FACTOR,
NUM_RUNS,
verbose
);
// Print results
float score = 0;
for (int i = 0; i < num_nodes; i++) {
std::cout << i << " " << h_person2item[i] << std::endl;
// score += h_data[i + num_nodes * h_person2item[i]];
score += h_data[i * num_nodes + h_person2item[i]];
}
std::cerr << "score=" << (int)score << std::endl;
free(h_data);
free(h_offsets);
free(h_columns);
free(h_person2item);
}
#endif
| b38c513d4486bc190394801fcca52cbecd65ba25.cu | // auction.cu
#ifndef MAIN_AUCTION
#define MAIN_AUCTION
#include <iostream>
#include <string>
#include <fstream>
// #include <chrono>
#include <stdio.h>
#include <stdlib.h>
#include <curand.h>
#include <curand_kernel.h>
// --
// Define constants
#ifndef __RUN_VARS
#define __RUN_VARS
#define MAX_NODES 20000 // Dimension of problem
#define AUCTION_MAX_EPS 1.0 // Larger values mean solution is more approximate
#define AUCTION_MIN_EPS 1.0
#define AUCTION_FACTOR 0.0
#define NUM_RUNS 10
// Uncomment to run dense version
// #define DENSE
#endif
#include "topdot.cpp"
#ifdef DENSE
#include "auction_kernel_dense.cu"
#else
#include "auction_kernel_csr.cu"
#endif
int load_data(float *raw_data) {
std::ifstream input_file("graph", std::ios_base::in);
std::cerr << "load_data: start" << std::endl;
int i = 0;
float val;
while(input_file >> val) {
raw_data[i] = val;
i++;
if(i > MAX_NODES * MAX_NODES) {
std::cerr << "load_data: ERROR -- data file too large" << std::endl;
return -1;
}
}
std::cerr << "load_data: finish" << std::endl;
return (int)sqrt(i);
}
extern "C" {
int run_auction(
int num_nodes,
int num_edges,
float* h_data, // data
int* h_offsets, // offsets for items
int* h_columns,
int* h_person2item, // results
float auction_max_eps,
float auction_min_eps,
float auction_factor,
int num_runs,
int verbose
)
{
// --
// CUDA options
dim3 threadsPerBlock(512, 1, 1);
dim3 blocksPerGrid(ceil(num_nodes / (double) threadsPerBlock.x), 1, 1);
// --
// Declare variables
float* d_data;
int* d_offsets;
int* d_columns;
int* d_person2item;
int* d_item2person;
float* d_bids;
float* d_prices;
int* d_bidders; // unused
int* d_sbids;
int h_numAssign;
int* d_numAssign = 0;
float* d_rand;
// --
// Allocate device memory
cudaMalloc((void **)&d_data, num_edges * sizeof(float));
cudaMalloc((void **)&d_columns, num_edges * sizeof(int));
cudaMalloc((void **)&d_offsets, (num_nodes + 1) * sizeof(int));
cudaMalloc((void **)&d_person2item, num_nodes * sizeof(int));
cudaMalloc((void **)&d_item2person, num_nodes * sizeof(int));
cudaMalloc((void **)&d_bids, num_nodes * num_nodes * sizeof(float));
cudaMalloc((void **)&d_prices, num_nodes * sizeof(float));
cudaMalloc((void **)&d_bidders, num_nodes * num_nodes * sizeof(int)); // unused
cudaMalloc((void **)&d_sbids, num_nodes * sizeof(int));
cudaMalloc((void **)&d_numAssign, 1 * sizeof(int)) ;
cudaMalloc((void **)&d_rand, num_nodes * num_nodes * sizeof(float)) ;
// --
// Copy from host to device
cudaMemcpy(d_data, h_data, num_edges * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_columns, h_columns, num_edges * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_offsets, h_offsets, (num_nodes + 1) * sizeof(int), cudaMemcpyHostToDevice);
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(gen, 123);
curandGenerateUniform(gen, d_rand, num_nodes * num_nodes);
for(int run_num = 0; run_num < num_runs; run_num++) {
cudaMemset(d_prices, 0.0, num_nodes * sizeof(float));
// Start timer
cudaEvent_t start, stop;
float milliseconds = 0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
float auction_eps = auction_max_eps;
while(auction_eps >= auction_min_eps) {
h_numAssign = 0;
cudaMemset(d_bidders, 0, num_nodes * num_nodes * sizeof(int)); // unused
cudaMemset(d_person2item, -1, num_nodes * sizeof(int));
cudaMemset(d_item2person, -1, num_nodes * sizeof(int));
cudaMemset(d_numAssign, 0, 1 * sizeof(int));
cudaThreadSynchronize();
int counter = 0;
while(h_numAssign < num_nodes){
counter += 1;
cudaMemset(d_bids, 0, num_nodes * num_nodes * sizeof(float));
cudaMemset(d_sbids, 0, num_nodes * sizeof(int));
cudaThreadSynchronize();
run_bidding<<<blocksPerGrid, threadsPerBlock>>>(
num_nodes,
d_data,
d_offsets,
d_columns,
d_person2item,
d_bids,
d_bidders,
d_sbids,
d_prices,
auction_eps,
d_rand
);
run_assignment<<<blocksPerGrid, threadsPerBlock>>>(
num_nodes,
d_person2item,
d_item2person,
d_bids,
d_bidders,
d_sbids,
d_prices,
d_numAssign
);
cudaThreadSynchronize();
cudaMemcpy(&h_numAssign, d_numAssign, sizeof(int) * 1, cudaMemcpyDeviceToHost);
// std::cerr << "h_numAssign=" << h_numAssign << std::endl;
}
if(verbose) {
std::cerr << "counter=" << counter << std::endl;
}
auction_eps *= auction_factor;
}
cudaThreadSynchronize();
// Stop timer
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
if(verbose) {
std::cerr <<
"run_num=" << run_num <<
" | h_numAssign=" << h_numAssign <<
" | milliseconds=" << milliseconds << std::endl;
}
cudaThreadSynchronize();
}
// Read out results
cudaMemcpy(h_person2item, d_person2item, sizeof(int) * num_nodes, cudaMemcpyDeviceToHost);
cudaFree(d_data);
cudaFree(d_columns);
cudaFree(d_offsets);
cudaFree(d_person2item);
cudaFree(d_item2person);
cudaFree(d_bids);
cudaFree(d_prices);
cudaFree(d_bidders);
cudaFree(d_sbids);
cudaFree(d_numAssign);
cudaFree(d_rand);
return 0;
} // end run_auction
int run_auction_python(
int num_nodes,
int num_edges,
float* h_data, // data
int* h_offsets, // offsets for items
int* h_columns,
int* h_person2item, // results
float auction_max_eps,
float auction_min_eps,
float auction_factor,
int num_runs,
int verbose
) {
cudaEvent_t auction_start, auction_stop;
float milliseconds = 0;
cudaEventCreate(&auction_start);
cudaEventCreate(&auction_stop);
cudaEventRecord(auction_start, 0);
run_auction(
num_nodes,
num_edges,
h_data, // data
h_offsets, // offsets for items
h_columns,
h_person2item, // results
auction_max_eps,
auction_min_eps,
auction_factor,
num_runs,
0
);
cudaEventRecord(auction_stop, 0);
cudaEventSynchronize(auction_stop);
cudaEventElapsedTime(&milliseconds, auction_start, auction_stop);
cudaEventDestroy(auction_start);
cudaEventDestroy(auction_stop);
if(verbose > 0) {
std::cerr << "run_auction " << milliseconds << std::endl;
}
return 0;
}
int dot_auction(
int num_nodes,
int *Ap, int *Aj, double *Ax,
int *Bp, int *Bj, double *Bx,
int k,
int *h_person2item,
int verbose
) {
// std::chrono::high_resolution_clock::time_point topdot_start = std::chrono::high_resolution_clock::now();
int* h_columns = (int *)malloc(sizeof(int) * num_nodes * k);
double* h_data_d = (double *)malloc(sizeof(double) * num_nodes * k);
float* h_data = (float *)malloc(sizeof(float) * num_nodes * k);
int* h_offsets = (int *)malloc(sizeof(int) * (num_nodes + 1));
_topdot(num_nodes, num_nodes, Ap, Aj, Ax, Bp, Bj, Bx, k, -1, h_columns, h_data_d);
h_offsets[0] = 0;
for(int i = 1; i < num_nodes + 1; i++) {
h_offsets[i] = i * k;
}
for(int i = 0; i < num_nodes * k; i++) {
h_data[i] = (float)h_data_d[i];
if(verbose > 1) {
std::cerr << h_columns[i] << ":" << h_data[i] << " ";
if((i + 1) % k == 0) {
std::cerr << std::endl;
}
}
}
free(h_data_d);
// // Stop timer
// std::chrono::high_resolution_clock::time_point topdot_stop = std::chrono::high_resolution_clock::now();
// std::chrono::duration<double> time_span =
// std::chrono::duration_cast<std::chrono::duration<double>>(topdot_stop - topdot_start);
// if(verbose > 0) {
// std::cerr << "topdot " << 1000 * time_span.count() << std::endl;
// }
// --
// Auction algorithm
cudaEvent_t auction_start, auction_stop;
float milliseconds = 0;
cudaEventCreate(&auction_start);
cudaEventCreate(&auction_stop);
cudaEventRecord(auction_start, 0);
run_auction(
(int)num_nodes,
(int)num_nodes * k,
h_data,
h_offsets,
h_columns,
h_person2item,
(float)1.0,
(float)1.0,
(float)0.0,
(int)1, // 1 run
(int)0 // not verbose
);
cudaEventRecord(auction_stop, 0);
cudaEventSynchronize(auction_stop);
cudaEventElapsedTime(&milliseconds, auction_start, auction_stop);
cudaEventDestroy(auction_start);
cudaEventDestroy(auction_stop);
if(verbose > 0) {
std::cerr << "run_auction " << milliseconds << std::endl;
}
free(h_columns);
free(h_data);
free(h_offsets);
return 0;
} // end dot_auction
} // end extern
void init_device() {
// Init devices
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
if(deviceCount<1){
printf("There is no device detected.\n");
exit(1);
}
int device=0;
cudaDeviceProp deviceProp;
for (device = 0; device < deviceCount; ++device) {
if(cudaGetDeviceProperties(&deviceProp, device) == cudaSuccess) {
if(deviceProp.major >= 1) {
break;
}
}
}
if(device == deviceCount) {
printf("There is no device supporting CUDA.\n");
exit(1);
}
cudaSetDevice(device);
}
int main(int argc, char **argv)
{
#ifdef DENSE
std::cerr << "auction_kernel_dense.cu" << std::endl;
#else
std::cerr << "auction_kernel_csr.cu" << std::endl;
#endif
init_device();
// Load data
float* raw_data = (float *)malloc(sizeof(float) * MAX_NODES * MAX_NODES);
int num_nodes = load_data(raw_data);
int num_edges = num_nodes * num_nodes;
if(num_nodes <= 0) {
return 1;
}
float* h_data = (float *)realloc(raw_data, sizeof(float) * num_nodes * num_nodes);
// Dense
int* h_offsets = (int *)malloc(sizeof(int) * (num_nodes + 1));
h_offsets[0] = 0;
for(int i = 1; i < num_nodes + 1; i++) {
h_offsets[i] = i * num_nodes;
}
int* h_columns = (int *)malloc(sizeof(int) * num_edges);
for(int i = 0; i < num_edges; i++) {
h_columns[i] = i % num_nodes;
}
int* h_person2item = (int *)malloc(sizeof(int) * num_nodes);
int verbose = 1;
run_auction(
num_nodes,
num_edges,
h_data,
h_offsets,
h_columns,
h_person2item,
AUCTION_MAX_EPS,
AUCTION_MIN_EPS,
AUCTION_FACTOR,
NUM_RUNS,
verbose
);
// Print results
float score = 0;
for (int i = 0; i < num_nodes; i++) {
std::cout << i << " " << h_person2item[i] << std::endl;
// score += h_data[i + num_nodes * h_person2item[i]];
score += h_data[i * num_nodes + h_person2item[i]];
}
std::cerr << "score=" << (int)score << std::endl;
free(h_data);
free(h_offsets);
free(h_columns);
free(h_person2item);
}
#endif
|
da438df72ee14785b6c94d102c02de60e2e21d68.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
> File Name: 02threadadd.cu
> Author: dong xu
> Mail: [email protected]
> Created Time: Wednesday, March 30, 2016, 13:03:38
************************************************************************/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
hipError_t addWithCuda(int *c, const int *a, const int *b, size_t size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, size_t size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| da438df72ee14785b6c94d102c02de60e2e21d68.cu | /*************************************************************************
> File Name: 02threadadd.cu
> Author: dong xu
> Mail: [email protected]
> Created Time: 2016年03月30日 星期三 13时03分38秒
************************************************************************/
#include <stdio.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaThreadExit must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaThreadExit();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaThreadExit failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// cudaThreadSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaThreadSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaThreadSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
5266724997a9e57b94dfdc789ecdab6e0a8223d7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
/* Cublas Works in Column Major and 1 based indexing :: TAKE CARE OF THIS*/
void sgemv(char Trans, int M, int N, float Alpha, float* A, float * X, float*Y , float Beta) {
float * d_A, *d_X, *d_Y;
hipblasHandle_t handle;
hipblasCreate(&handle);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipMalloc((void**) &d_A, sizeof(float) * M*N);
hipMemcpy(d_A, A , sizeof(float) * M*N , hipMemcpyHostToDevice);
if(Trans=='n' || Trans == 'N') {
hipMalloc((void**) &d_X, sizeof(float) *N);
hipMemcpy(d_X, X , sizeof(float) *N , hipMemcpyHostToDevice);
hipMalloc((void**) &d_Y, sizeof(float) * M);
hipMemcpy(d_Y, Y , sizeof(float) * M , hipMemcpyHostToDevice);
} else {
hipMalloc((void**) &d_X, sizeof(float) * M);
hipMemcpy(d_X, X , sizeof(float) * M , hipMemcpyHostToDevice);
hipMalloc((void**) &d_Y, sizeof(float) * N);
hipMemcpy(d_Y, Y , sizeof(float) *N , hipMemcpyHostToDevice);
}
if (Trans == 'N') {
hipblasSgemv(
handle, HIPBLAS_OP_N,
M, N,
&Alpha,
d_A, M,
d_X, 1,
&Beta,
d_Y, 1
);
} else {
hipblasSgemv(handle, HIPBLAS_OP_T, M, N, &Alpha, d_A, M, d_X, 1,&Beta, d_Y, 1 );
}
/* Copy Memory Back to Host */
if (Trans == 'n' || Trans == 'N') {
hipMemcpy(Y, d_Y, sizeof(float) * M , hipMemcpyDeviceToHost);
} else {
hipMemcpy(Y, d_Y, sizeof(float) * N, hipMemcpyDeviceToHost);
}
/* Free Device Memory*/
hipFree(d_A);
hipFree(d_X);
hipFree(d_Y);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float timeElapsed;
hipEventElapsedTime (&timeElapsed, start, stop);
printf("Time Elapsed : %f ms\n", timeElapsed);
}
int main(int argc , char * argv[]) {
char Trans;
int M, N;
float Alpha; float Beta;
float* A; // Matrix A
float * X;
float * Y;
const int INCY = 1;
/*
Test Cases are to be input
*/
scanf("%c %d %d %f %f",&Trans, &M, &N, &Alpha, &Beta );
hipHostMalloc((void**) &A, sizeof(float) * M*N);
int y_size, x_size;
if (Trans == 'N' || Trans == 'n') {
hipHostMalloc((void**) &X, sizeof(float)*N);
hipHostMalloc((void**) &Y, sizeof(float)*M);
y_size = M;
x_size = N;
} else {
hipHostMalloc((void**) &X, sizeof(float)*M);
hipHostMalloc((void**) &Y, sizeof(float)*N);
y_size = N;
x_size = M;
}
for (int i= 0; i<M;i++) {
for (int j = 0; j<N;j++) {
scanf("%f", A + j*M + i);
}
}
for (int i= 0; i<x_size;i++) {
scanf("%f", X + i);
}
for (int i= 0; i<y_size;i++) {
scanf("%f", Y + i);
}
/* Cublas Call */
sgemv(Trans, M, N , Alpha, A,X,Y,Beta);
/* Display Output */
FILE * fp;
fp = fopen("Results/sgemv_cublas_last.txt", "w");
for(int i =0; i<y_size;i++) {
fprintf(fp,"%lf ", Y[i]);
}
fclose(fp);
}
| 5266724997a9e57b94dfdc789ecdab6e0a8223d7.cu | #include <stdio.h>
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
#include <cuda.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"
/* Cublas Works in Column Major and 1 based indexing :: TAKE CARE OF THIS*/
void sgemv(char Trans, int M, int N, float Alpha, float* A, float * X, float*Y , float Beta) {
float * d_A, *d_X, *d_Y;
cublasHandle_t handle;
cublasCreate_v2(&handle);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaMalloc((void**) &d_A, sizeof(float) * M*N);
cudaMemcpy(d_A, A , sizeof(float) * M*N , cudaMemcpyHostToDevice);
if(Trans=='n' || Trans == 'N') {
cudaMalloc((void**) &d_X, sizeof(float) *N);
cudaMemcpy(d_X, X , sizeof(float) *N , cudaMemcpyHostToDevice);
cudaMalloc((void**) &d_Y, sizeof(float) * M);
cudaMemcpy(d_Y, Y , sizeof(float) * M , cudaMemcpyHostToDevice);
} else {
cudaMalloc((void**) &d_X, sizeof(float) * M);
cudaMemcpy(d_X, X , sizeof(float) * M , cudaMemcpyHostToDevice);
cudaMalloc((void**) &d_Y, sizeof(float) * N);
cudaMemcpy(d_Y, Y , sizeof(float) *N , cudaMemcpyHostToDevice);
}
if (Trans == 'N') {
cublasSgemv(
handle, CUBLAS_OP_N,
M, N,
&Alpha,
d_A, M,
d_X, 1,
&Beta,
d_Y, 1
);
} else {
cublasSgemv(handle, CUBLAS_OP_T, M, N, &Alpha, d_A, M, d_X, 1,&Beta, d_Y, 1 );
}
/* Copy Memory Back to Host */
if (Trans == 'n' || Trans == 'N') {
cudaMemcpy(Y, d_Y, sizeof(float) * M , cudaMemcpyDeviceToHost);
} else {
cudaMemcpy(Y, d_Y, sizeof(float) * N, cudaMemcpyDeviceToHost);
}
/* Free Device Memory*/
cudaFree(d_A);
cudaFree(d_X);
cudaFree(d_Y);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float timeElapsed;
cudaEventElapsedTime (&timeElapsed, start, stop);
printf("Time Elapsed : %f ms\n", timeElapsed);
}
int main(int argc , char * argv[]) {
char Trans;
int M, N;
float Alpha; float Beta;
float* A; // Matrix A
float * X;
float * Y;
const int INCY = 1;
/*
Test Cases are to be input
*/
scanf("%c %d %d %f %f",&Trans, &M, &N, &Alpha, &Beta );
cudaMallocHost((void**) &A, sizeof(float) * M*N);
int y_size, x_size;
if (Trans == 'N' || Trans == 'n') {
cudaMallocHost((void**) &X, sizeof(float)*N);
cudaMallocHost((void**) &Y, sizeof(float)*M);
y_size = M;
x_size = N;
} else {
cudaMallocHost((void**) &X, sizeof(float)*M);
cudaMallocHost((void**) &Y, sizeof(float)*N);
y_size = N;
x_size = M;
}
for (int i= 0; i<M;i++) {
for (int j = 0; j<N;j++) {
scanf("%f", A + j*M + i);
}
}
for (int i= 0; i<x_size;i++) {
scanf("%f", X + i);
}
for (int i= 0; i<y_size;i++) {
scanf("%f", Y + i);
}
/* Cublas Call */
sgemv(Trans, M, N , Alpha, A,X,Y,Beta);
/* Display Output */
FILE * fp;
fp = fopen("Results/sgemv_cublas_last.txt", "w");
for(int i =0; i<y_size;i++) {
fprintf(fp,"%lf ", Y[i]);
}
fclose(fp);
}
|
6a5cc12dcd7e513aa1b252c5a88c8f154ec33d5c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialDilatedMaxPooling.cu"
#else
#include "../common.h"
static inline void THNN_(SpatialDilatedMaxPooling_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *gradOutput, THCIndexTensor *indices,
int kH, int kW, int dH, int dW, int padH, int padW,
int dilationH, int dilationW, bool ceil_mode) {
THArgCheck(kW > 0 && kH > 0, 5,
"kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
THArgCheck(dW > 0 && dH > 0, 8,
"stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
THArgCheck(dilationH > 0 && dilationW > 0, 12,
"dilation should be greater than zero, but got dilationH: %d dilationW: %d",
dilationH, dilationW);
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
int batchSize = 1;
if (ndim == 4) {
batchSize = input->size[0];
dimf++;
dimh++;
dimw++;
}
THCUNN_argCheck(state, !input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
"non-empty 3D or 4D input tensor expected but got: %s");
THArgCheck(kW/2 >= padW && kH/2 >= padH, 2,
"pad should be smaller than half of kernel size, but got "
"padW = %d, padH = %d, kW = %d, kH = %d",
padW, padH, kW, kH);
int64_t nInputPlane = input->size[dimh-1];
int64_t nInputRows = input->size[dimh];
int64_t nInputCols = input->size[dimw];
int64_t nOutputRows, nOutputCols;
int64_t nOutputPlane = nInputPlane;
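// Output extent follows the usual dilated-pooling formula:
// floor_or_ceil((in + 2*pad - dilation*(k-1) - 1) / stride) + 1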
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
if (nOutputCols < 1 || nOutputRows < 1)
THError("Given input size: (%dx%dx%d). "
"Calculated output size: (%dx%dx%d). Output size is too small",
nInputPlane,nInputRows,nInputCols,nInputPlane,nOutputRows,nOutputCols);
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
THCUNN_check_dim_size(state, gradOutput, ndim, dimh, nOutputRows);
THCUNN_check_dim_size(state, gradOutput, ndim, dimw, nOutputCols);
}
if (indices != NULL) {
THCUNN_check_dim_size_indices(state, indices, 4, 0, batchSize);
THCUNN_check_dim_size_indices(state, indices, 4, 1, nOutputPlane);
THCUNN_check_dim_size_indices(state, indices, 4, 2, nOutputRows);
THCUNN_check_dim_size_indices(state, indices, 4, 3, nOutputCols);
}
}
void THNN_(SpatialDilatedMaxPooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCIndexTensor *indices,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH,
bool ceil_mode)
{
THCUNN_assertSameGPU(state, 3, input, output, indices);
THNN_(SpatialDilatedMaxPooling_shapeCheck)
(state, input, NULL, NULL, kH, kW, dH, dW,
padH, padW, dilationH, dilationW, ceil_mode);
int64_t nInputCols, nInputRows, nInputPlane, batchSize;
int64_t nOutputCols, nOutputRows;
if (input->dim() == 3) {
nInputCols = input->size[2];
nInputRows = input->size[1];
nInputPlane = input->size[0];
batchSize = 1;
}
else
{
nInputCols = input->size[3];
nInputRows = input->size[2];
nInputPlane = input->size[1];
batchSize = input->size[0];
}
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
input = THCTensor_(newContiguous)(state, input);
real* input_data = THCTensor_(data)(state, input);
THCTensor_(resize4d)(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols);
THCUNN_resizeAs_indices(state, indices, output);
THCIndex_t* indices_data = THCIndexTensor_(data)(state, indices);
real* output_data = THCTensor_(data)(state, output);
int count = THCTensor_(nElement)(state, output);
hipLaunchKernelGGL(( MaxPoolForward<real, accreal>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
count, input_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, dilationH, dilationW, output_data, indices_data);
THCudaCheck(hipGetLastError());
if(input->dim() == 3)
THCTensor_(resize3d)(state, output, nInputPlane, nOutputRows, nOutputCols);
THCTensor_(free)(state, input);
}
void THNN_(SpatialDilatedMaxPooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCIndexTensor *indices,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH,
bool ceil_mode)
{
THCUNN_assertSameGPU(state, 4, input, gradOutput, indices, gradInput);
THNN_(SpatialDilatedMaxPooling_shapeCheck)
(state, input, gradOutput, indices, kH, kW, dH, dW,
padH, padW, dilationH, dilationW, ceil_mode);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int64_t nInputCols, nInputRows, nInputPlane, batchSize;
int64_t nOutputCols, nOutputRows;
if (input->_dim() == 3) {
nInputCols = input->size[2];
nInputRows = input->size[1];
nInputPlane = input->size[0];
batchSize = 1;
}
else
{
nInputCols = input->size[3];
nInputRows = input->size[2];
nInputPlane = input->size[1];
batchSize = input->size[0];
}
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
int count = THCTensor_(nElement)(state, input);
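// Backward launch: grid.x covers one input plane (imgcount elements at
// BACKWARD_THREADS per block), grid.y spans the batch and grid.z the channels,
// each clamped to the device's maximum grid dimensions (the kernel presumably
// strides over any clamped dimension).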
dim3 grid;
int imgcount = nInputCols * nInputRows;
const int blocks = (imgcount + BACKWARD_THREADS - 1) / BACKWARD_THREADS;
grid.x = blocks;
grid.y = batchSize;
grid.z = nInputPlane;
uint64_t maxGridY = THCState_getCurrentDeviceProperties(state)->maxGridSize[1];
uint64_t maxGridZ = THCState_getCurrentDeviceProperties(state)->maxGridSize[2];
if (maxGridY < grid.y) grid.y = maxGridY;
if (maxGridZ < grid.z) grid.z = maxGridZ;
hipLaunchKernelGGL(( MaxPoolBackward<real, accreal>) , dim3(grid), dim3(BACKWARD_THREADS), 0, THCState_getCurrentStream(state) ,
count,
THCTensor_(data)(state, gradOutput),
THCIndexTensor_(data)(state, indices),
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
THCTensor_(data)(state, gradInput));
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, gradOutput);
// clean
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
#endif
| 6a5cc12dcd7e513aa1b252c5a88c8f154ec33d5c.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialDilatedMaxPooling.cu"
#else
#include "../common.h"
static inline void THNN_(SpatialDilatedMaxPooling_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *gradOutput, THCIndexTensor *indices,
int kH, int kW, int dH, int dW, int padH, int padW,
int dilationH, int dilationW, bool ceil_mode) {
THArgCheck(kW > 0 && kH > 0, 5,
"kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
THArgCheck(dW > 0 && dH > 0, 8,
"stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
THArgCheck(dilationH > 0 && dilationW > 0, 12,
"dilation should be greater than zero, but got dilationH: %d dilationW: %d",
dilationH, dilationW);
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
int batchSize = 1;
if (ndim == 4) {
batchSize = input->size[0];
dimf++;
dimh++;
dimw++;
}
THCUNN_argCheck(state, !input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
"non-empty 3D or 4D input tensor expected but got: %s");
THArgCheck(kW/2 >= padW && kH/2 >= padH, 2,
"pad should be smaller than half of kernel size, but got "
"padW = %d, padH = %d, kW = %d, kH = %d",
padW, padH, kW, kH);
int64_t nInputPlane = input->size[dimh-1];
int64_t nInputRows = input->size[dimh];
int64_t nInputCols = input->size[dimw];
int64_t nOutputRows, nOutputCols;
int64_t nOutputPlane = nInputPlane;
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
if (nOutputCols < 1 || nOutputRows < 1)
THError("Given input size: (%dx%dx%d). "
"Calculated output size: (%dx%dx%d). Output size is too small",
nInputPlane,nInputRows,nInputCols,nInputPlane,nOutputRows,nOutputCols);
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
THCUNN_check_dim_size(state, gradOutput, ndim, dimh, nOutputRows);
THCUNN_check_dim_size(state, gradOutput, ndim, dimw, nOutputCols);
}
if (indices != NULL) {
THCUNN_check_dim_size_indices(state, indices, 4, 0, batchSize);
THCUNN_check_dim_size_indices(state, indices, 4, 1, nOutputPlane);
THCUNN_check_dim_size_indices(state, indices, 4, 2, nOutputRows);
THCUNN_check_dim_size_indices(state, indices, 4, 3, nOutputCols);
}
}
void THNN_(SpatialDilatedMaxPooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCIndexTensor *indices,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH,
bool ceil_mode)
{
THCUNN_assertSameGPU(state, 3, input, output, indices);
THNN_(SpatialDilatedMaxPooling_shapeCheck)
(state, input, NULL, NULL, kH, kW, dH, dW,
padH, padW, dilationH, dilationW, ceil_mode);
int64_t nInputCols, nInputRows, nInputPlane, batchSize;
int64_t nOutputCols, nOutputRows;
if (input->dim() == 3) {
nInputCols = input->size[2];
nInputRows = input->size[1];
nInputPlane = input->size[0];
batchSize = 1;
}
else
{
nInputCols = input->size[3];
nInputRows = input->size[2];
nInputPlane = input->size[1];
batchSize = input->size[0];
}
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
input = THCTensor_(newContiguous)(state, input);
real* input_data = THCTensor_(data)(state, input);
THCTensor_(resize4d)(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols);
THCUNN_resizeAs_indices(state, indices, output);
THCIndex_t* indices_data = THCIndexTensor_(data)(state, indices);
real* output_data = THCTensor_(data)(state, output);
int count = THCTensor_(nElement)(state, output);
MaxPoolForward<real, accreal> <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>
(count, input_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, dilationH, dilationW, output_data, indices_data);
THCudaCheck(cudaGetLastError());
if(input->dim() == 3)
THCTensor_(resize3d)(state, output, nInputPlane, nOutputRows, nOutputCols);
THCTensor_(free)(state, input);
}
void THNN_(SpatialDilatedMaxPooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCIndexTensor *indices,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH,
bool ceil_mode)
{
THCUNN_assertSameGPU(state, 4, input, gradOutput, indices, gradInput);
THNN_(SpatialDilatedMaxPooling_shapeCheck)
(state, input, gradOutput, indices, kH, kW, dH, dW,
padH, padW, dilationH, dilationW, ceil_mode);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int64_t nInputCols, nInputRows, nInputPlane, batchSize;
int64_t nOutputCols, nOutputRows;
if (input->_dim() == 3) {
nInputCols = input->size[2];
nInputRows = input->size[1];
nInputPlane = input->size[0];
batchSize = 1;
}
else
{
nInputCols = input->size[3];
nInputRows = input->size[2];
nInputPlane = input->size[1];
batchSize = input->size[0];
}
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
int count = THCTensor_(nElement)(state, input);
dim3 grid;
int imgcount = nInputCols * nInputRows;
const int blocks = (imgcount + BACKWARD_THREADS - 1) / BACKWARD_THREADS;
grid.x = blocks;
grid.y = batchSize;
grid.z = nInputPlane;
uint64_t maxGridY = THCState_getCurrentDeviceProperties(state)->maxGridSize[1];
uint64_t maxGridZ = THCState_getCurrentDeviceProperties(state)->maxGridSize[2];
if (maxGridY < grid.y) grid.y = maxGridY;
if (maxGridZ < grid.z) grid.z = maxGridZ;
MaxPoolBackward<real, accreal> <<< grid, BACKWARD_THREADS, 0, THCState_getCurrentStream(state) >>>
(count,
THCTensor_(data)(state, gradOutput),
THCIndexTensor_(data)(state, indices),
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
THCTensor_(data)(state, gradInput));
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, gradOutput);
// clean
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
#endif
|
c987e29528482cfc993ab1f91c3740c62116c72f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "data.h"
#include "utils.h"
#include "parser.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
#include "gru_layer.h"
#include "crnn_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
#include "normalization_layer.h"
#include "batchnorm_layer.h"
#include "cost_layer.h"
#include "local_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "shortcut_layer.h"
#include "blas.h"
}
void forward_network_gpu(network net)
{
int i;
for(i = 0; i < net.n; ++i){
net.index = i;
layer l = net.layers[i];
if(l.delta_gpu){
fill_gpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
}
l.forward_gpu(l, net);
net.input_gpu = l.output_gpu;
net.input = l.output;
if(l.truth) {
net.truth_gpu = l.output_gpu;
net.truth = l.output;
}
}
pull_network_output(net);
calc_network_cost(net);
}
void backward_network_gpu(network net)
{
int i;
network orig = net;
for(i = net.n-1; i >= 0; --i){
layer l = net.layers[i];
if(l.stopbackward) break;
if(i == 0){
net = orig;
}else{
layer prev = net.layers[i-1];
net.input = prev.output;
net.delta = prev.delta;
net.input_gpu = prev.output_gpu;
net.delta_gpu = prev.delta_gpu;
}
net.index = i;
l.backward_gpu(l, net);
}
}
void update_network_gpu(network net)
{
cuda_set_device(net.gpu_index);
int i;
update_args a = {0};
a.batch = net.batch*net.subdivisions;
a.learning_rate = get_current_rate(net);
a.momentum = net.momentum;
a.decay = net.decay;
a.adam = net.adam;
a.B1 = net.B1;
a.B2 = net.B2;
a.eps = net.eps;
++*net.t;
a.t = (*net.t);
for(i = 0; i < net.n; ++i){
layer l = net.layers[i];
if(l.update_gpu){
l.update_gpu(l, a);
}
}
}
void harmless_update_network_gpu(network net)
{
cuda_set_device(net.gpu_index);
int i;
for(i = 0; i < net.n; ++i){
layer l = net.layers[i];
if(l.weight_updates_gpu) fill_gpu(l.nweights, 0, l.weight_updates_gpu, 1);
if(l.bias_updates_gpu) fill_gpu(l.nbiases, 0, l.bias_updates_gpu, 1);
if(l.scale_updates_gpu) fill_gpu(l.nbiases, 0, l.scale_updates_gpu, 1);
}
}
float train_network_datum_gpu(network net)
{
*net.seen += net.batch;
int x_size = net.inputs*net.batch;
int y_size = net.truths*net.batch;
cuda_push_array(net.input_gpu, net.input, x_size);
cuda_push_array(net.truth_gpu, net.truth, y_size);
net.train = 1;
forward_network_gpu(net);
backward_network_gpu(net);
float error = *net.cost;
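// Gradients accumulate across net.subdivisions mini-batches; the weights are
// updated only once per effective batch of batch*subdivisions samples.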
if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net);
return error;
}
typedef struct {
network net;
data d;
float *err;
} train_args;
void *train_thread(void *ptr)
{
train_args args = *(train_args*)ptr;
free(ptr);
cuda_set_device(args.net.gpu_index);
*args.err = train_network(args.net, args.d);
return 0;
}
pthread_t train_network_in_thread(network net, data d, float *err)
{
pthread_t thread;
train_args *ptr = (train_args *)calloc(1, sizeof(train_args));
ptr->net = net;
ptr->d = d;
ptr->err = err;
if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed");
return thread;
}
void merge_weights(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.bias_updates, 1, base.biases, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weights, 1);
if (l.scales) {
axpy_cpu(l.n, 1, l.scale_updates, 1, base.scales, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.biases, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weights, 1);
}
}
void scale_weights(layer l, float s)
{
if (l.type == CONVOLUTIONAL) {
scal_cpu(l.n, s, l.biases, 1);
scal_cpu(l.n*l.size*l.size*l.c, s, l.weights, 1);
if (l.scales) {
scal_cpu(l.n, s, l.scales, 1);
}
} else if(l.type == CONNECTED) {
scal_cpu(l.outputs, s, l.biases, 1);
scal_cpu(l.outputs*l.inputs, s, l.weights, 1);
}
}
void pull_weights(layer l)
{
if(l.type == CONVOLUTIONAL || l.type == DECONVOLUTIONAL){
cuda_pull_array(l.biases_gpu, l.bias_updates, l.n);
cuda_pull_array(l.weights_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scales) cuda_pull_array(l.scales_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.biases_gpu, l.bias_updates, l.outputs);
cuda_pull_array(l.weights_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void push_weights(layer l)
{
if(l.type == CONVOLUTIONAL || l.type == DECONVOLUTIONAL){
cuda_push_array(l.biases_gpu, l.biases, l.n);
cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, l.biases, l.outputs);
cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void distribute_weights(layer l, layer base)
{
if (l.type == CONVOLUTIONAL || l.type == DECONVOLUTIONAL) {
cuda_push_array(l.biases_gpu, base.biases, l.n);
cuda_push_array(l.weights_gpu, base.weights, l.n*l.size*l.size*l.c);
if (base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n);
} else if (l.type == CONNECTED) {
cuda_push_array(l.biases_gpu, base.biases, l.outputs);
cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs);
}
}
/*
void pull_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void push_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void update_layer(layer l, network net)
{
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate*l.learning_rate_scale, net.momentum, net.decay);
}
}
void merge_updates(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weight_updates, 1);
if (l.scale_updates) {
axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1);
}
}
void distribute_updates(layer l, layer base)
{
if(l.type == CONVOLUTIONAL || l.type == DECONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c);
if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs);
}
}
*/
/*
void sync_layer(network *nets, int n, int j)
{
int i;
network net = nets[0];
layer base = net.layers[j];
scale_weights(base, 0);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
pull_weights(l);
merge_weights(l, base);
}
scale_weights(base, 1./n);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
distribute_weights(l, base);
}
}
*/
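// Average layer j's weights across all replicas: pull each GPU's copy to the
// host, sum into base, rescale by 1/n, then push the averaged weights back to
// every GPU.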
void sync_layer(network *nets, int n, int j)
{
int i;
network net = nets[0];
layer base = net.layers[j];
scale_weights(base, 0);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
pull_weights(l);
merge_weights(l, base);
}
scale_weights(base, 1./n);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
distribute_weights(l, base);
}
}
typedef struct{
network *nets;
int n;
int j;
} sync_args;
void *sync_layer_thread(void *ptr)
{
sync_args args = *(sync_args*)ptr;
sync_layer(args.nets, args.n, args.j);
free(ptr);
return 0;
}
pthread_t sync_layer_in_thread(network *nets, int n, int j)
{
pthread_t thread;
sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args));
ptr->nets = nets;
ptr->n = n;
ptr->j = j;
if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed");
return thread;
}
void sync_nets(network *nets, int n, int interval)
{
int j;
int layers = nets[0].n;
pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t));
*nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions;
for (j = 0; j < n; ++j){
*nets[j].seen = *nets[0].seen;
}
for (j = 0; j < layers; ++j) {
threads[j] = sync_layer_in_thread(nets, n, j);
}
for (j = 0; j < layers; ++j) {
pthread_join(threads[j], 0);
}
free(threads);
}
float train_networks(network *nets, int n, data d, int interval)
{
int i;
int batch = nets[0].batch;
int subdivisions = nets[0].subdivisions;
assert(batch * subdivisions * n == d.X.rows);
pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
float *errors = (float *) calloc(n, sizeof(float));
float sum = 0;
for(i = 0; i < n; ++i){
data p = get_data_part(d, i, n);
threads[i] = train_network_in_thread(nets[i], p, errors + i);
}
for(i = 0; i < n; ++i){
pthread_join(threads[i], 0);
//printf("%f\n", errors[i]);
sum += errors[i];
}
//hipDeviceSynchronize();
if (get_current_batch(nets[0]) % interval == 0) {
printf("Syncing... ");
fflush(stdout);
sync_nets(nets, n, interval);
printf("Done!\n");
}
//hipDeviceSynchronize();
free(threads);
free(errors);
return (float)sum/(n);
}
void pull_network_output(network net)
{
layer l = get_network_output_layer(net);
cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
}
float *network_predict_gpu(network net, float *input)
{
cuda_set_device(net.gpu_index);
cuda_push_array(net.input_gpu, input, net.inputs*net.batch);
net.truth = 0;
net.train = 0;
forward_network_gpu(net);
return net.output;
}
| c987e29528482cfc993ab1f91c3740c62116c72f.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "data.h"
#include "utils.h"
#include "parser.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
#include "gru_layer.h"
#include "crnn_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
#include "normalization_layer.h"
#include "batchnorm_layer.h"
#include "cost_layer.h"
#include "local_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "shortcut_layer.h"
#include "blas.h"
}
void forward_network_gpu(network net)
{
int i;
for(i = 0; i < net.n; ++i){
net.index = i;
layer l = net.layers[i];
if(l.delta_gpu){
fill_gpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
}
l.forward_gpu(l, net);
net.input_gpu = l.output_gpu;
net.input = l.output;
if(l.truth) {
net.truth_gpu = l.output_gpu;
net.truth = l.output;
}
}
pull_network_output(net);
calc_network_cost(net);
}
void backward_network_gpu(network net)
{
int i;
network orig = net;
for(i = net.n-1; i >= 0; --i){
layer l = net.layers[i];
if(l.stopbackward) break;
if(i == 0){
net = orig;
}else{
layer prev = net.layers[i-1];
net.input = prev.output;
net.delta = prev.delta;
net.input_gpu = prev.output_gpu;
net.delta_gpu = prev.delta_gpu;
}
net.index = i;
l.backward_gpu(l, net);
}
}
void update_network_gpu(network net)
{
cuda_set_device(net.gpu_index);
int i;
update_args a = {0};
a.batch = net.batch*net.subdivisions;
a.learning_rate = get_current_rate(net);
a.momentum = net.momentum;
a.decay = net.decay;
a.adam = net.adam;
a.B1 = net.B1;
a.B2 = net.B2;
a.eps = net.eps;
++*net.t;
a.t = (*net.t);
for(i = 0; i < net.n; ++i){
layer l = net.layers[i];
if(l.update_gpu){
l.update_gpu(l, a);
}
}
}
void harmless_update_network_gpu(network net)
{
cuda_set_device(net.gpu_index);
int i;
for(i = 0; i < net.n; ++i){
layer l = net.layers[i];
if(l.weight_updates_gpu) fill_gpu(l.nweights, 0, l.weight_updates_gpu, 1);
if(l.bias_updates_gpu) fill_gpu(l.nbiases, 0, l.bias_updates_gpu, 1);
if(l.scale_updates_gpu) fill_gpu(l.nbiases, 0, l.scale_updates_gpu, 1);
}
}
float train_network_datum_gpu(network net)
{
*net.seen += net.batch;
int x_size = net.inputs*net.batch;
int y_size = net.truths*net.batch;
cuda_push_array(net.input_gpu, net.input, x_size);
cuda_push_array(net.truth_gpu, net.truth, y_size);
net.train = 1;
forward_network_gpu(net);
backward_network_gpu(net);
float error = *net.cost;
if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net);
return error;
}
typedef struct {
network net;
data d;
float *err;
} train_args;
void *train_thread(void *ptr)
{
train_args args = *(train_args*)ptr;
free(ptr);
cuda_set_device(args.net.gpu_index);
*args.err = train_network(args.net, args.d);
return 0;
}
pthread_t train_network_in_thread(network net, data d, float *err)
{
pthread_t thread;
train_args *ptr = (train_args *)calloc(1, sizeof(train_args));
ptr->net = net;
ptr->d = d;
ptr->err = err;
if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed");
return thread;
}
void merge_weights(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.bias_updates, 1, base.biases, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weights, 1);
if (l.scales) {
axpy_cpu(l.n, 1, l.scale_updates, 1, base.scales, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.biases, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weights, 1);
}
}
void scale_weights(layer l, float s)
{
if (l.type == CONVOLUTIONAL) {
scal_cpu(l.n, s, l.biases, 1);
scal_cpu(l.n*l.size*l.size*l.c, s, l.weights, 1);
if (l.scales) {
scal_cpu(l.n, s, l.scales, 1);
}
} else if(l.type == CONNECTED) {
scal_cpu(l.outputs, s, l.biases, 1);
scal_cpu(l.outputs*l.inputs, s, l.weights, 1);
}
}
void pull_weights(layer l)
{
if(l.type == CONVOLUTIONAL || l.type == DECONVOLUTIONAL){
cuda_pull_array(l.biases_gpu, l.bias_updates, l.n);
cuda_pull_array(l.weights_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scales) cuda_pull_array(l.scales_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.biases_gpu, l.bias_updates, l.outputs);
cuda_pull_array(l.weights_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void push_weights(layer l)
{
if(l.type == CONVOLUTIONAL || l.type == DECONVOLUTIONAL){
cuda_push_array(l.biases_gpu, l.biases, l.n);
cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, l.biases, l.outputs);
cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void distribute_weights(layer l, layer base)
{
if (l.type == CONVOLUTIONAL || l.type == DECONVOLUTIONAL) {
cuda_push_array(l.biases_gpu, base.biases, l.n);
cuda_push_array(l.weights_gpu, base.weights, l.n*l.size*l.size*l.c);
if (base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n);
} else if (l.type == CONNECTED) {
cuda_push_array(l.biases_gpu, base.biases, l.outputs);
cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs);
}
}
/*
void pull_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void push_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void update_layer(layer l, network net)
{
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate*l.learning_rate_scale, net.momentum, net.decay);
}
}
void merge_updates(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weight_updates, 1);
if (l.scale_updates) {
axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1);
}
}
void distribute_updates(layer l, layer base)
{
if(l.type == CONVOLUTIONAL || l.type == DECONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c);
if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs);
}
}
*/
/*
void sync_layer(network *nets, int n, int j)
{
int i;
network net = nets[0];
layer base = net.layers[j];
scale_weights(base, 0);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
pull_weights(l);
merge_weights(l, base);
}
scale_weights(base, 1./n);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
distribute_weights(l, base);
}
}
*/
void sync_layer(network *nets, int n, int j)
{
int i;
network net = nets[0];
layer base = net.layers[j];
scale_weights(base, 0);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
pull_weights(l);
merge_weights(l, base);
}
scale_weights(base, 1./n);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
distribute_weights(l, base);
}
}
typedef struct{
network *nets;
int n;
int j;
} sync_args;
void *sync_layer_thread(void *ptr)
{
sync_args args = *(sync_args*)ptr;
sync_layer(args.nets, args.n, args.j);
free(ptr);
return 0;
}
pthread_t sync_layer_in_thread(network *nets, int n, int j)
{
pthread_t thread;
sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args));
ptr->nets = nets;
ptr->n = n;
ptr->j = j;
if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed");
return thread;
}
void sync_nets(network *nets, int n, int interval)
{
int j;
int layers = nets[0].n;
pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t));
*nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions;
for (j = 0; j < n; ++j){
*nets[j].seen = *nets[0].seen;
}
for (j = 0; j < layers; ++j) {
threads[j] = sync_layer_in_thread(nets, n, j);
}
for (j = 0; j < layers; ++j) {
pthread_join(threads[j], 0);
}
free(threads);
}
float train_networks(network *nets, int n, data d, int interval)
{
int i;
int batch = nets[0].batch;
int subdivisions = nets[0].subdivisions;
assert(batch * subdivisions * n == d.X.rows);
pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
float *errors = (float *) calloc(n, sizeof(float));
float sum = 0;
for(i = 0; i < n; ++i){
data p = get_data_part(d, i, n);
threads[i] = train_network_in_thread(nets[i], p, errors + i);
}
for(i = 0; i < n; ++i){
pthread_join(threads[i], 0);
//printf("%f\n", errors[i]);
sum += errors[i];
}
//cudaDeviceSynchronize();
if (get_current_batch(nets[0]) % interval == 0) {
printf("Syncing... ");
fflush(stdout);
sync_nets(nets, n, interval);
printf("Done!\n");
}
//cudaDeviceSynchronize();
free(threads);
free(errors);
return (float)sum/(n);
}
void pull_network_output(network net)
{
layer l = get_network_output_layer(net);
cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
}
float *network_predict_gpu(network net, float *input)
{
cuda_set_device(net.gpu_index);
cuda_push_array(net.input_gpu, input, net.inputs*net.batch);
net.truth = 0;
net.train = 0;
forward_network_gpu(net);
return net.output;
}
|
17a8551b854825a3bbc60e1cf19325f8b9ed0d50.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "upscale.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
long no_elements = 1;
int scale_factor_t = 2;
int scale_factor_xy = 2;
int d1 = 2;
int d2 = 2;
int d3 = 2;
int d4 = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((upscale), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,no_elements,scale_factor_t,scale_factor_xy,d1,d2,d3,d4);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((upscale), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,no_elements,scale_factor_t,scale_factor_xy,d1,d2,d3,d4);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((upscale), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,no_elements,scale_factor_t,scale_factor_xy,d1,d2,d3,d4);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 17a8551b854825a3bbc60e1cf19325f8b9ed0d50.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "upscale.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
long no_elements = 1;
int scale_factor_t = 2;
int scale_factor_xy = 2;
int d1 = 2;
int d2 = 2;
int d3 = 2;
int d4 = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
upscale<<<gridBlock,threadBlock>>>(input,output,no_elements,scale_factor_t,scale_factor_xy,d1,d2,d3,d4);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
upscale<<<gridBlock,threadBlock>>>(input,output,no_elements,scale_factor_t,scale_factor_xy,d1,d2,d3,d4);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
upscale<<<gridBlock,threadBlock>>>(input,output,no_elements,scale_factor_t,scale_factor_xy,d1,d2,d3,d4);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
375b2e2cb95927bf93d6714d04d0fc8ddf0937e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "lab2.h"
#include "cmath"
#include "stdlib.h"
#include <iostream>
#include "PerlinNoise.h"
#include <random>
#include <algorithm>
#include <time.h>
static const unsigned W = 640;
static const unsigned H = 480;
static const unsigned NFRAME = 240;
PerlinNoise::PerlinNoise() {
// Initialize the permutation vector with the reference values
p = {
151,160,137,91,90,15,131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,
8,99,37,240,21,10,23,190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,
35,11,32,57,177,33,88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,
134,139,48,27,166,77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,
55,46,245,40,244,102,143,54, 65,25,63,161,1,216,80,73,209,76,132,187,208, 89,
18,169,200,196,135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,
250,124,123,5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,
189,28,42,223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167,
43,172,9,129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,
97,228,251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,
107,49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254,
138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180 };
// Duplicate the permutation vector
p.insert(p.end(), p.begin(), p.end());
}
PerlinNoise::PerlinNoise(unsigned int seed) {
p.resize(256);
// Fill p with values from 0 to 255
int indx = 0;
std::generate(p.begin(), p.end(), [&indx] {return indx++;});
// Initialize a random engine with seed
std::default_random_engine engine(seed);
// Shuffle using the above random engine
std::shuffle(p.begin(), p.end(), engine);
// Duplicate the permutation vector
p.insert(p.end(), p.begin(), p.end());
}
double PerlinNoise::noise(double x, double y, double z) {
// Find the unit cube that contains the point
int X = (int) floor(x) & 255;
int Y = (int) floor(y) & 255;
int Z = (int) floor(z) & 255;
// Find relative x, y,z of point in cube
x -= floor(x);
y -= floor(y);
z -= floor(z);
// Compute fade curves for each of x, y, z
double u = fade(x);
double v = fade(y);
double w = fade(z);
// Hash coordinates of the 8 cube corners
int A = p[X] + Y;
int AA = p[A] + Z;
int AB = p[A + 1] + Z;
int B = p[X + 1] + Y;
int BA = p[B] + Z;
int BB = p[B + 1] + Z;
// Add blended results from 8 corners of cube
double res = lerp(w, lerp(v, lerp(u, grad(p[AA], x, y, z), grad(p[BA], x-1, y, z)), lerp(u, grad(p[AB], x, y-1, z), grad(p[BB], x-1, y-1, z))), lerp(v, lerp(u, grad(p[AA+1], x, y, z-1), grad(p[BA+1], x-1, y, z-1)), lerp(u, grad(p[AB+1], x, y-1, z-1), grad(p[BB+1], x-1, y-1, z-1))));
return (res + 1.0)/2.0;
}
double PerlinNoise::fade(double t) {
return t * t * t * (t * (t * 6 - 15) + 10);
}
double PerlinNoise::lerp(double t, double a, double b) {
return a + t * (b - a);
}
double PerlinNoise::grad(int hash, double x, double y, double z) {
int h = hash & 15;
// Convert lower 4 bits of hash into 12 gradient directions
double u = h < 8 ? x : y,
v = h < 4 ? y : h == 12 || h == 14 ? x : z;
return ((h & 1) == 0 ? u : -u) + ((h & 2) == 0 ? v : -v);
}
struct Lab2VideoGenerator::Impl {
int t = 0;
};
Lab2VideoGenerator::Lab2VideoGenerator(): impl(new Impl) {
}
Lab2VideoGenerator::~Lab2VideoGenerator() {}
void Lab2VideoGenerator::get_info(Lab2VideoInfo &info) {
info.w = W;
info.h = H;
info.n_frame = NFRAME;
// fps = 24/1 = 24
info.fps_n = 24;
info.fps_d = 1;
};
void Lab2VideoGenerator::Generate(uint8_t *yuv) {
unsigned int seed = rand() % 255;
PerlinNoise pn(seed);
hipMemset(yuv, (impl->t)*255/NFRAME, W*H);
int i, j ;
for(i=0;i<W*H/2;i++){
// j = i % 256; // purple and green
// j = i % 128; // purple and green
double x = (double)i/((double)W);
double y = (double)i/((double)H);
double n = 20 * pn.noise(x,y,0.8);
n = n - floor(n);
//double n = pn.noise(10 * i, 10 * i, 0.8);
hipMemset(yuv+W*H+i, floor(n*255), 1);
}
//hipMemset(yuv+W*H, n, W*H/2); // changes from green to gray to pink
//hipMemset(yuv+W*H, 128, W*H/2); // grayscale
++(impl->t);
}
| 375b2e2cb95927bf93d6714d04d0fc8ddf0937e1.cu | #include "lab2.h"
#include "cmath"
#include "stdlib.h"
#include <iostream>
#include "PerlinNoise.h"
#include <random>
#include <algorithm>
#include <time.h>
static const unsigned W = 640;
static const unsigned H = 480;
static const unsigned NFRAME = 240;
PerlinNoise::PerlinNoise() {
// Initialize the permutation vector with the reference values
p = {
151,160,137,91,90,15,131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,
8,99,37,240,21,10,23,190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,
35,11,32,57,177,33,88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,
134,139,48,27,166,77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,
55,46,245,40,244,102,143,54, 65,25,63,161,1,216,80,73,209,76,132,187,208, 89,
18,169,200,196,135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,
250,124,123,5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,
189,28,42,223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167,
43,172,9,129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,
97,228,251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,
107,49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254,
138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180 };
// Duplicate the permutation vector
p.insert(p.end(), p.begin(), p.end());
}
PerlinNoise::PerlinNoise(unsigned int seed) {
p.resize(256);
// Fill p with values from 0 to 255
int indx = 0;
std::generate(p.begin(), p.end(), [&indx] {return indx++;});
// Initialize a random engine with seed
std::default_random_engine engine(seed);
// Shuffle using the above random engine
std::shuffle(p.begin(), p.end(), engine);
// Duplicate the permutation vector
p.insert(p.end(), p.begin(), p.end());
}
double PerlinNoise::noise(double x, double y, double z) {
// Find the unit cube that contains the point
int X = (int) floor(x) & 255;
int Y = (int) floor(y) & 255;
int Z = (int) floor(z) & 255;
// Find relative x, y,z of point in cube
x -= floor(x);
y -= floor(y);
z -= floor(z);
// Compute fade curves for each of x, y, z
double u = fade(x);
double v = fade(y);
double w = fade(z);
// Hash coordinates of the 8 cube corners
int A = p[X] + Y;
int AA = p[A] + Z;
int AB = p[A + 1] + Z;
int B = p[X + 1] + Y;
int BA = p[B] + Z;
int BB = p[B + 1] + Z;
// Add blended results from 8 corners of cube
double res = lerp(w, lerp(v, lerp(u, grad(p[AA], x, y, z), grad(p[BA], x-1, y, z)), lerp(u, grad(p[AB], x, y-1, z), grad(p[BB], x-1, y-1, z))), lerp(v, lerp(u, grad(p[AA+1], x, y, z-1), grad(p[BA+1], x-1, y, z-1)), lerp(u, grad(p[AB+1], x, y-1, z-1), grad(p[BB+1], x-1, y-1, z-1))));
return (res + 1.0)/2.0;
}
double PerlinNoise::fade(double t) {
return t * t * t * (t * (t * 6 - 15) + 10);
}
double PerlinNoise::lerp(double t, double a, double b) {
return a + t * (b - a);
}
double PerlinNoise::grad(int hash, double x, double y, double z) {
int h = hash & 15;
// Convert lower 4 bits of hash into 12 gradient directions
double u = h < 8 ? x : y,
v = h < 4 ? y : h == 12 || h == 14 ? x : z;
return ((h & 1) == 0 ? u : -u) + ((h & 2) == 0 ? v : -v);
}
struct Lab2VideoGenerator::Impl {
int t = 0;
};
Lab2VideoGenerator::Lab2VideoGenerator(): impl(new Impl) {
}
Lab2VideoGenerator::~Lab2VideoGenerator() {}
void Lab2VideoGenerator::get_info(Lab2VideoInfo &info) {
info.w = W;
info.h = H;
info.n_frame = NFRAME;
// fps = 24/1 = 24
info.fps_n = 24;
info.fps_d = 1;
};
void Lab2VideoGenerator::Generate(uint8_t *yuv) {
unsigned int seed = rand() % 255;
PerlinNoise pn(seed);
cudaMemset(yuv, (impl->t)*255/NFRAME, W*H);
int i, j ;
for(i=0;i<W*H/2;i++){
// j = i % 256; // purple and green
// j = i % 128; // purple and green
double x = (double)i/((double)W);
double y = (double)i/((double)H);
double n = 20 * pn.noise(x,y,0.8);
n = n - floor(n);
//double n = pn.noise(10 * i, 10 * i, 0.8);
cudaMemset(yuv+W*H+i, floor(n*255), 1);
}
//cudaMemset(yuv+W*H, n, W*H/2); // changes from green to gray to pink
//cudaMemset(yuv+W*H, 128, W*H/2); // grayscale
++(impl->t);
}
|
26df554b6400a474cf83ec71d86e76489cf25268.hip | // !!! This is a file automatically generated by hipify!!!
/*! @file fdtd.cu
@brief This is the entry point of the file.
*/
#include "fdtd.h"
#include "hip/hip_runtime.h"
#include "cpu_anim.h"
#include "helper_cuda.h"
#include "helper_functions.h"
#include "h5save.h"
#include<stdio.h>
#include<pthread.h>
#include "datablock.h"
#include "kernels_hip.cuh"
#include "constants.h"
#include <thrust/fill.h>
#include<algorithm>
#include "tm_mode.h"
#include "pml_mode.h"
#include "drude_mode.h"
#include<fstream>
#include<assert.h>
#include<string>
#include "common_functions.h"
using namespace std;
pthread_mutex_t mutexcopy;
/** @brief Calls the gpu kernels in order.
* Different types of simulation.
*/
void anim_gpu(Datablock *d, int ticks){
if(d->simulationType == TM_SIMULATION)
anim_gpu_tm(d, ticks);
else if(d->simulationType == TM_PML_SIMULATION)
anim_gpu_pml_tm(d, ticks);
else if(d->simulationType == DRUDE_SIMULATION)
anim_gpu_drude(d, ticks);
}
/*! @brief Clears memory when the simulation is done. */
void anim_exit(Datablock *d){
if(d->simulationType == TM_SIMULATION)
clear_memory_TM_simulation(d);
else if(d->simulationType == TM_PML_SIMULATION)
clear_memory_TM_PML_simulation(d);
else if(d->simulationType == DRUDE_SIMULATION)
clear_memory_drude_simulation(d);
}
/*! @brief Allocates memory for the simulation depending on the type
of the simulation.
*/
size_t allocate_memory(Datablock *data, Structure structure){
if(data->simulationType == TM_SIMULATION)
return allocateTMMemory(data, structure);
else if(data->simulationType == TM_PML_SIMULATION)
return tm_pml_allocate_memory(data, structure);
else if(data->simulationType == DRUDE_SIMULATION)
return allocate_drude_memory(data, structure);
return 0;
}
/*! @brief Initializes the memory for simulation.*/
void initializeArrays(Datablock *data, Structure structure, ifstream &fs){
if(data->simulationType == TM_SIMULATION)
initialize_TM_arrays(data, structure, fs);
else if(data->simulationType == TM_PML_SIMULATION)
tm_pml_initialize_arrays(data, structure, fs);
else if(data->simulationType == DRUDE_SIMULATION)
initialize_drude_arrays(data, structure);
}
/*! @brief Clears all the constants initially declared.
This method is called once all the coefficients are
calculated.
*/
void clear_memory_constants(Datablock *data){
if(data->simulationType == TM_SIMULATION)
tm_clear_memory_constants(data);
else if(data->simulationType == TM_PML_SIMULATION)
tm_pml_clear_memory_constants(data);
else if(data->simulationType == DRUDE_SIMULATION)
drude_clear_memory_constants(data);
}
/*!
@brief Calculates the coefficients for each simulation.
*/
void calculate_coefficients(Datablock *data, Structure structure){
dim3 blocks((structure.x_index_dim + BLOCKSIZE_X - 1) / BLOCKSIZE_X,
(structure.y_index_dim + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y);
dim3 threads(BLOCKSIZE_X, BLOCKSIZE_Y);
if(data->simulationType == TM_SIMULATION)
hipLaunchKernelGGL(( tm_getcoeff), dim3(blocks), dim3(threads), 0, 0, data->constants[MUINDEX],
data->constants[EPSINDEX],
data->constants[SIGMAINDEX],
data->constants[SIGMA_STAR_INDEX],
data->coefs[0],
data->coefs[1],
data->coefs[2],
data->coefs[3]
);
else if(data->simulationType == TM_PML_SIMULATION)
hipLaunchKernelGGL(( pml_tm_get_coefs), dim3(blocks), dim3(threads), 0, 0, data->constants[MUINDEX],
data->constants[EPSINDEX],
data->constants[SIGMAINDEX_X],
data->constants[SIGMAINDEX_Y],
data->constants[SIGMA_STAR_INDEX_X],
data->constants[SIGMA_STAR_INDEX_Y],
data->coefs[0],
data->coefs[1],
data->coefs[2],
data->coefs[3],
data->coefs[4],
data->coefs[5],
data->coefs[6],
data->coefs[7]);
else if(data->simulationType == DRUDE_SIMULATION)
hipLaunchKernelGGL(( drude_get_coefs), dim3(blocks), dim3(threads), 0, 0, data->constants[MUINDEX],
data->constants[EPSINDEX],
data->constants[SIGMAINDEX],
data->constants[SIGMA_STAR_INDEX],
data->constants[GAMMA_INDEX],
data->constants[OMEGAP_INDEX],
data->coefs[0],
data->coefs[1],
data->coefs[2],
data->coefs[3],
data->coefs[4],
data->coefs[5],
data->coefs[6]
);
}
/*! @brief entry point */
int main(int argc, char **argv){
assert(argc == 2);
ifstream fs;
fs.open(argv[1]);
assert(fs.is_open());
string simulation_name;
fs>>simulation_name;
int simulation_type;
fs>>simulation_type;
Datablock data(simulation_type);
data.simulation_name = simulation_name;
float dx;
fs>>dx;
float courant = 0.5;
float dt = courant * dx / LIGHTSPEED;
printf("dt = %f", dt);
int xdim, ydim;
fs>>xdim>>ydim;
Structure structure(xdim, ydim, dx, dt);
CPUAnimBitmap bitmap(structure.x_index_dim, structure.x_index_dim,
&data); /* bitmap structure */
data.bitmap = &bitmap;
data.totalTime = 0;
data.frames = 0;
checkCudaErrors(hipEventCreate(&data.start, 1) );
checkCudaErrors(hipEventCreate(&data.stop, 1) );
size_t pitch;
pitch = allocate_memory(&data, structure);
structure.pitch = pitch;
copy_symbols(&structure);
printf("pitch = %d", pitch);
data.structure = &structure;
initializeArrays(&data, structure, fs);
// get the coefficients
calculate_coefficients(&data, structure);
clear_memory_constants(&data);
// set the sources
HostSources host_sources;
DeviceSources device_sources;
long long x, y, source_type;
float mean, variance;
while(!fs.eof()){
fs>>x>>y>>source_type>>mean>>variance;
cout<<mean<<endl;
host_sources.add_source(x, y, source_type, mean, variance);
}
data.sources = &device_sources;
copy_sources_device_to_host(&host_sources, &device_sources);
pthread_mutex_init(&mutexcopy, NULL);
for(long i=0; i < 3; i++){
anim_gpu(&data, 0);
}
}
| 26df554b6400a474cf83ec71d86e76489cf25268.cu | /*! @file fdtd.cu
@brief This is the entry point of the file.
*/
#include "fdtd.h"
#include "cuda.h"
#include "cpu_anim.h"
#include "helper_cuda.h"
#include "helper_functions.h"
#include "h5save.h"
#include<stdio.h>
#include<pthread.h>
#include "datablock.h"
#include "kernels.cuh"
#include "constants.h"
#include <thrust/fill.h>
#include<algorithm>
#include "tm_mode.h"
#include "pml_mode.h"
#include "drude_mode.h"
#include<fstream>
#include<assert.h>
#include<string>
#include "common_functions.h"
using namespace std;
pthread_mutex_t mutexcopy;
/** @brief Calls the gpu kernels in order.
* Different types of simulation.
*/
void anim_gpu(Datablock *d, int ticks){
if(d->simulationType == TM_SIMULATION)
anim_gpu_tm(d, ticks);
else if(d->simulationType == TM_PML_SIMULATION)
anim_gpu_pml_tm(d, ticks);
else if(d->simulationType == DRUDE_SIMULATION)
anim_gpu_drude(d, ticks);
}
/*! @brief Clears memory when the simulation is done. */
void anim_exit(Datablock *d){
if(d->simulationType == TM_SIMULATION)
clear_memory_TM_simulation(d);
else if(d->simulationType == TM_PML_SIMULATION)
clear_memory_TM_PML_simulation(d);
else if(d->simulationType == DRUDE_SIMULATION)
clear_memory_drude_simulation(d);
}
/*! @brief Allocates memory for the simulation depending on the type
of the simulation.
*/
size_t allocate_memory(Datablock *data, Structure structure){
if(data->simulationType == TM_SIMULATION)
return allocateTMMemory(data, structure);
else if(data->simulationType == TM_PML_SIMULATION)
return tm_pml_allocate_memory(data, structure);
else if(data->simulationType == DRUDE_SIMULATION)
return allocate_drude_memory(data, structure);
return 0;
}
/*! @brief Initializes the memory for simulation.*/
void initializeArrays(Datablock *data, Structure structure, ifstream &fs){
if(data->simulationType == TM_SIMULATION)
initialize_TM_arrays(data, structure, fs);
else if(data->simulationType == TM_PML_SIMULATION)
tm_pml_initialize_arrays(data, structure, fs);
else if(data->simulationType == DRUDE_SIMULATION)
initialize_drude_arrays(data, structure);
}
/*! @brief Clears all the constants initially declared.
This method is called once all the coefficients are
calculated.
*/
void clear_memory_constants(Datablock *data){
if(data->simulationType == TM_SIMULATION)
tm_clear_memory_constants(data);
else if(data->simulationType == TM_PML_SIMULATION)
tm_pml_clear_memory_constants(data);
else if(data->simulationType == DRUDE_SIMULATION)
drude_clear_memory_constants(data);
}
/*!
@brief Calculates the coefficients for each simulation.
*/
void calculate_coefficients(Datablock *data, Structure structure){
dim3 blocks((structure.x_index_dim + BLOCKSIZE_X - 1) / BLOCKSIZE_X,
(structure.y_index_dim + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y);
dim3 threads(BLOCKSIZE_X, BLOCKSIZE_Y);
if(data->simulationType == TM_SIMULATION)
tm_getcoeff<<<blocks, threads>>>(data->constants[MUINDEX],
data->constants[EPSINDEX],
data->constants[SIGMAINDEX],
data->constants[SIGMA_STAR_INDEX],
data->coefs[0],
data->coefs[1],
data->coefs[2],
data->coefs[3]
);
else if(data->simulationType == TM_PML_SIMULATION)
pml_tm_get_coefs<<<blocks, threads>>>(data->constants[MUINDEX],
data->constants[EPSINDEX],
data->constants[SIGMAINDEX_X],
data->constants[SIGMAINDEX_Y],
data->constants[SIGMA_STAR_INDEX_X],
data->constants[SIGMA_STAR_INDEX_Y],
data->coefs[0],
data->coefs[1],
data->coefs[2],
data->coefs[3],
data->coefs[4],
data->coefs[5],
data->coefs[6],
data->coefs[7]);
else if(data->simulationType == DRUDE_SIMULATION)
drude_get_coefs<<<blocks, threads>>>(data->constants[MUINDEX],
data->constants[EPSINDEX],
data->constants[SIGMAINDEX],
data->constants[SIGMA_STAR_INDEX],
data->constants[GAMMA_INDEX],
data->constants[OMEGAP_INDEX],
data->coefs[0],
data->coefs[1],
data->coefs[2],
data->coefs[3],
data->coefs[4],
data->coefs[5],
data->coefs[6]
);
}
/*! @brief entry point */
int main(int argc, char **argv){
assert(argc == 2);
ifstream fs;
fs.open(argv[1]);
assert(fs.is_open());
string simulation_name;
fs>>simulation_name;
int simulation_type;
fs>>simulation_type;
Datablock data(simulation_type);
data.simulation_name = simulation_name;
float dx;
fs>>dx;
float courant = 0.5;
float dt = courant * dx / LIGHTSPEED;
printf("dt = %f", dt);
int xdim, ydim;
fs>>xdim>>ydim;
Structure structure(xdim, ydim, dx, dt);
CPUAnimBitmap bitmap(structure.x_index_dim, structure.x_index_dim,
&data); /* bitmap structure */
data.bitmap = &bitmap;
data.totalTime = 0;
data.frames = 0;
checkCudaErrors(cudaEventCreate(&data.start, 1) );
checkCudaErrors(cudaEventCreate(&data.stop, 1) );
size_t pitch;
pitch = allocate_memory(&data, structure);
structure.pitch = pitch;
copy_symbols(&structure);
printf("pitch = %d", pitch);
data.structure = &structure;
initializeArrays(&data, structure, fs);
// get the coefficients
calculate_coefficients(&data, structure);
clear_memory_constants(&data);
// set the sources
HostSources host_sources;
DeviceSources device_sources;
long long x, y, source_type;
float mean, variance;
while(!fs.eof()){
fs>>x>>y>>source_type>>mean>>variance;
cout<<mean<<endl;
host_sources.add_source(x, y, source_type, mean, variance);
}
data.sources = &device_sources;
copy_sources_device_to_host(&host_sources, &device_sources);
pthread_mutex_init(&mutexcopy, NULL);
for(long i=0; i < 3; i++){
anim_gpu(&data, 0);
}
}
|
dada39f1a1a8ee7e6a0702a78771799463eff747.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "compute/crossentropy/crossentropy_internal.h"
namespace magmadnn {
namespace internal {
template <typename T>
__global__ void kernel_crossentropy_full_device(T *x, T *y, T *softmax, T *out) {
}
template <typename T>
void crossentropy_full_device(Tensor<T> *x, Tensor<T> *y, Tensor<T> *softmax, Tensor<T> *out) {
}
template void crossentropy_full_device(Tensor<int> *x, Tensor<int> *y, Tensor<int> *softmax, Tensor<int> *out);
template void crossentropy_full_device(Tensor<float> *x, Tensor<float> *y, Tensor<float> *softmax, Tensor<float> *out);
template void crossentropy_full_device(Tensor<double> *x, Tensor<double> *y, Tensor<double> *softmax, Tensor<double> *out);
} // namespace op
} // namespace magmadnn | dada39f1a1a8ee7e6a0702a78771799463eff747.cu |
#include "compute/crossentropy/crossentropy_internal.h"
namespace magmadnn {
namespace internal {
template <typename T>
__global__ void kernel_crossentropy_full_device(T *x, T *y, T *softmax, T *out) {
}
template <typename T>
void crossentropy_full_device(Tensor<T> *x, Tensor<T> *y, Tensor<T> *softmax, Tensor<T> *out) {
}
template void crossentropy_full_device(Tensor<int> *x, Tensor<int> *y, Tensor<int> *softmax, Tensor<int> *out);
template void crossentropy_full_device(Tensor<float> *x, Tensor<float> *y, Tensor<float> *softmax, Tensor<float> *out);
template void crossentropy_full_device(Tensor<double> *x, Tensor<double> *y, Tensor<double> *softmax, Tensor<double> *out);
} // namespace op
} // namespace magmadnn |
3997e6c4814cef03af69c537ea2b5d88a794bfdb.hip | // !!! This is a file automatically generated by hipify!!!
#include "DS_timer.h"
#include "hip/hip_runtime.h"
#include "hip/device_functions.h"
#include "device_atomic_functions.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#include <iostream>
using namespace std;
#define SIZE 1024*1024*1024
double IntegralCPU(double a, double b, int n);
double IntegralGPU(double a, double b, int n);
double IntegralGPU2(double a, double b, int n);
double IntegralGPU3(double a, double b, int n);
__global__ void threadAtomicAdd(double a, double *t, double *res, int n);
__global__ void threadAtomicAddver2(double a, double *t, double *res, int n);
__global__ void threadAtomicAddver2red(double a, double *t, double *res, int n);
__global__ void threadBlockReduction(double *res);
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
void doubleMatch(double val1, double val2) {
if (abs(val1 - val2) < 0.0000001) {
cout << "two value is equal is done" << endl;
cout << val1 << " == " << val2 << endl;
}
else {
cout << val1 << " != " << val2 << endl;
cout << "val1 - val2 = " << val1 - val2 << endl;
}
}
int main()
{
DS_timer timer(4);
timer.setTimerName(0, "CPU Integral Time");
timer.setTimerName(1, "GPU Integral Time(Atomic Func)");
timer.setTimerName(2, "GPU Integral Time(Atomic Func ver2)");
timer.setTimerName(3, "GPU Integral Time(Atomic Func ver2 reduction func)");
timer.onTimer(0);
double val1 = IntegralCPU(-1, 1, SIZE);
timer.offTimer(0);
timer.onTimer(1);
double val2 = IntegralGPU(-1, 1, SIZE);
timer.offTimer(1);
timer.onTimer(2);
double val3 = IntegralGPU2(-1, 1, SIZE);
timer.offTimer(2);
timer.onTimer(3);
double val4 = IntegralGPU3(-1, 1, SIZE);
timer.offTimer(3);
cout << "cpu and gpu 1" << endl;
doubleMatch(val1, val2);
cout << "cpu and gpu 2" << endl;
doubleMatch(val1, val3);
cout << "cpu and gpu 3" << endl;
doubleMatch(val1, val4);
timer.printTimer();
}
double IntegralCPU(double a, double b, int n) {
double tokenSize = abs(b-a) / (double)n;
double result = 0;
#pragma omp parallel for reduction(+:result)
for (int i = 0; i <= n; i++) {
double s = a + tokenSize * i;
double s2 = a + tokenSize * (i + 1);
result += tokenSize * (s*s + s2 * s2) / 2;
}
return result;
}
double IntegralGPU(double a, double b, int n) {
double tokenSize = abs(b - a) / (double)n;
double result = 0;
double *t; double *resul;
hipMalloc((void **)&t, sizeof(double));
hipMalloc((void **)&resul, sizeof(double));
hipMemcpy(t, &tokenSize, sizeof(double) * 1, hipMemcpyHostToDevice);
threadAtomicAdd << <ceil((float)n/1024)+1, 1024>> > (a, t, resul, SIZE);
hipDeviceSynchronize();
hipMemcpy(&result, resul, sizeof(double), hipMemcpyDeviceToHost);
hipFree(t); hipFree(resul);
return result;
}
double IntegralGPU2(double a, double b, int n) {
double tokenSize = abs(b - a) / (double)n;
double result = 0;
double *t; double *resul;
hipMalloc((void **)&t, sizeof(double));
hipMalloc((void **)&resul, sizeof(double));
hipMemcpy(t, &tokenSize, sizeof(double) * 1, hipMemcpyHostToDevice);
threadAtomicAddver2 << <1024, 1024 >> > (a, t, resul, SIZE);
hipDeviceSynchronize();
hipMemcpy(&result, resul, sizeof(double), hipMemcpyDeviceToHost);
hipFree(t); hipFree(resul);
return result;
}
double IntegralGPU3(double a, double b, int n) {
double tokenSize = abs(b - a) / (double)n;
double result = 0;
double *t; double *resul;
hipMalloc((void **)&t, sizeof(double));
hipMalloc((void **)&resul, sizeof(double)*1024);
hipMemcpy(t, &tokenSize, sizeof(double) * 1, hipMemcpyHostToDevice);
threadAtomicAddver2red << <1024, 1024 >> > (a, t, resul, SIZE);
threadBlockReduction << <1, 1024 >> > (resul);
hipDeviceSynchronize();
hipMemcpy(&result, resul, sizeof(double), hipMemcpyDeviceToHost);
hipFree(t); hipFree(resul);
return result;
}
__global__ void threadAtomicAdd(double a, double *t, double *res, int n) {
if (threadIdx.x + blockIdx.x*blockDim.x > n) return;
__shared__ double sa;
double s = a + __fmul_rn(*t, (threadIdx.x + blockIdx.x*blockDim.x));
double s2 = a + __fmul_rn(*t,(threadIdx.x + blockIdx.x*blockDim.x + 1));
double bar = __fmul_rn(*t , (__fmul_rn(s,s) + __fmul_rn(s2,s2))) / 2;
if (threadIdx.x == 0) {
sa = 0;
}
__syncthreads();
atomicAdd(&sa, bar);
__syncthreads();
if (threadIdx.x == 0) atomicAdd(res, sa);
}
__global__ void threadAtomicAddver2(double a, double *t, double *res, int n) {
if (threadIdx.x + blockIdx.x*blockDim.x > n) return;
__shared__ double sa;
int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (threadIdx.x == 0) {
sa = 0;
}
double bar = 0;
for (int i = 0; i < 1024; i++) {
double s = a + __fmul_rn(*t, (tid*1024 + i));
double s2 = a + __fmul_rn(*t, (tid*1024 + i + 1));
bar += __fmul_rn(*t, (__fmul_rn(s, s) + __fmul_rn(s2, s2))) / 2;
}
if (tid == n - 1) {
double s = a + __fmul_rn(*t, (tid * 1024 + 1024));
double s2 = a + __fmul_rn(*t, (tid * 1024 + 1025));
bar += __fmul_rn(*t, (__fmul_rn(s, s) + __fmul_rn(s2, s2))) / 2;
}
__syncthreads();
atomicAdd(&sa, bar);
__syncthreads();
if (threadIdx.x == 0) atomicAdd(res, sa);
}
__global__ void threadAtomicAddver2red(double a, double *t, double *res, int n) {
if (threadIdx.x + blockIdx.x*blockDim.x > n) return;
__shared__ double sa[1024];
int tid = threadIdx.x + blockIdx.x*blockDim.x;
double bar = 0;
for (int i = 0; i < 1024; i++) {
double s = a + __fmul_rn(*t, (tid * 1024 + i));
double s2 = a + __fmul_rn(*t, (tid * 1024 + i + 1));
bar += __fmul_rn(*t, (__fmul_rn(s, s) + __fmul_rn(s2, s2))) / 2;
}
sa[threadIdx.x] = bar;
__syncthreads();
//threads reduction
for (int offset = 1; offset < 1024; offset *= 2) {
if (threadIdx.x % (2 * offset) == 0) sa[threadIdx.x] += sa[threadIdx.x + offset];
__syncthreads();
}
__syncthreads();
res[blockIdx.x] = sa[0];
}
__global__ void threadBlockReduction(double *res) {
for (int offset = 1; offset < 1024; offset *= 2) {
if (threadIdx.x % (2 * offset) == 0) res[threadIdx.x] += res[threadIdx.x + offset];
__syncthreads();
}
} | 3997e6c4814cef03af69c537ea2b5d88a794bfdb.cu | #include "DS_timer.h"
#include "cuda_runtime.h"
#include "device_functions.h"
#include "device_atomic_functions.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#include <iostream>
using namespace std;
#define SIZE 1024*1024*1024
double IntegralCPU(double a, double b, int n);
double IntegralGPU(double a, double b, int n);
double IntegralGPU2(double a, double b, int n);
double IntegralGPU3(double a, double b, int n);
__global__ void threadAtomicAdd(double a, double *t, double *res, int n);
__global__ void threadAtomicAddver2(double a, double *t, double *res, int n);
__global__ void threadAtomicAddver2red(double a, double *t, double *res, int n);
__global__ void threadBlockReduction(double *res);
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
void doubleMatch(double val1, double val2) {
if (abs(val1 - val2) < 0.0000001) {
cout << "two value is equal is done" << endl;
cout << val1 << " == " << val2 << endl;
}
else {
cout << val1 << " != " << val2 << endl;
cout << "val1 - val2 = " << val1 - val2 << endl;
}
}
int main()
{
DS_timer timer(4);
timer.setTimerName(0, "CPU Integral Time");
timer.setTimerName(1, "GPU Integral Time(Atomic Func)");
timer.setTimerName(2, "GPU Integral Time(Atomic Func ver2)");
timer.setTimerName(3, "GPU Integral Time(Atomic Func ver2 reduction func)");
timer.onTimer(0);
double val1 = IntegralCPU(-1, 1, SIZE);
timer.offTimer(0);
timer.onTimer(1);
double val2 = IntegralGPU(-1, 1, SIZE);
timer.offTimer(1);
timer.onTimer(2);
double val3 = IntegralGPU2(-1, 1, SIZE);
timer.offTimer(2);
timer.onTimer(3);
double val4 = IntegralGPU3(-1, 1, SIZE);
timer.offTimer(3);
cout << "cpu and gpu 1" << endl;
doubleMatch(val1, val2);
cout << "cpu and gpu 2" << endl;
doubleMatch(val1, val3);
cout << "cpu and gpu 3" << endl;
doubleMatch(val1, val4);
timer.printTimer();
}
double IntegralCPU(double a, double b, int n) {
double tokenSize = abs(b-a) / (double)n;
double result = 0;
#pragma omp parallel for reduction(+:result)
for (int i = 0; i <= n; i++) {
double s = a + tokenSize * i;
double s2 = a + tokenSize * (i + 1);
result += tokenSize * (s*s + s2 * s2) / 2;
}
return result;
}
double IntegralGPU(double a, double b, int n) {
double tokenSize = abs(b - a) / (double)n;
double result = 0;
double *t; double *resul;
cudaMalloc((void **)&t, sizeof(double));
cudaMalloc((void **)&resul, sizeof(double));
cudaMemcpy(t, &tokenSize, sizeof(double) * 1, cudaMemcpyHostToDevice);
threadAtomicAdd << <ceil((float)n/1024)+1, 1024>> > (a, t, resul, SIZE);
cudaDeviceSynchronize();
cudaMemcpy(&result, resul, sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(t); cudaFree(resul);
return result;
}
double IntegralGPU2(double a, double b, int n) {
double tokenSize = abs(b - a) / (double)n;
double result = 0;
double *t; double *resul;
cudaMalloc((void **)&t, sizeof(double));
cudaMalloc((void **)&resul, sizeof(double));
cudaMemcpy(t, &tokenSize, sizeof(double) * 1, cudaMemcpyHostToDevice);
threadAtomicAddver2 << <1024, 1024 >> > (a, t, resul, SIZE);
cudaDeviceSynchronize();
cudaMemcpy(&result, resul, sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(t); cudaFree(resul);
return result;
}
double IntegralGPU3(double a, double b, int n) {
double tokenSize = abs(b - a) / (double)n;
double result = 0;
double *t; double *resul;
cudaMalloc((void **)&t, sizeof(double));
cudaMalloc((void **)&resul, sizeof(double)*1024);
cudaMemcpy(t, &tokenSize, sizeof(double) * 1, cudaMemcpyHostToDevice);
threadAtomicAddver2red << <1024, 1024 >> > (a, t, resul, SIZE);
threadBlockReduction << <1, 1024 >> > (resul);
cudaDeviceSynchronize();
cudaMemcpy(&result, resul, sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(t); cudaFree(resul);
return result;
}
__global__ void threadAtomicAdd(double a, double *t, double *res, int n) {
if (threadIdx.x + blockIdx.x*blockDim.x > n) return;
__shared__ double sa;
double s = a + __fmul_rn(*t, (threadIdx.x + blockIdx.x*blockDim.x));
double s2 = a + __fmul_rn(*t,(threadIdx.x + blockIdx.x*blockDim.x + 1));
double bar = __fmul_rn(*t , (__fmul_rn(s,s) + __fmul_rn(s2,s2))) / 2;
if (threadIdx.x == 0) {
sa = 0;
}
__syncthreads();
atomicAdd(&sa, bar);
__syncthreads();
if (threadIdx.x == 0) atomicAdd(res, sa);
}
__global__ void threadAtomicAddver2(double a, double *t, double *res, int n) {
if (threadIdx.x + blockIdx.x*blockDim.x > n) return;
__shared__ double sa;
int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (threadIdx.x == 0) {
sa = 0;
}
double bar = 0;
for (int i = 0; i < 1024; i++) {
double s = a + __fmul_rn(*t, (tid*1024 + i));
double s2 = a + __fmul_rn(*t, (tid*1024 + i + 1));
bar += __fmul_rn(*t, (__fmul_rn(s, s) + __fmul_rn(s2, s2))) / 2;
}
if (tid == n - 1) {
double s = a + __fmul_rn(*t, (tid * 1024 + 1024));
double s2 = a + __fmul_rn(*t, (tid * 1024 + 1025));
bar += __fmul_rn(*t, (__fmul_rn(s, s) + __fmul_rn(s2, s2))) / 2;
}
__syncthreads();
atomicAdd(&sa, bar);
__syncthreads();
if (threadIdx.x == 0) atomicAdd(res, sa);
}
__global__ void threadAtomicAddver2red(double a, double *t, double *res, int n) {
if (threadIdx.x + blockIdx.x*blockDim.x > n) return;
__shared__ double sa[1024];
int tid = threadIdx.x + blockIdx.x*blockDim.x;
double bar = 0;
for (int i = 0; i < 1024; i++) {
double s = a + __fmul_rn(*t, (tid * 1024 + i));
double s2 = a + __fmul_rn(*t, (tid * 1024 + i + 1));
bar += __fmul_rn(*t, (__fmul_rn(s, s) + __fmul_rn(s2, s2))) / 2;
}
sa[threadIdx.x] = bar;
__syncthreads();
//threads reduction
for (int offset = 1; offset < 1024; offset *= 2) {
if (threadIdx.x % (2 * offset) == 0) sa[threadIdx.x] += sa[threadIdx.x + offset];
__syncthreads();
}
__syncthreads();
res[blockIdx.x] = sa[0];
}
__global__ void threadBlockReduction(double *res) {
for (int offset = 1; offset < 1024; offset *= 2) {
if (threadIdx.x % (2 * offset) == 0) res[threadIdx.x] += res[threadIdx.x + offset];
__syncthreads();
}
} |
3494b59e5dd3f29490d014081c9e8d251d0284be.hip | // !!! This is a file automatically generated by hipify!!!
#include "bloomfilter.h"
#include <stdlib.h>
#include <iostream>
#include <semaphore.h>
#include <vector>
#include <bitset>
#include <cstring>
#include <ctime>
#include <omp.h>
#include <inttypes.h>
#include <iomanip>
#include <iomanip>
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cstdio>
#include <chrono>
using namespace std;
#define BIG_CONSTANT(x) (x)
#define ROTL64(x,y) rotl64(x,y)
#define FORCE_INLINE inline __attribute__((always_inline))
#define BIT_ARRAY_SIZE 100000
#define SEED_VALUE_1 27
#define SEED_VALUE_2 58
#define SEED_VALUE_3 99
const int MAX = 26;
sem_t semaphore;
__device__ inline uint64_t rotl64(uint64_t x, int8_t r){
return (x << r) | (x >> (64 - r));
}
__device__ FORCE_INLINE uint64_t fmix64 ( uint64_t k )
{
k ^= k >> 33;
k *= BIG_CONSTANT(0xff51afd7ed558ccd);
k ^= k >> 33;
k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return k;
}
__device__ FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i )
{
return p[i];
}
__device__ void MurmurHash3_x64_128(const void* key, const int len, const uint32_t seed, uint64_t* hash, uint64_t k1, uint64_t k2, uint64_t k3, uint64_t k4){
const uint8_t* data = (const uint8_t*)key;
const int nblocks = len/16;
uint64_t h1 = seed;
uint64_t h2 = seed;
uint64_t c1;
uint64_t c2;
c1 = BIG_CONSTANT(0x87c37b91114253d5);
c2 = BIG_CONSTANT(0x4cf5ad432745937f);
//const uint64_t *blocks = (const uint64_t *)(data);
h1 ^= k1;
h1 = ROTL64(h1,27);
h1 += h2;
h1 = h1*5+0x52dce729;
// cout << "h1: " << h1 << "\n";
h2 ^= k2;
h2 = ROTL64(h2,31);
h2 += h1;
h2 = h2*5+0x38495ab5;
// cout << "h2: " << h2 << "\n";
h1 ^= k3;
h1 = ROTL64(h1,27);
h1 += h2;
h1 = h1*5+0x52dce729;
h2 ^= k4;
h2 = ROTL64(h2,31);
h2 += h1;
h2 = h2*5+0x38495ab5;
//----------
// tail
const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
// uint64_t
k1 = 0;
//uint64_t
k2 = 0;
switch(len & 15){
case 15: k2 ^= ((uint64_t)tail[14]) << 48;
case 14: k2 ^= ((uint64_t)tail[13]) << 40;
case 13: k2 ^= ((uint64_t)tail[12]) << 32;
case 12: k2 ^= ((uint64_t)tail[11]) << 24;
case 11: k2 ^= ((uint64_t)tail[10]) << 16;
case 10: k2 ^= ((uint64_t)tail[ 9]) << 8;
case 9: k2 ^= ((uint64_t)tail[ 8]) << 0;
k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
case 8: k1 ^= ((uint64_t)tail[ 7]) << 56;
case 7: k1 ^= ((uint64_t)tail[ 6]) << 48;
case 6: k1 ^= ((uint64_t)tail[ 5]) << 40;
case 5: k1 ^= ((uint64_t)tail[ 4]) << 32;
case 4: k1 ^= ((uint64_t)tail[ 3]) << 24;
case 3: k1 ^= ((uint64_t)tail[ 2]) << 16;
case 2: k1 ^= ((uint64_t)tail[ 1]) << 8;
case 1: k1 ^= ((uint64_t)tail[ 0]) << 0;
k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
};
//----------
// finalization
h1 ^= len; h2 ^= len;
h1 += h2;
h2 += h1;
h1 = fmix64(h1);
h2 = fmix64(h2);
h1 += h2;
h2 += h1;
((uint64_t*)hash)[0] = h1;
((uint64_t*)hash)[1] = h2;
}
string genRandomString(int n)
{
char alphabet[MAX] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g',
'h', 'i', 'j', 'k', 'l', 'm', 'n',
'o', 'p', 'q', 'r', 's', 't', 'u',
'v', 'w', 'x', 'y', 'z' };
string res = "";
for (int i = 0; i < n; i++)
res = res + alphabet[rand() % MAX];
return res;
}
__device__ void insertInHashTable(int* d_HashTable, char* key, int length){
// Calculate 3 hashes and insert
uint64_t hash1[2];
uint64_t hash2[2];
uint64_t hash3[2];
int bit1, bit2, bit3;
const uint8_t* data = (const uint8_t*)key;
//const int nblocks = length/16;
uint64_t c1;
uint64_t c2;
c1 = BIG_CONSTANT(0x87c37b91114253d5);
c2 = BIG_CONSTANT(0x4cf5ad432745937f);
const uint64_t *blocks = (const uint64_t *)(data);
int i = 0;
uint64_t k1, k2, k3, k4;
k1 = getblock64(blocks,i*2+0);
k1 *= c1;
k1 = ROTL64(k1,31);
k1 *= c2;
k2 = getblock64(blocks,i*2+1);
k2 *= c2;
k2 = ROTL64(k2,33);
k2 *= c1;
k3 = getblock64(blocks,i*2+2);
k3 *= c1;
k3 = ROTL64(k3,31);
k3 *= c2;
k4 = getblock64(blocks,i*2+3);
k4 *= c2;
k4 = ROTL64(k4,33);
k4 *= c1;
MurmurHash3_x64_128(key, length, SEED_VALUE_1, hash1, k1, k2, k3, k4);
bit1 = (hash1[0] % BIT_ARRAY_SIZE + hash1[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
MurmurHash3_x64_128(key, length, SEED_VALUE_2, hash2, k1, k2, k3, k4);
bit2 = (hash2[0] % BIT_ARRAY_SIZE + hash2[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
MurmurHash3_x64_128(key, length, SEED_VALUE_3, hash3, k1, k2, k3, k4);
bit3 = (hash3[0] % BIT_ARRAY_SIZE + hash3[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
// cout << "Bits set are: " << bit1 << "," << bit2 << " and " << bit3 << "\n";
d_HashTable[bit1] = 1;
d_HashTable[bit2] = 1;
d_HashTable[bit3] = 1;
//cout << "Set bits: " << bit1 << ", " << bit2 << ", " << bit3 << "\n";
}
/*
void checkIfPresent(bitset<BIT_ARRAY_SIZE> HashTable, char* key, int length){
// Calculate 3 hashes and check bit
uint64_t hash1[2];
MurmurHash3_x64_128(key, length, SEED_VALUE_1, hash1);
int bit1 = (hash1[0] % BIT_ARRAY_SIZE + hash1[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
uint64_t hash2[2];
MurmurHash3_x64_128(key, length, SEED_VALUE_2, hash2);
int bit2 = (hash2[0] % BIT_ARRAY_SIZE + hash2[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
uint64_t hash3[2];
MurmurHash3_x64_128(key, length, SEED_VALUE_3, hash3);
int bit3 = (hash3[0] % BIT_ARRAY_SIZE + hash3[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
if(HashTable.test(bit1) == 1 && HashTable.test(bit2) == 1 && HashTable.test(bit3) == 1){
cout << key << " might be present" << "\n";
}
else{
cout << key << " is definitely not present" << "\n";
}
}*/
__device__ char* getword(char* d_wordsToInsert, int idx, int lenOfWord){
char* temp = new char[lenOfWord + 1];
for(int i=0; i<lenOfWord; i++){
temp[i] = d_wordsToInsert[idx*lenOfWord+i];
}
temp[lenOfWord] = '\0';
return temp;
}
__global__ void parallelInsertion(char* d_wordsToInsert, int lenOfWord, int *d_HashTable, int numIterations){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int gridStride = blockDim.x * gridDim.x;
for(int i=idx; i<numIterations; i += gridStride){
char* cstr = getword(d_wordsToInsert, idx, lenOfWord);
insertInHashTable(d_HashTable, cstr, lenOfWord);
}
}
int main(){
int lenOfWord = 32;
int numIterations = 100000;
string str;
char wordsToInsert[lenOfWord * numIterations];
for(int i = 0; i < numIterations; i++){
str = genRandomString(lenOfWord);
char* cstr = new char[lenOfWord + 1];
strcpy(cstr, str.c_str());
for(int j = 0; j < lenOfWord; j++){
wordsToInsert[i*lenOfWord+j] = cstr[j];
}
}
char* d_wordsToInsert;
hipMalloc((void**)&d_wordsToInsert, lenOfWord*numIterations*sizeof(char));
hipMemcpy(d_wordsToInsert, wordsToInsert, lenOfWord*numIterations*sizeof(char), hipMemcpyHostToDevice);
int* HashTable = (int*)calloc(BIT_ARRAY_SIZE, sizeof(int));
int* d_HashTable;
hipMalloc((void**)&d_HashTable, BIT_ARRAY_SIZE*sizeof(int));
hipMemset(d_HashTable, 0, BIT_ARRAY_SIZE*sizeof(int));
hipMemcpy(d_HashTable, HashTable, BIT_ARRAY_SIZE*sizeof(int), hipMemcpyHostToDevice);
//time and call function here
auto t_start = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( parallelInsertion), dim3(1024), dim3(1024), 0, 0, d_wordsToInsert, lenOfWord, d_HashTable, numIterations);
hipDeviceSynchronize();
auto t_end = std::chrono::high_resolution_clock::now();
hipMemcpy(HashTable, d_HashTable, BIT_ARRAY_SIZE*sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_HashTable);
hipFree(d_wordsToInsert);
double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end-t_start).count();
cout << "Time taken for inserting " << numIterations << " records in Cuda parallelized version: " << fixed << elapsed_time_ms << setprecision(9);
cout << " ms" << endl;
}
| 3494b59e5dd3f29490d014081c9e8d251d0284be.cu | #include "bloomfilter.h"
#include <stdlib.h>
#include <iostream>
#include <semaphore.h>
#include <vector>
#include <bitset>
#include <cstring>
#include <ctime>
#include <omp.h>
#include <inttypes.h>
#include <iomanip>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cstdio>
#include <chrono>
using namespace std;
#define BIG_CONSTANT(x) (x)
#define ROTL64(x,y) rotl64(x,y)
#define FORCE_INLINE inline __attribute__((always_inline))
#define BIT_ARRAY_SIZE 100000
#define SEED_VALUE_1 27
#define SEED_VALUE_2 58
#define SEED_VALUE_3 99
const int MAX = 26;
sem_t semaphore;
__device__ inline uint64_t rotl64(uint64_t x, int8_t r){
return (x << r) | (x >> (64 - r));
}
__device__ FORCE_INLINE uint64_t fmix64 ( uint64_t k )
{
k ^= k >> 33;
k *= BIG_CONSTANT(0xff51afd7ed558ccd);
k ^= k >> 33;
k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return k;
}
__device__ FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i )
{
return p[i];
}
__device__ void MurmurHash3_x64_128(const void* key, const int len, const uint32_t seed, uint64_t* hash, uint64_t k1, uint64_t k2, uint64_t k3, uint64_t k4){
const uint8_t* data = (const uint8_t*)key;
const int nblocks = len/16;
uint64_t h1 = seed;
uint64_t h2 = seed;
uint64_t c1;
uint64_t c2;
c1 = BIG_CONSTANT(0x87c37b91114253d5);
c2 = BIG_CONSTANT(0x4cf5ad432745937f);
//const uint64_t *blocks = (const uint64_t *)(data);
h1 ^= k1;
h1 = ROTL64(h1,27);
h1 += h2;
h1 = h1*5+0x52dce729;
// cout << "h1: " << h1 << "\n";
h2 ^= k2;
h2 = ROTL64(h2,31);
h2 += h1;
h2 = h2*5+0x38495ab5;
// cout << "h2: " << h2 << "\n";
h1 ^= k3;
h1 = ROTL64(h1,27);
h1 += h2;
h1 = h1*5+0x52dce729;
h2 ^= k4;
h2 = ROTL64(h2,31);
h2 += h1;
h2 = h2*5+0x38495ab5;
//----------
// tail
const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
// uint64_t
k1 = 0;
//uint64_t
k2 = 0;
switch(len & 15){
case 15: k2 ^= ((uint64_t)tail[14]) << 48;
case 14: k2 ^= ((uint64_t)tail[13]) << 40;
case 13: k2 ^= ((uint64_t)tail[12]) << 32;
case 12: k2 ^= ((uint64_t)tail[11]) << 24;
case 11: k2 ^= ((uint64_t)tail[10]) << 16;
case 10: k2 ^= ((uint64_t)tail[ 9]) << 8;
case 9: k2 ^= ((uint64_t)tail[ 8]) << 0;
k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
case 8: k1 ^= ((uint64_t)tail[ 7]) << 56;
case 7: k1 ^= ((uint64_t)tail[ 6]) << 48;
case 6: k1 ^= ((uint64_t)tail[ 5]) << 40;
case 5: k1 ^= ((uint64_t)tail[ 4]) << 32;
case 4: k1 ^= ((uint64_t)tail[ 3]) << 24;
case 3: k1 ^= ((uint64_t)tail[ 2]) << 16;
case 2: k1 ^= ((uint64_t)tail[ 1]) << 8;
case 1: k1 ^= ((uint64_t)tail[ 0]) << 0;
k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
};
//----------
// finalization
h1 ^= len; h2 ^= len;
h1 += h2;
h2 += h1;
h1 = fmix64(h1);
h2 = fmix64(h2);
h1 += h2;
h2 += h1;
((uint64_t*)hash)[0] = h1;
((uint64_t*)hash)[1] = h2;
}
string genRandomString(int n)
{
char alphabet[MAX] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g',
'h', 'i', 'j', 'k', 'l', 'm', 'n',
'o', 'p', 'q', 'r', 's', 't', 'u',
'v', 'w', 'x', 'y', 'z' };
string res = "";
for (int i = 0; i < n; i++)
res = res + alphabet[rand() % MAX];
return res;
}
__device__ void insertInHashTable(int* d_HashTable, char* key, int length){
// Calculate 3 hashes and insert
uint64_t hash1[2];
uint64_t hash2[2];
uint64_t hash3[2];
int bit1, bit2, bit3;
const uint8_t* data = (const uint8_t*)key;
//const int nblocks = length/16;
uint64_t c1;
uint64_t c2;
c1 = BIG_CONSTANT(0x87c37b91114253d5);
c2 = BIG_CONSTANT(0x4cf5ad432745937f);
const uint64_t *blocks = (const uint64_t *)(data);
int i = 0;
uint64_t k1, k2, k3, k4;
k1 = getblock64(blocks,i*2+0);
k1 *= c1;
k1 = ROTL64(k1,31);
k1 *= c2;
k2 = getblock64(blocks,i*2+1);
k2 *= c2;
k2 = ROTL64(k2,33);
k2 *= c1;
k3 = getblock64(blocks,i*2+2);
k3 *= c1;
k3 = ROTL64(k3,31);
k3 *= c2;
k4 = getblock64(blocks,i*2+3);
k4 *= c2;
k4 = ROTL64(k4,33);
k4 *= c1;
MurmurHash3_x64_128(key, length, SEED_VALUE_1, hash1, k1, k2, k3, k4);
bit1 = (hash1[0] % BIT_ARRAY_SIZE + hash1[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
MurmurHash3_x64_128(key, length, SEED_VALUE_2, hash2, k1, k2, k3, k4);
bit2 = (hash2[0] % BIT_ARRAY_SIZE + hash2[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
MurmurHash3_x64_128(key, length, SEED_VALUE_3, hash3, k1, k2, k3, k4);
bit3 = (hash3[0] % BIT_ARRAY_SIZE + hash3[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
// cout << "Bits set are: " << bit1 << "," << bit2 << " and " << bit3 << "\n";
d_HashTable[bit1] = 1;
d_HashTable[bit2] = 1;
d_HashTable[bit3] = 1;
//cout << "Set bits: " << bit1 << ", " << bit2 << ", " << bit3 << "\n";
}
/*
void checkIfPresent(bitset<BIT_ARRAY_SIZE> HashTable, char* key, int length){
// Calculate 3 hashes and check bit
uint64_t hash1[2];
MurmurHash3_x64_128(key, length, SEED_VALUE_1, hash1);
int bit1 = (hash1[0] % BIT_ARRAY_SIZE + hash1[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
uint64_t hash2[2];
MurmurHash3_x64_128(key, length, SEED_VALUE_2, hash2);
int bit2 = (hash2[0] % BIT_ARRAY_SIZE + hash2[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
uint64_t hash3[2];
MurmurHash3_x64_128(key, length, SEED_VALUE_3, hash3);
int bit3 = (hash3[0] % BIT_ARRAY_SIZE + hash3[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
if(HashTable.test(bit1) == 1 && HashTable.test(bit2) == 1 && HashTable.test(bit3) == 1){
cout << key << " might be present" << "\n";
}
else{
cout << key << " is definitely not present" << "\n";
}
}*/
__device__ char* getword(char* d_wordsToInsert, int idx, int lenOfWord){
char* temp = new char[lenOfWord + 1];
for(int i=0; i<lenOfWord; i++){
temp[i] = d_wordsToInsert[idx*lenOfWord+i];
}
temp[lenOfWord] = '\0';
return temp;
}
__global__ void parallelInsertion(char* d_wordsToInsert, int lenOfWord, int *d_HashTable, int numIterations){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int gridStride = blockDim.x * gridDim.x;
for(int i=idx; i<numIterations; i += gridStride){
    char* cstr = getword(d_wordsToInsert, i, lenOfWord);
    insertInHashTable(d_HashTable, cstr, lenOfWord);
    delete[] cstr; // free the device-heap buffer allocated in getword
}
}
int main(){
int lenOfWord = 32;
int numIterations = 100000;
string str;
char wordsToInsert[lenOfWord * numIterations];
for(int i = 0; i < numIterations; i++){
str = genRandomString(lenOfWord);
char* cstr = new char[lenOfWord + 1];
strcpy(cstr, str.c_str());
for(int j = 0; j < lenOfWord; j++){
    wordsToInsert[i*lenOfWord+j] = cstr[j];
}
delete[] cstr; // release the temporary copy of the word
}
char* d_wordsToInsert;
cudaMalloc((void**)&d_wordsToInsert, lenOfWord*numIterations*sizeof(char));
cudaMemcpy(d_wordsToInsert, wordsToInsert, lenOfWord*numIterations*sizeof(char), cudaMemcpyHostToDevice);
int* HashTable = (int*)calloc(BIT_ARRAY_SIZE, sizeof(int));
int* d_HashTable;
cudaMalloc((void**)&d_HashTable, BIT_ARRAY_SIZE*sizeof(int));
cudaMemset(d_HashTable, 0, BIT_ARRAY_SIZE*sizeof(int));
cudaMemcpy(d_HashTable, HashTable, BIT_ARRAY_SIZE*sizeof(int), cudaMemcpyHostToDevice);
//time and call function here
auto t_start = std::chrono::high_resolution_clock::now();
parallelInsertion<<<1024, 1024>>>(d_wordsToInsert, lenOfWord, d_HashTable, numIterations);
cudaDeviceSynchronize();
auto t_end = std::chrono::high_resolution_clock::now();
cudaMemcpy(HashTable, d_HashTable, BIT_ARRAY_SIZE*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_HashTable);
cudaFree(d_wordsToInsert);
double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end-t_start).count();
cout << "Time taken for inserting " << numIterations << " records in Cuda parallelized version: " << fixed << elapsed_time_ms << setprecision(9);
cout << " ms" << endl;
}
|
7d7263d1a7743134b0f87b808b26367ac7b33417.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
__global__ void myfirstkernel(void) {
}
int main(void) {
hipLaunchKernelGGL(( myfirstkernel) , dim3(1), dim3(1) , 0, 0, );
printf("Hello, CUDA!\n");
return 0;
}
| 7d7263d1a7743134b0f87b808b26367ac7b33417.cu | #include <iostream>
#include <stdio.h>
__global__ void myfirstkernel(void) {
}
int main(void) {
myfirstkernel <<<1, 1 >>>();
printf("Hello, CUDA!\n");
return 0;
}
|
37135522b0798e3c373b79debc4c62717a02ffa4.hip | // !!! This is a file automatically generated by hipify!!!
#include "SequenceVisitor.cuh"
#include "GlobalEventCut.cuh"
DEFINE_EMPTY_SET_ARGUMENTS_SIZE(global_event_cut_t)
template<>
void SequenceVisitor::visit<global_event_cut_t>(
global_event_cut_t& state,
const global_event_cut_t::arguments_t& arguments,
const RuntimeOptions& runtime_options,
const Constants& constants,
HostBuffers& host_buffers,
hipStream_t& cuda_stream,
hipEvent_t& cuda_generic_event)
{
host_buffers.host_number_of_selected_events[0] = 0;
cudaCheck(hipMemcpyAsync(
arguments.offset<dev_number_of_selected_events>(),
host_buffers.host_number_of_selected_events,
sizeof(uint),
hipMemcpyHostToDevice,
cuda_stream));
// Setup opts and arguments for kernel call
state.set_opts(dim3(runtime_options.number_of_events), dim3(32), cuda_stream);
state.set_arguments(
arguments.offset<dev_ut_raw_input>(),
arguments.offset<dev_ut_raw_input_offsets>(),
arguments.offset<dev_scifi_raw_input>(),
arguments.offset<dev_scifi_raw_input_offsets>(),
arguments.offset<dev_number_of_selected_events>(),
arguments.offset<dev_event_list>());
state.invoke();
cudaCheck(hipMemcpyAsync(
host_buffers.host_number_of_selected_events,
arguments.offset<dev_number_of_selected_events>(),
sizeof(uint),
hipMemcpyDeviceToHost,
cuda_stream));
// TODO: This is not needed here
cudaCheck(hipMemcpyAsync(
host_buffers.host_event_list,
arguments.offset<dev_event_list>(),
runtime_options.number_of_events * sizeof(uint),
hipMemcpyDeviceToHost,
cuda_stream));
hipEventRecord(cuda_generic_event, cuda_stream);
hipEventSynchronize(cuda_generic_event);
if (logger::ll.verbosityLevel >= logger::debug) {
debug_cout << "Selected " << host_buffers.host_number_of_selected_events[0] << " / "
<< runtime_options.number_of_events << " events with global event cuts" << std::endl;
}
}
| 37135522b0798e3c373b79debc4c62717a02ffa4.cu | #include "SequenceVisitor.cuh"
#include "GlobalEventCut.cuh"
DEFINE_EMPTY_SET_ARGUMENTS_SIZE(global_event_cut_t)
template<>
void SequenceVisitor::visit<global_event_cut_t>(
global_event_cut_t& state,
const global_event_cut_t::arguments_t& arguments,
const RuntimeOptions& runtime_options,
const Constants& constants,
HostBuffers& host_buffers,
cudaStream_t& cuda_stream,
cudaEvent_t& cuda_generic_event)
{
host_buffers.host_number_of_selected_events[0] = 0;
cudaCheck(cudaMemcpyAsync(
arguments.offset<dev_number_of_selected_events>(),
host_buffers.host_number_of_selected_events,
sizeof(uint),
cudaMemcpyHostToDevice,
cuda_stream));
// Setup opts and arguments for kernel call
state.set_opts(dim3(runtime_options.number_of_events), dim3(32), cuda_stream);
state.set_arguments(
arguments.offset<dev_ut_raw_input>(),
arguments.offset<dev_ut_raw_input_offsets>(),
arguments.offset<dev_scifi_raw_input>(),
arguments.offset<dev_scifi_raw_input_offsets>(),
arguments.offset<dev_number_of_selected_events>(),
arguments.offset<dev_event_list>());
state.invoke();
cudaCheck(cudaMemcpyAsync(
host_buffers.host_number_of_selected_events,
arguments.offset<dev_number_of_selected_events>(),
sizeof(uint),
cudaMemcpyDeviceToHost,
cuda_stream));
// TODO: This is not needed here
cudaCheck(cudaMemcpyAsync(
host_buffers.host_event_list,
arguments.offset<dev_event_list>(),
runtime_options.number_of_events * sizeof(uint),
cudaMemcpyDeviceToHost,
cuda_stream));
cudaEventRecord(cuda_generic_event, cuda_stream);
cudaEventSynchronize(cuda_generic_event);
if (logger::ll.verbosityLevel >= logger::debug) {
debug_cout << "Selected " << host_buffers.host_number_of_selected_events[0] << " / "
<< runtime_options.number_of_events << " events with global event cuts" << std::endl;
}
}
|
7f106bcc4b668c89f1ef8578611c72be6d571131.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "split_pairwise.cuh"
#include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
namespace NKernel {
//System size <= ROW_SIZE, the number of rows to decompose;
// in pfound and pair classification we don't need the last line
template <int BLOCK_SIZE>
__launch_bounds__(BLOCK_SIZE)
__global__ void ExtractMatricesAndTargetsImpl(const float* linearSystem,
const int matCount,
const int rowSize,
float* matrices,
float* targets,
float* matrixDiag
) {
const int lineSize = 32;
const int matricesPerBlock = BLOCK_SIZE / lineSize;
const int localMatrixIdx = threadIdx.x / lineSize;
int matrixIdx = blockIdx.x * matricesPerBlock + localMatrixIdx;
if (matrixIdx >= matCount) {
return;
}
linearSystem += ((size_t)matrixIdx) * (rowSize * (rowSize + 1) / 2 + rowSize);
matrices += ((size_t)matrixIdx) * (rowSize * (rowSize + 1) / 2);
targets += ((size_t)matrixIdx) * rowSize;
matrixDiag += ((size_t)matrixIdx) * rowSize;
const int x = threadIdx.x & (lineSize - 1);
#pragma unroll 8
for (int i = x; i < rowSize * (rowSize + 1) / 2; i += lineSize) {
matrices[i] = linearSystem[i];
}
#pragma unroll 8
for (int i = x; i < rowSize; i += lineSize) {
targets[i] = linearSystem[rowSize * (rowSize + 1) / 2 + i];
}
#pragma unroll 8
for (int i = x; i < rowSize; i += lineSize) {
matrixDiag[i] = linearSystem[i * (i + 1) / 2 + i];
}
}
void ExtractMatricesAndTargets(const float* linearSystem, int matCount, int rowSize, float* matrices, float* targets, float* matrixDiag, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * 32 + blockSize - 1) / blockSize;
if (numBlocks > 0) {
ExtractMatricesAndTargetsImpl<blockSize> << < numBlocks, blockSize, 0, stream >> > (linearSystem, matCount, rowSize, matrices, targets, matrixDiag);
}
}
//System size <= ROW_SIZE, the number of rows to decompose;
// in pfound and pair classification we don't need the last line
template <int BlockSize, int RowSize, int SystemSize>
__launch_bounds__(BlockSize)
__global__ void CholeskyDecompositionImpl(float* lower, int matCount) {
const int logicalWarpSize = (RowSize < 32 ? RowSize : 32);
const int matricesPerBlock = BlockSize / logicalWarpSize;
const int localMatrixIdx = threadIdx.x / logicalWarpSize;
const int N = RowSize / logicalWarpSize;
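        // One logical warp (min(RowSize, 32) lanes) factorizes one packed lower-triangular matrix;
        // each lane keeps N entries of the current row in registers, one per 32-column chunk.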
int matrixIdx = blockIdx.x * matricesPerBlock + localMatrixIdx;
if (matrixIdx >= matCount)
return;
lower += ((size_t)matrixIdx) * (RowSize * (RowSize + 1) / 2);
const int x = threadIdx.x & (logicalWarpSize - 1);
float currentLine[N];
__shared__ float LjjData[matricesPerBlock];
volatile float* Ljj = &LjjData[localMatrixIdx];
if (x == 0) {
const float l00 = __ldg(lower);
lower[0] = sqrtf(l00);
}
__syncwarp();
// #pragma unroll
for (int row = 1; row < SystemSize; ++row) {
//we don't modify this value in matrix, so it's pretty safe to load it with ldg.
#pragma unroll
for (int k = 0; k < N; ++k) {
const int col = x + 32 * k;
currentLine[k] = col <= row ? LdgWithFallback(lower, row * (row + 1) / 2 + col) : 0.0f;
}
__syncwarp();
int reduceSize = 1;
#pragma unroll
for (int col = 0; col < row; ++col) {
if (col & reduceSize) {
reduceSize <<= 1;
}
float tmp = 0.0f;
{
#pragma unroll
for (int k = 0; k < N; ++k) {
const int colIdx = x + k * 32;
if (colIdx <= col) {
const float val = lower[col * (col + 1) / 2 + colIdx];
tmp += colIdx < col ? val * currentLine[k] : 0;
if (colIdx == col) {
Ljj[0] = val;
}
}
}
}
float sum = ShuffleReduce(x, tmp, min(reduceSize, 32));
sum = __shfl_sync(0xFFFFFF, sum, 0, logicalWarpSize);
const float ljj = Ljj[0];
#pragma unroll
for (int k = 0; k < N; ++k) {
const int colIdx = x + 32 * k;
if (colIdx == col) {
currentLine[k] = ljj > 0 ? (currentLine[k] - sum) / (ljj + 1e-7f) : 0.0f;
}
}
__syncwarp();
}
{
float tmp = 0;
#pragma unroll
for (int k = 0; k < N; ++k) {
const int col = x + 32 * k;
if (col < row) {
tmp += currentLine[k] * currentLine[k];
}
}
float sum = ShuffleReduce(x, tmp, min(reduceSize, 32));
sum = __shfl_sync(0xFFFFFF, sum, 0, logicalWarpSize);
__syncwarp();
#pragma unroll
for (int k = 0; k < N; ++k) {
const int rowIdx = x + 32 * k;
if (rowIdx == row) {
const float tmp2 = currentLine[k] - sum;
currentLine[k] = tmp2 > 1e-8f ? sqrtf(tmp2) : 1e-4f;
}
}
__syncwarp();
}
#pragma unroll
for (int k = 0; k < N; ++k) {
const int colIdx = x + 32 * k;
if (colIdx <= row) {
WriteThrough(lower + row * (row + 1) / 2 + colIdx, currentLine[k]);
}
}
__syncwarp();
}
}
class TDirectSystem {
private:
const float* Data;
float* Target;
public:
__device__ TDirectSystem(const float* data, float* target, int rowSize)
: Data(data)
, Target(target)
{
(void)rowSize;
}
__forceinline__ __device__ float Get(int row, int col) const {
return LdgWithFallback(Data, row * (row + 1) / 2 + col);
}
__forceinline__ __device__ float GetTarget(int row) const {
return LdgWithFallback(Target, row);
}
__forceinline__ __device__ void WriteSolution(int row, float solution) const {
WriteThrough(Target + row, solution);
}
};
class TTransposedSystem {
private:
const float* Data;
float* Target;
int RowSize;
public:
__device__ TTransposedSystem(const float* data, float* target, int rowSize)
: Data(data)
, Target(target)
, RowSize(rowSize) {
}
__forceinline__ __device__ float Get(int row, int col) const {
row = RowSize - row - 1;
col = RowSize - col - 1;
return LdgWithFallback(Data, col * (col + 1) / 2 + row);
}
__forceinline__ __device__ float GetTarget(int row) const {
return LdgWithFallback(Target, RowSize - row - 1);
}
__forceinline__ __device__ void WriteSolution(int row, float solution) const {
WriteThrough(Target + RowSize - row - 1, solution);
}
};
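    // Solves matricesPerBlock triangular systems per block by forward substitution: with
    // TDirectSystem this is L * x = b, while TTransposedSystem re-indexes rows and columns so the
    // same loop performs the back-substitution with L^T.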
template <class TLowerMatrixSystem, int BlockSize>
__global__ void SolveForwardImpl(const float* lower, int rowSize, int systemSize, int matCount, float* targets) {
const int matricesPerBlock = BlockSize / rowSize;
int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
const int col = threadIdx.x & (rowSize - 1);
const int inBlockOffset = threadIdx.x / rowSize;
__shared__ float solutionsData[BlockSize];
__shared__ float dotProductCacheData[BlockSize];
if (matrixIdx >= matCount) {
return;
}
lower += ((size_t)matrixIdx) * rowSize * (rowSize + 1) / 2;
targets += matrixIdx * rowSize;
float* solutions = &solutionsData[inBlockOffset * rowSize];
float* dotProductCache = &dotProductCacheData[inBlockOffset * rowSize];
TLowerMatrixSystem system(lower, targets, systemSize);
solutions[col] = col < systemSize ? system.GetTarget(col) : 0;
__syncthreads();
int reduceSize = 1;
#pragma unroll
for (int row = 0; row < systemSize; ++row) {
if (row & reduceSize) {
reduceSize <<= 1;
}
dotProductCache[col] = col <= row ? system.Get(row, col) : 0.0f;
__syncthreads();
float lastCoeff = 0.0f;
if (col == 0) {
lastCoeff = dotProductCache[row];
dotProductCache[row] = 0;
}
__syncthreads();
dotProductCache[col] *= solutions[col];
__syncthreads();
const float sum = FastInBlockReduce(col, dotProductCache, reduceSize);
if (col == 0) {
solutions[row] = lastCoeff > 1e-20f ? (solutions[row] - sum) / (lastCoeff + 1e-20f) : 0;
}
__syncthreads();
}
if (col < systemSize) {
system.WriteSolution(col, solutions[col]);
}
}
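    // Regularizes each packed lower-triangular matrix: off-diagonal entries get -lambda0 * cellPrior,
    // diagonal entries get lambda0 * (1 - cellPrior) + lambda1 plus a small trace-based boost that
    // keeps the diagonal strictly positive.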
template <int BLOCK_SIZE>
__global__ void RegularizeImpl(float* lower, int rowSize,
int matCount, float lambda0, float lambda1) {
const int matricesPerBlock = BLOCK_SIZE / rowSize;
int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
lower += ((size_t)matrixIdx) * rowSize * (rowSize + 1) / 2;
const int col = threadIdx.x & (rowSize - 1);
if (matrixIdx >= matCount) {
return;
}
const float cellPrior = 1.0f / rowSize;
float trace = 0;
float pseudoRank = 0;
for (int row = 0; row < rowSize; ++row) {
const float val = __ldg(lower + row * (row + 1) / 2 + row);
trace += val;
pseudoRank += val > 1e-9f;
}
__syncthreads();
#pragma unroll 8
for (int row = 0; row < rowSize; ++row) {
//beta prior (uniform). Makes rank(lower) = rowSize - 1
if (col <= row) {
float val = __ldg(lower + row * (row + 1) / 2 + col);
if (col == row && val <= 1e-7f) {
val += trace / pseudoRank + 0.1f;
}
if (col == row) {
val += 0.05f * trace / pseudoRank + 1e-20f;
}
val += col < row ? -lambda0 * cellPrior : (lambda0 * (1 - cellPrior) + lambda1);
WriteThrough(lower + row * (row + 1) / 2 + col, val);
}
}
}
void Regularize(float* matrices, int rowSize, int matCount, double lambdaNonDiag, double lambdaDiag, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize;
if (numBlocks > 0) {
hipLaunchKernelGGL(( RegularizeImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, matrices, rowSize, matCount, lambdaNonDiag, lambdaDiag);
}
}
template <int BLOCK_SIZE>
__global__ void ZeroMeanImpl(float* solutions, int rowSize, int matCount) {
const int matricesPerBlock = BLOCK_SIZE / rowSize;
const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
const int tid = threadIdx.x;
const int col = threadIdx.x & (rowSize - 1);
const int inBlockOffset = threadIdx.x / rowSize;
__shared__ double beta[BLOCK_SIZE];
__shared__ double line[BLOCK_SIZE];
if (matrixIdx >= matCount) {
return;
}
solutions += matrixIdx * rowSize;
beta[tid] = col != (rowSize - 1) ? solutions[col] : 0;
line[tid] = beta[tid];
__syncthreads();
for (int s = rowSize >> 1; s > 0; s >>= 1) {
if (col < s) {
line[tid] += line[tid + s];
}
__syncthreads();
}
beta[tid] -= line[rowSize * inBlockOffset] / rowSize;
solutions[col] = beta[tid];
}
template <int BLOCK_SIZE>
__global__ void CalcScoresCholeskyImpl(const float* linearSystem,
const float* solutions,
int rowSize,
int matCount,
float* scores) {
const int matricesPerBlock = BLOCK_SIZE / rowSize;
const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
const int tid = threadIdx.x;
const int col = threadIdx.x & (rowSize - 1);
const int inBlockOffset = threadIdx.x / rowSize;
__shared__ float beta[BLOCK_SIZE];
__shared__ float line[BLOCK_SIZE];
if (matrixIdx >= matCount) {
return;
}
linearSystem += ((size_t)matrixIdx) * (rowSize * (rowSize + 1) / 2 + rowSize);
solutions += matrixIdx * rowSize;
scores += matrixIdx;
beta[tid] = solutions[col];
line[tid] = beta[tid];
const float tidTarget = linearSystem[rowSize * (rowSize + 1) / 2 + col];
__syncthreads();
//we store matrix cholesky-decomposition. For score we need to maximize ||beta^{T}L||^2 - 2 <beta, y> (1)
    //score to minimize: (A\beta - y)^{T}W(A\beta - y) + \beta^{T} J \beta, where J is some positive-definite matrix
//we don't need square sum, so we maximize (1)
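        // Each lane computes beta[col] * (y[col] - 0.5 * (A * beta)[col]); the block reduction below
        // therefore accumulates beta^T y - 0.5 * beta^T A beta, which is written out as the score.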
{
float partb1 = 0;
#pragma unroll 4
for (int row = 0; row < rowSize; ++row) {
double val = col <= row ? LdgWithFallback(linearSystem, row * (row + 1) / 2 + col)
: LdgWithFallback(linearSystem, col * (col + 1) / 2 + row);
val *= beta[rowSize * inBlockOffset + row];
partb1 += val;
}
line[tid] = beta[tid] * (tidTarget - 0.5 * partb1);
}
__syncthreads();
for (int s = rowSize >> 1; s > 0; s >>= 1) {
if (col < s) {
line[tid] += line[tid + s];
}
__syncthreads();
}
if (col == 0) {
scores[0] = line[tid];
}
}
//Inplace solver
template <int BLOCK_SIZE, int SOLVER_BLOCK_SIZE, int REMOVE_LAST>
inline void RunCholeskySolver(float* matrices, float* solutions,
int rowSize, int matCount,
TCudaStream stream) {
const int numBlocksCholesky = (matCount * min(rowSize, 32) + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (numBlocksCholesky > 0) {
#define CHOLESKY_DECOMPOSITION(ROW_SIZE) \
const int SYSTEM_SIZE = ROW_SIZE - REMOVE_LAST; \
hipLaunchKernelGGL(( CholeskyDecompositionImpl<BLOCK_SIZE, ROW_SIZE, SYSTEM_SIZE>) , dim3(numBlocksCholesky), dim3(BLOCK_SIZE), 0, stream, matrices, matCount); \
break;
switch (rowSize) {
case 1: {
CHOLESKY_DECOMPOSITION(1);
}
case 2: {
CHOLESKY_DECOMPOSITION(2);
}
case 4: {
CHOLESKY_DECOMPOSITION(4);
}
case 8: {
CHOLESKY_DECOMPOSITION(8);
}
case 16: {
CHOLESKY_DECOMPOSITION(16);
}
case 32: {
CHOLESKY_DECOMPOSITION(32);
}
case 64: {
CHOLESKY_DECOMPOSITION(64);
}
case 128: {
CHOLESKY_DECOMPOSITION(128);
}
case 256: {
CHOLESKY_DECOMPOSITION(256);
}
}
const int solverNumBlocks = (matCount * rowSize + SOLVER_BLOCK_SIZE - 1) / SOLVER_BLOCK_SIZE;
if (solverNumBlocks) {
SolveForwardImpl<TDirectSystem, SOLVER_BLOCK_SIZE> << < solverNumBlocks, SOLVER_BLOCK_SIZE, 0, stream >> > (matrices, rowSize, rowSize - REMOVE_LAST, matCount, solutions);
SolveForwardImpl<TTransposedSystem, SOLVER_BLOCK_SIZE> << < solverNumBlocks, SOLVER_BLOCK_SIZE, 0, stream >> > (matrices, rowSize, rowSize - REMOVE_LAST, matCount, solutions);
}
}
}
template <int BLOCK_SIZE>
inline void RunCalcScores(const float* linearSystem, const float* solutions, int rowSize, float* scores,
int matCount, TCudaStream stream) {
const int numBlocks = (matCount * BLOCK_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE;
CalcScoresCholeskyImpl<BLOCK_SIZE> << < numBlocks, BLOCK_SIZE, 0, stream >> >(linearSystem, solutions, rowSize, matCount, scores);
}
void ZeroMean(float* solutions, int rowSize, int matCount, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize;
if (numBlocks > 0) {
ZeroMeanImpl<blockSize> << < numBlocks, blockSize, 0, stream >> > (solutions, rowSize, matCount);
}
}
void CalcScores(const float* linearSystem, const float* solutions,
float* scores, int rowSize, int matCount, TCudaStream stream)
{
if (rowSize == 256) {
RunCalcScores<256>(linearSystem, solutions, rowSize, scores, matCount, stream);
} else {
RunCalcScores<128>(linearSystem, solutions, rowSize, scores, matCount, stream);
}
}
void CholeskySolver(float* matrices, float* solutions, int rowSize, int matCount, bool removeLast, TCudaStream stream)
{
if (removeLast) {
RunCholeskySolver<128, 256, 1>(matrices, solutions, rowSize, matCount, stream);
} else {
RunCholeskySolver<128, 256, 0>(matrices, solutions, rowSize, matCount, stream);
}
}
void SolverForward(float* matrices, float* solutions, int rowSize, int matCount, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize;
if (numBlocks > 0) {
hipLaunchKernelGGL(( SolveForwardImpl<TDirectSystem, blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, matrices, rowSize, rowSize - 1, matCount, solutions);
}
}
void SolverBackward(float* matrices, float* solutions, int rowSize, int matCount, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize;
if (numBlocks > 0) {
hipLaunchKernelGGL(( SolveForwardImpl<TTransposedSystem, blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, matrices, rowSize, rowSize - 1, matCount, solutions);
}
}
}
| 7f106bcc4b668c89f1ef8578611c72be6d571131.cu | #include "split_pairwise.cuh"
#include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
namespace NKernel {
//System size <= ROW_SIZE — number of rows for decompose,
// in pfound and pair classification we don't need last line
template <int BLOCK_SIZE>
__launch_bounds__(BLOCK_SIZE)
__global__ void ExtractMatricesAndTargetsImpl(const float* linearSystem,
const int matCount,
const int rowSize,
float* matrices,
float* targets,
float* matrixDiag
) {
const int lineSize = 32;
const int matricesPerBlock = BLOCK_SIZE / lineSize;
const int localMatrixIdx = threadIdx.x / lineSize;
int matrixIdx = blockIdx.x * matricesPerBlock + localMatrixIdx;
if (matrixIdx >= matCount) {
return;
}
linearSystem += ((size_t)matrixIdx) * (rowSize * (rowSize + 1) / 2 + rowSize);
matrices += ((size_t)matrixIdx) * (rowSize * (rowSize + 1) / 2);
targets += ((size_t)matrixIdx) * rowSize;
matrixDiag += ((size_t)matrixIdx) * rowSize;
const int x = threadIdx.x & (lineSize - 1);
#pragma unroll 8
for (int i = x; i < rowSize * (rowSize + 1) / 2; i += lineSize) {
matrices[i] = linearSystem[i];
}
#pragma unroll 8
for (int i = x; i < rowSize; i += lineSize) {
targets[i] = linearSystem[rowSize * (rowSize + 1) / 2 + i];
}
#pragma unroll 8
for (int i = x; i < rowSize; i += lineSize) {
matrixDiag[i] = linearSystem[i * (i + 1) / 2 + i];
}
}
void ExtractMatricesAndTargets(const float* linearSystem, int matCount, int rowSize, float* matrices, float* targets, float* matrixDiag, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * 32 + blockSize - 1) / blockSize;
if (numBlocks > 0) {
ExtractMatricesAndTargetsImpl<blockSize> << < numBlocks, blockSize, 0, stream >> > (linearSystem, matCount, rowSize, matrices, targets, matrixDiag);
}
}
//System size <= ROW_SIZE — number of rows for decompose,
// in pfound and pair classification we don't need last line
template <int BlockSize, int RowSize, int SystemSize>
__launch_bounds__(BlockSize)
__global__ void CholeskyDecompositionImpl(float* lower, int matCount) {
const int logicalWarpSize = (RowSize < 32 ? RowSize : 32);
const int matricesPerBlock = BlockSize / logicalWarpSize;
const int localMatrixIdx = threadIdx.x / logicalWarpSize;
const int N = RowSize / logicalWarpSize;
int matrixIdx = blockIdx.x * matricesPerBlock + localMatrixIdx;
if (matrixIdx >= matCount)
return;
lower += ((size_t)matrixIdx) * (RowSize * (RowSize + 1) / 2);
const int x = threadIdx.x & (logicalWarpSize - 1);
float currentLine[N];
__shared__ float LjjData[matricesPerBlock];
volatile float* Ljj = &LjjData[localMatrixIdx];
if (x == 0) {
const float l00 = __ldg(lower);
lower[0] = sqrtf(l00);
}
__syncwarp();
// #pragma unroll
for (int row = 1; row < SystemSize; ++row) {
//we don't modify this value in matrix, so it's pretty safe to load it with ldg.
#pragma unroll
for (int k = 0; k < N; ++k) {
const int col = x + 32 * k;
currentLine[k] = col <= row ? LdgWithFallback(lower, row * (row + 1) / 2 + col) : 0.0f;
}
__syncwarp();
int reduceSize = 1;
#pragma unroll
for (int col = 0; col < row; ++col) {
if (col & reduceSize) {
reduceSize <<= 1;
}
float tmp = 0.0f;
{
#pragma unroll
for (int k = 0; k < N; ++k) {
const int colIdx = x + k * 32;
if (colIdx <= col) {
const float val = lower[col * (col + 1) / 2 + colIdx];
tmp += colIdx < col ? val * currentLine[k] : 0;
if (colIdx == col) {
Ljj[0] = val;
}
}
}
}
float sum = ShuffleReduce(x, tmp, min(reduceSize, 32));
sum = __shfl_sync(0xFFFFFF, sum, 0, logicalWarpSize);
const float ljj = Ljj[0];
#pragma unroll
for (int k = 0; k < N; ++k) {
const int colIdx = x + 32 * k;
if (colIdx == col) {
currentLine[k] = ljj > 0 ? (currentLine[k] - sum) / (ljj + 1e-7f) : 0.0f;
}
}
__syncwarp();
}
{
float tmp = 0;
#pragma unroll
for (int k = 0; k < N; ++k) {
const int col = x + 32 * k;
if (col < row) {
tmp += currentLine[k] * currentLine[k];
}
}
float sum = ShuffleReduce(x, tmp, min(reduceSize, 32));
sum = __shfl_sync(0xFFFFFF, sum, 0, logicalWarpSize);
__syncwarp();
#pragma unroll
for (int k = 0; k < N; ++k) {
const int rowIdx = x + 32 * k;
if (rowIdx == row) {
const float tmp2 = currentLine[k] - sum;
currentLine[k] = tmp2 > 1e-8f ? sqrtf(tmp2) : 1e-4f;
}
}
__syncwarp();
}
#pragma unroll
for (int k = 0; k < N; ++k) {
const int colIdx = x + 32 * k;
if (colIdx <= row) {
WriteThrough(lower + row * (row + 1) / 2 + colIdx, currentLine[k]);
}
}
__syncwarp();
}
}
class TDirectSystem {
private:
const float* Data;
float* Target;
public:
__device__ TDirectSystem(const float* data, float* target, int rowSize)
: Data(data)
, Target(target)
{
(void)rowSize;
}
__forceinline__ __device__ float Get(int row, int col) const {
return LdgWithFallback(Data, row * (row + 1) / 2 + col);
}
__forceinline__ __device__ float GetTarget(int row) const {
return LdgWithFallback(Target, row);
}
__forceinline__ __device__ void WriteSolution(int row, float solution) const {
WriteThrough(Target + row, solution);
}
};
class TTransposedSystem {
private:
const float* Data;
float* Target;
int RowSize;
public:
__device__ TTransposedSystem(const float* data, float* target, int rowSize)
: Data(data)
, Target(target)
, RowSize(rowSize) {
}
__forceinline__ __device__ float Get(int row, int col) const {
row = RowSize - row - 1;
col = RowSize - col - 1;
return LdgWithFallback(Data, col * (col + 1) / 2 + row);
}
__forceinline__ __device__ float GetTarget(int row) const {
return LdgWithFallback(Target, RowSize - row - 1);
}
__forceinline__ __device__ void WriteSolution(int row, float solution) const {
WriteThrough(Target + RowSize - row - 1, solution);
}
};
template <class TLowerMatrixSystem, int BlockSize>
__global__ void SolveForwardImpl(const float* lower, int rowSize, int systemSize, int matCount, float* targets) {
const int matricesPerBlock = BlockSize / rowSize;
int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
const int col = threadIdx.x & (rowSize - 1);
const int inBlockOffset = threadIdx.x / rowSize;
__shared__ float solutionsData[BlockSize];
__shared__ float dotProductCacheData[BlockSize];
if (matrixIdx >= matCount) {
return;
}
lower += ((size_t)matrixIdx) * rowSize * (rowSize + 1) / 2;
targets += matrixIdx * rowSize;
float* solutions = &solutionsData[inBlockOffset * rowSize];
float* dotProductCache = &dotProductCacheData[inBlockOffset * rowSize];
TLowerMatrixSystem system(lower, targets, systemSize);
solutions[col] = col < systemSize ? system.GetTarget(col) : 0;
__syncthreads();
int reduceSize = 1;
#pragma unroll
for (int row = 0; row < systemSize; ++row) {
if (row & reduceSize) {
reduceSize <<= 1;
}
dotProductCache[col] = col <= row ? system.Get(row, col) : 0.0f;
__syncthreads();
float lastCoeff = 0.0f;
if (col == 0) {
lastCoeff = dotProductCache[row];
dotProductCache[row] = 0;
}
__syncthreads();
dotProductCache[col] *= solutions[col];
__syncthreads();
const float sum = FastInBlockReduce(col, dotProductCache, reduceSize);
if (col == 0) {
solutions[row] = lastCoeff > 1e-20f ? (solutions[row] - sum) / (lastCoeff + 1e-20f) : 0;
}
__syncthreads();
}
if (col < systemSize) {
system.WriteSolution(col, solutions[col]);
}
}
template <int BLOCK_SIZE>
__global__ void RegularizeImpl(float* lower, int rowSize,
int matCount, float lambda0, float lambda1) {
const int matricesPerBlock = BLOCK_SIZE / rowSize;
int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
lower += ((size_t)matrixIdx) * rowSize * (rowSize + 1) / 2;
const int col = threadIdx.x & (rowSize - 1);
if (matrixIdx >= matCount) {
return;
}
const float cellPrior = 1.0f / rowSize;
float trace = 0;
float pseudoRank = 0;
for (int row = 0; row < rowSize; ++row) {
const float val = __ldg(lower + row * (row + 1) / 2 + row);
trace += val;
pseudoRank += val > 1e-9f;
}
__syncthreads();
#pragma unroll 8
for (int row = 0; row < rowSize; ++row) {
//beta prior (uniform). Makes rank(lower) = rowSize - 1
if (col <= row) {
float val = __ldg(lower + row * (row + 1) / 2 + col);
if (col == row && val <= 1e-7f) {
val += trace / pseudoRank + 0.1f;
}
if (col == row) {
val += 0.05f * trace / pseudoRank + 1e-20f;
}
val += col < row ? -lambda0 * cellPrior : (lambda0 * (1 - cellPrior) + lambda1);
WriteThrough(lower + row * (row + 1) / 2 + col, val);
}
}
}
void Regularize(float* matrices, int rowSize, int matCount, double lambdaNonDiag, double lambdaDiag, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize;
if (numBlocks > 0) {
RegularizeImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(matrices, rowSize, matCount, lambdaNonDiag, lambdaDiag);
}
}
template <int BLOCK_SIZE>
__global__ void ZeroMeanImpl(float* solutions, int rowSize, int matCount) {
const int matricesPerBlock = BLOCK_SIZE / rowSize;
const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
const int tid = threadIdx.x;
const int col = threadIdx.x & (rowSize - 1);
const int inBlockOffset = threadIdx.x / rowSize;
__shared__ double beta[BLOCK_SIZE];
__shared__ double line[BLOCK_SIZE];
if (matrixIdx >= matCount) {
return;
}
solutions += matrixIdx * rowSize;
beta[tid] = col != (rowSize - 1) ? solutions[col] : 0;
line[tid] = beta[tid];
__syncthreads();
for (int s = rowSize >> 1; s > 0; s >>= 1) {
if (col < s) {
line[tid] += line[tid + s];
}
__syncthreads();
}
beta[tid] -= line[rowSize * inBlockOffset] / rowSize;
solutions[col] = beta[tid];
}
template <int BLOCK_SIZE>
__global__ void CalcScoresCholeskyImpl(const float* linearSystem,
const float* solutions,
int rowSize,
int matCount,
float* scores) {
const int matricesPerBlock = BLOCK_SIZE / rowSize;
const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
const int tid = threadIdx.x;
const int col = threadIdx.x & (rowSize - 1);
const int inBlockOffset = threadIdx.x / rowSize;
__shared__ float beta[BLOCK_SIZE];
__shared__ float line[BLOCK_SIZE];
if (matrixIdx >= matCount) {
return;
}
linearSystem += ((size_t)matrixIdx) * (rowSize * (rowSize + 1) / 2 + rowSize);
solutions += matrixIdx * rowSize;
scores += matrixIdx;
beta[tid] = solutions[col];
line[tid] = beta[tid];
const float tidTarget = linearSystem[rowSize * (rowSize + 1) / 2 + col];
__syncthreads();
//we store matrix cholesky-decomposition. For score we need to maximize ||beta^{T}L||^2 - 2 <beta, y> (1)
    //score to minimize: (A\beta - y)^{T}W(A\beta - y) + \beta^{T} J \beta, where J is some positive-definite matrix
//we don't need square sum, so we maximize (1)
{
float partb1 = 0;
#pragma unroll 4
for (int row = 0; row < rowSize; ++row) {
double val = col <= row ? LdgWithFallback(linearSystem, row * (row + 1) / 2 + col)
: LdgWithFallback(linearSystem, col * (col + 1) / 2 + row);
val *= beta[rowSize * inBlockOffset + row];
partb1 += val;
}
line[tid] = beta[tid] * (tidTarget - 0.5 * partb1);
}
__syncthreads();
for (int s = rowSize >> 1; s > 0; s >>= 1) {
if (col < s) {
line[tid] += line[tid + s];
}
__syncthreads();
}
if (col == 0) {
scores[0] = line[tid];
}
}
//Inplace solver
template <int BLOCK_SIZE, int SOLVER_BLOCK_SIZE, int REMOVE_LAST>
inline void RunCholeskySolver(float* matrices, float* solutions,
int rowSize, int matCount,
TCudaStream stream) {
const int numBlocksCholesky = (matCount * min(rowSize, 32) + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (numBlocksCholesky > 0) {
#define CHOLESKY_DECOMPOSITION(ROW_SIZE) \
const int SYSTEM_SIZE = ROW_SIZE - REMOVE_LAST; \
CholeskyDecompositionImpl<BLOCK_SIZE, ROW_SIZE, SYSTEM_SIZE> <<< numBlocksCholesky, BLOCK_SIZE, 0, stream>>> (matrices, matCount); \
break;
switch (rowSize) {
case 1: {
CHOLESKY_DECOMPOSITION(1);
}
case 2: {
CHOLESKY_DECOMPOSITION(2);
}
case 4: {
CHOLESKY_DECOMPOSITION(4);
}
case 8: {
CHOLESKY_DECOMPOSITION(8);
}
case 16: {
CHOLESKY_DECOMPOSITION(16);
}
case 32: {
CHOLESKY_DECOMPOSITION(32);
}
case 64: {
CHOLESKY_DECOMPOSITION(64);
}
case 128: {
CHOLESKY_DECOMPOSITION(128);
}
case 256: {
CHOLESKY_DECOMPOSITION(256);
}
}
const int solverNumBlocks = (matCount * rowSize + SOLVER_BLOCK_SIZE - 1) / SOLVER_BLOCK_SIZE;
if (solverNumBlocks) {
SolveForwardImpl<TDirectSystem, SOLVER_BLOCK_SIZE> << < solverNumBlocks, SOLVER_BLOCK_SIZE, 0, stream >> > (matrices, rowSize, rowSize - REMOVE_LAST, matCount, solutions);
SolveForwardImpl<TTransposedSystem, SOLVER_BLOCK_SIZE> << < solverNumBlocks, SOLVER_BLOCK_SIZE, 0, stream >> > (matrices, rowSize, rowSize - REMOVE_LAST, matCount, solutions);
}
}
}
template <int BLOCK_SIZE>
inline void RunCalcScores(const float* linearSystem, const float* solutions, int rowSize, float* scores,
int matCount, TCudaStream stream) {
const int numBlocks = (matCount * BLOCK_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE;
CalcScoresCholeskyImpl<BLOCK_SIZE> << < numBlocks, BLOCK_SIZE, 0, stream >> >(linearSystem, solutions, rowSize, matCount, scores);
}
void ZeroMean(float* solutions, int rowSize, int matCount, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize;
if (numBlocks > 0) {
ZeroMeanImpl<blockSize> << < numBlocks, blockSize, 0, stream >> > (solutions, rowSize, matCount);
}
}
void CalcScores(const float* linearSystem, const float* solutions,
float* scores, int rowSize, int matCount, TCudaStream stream)
{
if (rowSize == 256) {
RunCalcScores<256>(linearSystem, solutions, rowSize, scores, matCount, stream);
} else {
RunCalcScores<128>(linearSystem, solutions, rowSize, scores, matCount, stream);
}
}
void CholeskySolver(float* matrices, float* solutions, int rowSize, int matCount, bool removeLast, TCudaStream stream)
{
if (removeLast) {
RunCholeskySolver<128, 256, 1>(matrices, solutions, rowSize, matCount, stream);
} else {
RunCholeskySolver<128, 256, 0>(matrices, solutions, rowSize, matCount, stream);
}
}
void SolverForward(float* matrices, float* solutions, int rowSize, int matCount, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize;
if (numBlocks > 0) {
SolveForwardImpl<TDirectSystem, blockSize><<<numBlocks, blockSize, 0, stream>>>(matrices, rowSize, rowSize - 1, matCount, solutions);
}
}
void SolverBackward(float* matrices, float* solutions, int rowSize, int matCount, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize;
if (numBlocks > 0) {
SolveForwardImpl<TTransposedSystem, blockSize><<<numBlocks, blockSize, 0, stream>>>(matrices, rowSize, rowSize - 1, matCount, solutions);
}
}
}
|
339d4a8df42a4cf6202b7211206077dc9d9345d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/cuda/atomic.cuh"
#include "oneflow/user/kernels/model_update_kernel_util.h"
namespace oneflow {
namespace {
template<typename T, typename G>
__global__ void SGDUpdateGpu(int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
SGDUpdateFunctor<T, G>()(model_diff + i, model + i, scale, l1, l2, weight_decay,
learning_rate_val);
}
}
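// Sparse (indexed-slices) SGD update: only rows whose index lies in [lower_bound, upper_bound)
// belong to this shard; they are addressed in the local model at (index - lower_bound) * feature_size.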
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesSGDUpdateGpu(float weight_decay, const IDX feature_size,
const int64_t lower_bound, const int64_t upper_bound,
const IDX* num_unique_instance,
const float* learning_rate, const K* indices,
const T* values, T* model) {
const int64_t n = *num_unique_instance * feature_size;
const T lr = *learning_rate;
CUDA_1D_KERNEL_LOOP_T(IDX, i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
SGDUpdateFunctor<T, T>()(values + i, model + model_idx, static_cast<T>(1), 0.0, 0.0,
weight_decay, lr);
}
}
}
} // namespace
template<typename T, typename G>
struct SGDUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model);
};
template<typename T, typename G>
void SGDUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model) {
hipLaunchKernelGGL(( SGDUpdateGpu<T, G>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, weight_decay, learning_rate_val, learning_rate, scale_by_ptr, skip_if,
model_diff, model);
}
template<typename T>
struct SGDUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model);
};
template<typename T>
void SGDUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model) {
SGDUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, weight_decay, learning_rate_val, learning_rate, scale_by_ptr, skip_if,
reinterpret_cast<const half*>(model_diff), model);
}
template struct SGDUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct SGDUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct SGDUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesSGDUpdateKernelUtil<DeviceType::kGPU, T, K, IDX> {
static void Update(DeviceCtx* ctx, float weight_decay, int64_t num_indices, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices, const T* values, T* model);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesSGDUpdateKernelUtil<DeviceType::kGPU, T, K, IDX>::Update(
DeviceCtx* ctx, float weight_decay, int64_t num_indices, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices, const T* values, T* model) {
hipLaunchKernelGGL(( IndexedSlicesSGDUpdateGpu<T, K, IDX>)
, dim3(BlocksNum4ThreadsNum(num_indices * feature_size)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->cuda_stream(), weight_decay, feature_size, lower_bound, upper_bound,
num_unique_instance, learning_rate, indices, values, model);
}
#define INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU(val_type_pair, key_type_pair, \
idx_type_pair) \
template struct IndexedSlicesSGDUpdateKernelUtil< \
DeviceType::kGPU, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU
namespace {
template<typename T, typename G>
__global__ void MomentumUpdateGpu(int64_t n, T scale, float l1, float l2, float beta,
float weight_decay, float learning_rate_val,
const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model,
T* momentum) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
MomentumUpdateFunctor<T, G>()(model_diff + i, model + i, momentum + i, scale, l1, l2, beta,
weight_decay, learning_rate_val);
}
}
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesMomentumUpdateGpu(T beta, float weight_decay, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound,
const IDX* num_unique_instance,
const float* learning_rate, const K* indices,
const T* values, T* model, T* momentum) {
const int64_t n = *num_unique_instance * feature_size;
const T lr = *learning_rate;
CUDA_1D_KERNEL_LOOP(i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
MomentumUpdateFunctor<T, T>()(values + i, model + model_idx, momentum + model_idx,
static_cast<T>(1), 0.0, 0.0, beta, weight_decay, lr);
}
}
}
} // namespace
template<typename T, typename G>
struct MomentumUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta,
float weight_decay, float learning_rate_val, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model,
T* momentum);
};
template<typename T, typename G>
void MomentumUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta, float weight_decay,
float learning_rate_val, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* momentum) {
hipLaunchKernelGGL(( MomentumUpdateGpu<T, G>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, beta, weight_decay, learning_rate_val, learning_rate, scale_by_ptr,
skip_if, model_diff, model, momentum);
}
template<typename T>
struct MomentumUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta,
float weight_decay, float learning_rate_val, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff,
T* model, T* momentum);
};
template<typename T>
void MomentumUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta, float weight_decay,
float learning_rate_val, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model, T* momentum) {
MomentumUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, beta, weight_decay, learning_rate_val, learning_rate, scale_by_ptr,
skip_if, reinterpret_cast<const half*>(model_diff), model, momentum);
}
template struct MomentumUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct MomentumUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct MomentumUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesMomentumMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX> {
static void Update(DeviceCtx* ctx, T beta, float weight_decay, int64_t num_instance,
int64_t feature_size, int64_t lower_bound, int64_t upper_bound,
const IDX* num_unique_instance, const float* learning_rate, const K* indices,
const T* values, T* model, T* momentum);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesMomentumMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX>::Update(
DeviceCtx* ctx, T beta, float weight_decay, int64_t num_instance, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices, const T* values, T* model, T* momentum) {
hipLaunchKernelGGL(( IndexedSlicesMomentumUpdateGpu<T, K, IDX>), dim3(BlocksNum4ThreadsNum(num_instance * feature_size)),
dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
beta, weight_decay, feature_size, lower_bound, upper_bound, num_unique_instance,
learning_rate, indices, values, model, momentum);
}
#define INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU( \
val_type_pair, key_type_pair, idx_type_pair) \
template struct IndexedSlicesMomentumMdUpdateKernelUtil< \
DeviceType::kGPU, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU
namespace {
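// Writes 1 - beta^(train_step + 1), the bias-correction factor Adam uses for the current step.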
__global__ void BiasCorrectionFactorKernelGpu(float beta, const int64_t* train_step, float* out) {
const auto exponent = static_cast<double>(*train_step + 1);
const float bias_correction_factor = 1.0 - static_cast<float>(pow(beta, exponent));
*out = bias_correction_factor;
}
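// Fused Adam step: optionally rescales the gradient, reads the learning rate and bias corrections
// from device pointers when provided, and updates m, v (and max_v when amsgrad) together with the
// model in a single pass.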
template<typename T, typename G>
__global__ void AdamUpdateGpu(int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float learning_rate_val,
float bias_correction1_val, float bias_correction2_val,
const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const G* model_diff, T* model,
T* m, T* v, T* max_v) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
if (bias_correction1_ptr != nullptr) { bias_correction1_val = *bias_correction1_ptr; }
if (bias_correction2_ptr != nullptr) { bias_correction2_val = *bias_correction2_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
AdamUpdateFunctor<T, G>()(model_diff + i, model + i, m + i, v + i, max_v + i, scale, l1, l2,
beta1, beta2, epsilon, weight_decay, amsgrad, bias_correction1_val,
bias_correction2_val, learning_rate_val);
}
}
template<typename T>
__global__ void AdamUpdateBetaTGpu(const T beta1, const T beta2, const int64_t* skip_if, T* beta1_t,
T* beta2_t) {
if (skip_if != nullptr && *skip_if != 0) { return; }
*beta1_t *= beta1;
*beta2_t *= beta2;
}
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesAdamUpdateGpu(
float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float lr, int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate,
const float* bias_correction1_ptr, const float* bias_correction2_ptr, const K* indices,
const T* values, T* model, T* m, T* v, T* max_v) {
if (learning_rate != nullptr) { lr = *learning_rate; }
float bias_correction1 = 1.0;
float bias_correction2 = 1.0;
if (bias_correction1_ptr != nullptr) { bias_correction1 = *bias_correction1_ptr; }
if (bias_correction2_ptr != nullptr) { bias_correction2 = *bias_correction2_ptr; }
const int64_t n = *num_unique_instance * feature_size;
CUDA_1D_KERNEL_LOOP(i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
AdamUpdateFunctor<T, T>()(values + i, model + model_idx, m + model_idx, v + model_idx,
max_v + i, static_cast<T>(1), 0, 0, beta1, beta2, epsilon,
weight_decay, amsgrad, bias_correction1, bias_correction2, lr);
}
}
}
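// LAMB is split into two kernels: LambGradGpu forms the Adam-style direction in adam_diff and
// refreshes m and v; LambUpdateGpu then rescales the step with the trust ratio computed by
// LambLRFunctor from w_norm and g_norm and applies the update (including weight decay) to the model.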
template<typename T, typename G>
__global__ void LambGradGpu(int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, const T* beta1_t, const T* beta2_t,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* adam_diff, T* model, T* m, T* v) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
LambGradFunctor<T, G>()(beta1_t, beta2_t, model_diff + i, adam_diff + i, model + i, m + i,
v + i, scale, l1, l2, beta1, beta2, epsilon);
}
}
template<typename T>
__global__ void LambUpdateGpu(int64_t n, float weight_decay, const float* learning_rate,
const int64_t* skip_if, const T* w_norm, const T* g_norm,
const T* beta1_t, const T* beta2_t, const T* adam_diff, T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
const float lr = LambLRFunctor<T>()(*learning_rate, w_norm, g_norm);
CUDA_1D_KERNEL_LOOP(i, n) { LambUpdateFunctor<T>()(lr, weight_decay, adam_diff + i, model + i); }
}
} // namespace
template<typename T, typename G>
struct AdamUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float learning_rate_val, float bias_correction1_val,
float bias_correction2_val, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const G* model_diff, T* model, T* m, T* v,
T* max_v);
};
template<typename T, typename G>
void AdamUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1, float beta2, float epsilon,
float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val,
float bias_correction1_val, float bias_correction2_val, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const G* model_diff, T* model, T* m, T* v, T* max_v) {
hipLaunchKernelGGL(( AdamUpdateGpu<T, G>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction,
learning_rate_val, bias_correction1_val, bias_correction2_val, learning_rate, scale_by_ptr,
skip_if, bias_correction1_ptr, bias_correction2_ptr, model_diff, model, m, v, max_v);
}
template<typename T>
struct AdamUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float learning_rate_val, float bias_correction1_val,
float bias_correction2_val, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const float16* model_diff, T* model, T* m,
T* v, T* max_v);
};
template<typename T>
void AdamUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1, float beta2, float epsilon,
float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val,
float bias_correction1_val, float bias_correction2_val, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const float16* model_diff, T* model, T* m, T* v, T* max_v) {
AdamUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction,
learning_rate_val, bias_correction1_val, bias_correction2_val, learning_rate, scale_by_ptr,
skip_if, bias_correction1_ptr, bias_correction2_ptr,
reinterpret_cast<const half*>(model_diff), model, m, v, max_v);
}
template struct AdamUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct AdamUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct AdamUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename G>
struct LambUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* adam_diff, T* model, T* m, T* v, T* norm_buffer, T* beta1_t, T* beta2_t);
};
template<typename T, typename G>
void LambUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* adam_diff, T* model, T* m, T* v, T* norm_buffer,
T* beta1_t, T* beta2_t) {
hipLaunchKernelGGL(( AdamUpdateBetaTGpu<T>), dim3(1), dim3(1), 0, ctx->cuda_stream(), beta1, beta2, skip_if, beta1_t, beta2_t);
hipLaunchKernelGGL(( LambGradGpu<T, G>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, beta1, beta2, epsilon, beta1_t, beta2_t, scale_by_ptr, skip_if, model_diff,
adam_diff, model, m, v);
T* w_norm = norm_buffer;
T* g_norm = norm_buffer + 1;
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, model, 1, model, 1, w_norm);
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, adam_diff, 1, adam_diff, 1, g_norm);
KernelUtil<DeviceType::kGPU, T>::Sqrt(ctx, 2, norm_buffer, norm_buffer);
hipLaunchKernelGGL(( LambUpdateGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, weight_decay, learning_rate, skip_if, w_norm, g_norm, beta1_t, beta2_t, adam_diff, model);
}
template<typename T>
struct LambUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff,
T* adam_diff, T* model, T* m, T* v, T* norm_buffer, T* beta1_t, T* beta2_t);
};
template<typename T>
void LambUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* adam_diff, T* model, T* m, T* v,
T* norm_buffer, T* beta1_t, T* beta2_t) {
LambUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, learning_rate, scale_by_ptr,
skip_if, reinterpret_cast<const half*>(model_diff), adam_diff, model, m, v, norm_buffer,
beta1_t, beta2_t);
}
template struct LambUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct LambUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct LambUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesAdamMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX> {
static void Update(DeviceCtx* ctx, float beta1, float beta2, float epsilon, float weight_decay,
bool amsgrad, bool do_bias_correction, float lr, int64_t num_instance,
int64_t feature_size, int64_t lower_bound, int64_t upper_bound,
const IDX* num_unique_instance, const float* learning_rate,
const float* bias_correction1_ptr, const float* bias_correction2_ptr,
const K* indices, const T* values, T* model, T* m, T* v, T* max_v);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesAdamMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX>::Update(
DeviceCtx* ctx, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float lr, int64_t num_instance, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const K* indices, const T* values, T* model, T* m, T* v,
T* max_v) {
hipLaunchKernelGGL(( IndexedSlicesAdamUpdateGpu<T, K, IDX>), dim3(BlocksNum4ThreadsNum(num_instance * feature_size)),
dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction, lr, feature_size,
lower_bound, upper_bound, num_unique_instance, learning_rate, bias_correction1_ptr,
bias_correction2_ptr, indices, values, model, m, v, max_v);
}
#define INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU(val_type_pair, key_type_pair, \
idx_type_pair) \
template struct IndexedSlicesAdamMdUpdateKernelUtil< \
DeviceType::kGPU, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU
template<>
struct BiasCorrectionFactorKernelUtil<DeviceType::kGPU> {
static void BiasCorrectionFactorCompute(DeviceCtx* ctx, float beta, const int64_t* train_step,
float* out);
};
void BiasCorrectionFactorKernelUtil<DeviceType::kGPU>::BiasCorrectionFactorCompute(
DeviceCtx* ctx, float beta, const int64_t* train_step, float* out) {
hipLaunchKernelGGL(( BiasCorrectionFactorKernelGpu), dim3(1), dim3(1), 0, ctx->cuda_stream(), beta, train_step, out);
}
namespace {
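// RMSProp update; the `centered` template flag selects the variant that also tracks a running
// mean of the gradient.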
template<typename T, typename G, bool centered>
__global__ void RmsPropUpdateGpu(int64_t n, T scale, float l1, float l2, T* mean_square,
T* mean_gradient, float epsilon, float weight_decay,
float decay_rate, float learning_rate_val,
const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
RmsPropUpdateFunctor<T, G, centered>()(model_diff + i, model + i, n, scale, l1, l2,
mean_square + i,
(centered ? mean_gradient + i : nullptr), epsilon,
weight_decay, decay_rate, learning_rate_val);
}
}
} // namespace
template<typename T, typename G>
struct RmsPropUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered,
float epsilon, float weight_decay, float decay_rate, float learning_rate_val,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const G* model_diff, T* model, T* mean_square, T* mean_gradient);
};
template<typename T, typename G>
void RmsPropUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered, float epsilon,
float weight_decay, float decay_rate, float learning_rate_val, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model, T* mean_square,
T* mean_gradient) {
if (centered) {
hipLaunchKernelGGL(( RmsPropUpdateGpu<T, G, true>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, mean_square, mean_gradient, epsilon, weight_decay, decay_rate,
learning_rate_val, learning_rate, scale_by_ptr, skip_if, model_diff, model);
} else {
hipLaunchKernelGGL(( RmsPropUpdateGpu<T, G, false>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, mean_square, mean_gradient, epsilon, weight_decay, decay_rate,
learning_rate_val, learning_rate, scale_by_ptr, skip_if, model_diff, model);
}
}
template<typename T>
struct RmsPropUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered,
float epsilon, float weight_decay, float decay_rate, float learning_rate_val,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* mean_square, T* mean_gradient);
};
template<typename T>
void RmsPropUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered, float epsilon,
float weight_decay, float decay_rate, float learning_rate_val, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff, T* model,
T* mean_square, T* mean_gradient) {
RmsPropUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, centered, epsilon, weight_decay, decay_rate, learning_rate_val,
learning_rate, scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), model,
mean_square, mean_gradient);
}
template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, float, float16>;
namespace {
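// LARS runs in three stages: scale/regularize the gradient, derive a layer-wise local learning
// rate from the model and gradient norms, then apply the momentum-style update.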
template<typename T, typename G>
__global__ void LarsScaleModelDiffGpu(int64_t n, T scale, float l1, float l2, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model,
T* model_diff_tmp) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
model_diff_tmp[i] =
CastScaleRegularizeGradientFunctor<T, G>()(model_diff[i], model[i], scale, l1, l2);
}
}
template<typename T>
__global__ void LarsGetLocalLearningRateGpu(const float* learning_rate, T weight_decay, T epsilon,
T lars_coefficient, const int64_t* skip_if,
T* data_tmp) {
if (skip_if != nullptr && *skip_if != 0) { return; }
T* model_norm = &data_tmp[0];
T* model_diff_norm = &data_tmp[1];
T* local_learning_rate = &data_tmp[2];
*model_norm = std::sqrt(*model_norm);
*model_diff_norm = std::sqrt(*model_diff_norm);
T lars = static_cast<T>(1);
if (*model_norm > 0 && *model_diff_norm > 0) {
lars = lars_coefficient * (*model_norm)
/ (epsilon + (*model_diff_norm) + weight_decay * (*model_norm));
}
*local_learning_rate = *learning_rate * lars;
}
template<typename T>
__global__ void LarsUpdateGpu(int64_t n, float momentum_beta, T* momentum, float weight_decay,
const int64_t* skip_if, T* local_learning_rate, T* model_diff_tmp,
T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
CUDA_1D_KERNEL_LOOP(i, n) {
LarsUpdateFunctor<T>()(model_diff_tmp + i, model + i, momentum_beta, momentum + i, weight_decay,
*local_learning_rate);
}
}
} // namespace
template<typename T, typename G>
struct LarsUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta,
float epsilon, float lars_coefficient, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const G* model_diff, T* model, T* momentum, T* data_tmp, T* model_diff_tmp);
};
template<typename T, typename G>
void LarsUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon,
float lars_coefficient, float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* momentum, T* data_tmp,
T* model_diff_tmp) {
hipLaunchKernelGGL(( LarsScaleModelDiffGpu<T, G>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, scale_by_ptr, skip_if, model_diff, model, model_diff_tmp);
T* model_norm = data_tmp;
T* model_diff_norm = data_tmp + 1;
T* local_learning_rate = data_tmp + 2;
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, model, 1, model, 1, model_norm);
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, model_diff_tmp, 1, model_diff_tmp, 1,
model_diff_norm);
hipLaunchKernelGGL(( LarsGetLocalLearningRateGpu<T>), dim3(1), dim3(1), 0, ctx->cuda_stream(),
learning_rate, weight_decay, epsilon, lars_coefficient, skip_if, data_tmp);
hipLaunchKernelGGL(( LarsUpdateGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, momentum_beta, momentum, weight_decay, skip_if, local_learning_rate, model_diff_tmp,
model);
}
template<typename T>
struct LarsUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta,
float epsilon, float lars_coefficient, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* momentum, T* data_tmp,
T* model_diff_tmp);
};
template<typename T>
void LarsUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon,
float lars_coefficient, float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model, T* momentum, T* data_tmp,
T* model_diff_tmp) {
LarsUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, momentum_beta, epsilon, lars_coefficient, weight_decay, learning_rate,
scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), model, momentum, data_tmp,
model_diff_tmp);
}
template struct LarsUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct LarsUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct LarsUpdateKernelUtil<DeviceType::kGPU, float, float16>;
} // namespace oneflow
| 339d4a8df42a4cf6202b7211206077dc9d9345d2.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/cuda/atomic.cuh"
#include "oneflow/user/kernels/model_update_kernel_util.h"
namespace oneflow {
namespace {
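// Dense SGD update: applies loss scaling, L1/L2 regularization and weight decay to each
// gradient element before stepping the model.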
template<typename T, typename G>
__global__ void SGDUpdateGpu(int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
SGDUpdateFunctor<T, G>()(model_diff + i, model + i, scale, l1, l2, weight_decay,
learning_rate_val);
}
}
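// Sparse (indexed-slices) SGD update; rows outside [lower_bound, upper_bound) belong to other
// shards and are skipped.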
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesSGDUpdateGpu(float weight_decay, const IDX feature_size,
const int64_t lower_bound, const int64_t upper_bound,
const IDX* num_unique_instance,
const float* learning_rate, const K* indices,
const T* values, T* model) {
const int64_t n = *num_unique_instance * feature_size;
const T lr = *learning_rate;
CUDA_1D_KERNEL_LOOP_T(IDX, i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
SGDUpdateFunctor<T, T>()(values + i, model + model_idx, static_cast<T>(1), 0.0, 0.0,
weight_decay, lr);
}
}
}
} // namespace
template<typename T, typename G>
struct SGDUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model);
};
template<typename T, typename G>
void SGDUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model) {
SGDUpdateGpu<T, G><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, weight_decay, learning_rate_val, learning_rate, scale_by_ptr, skip_if,
model_diff, model);
}
template<typename T>
struct SGDUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model);
};
template<typename T>
void SGDUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model) {
SGDUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, weight_decay, learning_rate_val, learning_rate, scale_by_ptr, skip_if,
reinterpret_cast<const half*>(model_diff), model);
}
template struct SGDUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct SGDUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct SGDUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesSGDUpdateKernelUtil<DeviceType::kGPU, T, K, IDX> {
static void Update(DeviceCtx* ctx, float weight_decay, int64_t num_indices, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices, const T* values, T* model);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesSGDUpdateKernelUtil<DeviceType::kGPU, T, K, IDX>::Update(
DeviceCtx* ctx, float weight_decay, int64_t num_indices, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices, const T* values, T* model) {
IndexedSlicesSGDUpdateGpu<T, K, IDX>
<<<BlocksNum4ThreadsNum(num_indices * feature_size), kCudaThreadsNumPerBlock, 0,
ctx->cuda_stream()>>>(weight_decay, feature_size, lower_bound, upper_bound,
num_unique_instance, learning_rate, indices, values, model);
}
#define INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU(val_type_pair, key_type_pair, \
idx_type_pair) \
template struct IndexedSlicesSGDUpdateKernelUtil< \
DeviceType::kGPU, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU
namespace {
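// Dense momentum-SGD update over all model elements via a grid-stride loop.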
template<typename T, typename G>
__global__ void MomentumUpdateGpu(int64_t n, T scale, float l1, float l2, float beta,
float weight_decay, float learning_rate_val,
const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model,
T* momentum) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
MomentumUpdateFunctor<T, G>()(model_diff + i, model + i, momentum + i, scale, l1, l2, beta,
weight_decay, learning_rate_val);
}
}
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesMomentumUpdateGpu(T beta, float weight_decay, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound,
const IDX* num_unique_instance,
const float* learning_rate, const K* indices,
const T* values, T* model, T* momentum) {
const int64_t n = *num_unique_instance * feature_size;
const T lr = *learning_rate;
CUDA_1D_KERNEL_LOOP(i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
MomentumUpdateFunctor<T, T>()(values + i, model + model_idx, momentum + model_idx,
static_cast<T>(1), 0.0, 0.0, beta, weight_decay, lr);
}
}
}
} // namespace
template<typename T, typename G>
struct MomentumUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta,
float weight_decay, float learning_rate_val, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model,
T* momentum);
};
template<typename T, typename G>
void MomentumUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta, float weight_decay,
float learning_rate_val, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* momentum) {
MomentumUpdateGpu<T, G>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, beta, weight_decay, learning_rate_val, learning_rate, scale_by_ptr,
skip_if, model_diff, model, momentum);
}
template<typename T>
struct MomentumUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta,
float weight_decay, float learning_rate_val, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff,
T* model, T* momentum);
};
template<typename T>
void MomentumUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta, float weight_decay,
float learning_rate_val, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model, T* momentum) {
MomentumUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, beta, weight_decay, learning_rate_val, learning_rate, scale_by_ptr,
skip_if, reinterpret_cast<const half*>(model_diff), model, momentum);
}
template struct MomentumUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct MomentumUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct MomentumUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesMomentumMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX> {
static void Update(DeviceCtx* ctx, T beta, float weight_decay, int64_t num_instance,
int64_t feature_size, int64_t lower_bound, int64_t upper_bound,
const IDX* num_unique_instance, const float* learning_rate, const K* indices,
const T* values, T* model, T* momentum);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesMomentumMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX>::Update(
DeviceCtx* ctx, T beta, float weight_decay, int64_t num_instance, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices, const T* values, T* model, T* momentum) {
IndexedSlicesMomentumUpdateGpu<T, K, IDX><<<BlocksNum4ThreadsNum(num_instance * feature_size),
kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
beta, weight_decay, feature_size, lower_bound, upper_bound, num_unique_instance,
learning_rate, indices, values, model, momentum);
}
#define INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU( \
val_type_pair, key_type_pair, idx_type_pair) \
template struct IndexedSlicesMomentumMdUpdateKernelUtil< \
DeviceType::kGPU, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU
namespace {
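// Computes the Adam bias-correction factor 1 - beta^(train_step + 1) on a single thread.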
__global__ void BiasCorrectionFactorKernelGpu(float beta, const int64_t* train_step, float* out) {
const auto exponent = static_cast<double>(*train_step + 1);
const float bias_correction_factor = 1.0 - static_cast<float>(pow(beta, exponent));
*out = bias_correction_factor;
}
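// Dense Adam/AdamW update; device-side overrides (learning rate, loss scale, bias corrections)
// take precedence over the host-side values when the corresponding pointers are non-null.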
template<typename T, typename G>
__global__ void AdamUpdateGpu(int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float learning_rate_val,
float bias_correction1_val, float bias_correction2_val,
const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const G* model_diff, T* model,
T* m, T* v, T* max_v) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
if (bias_correction1_ptr != nullptr) { bias_correction1_val = *bias_correction1_ptr; }
if (bias_correction2_ptr != nullptr) { bias_correction2_val = *bias_correction2_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
AdamUpdateFunctor<T, G>()(model_diff + i, model + i, m + i, v + i, max_v + i, scale, l1, l2,
beta1, beta2, epsilon, weight_decay, amsgrad, bias_correction1_val,
bias_correction2_val, learning_rate_val);
}
}
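// Advances the running beta1^t / beta2^t products by one step; launched with a single thread.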
template<typename T>
__global__ void AdamUpdateBetaTGpu(const T beta1, const T beta2, const int64_t* skip_if, T* beta1_t,
T* beta2_t) {
if (skip_if != nullptr && *skip_if != 0) { return; }
*beta1_t *= beta1;
*beta2_t *= beta2;
}
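// Sparse Adam update over gathered rows; only indices inside this shard's
// [lower_bound, upper_bound) range update the local model slice.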
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesAdamUpdateGpu(
float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float lr, int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate,
const float* bias_correction1_ptr, const float* bias_correction2_ptr, const K* indices,
const T* values, T* model, T* m, T* v, T* max_v) {
if (learning_rate != nullptr) { lr = *learning_rate; }
float bias_correction1 = 1.0;
float bias_correction2 = 1.0;
if (bias_correction1_ptr != nullptr) { bias_correction1 = *bias_correction1_ptr; }
if (bias_correction2_ptr != nullptr) { bias_correction2 = *bias_correction2_ptr; }
const int64_t n = *num_unique_instance * feature_size;
CUDA_1D_KERNEL_LOOP(i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
AdamUpdateFunctor<T, T>()(values + i, model + model_idx, m + model_idx, v + model_idx,
max_v + i, static_cast<T>(1), 0, 0, beta1, beta2, epsilon,
weight_decay, amsgrad, bias_correction1, bias_correction2, lr);
}
}
}
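// LAMB stage 1: compute the Adam-style step direction for every element into adam_diff.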
template<typename T, typename G>
__global__ void LambGradGpu(int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, const T* beta1_t, const T* beta2_t,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* adam_diff, T* model, T* m, T* v) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
LambGradFunctor<T, G>()(beta1_t, beta2_t, model_diff + i, adam_diff + i, model + i, m + i,
v + i, scale, l1, l2, beta1, beta2, epsilon);
}
}
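// LAMB stage 2: scale the step by the trust ratio of the model norm to the step norm, then update.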
template<typename T>
__global__ void LambUpdateGpu(int64_t n, float weight_decay, const float* learning_rate,
const int64_t* skip_if, const T* w_norm, const T* g_norm,
const T* beta1_t, const T* beta2_t, const T* adam_diff, T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
const float lr = LambLRFunctor<T>()(*learning_rate, w_norm, g_norm);
CUDA_1D_KERNEL_LOOP(i, n) { LambUpdateFunctor<T>()(lr, weight_decay, adam_diff + i, model + i); }
}
} // namespace
template<typename T, typename G>
struct AdamUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float learning_rate_val, float bias_correction1_val,
float bias_correction2_val, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const G* model_diff, T* model, T* m, T* v,
T* max_v);
};
template<typename T, typename G>
void AdamUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1, float beta2, float epsilon,
float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val,
float bias_correction1_val, float bias_correction2_val, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const G* model_diff, T* model, T* m, T* v, T* max_v) {
AdamUpdateGpu<T, G><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction,
learning_rate_val, bias_correction1_val, bias_correction2_val, learning_rate, scale_by_ptr,
skip_if, bias_correction1_ptr, bias_correction2_ptr, model_diff, model, m, v, max_v);
}
template<typename T>
struct AdamUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float learning_rate_val, float bias_correction1_val,
float bias_correction2_val, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const float16* model_diff, T* model, T* m,
T* v, T* max_v);
};
template<typename T>
void AdamUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1, float beta2, float epsilon,
float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val,
float bias_correction1_val, float bias_correction2_val, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const float16* model_diff, T* model, T* m, T* v, T* max_v) {
AdamUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction,
learning_rate_val, bias_correction1_val, bias_correction2_val, learning_rate, scale_by_ptr,
skip_if, bias_correction1_ptr, bias_correction2_ptr,
reinterpret_cast<const half*>(model_diff), model, m, v, max_v);
}
template struct AdamUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct AdamUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct AdamUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename G>
struct LambUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* adam_diff, T* model, T* m, T* v, T* norm_buffer, T* beta1_t, T* beta2_t);
};
template<typename T, typename G>
void LambUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* adam_diff, T* model, T* m, T* v, T* norm_buffer,
T* beta1_t, T* beta2_t) {
AdamUpdateBetaTGpu<T><<<1, 1, 0, ctx->cuda_stream()>>>(beta1, beta2, skip_if, beta1_t, beta2_t);
LambGradGpu<T, G><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, beta1, beta2, epsilon, beta1_t, beta2_t, scale_by_ptr, skip_if, model_diff,
adam_diff, model, m, v);
T* w_norm = norm_buffer;
T* g_norm = norm_buffer + 1;
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, model, 1, model, 1, w_norm);
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, adam_diff, 1, adam_diff, 1, g_norm);
KernelUtil<DeviceType::kGPU, T>::Sqrt(ctx, 2, norm_buffer, norm_buffer);
LambUpdateGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, weight_decay, learning_rate, skip_if, w_norm, g_norm, beta1_t, beta2_t, adam_diff, model);
}
template<typename T>
struct LambUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff,
T* adam_diff, T* model, T* m, T* v, T* norm_buffer, T* beta1_t, T* beta2_t);
};
template<typename T>
void LambUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* adam_diff, T* model, T* m, T* v,
T* norm_buffer, T* beta1_t, T* beta2_t) {
LambUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, learning_rate, scale_by_ptr,
skip_if, reinterpret_cast<const half*>(model_diff), adam_diff, model, m, v, norm_buffer,
beta1_t, beta2_t);
}
template struct LambUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct LambUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct LambUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesAdamMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX> {
static void Update(DeviceCtx* ctx, float beta1, float beta2, float epsilon, float weight_decay,
bool amsgrad, bool do_bias_correction, float lr, int64_t num_instance,
int64_t feature_size, int64_t lower_bound, int64_t upper_bound,
const IDX* num_unique_instance, const float* learning_rate,
const float* bias_correction1_ptr, const float* bias_correction2_ptr,
const K* indices, const T* values, T* model, T* m, T* v, T* max_v);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesAdamMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX>::Update(
DeviceCtx* ctx, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float lr, int64_t num_instance, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const K* indices, const T* values, T* model, T* m, T* v,
T* max_v) {
IndexedSlicesAdamUpdateGpu<T, K, IDX><<<BlocksNum4ThreadsNum(num_instance * feature_size),
kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction, lr, feature_size,
lower_bound, upper_bound, num_unique_instance, learning_rate, bias_correction1_ptr,
bias_correction2_ptr, indices, values, model, m, v, max_v);
}
#define INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU(val_type_pair, key_type_pair, \
idx_type_pair) \
template struct IndexedSlicesAdamMdUpdateKernelUtil< \
DeviceType::kGPU, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU
template<>
struct BiasCorrectionFactorKernelUtil<DeviceType::kGPU> {
static void BiasCorrectionFactorCompute(DeviceCtx* ctx, float beta, const int64_t* train_step,
float* out);
};
void BiasCorrectionFactorKernelUtil<DeviceType::kGPU>::BiasCorrectionFactorCompute(
DeviceCtx* ctx, float beta, const int64_t* train_step, float* out) {
BiasCorrectionFactorKernelGpu<<<1, 1, 0, ctx->cuda_stream()>>>(beta, train_step, out);
}
namespace {
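// RMSProp update; the `centered` template flag selects the variant that also tracks a running
// mean of the gradient.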
template<typename T, typename G, bool centered>
__global__ void RmsPropUpdateGpu(int64_t n, T scale, float l1, float l2, T* mean_square,
T* mean_gradient, float epsilon, float weight_decay,
float decay_rate, float learning_rate_val,
const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
RmsPropUpdateFunctor<T, G, centered>()(model_diff + i, model + i, n, scale, l1, l2,
mean_square + i,
(centered ? mean_gradient + i : nullptr), epsilon,
weight_decay, decay_rate, learning_rate_val);
}
}
} // namespace
template<typename T, typename G>
struct RmsPropUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered,
float epsilon, float weight_decay, float decay_rate, float learning_rate_val,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const G* model_diff, T* model, T* mean_square, T* mean_gradient);
};
template<typename T, typename G>
void RmsPropUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered, float epsilon,
float weight_decay, float decay_rate, float learning_rate_val, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model, T* mean_square,
T* mean_gradient) {
if (centered) {
RmsPropUpdateGpu<T, G, true>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, mean_square, mean_gradient, epsilon, weight_decay, decay_rate,
learning_rate_val, learning_rate, scale_by_ptr, skip_if, model_diff, model);
} else {
RmsPropUpdateGpu<T, G, false>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, mean_square, mean_gradient, epsilon, weight_decay, decay_rate,
learning_rate_val, learning_rate, scale_by_ptr, skip_if, model_diff, model);
}
}
template<typename T>
struct RmsPropUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered,
float epsilon, float weight_decay, float decay_rate, float learning_rate_val,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* mean_square, T* mean_gradient);
};
template<typename T>
void RmsPropUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered, float epsilon,
float weight_decay, float decay_rate, float learning_rate_val, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff, T* model,
T* mean_square, T* mean_gradient) {
RmsPropUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, centered, epsilon, weight_decay, decay_rate, learning_rate_val,
learning_rate, scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), model,
mean_square, mean_gradient);
}
template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, float, float16>;
namespace {
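// LARS runs in three stages: scale/regularize the gradient, derive a layer-wise local learning
// rate from the model and gradient norms, then apply the momentum-style update.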
template<typename T, typename G>
__global__ void LarsScaleModelDiffGpu(int64_t n, T scale, float l1, float l2, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model,
T* model_diff_tmp) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
model_diff_tmp[i] =
CastScaleRegularizeGradientFunctor<T, G>()(model_diff[i], model[i], scale, l1, l2);
}
}
template<typename T>
__global__ void LarsGetLocalLearningRateGpu(const float* learning_rate, T weight_decay, T epsilon,
T lars_coefficient, const int64_t* skip_if,
T* data_tmp) {
if (skip_if != nullptr && *skip_if != 0) { return; }
T* model_norm = &data_tmp[0];
T* model_diff_norm = &data_tmp[1];
T* local_learning_rate = &data_tmp[2];
*model_norm = std::sqrt(*model_norm);
*model_diff_norm = std::sqrt(*model_diff_norm);
T lars = static_cast<T>(1);
if (*model_norm > 0 && *model_diff_norm > 0) {
lars = lars_coefficient * (*model_norm)
/ (epsilon + (*model_diff_norm) + weight_decay * (*model_norm));
}
*local_learning_rate = *learning_rate * lars;
}
template<typename T>
__global__ void LarsUpdateGpu(int64_t n, float momentum_beta, T* momentum, float weight_decay,
const int64_t* skip_if, T* local_learning_rate, T* model_diff_tmp,
T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
CUDA_1D_KERNEL_LOOP(i, n) {
LarsUpdateFunctor<T>()(model_diff_tmp + i, model + i, momentum_beta, momentum + i, weight_decay,
*local_learning_rate);
}
}
} // namespace
template<typename T, typename G>
struct LarsUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta,
float epsilon, float lars_coefficient, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const G* model_diff, T* model, T* momentum, T* data_tmp, T* model_diff_tmp);
};
template<typename T, typename G>
void LarsUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon,
float lars_coefficient, float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* momentum, T* data_tmp,
T* model_diff_tmp) {
LarsScaleModelDiffGpu<T, G>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, scale_by_ptr, skip_if, model_diff, model, model_diff_tmp);
T* model_norm = data_tmp;
T* model_diff_norm = data_tmp + 1;
T* local_learning_rate = data_tmp + 2;
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, model, 1, model, 1, model_norm);
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, model_diff_tmp, 1, model_diff_tmp, 1,
model_diff_norm);
LarsGetLocalLearningRateGpu<T><<<1, 1, 0, ctx->cuda_stream()>>>(
learning_rate, weight_decay, epsilon, lars_coefficient, skip_if, data_tmp);
LarsUpdateGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, momentum_beta, momentum, weight_decay, skip_if, local_learning_rate, model_diff_tmp,
model);
}
template<typename T>
struct LarsUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta,
float epsilon, float lars_coefficient, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* momentum, T* data_tmp,
T* model_diff_tmp);
};
template<typename T>
void LarsUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon,
float lars_coefficient, float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model, T* momentum, T* data_tmp,
T* model_diff_tmp) {
LarsUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, momentum_beta, epsilon, lars_coefficient, weight_decay, learning_rate,
scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), model, momentum, data_tmp,
model_diff_tmp);
}
template struct LarsUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct LarsUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct LarsUpdateKernelUtil<DeviceType::kGPU, float, float16>;
} // namespace oneflow
|
02fa9c4a08db2419271f072f1546f974dffb089b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <inttypes.h> // PRId64 / PRIu64 format macros for the 64-bit timing variables
#define NN 200
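// GPU kernel: each thread owns one array element and repeats the same arithmetic NN times so
// that there is enough work per element to time.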
__global__ void add_cuda_good(int *x,int *y)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
for (int i = 0; i< NN ; i++) {
y[bid*blockDim.x + tid ] += x[bid*blockDim.x + tid];
y[bid*blockDim.x + tid ] *= 2;
y[bid*blockDim.x + tid ] += bid*blockDim.x + tid;
y[bid*blockDim.x + tid ] += 3;
y[bid*blockDim.x + tid ] += x[bid*blockDim.x + tid];
y[bid*blockDim.x + tid ] *= 2;
y[bid*blockDim.x + tid ] += bid*blockDim.x + tid;
y[bid*blockDim.x + tid ] += 3;
y[bid*blockDim.x + tid ] += x[bid*blockDim.x + tid];
y[bid*blockDim.x + tid ] *= 2;
y[bid*blockDim.x + tid ] += bid*blockDim.x + tid;
y[bid*blockDim.x + tid ] += 3;
}
}
void add_cpu_bad(int *x ,int *y, int size)
{
for (int i=0; i< size; i++){
		for (int j = 0; j < NN; j++) { // repeat per-element work NN times; j avoids shadowing the element index i
y[i] += x[i];
y[i] *= 2;
y[i] += i;
y[i] += 3;
y[i] += x[i];
y[i] *= 2;
y[i] += i;
y[i] += 3;
y[i] += x[i];
y[i] *= 2;
y[i] += i;
y[i] += 3;
}
}
}
void print_1D_arr(const char *text,int arr[], int size)
{
if (text == NULL) printf("\n");
else printf("--%s--\n",text);
for (int i=0;i<size;i++)
{
printf(":%d:",arr[i]);
}
printf("\n");
}
int64_t timespecDiff(struct timespec *timeA_p, struct timespec *timeB_p)
{
return ((timeA_p->tv_sec * 1000000000) + timeA_p->tv_nsec) -
((timeB_p->tv_sec * 1000000000) + timeB_p->tv_nsec);
}
int64_t timeDiffSec(struct timespec *timeA_p, struct timespec *timeB_p)
{
return timeA_p->tv_sec - timeB_p->tv_sec ;
}
void arr_init(int *x,int *y, int N)
{
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 10;
y[i] = 2;
}
}
int main(int argc, char** argv)
{
struct timespec start, end;
int *x,*y;
uint64_t timeElapsedGPU;
uint64_t timeElapsedCPU;
int N,T;
	if (argc < 3) {
		printf("usage: %s <num_elements> <threads_per_block>\n", argv[0]);
		return 1;
	}
	sscanf(argv[1], "%d", &N);
	sscanf(argv[2], "%d", &T);
clock_gettime(CLOCK_MONOTONIC, &start);
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(int));
hipMallocManaged(&y, N*sizeof(int));
clock_gettime(CLOCK_MONOTONIC, &end);
timeElapsedGPU = timespecDiff(&end, &start);
	printf("\n\n\n timeElapsed for init = %" PRIu64 "\n", timeElapsedGPU);
arr_init(x,y,N);
// print_1D_arr("CUDA:Input",x,10);
int blockSize = T;
int numBlocks = N/blockSize;
printf(" numBlocks=%d, blockSize=%d\n", numBlocks, blockSize);
clock_gettime(CLOCK_MONOTONIC, &start);
hipLaunchKernelGGL(( add_cuda_good), dim3(numBlocks), dim3(blockSize), 0, 0, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &end);
timeElapsedGPU = timespecDiff(&end, &start);
	printf("\n\n\n timeElapsed GPU = %" PRIu64 "\n", timeElapsedGPU);
	printf("\n Time Diff Sec GPU = %" PRId64 "\n", timeDiffSec(&end,&start));
// print_1D_arr("CUDA:Output",y,10);
// printf("\n\n\n----Final check:%d\n", y[N-1]);
arr_init(x,y,N);
// print_1D_arr("CUDA:Input",x,10);
clock_gettime(CLOCK_MONOTONIC, &start);
// Some code I am interested in measuring
add_cpu_bad(x,y,N);
clock_gettime(CLOCK_MONOTONIC, &end);
timeElapsedCPU = timespecDiff(&end, &start);
	printf("\n\n\n timeElapsed CPU= %" PRIu64 "  ratio:%f\n", timeElapsedCPU, (float)timeElapsedCPU/timeElapsedGPU);
	printf("\n Time Diff Sec CPU = %" PRId64 "\n", timeDiffSec(&end,&start));
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
| 02fa9c4a08db2419271f072f1546f974dffb089b.cu | #include <iostream>
#include <math.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <inttypes.h> // PRId64 / PRIu64 format macros for the 64-bit timing variables
#define NN 200
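// GPU kernel: each thread owns one array element and repeats the same arithmetic NN times so
// that there is enough work per element to time.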
__global__ void add_cuda_good(int *x,int *y)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
for (int i = 0; i< NN ; i++) {
y[bid*blockDim.x + tid ] += x[bid*blockDim.x + tid];
y[bid*blockDim.x + tid ] *= 2;
y[bid*blockDim.x + tid ] += bid*blockDim.x + tid;
y[bid*blockDim.x + tid ] += 3;
y[bid*blockDim.x + tid ] += x[bid*blockDim.x + tid];
y[bid*blockDim.x + tid ] *= 2;
y[bid*blockDim.x + tid ] += bid*blockDim.x + tid;
y[bid*blockDim.x + tid ] += 3;
y[bid*blockDim.x + tid ] += x[bid*blockDim.x + tid];
y[bid*blockDim.x + tid ] *= 2;
y[bid*blockDim.x + tid ] += bid*blockDim.x + tid;
y[bid*blockDim.x + tid ] += 3;
}
}
void add_cpu_bad(int *x ,int *y, int size)
{
for (int i=0; i< size; i++){
		for (int j = 0; j < NN; j++) { // repeat per-element work NN times; j avoids shadowing the element index i
y[i] += x[i];
y[i] *= 2;
y[i] += i;
y[i] += 3;
y[i] += x[i];
y[i] *= 2;
y[i] += i;
y[i] += 3;
y[i] += x[i];
y[i] *= 2;
y[i] += i;
y[i] += 3;
}
}
}
void print_1D_arr(const char *text,int arr[], int size)
{
if (text == NULL) printf("\n");
else printf("--%s--\n",text);
for (int i=0;i<size;i++)
{
printf(":%d:",arr[i]);
}
printf("\n");
}
int64_t timespecDiff(struct timespec *timeA_p, struct timespec *timeB_p)
{
return ((timeA_p->tv_sec * 1000000000) + timeA_p->tv_nsec) -
((timeB_p->tv_sec * 1000000000) + timeB_p->tv_nsec);
}
int64_t timeDiffSec(struct timespec *timeA_p, struct timespec *timeB_p)
{
return timeA_p->tv_sec - timeB_p->tv_sec ;
}
void arr_init(int *x,int *y, int N)
{
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 10;
y[i] = 2;
}
}
int main(int argc, char** argv)
{
struct timespec start, end;
int *x,*y;
uint64_t timeElapsedGPU;
uint64_t timeElapsedCPU;
int N,T;
	if (argc < 3) {
		printf("usage: %s <num_elements> <threads_per_block>\n", argv[0]);
		return 1;
	}
	sscanf(argv[1], "%d", &N);
	sscanf(argv[2], "%d", &T);
clock_gettime(CLOCK_MONOTONIC, &start);
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(int));
cudaMallocManaged(&y, N*sizeof(int));
clock_gettime(CLOCK_MONOTONIC, &end);
timeElapsedGPU = timespecDiff(&end, &start);
	printf("\n\n\n timeElapsed for init = %" PRIu64 "\n", timeElapsedGPU);
arr_init(x,y,N);
// print_1D_arr("CUDA:Input",x,10);
int blockSize = T;
int numBlocks = N/blockSize;
printf(" numBlocks=%d, blockSize=%d\n", numBlocks, blockSize);
clock_gettime(CLOCK_MONOTONIC, &start);
add_cuda_good<<<numBlocks, blockSize>>>( x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &end);
timeElapsedGPU = timespecDiff(&end, &start);
	printf("\n\n\n timeElapsed GPU = %" PRIu64 "\n", timeElapsedGPU);
	printf("\n Time Diff Sec GPU = %" PRId64 "\n", timeDiffSec(&end,&start));
// print_1D_arr("CUDA:Output",y,10);
// printf("\n\n\n----Final check:%d\n", y[N-1]);
arr_init(x,y,N);
// print_1D_arr("CUDA:Input",x,10);
clock_gettime(CLOCK_MONOTONIC, &start);
// Some code I am interested in measuring
add_cpu_bad(x,y,N);
clock_gettime(CLOCK_MONOTONIC, &end);
timeElapsedCPU = timespecDiff(&end, &start);
	printf("\n\n\n timeElapsed CPU= %" PRIu64 "  ratio:%f\n", timeElapsedCPU, (float)timeElapsedCPU/timeElapsedGPU);
	printf("\n Time Diff Sec CPU = %" PRId64 "\n", timeDiffSec(&end,&start));
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|