hip_filename | hip_content | cuda_filename | cuda_content |
---|---|---|---|
32db28275a0579be6a17775e1aa88f49f396f8b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "projektcuda.h"
#include "kernels/sparseMatrixMul_kernel.h"
#include "kernels/dotMul_cuda_gpu.h"
#include "kernels/norm_cuda_gpu.h"
#include "kernels/gausskernel.h"
#include "bastianortho.h"
#include "kernels/matrixMul_kernel.h"
int debugmode = 0; /* No debugging as default. 1 = printf, 2=check all Operations in CPU */
typedef struct idrs_context {
void* devmem1stcall;
t_SparseMatrix A;
t_ve* b;
t_ve* r;
t_ve* v;
t_ve* x;
t_ve* om1;
t_ve* om2;
} t_idrs_context;
static t_idrs_context ctxholder[4];
extern "C" void set_debuglevel( int debuglevel ) {
debugmode = debuglevel;
};
extern "C" int get_debuglevel( ) {
return debugmode ;
};
extern "C" size_t idrs_sizetve() {
return sizeof(t_ve);
}
__host__ void testortholinkcompileonly() {
t_ve dummyRes;
t_ve dummyP;
orthogonalize( &dummyP, &dummyRes, 12345, 6 );
}
/* ------------------------------------------------------------------------------------- */
__global__ void kernel_vec_mul_skalar( t_ve *invec, t_ve scalar, t_ve *out, t_mindex N )
{
t_mindex i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < N )
out[i] = invec[i] * scalar;
}
__host__ void dbg_vec_mul_skalar(
t_ve* in1_in,
t_ve* out1_in,
t_ve scalar_in,
t_mindex N,
char* debugname
)
{
hipError_t e;
t_ve* v = (t_ve*) malloc( sizeof( t_ve ) * N );
if ( v == NULL ) { fprintf(stderr, "sorry, can not allocate memory for you C"); exit( -1 ); }
t_ve* vresult = (t_ve*) malloc( sizeof( t_ve ) * N );
if ( vresult == NULL ) { fprintf(stderr, "sorry, can not allocate memory for you C"); exit( -1 ); }
e = hipMemcpy( v, in1_in, sizeof(t_ve) * N , hipMemcpyDeviceToHost);
CUDA_UTIL_ERRORCHECK("hipMemcpy");
e = hipMemcpy( vresult, out1_in, sizeof(t_ve) * N , hipMemcpyDeviceToHost);
CUDA_UTIL_ERRORCHECK("hipMemcpy");
for ( t_mindex i = 0; i < N ; i++ ) {
t_ve prod = v[i] * scalar_in;
if ( prod != vresult[i] ) {
fprintf(stderr, "\n vecmul NOT OK");
exit( -3);
}
}
free( v );
free( vresult );
}
/* ------------------------------------------------------------------------------------- */
__host__ void dbg_dump_mtx(
t_ve* dv,
t_mindex m,
t_mindex n,
char* mname
)
{
hipError_t e;
t_ve* v = (t_ve*) malloc( sizeof( t_ve ) * m * n );
if ( v == NULL ) { fprintf(stderr, "sorry, can not allocate memory for you C"); exit( -1 ); }
e = hipMemcpy( v, dv, sizeof(t_ve) * m * n , hipMemcpyDeviceToHost);
CUDA_UTIL_ERRORCHECK(" hipMemcpy debugbuffer");
for( t_mindex s=0; s < n; s++ ) {
for( t_mindex r=0; r < m; r++ ) {
t_mindex i = s * m + r;
printf("\n %s(%u,%u)=%s[%u] = %f ",mname, r+1, s+1, mname, i, v[i] );
}
}
free( v);
}
/* ------------------------------------------------------------------------------------- */
__global__ void sub_arrays_gpu( t_ve *in1, t_ve *in2, t_ve *out, t_mindex N)
{
t_mindex i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < N )
out[i] = in1[i] - in2[i];
}
__global__ void add_and_mul_arrays_gpu(
t_ve *in1,
t_ve *in2,
t_ve coefficient,
t_ve *out,
t_mindex N
)
{
t_mindex i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < N )
out[i] = in1[i] + coefficient * in2[i];
}
__global__ void add_arrays_gpu( t_ve *in1, t_ve *in2, t_ve *out, t_mindex N)
{
t_mindex i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < N )
out[i] = in1[i] + in2[i];
}
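/* bytes needed for one sparse matrix in the packed layout used below:
   nzmax values + nzmax column indices + (cnt_cols + 1) row pointers
   (callers pass A.m as cnt_cols, i.e. a CSR-style row-pointer array) */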
__host__ size_t smat_size( int cnt_elements, int cnt_cols ) {
return ( sizeof(t_ve) + sizeof(t_mindex) ) * cnt_elements
+ sizeof(t_mindex) * (cnt_cols + 1);
}
extern "C" void idrs2nd(
t_FullMatrix P_in,
t_ve tolr,
unsigned int s,
unsigned int maxit,
t_idrshandle ih_in, /* Context Handle we got from idrs_1st */
t_ve* x_out,
t_ve* resvec,
unsigned int* piter
) {
hipError_t e;
t_idrshandle ctx;
t_FullMatrix mv;
t_FullMatrix mr;
t_FullMatrix mt;
int cnt_multiprozessors;
int deviceCount;
hipGetDeviceCount(&deviceCount);
t_ve* om1;
t_ve* om2;
t_ve* v;
t_mindex resveci = 1;
void* devmem;
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
printf(" Number of multiprocessors: %d\n", deviceProp.multiProcessorCount);
cnt_multiprozessors = deviceProp.multiProcessorCount;
}
ctx = ih_in;
t_SparseMatrix A = ctxholder[ctx].A ;
t_mindex N = A.m;
size_t h_memblocksize = N * sizeof( t_ve ) /* om1 */
+ N * sizeof( t_ve ) /* om2 */
+ N * s * sizeof( t_ve ) /* debugbuffer1 */
+ N * sizeof( t_ve ) /* h_norm */
;
size_t d_memblocksize = (N*s ) * sizeof( t_ve ) /* P */
+ s * (s+1+1) * sizeof( t_ve ) /* M m c */
+ ( N + 512 ) * sizeof( t_ve ) /* v */
+ (N*s ) * sizeof( t_ve ) /* dR */
+ (N*s ) * sizeof( t_ve ) /* dX */
+ (N ) * sizeof( t_ve ) /* dnormv */
+ (N ) * sizeof( t_ve ) /* q */
+ (N + 512 ) * sizeof( t_ve ) /* t */
+ (N + 512 ) * sizeof( t_ve ) /* buffer1 */
+ (N + 512 ) * sizeof( t_ve ) /* dm */
+ maxit * sizeof( t_ve ) /* dm */
// + (N ) * sizeof( t_ve ) /* x */
;
e = hipMalloc ( &devmem , d_memblocksize );
CUDA_UTIL_ERRORCHECK("hipMalloc");
e = hipMemset (devmem, 0, d_memblocksize );
CUDA_UTIL_ERRORCHECK("hipMalloc");
e = hipMemcpy( devmem, P_in.pElement, N*s* sizeof( t_ve ) , hipMemcpyHostToDevice);
CUDA_UTIL_ERRORCHECK("hipMemcpyHostToDevice");
printf("\n additional using %u bytes in Device memory", d_memblocksize);
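/* carve the single device allocation into sub-arrays following the size plan above:
   P, M, m, c, v, dR, dX, dnormv, q, t, buffer1, dm */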
t_ve* P = (t_ve*) devmem ;
t_ve* M = &P[ N * s ];
t_ve* m = &M[ s * s ];
t_ve* c = &m[ s ];
v = &c[ s ];
t_ve* dR = &v[N + 512 ];
t_ve* dX = &dR[ N * s ];
t_ve* dnormv = &dX[ N * s ];
t_ve* q = &dnormv[ N ];
t_ve* t = &q[ N ];
t_ve* buffer1 = &t[N + 512 ];
t_ve* dm = &buffer1[N + 512 ];
t_ve* x = ctxholder[ctx].x;
void* hostmem = malloc( h_memblocksize );
if ( hostmem == NULL ) { fprintf(stderr, "sorry, can not allocate memory for you hostmem"); exit( -1 ); }
t_ve* h_om1 = (t_ve*) hostmem;
t_ve* h_om2 = &h_om1[N];
t_ve* debugbuffer1 = &h_om2[N];
t_ve* h_norm = &debugbuffer1[N*s];
t_ve norm;
mr.m = A.m;
mr.n = 1;
mr.pElement = ctxholder[ctx].r;
mt.m = A.m;
mt.n = 1;
mt.pElement = t;
t_ve* r = mr.pElement;
mv.m = A.m;
mv.n = 1;
mv.pElement = v ;
om1 = ctxholder[ctx].om1;
om2 = ctxholder[ctx].om2;
dim3 dimGrid ( cnt_multiprozessors );
dim3 dimGrids( s );
dim3 dimGridN( N );
dim3 dimBlock(512);
dim3 dimGridsub( A.m / 512 + 1 );
dim3 dimGridgauss( 1 );
dim3 dimBlockgauss(512);
// t_ve som ;
//dbg_dump_mtx( dX,N,s, "dX" );
//dbg_dump_mtx( dR,N,s, "dR" );
//dbg_dump_mtx( P,s,N, "P" );
//dbg_dump_mtx( r,N,1, "r" );
//dbg_dump_mtx( x,N,1, "x" );
//if ( debugmode > 0 ) { printf("\n DEBUGMODE %u - starting L1", debugmode ); }
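/* first phase: for k = 1..s build column k of dX and dR with om = (v'*r)/(v'*v), v = A*r,
   update x and r, record the residual norm in resvec, and fill column k of M = P*dR */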
for ( t_mindex k = 1; k <= s; k++ ) {
t_ve* dR_k = &dR[ N * (k-1) ];
t_ve* dX_k = &dX[ N * (k-1) ];
// e = hipMemset (v, 0, sizeof(t_ve) * N );
// CUDA_UTIL_ERRORCHECK("hipMemset");
/* 22 v = A*r; */
//sparseMatrixMul<<<dimGrid,dimBlock>>>( mt, A, mv ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("sparseMatrixMul<<<dimGrid,dimBlock>>>( mt, A, mv )");
hipLaunchKernelGGL(( sparseMatrixMul), dim3(dimGrid),dim3(dimBlock), 0, 0, mv, A, mr ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("testsparseMatrixMul");
e = hipStreamSynchronize(0);
CUDA_UTIL_ERRORCHECK("hipStreamSynchronize(0)");
//dbg_dump_mtx( v,N,1, "v" );
//dbg_dump_mtx( r,N,1, "r" );
hipLaunchKernelGGL(( kernel_dotmul), dim3(dimGridsub),dim3(dimBlock), 0, 0, v, r, om1 ) ; e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("device_dotMul");
hipLaunchKernelGGL(( kernel_dotmul), dim3(dimGridsub),dim3(dimBlock), 0, 0, v, v, om2 ) ; e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("device_dotMul");
e = hipStreamSynchronize(0); CUDA_UTIL_ERRORCHECK("hipStreamSynchronize(0)");
// if ( debugmode > 0 ) { printf("\n DEBUGMODE %u - L1, k = %u, after Dotmul", debugmode, k ); }
e = hipMemcpy( h_om1, om1, sizeof(t_ve) * N * 2, hipMemcpyDeviceToHost); CUDA_UTIL_ERRORCHECK("hipMemcpy( h_om1, om1, sizeof(t_ve) * N * 2, hipMemcpyDeviceToHost)");
t_ve om;
t_ve som1 = 0;
t_ve som2 = 0;
for ( t_mindex blockidx = 0; blockidx < A.m / 512 + 1; blockidx++ ) {
som1 += h_om1[blockidx];
som2 += h_om2[blockidx];
}
om = som1 / som2;
if( debugmode > 1 ) { dbg_dotmul_checkresult( v, r, som1, N, "loop1, som1"); };
if( debugmode > 1 ) { dbg_dotmul_checkresult( v, v, som2, N, "loop1, som2"); };
hipLaunchKernelGGL(( kernel_vec_mul_skalar), dim3(dimGridsub),dim3(dimBlock), 0, 0, mr.pElement, om , dX_k, N ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("kernel_vec_mul_skalar<<<dimGridsub,dimBlock>>>( mr.pElement, som , dX_k, N )");
if( debugmode > 1 ) { dbg_vec_mul_skalar( r, dX_k, om, N, "mr.pElement, om , dX_k, N" ); }
hipLaunchKernelGGL(( kernel_vec_mul_skalar), dim3(dimGridsub),dim3(dimBlock), 0, 0, mv.pElement, - om , dR_k, N ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("kernel_vec_mul_skalar<<<dimGridsub,dimBlock>>>( mv.pElement, - som , dR_k, N )");
if( debugmode > 1 ) { dbg_vec_mul_skalar( v, dR_k, -1 * om, N, "mv.pElement, - om , dR_k, N" ); }
e = hipStreamSynchronize(0);
CUDA_UTIL_ERRORCHECK("hipStreamSynchronize(0)");
hipLaunchKernelGGL(( add_arrays_gpu), dim3(dimGridsub),dim3(dimBlock), 0, 0, x, dX_k, x, N ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("add_arrays_gpu<<<dimGridsub,dimBlock>>>( x, dX_k, x, N )");
hipLaunchKernelGGL(( add_arrays_gpu), dim3(dimGridsub),dim3(dimBlock), 0, 0, r, dR_k, r, N ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("add_arrays_gpu<<<dimGridsub,dimBlock>>>( mr.pElement, dR_k, mr.pElement, N );");
/* 26 normr = norm(r) */
e = hipMemset (dnormv, 0, sizeof(t_ve) * N );
CUDA_UTIL_ERRORCHECK("hipMemset");
hipLaunchKernelGGL(( kernel_norm), dim3(dimGridsub),dim3(dimBlock), 0, 0, mr.pElement, dnormv ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("kernel_norm<<<dimGridsub,dimBlock>>>( mr.pElement, dnormv )");
//dbg_dump_mtx( dnormv,N,1, "dnormv" );
e = hipMemcpy( h_norm, dnormv, sizeof(t_ve) * N , hipMemcpyDeviceToHost);
CUDA_UTIL_ERRORCHECK(" hipMemcpy debugbuffer");
t_ve snorm = 0;
for ( t_mindex i = 0; i < N / 512 + 1 ; i++ ) {
snorm += h_norm[i];
}
norm = sqrt(snorm);
if( debugmode > 1 ) { dbg_norm_checkresult( r, norm , N, "loop1, norm"); }
resvec[ resveci++ ] = norm;
/* 28 M(:,k) = P*dR(:,k); */
t_ve* Mk = &M[ s * (k-1) ];
hipLaunchKernelGGL(( matrixMul), dim3(dimGrids),dim3(dimBlock), 0, 0, Mk, P, dR_k , s, N ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("matrixMul<<<dimGrid,dimBlock>>>( P, r , m, s, 1 )");
//matrixMul_long_mA<<<dimGrids,dimBlock>>>( Mk, P, dR_k , s, N ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("matrixMul<<<dimGrid,dimBlock>>>( P, r , m, s, 1 )");
if( debugmode > 1 ) { dbg_matrixMul_checkresult( Mk, P, dR_k , s, N, "28 M(:,k) = P*dR(:,k);" ); }
if( debugmode > 0 ) { printf("\n L1 k=%u, norm = %f 1 %f 2 %f", k, norm, som1, som2 ); }
e = hipStreamSynchronize(0);
CUDA_UTIL_ERRORCHECK("hipStreamSynchronize(0)");
}
//dbg_dump_mtx( dX,N,s, "dX" );
//dbg_dump_mtx( dR,N,s, "dR" );
//dbg_dump_mtx( M,s,s, "M" );
t_mindex iter = s; /* iter.m line 31 */
t_mindex oldest = 0; /* iter.m line 32 */
/* 33 m = P* r */
hipLaunchKernelGGL(( matrixMul), dim3(dimGrids),dim3(dimBlock), 0, 0, m, P, r , s, N ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("matrixMul<<<dimGrid,dimBlock>>>( P, r , m, s, 1 )");
if( debugmode > 1 ) { dbg_matrixMul_checkresult( m, P, r , s, N, " 33 m = P* r " ) ; }
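/* main iteration: repeat until the residual norm drops to tolr or iter reaches maxit */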
while ( (norm > tolr ) && ( iter < maxit ) ) {
for ( t_mindex k = 0; k <= s; k++ ) {
t_ve om;
t_ve* dRoldest = &dR[ oldest * N ];
t_ve* dXoldest = &dX[ oldest * N ];
//sgstag
/* 36 c = M\m iter.m line 36 */
hipLaunchKernelGGL(( device_gauss_solver), dim3(dimGridgauss),dim3(dimBlockgauss), 0, 0, M, s, c ); /* vec m is s+1 column of M - see memory allocation plan */
e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("device_gauss_solver<<<dimGridgauss,dimBlockgauss>>>( M, s, c )");
if( debugmode > 1 ) { dbg_solver_check_result( M, s, c ); }
/* 37 q = -dR * c */
// if ( N > 2000 ) {
hipLaunchKernelGGL(( matrixMul_long_mA), dim3(dimGrid),dim3(dimBlock), 0, 0, q, dR , c, N, s ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("matrixMul<<<dimGridgauss,dimBlockgauss>>>( q, dR , c, N, 1 )");
// }
// else {
// matrixMul<<<dimGridN,dimBlock>>>( q, dR , c, N, s ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("matrixMul<<<dimGridgauss,dimBlockgauss>>>( q, dR , c, N, 1 )");
// }
if( debugmode > 1 ) { dbg_matrixMul_checkresult( q, dR , c, N, s, "37 q = -dR * c " ); }
hipLaunchKernelGGL(( kernel_vec_mul_skalar), dim3(dimGridsub),dim3(dimBlock), 0, 0, q, -1 , q, N ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("kernel_vec_mul_skalar<<<dimGridsub,dimBlock>>>( mv.pElement, - som , dR_k, N )");
/* 38 v = r + q */
hipLaunchKernelGGL(( add_arrays_gpu), dim3(dimGridsub),dim3(dimBlock), 0, 0, r, q, v, N ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("add_arrays_gpu<<<dimGridsub,dimBlock>>>( x, dX_k, x, N )");
if ( k == 0 ) {
/* 40 t = A*v idrs.m */
e = hipMemset (t, 0, sizeof(t_ve) * N );
CUDA_UTIL_ERRORCHECK("hipMalloc");
hipLaunchKernelGGL(( sparseMatrixMul), dim3(dimGrid),dim3(dimBlock), 0, 0, mt, A, mv ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("sparseMatrixMul<<<dimGrid,dimBlock>>>( mt, A, mv )");
e = hipStreamSynchronize(0);
CUDA_UTIL_ERRORCHECK("hipStreamSynchronize(0)");
hipLaunchKernelGGL(( kernel_dotmul), dim3(dimGridsub),dim3(dimBlock), 0, 0, t, v, om1 ) ; e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("device_dotMul");
hipLaunchKernelGGL(( kernel_dotmul), dim3(dimGridsub),dim3(dimBlock), 0, 0, t, t, om2 ) ; e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("device_dotMul");
e = hipStreamSynchronize(0); CUDA_UTIL_ERRORCHECK("hipStreamSynchronize(0)");
e = hipMemcpy( h_om1, om1, sizeof(t_ve) * N * 2, hipMemcpyDeviceToHost); CUDA_UTIL_ERRORCHECK("hipMemcpy( h_om1, om1, sizeof(t_ve) * N * 2, hipMemcpyDeviceToHost)");
t_ve som1 = 0;
t_ve som2 = 0;
for ( t_mindex blockidx = 0; blockidx < A.m / 512 + 1; blockidx++ ) {
som1 += h_om1[blockidx];
som2 += h_om2[blockidx];
//printf("\n h_om1[%u] = %f ", blockidx, h_om1[blockidx] );
}
om = som1 / som2;
if( debugmode > 1 ) { dbg_dotmul_checkresult( t, v, som1, N, "loop2, som1"); }
if( debugmode > 1 ) { dbg_dotmul_checkresult( t, t, som2, N, "loop2, som2"); }
if( debugmode > 0 ) { printf("\n L2 k = %u om = %f om1=%f om2=%f", k, om, som1, som2 ); }
/* 42 dR(:,oldest) = q - om*t; % 1 update */
hipLaunchKernelGGL(( add_and_mul_arrays_gpu), dim3(dimGridsub),dim3(dimBlock), 0, 0, q, t, -om, dRoldest , N); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("sub_and_mul_arrays_gpu");
/* 43 dX(:,oldest) = -dX*c + om*v; % s updates + 1 scaling */
hipLaunchKernelGGL(( matrixMul_long_mA), dim3(dimGrid),dim3(dimBlock), 0, 0, buffer1, dX, c , N, s ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("matrixMul<<<dimGrid,dimBlock>>>( dX, c , dXoldest, N, 1 )");
if( debugmode > 1 ) { dbg_matrixMul_checkresult( buffer1, dX, c , N, s, "43 dX(:,oldest) = -dX*c + om*v; % s updates + 1 scaling" ); }
hipLaunchKernelGGL(( kernel_vec_mul_skalar), dim3(dimGridsub),dim3(dimBlock), 0, 0, buffer1, -1 , buffer1, N ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("kernel_vec_mul_skalar<<<dimGridsub,dimBlock>>>( mv.pElement, - som , dR_k, N )");
hipLaunchKernelGGL(( add_and_mul_arrays_gpu), dim3(dimGridsub),dim3(dimBlock), 0, 0, buffer1, v, om, dXoldest , N); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("add_and_mul_arrays_gpu");
//if( debugmode > 0 ) { printf("\n k = %u om = %f om1=%f om2=%f", k, om, som1, som2 ); }
}
else {
t_FullMatrix mdRoldest;
t_FullMatrix mdXoldest;
mdRoldest.m = 1;
mdRoldest.n = N;
mdRoldest.pElement = dRoldest;
mdXoldest.m = 1;
mdXoldest.n = N;
mdXoldest.pElement = dXoldest;
/* 45 dX(:,oldest) = -dX*c + om*v; % s updates + 1 scaling */
hipLaunchKernelGGL(( matrixMul_long_mA), dim3(dimGrid),dim3(dimBlock), 0, 0, buffer1, dX, c , N, s ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("matrixMul<<<dimGrid,dimBlock>>>( dX, c , dXoldest, N, 1 )");
if( debugmode > 1 ) { dbg_matrixMul_checkresult( buffer1, dX, c , N, s, "45 dX(:,oldest) = -dX*c + om*v"); }
hipLaunchKernelGGL(( kernel_vec_mul_skalar), dim3(dimGridsub),dim3(dimBlock), 0, 0, buffer1, -1 , buffer1, N ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("kernel_vec_mul_skalar<<<dimGridsub,dimBlock>>>( mv.pElement, - som , dR_k, N )");
hipLaunchKernelGGL(( add_and_mul_arrays_gpu), dim3(dimGridsub),dim3(dimBlock), 0, 0, buffer1, v, om, dXoldest , N); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("add_and_mul_arrays_gpu");
/* 46 dR(:,oldest) = -A*dX(:,oldest); % 1 matmul */
e = hipMemset (mdRoldest.pElement, 0, sizeof(t_ve) * N );
CUDA_UTIL_ERRORCHECK("hipMalloc");
hipLaunchKernelGGL(( sparseMatrixMul), dim3(dimGrid),dim3(dimBlock), 0, 0, mdRoldest, A, mdXoldest ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("sparseMatrixMul<<<dimGrid,dimBlock>>>( mt, A, mv )");
hipLaunchKernelGGL(( kernel_vec_mul_skalar), dim3(dimGridsub),dim3(dimBlock), 0, 0, dRoldest, -1 , dRoldest, N ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("kernel_vec_mul_skalar<<<dimGridsub,dimBlock>>>( mv.pElement, - som , dR_k, N )");
}
/* 48 r = r + dR(:,oldest); % simple addition */
hipLaunchKernelGGL(( add_arrays_gpu), dim3(dimGridsub),dim3(dimBlock), 0, 0, r, dRoldest, r, N ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("add_arrays_gpu<<<dimGridsub,dimBlock>>>( r, dRoldest, r, N )");
/* 49 x = x + dX(:,oldest); % simple addition */
hipLaunchKernelGGL(( add_arrays_gpu), dim3(dimGridsub),dim3(dimBlock), 0, 0, x, dXoldest, x, N ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("add_arrays_gpu<<<dimGridsub,dimBlock>>>( r, dRoldest, r, N )");
iter++;
e = hipMemset (dnormv, 0, sizeof(t_ve) * N );
CUDA_UTIL_ERRORCHECK("hipMalloc");
hipLaunchKernelGGL(( kernel_norm), dim3(dimGridsub),dim3(dimBlock), 0, 0, r, dnormv ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("kernel_norm<<<dimGridsub,dimBlock>>>( mr.pElement, dnormv )");
e = hipMemcpy( h_norm, dnormv, sizeof(t_ve) * N , hipMemcpyDeviceToHost); CUDA_UTIL_ERRORCHECK(" hipMemcpy debugbuffer");
t_ve snorm = 0;
for ( t_mindex i = 0; i < N / 512 + 1 ; i++ ) {
snorm += h_norm[i];
}
norm = sqrt( snorm ); resvec[ resveci++ ] = norm ;
if( debugmode > 1 ) { dbg_norm_checkresult( r, norm , N, "loop2, norm"); }
if( debugmode > 0 ) { printf( "\n L2 iteration %u k=%u, oldest=%u, norm %f", iter, k, oldest, norm ); }
/* 53 dm = P*dR(:,oldest); % s inner products */
t_ve* Moldest = &M[ s * oldest ];
dm = Moldest;
e = hipStreamSynchronize(0);
CUDA_UTIL_ERRORCHECK("hipStreamSynchronize(0)");
hipLaunchKernelGGL(( matrixMul), dim3(dimGrids),dim3(dimBlock), 0, 0, Moldest, P, dRoldest , s, N ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("matrixMul<<<dimGrid,dimBlock>>>( P, dRoldest , Moldest, s, 1 )");
e = hipStreamSynchronize(0);
CUDA_UTIL_ERRORCHECK("hipStreamSynchronize(0)");
if( debugmode > 1 ) { dbg_matrixMul_checkresult( Moldest, P, dRoldest , s, N, "53 dm = P*dR(:,oldest)" ); }
/* 55 m = m + dm; */
hipLaunchKernelGGL(( add_arrays_gpu), dim3(dimGridgauss),dim3(dimBlock), 0, 0, m, dm, m, s ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("add_arrays_gpu<<<dimGridsub,dimBlock>>>( r, dRoldest, r, N )");
oldest++;
if ( oldest > s - 1 ) {
oldest = 0 ;
}
}
}
*piter = iter;
e = hipMemcpy( x_out, x, sizeof(t_ve) * N , hipMemcpyDeviceToHost);
CUDA_UTIL_ERRORCHECK("hipMemcpy");
e = hipFree( devmem );
CUDA_UTIL_ERRORCHECK("e = hipFree( devmem );");
e = hipFree( ctxholder[ctx].devmem1stcall );
CUDA_UTIL_ERRORCHECK("hipFree ctxholder[ctx].devmem1stcall ");
free( hostmem );
}
/*
__global__ void testsparseMatrixMul( t_FullMatrix pResultVector,t_SparseMatrix pSparseMatrix, t_FullMatrix b ) {
t_mindex tix = blockIdx.x * blockDim.x + threadIdx.x;
if ( tix < pSparseMatrix.m ) {
//printf ( "\n block %u thread %u tix %u N %u", blockIdx.x, threadIdx.x, tix, pSparseMatrix.m );
//printf("\n %u %f", tix, b.pElement[tix] );
pResultVector.pElement[tix] = b.pElement[tix] - 1;
}
if ( tix == 0 ) {
for ( t_mindex i = 0; i < pSparseMatrix.m + 1 ; i++ ) {
printf("\n pRow[%u] = %u", i, pSparseMatrix.pRow[i] );
}
for ( t_mindex i = 0; i < pSparseMatrix.nzmax ; i++ ) {
printf("\n pNZElement[%u] = %f", i, pSparseMatrix.pNZElement[i] );
}
for ( t_mindex i = 0; i < pSparseMatrix.nzmax ; i++ ) {
printf("\n pCol[%u] = %u", i, pSparseMatrix.pCol[i] );
}
}
}
*/
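/* overlay the device-side sparse-matrix pointers on a packed block: values first,
   then column indices, then row pointers (same order as the host-side packing in idrs_1st) */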
__host__ void set_sparse_data( t_SparseMatrix A_in, t_SparseMatrix* A_out, void* mv ) {
A_out->m = A_in.m;
A_out->n = A_in.n;
A_out->nzmax = A_in.nzmax;
A_out->pNZElement = (t_ve *) mv ;
A_out->pCol = (t_mindex *) &A_out->pNZElement[ A_out->nzmax ];
A_out->pRow = (t_mindex *) (&A_out->pCol[A_out->nzmax]);
// A_out->pCol = (t_mindex *) mv;
// A_out->pNZElement = (t_ve *) (&A_out->pCol[A_out->nzmax] ) ;
// A_out->pRow = (t_mindex *) (&A_out->pNZElement[A_out->nzmax]);
}
extern "C" void idrs_1st(
t_SparseMatrix A_in, /* A Matrix in buyu-sparse-format */
t_ve* b_in, /* b as in A * x = b */
t_ve* xe_in,
t_mindex N,
t_ve* r_out, /* the r from idrs.m line 6 : r = b - A*x; */
t_idrshandle* ih_out, /* handle for holding all the device pointers between MATLAB calls */
t_ve* resvec_out
) {
t_idrshandle ctx;
hipError_t e;
size_t h_memblocksize;
size_t d_memblocksize;
t_SparseMatrix A_d;
t_ve* d_tmpAb;
t_ve* d_b;
t_ve* d_xe;
t_ve* d_r;
t_ve* xe;
void *hostmem;
void *devmem;
ctx = 0;
int cnt_multiprozessors;
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
printf(" Number of multiprocessors: %d\n", deviceProp.multiProcessorCount);
cnt_multiprozessors = deviceProp.multiProcessorCount;
}
h_memblocksize = smat_size( A_in.nzmax, A_in.m ) /* A sparse */
+ ( N + 512 ) * sizeof( t_ve ) /* b full */
+ N * sizeof( t_ve ) /* xe */
;
d_memblocksize = h_memblocksize
+ (N + 512) * sizeof( t_ve ) /* d_tmpAb */
+ (N + 512) * sizeof( t_ve ) /* d_r */
+ N * sizeof( t_ve ) /* om1 */
+ N * sizeof( t_ve ) /* om2 */
+ N * sizeof( t_ve ) /* x */
+ N * sizeof( t_ve ) /* normv */
;
printf("\n using N = %u (full vector size )", N );
printf("\n using %u bytes in Host memory", h_memblocksize);
printf("\n using %u bytes in Device memory", d_memblocksize);
hostmem = malloc( h_memblocksize );
if ( hostmem == NULL ) { fprintf(stderr, "sorry, can not allocate memory for you hostmem"); exit( -1 ); }
memset(hostmem, 0, h_memblocksize);
/*
pcol | t_mindex | .nzmax
pNZElement | t_ve | .nzmax
pRow | t_mindex | N
b | t_ve | N
d_xe | t_ve | N
d_tmpAb | t_ve | N
d_r | t_ve | N
d_om1 | t_ve | N
d_om2 | t_ve | N
*/
/* copy all parameter vectors into one monolithic block starting at hostmem */
t_ve* b = (t_ve *) hostmem;
memcpy( b, b_in, N * sizeof(t_ve) );
xe = (t_ve *) &b[N + 512];
memcpy( xe, xe_in, N * sizeof(t_ve) );
t_ve* pNZElement = (t_ve *) &xe[N] ;
memcpy( pNZElement, A_in.pNZElement, A_in.nzmax * sizeof(t_ve) );
t_mindex *pcol = (t_mindex *) &pNZElement[A_in.nzmax];
memcpy( pcol, A_in.pCol, A_in.nzmax * sizeof(t_mindex) );
t_mindex* pRow = (t_mindex *) (&pcol[A_in.nzmax]);
memcpy( pRow, A_in.pRow, ( A_in.m + 1 ) * sizeof(t_mindex) );
e = hipMalloc ( &devmem , d_memblocksize );
CUDA_UTIL_ERRORCHECK("hipMalloc")
e = hipMemset (devmem, 0, d_memblocksize );
CUDA_UTIL_ERRORCHECK("hipMemset");
d_tmpAb = (pt_ve) devmem;
d_r = (t_ve *) &d_tmpAb[ N + 512 ];
ctxholder[ctx].om1 = (t_ve *) &d_r[N + 512 ];
ctxholder[ctx].om2 = (t_ve *) &ctxholder[ctx].om1[N];
t_ve* normv = (t_ve *) &ctxholder[ctx].om2[N];
pt_ve devinputmem = &normv[N];
// set_sparse_data( A_in, &A_d, devinputmem );
d_b = (t_ve *) devinputmem ;
d_xe = (t_ve *) &d_b[N + 512 ];
set_sparse_data( A_in, &A_d, &d_xe[N] );
e = hipMemcpy( devinputmem, hostmem, h_memblocksize , hipMemcpyHostToDevice);
CUDA_UTIL_ERRORCHECK("hipMemcpyHostToDevice");
free(hostmem);
dim3 dimGrid ( cnt_multiprozessors );
dim3 dimGridsub( N / 512 + 1 );
dim3 dimBlock(512);
/* --------------------------------------------------------------------- */
t_FullMatrix mxe;
t_FullMatrix result;
mxe.m = N;
mxe.n = 1;
mxe.pElement = d_xe;
result.pElement = d_tmpAb;
result.m = N ;
result.n = 1;
//testsparseMatrixMul<<<dimGrid,dimBlock>>>( result, A_d, mb );
hipLaunchKernelGGL(( sparseMatrixMul), dim3(dimGrid),dim3(dimBlock), 0, 0, result, A_d, mxe );
e = hipGetLastError();
CUDA_UTIL_ERRORCHECK("testsparseMatrixMul");
// add_arrays_gpu( t_ve *in1, t_ve *in2, t_ve *out, t_mindex N)
hipLaunchKernelGGL(( sub_arrays_gpu), dim3(dimGridsub),dim3(dimBlock), 0, 0, d_b, d_tmpAb, d_r, N);
e = hipGetLastError();
CUDA_UTIL_ERRORCHECK("sub_arrays_gpu");
/* --------------------------------------------------------------------- */
e = hipMemcpy( r_out, d_r, sizeof(t_ve) * N, hipMemcpyDeviceToHost);
CUDA_UTIL_ERRORCHECK("hipMemcpyDeviceToHost");
/* 7 normr = norm(r); */
if ( debugmode > 0 ) { printf("\n %s %u: dimGridsub = %u", __FILE__, __LINE__, dimGridsub.x ); }
hipLaunchKernelGGL(( kernel_norm), dim3(dimGridsub),dim3(dimBlock), 0, 0, d_r, normv ); e = hipGetLastError(); CUDA_UTIL_ERRORCHECK("kernel_norm<<<dimGridsub,dimBlock>>>( mr.pElement, dnormv )");
t_ve* h_norm = (t_ve*) malloc( sizeof( t_ve ) * N );
if ( h_norm == NULL ) { fprintf(stderr, "sorry, can not allocate memory for you B"); exit( -1 ); }
e = hipMemcpy( h_norm, normv, sizeof(t_ve) * N, hipMemcpyDeviceToHost);
CUDA_UTIL_ERRORCHECK("hipMemcpyDeviceToHost");
t_ve snorm = 0;
for ( t_mindex i = 0; i < N / 512 + 1 ; i++ ) {
snorm += h_norm[i];
}
t_ve norm = sqrt(snorm);
//dbg_dump_mtx( d_b,N + 10,1, "b" );
//dbg_dump_mtx( normv,N,1, "normv" );
if( debugmode > 1 ) { dbg_norm_checkresult( d_r, norm , N, "1st norm for scaling, norm"); }
/* 9 */
resvec_out[0] = norm;
if ( debugmode > 0 ) { printf("\n %s %u: trying free", __FILE__, __LINE__ ); }
free(h_norm);
if ( debugmode > 0 ) { printf("\n %s %u: successful free", __FILE__, __LINE__ ); }
ctxholder[ctx].devmem1stcall = devmem;
ctxholder[ctx].A = A_d;
ctxholder[ctx].b = d_b;
ctxholder[ctx].r = d_r;
ctxholder[ctx].v = d_tmpAb; /* memory reuse */
ctxholder[ctx].x = d_xe;
*ih_out = ctx; /* context handle for later use in later calls */
} /* end idrs1st */
extern "C" void idrswhole(
t_SparseMatrix A_in, /* A Matrix in buyu-sparse-format */
t_ve* b_in, /* b as in A * x = b */
t_mindex s,
t_ve tol,
t_mindex maxit,
t_ve* x0_in,
t_mindex N,
t_ve* x_out,
t_ve* resvec_out,
unsigned int* piter
) {
t_ve* r;
t_idrshandle irdshandle;
t_FullMatrix P;
t_ve* P_init;
t_ve* P_ortho;
t_ve* P_transp;
r = ( t_ve* ) malloc( sizeof( t_ve ) * N );
if ( r == NULL) { fprintf(stderr, "sorry, can not allocate memory for you b"); exit( -1 ); }
P_init = ( t_ve* ) malloc( sizeof( t_ve ) * N * s * 3 );
if ( P_init == NULL) { fprintf(stderr, "sorry, can not allocate memory for you b"); exit( -1 ); }
P_ortho = &P_init[ N * s ];
P_transp = &P_ortho[ N * s ];
printf("\n this is debugmode %u \n", debugmode);
idrs_1st( A_in, b_in, x0_in, N, r, &irdshandle, resvec_out );
orthogonalize( P_init, r, N, s );
for ( t_mindex i = 1; i <= N; i++ ) {
for ( t_mindex j = 1; j <= s; j++ ) {
P_transp[ as(j, i) ] = P_init[ a(i, j) ];
}
}
// for (int i = 0; i < N *s; i++ ) {
// printf("\n P_transp[%u]=%f", i, P_transp[i]);
// }
P.m = s;
P.n = N;
P.pElement = P_transp;
idrs2nd(
P,
tol * resvec_out[0], /* tolr = tol * norm(b) */
s, /* s - as discussed with Bastian on 2010-01-27 */
maxit,
irdshandle, /* Context Handle we got from idrs_1st */
x_out,
resvec_out,
piter
);
free(r);
free(P_init);
}
| 32db28275a0579be6a17775e1aa88f49f396f8b5.cu | #include <stdlib.h>
#include <stdio.h>
#include "projektcuda.h"
#include "kernels/sparseMatrixMul_kernel.h"
#include "kernels/dotMul_cuda_gpu.h"
#include "kernels/norm_cuda_gpu.h"
#include "kernels/gausskernel.h"
#include "bastianortho.h"
#include "kernels/matrixMul_kernel.h"
int debugmode = 0; /* No debugging as default. 1 = printf, 2=check all Operations in CPU */
typedef struct idrs_context {
void* devmem1stcall;
t_SparseMatrix A;
t_ve* b;
t_ve* r;
t_ve* v;
t_ve* x;
t_ve* om1;
t_ve* om2;
} t_idrs_context;
static t_idrs_context ctxholder[4];
extern "C" void set_debuglevel( int debuglevel ) {
debugmode = debuglevel;
};
extern "C" int get_debuglevel( ) {
return debugmode ;
};
extern "C" size_t idrs_sizetve() {
return sizeof(t_ve);
}
__host__ void testortholinkcompileonly() {
t_ve dummyRes;
t_ve dummyP;
orthogonalize( &dummyP, &dummyRes, 12345, 6 );
}
/* ------------------------------------------------------------------------------------- */
__global__ void kernel_vec_mul_skalar( t_ve *invec, t_ve scalar, t_ve *out, t_mindex N )
{
t_mindex i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < N )
out[i] = invec[i] * scalar;
}
__host__ void dbg_vec_mul_skalar(
t_ve* in1_in,
t_ve* out1_in,
t_ve scalar_in,
t_mindex N,
char* debugname
)
{
cudaError_t e;
t_ve* v = (t_ve*) malloc( sizeof( t_ve ) * N );
if ( v == NULL ) { fprintf(stderr, "sorry, can not allocate memory for you C"); exit( -1 ); }
t_ve* vresult = (t_ve*) malloc( sizeof( t_ve ) * N );
if ( vresult == NULL ) { fprintf(stderr, "sorry, can not allocate memory for you C"); exit( -1 ); }
e = cudaMemcpy( v, in1_in, sizeof(t_ve) * N , cudaMemcpyDeviceToHost);
CUDA_UTIL_ERRORCHECK("cudaMemcpy");
e = cudaMemcpy( vresult, out1_in, sizeof(t_ve) * N , cudaMemcpyDeviceToHost);
CUDA_UTIL_ERRORCHECK("cudaMemcpy");
for ( t_mindex i = 0; i < N ; i++ ) {
t_ve prod = v[i] * scalar_in;
if ( prod != vresult[i] ) {
fprintf(stderr, "\n vecmul NOT OK");
exit( -3);
}
}
free( v );
free( vresult );
}
/* ------------------------------------------------------------------------------------- */
__host__ void dbg_dump_mtx(
t_ve* dv,
t_mindex m,
t_mindex n,
char* mname
)
{
cudaError_t e;
t_ve* v = (t_ve*) malloc( sizeof( t_ve ) * m * n );
if ( v == NULL ) { fprintf(stderr, "sorry, can not allocate memory for you C"); exit( -1 ); }
e = cudaMemcpy( v, dv, sizeof(t_ve) * m * n , cudaMemcpyDeviceToHost);
CUDA_UTIL_ERRORCHECK(" cudaMemcpy debugbuffer");
for( t_mindex s=0; s < n; s++ ) {
for( t_mindex r=0; r < m; r++ ) {
t_mindex i = s * m + r;
printf("\n %s(%u,%u)=%s[%u] = %f ",mname, r+1, s+1, mname, i, v[i] );
}
}
free( v);
}
/* ------------------------------------------------------------------------------------- */
__global__ void sub_arrays_gpu( t_ve *in1, t_ve *in2, t_ve *out, t_mindex N)
{
t_mindex i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < N )
out[i] = in1[i] - in2[i];
}
__global__ void add_and_mul_arrays_gpu(
t_ve *in1,
t_ve *in2,
t_ve coefficient,
t_ve *out,
t_mindex N
)
{
t_mindex i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < N )
out[i] = in1[i] + coefficient * in2[i];
}
__global__ void add_arrays_gpu( t_ve *in1, t_ve *in2, t_ve *out, t_mindex N)
{
t_mindex i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < N )
out[i] = in1[i] + in2[i];
}
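/* bytes needed for one sparse matrix in the packed layout used below:
   nzmax values + nzmax column indices + (cnt_cols + 1) row pointers
   (callers pass A.m as cnt_cols, i.e. a CSR-style row-pointer array) */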
__host__ size_t smat_size( int cnt_elements, int cnt_cols ) {
return ( sizeof(t_ve) + sizeof(t_mindex) ) * cnt_elements
+ sizeof(t_mindex) * (cnt_cols + 1);
}
extern "C" void idrs2nd(
t_FullMatrix P_in,
t_ve tolr,
unsigned int s,
unsigned int maxit,
t_idrshandle ih_in, /* Context Handle we got from idrs_1st */
t_ve* x_out,
t_ve* resvec,
unsigned int* piter
) {
cudaError_t e;
t_idrshandle ctx;
t_FullMatrix mv;
t_FullMatrix mr;
t_FullMatrix mt;
int cnt_multiprozessors;
int deviceCount;
cudaGetDeviceCount(&deviceCount);
t_ve* om1;
t_ve* om2;
t_ve* v;
t_mindex resveci = 1;
void* devmem;
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf(" Number of multiprocessors: %d\n", deviceProp.multiProcessorCount);
cnt_multiprozessors = deviceProp.multiProcessorCount;
}
ctx = ih_in;
t_SparseMatrix A = ctxholder[ctx].A ;
t_mindex N = A.m;
size_t h_memblocksize = N * sizeof( t_ve ) /* om1 */
+ N * sizeof( t_ve ) /* om2 */
+ N * s * sizeof( t_ve ) /* debugbuffer1 */
+ N * sizeof( t_ve ) /* h_norm */
;
size_t d_memblocksize = (N*s ) * sizeof( t_ve ) /* P */
+ s * (s+1+1) * sizeof( t_ve ) /* M m c */
+ ( N + 512 ) * sizeof( t_ve ) /* v */
+ (N*s ) * sizeof( t_ve ) /* dR */
+ (N*s ) * sizeof( t_ve ) /* dX */
+ (N ) * sizeof( t_ve ) /* dnormv */
+ (N ) * sizeof( t_ve ) /* q */
+ (N + 512 ) * sizeof( t_ve ) /* t */
+ (N + 512 ) * sizeof( t_ve ) /* buffer1 */
+ (N + 512 ) * sizeof( t_ve ) /* dm */
+ maxit * sizeof( t_ve ) /* dm */
// + (N ) * sizeof( t_ve ) /* x */
;
e = cudaMalloc ( &devmem , d_memblocksize );
CUDA_UTIL_ERRORCHECK("cudaMalloc");
e = cudaMemset (devmem, 0, d_memblocksize );
CUDA_UTIL_ERRORCHECK("cudaMalloc");
e = cudaMemcpy( devmem, P_in.pElement, N*s* sizeof( t_ve ) , cudaMemcpyHostToDevice);
CUDA_UTIL_ERRORCHECK("cudaMemcpyHostToDevice");
printf("\n additional using %u bytes in Device memory", d_memblocksize);
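/* carve the single device allocation into sub-arrays following the size plan above:
   P, M, m, c, v, dR, dX, dnormv, q, t, buffer1, dm */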
t_ve* P = (t_ve*) devmem ;
t_ve* M = &P[ N * s ];
t_ve* m = &M[ s * s ];
t_ve* c = &m[ s ];
v = &c[ s ];
t_ve* dR = &v[N + 512 ];
t_ve* dX = &dR[ N * s ];
t_ve* dnormv = &dX[ N * s ];
t_ve* q = &dnormv[ N ];
t_ve* t = &q[ N ];
t_ve* buffer1 = &t[N + 512 ];
t_ve* dm = &buffer1[N + 512 ];
t_ve* x = ctxholder[ctx].x;
void* hostmem = malloc( h_memblocksize );
if ( hostmem == NULL ) { fprintf(stderr, "sorry, can not allocate memory for you hostmem"); exit( -1 ); }
t_ve* h_om1 = (t_ve*) hostmem;
t_ve* h_om2 = &h_om1[N];
t_ve* debugbuffer1 = &h_om2[N];
t_ve* h_norm = &debugbuffer1[N*s];
t_ve norm;
mr.m = A.m;
mr.n = 1;
mr.pElement = ctxholder[ctx].r;
mt.m = A.m;
mt.n = 1;
mt.pElement = t;
t_ve* r = mr.pElement;
mv.m = A.m;
mv.n = 1;
mv.pElement = v ;
om1 = ctxholder[ctx].om1;
om2 = ctxholder[ctx].om2;
dim3 dimGrid ( cnt_multiprozessors );
dim3 dimGrids( s );
dim3 dimGridN( N );
dim3 dimBlock(512);
dim3 dimGridsub( A.m / 512 + 1 );
dim3 dimGridgauss( 1 );
dim3 dimBlockgauss(512);
// t_ve som ;
//dbg_dump_mtx( dX,N,s, "dX" );
//dbg_dump_mtx( dR,N,s, "dR" );
//dbg_dump_mtx( P,s,N, "P" );
//dbg_dump_mtx( r,N,1, "r" );
//dbg_dump_mtx( x,N,1, "x" );
//if ( debugmode > 0 ) { printf("\n DEBUGMODE %u - starting L1", debugmode ); }
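/* first phase: for k = 1..s build column k of dX and dR with om = (v'*r)/(v'*v), v = A*r,
   update x and r, record the residual norm in resvec, and fill column k of M = P*dR */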
for ( t_mindex k = 1; k <= s; k++ ) {
t_ve* dR_k = &dR[ N * (k-1) ];
t_ve* dX_k = &dX[ N * (k-1) ];
// e = cudaMemset (v, 0, sizeof(t_ve) * N );
// CUDA_UTIL_ERRORCHECK("cudaMemset");
/* 22 v = A*r; */
//sparseMatrixMul<<<dimGrid,dimBlock>>>( mt, A, mv ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("sparseMatrixMul<<<dimGrid,dimBlock>>>( mt, A, mv )");
sparseMatrixMul<<<dimGrid,dimBlock>>>( mv, A, mr ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("testsparseMatrixMul");
e = cudaStreamSynchronize(0);
CUDA_UTIL_ERRORCHECK("cudaStreamSynchronize(0)");
//dbg_dump_mtx( v,N,1, "v" );
//dbg_dump_mtx( r,N,1, "r" );
kernel_dotmul<<<dimGridsub,dimBlock>>>( v, r, om1 ) ; e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("device_dotMul");
kernel_dotmul<<<dimGridsub,dimBlock>>>( v, v, om2 ) ; e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("device_dotMul");
e = cudaStreamSynchronize(0); CUDA_UTIL_ERRORCHECK("cudaStreamSynchronize(0)");
// if ( debugmode > 0 ) { printf("\n DEBUGMODE %u - L1, k = %u, after Dotmul", debugmode, k ); }
e = cudaMemcpy( h_om1, om1, sizeof(t_ve) * N * 2, cudaMemcpyDeviceToHost); CUDA_UTIL_ERRORCHECK("cudaMemcpy( h_om1, om1, sizeof(t_ve) * N * 2, cudaMemcpyDeviceToHost)");
t_ve om;
t_ve som1 = 0;
t_ve som2 = 0;
for ( t_mindex blockidx = 0; blockidx < A.m / 512 + 1; blockidx++ ) {
som1 += h_om1[blockidx];
som2 += h_om2[blockidx];
}
om = som1 / som2;
if( debugmode > 1 ) { dbg_dotmul_checkresult( v, r, som1, N, "loop1, som1"); };
if( debugmode > 1 ) { dbg_dotmul_checkresult( v, v, som2, N, "loop1, som2"); };
kernel_vec_mul_skalar<<<dimGridsub,dimBlock>>>( mr.pElement, om , dX_k, N ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("kernel_vec_mul_skalar<<<dimGridsub,dimBlock>>>( mr.pElement, som , dX_k, N )");
if( debugmode > 1 ) { dbg_vec_mul_skalar( r, dX_k, om, N, "mr.pElement, om , dX_k, N" ); }
kernel_vec_mul_skalar<<<dimGridsub,dimBlock>>>( mv.pElement, - om , dR_k, N ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("kernel_vec_mul_skalar<<<dimGridsub,dimBlock>>>( mv.pElement, - som , dR_k, N )");
if( debugmode > 1 ) { dbg_vec_mul_skalar( v, dR_k, -1 * om, N, "mv.pElement, - om , dR_k, N" ); }
e = cudaStreamSynchronize(0);
CUDA_UTIL_ERRORCHECK("cudaStreamSynchronize(0)");
add_arrays_gpu<<<dimGridsub,dimBlock>>>( x, dX_k, x, N ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("add_arrays_gpu<<<dimGridsub,dimBlock>>>( x, dX_k, x, N )");
add_arrays_gpu<<<dimGridsub,dimBlock>>>( r, dR_k, r, N ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("add_arrays_gpu<<<dimGridsub,dimBlock>>>( mr.pElement, dR_k, mr.pElement, N );");
/* 26 normr = norm(r) */
e = cudaMemset (dnormv, 0, sizeof(t_ve) * N );
CUDA_UTIL_ERRORCHECK("cudaMemset");
kernel_norm<<<dimGridsub,dimBlock>>>( mr.pElement, dnormv ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("kernel_norm<<<dimGridsub,dimBlock>>>( mr.pElement, dnormv )");
//dbg_dump_mtx( dnormv,N,1, "dnormv" );
e = cudaMemcpy( h_norm, dnormv, sizeof(t_ve) * N , cudaMemcpyDeviceToHost);
CUDA_UTIL_ERRORCHECK(" cudaMemcpy debugbuffer");
t_ve snorm = 0;
for ( t_mindex i = 0; i < N / 512 + 1 ; i++ ) {
snorm += h_norm[i];
}
norm = sqrt(snorm);
if( debugmode > 1 ) { dbg_norm_checkresult( r, norm , N, "loop1, norm"); }
resvec[ resveci++ ] = norm;
/* 28 M(:,k) = P*dR(:,k); */
t_ve* Mk = &M[ s * (k-1) ];
matrixMul<<<dimGrids,dimBlock>>>( Mk, P, dR_k , s, N ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("matrixMul<<<dimGrid,dimBlock>>>( P, r , m, s, 1 )");
//matrixMul_long_mA<<<dimGrids,dimBlock>>>( Mk, P, dR_k , s, N ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("matrixMul<<<dimGrid,dimBlock>>>( P, r , m, s, 1 )");
if( debugmode > 1 ) { dbg_matrixMul_checkresult( Mk, P, dR_k , s, N, "28 M(:,k) = P*dR(:,k);" ); }
if( debugmode > 0 ) { printf("\n L1 k=%u, norm = %f 1 %f 2 %f", k, norm, som1, som2 ); }
e = cudaStreamSynchronize(0);
CUDA_UTIL_ERRORCHECK("cudaStreamSynchronize(0)");
}
//dbg_dump_mtx( dX,N,s, "dX" );
//dbg_dump_mtx( dR,N,s, "dR" );
//dbg_dump_mtx( M,s,s, "M" );
t_mindex iter = s; /* iter.m line 31 */
t_mindex oldest = 0; /* iter.m line 32 */
/* 33 m = P* r */
matrixMul<<<dimGrids,dimBlock>>>( m, P, r , s, N ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("matrixMul<<<dimGrid,dimBlock>>>( P, r , m, s, 1 )");
if( debugmode > 1 ) { dbg_matrixMul_checkresult( m, P, r , s, N, " 33 m = P* r " ) ; }
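/* main iteration: repeat until the residual norm drops to tolr or iter reaches maxit */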
while ( (norm > tolr ) && ( iter < maxit ) ) {
for ( t_mindex k = 0; k <= s; k++ ) {
t_ve om;
t_ve* dRoldest = &dR[ oldest * N ];
t_ve* dXoldest = &dX[ oldest * N ];
//sgstag
/* 36 c = M\m iter.m line 36 */
device_gauss_solver<<<dimGridgauss,dimBlockgauss>>>( M, s, c ); /* vec m is s+1 column of M - see memory allocation plan */
e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("device_gauss_solver<<<dimGridgauss,dimBlockgauss>>>( M, s, c )");
if( debugmode > 1 ) { dbg_solver_check_result( M, s, c ); }
/* 37 q = -dR * c */
// if ( N > 2000 ) {
matrixMul_long_mA<<<dimGrid,dimBlock>>>( q, dR , c, N, s ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("matrixMul<<<dimGridgauss,dimBlockgauss>>>( q, dR , c, N, 1 )");
// }
// else {
// matrixMul<<<dimGridN,dimBlock>>>( q, dR , c, N, s ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("matrixMul<<<dimGridgauss,dimBlockgauss>>>( q, dR , c, N, 1 )");
// }
if( debugmode > 1 ) { dbg_matrixMul_checkresult( q, dR , c, N, s, "37 q = -dR * c " ); }
kernel_vec_mul_skalar<<<dimGridsub,dimBlock>>>( q, -1 , q, N ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("kernel_vec_mul_skalar<<<dimGridsub,dimBlock>>>( mv.pElement, - som , dR_k, N )");
/* 38 v = r + q */
add_arrays_gpu<<<dimGridsub,dimBlock>>>( r, q, v, N ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("add_arrays_gpu<<<dimGridsub,dimBlock>>>( x, dX_k, x, N )");
if ( k == 0 ) {
/* 40 t = A*v idrs.m */
e = cudaMemset (t, 0, sizeof(t_ve) * N );
CUDA_UTIL_ERRORCHECK("cudaMalloc");
sparseMatrixMul<<<dimGrid,dimBlock>>>( mt, A, mv ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("sparseMatrixMul<<<dimGrid,dimBlock>>>( mt, A, mv )");
e = cudaStreamSynchronize(0);
CUDA_UTIL_ERRORCHECK("cudaStreamSynchronize(0)");
kernel_dotmul<<<dimGridsub,dimBlock>>>( t, v, om1 ) ; e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("device_dotMul");
kernel_dotmul<<<dimGridsub,dimBlock>>>( t, t, om2 ) ; e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("device_dotMul");
e = cudaStreamSynchronize(0); CUDA_UTIL_ERRORCHECK("cudaStreamSynchronize(0)");
e = cudaMemcpy( h_om1, om1, sizeof(t_ve) * N * 2, cudaMemcpyDeviceToHost); CUDA_UTIL_ERRORCHECK("cudaMemcpy( h_om1, om1, sizeof(t_ve) * N * 2, cudaMemcpyDeviceToHost)");
t_ve som1 = 0;
t_ve som2 = 0;
for ( t_mindex blockidx = 0; blockidx < A.m / 512 + 1; blockidx++ ) {
som1 += h_om1[blockidx];
som2 += h_om2[blockidx];
//printf("\n h_om1[%u] = %f ", blockidx, h_om1[blockidx] );
}
om = som1 / som2;
if( debugmode > 1 ) { dbg_dotmul_checkresult( t, v, som1, N, "loop2, som1"); }
if( debugmode > 1 ) { dbg_dotmul_checkresult( t, t, som2, N, "loop2, som2"); }
if( debugmode > 0 ) { printf("\n L2 k = %u om = %f om1=%f om2=%f", k, om, som1, som2 ); }
/* 42 dR(:,oldest) = q - om*t; % 1 update */
add_and_mul_arrays_gpu<<<dimGridsub,dimBlock>>>( q, t, -om, dRoldest , N); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("sub_and_mul_arrays_gpu");
/* 43 dX(:,oldest) = -dX*c + om*v; % s updates + 1 scaling */
matrixMul_long_mA<<<dimGrid,dimBlock>>>( buffer1, dX, c , N, s ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("matrixMul<<<dimGrid,dimBlock>>>( dX, c , dXoldest, N, 1 )");
if( debugmode > 1 ) { dbg_matrixMul_checkresult( buffer1, dX, c , N, s, "43 dX(:,oldest) = -dX*c + om*v; % s updates + 1 scaling" ); }
kernel_vec_mul_skalar<<<dimGridsub,dimBlock>>>( buffer1, -1 , buffer1, N ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("kernel_vec_mul_skalar<<<dimGridsub,dimBlock>>>( mv.pElement, - som , dR_k, N )");
add_and_mul_arrays_gpu<<<dimGridsub,dimBlock>>>( buffer1, v, om, dXoldest , N); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("add_and_mul_arrays_gpu");
//if( debugmode > 0 ) { printf("\n k = %u om = %f om1=%f om2=%f", k, om, som1, som2 ); }
}
else {
t_FullMatrix mdRoldest;
t_FullMatrix mdXoldest;
mdRoldest.m = 1;
mdRoldest.n = N;
mdRoldest.pElement = dRoldest;
mdXoldest.m = 1;
mdXoldest.n = N;
mdXoldest.pElement = dXoldest;
/* 45 dX(:,oldest) = -dX*c + om*v; % s updates + 1 scaling */
matrixMul_long_mA<<<dimGrid,dimBlock>>>( buffer1, dX, c , N, s ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("matrixMul<<<dimGrid,dimBlock>>>( dX, c , dXoldest, N, 1 )");
if( debugmode > 1 ) { dbg_matrixMul_checkresult( buffer1, dX, c , N, s, "45 dX(:,oldest) = -dX*c + om*v"); }
kernel_vec_mul_skalar<<<dimGridsub,dimBlock>>>( buffer1, -1 , buffer1, N ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("kernel_vec_mul_skalar<<<dimGridsub,dimBlock>>>( mv.pElement, - som , dR_k, N )");
add_and_mul_arrays_gpu<<<dimGridsub,dimBlock>>>( buffer1, v, om, dXoldest , N); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("add_and_mul_arrays_gpu");
/* 46 dR(:,oldest) = -A*dX(:,oldest); % 1 matmul */
e = cudaMemset (mdRoldest.pElement, 0, sizeof(t_ve) * N );
CUDA_UTIL_ERRORCHECK("cudaMalloc");
sparseMatrixMul<<<dimGrid,dimBlock>>>( mdRoldest, A, mdXoldest ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("sparseMatrixMul<<<dimGrid,dimBlock>>>( mt, A, mv )");
kernel_vec_mul_skalar<<<dimGridsub,dimBlock>>>( dRoldest, -1 , dRoldest, N ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("kernel_vec_mul_skalar<<<dimGridsub,dimBlock>>>( mv.pElement, - som , dR_k, N )");
}
/* 48 r = r + dR(:,oldest); % simple addition */
add_arrays_gpu<<<dimGridsub,dimBlock>>>( r, dRoldest, r, N ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("add_arrays_gpu<<<dimGridsub,dimBlock>>>( r, dRoldest, r, N )");
/* 49 x = x + dX(:,oldest); % simple addition */
add_arrays_gpu<<<dimGridsub,dimBlock>>>( x, dXoldest, x, N ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("add_arrays_gpu<<<dimGridsub,dimBlock>>>( r, dRoldest, r, N )");
iter++;
e = cudaMemset (dnormv, 0, sizeof(t_ve) * N );
CUDA_UTIL_ERRORCHECK("cudaMalloc");
kernel_norm<<<dimGridsub,dimBlock>>>( r, dnormv ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("kernel_norm<<<dimGridsub,dimBlock>>>( mr.pElement, dnormv )");
e = cudaMemcpy( h_norm, dnormv, sizeof(t_ve) * N , cudaMemcpyDeviceToHost); CUDA_UTIL_ERRORCHECK(" cudaMemcpy debugbuffer");
t_ve snorm = 0;
for ( t_mindex i = 0; i < N / 512 + 1 ; i++ ) {
snorm += h_norm[i];
}
norm = sqrt( snorm ); resvec[ resveci++ ] = norm ;
if( debugmode > 1 ) { dbg_norm_checkresult( r, norm , N, "loop2, norm"); }
if( debugmode > 0 ) { printf( "\n L2 iteration %u k=%u, oldest=%u, norm %f", iter, k, oldest, norm ); }
/* 53 dm = P*dR(:,oldest); % s inner products */
t_ve* Moldest = &M[ s * oldest ];
dm = Moldest;
e = cudaStreamSynchronize(0);
CUDA_UTIL_ERRORCHECK("cudaStreamSynchronize(0)");
matrixMul<<<dimGrids,dimBlock>>>( Moldest, P, dRoldest , s, N ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("matrixMul<<<dimGrid,dimBlock>>>( P, dRoldest , Moldest, s, 1 )");
e = cudaStreamSynchronize(0);
CUDA_UTIL_ERRORCHECK("cudaStreamSynchronize(0)");
if( debugmode > 1 ) { dbg_matrixMul_checkresult( Moldest, P, dRoldest , s, N, "53 dm = P*dR(:,oldest)" ); }
/* 55 m = m + dm; */
add_arrays_gpu<<<dimGridgauss,dimBlock>>>( m, dm, m, s ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("add_arrays_gpu<<<dimGridsub,dimBlock>>>( r, dRoldest, r, N )");
oldest++;
if ( oldest > s - 1 ) {
oldest = 0 ;
}
}
}
*piter = iter;
e = cudaMemcpy( x_out, x, sizeof(t_ve) * N , cudaMemcpyDeviceToHost);
CUDA_UTIL_ERRORCHECK("cudaMemcpy");
e = cudaFree( devmem );
CUDA_UTIL_ERRORCHECK("e = cudaFree( devmem );");
e = cudaFree( ctxholder[ctx].devmem1stcall );
CUDA_UTIL_ERRORCHECK("cudaFree ctxholder[ctx].devmem1stcall ");
free( hostmem );
}
/*
__global__ void testsparseMatrixMul( t_FullMatrix pResultVector,t_SparseMatrix pSparseMatrix, t_FullMatrix b ) {
t_mindex tix = blockIdx.x * blockDim.x + threadIdx.x;
if ( tix < pSparseMatrix.m ) {
//printf ( "\n block %u thread %u tix %u N %u", blockIdx.x, threadIdx.x, tix, pSparseMatrix.m );
//printf("\n %u %f", tix, b.pElement[tix] );
pResultVector.pElement[tix] = b.pElement[tix] - 1;
}
if ( tix == 0 ) {
for ( t_mindex i = 0; i < pSparseMatrix.m + 1 ; i++ ) {
printf("\n pRow[%u] = %u", i, pSparseMatrix.pRow[i] );
}
for ( t_mindex i = 0; i < pSparseMatrix.nzmax ; i++ ) {
printf("\n pNZElement[%u] = %f", i, pSparseMatrix.pNZElement[i] );
}
for ( t_mindex i = 0; i < pSparseMatrix.nzmax ; i++ ) {
printf("\n pCol[%u] = %u", i, pSparseMatrix.pCol[i] );
}
}
}
*/
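/* overlay the device-side sparse-matrix pointers on a packed block: values first,
   then column indices, then row pointers (same order as the host-side packing in idrs_1st) */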
__host__ void set_sparse_data( t_SparseMatrix A_in, t_SparseMatrix* A_out, void* mv ) {
A_out->m = A_in.m;
A_out->n = A_in.n;
A_out->nzmax = A_in.nzmax;
A_out->pNZElement = (t_ve *) mv ;
A_out->pCol = (t_mindex *) &A_out->pNZElement[ A_out->nzmax ];
A_out->pRow = (t_mindex *) (&A_out->pCol[A_out->nzmax]);
// A_out->pCol = (t_mindex *) mv;
// A_out->pNZElement = (t_ve *) (&A_out->pCol[A_out->nzmax] ) ;
// A_out->pRow = (t_mindex *) (&A_out->pNZElement[A_out->nzmax]);
}
extern "C" void idrs_1st(
t_SparseMatrix A_in, /* A Matrix in buyu-sparse-format */
t_ve* b_in, /* b as in A * x = b */
t_ve* xe_in,
t_mindex N,
t_ve* r_out, /* the r from idrs.m line 6 : r = b - A*x; */
t_idrshandle* ih_out, /* handle for holding all the device pointers between MATLAB calls */
t_ve* resvec_out
) {
t_idrshandle ctx;
cudaError_t e;
size_t h_memblocksize;
size_t d_memblocksize;
t_SparseMatrix A_d;
t_ve* d_tmpAb;
t_ve* d_b;
t_ve* d_xe;
t_ve* d_r;
t_ve* xe;
void *hostmem;
void *devmem;
ctx = 0;
int cnt_multiprozessors;
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf(" Number of multiprocessors: %d\n", deviceProp.multiProcessorCount);
cnt_multiprozessors = deviceProp.multiProcessorCount;
}
h_memblocksize = smat_size( A_in.nzmax, A_in.m ) /* A sparse */
+ ( N + 512 ) * sizeof( t_ve ) /* b full */
+ N * sizeof( t_ve ) /* xe */
;
d_memblocksize = h_memblocksize
+ (N + 512) * sizeof( t_ve ) /* d_tmpAb */
+ (N + 512) * sizeof( t_ve ) /* d_r */
+ N * sizeof( t_ve ) /* om1 */
+ N * sizeof( t_ve ) /* om2 */
+ N * sizeof( t_ve ) /* x */
+ N * sizeof( t_ve ) /* normv */
;
printf("\n using N = %u (full vector size )", N );
printf("\n using %u bytes in Host memory", h_memblocksize);
printf("\n using %u bytes in Device memory", d_memblocksize);
hostmem = malloc( h_memblocksize );
if ( hostmem == NULL ) { fprintf(stderr, "sorry, can not allocate memory for you hostmem"); exit( -1 ); }
memset(hostmem, 0, h_memblocksize);
/*
pcol | t_mindex | .nzmax
pNZElement | t_ve | .nzmax
pRow | t_mindex | N
b | t_ve | N
d_xe | t_ve | N
d_tmpAb | t_ve | N
d_r | t_ve | N
d_om1 | t_ve | N
d_om2 | t_ve | N
*/
/* copy all parameter vectors into one monolithic block starting at hostmem */
t_ve* b = (t_ve *) hostmem;
memcpy( b, b_in, N * sizeof(t_ve) );
xe = (t_ve *) &b[N + 512];
memcpy( xe, xe_in, N * sizeof(t_ve) );
t_ve* pNZElement = (t_ve *) &xe[N] ;
memcpy( pNZElement, A_in.pNZElement, A_in.nzmax * sizeof(t_ve) );
t_mindex *pcol = (t_mindex *) &pNZElement[A_in.nzmax];
memcpy( pcol, A_in.pCol, A_in.nzmax * sizeof(t_mindex) );
t_mindex* pRow = (t_mindex *) (&pcol[A_in.nzmax]);
memcpy( pRow, A_in.pRow, ( A_in.m + 1 ) * sizeof(t_mindex) );
e = cudaMalloc ( &devmem , d_memblocksize );
CUDA_UTIL_ERRORCHECK("cudaMalloc")
e = cudaMemset (devmem, 0, d_memblocksize );
CUDA_UTIL_ERRORCHECK("cudaMemset");
d_tmpAb = (pt_ve) devmem;
d_r = (t_ve *) &d_tmpAb[ N + 512 ];
ctxholder[ctx].om1 = (t_ve *) &d_r[N + 512 ];
ctxholder[ctx].om2 = (t_ve *) &ctxholder[ctx].om1[N];
t_ve* normv = (t_ve *) &ctxholder[ctx].om2[N];
pt_ve devinputmem = &normv[N];
// set_sparse_data( A_in, &A_d, devinputmem );
d_b = (t_ve *) devinputmem ;
d_xe = (t_ve *) &d_b[N + 512 ];
set_sparse_data( A_in, &A_d, &d_xe[N] );
e = cudaMemcpy( devinputmem, hostmem, h_memblocksize , cudaMemcpyHostToDevice);
CUDA_UTIL_ERRORCHECK("cudaMemcpyHostToDevice");
free(hostmem);
dim3 dimGrid ( cnt_multiprozessors );
dim3 dimGridsub( N / 512 + 1 );
dim3 dimBlock(512);
/* --------------------------------------------------------------------- */
t_FullMatrix mxe;
t_FullMatrix result;
mxe.m = N;
mxe.n = 1;
mxe.pElement = d_xe;
result.pElement = d_tmpAb;
result.m = N ;
result.n = 1;
//testsparseMatrixMul<<<dimGrid,dimBlock>>>( result, A_d, mb );
sparseMatrixMul<<<dimGrid,dimBlock>>>( result, A_d, mxe );
e = cudaGetLastError();
CUDA_UTIL_ERRORCHECK("testsparseMatrixMul");
// add_arrays_gpu( t_ve *in1, t_ve *in2, t_ve *out, t_mindex N)
sub_arrays_gpu<<<dimGridsub,dimBlock>>>( d_b, d_tmpAb, d_r, N);
e = cudaGetLastError();
CUDA_UTIL_ERRORCHECK("sub_arrays_gpu");
/* --------------------------------------------------------------------- */
e = cudaMemcpy( r_out, d_r, sizeof(t_ve) * N, cudaMemcpyDeviceToHost);
CUDA_UTIL_ERRORCHECK("cudaMemcpyDeviceToHost");
/* 7 normr = norm(r); */
if ( debugmode > 0 ) { printf("\n %s %u: dimGridsub = %u", __FILE__, __LINE__, dimGridsub.x ); }
kernel_norm<<<dimGridsub,dimBlock>>>( d_r, normv ); e = cudaGetLastError(); CUDA_UTIL_ERRORCHECK("kernel_norm<<<dimGridsub,dimBlock>>>( mr.pElement, dnormv )");
t_ve* h_norm = (t_ve*) malloc( sizeof( t_ve ) * N );
if ( h_norm == NULL ) { fprintf(stderr, "sorry, can not allocate memory for you B"); exit( -1 ); }
e = cudaMemcpy( h_norm, normv, sizeof(t_ve) * N, cudaMemcpyDeviceToHost);
CUDA_UTIL_ERRORCHECK("cudaMemcpyDeviceToHost");
t_ve snorm = 0;
for ( t_mindex i = 0; i < N / 512 + 1 ; i++ ) {
snorm += h_norm[i];
}
t_ve norm = sqrt(snorm);
//dbg_dump_mtx( d_b,N + 10,1, "b" );
//dbg_dump_mtx( normv,N,1, "normv" );
if( debugmode > 1 ) { dbg_norm_checkresult( d_r, norm , N, "1st norm for scaling, norm"); }
/* 9 */
resvec_out[0] = norm;
if ( debugmode > 0 ) { printf("\n %s %u: trying free", __FILE__, __LINE__ ); }
free(h_norm);
if ( debugmode > 0 ) { printf("\n %s %u: successful free", __FILE__, __LINE__ ); }
ctxholder[ctx].devmem1stcall = devmem;
ctxholder[ctx].A = A_d;
ctxholder[ctx].b = d_b;
ctxholder[ctx].r = d_r;
ctxholder[ctx].v = d_tmpAb; /* memory reuse */
ctxholder[ctx].x = d_xe;
*ih_out = ctx; /* context handle for later use in later calls */
} /* end idrs1st */
extern "C" void idrswhole(
t_SparseMatrix A_in, /* A Matrix in buyu-sparse-format */
t_ve* b_in, /* b as in A * x = b */
t_mindex s,
t_ve tol,
t_mindex maxit,
t_ve* x0_in,
t_mindex N,
t_ve* x_out,
t_ve* resvec_out,
unsigned int* piter
) {
t_ve* r;
t_idrshandle irdshandle;
t_FullMatrix P;
t_ve* P_init;
t_ve* P_ortho;
t_ve* P_transp;
r = ( t_ve* ) malloc( sizeof( t_ve ) * N );
if ( r == NULL) { fprintf(stderr, "sorry, can not allocate memory for you b"); exit( -1 ); }
P_init = ( t_ve* ) malloc( sizeof( t_ve ) * N * s * 3 );
if ( P_init == NULL) { fprintf(stderr, "sorry, can not allocate memory for you b"); exit( -1 ); }
P_ortho = &P_init[ N * s ];
P_transp = &P_ortho[ N * s ];
printf("\n this is debugmode %u \n", debugmode);
idrs_1st( A_in, b_in, x0_in, N, r, &irdshandle, resvec_out );
orthogonalize( P_init, r, N, s );
for ( t_mindex i = 1; i <= N; i++ ) {
for ( t_mindex j = 1; j <= s; j++ ) {
P_transp[ as(j, i) ] = P_init[ a(i, j) ];
}
}
// for (int i = 0; i < N *s; i++ ) {
// printf("\n P_transp[%u]=%f", i, P_transp[i]);
// }
P.m = s;
P.n = N;
P.pElement = P_transp;
idrs2nd(
P,
tol * resvec_out[0], /* tolr = tol * norm(b) */
s, /* s - as discussed with Bastian on 2010-01-27 */
maxit,
irdshandle, /* Context Handle we got from idrs_1st */
x_out,
resvec_out,
piter
);
free(r);
free(P_init);
}
|
8f7f5a19097b2b3f929afd9945ea7c19e2fa3d62.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void mykernel(void) {
}
int main(void) {
hipLaunchKernelGGL(( mykernel), dim3(1),dim3(1), 0, 0, );
printf("Hello World!\n");
return 0;
}
| 8f7f5a19097b2b3f929afd9945ea7c19e2fa3d62.cu | #include <stdio.h>
__global__ void mykernel(void) {
}
int main(void) {
mykernel<<<1,1>>>();
printf("Hello World!\n");
return 0;
}
|
4a2ea367ec82719a8b066400b6cf8cb279d2177f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#define N 256
__global__ void matrix_vector_multi(float *A_d, float * B_d, float *C_d) {
int i, j;
for (j = 0; j < N; j++) {
A_d[j] = 0.0F;
for (i = 0; i < N; i++) {
A_d[j] = A_d[j] + B_d[j*N + i] * C_d[i];
}
}
}
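/* note: the launch below uses a single block with a single thread, so this kernel runs
   the whole matrix-vector product serially on the device. a parallel mapping is not part
   of this file; as a rough sketch only (one thread per output row, 1-D launch covering N rows):
       int j = blockIdx.x * blockDim.x + threadIdx.x;
       if (j < N) {
           float sum = 0.0f;
           for (int i = 0; i < N; i++) sum += B_d[j * N + i] * C_d[i];
           A_d[j] = sum;
       }
*/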
int main(void) {
int i, j;
float A[N], B[N*N], C[N];
float *A_d, *B_d, *C_d;
dim3 blocks(1, 1, 1);
dim3 threads(1, 1, 1);
for (j = 0; j < N; j++) {
for (i = 0; i < N; i++) {
B[j*N + i] = ((float)j) / 256.0;
}
}
for (j = 0; j < N; j++)
C[j] = 1.0F;
hipMalloc((void**)& A_d, N * sizeof(float));
hipMalloc((void**)& B_d, N*N * sizeof(float));
hipMalloc((void**)& C_d, N * sizeof(float));
hipMemcpy(B_d, B, N*N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(C_d, C, N * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( matrix_vector_multi) , dim3(blocks), dim3(threads) , 0, 0, A_d, B_d, C_d);
hipMemcpy(A, A_d, N * sizeof(float), hipMemcpyDeviceToHost);
for (j = 0; j < N; j++) {
printf("A[%d]=%f \n", j, A[j]);
}
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
}
/*
==11856== Profiling application: .\Debug\chap2_02.exe
==11856== Profiling result:
Time(%) Time Calls Avg Min Max Name
73.21% 133.63us 1 133.63us 133.63us 133.63us matrix_vector_multi(float*, float*, float*)
25.46% 46.465us 2 23.232us 1.1840us 45.281us [CUDA memcpy HtoD]
1.33% 2.4320us 1 2.4320us 2.4320us 2.4320us [CUDA memcpy DtoH]
==11856== API calls:
Time(%) Time Calls Avg Min Max Name
98.47% 132.94ms 3 44.313ms 13.998us 132.47ms hipMalloc
0.47% 633.40us 91 6.9600us 0ns 278.91us hipDeviceGetAttribute
0.46% 621.85us 3 207.28us 29.745us 321.25us hipFree
0.40% 539.97us 3 179.99us 106.38us 270.86us hipMemcpy
0.13% 181.97us 1 181.97us 181.97us 181.97us hipDeviceGetName
0.04% 56.341us 1 56.341us 56.341us 56.341us hipLaunch
0.01% 11.198us 1 11.198us 11.198us 11.198us cuDeviceTotalMem
0.01% 9.0990us 1 9.0990us 9.0990us 9.0990us hipConfigureCall
0.00% 3.1500us 3 1.0500us 350ns 1.7500us hipGetDeviceCount
0.00% 3.1490us 3 1.0490us 350ns 2.0990us hipDeviceGet
0.00% 1.7490us 3 583ns 350ns 700ns hipSetupArgument
*/ | 4a2ea367ec82719a8b066400b6cf8cb279d2177f.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#define N 256
__global__ void matrix_vector_multi(float *A_d, float * B_d, float *C_d) {
int i, j;
for (j = 0; j < N; j++) {
A_d[j] = 0.0F;
for (i = 0; i < N; i++) {
A_d[j] = A_d[j] + B_d[j*N + i] * C_d[i];
}
}
}
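/* note: the launch below uses a single block with a single thread, so this kernel runs
   the whole matrix-vector product serially on the device. a parallel mapping is not part
   of this file; as a rough sketch only (one thread per output row, 1-D launch covering N rows):
       int j = blockIdx.x * blockDim.x + threadIdx.x;
       if (j < N) {
           float sum = 0.0f;
           for (int i = 0; i < N; i++) sum += B_d[j * N + i] * C_d[i];
           A_d[j] = sum;
       }
*/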
int main(void) {
int i, j;
float A[N], B[N*N], C[N];
float *A_d, *B_d, *C_d;
dim3 blocks(1, 1, 1);
dim3 threads(1, 1, 1);
for (j = 0; j < N; j++) {
for (i = 0; i < N; i++) {
B[j*N + i] = ((float)j) / 256.0;
}
}
for (j = 0; j < N; j++)
C[j] = 1.0F;
cudaMalloc((void**)& A_d, N * sizeof(float));
cudaMalloc((void**)& B_d, N*N * sizeof(float));
cudaMalloc((void**)& C_d, N * sizeof(float));
cudaMemcpy(B_d, B, N*N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(C_d, C, N * sizeof(float), cudaMemcpyHostToDevice);
matrix_vector_multi <<< blocks, threads >>> (A_d, B_d, C_d);
cudaMemcpy(A, A_d, N * sizeof(float), cudaMemcpyDeviceToHost);
for (j = 0; j < N; j++) {
printf("A[%d]=%f \n", j, A[j]);
}
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
}
/*
==11856== Profiling application: .\Debug\chap2_02.exe
==11856== Profiling result:
Time(%) Time Calls Avg Min Max Name
73.21% 133.63us 1 133.63us 133.63us 133.63us matrix_vector_multi(float*, float*, float*)
25.46% 46.465us 2 23.232us 1.1840us 45.281us [CUDA memcpy HtoD]
1.33% 2.4320us 1 2.4320us 2.4320us 2.4320us [CUDA memcpy DtoH]
==11856== API calls:
Time(%) Time Calls Avg Min Max Name
98.47% 132.94ms 3 44.313ms 13.998us 132.47ms cudaMalloc
0.47% 633.40us 91 6.9600us 0ns 278.91us cuDeviceGetAttribute
0.46% 621.85us 3 207.28us 29.745us 321.25us cudaFree
0.40% 539.97us 3 179.99us 106.38us 270.86us cudaMemcpy
0.13% 181.97us 1 181.97us 181.97us 181.97us cuDeviceGetName
0.04% 56.341us 1 56.341us 56.341us 56.341us cudaLaunch
0.01% 11.198us 1 11.198us 11.198us 11.198us cuDeviceTotalMem
0.01% 9.0990us 1 9.0990us 9.0990us 9.0990us cudaConfigureCall
0.00% 3.1500us 3 1.0500us 350ns 1.7500us cuDeviceGetCount
0.00% 3.1490us 3 1.0490us 350ns 2.0990us cuDeviceGet
0.00% 1.7490us 3 583ns 350ns 700ns cudaSetupArgument
*/ |
1b1351be30efaeca09e0acb50f576e001eea53e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gPasteRows(float* out, const float* in, size_t cols, const size_t* targetRowIdx, size_t rows) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
size_t dstId = targetRowIdx[j];
size_t srcId = j;
float* rowOut = out + dstId * cols;
const float* rowIn = in + srcId * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols)
atomicAdd(rowOut + i, rowIn[i]);
}
}
}
} | 1b1351be30efaeca09e0acb50f576e001eea53e3.cu | #include "includes.h"
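// Scatter-add rows of `in` into `out`: each block grid-strides over source rows j,
// its threads stride across the columns, and atomicAdd accumulates row j of `in`
// into row targetRowIdx[j] of `out` (several source rows may map to the same target row).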
__global__ void gPasteRows(float* out, const float* in, size_t cols, const size_t* targetRowIdx, size_t rows) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
size_t dstId = targetRowIdx[j];
size_t srcId = j;
float* rowOut = out + dstId * cols;
const float* rowIn = in + srcId * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols)
atomicAdd(rowOut + i, rowIn[i]);
}
}
}
} |
25c09907775843572ebae16dfd0e6a6775ab647d.hip | // !!! This is a file automatically generated by hipify!!!
//#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
#include <wb.h>
//@@ The purpose of this code is to become familiar with the submission
//@@ process. Do not worry if you do not understand all the details of
//@@ the code.
int main(int argc, char ** argv) {
int deviceCount;
wbArg_read(argc, argv);
hipGetDeviceCount(&deviceCount);
wbTime_start(GPU, "Getting GPU Data."); //@@ start a timer
for (int dev = 0; dev < deviceCount; dev++) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
wbLog(TRACE, "No CUDA GPU has been detected");
return -1;
} else if (deviceCount == 1) {
//@@ WbLog is a provided logging API (similar to Log4J).
//@@ The logging function wbLog takes a level which is either
//@@ OFF, FATAL, ERROR, WARN, INFO, DEBUG, or TRACE and a
//@@ message to be printed.
wbLog(TRACE, "There is 1 device supporting CUDA");
} else {
wbLog(TRACE, "There are ", deviceCount, " devices supporting CUDA");
}
}
wbLog(TRACE, "Device ", dev, " name: ", deviceProp.name);
wbLog(TRACE, " Computational Capabilities: ", deviceProp.major, ".", deviceProp.minor);
wbLog(TRACE, " Maximum global memory size: ", deviceProp.totalGlobalMem);
wbLog(TRACE, " Maximum constant memory size: ", deviceProp.totalConstMem);
wbLog(TRACE, " Maximum shared memory size per block: ", deviceProp.sharedMemPerBlock);
wbLog(TRACE, " Maximum threads per block: ", deviceProp.maxThreadsPerBlock);
wbLog(TRACE, " Maximum block dimensions: ", deviceProp.maxThreadsDim[0], " x ",
deviceProp.maxThreadsDim[1], " x ",
deviceProp.maxThreadsDim[2]);
wbLog(TRACE, " Maximum grid dimensions: ", deviceProp.maxGridSize[0], " x ",
deviceProp.maxGridSize[1], " x ",
deviceProp.maxGridSize[2]);
wbLog(TRACE, " Warp size: ", deviceProp.warpSize);
}
wbTime_stop(GPU, "Getting GPU Data."); //@@ stop the timer
return 0;
} | 25c09907775843572ebae16dfd0e6a6775ab647d.cu | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <wb.h>
//@@ The purpose of this code is to become familiar with the submission
//@@ process. Do not worry if you do not understand all the details of
//@@ the code.
int main(int argc, char ** argv) {
int deviceCount;
wbArg_read(argc, argv);
cudaGetDeviceCount(&deviceCount);
wbTime_start(GPU, "Getting GPU Data."); //@@ start a timer
for (int dev = 0; dev < deviceCount; dev++) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
wbLog(TRACE, "No CUDA GPU has been detected");
return -1;
} else if (deviceCount == 1) {
//@@ WbLog is a provided logging API (similar to Log4J).
//@@ The logging function wbLog takes a level which is either
//@@ OFF, FATAL, ERROR, WARN, INFO, DEBUG, or TRACE and a
//@@ message to be printed.
wbLog(TRACE, "There is 1 device supporting CUDA");
} else {
wbLog(TRACE, "There are ", deviceCount, " devices supporting CUDA");
}
}
wbLog(TRACE, "Device ", dev, " name: ", deviceProp.name);
wbLog(TRACE, " Computational Capabilities: ", deviceProp.major, ".", deviceProp.minor);
wbLog(TRACE, " Maximum global memory size: ", deviceProp.totalGlobalMem);
wbLog(TRACE, " Maximum constant memory size: ", deviceProp.totalConstMem);
wbLog(TRACE, " Maximum shared memory size per block: ", deviceProp.sharedMemPerBlock);
wbLog(TRACE, " Maximum threads per block: ", deviceProp.maxThreadsPerBlock);
wbLog(TRACE, " Maximum block dimensions: ", deviceProp.maxThreadsDim[0], " x ",
deviceProp.maxThreadsDim[1], " x ",
deviceProp.maxThreadsDim[2]);
wbLog(TRACE, " Maximum grid dimensions: ", deviceProp.maxGridSize[0], " x ",
deviceProp.maxGridSize[1], " x ",
deviceProp.maxGridSize[2]);
wbLog(TRACE, " Warp size: ", deviceProp.warpSize);
}
wbTime_stop(GPU, "Getting GPU Data."); //@@ stop the timer
return 0;
} |
67bffb42be0ee721b0a2ca335ce6f7600155fddb.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication which makes use of shared memory
 * to ensure data reuse; the matrix multiplication is done using a tiling approach.
* It has been written for clarity of exposition to illustrate various CUDA programming
* principles, not with the goal of providing the most performant generic kernel for matrix multiplication.
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
#include <fstream>
#include <chrono>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
using std::chrono::steady_clock;
using std::chrono::duration;
using std::chrono::duration_cast;
#define FILE_NAME "/home/thaivu/Projects/CUDA-NVIDIA_Learning/Lab2_MuliMatrix/SampleOfNvidia/matrixMul/benchmark_log_JetsonNano_shmem.txt"
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void MatrixMulCUDA(float *C, float *A,
float *B, int wA,
int wB) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void ConstantInit(float *data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
// Allocates a matrix with random float entries.
void randomInit(float *data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = (rand() / (float)RAND_MAX) * 100.0;
}
void matrixMulCPU(float *C, const float *A, const float *B, unsigned int hA, unsigned int wA, unsigned int wB)
{
for (unsigned int i = 0; i < hA; ++i)
for (unsigned int j = 0; j < wB; ++j)
{
double sum = 0;
for (unsigned int k = 0; k < wA; ++k)
{
double a = A[i * wA + k];
double b = B[k * wB + j];
sum += a * b;
}
C[i * wB + j] = (float)sum;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char **argv,
int block_size, const dim3 &dimsA,
const dim3 &dimsB, std::ostream &fileout)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = reinterpret_cast<float *>(malloc(mem_size_A));
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = reinterpret_cast<float *>(malloc(mem_size_B));
hipStream_t stream;
// Initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int size_C = dimsC.x * dimsC.y;
unsigned int mem_size_C = size_C * sizeof(float);
float *h_C = reinterpret_cast<float *>(malloc(mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_A), mem_size_A));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_B), mem_size_B));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_C), mem_size_C));
// Allocate CUDA events that we'll use for timing
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
// copy host memory to device
checkCudaErrors(hipMemcpyAsync(d_A, h_A, mem_size_A, hipMemcpyHostToDevice, stream));
checkCudaErrors(hipMemcpyAsync(d_B, h_B, mem_size_B, hipMemcpyHostToDevice, stream));
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16) {
hipLaunchKernelGGL(( MatrixMulCUDA<16>) , dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
hipLaunchKernelGGL(( MatrixMulCUDA<32>) , dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
printf("done\n");
checkCudaErrors(hipStreamSynchronize(stream));
// Record the start event
checkCudaErrors(hipEventRecord(start, stream));
// Execute the kernel
int nIter = 50;
for (int j = 0; j < nIter; j++) {
if (block_size == 16) {
hipLaunchKernelGGL(( MatrixMulCUDA<16>) , dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
hipLaunchKernelGGL(( MatrixMulCUDA<32>) , dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
}
// Record the stop event
checkCudaErrors(hipEventRecord(stop, stream));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
fileout << (int)dimsA.x << ", " << msecPerMatrixMul << ", " << flopsPerMatrixMul << ", " << gigaFlops;
// Copy result from device to host
checkCudaErrors(hipMemcpyAsync(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost, stream));
checkCudaErrors(hipStreamSynchronize(stream));
// verify the result of matrix multiplication
float *reference = (float *)malloc(mem_size_C);
steady_clock::time_point start_CPU = steady_clock::now();
matrixMulCPU(reference, h_A, h_B, (unsigned int)dimsA.y, (unsigned int)dimsA.x, (unsigned int)dimsB.x); // matrix_size.uiHA, matrix_size.uiWA, matrix_size.uiWB);
steady_clock::time_point end_CPU = steady_clock::now();
fileout << ", " << duration_cast <duration<double>>(end_CPU - start_CPU).count() << "\n";
printf("done.\n");
printf("Checking computed result for correctness: ");
bool correct = sdkCompareL2fe(reference, h_C, size_C, 1.0e-6f);
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
// printf("\nNOTE: The CUDA Samples are not meant for performance"\
// "measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
} else {
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv) {
std::ofstream fileout;
fileout.open(FILE_NAME, std::ios_base::out | std::ios_base::app );
fileout << "kernel_size, time(msec), ops, GFlop/s, time_CPU(sec)\n" ;
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
// int dev = findCudaDevice(argc, (const char **)argv);
int block_size = 32;
for (int i = 1; i <= 64; i *= 2)
{
dim3 dimsA(i * block_size, i * block_size, 1);
dim3 dimsB(i * block_size, i * block_size, 1);
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
dimsB.x, dimsB.y);
int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB, fileout);
if (matrix_result != 0)
return matrix_result;
}
fileout.close();
return 0;
}
| 67bffb42be0ee721b0a2ca335ce6f7600155fddb.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication which makes use of shared memory
 * to ensure data reuse; the matrix multiplication is done using a tiling approach.
* It has been written for clarity of exposition to illustrate various CUDA programming
* principles, not with the goal of providing the most performant generic kernel for matrix multiplication.
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
#include <fstream>
#include <chrono>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
using std::chrono::steady_clock;
using std::chrono::duration;
using std::chrono::duration_cast;
#define FILE_NAME "/home/thaivu/Projects/CUDA-NVIDIA_Learning/Lab2_MuliMatrix/SampleOfNvidia/matrixMul/benchmark_log_JetsonNano_shmem.txt"
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void MatrixMulCUDA(float *C, float *A,
float *B, int wA,
int wB) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void ConstantInit(float *data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
// Allocates a matrix with random float entries.
void randomInit(float *data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = (rand() / (float)RAND_MAX) * 100.0;
}
void matrixMulCPU(float *C, const float *A, const float *B, unsigned int hA, unsigned int wA, unsigned int wB)
{
for (unsigned int i = 0; i < hA; ++i)
for (unsigned int j = 0; j < wB; ++j)
{
double sum = 0;
for (unsigned int k = 0; k < wA; ++k)
{
double a = A[i * wA + k];
double b = B[k * wB + j];
sum += a * b;
}
C[i * wB + j] = (float)sum;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char **argv,
int block_size, const dim3 &dimsA,
const dim3 &dimsB, std::ostream &fileout)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = reinterpret_cast<float *>(malloc(mem_size_A));
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = reinterpret_cast<float *>(malloc(mem_size_B));
cudaStream_t stream;
// Initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int size_C = dimsC.x * dimsC.y;
unsigned int mem_size_C = size_C * sizeof(float);
float *h_C = reinterpret_cast<float *>(malloc(mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_A), mem_size_A));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_B), mem_size_B));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_C), mem_size_C));
// Allocate CUDA events that we'll use for timing
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
// copy host memory to device
checkCudaErrors(cudaMemcpyAsync(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice, stream));
checkCudaErrors(cudaMemcpyAsync(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice, stream));
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16) {
MatrixMulCUDA<16> <<< grid, threads, 0, stream>>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
MatrixMulCUDA<32> <<< grid, threads, 0, stream>>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
printf("done\n");
checkCudaErrors(cudaStreamSynchronize(stream));
// Record the start event
checkCudaErrors(cudaEventRecord(start, stream));
// Execute the kernel
int nIter = 50;
for (int j = 0; j < nIter; j++) {
if (block_size == 16) {
MatrixMulCUDA<16> <<<grid, threads, 0, stream>>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
MatrixMulCUDA<32> <<<grid, threads, 0, stream>>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
}
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, stream));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
fileout << (int)dimsA.x << ", " << msecPerMatrixMul << ", " << flopsPerMatrixMul << ", " << gigaFlops;
// Copy result from device to host
checkCudaErrors(cudaMemcpyAsync(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost, stream));
checkCudaErrors(cudaStreamSynchronize(stream));
// verify the result of matrix multiplication
float *reference = (float *)malloc(mem_size_C);
steady_clock::time_point start_CPU = steady_clock::now();
matrixMulCPU(reference, h_A, h_B, (unsigned int)dimsA.y, (unsigned int)dimsA.x, (unsigned int)dimsB.x); // matrix_size.uiHA, matrix_size.uiWA, matrix_size.uiWB);
steady_clock::time_point end_CPU = steady_clock::now();
fileout << ", " << duration_cast <duration<double>>(end_CPU - start_CPU).count() << "\n";
printf("done.\n");
printf("Checking computed result for correctness: ");
bool correct = sdkCompareL2fe(reference, h_C, size_C, 1.0e-6f);
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
// printf("\nNOTE: The CUDA Samples are not meant for performance"\
// "measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
} else {
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv) {
std::ofstream fileout;
fileout.open(FILE_NAME, std::ios_base::out | std::ios_base::app );
fileout << "kernel_size, time(msec), ops, GFlop/s, time_CPU(sec)\n" ;
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
// int dev = findCudaDevice(argc, (const char **)argv);
int block_size = 32;
for (int i = 1; i <= 64; i *= 2)
{
dim3 dimsA(i * block_size, i * block_size, 1);
dim3 dimsB(i * block_size, i * block_size, 1);
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
dimsB.x, dimsB.y);
int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB, fileout);
if (matrix_result != 0)
return matrix_result;
}
fileout.close();
return 0;
}
|
b3b9c4fd78f4031ea25caafc352aaa09689663c7.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "radixSort.h"
#include "efficient.h"
#define TEST 0
int numBlocks, numThread = 1024;
namespace RadixSort {
void printArray(int size, int * a)
{
printf("\n");
for(int i=0; i<size; ++i)
{
printf("%d ", a[i]);
}
printf("\n");
}
__global__ void createEArray(int n, int *e, int* b)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < n)
{
e[index] = (b[index]==0) ? 1 : 0;
}
}
__global__ void scan(int n, int i, int *odata, int *idata)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < n)
{
odata[index] = (idata[index] & i) ? 1 : 0;
}
}
__global__ void getTotalFalse(int index, int * totalFalse, int *dev_f, int *dev_e)
{
(*totalFalse) = dev_f[index] + dev_e[index];
}
__global__ void createTArray(int n, int *t, int*f, int *totalFalse)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < n)
{
t[index] = index - f[index] + (*totalFalse);
}
}
__global__ void createDArray(int n, int *d, int *b, int *t, int *f)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < n)
{
d[index] = b[index] ? t[index] : f[index];
}
}
__global__ void scatter(int n, int *odata, int *idata, int *d)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < n)
{
odata[d[index]] = idata[index];
}
}
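// split(): one stable-partition pass of the radix sort for bit mask i.
// It builds, entirely on the device:
//   b[j] = (idata[j] & i) != 0          (scan kernel above)
//   e[j] = !b[j]                        (createEArray)
//   f    = exclusive prefix scan of e   (StreamCompaction::Efficient::scan)
//   totalFalse = e[n-1] + f[n-1]
//   t[j] = j - f[j] + totalFalse
//   d[j] = b[j] ? t[j] : f[j]
// and finally scatters idata[j] to odata[d[j]], so keys whose current bit is 0
// come first (in stable order), followed by keys whose current bit is 1.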
void split(int n, int i, int *dev_odata, int* dev_idata)
{
int *dev_b,
*dev_e,
*dev_f,
*dev_t,
*dev_d;
int hst_temp[n];
//Create b
hipMalloc((void**)&dev_b, n * sizeof(int));
hipLaunchKernelGGL(( scan), dim3(numBlocks), dim3(numThread), 0, 0, n, i, dev_b, dev_idata);
if(TEST)
{
hipMemcpy(hst_temp, dev_b, n*sizeof(int), hipMemcpyDeviceToHost);
printArray(n, hst_temp);
}
//Create e
hipMalloc((void**)&dev_e, n * sizeof(int));
hipLaunchKernelGGL(( createEArray), dim3(numBlocks), dim3(numThread), 0, 0, n, dev_e, dev_b);
if(TEST)
{
hipMemcpy(hst_temp, dev_e, n*sizeof(int), hipMemcpyDeviceToHost);
printArray(n, hst_temp);
}
//Create f by using efficient scan
hipMalloc((void**)&dev_f, n * sizeof(int));
StreamCompaction::Efficient::scan(n, dev_f, dev_e);
if(TEST)
{
hipMemcpy(hst_temp, dev_f, n*sizeof(int), hipMemcpyDeviceToHost);
printArray(n, hst_temp);
}
//Finding total false
int *dev_totalFalse;
hipMalloc((void**)&dev_totalFalse, sizeof(int));
hipLaunchKernelGGL(( getTotalFalse), dim3(1), dim3(1), 0, 0, n-1, dev_totalFalse, dev_f, dev_e);
if(TEST)
{
hipMemcpy(hst_temp, dev_totalFalse, sizeof(int), hipMemcpyDeviceToHost);
printf("\n%d %d\n", hst_temp[0], n-1);
}
//Create t
hipMalloc((void**)&dev_t, n * sizeof(int));
hipLaunchKernelGGL(( createTArray), dim3(numBlocks), dim3(numThread), 0, 0, n, dev_t, dev_f, dev_totalFalse);
if(TEST)
{
hipMemcpy(hst_temp, dev_t, n*sizeof(int), hipMemcpyDeviceToHost);
printArray(n, hst_temp);
}
//Create d
hipMalloc((void**)&dev_d, n * sizeof(int));
hipLaunchKernelGGL(( createDArray), dim3(numBlocks), dim3(numThread), 0, 0, n, dev_d, dev_b, dev_t, dev_f);
if(TEST)
{
hipMemcpy(hst_temp, dev_d, n*sizeof(int), hipMemcpyDeviceToHost);
printArray(n, hst_temp);
}
//Shuffle
hipLaunchKernelGGL(( scatter), dim3(numBlocks), dim3(numThread), 0, 0, n, dev_odata, dev_idata, dev_d);
if(TEST)
{
hipMemcpy(hst_temp, dev_odata, n*sizeof(int), hipMemcpyDeviceToHost);
printArray(n, hst_temp);
}
hipFree(dev_b);
hipFree(dev_e);
hipFree(dev_f);
hipFree(dev_t);
hipFree(dev_d);
hipFree(dev_totalFalse);
}
void sort(int n, int maxValue, int *odata, const int *idata)
{
int i = 1,
*dev_idata,
*dev_odata;
numBlocks = n / numThread + 1;
hipMalloc((void**)&dev_odata, n * sizeof(int));
hipMalloc((void**)&dev_idata, n * sizeof(int));
hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
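		// LSD radix sort driver: run one split() pass per bit, starting at the least
		// significant bit and stopping once the bit mask exceeds maxValue; because
		// every pass is a stable partition, the keys end up fully sorted.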
while(i <= maxValue)
{
split(n, i, dev_odata, dev_idata);
hipMemcpy(dev_idata, dev_odata, n * sizeof(int), hipMemcpyDeviceToDevice);
i<<=1;
}
hipMemcpy(odata, dev_odata, n * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_idata);
hipFree(dev_odata);
}
}
| b3b9c4fd78f4031ea25caafc352aaa09689663c7.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "radixSort.h"
#include "efficient.h"
#define TEST 0
int numBlocks, numThread = 1024;
namespace RadixSort {
void printArray(int size, int * a)
{
printf("\n");
for(int i=0; i<size; ++i)
{
printf("%d ", a[i]);
}
printf("\n");
}
__global__ void createEArray(int n, int *e, int* b)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < n)
{
e[index] = (b[index]==0) ? 1 : 0;
}
}
__global__ void scan(int n, int i, int *odata, int *idata)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < n)
{
odata[index] = (idata[index] & i) ? 1 : 0;
}
}
__global__ void getTotalFalse(int index, int * totalFalse, int *dev_f, int *dev_e)
{
(*totalFalse) = dev_f[index] + dev_e[index];
}
__global__ void createTArray(int n, int *t, int*f, int *totalFalse)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < n)
{
t[index] = index - f[index] + (*totalFalse);
}
}
__global__ void createDArray(int n, int *d, int *b, int *t, int *f)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < n)
{
d[index] = b[index] ? t[index] : f[index];
}
}
__global__ void scatter(int n, int *odata, int *idata, int *d)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < n)
{
odata[d[index]] = idata[index];
}
}
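// split(): one stable-partition pass of the radix sort for bit mask i.
// It builds, entirely on the device:
//   b[j] = (idata[j] & i) != 0          (scan kernel above)
//   e[j] = !b[j]                        (createEArray)
//   f    = exclusive prefix scan of e   (StreamCompaction::Efficient::scan)
//   totalFalse = e[n-1] + f[n-1]
//   t[j] = j - f[j] + totalFalse
//   d[j] = b[j] ? t[j] : f[j]
// and finally scatters idata[j] to odata[d[j]], so keys whose current bit is 0
// come first (in stable order), followed by keys whose current bit is 1.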
void split(int n, int i, int *dev_odata, int* dev_idata)
{
int *dev_b,
*dev_e,
*dev_f,
*dev_t,
*dev_d;
int hst_temp[n];
//Create b
cudaMalloc((void**)&dev_b, n * sizeof(int));
scan<<<numBlocks, numThread>>>(n, i, dev_b, dev_idata);
if(TEST)
{
cudaMemcpy(hst_temp, dev_b, n*sizeof(int), cudaMemcpyDeviceToHost);
printArray(n, hst_temp);
}
//Create e
cudaMalloc((void**)&dev_e, n * sizeof(int));
createEArray<<<numBlocks, numThread>>>(n, dev_e, dev_b);
if(TEST)
{
cudaMemcpy(hst_temp, dev_e, n*sizeof(int), cudaMemcpyDeviceToHost);
printArray(n, hst_temp);
}
//Create f by using efficient scan
cudaMalloc((void**)&dev_f, n * sizeof(int));
StreamCompaction::Efficient::scan(n, dev_f, dev_e);
if(TEST)
{
cudaMemcpy(hst_temp, dev_f, n*sizeof(int), cudaMemcpyDeviceToHost);
printArray(n, hst_temp);
}
//Finding total false
int *dev_totalFalse;
cudaMalloc((void**)&dev_totalFalse, sizeof(int));
getTotalFalse<<<1, 1>>>(n-1, dev_totalFalse, dev_f, dev_e);
if(TEST)
{
cudaMemcpy(hst_temp, dev_totalFalse, sizeof(int), cudaMemcpyDeviceToHost);
printf("\n%d %d\n", hst_temp[0], n-1);
}
//Create t
cudaMalloc((void**)&dev_t, n * sizeof(int));
createTArray<<<numBlocks, numThread>>>(n, dev_t, dev_f, dev_totalFalse);
if(TEST)
{
cudaMemcpy(hst_temp, dev_t, n*sizeof(int), cudaMemcpyDeviceToHost);
printArray(n, hst_temp);
}
//Create d
cudaMalloc((void**)&dev_d, n * sizeof(int));
createDArray<<<numBlocks, numThread>>>(n, dev_d, dev_b, dev_t, dev_f);
if(TEST)
{
cudaMemcpy(hst_temp, dev_d, n*sizeof(int), cudaMemcpyDeviceToHost);
printArray(n, hst_temp);
}
//Shuffle
scatter<<<numBlocks, numThread>>>(n, dev_odata, dev_idata, dev_d);
if(TEST)
{
cudaMemcpy(hst_temp, dev_odata, n*sizeof(int), cudaMemcpyDeviceToHost);
printArray(n, hst_temp);
}
cudaFree(dev_b);
cudaFree(dev_e);
cudaFree(dev_f);
cudaFree(dev_t);
cudaFree(dev_d);
cudaFree(dev_totalFalse);
}
void sort(int n, int maxValue, int *odata, const int *idata)
{
int i = 1,
*dev_idata,
*dev_odata;
numBlocks = n / numThread + 1;
cudaMalloc((void**)&dev_odata, n * sizeof(int));
cudaMalloc((void**)&dev_idata, n * sizeof(int));
cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
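		// LSD radix sort driver: run one split() pass per bit, starting at the least
		// significant bit and stopping once the bit mask exceeds maxValue; because
		// every pass is a stable partition, the keys end up fully sorted.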
while(i <= maxValue)
{
split(n, i, dev_odata, dev_idata);
cudaMemcpy(dev_idata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToDevice);
i<<=1;
}
cudaMemcpy(odata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_idata);
cudaFree(dev_odata);
}
}
|
3ba153e958ceab3a5281c4d6dc4b153ed8eaa5e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/L2Select.cuh>
#include <faiss/impl/FaissAssert.h>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/Pair.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <faiss/gpu/utils/Select.cuh>
#include <faiss/gpu/utils/Tensor.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
namespace faiss { namespace gpu {
// L2 + select kernel for k == 1, implements re-use of ||c||^2
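// The kernel forms the full distance by adding centroidDistances[col] (the ||c||^2
// term) to the query-dependent productDistances[row][col], so the centroid norm is
// computed once per centroid and re-used across all query rows rather than being
// recomputed for every (query, centroid) pair.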
template <typename T, int kRowsPerBlock, int kBlockSize>
__global__ void l2SelectMin1(Tensor<T, 2, true> productDistances,
Tensor<T, 1, true> centroidDistances,
Tensor<T, 2, true> outDistances,
Tensor<int, 2, true> outIndices) {
// Each block handles kRowsPerBlock rows of the distances (results)
Pair<T, int> threadMin[kRowsPerBlock];
__shared__ Pair<T, int> blockMin[kRowsPerBlock * (kBlockSize / kWarpSize)];
T distance[kRowsPerBlock];
#pragma unroll
for (int i = 0; i < kRowsPerBlock; ++i) {
threadMin[i].k = Limits<T>::getMax();
threadMin[i].v = -1;
}
// blockIdx.x: which chunk of rows we are responsible for updating
int rowStart = blockIdx.x * kRowsPerBlock;
// FIXME: if we have exact multiples, don't need this
bool endRow = (blockIdx.x == gridDim.x - 1);
if (endRow) {
if (productDistances.getSize(0) % kRowsPerBlock == 0) {
endRow = false;
}
}
if (endRow) {
for (int row = rowStart; row < productDistances.getSize(0); ++row) {
for (int col = threadIdx.x; col < productDistances.getSize(1);
col += blockDim.x) {
distance[0] = Math<T>::add(centroidDistances[col],
productDistances[row][col]);
if (Math<T>::lt(distance[0], threadMin[0].k)) {
threadMin[0].k = distance[0];
threadMin[0].v = col;
}
}
// Reduce within the block
threadMin[0] =
blockReduceAll<Pair<T, int>, Min<Pair<T, int> >, false, false>(
threadMin[0], Min<Pair<T, int> >(), blockMin);
if (threadIdx.x == 0) {
outDistances[row][0] = threadMin[0].k;
outIndices[row][0] = threadMin[0].v;
}
// so we can use the shared memory again
__syncthreads();
threadMin[0].k = Limits<T>::getMax();
threadMin[0].v = -1;
}
} else {
for (int col = threadIdx.x; col < productDistances.getSize(1);
col += blockDim.x) {
T centroidDistance = centroidDistances[col];
#pragma unroll
for (int row = 0; row < kRowsPerBlock; ++row) {
distance[row] = productDistances[rowStart + row][col];
}
#pragma unroll
for (int row = 0; row < kRowsPerBlock; ++row) {
distance[row] = Math<T>::add(distance[row], centroidDistance);
}
#pragma unroll
for (int row = 0; row < kRowsPerBlock; ++row) {
if (Math<T>::lt(distance[row], threadMin[row].k)) {
threadMin[row].k = distance[row];
threadMin[row].v = col;
}
}
}
// Reduce within the block
blockReduceAll<kRowsPerBlock, Pair<T, int>, Min<Pair<T, int> >, false, false>(
threadMin, Min<Pair<T, int> >(), blockMin);
if (threadIdx.x == 0) {
#pragma unroll
for (int row = 0; row < kRowsPerBlock; ++row) {
outDistances[rowStart + row][0] = threadMin[row].k;
outIndices[rowStart + row][0] = threadMin[row].v;
}
}
}
}
// L2 + select kernel for k > 1, no re-use of ||c||^2
template <typename T, int NumWarpQ, int NumThreadQ, int ThreadsPerBlock>
__global__ void l2SelectMinK(Tensor<T, 2, true> productDistances,
Tensor<T, 1, true> centroidDistances,
Tensor<T, 2, true> outDistances,
Tensor<int, 2, true> outIndices,
int k, T initK) {
// Each block handles a single row of the distances (results)
constexpr int kNumWarps = ThreadsPerBlock / kWarpSize;
__shared__ T smemK[kNumWarps * NumWarpQ];
__shared__ int smemV[kNumWarps * NumWarpQ];
BlockSelect<T, int, false, Comparator<T>,
NumWarpQ, NumThreadQ, ThreadsPerBlock>
heap(initK, -1, smemK, smemV, k);
int row = blockIdx.x;
// Whole warps must participate in the selection
int limit = utils::roundDown(productDistances.getSize(1), kWarpSize);
int i = threadIdx.x;
for (; i < limit; i += blockDim.x) {
T v = Math<T>::add(centroidDistances[i],
productDistances[row][i]);
heap.add(v, i);
}
if (i < productDistances.getSize(1)) {
T v = Math<T>::add(centroidDistances[i],
productDistances[row][i]);
heap.addThreadQ(v, i);
}
heap.reduce();
for (int i = threadIdx.x; i < k; i += blockDim.x) {
outDistances[row][i] = smemK[i];
outIndices[row][i] = smemV[i];
}
}
template <typename T>
void runL2SelectMin(Tensor<T, 2, true>& productDistances,
Tensor<T, 1, true>& centroidDistances,
Tensor<T, 2, true>& outDistances,
Tensor<int, 2, true>& outIndices,
int k,
hipStream_t stream) {
FAISS_ASSERT(productDistances.getSize(0) == outDistances.getSize(0));
FAISS_ASSERT(productDistances.getSize(0) == outIndices.getSize(0));
FAISS_ASSERT(centroidDistances.getSize(0) == productDistances.getSize(1));
FAISS_ASSERT(outDistances.getSize(1) == k);
FAISS_ASSERT(outIndices.getSize(1) == k);
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
if (k == 1) {
constexpr int kThreadsPerBlock = 256;
constexpr int kRowsPerBlock = 8;
auto block = dim3(kThreadsPerBlock);
auto grid = dim3(utils::divUp(outDistances.getSize(0), kRowsPerBlock));
hipLaunchKernelGGL(( l2SelectMin1<T, kRowsPerBlock, kThreadsPerBlock>)
, dim3(grid), dim3(block), 0, stream, productDistances, centroidDistances,
outDistances, outIndices);
} else {
auto grid = dim3(outDistances.getSize(0));
#define RUN_L2_SELECT(BLOCK, NUM_WARP_Q, NUM_THREAD_Q) \
do { \
hipLaunchKernelGGL(( l2SelectMinK<T, NUM_WARP_Q, NUM_THREAD_Q, BLOCK>) \
, dim3(grid), dim3(BLOCK), 0, stream, productDistances, centroidDistances, \
outDistances, outIndices, \
k, Limits<T>::getMax()); \
} while (0)
// block size 128 for everything <= 1024
if (k <= 32) {
RUN_L2_SELECT(128, 32, 2);
} else if (k <= 64) {
RUN_L2_SELECT(128, 64, 3);
} else if (k <= 128) {
RUN_L2_SELECT(128, 128, 3);
} else if (k <= 256) {
RUN_L2_SELECT(128, 256, 4);
} else if (k <= 512) {
RUN_L2_SELECT(128, 512, 8);
} else if (k <= 1024) {
RUN_L2_SELECT(128, 1024, 8);
#if GPU_MAX_SELECTION_K >= 2048
} else if (k <= 2048) {
// smaller block for less shared memory
RUN_L2_SELECT(64, 2048, 8);
#endif
} else {
FAISS_ASSERT(false);
}
}
CUDA_TEST_ERROR();
}
void runL2SelectMin(Tensor<float, 2, true>& productDistances,
Tensor<float, 1, true>& centroidDistances,
Tensor<float, 2, true>& outDistances,
Tensor<int, 2, true>& outIndices,
int k,
hipStream_t stream) {
runL2SelectMin<float>(productDistances,
centroidDistances,
outDistances,
outIndices,
k,
stream);
}
void runL2SelectMin(Tensor<half, 2, true>& productDistances,
Tensor<half, 1, true>& centroidDistances,
Tensor<half, 2, true>& outDistances,
Tensor<int, 2, true>& outIndices,
int k,
hipStream_t stream) {
runL2SelectMin<half>(productDistances,
centroidDistances,
outDistances,
outIndices,
k,
stream);
}
} } // namespace
| 3ba153e958ceab3a5281c4d6dc4b153ed8eaa5e5.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/L2Select.cuh>
#include <faiss/impl/FaissAssert.h>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/Pair.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <faiss/gpu/utils/Select.cuh>
#include <faiss/gpu/utils/Tensor.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
namespace faiss { namespace gpu {
// L2 + select kernel for k == 1, implements re-use of ||c||^2
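// The kernel forms the full distance by adding centroidDistances[col] (the ||c||^2
// term) to the query-dependent productDistances[row][col], so the centroid norm is
// computed once per centroid and re-used across all query rows rather than being
// recomputed for every (query, centroid) pair.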
template <typename T, int kRowsPerBlock, int kBlockSize>
__global__ void l2SelectMin1(Tensor<T, 2, true> productDistances,
Tensor<T, 1, true> centroidDistances,
Tensor<T, 2, true> outDistances,
Tensor<int, 2, true> outIndices) {
// Each block handles kRowsPerBlock rows of the distances (results)
Pair<T, int> threadMin[kRowsPerBlock];
__shared__ Pair<T, int> blockMin[kRowsPerBlock * (kBlockSize / kWarpSize)];
T distance[kRowsPerBlock];
#pragma unroll
for (int i = 0; i < kRowsPerBlock; ++i) {
threadMin[i].k = Limits<T>::getMax();
threadMin[i].v = -1;
}
// blockIdx.x: which chunk of rows we are responsible for updating
int rowStart = blockIdx.x * kRowsPerBlock;
// FIXME: if we have exact multiples, don't need this
bool endRow = (blockIdx.x == gridDim.x - 1);
if (endRow) {
if (productDistances.getSize(0) % kRowsPerBlock == 0) {
endRow = false;
}
}
if (endRow) {
for (int row = rowStart; row < productDistances.getSize(0); ++row) {
for (int col = threadIdx.x; col < productDistances.getSize(1);
col += blockDim.x) {
distance[0] = Math<T>::add(centroidDistances[col],
productDistances[row][col]);
if (Math<T>::lt(distance[0], threadMin[0].k)) {
threadMin[0].k = distance[0];
threadMin[0].v = col;
}
}
// Reduce within the block
threadMin[0] =
blockReduceAll<Pair<T, int>, Min<Pair<T, int> >, false, false>(
threadMin[0], Min<Pair<T, int> >(), blockMin);
if (threadIdx.x == 0) {
outDistances[row][0] = threadMin[0].k;
outIndices[row][0] = threadMin[0].v;
}
// so we can use the shared memory again
__syncthreads();
threadMin[0].k = Limits<T>::getMax();
threadMin[0].v = -1;
}
} else {
for (int col = threadIdx.x; col < productDistances.getSize(1);
col += blockDim.x) {
T centroidDistance = centroidDistances[col];
#pragma unroll
for (int row = 0; row < kRowsPerBlock; ++row) {
distance[row] = productDistances[rowStart + row][col];
}
#pragma unroll
for (int row = 0; row < kRowsPerBlock; ++row) {
distance[row] = Math<T>::add(distance[row], centroidDistance);
}
#pragma unroll
for (int row = 0; row < kRowsPerBlock; ++row) {
if (Math<T>::lt(distance[row], threadMin[row].k)) {
threadMin[row].k = distance[row];
threadMin[row].v = col;
}
}
}
// Reduce within the block
blockReduceAll<kRowsPerBlock, Pair<T, int>, Min<Pair<T, int> >, false, false>(
threadMin, Min<Pair<T, int> >(), blockMin);
if (threadIdx.x == 0) {
#pragma unroll
for (int row = 0; row < kRowsPerBlock; ++row) {
outDistances[rowStart + row][0] = threadMin[row].k;
outIndices[rowStart + row][0] = threadMin[row].v;
}
}
}
}
// L2 + select kernel for k > 1, no re-use of ||c||^2
template <typename T, int NumWarpQ, int NumThreadQ, int ThreadsPerBlock>
__global__ void l2SelectMinK(Tensor<T, 2, true> productDistances,
Tensor<T, 1, true> centroidDistances,
Tensor<T, 2, true> outDistances,
Tensor<int, 2, true> outIndices,
int k, T initK) {
// Each block handles a single row of the distances (results)
constexpr int kNumWarps = ThreadsPerBlock / kWarpSize;
__shared__ T smemK[kNumWarps * NumWarpQ];
__shared__ int smemV[kNumWarps * NumWarpQ];
BlockSelect<T, int, false, Comparator<T>,
NumWarpQ, NumThreadQ, ThreadsPerBlock>
heap(initK, -1, smemK, smemV, k);
int row = blockIdx.x;
// Whole warps must participate in the selection
int limit = utils::roundDown(productDistances.getSize(1), kWarpSize);
int i = threadIdx.x;
for (; i < limit; i += blockDim.x) {
T v = Math<T>::add(centroidDistances[i],
productDistances[row][i]);
heap.add(v, i);
}
if (i < productDistances.getSize(1)) {
T v = Math<T>::add(centroidDistances[i],
productDistances[row][i]);
heap.addThreadQ(v, i);
}
heap.reduce();
for (int i = threadIdx.x; i < k; i += blockDim.x) {
outDistances[row][i] = smemK[i];
outIndices[row][i] = smemV[i];
}
}
template <typename T>
void runL2SelectMin(Tensor<T, 2, true>& productDistances,
Tensor<T, 1, true>& centroidDistances,
Tensor<T, 2, true>& outDistances,
Tensor<int, 2, true>& outIndices,
int k,
cudaStream_t stream) {
FAISS_ASSERT(productDistances.getSize(0) == outDistances.getSize(0));
FAISS_ASSERT(productDistances.getSize(0) == outIndices.getSize(0));
FAISS_ASSERT(centroidDistances.getSize(0) == productDistances.getSize(1));
FAISS_ASSERT(outDistances.getSize(1) == k);
FAISS_ASSERT(outIndices.getSize(1) == k);
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
if (k == 1) {
constexpr int kThreadsPerBlock = 256;
constexpr int kRowsPerBlock = 8;
auto block = dim3(kThreadsPerBlock);
auto grid = dim3(utils::divUp(outDistances.getSize(0), kRowsPerBlock));
l2SelectMin1<T, kRowsPerBlock, kThreadsPerBlock>
<<<grid, block, 0, stream>>>(productDistances, centroidDistances,
outDistances, outIndices);
} else {
auto grid = dim3(outDistances.getSize(0));
#define RUN_L2_SELECT(BLOCK, NUM_WARP_Q, NUM_THREAD_Q) \
do { \
l2SelectMinK<T, NUM_WARP_Q, NUM_THREAD_Q, BLOCK> \
<<<grid, BLOCK, 0, stream>>>(productDistances, centroidDistances, \
outDistances, outIndices, \
k, Limits<T>::getMax()); \
} while (0)
// block size 128 for everything <= 1024
if (k <= 32) {
RUN_L2_SELECT(128, 32, 2);
} else if (k <= 64) {
RUN_L2_SELECT(128, 64, 3);
} else if (k <= 128) {
RUN_L2_SELECT(128, 128, 3);
} else if (k <= 256) {
RUN_L2_SELECT(128, 256, 4);
} else if (k <= 512) {
RUN_L2_SELECT(128, 512, 8);
} else if (k <= 1024) {
RUN_L2_SELECT(128, 1024, 8);
#if GPU_MAX_SELECTION_K >= 2048
} else if (k <= 2048) {
// smaller block for less shared memory
RUN_L2_SELECT(64, 2048, 8);
#endif
} else {
FAISS_ASSERT(false);
}
}
CUDA_TEST_ERROR();
}
void runL2SelectMin(Tensor<float, 2, true>& productDistances,
Tensor<float, 1, true>& centroidDistances,
Tensor<float, 2, true>& outDistances,
Tensor<int, 2, true>& outIndices,
int k,
cudaStream_t stream) {
runL2SelectMin<float>(productDistances,
centroidDistances,
outDistances,
outIndices,
k,
stream);
}
void runL2SelectMin(Tensor<half, 2, true>& productDistances,
Tensor<half, 1, true>& centroidDistances,
Tensor<half, 2, true>& outDistances,
Tensor<int, 2, true>& outIndices,
int k,
cudaStream_t stream) {
runL2SelectMin<half>(productDistances,
centroidDistances,
outDistances,
outIndices,
k,
stream);
}
} } // namespace
|
88abe05043122bf75b2ee802d61e43dcf355f076.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../../gpu_utils/runtime.h"
#include "GPoisson.h"
__global__ void update_poisson_neuron(GPoissonNeurons *d_neurons, int const num, int const start_id)
{
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num; idx += blockDim.x * gridDim.x) {
if (hiprand_uniform(&d_neurons->p_state[idx]) < d_neurons->p_rate[idx]) {
gFireCount[start_id + idx]++;
gFiredTable[gFiredTableCap*gCurrentIdx + atomicAdd(&(gFiredTableSizes[gCurrentIdx]), 1)] = start_id + idx;
}
}
}
| 88abe05043122bf75b2ee802d61e43dcf355f076.cu |
#include "../../gpu_utils/runtime.h"
#include "GPoisson.h"
__global__ void update_poisson_neuron(GPoissonNeurons *d_neurons, int const num, int const start_id)
{
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num; idx += blockDim.x * gridDim.x) {
if (curand_uniform(&d_neurons->p_state[idx]) < d_neurons->p_rate[idx]) {
gFireCount[start_id + idx]++;
gFiredTable[gFiredTableCap*gCurrentIdx + atomicAdd(&(gFiredTableSizes[gCurrentIdx]), 1)] = start_id + idx;
}
}
}
|
b31fbad805871aadeba1e14d251f56ae38ad96de.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek)
* @date 2012-2016
* @copyright University of Pennsylvania & STUDENT
*/
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/execution_policy.h>
#include <util/checkCUDAError.h>
#include <util/tiny_gltf_loader.h>
#include "rasterizeTools.h"
#include "rasterize.h"
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <algorithm>
#define Perspective_Correct_Toggle 1
#define BackFaceCulling_Toggle 1
#define K_Buffer_Toggle 1
#define Bilinear_Color_Filter_Toggle 1
#define Naive_Sort_Toggle 1
#define Alpha_Intensity 0.3f
RenderMode curr_Mode = r_Triangle;
//Tips: You can change the property of the model matrix in main.cpp
// Timer
//int counter = 0;
//float time_ap = 0, time_r = 0, time_f = 0, time_s = 0;
namespace {
typedef unsigned short VertexIndex;
typedef glm::vec3 VertexAttributePosition;
typedef glm::vec3 VertexAttributeNormal;
typedef glm::vec2 VertexAttributeTexcoord;
typedef unsigned char TextureData;
typedef unsigned char BufferByte;
enum PrimitiveType {
Point = 1,
Line = 2,
Triangle = 3
};
struct VertexOut {
glm::vec4 pos;
// TODO: add new attributes to your VertexOut
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor; // eye space normal used for shading, cuz normal will go wrong after perspective transformation
// glm::vec3 col;
glm::vec2 texcoord0;
TextureData* dev_diffuseTex = NULL;
int texWidth, texHeight;
// ...
};
struct Primitive {
PrimitiveType primitiveType = Triangle; // C++ 11 init
VertexOut v[3];
};
struct Fragment {
glm::vec3 color;
// TODO: add new attributes to your Fragment
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor;
VertexAttributeTexcoord texcoord0;
TextureData* dev_diffuseTex;
// ...
int TexWidth, TexHeight;
glm::vec4 K_buffer[4];
};
struct PrimitiveDevBufPointers {
int primitiveMode; //from tinygltfloader macro
PrimitiveType primitiveType;
int numPrimitives;
int numIndices;
int numVertices;
// Vertex In, const after loaded
VertexIndex* dev_indices;
VertexAttributePosition* dev_position;
VertexAttributeNormal* dev_normal;
VertexAttributeTexcoord* dev_texcoord0;
// Materials, add more attributes when needed
TextureData* dev_diffuseTex;
int diffuseTexWidth;
int diffuseTexHeight;
// TextureData* dev_specularTex;
// TextureData* dev_normalTex;
// ...
// Vertex Out, vertex used for rasterization, this is changing every frame
VertexOut* dev_verticesOut;
// TODO: add more attributes when needed
};
struct KBuffer4 {
float depths[4];
};
}
static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap;
static int width = 0;
static int height = 0;
static int totalNumPrimitives = 0;
static Primitive *dev_primitives = NULL;
static Fragment *dev_fragmentBuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static int *dev_mutex = NULL;
static int * dev_depth = NULL; // you might need this buffer when doing depth test
static KBuffer4 *dev_k_buffer = NULL;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0.9;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
// From Wikipedia: https://en.wikipedia.org/wiki/Bilinear_filtering Part "Sample Code"
__device__ __host__ glm::vec3 getBilinearFilteredPixelColor(TextureData* tex, glm::vec2 uv, int texWidth, int texHeight) {
float u = uv.s * texWidth - 0.5f;
float v = uv.t * texHeight - 0.5f;
int x = glm::floor(u);
int y = glm::floor(v);
float u_ratio = u - x;
float v_ratio = v - y;
float u_opposite = 1 - u_ratio;
float v_opposite = 1 - v_ratio;
int i0 = 3 * (x + y * texWidth);
int i1 = 3 * ((x + 1) + y * texWidth);
int i2 = 3 * (x + (y + 1) * texWidth);
int i3 = 3 * ((x + 1) + (y + 1) * texWidth);
float red = (tex[i0] * u_opposite + tex[i1] * u_ratio) * v_opposite + (tex[i2] * u_opposite + tex[i3] * u_ratio) * v_ratio;
float green = (tex[i0 + 1] * u_opposite + tex[i1 + 1] * u_ratio) * v_opposite + (tex[i2 + 1] * u_opposite + tex[i3 + 1] * u_ratio) * v_ratio;
float blue = (tex[i0 + 2] * u_opposite + tex[i1 + 2] * u_ratio) * v_opposite + (tex[i2 + 2] * u_opposite + tex[i3 + 2] * u_ratio) * v_ratio;
return glm::vec3(red, green, blue) / 255.0f;
}
/**
* Writes fragment colors to the framebuffer
*/
__global__
void render(int w, int h, Fragment *fragmentBuffer, glm::vec3 *framebuffer, RenderMode renderMode) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
//framebuffer[index] = fragmentBuffer[index].color;
Fragment fgbuffer = fragmentBuffer[index];
// TODO: add your fragment shader code here
glm::vec3 normal = fgbuffer.eyeNor;
glm::vec3 lightDir = glm::normalize(glm::vec3(-3.0f, 5.0f, 5.0f) - fgbuffer.eyePos);
float lambertian = glm::clamp(glm::dot(normal, lightDir), 0.0f, 1.0f);
float specular = 0.0f;
if (lambertian > 0.0f) {
glm::vec3 viewDir = glm::normalize(-fgbuffer.eyePos);
//this is blinn phong
glm::vec3 halfDir = glm::normalize(lightDir + viewDir);
float specAngle = glm::clamp(glm::dot(halfDir, normal), 0.0f, 1.0f);
specular = glm::pow(specAngle, 16.0f);
}
glm::vec3 ambientColor = glm::vec3(0.0f, 0.0f, 0.0f);
glm::vec3 diffuseColor;
glm::vec3 specColor = glm::vec3(1.0, 1.0, 1.0);
#if K_Buffer_Toggle
float a = Alpha_Intensity;
/*diffuseColor = a * glm::vec3(fgbuffer.K_buffer[3]);
diffuseColor = a * glm::vec3(fgbuffer.K_buffer[2]) + (1 - a)* diffuseColor;
diffuseColor = a * glm::vec3(fgbuffer.K_buffer[1]) + (1 - a)* diffuseColor;
diffuseColor = a * glm::vec3(fgbuffer.K_buffer[0]) + (1 - a)* diffuseColor;
*/
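		// Composite the four k-buffer layers back to front with a fixed alpha
		// (layer 0 is assumed to hold the nearest fragment); layers that still hold
		// the cleared depth of 1.0f in .w are treated as empty and contribute black.
		// The blended result accumulates into K_buffer[0].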
		fgbuffer.K_buffer[3] = glm::vec4(a * glm::vec3(fgbuffer.K_buffer[3]) + (1 - a) * glm::vec3(0.0f, 0.0f, 0.0f), fgbuffer.K_buffer[3].w);
for (int i = 3; i > 0; i--) {
if (fgbuffer.K_buffer[i].w == 1.0f)
fgbuffer.K_buffer[i] = glm::vec4(0.0f,0.0f,0.0f,1.0f);
fgbuffer.K_buffer[i-1] = glm::vec4((a * glm::vec3(fgbuffer.K_buffer[i-1]) + (1 - a)*glm::vec3(fgbuffer.K_buffer[i])), fgbuffer.K_buffer[i-1].w);
}
diffuseColor = glm::vec3(fgbuffer.K_buffer[0]);
specular = 0;
lambertian = 1.0f;
#else
if (fgbuffer.dev_diffuseTex != NULL)
#if Bilinear_Color_Filter_Toggle
diffuseColor = getBilinearFilteredPixelColor(fgbuffer.dev_diffuseTex, fgbuffer.texcoord0, fgbuffer.TexWidth, fgbuffer.TexHeight);
#else
diffuseColor = fgbuffer.color;
#endif
else
diffuseColor = fgbuffer.color;
#endif
glm::vec3 colorLinear = ambientColor + lambertian * diffuseColor + specular * specColor;
framebuffer[index] = colorLinear;
if(renderMode == r_Point || renderMode == r_Line)
framebuffer[index] = diffuseColor;
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h) {
width = w;
height = h;
hipFree(dev_fragmentBuffer);
hipMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment));
hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
hipFree(dev_framebuffer);
hipMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
hipFree(dev_depth);
hipMalloc(&dev_depth, width * height * sizeof(int));
#if K_Buffer_Toggle
hipFree(dev_k_buffer);
hipMalloc((void**)&dev_k_buffer, width * height * sizeof(KBuffer4));
#endif
hipFree(dev_mutex);
hipMalloc(&dev_mutex, width * height * sizeof(int));
checkCUDAError("rasterizeInit");
}
__global__
void initDepth(int w, int h, int * depth)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
depth[index] = INT_MAX;
}
}
__global__
void initKBuffer4(int w, int h, KBuffer4 * k_buffer)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
k_buffer[index].depths[0] = 1.0f;
k_buffer[index].depths[1] = 1.0f;
k_buffer[index].depths[2] = 1.0f;
k_buffer[index].depths[3] = 1.0f;
}
}
__global__
void initKBufferInFrag(int w, int h, Fragment * fragbuffer)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
fragbuffer[index].K_buffer[0].w = 1.0f;
fragbuffer[index].K_buffer[1].w = 1.0f;
fragbuffer[index].K_buffer[2].w = 1.0f;
fragbuffer[index].K_buffer[3].w = 1.0f;
}
}
/**
* Kernel with stride support that can sometimes replace hipMemcpy
* One thread is responsible for copying one component
*/
__global__
void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) {
// Attribute (vec3 position)
// component (3 * float)
// byte (4 * byte)
// id of component
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
int count = i / n;
int offset = i - count * n; // which component of the attribute
for (int j = 0; j < componentTypeByteSize; j++) {
dev_dst[count * componentTypeByteSize * n
+ offset * componentTypeByteSize
+ j]
=
dev_src[byteOffset
+ count * (byteStride == 0 ? componentTypeByteSize * n : byteStride)
+ offset * componentTypeByteSize
+ j];
}
}
}
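// Illustrative host-side use of _deviceBufferCopy for a tightly packed vec3
// position attribute (the names below are assumptions, not part of the loader):
// n = 3 components, componentTypeByteSize = sizeof(float), byteStride = 0, so
// component c of vertex v is read from byteOffset + (v * 3 + c) * sizeof(float).
// dim3 block(128);
// dim3 grid((numVertices * 3 + block.x - 1) / block.x);
// _deviceBufferCopy<<<grid, block>>>(numVertices * 3, (BufferByte*)dev_position,
//     dev_bufferView, 3, 0, accessor.byteOffset, sizeof(float));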
__global__
void _nodeMatrixTransform(
int numVertices,
VertexAttributePosition* position,
VertexAttributeNormal* normal,
glm::mat4 MV, glm::mat3 MV_normal) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f));
normal[vid] = glm::normalize(MV_normal * normal[vid]);
}
}
glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) {
glm::mat4 curMatrix(1.0);
const std::vector<double> &m = n.matrix;
if (m.size() > 0) {
// matrix, copy it
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
curMatrix[i][j] = (float)m.at(4 * i + j);
}
}
}
else {
// no matrix, use rotation, scale, translation
if (n.translation.size() > 0) {
curMatrix[3][0] = n.translation[0];
curMatrix[3][1] = n.translation[1];
curMatrix[3][2] = n.translation[2];
}
if (n.rotation.size() > 0) {
glm::mat4 R;
glm::quat q;
q[0] = n.rotation[0];
q[1] = n.rotation[1];
q[2] = n.rotation[2];
q[3] = n.rotation[3]; // glTF stores rotation as [x, y, z, w]
R = glm::mat4_cast(q);
curMatrix = curMatrix * R;
}
if (n.scale.size() > 0) {
curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2]));
}
}
return curMatrix;
}
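// For the no-matrix case above, the translation is written into the fourth
// column of the identity, so the resulting composition is M = T * R * S.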
void traverseNode(
std::map<std::string, glm::mat4> & n2m,
const tinygltf::Scene & scene,
const std::string & nodeString,
const glm::mat4 & parentMatrix
)
{
const tinygltf::Node & n = scene.nodes.at(nodeString);
glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n);
n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M));
auto it = n.children.begin();
auto itEnd = n.children.end();
for (; it != itEnd; ++it) {
traverseNode(n2m, scene, *it, M);
}
}
void rasterizeSetBuffers(const tinygltf::Scene & scene) {
totalNumPrimitives = 0;
std::map<std::string, BufferByte*> bufferViewDevPointers;
// 1. copy all `bufferViews` to device memory
{
std::map<std::string, tinygltf::BufferView>::const_iterator it(
scene.bufferViews.begin());
std::map<std::string, tinygltf::BufferView>::const_iterator itEnd(
scene.bufferViews.end());
for (; it != itEnd; it++) {
const std::string key = it->first;
const tinygltf::BufferView &bufferView = it->second;
if (bufferView.target == 0) {
continue; // Unsupported bufferView.
}
const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer);
BufferByte* dev_bufferView;
hipMalloc(&dev_bufferView, bufferView.byteLength);
hipMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, hipMemcpyHostToDevice);
checkCUDAError("Set BufferView Device Mem");
bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView));
}
}
// 2. for each mesh:
// for each primitive:
// build device buffers of indices, material, and each attribute
// and store these pointers in a map
{
std::map<std::string, glm::mat4> nodeString2Matrix;
auto rootNodeNamesList = scene.scenes.at(scene.defaultScene);
{
auto it = rootNodeNamesList.begin();
auto itEnd = rootNodeNamesList.end();
for (; it != itEnd; ++it) {
traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f));
}
}
// parse through node to access mesh
auto itNode = nodeString2Matrix.begin();
auto itEndNode = nodeString2Matrix.end();
for (; itNode != itEndNode; ++itNode) {
const tinygltf::Node & N = scene.nodes.at(itNode->first);
const glm::mat4 & matrix = itNode->second;
const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix)));
auto itMeshName = N.meshes.begin();
auto itEndMeshName = N.meshes.end();
for (; itMeshName != itEndMeshName; ++itMeshName) {
const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName);
auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>()));
std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second;
// for each primitive
for (size_t i = 0; i < mesh.primitives.size(); i++) {
const tinygltf::Primitive &primitive = mesh.primitives[i];
if (primitive.indices.empty())
return;
// TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes
VertexIndex* dev_indices = NULL;
VertexAttributePosition* dev_position = NULL;
VertexAttributeNormal* dev_normal = NULL;
VertexAttributeTexcoord* dev_texcoord0 = NULL;
// ----------Indices-------------
const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView);
BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView);
// assume type is SCALAR for indices
int n = 1;
int numIndices = indexAccessor.count;
int componentTypeByteSize = sizeof(VertexIndex);
int byteLength = numIndices * n * componentTypeByteSize;
dim3 numThreadsPerBlock(128);
dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
hipMalloc(&dev_indices, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
numIndices,
(BufferByte*)dev_indices,
dev_bufferView,
n,
indexAccessor.byteStride,
indexAccessor.byteOffset,
componentTypeByteSize);
checkCUDAError("Set Index Buffer");
// ---------Primitive Info-------
// Warning: LINE_STRIP is not supported in tinygltfloader
int numPrimitives;
PrimitiveType primitiveType;
switch (primitive.mode) {
case TINYGLTF_MODE_TRIANGLES:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices / 3;
break;
case TINYGLTF_MODE_TRIANGLE_STRIP:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_TRIANGLE_FAN:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_LINE:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices / 2;
break;
case TINYGLTF_MODE_LINE_LOOP:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices + 1;
break;
case TINYGLTF_MODE_POINTS:
primitiveType = PrimitiveType::Point;
numPrimitives = numIndices;
break;
default:
// output error
break;
};
// ----------Attributes-------------
auto it(primitive.attributes.begin());
auto itEnd(primitive.attributes.end());
int numVertices = 0;
// for each attribute
for (; it != itEnd; it++) {
const tinygltf::Accessor &accessor = scene.accessors.at(it->second);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView);
int n = 1;
if (accessor.type == TINYGLTF_TYPE_SCALAR) {
n = 1;
}
else if (accessor.type == TINYGLTF_TYPE_VEC2) {
n = 2;
}
else if (accessor.type == TINYGLTF_TYPE_VEC3) {
n = 3;
}
else if (accessor.type == TINYGLTF_TYPE_VEC4) {
n = 4;
}
BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView);
BufferByte ** dev_attribute = NULL;
numVertices = accessor.count;
int componentTypeByteSize;
// Note: since the type of our attribute array (dev_position) is static (float32)
// We assume the glTF model attribute type are 5126(FLOAT) here
if (it->first.compare("POSITION") == 0) {
componentTypeByteSize = sizeof(VertexAttributePosition) / n;
dev_attribute = (BufferByte**)&dev_position;
}
else if (it->first.compare("NORMAL") == 0) {
componentTypeByteSize = sizeof(VertexAttributeNormal) / n;
dev_attribute = (BufferByte**)&dev_normal;
}
else if (it->first.compare("TEXCOORD_0") == 0) {
componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n;
dev_attribute = (BufferByte**)&dev_texcoord0;
}
std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n';
dim3 numThreadsPerBlock(128);
dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
int byteLength = numVertices * n * componentTypeByteSize;
hipMalloc(dev_attribute, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
n * numVertices,
*dev_attribute,
dev_bufferView,
n,
accessor.byteStride,
accessor.byteOffset,
componentTypeByteSize);
std::string msg = "Set Attribute Buffer: " + it->first;
checkCUDAError(msg.c_str());
}
// malloc for VertexOut
VertexOut* dev_vertexOut;
hipMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut));
checkCUDAError("Malloc VertexOut Buffer");
// ----------Materials-------------
// You can only worry about this part once you started to
// implement textures for your rasterizer
TextureData* dev_diffuseTex = NULL;
int diffuseTexWidth = 0;
int diffuseTexHeight = 0;
if (!primitive.material.empty()) {
const tinygltf::Material &mat = scene.materials.at(primitive.material);
printf("material.name = %s\n", mat.name.c_str());
if (mat.values.find("diffuse") != mat.values.end()) {
std::string diffuseTexName = mat.values.at("diffuse").string_value;
if (scene.textures.find(diffuseTexName) != scene.textures.end()) {
const tinygltf::Texture &tex = scene.textures.at(diffuseTexName);
if (scene.images.find(tex.source) != scene.images.end()) {
const tinygltf::Image &image = scene.images.at(tex.source);
size_t s = image.image.size() * sizeof(TextureData);
hipMalloc(&dev_diffuseTex, s);
hipMemcpy(dev_diffuseTex, &image.image.at(0), s, hipMemcpyHostToDevice);
diffuseTexWidth = image.width;
diffuseTexHeight = image.height;
checkCUDAError("Set Texture Image data");
}
}
}
// TODO: write your code for other materials
// You may have to take a look at tinygltfloader
// You can also use the diffuse-loading code above as a starting point
}
// ---------Node hierarchy transform--------
hipDeviceSynchronize();
// Transform from local to camera
dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > (
numVertices,
dev_position,
dev_normal,
matrix,
matrixNormal);
checkCUDAError("Node hierarchy transformation");
// at the end of the for loop of primitive
// push dev pointers to map
primitiveVector.push_back(PrimitiveDevBufPointers{
primitive.mode,
primitiveType,
numPrimitives,
numIndices,
numVertices,
dev_indices,
dev_position,
dev_normal,
dev_texcoord0,
dev_diffuseTex,
diffuseTexWidth,
diffuseTexHeight,
dev_vertexOut //VertexOut
});
totalNumPrimitives += numPrimitives;
} // for each primitive
} // for each mesh
} // for each node
}
// 3. Malloc for dev_primitives
{
hipMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive));
}
// Finally, hipFree raw dev_bufferViews
{
std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin());
std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end());
//bufferViewDevPointers
for (; it != itEnd; it++) {
hipFree(it->second);
}
checkCUDAError("Free BufferView Device Mem");
}
}
__global__
void _vertexTransformAndAssembly(
int numVertices,
PrimitiveDevBufPointers primitive,
glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal,
int width, int height) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
// TODO: Apply vertex transformation here
// Multiply each vertex position by the MVP matrix; this transforms it into clip space
glm::vec4 position = glm::vec4(primitive.dev_position[vid], 1.0f);
glm::vec4 clipVPosition = MVP * position;
// Then divide the pos by its w element to transform into NDC space
clipVPosition /= clipVPosition.w;
// Finally transform x and y to viewport space
clipVPosition.x = 0.5f * (float)width * (clipVPosition.x + 1.0f); // Viewport(Screen / Window) Space
clipVPosition.y = 0.5f * (float)height * (1.0f - clipVPosition.y); // Viewport(Screen / Window) Space
primitive.dev_verticesOut[vid].pos = clipVPosition;
primitive.dev_verticesOut[vid].eyeNor = glm::normalize(MV_normal * primitive.dev_normal[vid]);
glm::vec4 eyeSpacePos = (MV * glm::vec4(primitive.dev_position[vid], 1.0f));
eyeSpacePos /= eyeSpacePos.w;
primitive.dev_verticesOut[vid].eyePos = glm::vec3(eyeSpacePos);
if (primitive.dev_diffuseTex != NULL) {
primitive.dev_verticesOut[vid].dev_diffuseTex = primitive.dev_diffuseTex;
primitive.dev_verticesOut[vid].texcoord0 = primitive.dev_texcoord0[vid];
primitive.dev_verticesOut[vid].texWidth = primitive.diffuseTexWidth;
primitive.dev_verticesOut[vid].texHeight = primitive.diffuseTexHeight;
}
// TODO: Apply vertex assembly here
// Assemble all attribute arrays into the primitive array
}
}
static int curPrimitiveBeginId = 0;
__global__
void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) {
// index id
int iid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (iid < numIndices) {
// TODO: uncomment the following code for a start
// This is primitive assembly for triangles
int pid; // id for cur primitives vector
if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES) {
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
// TODO: other primitive types (point, line)
}
}
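// Illustrative sketch for the TODO above (not part of the pipeline): line-mode
// assembly would follow the same pattern with two vertices per primitive, e.g.
// if (primitive.primitiveMode == TINYGLTF_MODE_LINE) {
//     pid = iid / 2;
//     dev_primitives[pid + curPrimitiveBeginId].v[iid % 2]
//         = primitive.dev_verticesOut[primitive.dev_indices[iid]];
// }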
__device__ float cuda_clamp(float input, float min, float max) {
float result = input;
if (result < min)
result = min;
if (result > max)
result = max;
return result;
}
__device__ __host__ float cuda_getPerspectiveCorrectZ(glm::vec3 tri[3], glm::vec3 baryvalue) {
float inverse_Z = baryvalue.x / (tri[0].z + FLT_EPSILON) + baryvalue.y / (tri[1].z + FLT_EPSILON) + baryvalue.z / (tri[2].z + FLT_EPSILON);
return 1.0f / inverse_Z;
}
__device__ __host__ glm::vec2 cuda_getPerspectiveCorrectUV(glm::vec2 tri_uvs[3], glm::vec3 tri[3], glm::vec3 baryvalue, float Z) {
glm::vec2 correct_texcoords = Z * glm::vec2(
baryvalue.x * tri_uvs[0] / (tri[0].z + FLT_EPSILON) +
baryvalue.y * tri_uvs[1] / (tri[1].z + FLT_EPSILON) +
baryvalue.z * tri_uvs[2] / (tri[2].z + FLT_EPSILON));
return correct_texcoords;
}
__device__ __host__ glm::vec3 cuda_getPerspectiveCorrectNormal(glm::vec3 tri_normals[3], glm::vec3 tri[3], glm::vec3 baryvalue, float Z) {
glm::vec3 correct_normal = glm::normalize(Z * glm::vec3(
baryvalue.x * tri_normals[0] / (tri[0].z + FLT_EPSILON) +
baryvalue.y * tri_normals[1] / (tri[1].z + FLT_EPSILON) +
baryvalue.z * tri_normals[2] / (tri[2].z + FLT_EPSILON)));
return correct_normal;
}
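// The three helpers above share one formula: 1/Z = b0/z0 + b1/z1 + b2/z2 and
// A = Z * (b0*A0/z0 + b1*A1/z1 + b2*A2/z2) for any vertex attribute A.
// A minimal sketch for a scalar attribute (illustrative only, unused by the pipeline):
__device__ __host__ inline float cuda_getPerspectiveCorrectScalar(const float attr[3], glm::vec3 tri[3], glm::vec3 baryvalue, float Z) {
return Z * (baryvalue.x * attr[0] / (tri[0].z + FLT_EPSILON) +
baryvalue.y * attr[1] / (tri[1].z + FLT_EPSILON) +
baryvalue.z * attr[2] / (tri[2].z + FLT_EPSILON));
}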
__host__ __device__ void naive_sort(glm::vec4 *k_buffer4) {
for (int i = 0; i < 3; i++) {
float min = k_buffer4[i].w;
int n = i;
for (int j = i + 1; j < 4; j++) {
if (k_buffer4[j].w < min) {
n = j;
min = k_buffer4[j].w;
}
}
glm::vec4 temp = k_buffer4[i];
k_buffer4[i] = k_buffer4[n];
k_buffer4[n] = temp;
}
}
// From https://devtalk.nvidia.com/default/topic/492068/atomicmin-with-float/
__device__ static
float fatomicMin(float *addr, float value)
{
float old = *addr, assumed;
if (old <= value) return old;
do {
assumed = old;
old = __int_as_float(atomicCAS((int*)addr, __float_as_int(assumed), __float_as_int(value)));
} while (old != assumed);
return old;
}
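// Note: comparing float bit patterns through atomicCAS only orders correctly for
// non-negative values; the k-buffer depths written here are in [0, 1], so the
// pattern above is safe for this use.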
__global__ void rasterizer(Fragment *fragmentBuffer, Primitive *primitives, int *depth, int num_primitives, int height, int width, int *mutex, KBuffer4* k_buffer) {
// index of primitives
int pid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pid < num_primitives) {
Primitive this_primitives = primitives[pid];
#if BackFaceCulling_Toggle
if (glm::dot(this_primitives.v[0].eyeNor, -this_primitives.v[0].eyePos) < 0.0f)
return;
#endif
glm::vec3 tri[3];
//tri[0] = glm::vec3(0.5f - this_primitives.v[0].pos[0] * 3.0f, 0.8f - this_primitives.v[0].pos[1] * 3.0f, this_primitives.v[0].pos[2]);
//tri[1] = glm::vec3(0.5f - this_primitives.v[1].pos[0] * 3.0f, 0.8f - this_primitives.v[1].pos[1] * 3.0f, this_primitives.v[1].pos[2]);
//tri[2] = glm::vec3(0.5f - this_primitives.v[2].pos[0] * 3.0f, 0.8f - this_primitives.v[2].pos[1] * 3.0f, this_primitives.v[2].pos[2]);
tri[0] = glm::vec3(this_primitives.v[0].pos);
tri[1] = glm::vec3(this_primitives.v[1].pos);
tri[2] = glm::vec3(this_primitives.v[2].pos);
AABB bbox;
bbox = getAABBForTriangle(tri);
/*bbox.min.x *= width;
bbox.max.x *= width;
bbox.min.y *= height;
bbox.max.y *= height;*/
//clamp inside of the screen
bbox.min.x = glm::clamp(bbox.min.x, 0.0f, float(width));
bbox.max.x = glm::clamp(bbox.max.x, 0.0f, float(width));
bbox.min.y = glm::clamp(bbox.min.y, 0.0f, float(height));
bbox.max.y = glm::clamp(bbox.max.y, 0.0f, float(height));
//scan the pixels inside of the bbox
for (int i = bbox.min.x; i <= bbox.max.x; i++)
for (int j = bbox.min.y; j <= bbox.max.y; j++) {
glm::vec2 point(i, j);
glm::vec3 baryvalue = calculateBarycentricCoordinate(tri, point);
if (isBarycentricCoordInBounds(baryvalue)) {
int pixel_index = i + j*width;
float ffragDepth;
#if Perspective_Correct_Toggle
ffragDepth = cuda_getPerspectiveCorrectZ(tri, baryvalue);
#else
ffragDepth = baryvalue[0] * this_primitives.v[0].pos[2] + baryvalue[1] * this_primitives.v[1].pos[2] + baryvalue[2] * this_primitives.v[2].pos[2];
#endif
int ifragDepth = INT_MAX * ffragDepth;
#if K_Buffer_Toggle
float maxDepth = -1.0f;
int max_id = 0;
for (int m = 3; m >0; m--) {
if (maxDepth < k_buffer[pixel_index].depths[m]) {
maxDepth = k_buffer[pixel_index].depths[m];
max_id = m;
}
}
if (ffragDepth < maxDepth) {
fatomicMin(&k_buffer[pixel_index].depths[max_id], ffragDepth);
#else
if (ifragDepth < depth[pixel_index]) {
atomicMin(&depth[pixel_index], ifragDepth);
#endif
bool isSet;
do {
isSet = (atomicCAS(&mutex[pixel_index], 0, 1) == 0);
if (isSet) {
// Critical section goes here.
// The critical section MUST be inside the wait loop;
// if it is afterward, a deadlock will occur.
//fragmentBuffer[pixel_index].color = glm::vec3(1, 1, 1);
#if Perspective_Correct_Toggle
glm::vec2 tri_uvs[3];
tri_uvs[0] = this_primitives.v[0].texcoord0;
tri_uvs[1] = this_primitives.v[1].texcoord0;
tri_uvs[2] = this_primitives.v[2].texcoord0;
fragmentBuffer[pixel_index].texcoord0 = cuda_getPerspectiveCorrectUV(tri_uvs, tri, baryvalue, ffragDepth);
//fragmentBuffer[pixel_index].texcoord0 = baryvalue[0] * this_primitives.v[0].texcoord0 + baryvalue[1] * this_primitives.v[1].texcoord0 + baryvalue[2] * this_primitives.v[2].texcoord0;
#else
fragmentBuffer[pixel_index].texcoord0 = baryvalue[0] * this_primitives.v[0].texcoord0 + baryvalue[1] * this_primitives.v[1].texcoord0 + baryvalue[2] * this_primitives.v[2].texcoord0;
#endif
#if Perspective_Correct_Toggle
glm::vec3 tri_normals[3];
tri_normals[0] = this_primitives.v[0].eyeNor;
tri_normals[1] = this_primitives.v[1].eyeNor;
tri_normals[2] = this_primitives.v[2].eyeNor;
fragmentBuffer[pixel_index].eyeNor = cuda_getPerspectiveCorrectNormal(tri_normals, tri, baryvalue, ffragDepth);
#else
fragmentBuffer[pixel_index].eyeNor = baryvalue[0] * this_primitives.v[0].eyeNor + baryvalue[1] * this_primitives.v[1].eyeNor + baryvalue[2] * this_primitives.v[2].eyeNor;
#endif
fragmentBuffer[pixel_index].eyePos = baryvalue[0] * this_primitives.v[0].eyePos + baryvalue[1] * this_primitives.v[1].eyePos + baryvalue[2] * this_primitives.v[2].eyePos;
fragmentBuffer[pixel_index].TexWidth = this_primitives.v[0].texWidth;
fragmentBuffer[pixel_index].TexHeight = this_primitives.v[0].texHeight;
#if K_Buffer_Toggle
if (this_primitives.v[0].dev_diffuseTex != NULL) {
fragmentBuffer[pixel_index].dev_diffuseTex = this_primitives.v[0].dev_diffuseTex;
#if Bilinear_Color_Filter_Toggle
fragmentBuffer[pixel_index].color = getBilinearFilteredPixelColor(this_primitives.v[0].dev_diffuseTex, fragmentBuffer[pixel_index].texcoord0, this_primitives.v[0].texWidth, this_primitives.v[0].texHeight);
#else
fragmentBuffer[pixel_index].color = glm::vec3(1.0f, 1.0f, 1.0f);
#endif
//fragmentBuffer[pixel_index].K_buffer[max_id] = glm::vec4(fragmentBuffer[pixel_index].texcoord0, 0.0f, ffragDepth);
}
else
fragmentBuffer[pixel_index].color = glm::vec3(1, 1, 1);
//K_buffer RBGZ
fragmentBuffer[pixel_index].K_buffer[max_id] = glm::vec4(fragmentBuffer[pixel_index].color, ffragDepth);
//sort fragment k-buffer
#if Naive_Sort_Toggle
naive_sort(fragmentBuffer[pixel_index].K_buffer);
#else
float keys[4] = { fragmentBuffer[pixel_index].K_buffer[0].w, fragmentBuffer[pixel_index].K_buffer[1].w, fragmentBuffer[pixel_index].K_buffer[2].w , fragmentBuffer[pixel_index].K_buffer[3].w };
thrust::sort_by_key(thrust::device, keys, keys + 4, fragmentBuffer[pixel_index].K_buffer);
#endif
for (int m = 0; m < 4; m++) {
k_buffer[pixel_index].depths[m] = fragmentBuffer[pixel_index].K_buffer[m].w;
}
#else
if (this_primitives.v[0].dev_diffuseTex != NULL) {
fragmentBuffer[pixel_index].dev_diffuseTex = this_primitives.v[0].dev_diffuseTex;
fragmentBuffer[pixel_index].color = glm::vec3(1.0f, 1.0f, 1.0f);
//fragmentBuffer[pixel_index].color = getBilinearFilteredPixelColor(this_primitives.v[0].dev_diffuseTex, fragmentBuffer[pixel_index].texcoord0, this_primitives.v[0].texWidth, this_primitives.v[0].texHeight);
}
else
fragmentBuffer[pixel_index].color = glm::vec3(1, 1, 1);
#endif
//k_max_idx[pixel_index] = 3;
}
if (isSet) {
mutex[pixel_index] = 0;
}
} while (!isSet);
}
}
}
}
}
__global__ void rasterizer_Line(Fragment *fragmentBuffer, Primitive *primitives, int *depth, int num_primitives, int height, int width, int *mutex) {
// index of primitives
int pid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pid < num_primitives) {
Primitive this_primitives = primitives[pid];
//if (glm::dot(this_primitives.v[0].eyeNor, -this_primitives.v[0].eyePos) < 0.0f)
// return;
//3 edges for each triangle
for (int i = 0; i < 3; i++) {
VertexOut v_outs[2];
v_outs[0] = this_primitives.v[i % 3];
v_outs[1] = this_primitives.v[(i + 1) % 3];
glm::vec3 v_start, v_end;
v_start = glm::vec3(v_outs[0].pos);
v_end = glm::vec3(v_outs[1].pos);
v_start = glm::clamp(v_start, glm::vec3(0, 0, 0), glm::vec3(width, height, 1.0f));
v_end = glm::clamp(v_end, glm::vec3(0, 0, 0), glm::vec3(width, height, 1.0f));
glm::vec3 v_dir = glm::normalize(v_end - v_start);
int j = 0;
while (true) {
glm::vec3 v_curr = v_start + v_dir * float(j);
j++;
if (glm::dot(v_end - v_curr, v_dir) < 0.0f)
break;
int px, py;
px = v_curr.x;
py = v_curr.y;
int pixel_index = px + py*width;
glm::vec2 baryvalue;
baryvalue[0] = glm::length(v_curr - v_start) / glm::length(v_end - v_start);
baryvalue[1] = 1.0f - baryvalue[0];
//Get perspective Correct Z
float ffragDepth = baryvalue[0] / v_start.z + baryvalue[1] / v_end.z;
ffragDepth = 1.0f / ffragDepth;
int ifragDepth = INT_MAX * ffragDepth;
if (ifragDepth < depth[pixel_index]) {
atomicMin(&depth[pixel_index], ifragDepth);
bool isSet;
do {
isSet = (atomicCAS(&mutex[pixel_index], 0, 1) == 0);
if (isSet) {
//TexCoords(Perspective Correct)
fragmentBuffer[pixel_index].texcoord0 = (ffragDepth*(
v_outs[0].texcoord0 * baryvalue[0] / v_outs[0].pos.z +
v_outs[1].texcoord0 * baryvalue[1] / v_outs[1].pos.z
)
);
//Normals(Per. Cor.)
fragmentBuffer[pixel_index].eyeNor = glm::normalize(ffragDepth*(
v_outs[0].eyeNor * baryvalue[0] / v_outs[0].pos.z +
v_outs[1].eyeNor * baryvalue[1] / v_outs[1].pos.z
)
);
//EyePos
fragmentBuffer[pixel_index].eyePos = baryvalue[0] * v_outs[0].eyePos + baryvalue[1] * v_outs[1].eyePos;
//
fragmentBuffer[pixel_index].TexWidth = v_outs[0].texWidth;
fragmentBuffer[pixel_index].TexHeight = v_outs[0].texHeight;
//Tex
if (v_outs[0].dev_diffuseTex != NULL) {
fragmentBuffer[pixel_index].dev_diffuseTex = v_outs[0].dev_diffuseTex;
//fragmentBuffer[pixel_index].color = getBilinearFilteredPixelColor(this_primitives.v[0].dev_diffuseTex, fragmentBuffer[pixel_index].texcoord0, this_primitives.v[0].texWidth, this_primitives.v[0].texHeight);
}
else
fragmentBuffer[pixel_index].color = glm::vec3(1, 1, 1);
}
if (isSet) {
mutex[pixel_index] = 0;
}
} while (!isSet);
}
}
}
}
}
__global__ void rasterizer_Point(Fragment *fragmentBuffer, Primitive *primitives, int *depth, int num_primitives, int height, int width, int *mutex) {
// index of primitives
int pid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pid < num_primitives) {
Primitive this_primitives = primitives[pid];
//if (glm::dot(this_primitives.v[0].eyeNor, -this_primitives.v[0].eyePos) < 0.0f)
// return;
//3 points for each triangle
for (int i = 0; i < 3; i++) {
int pixel_index = int(this_primitives.v[i].pos.x) + int(this_primitives.v[i].pos.y) * width;
float ffragDepth = this_primitives.v[i].pos.z;
int ifragDepth = INT_MAX * ffragDepth;
if (ifragDepth < depth[pixel_index]) {
atomicMin(&depth[pixel_index], ifragDepth);
bool isSet;
do {
isSet = (atomicCAS(&mutex[pixel_index], 0, 1) == 0);
if (isSet) {
//TexCoords
fragmentBuffer[pixel_index].texcoord0 = this_primitives.v[i].texcoord0;
//Normals(Per. Cor.)
fragmentBuffer[pixel_index].eyeNor = this_primitives.v[i].eyeNor;
//EyePos
fragmentBuffer[pixel_index].eyePos = this_primitives.v[i].eyePos;
//
fragmentBuffer[pixel_index].TexWidth = this_primitives.v[0].texWidth;
fragmentBuffer[pixel_index].TexHeight = this_primitives.v[0].texHeight;
//Tex
if (this_primitives.v[0].dev_diffuseTex != NULL) {
fragmentBuffer[pixel_index].dev_diffuseTex = this_primitives.v[0].dev_diffuseTex;
//fragmentBuffer[pixel_index].color = getBilinearFilteredPixelColor(this_primitives.v[0].dev_diffuseTex, fragmentBuffer[pixel_index].texcoord0, this_primitives.v[0].texWidth, this_primitives.v[0].texHeight);
}
else
fragmentBuffer[pixel_index].color = glm::vec3(1, 1, 1);
}
if (isSet) {
mutex[pixel_index] = 0;
}
} while (!isSet);
}
}
}
}
struct BackFaceCulling_Cmp {
__host__ __device__ bool operator()(const Primitive &p) {
glm::vec3 face_normal = glm::cross(glm::vec3(p.v[1].pos - p.v[0].pos), glm::vec3(p.v[2].pos - p.v[0].pos));
glm::vec3 inverse_eye_dir = -p.v[0].eyePos;
// because the NDC-to-pixel transform mirrors the vertices, the front-face winding becomes clockwise
return glm::dot(face_normal, inverse_eye_dir) > 0.0f;
}
};
/**
* Perform rasterization.
*/
void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal) {
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
// Execute your rasterization pipeline here
// (See README for rasterization pipeline outline.)
/*float time_elapsed=0;
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start,0);
*/
// Vertex Process & primitive assembly
{
curPrimitiveBeginId = 0;
dim3 numThreadsPerBlock(128);
auto it = mesh2PrimitivesMap.begin();
auto itEnd = mesh2PrimitivesMap.end();
for (; it != itEnd; ++it) {
auto p = (it->second).begin(); // each primitive
auto pEnd = (it->second).end();
for (; p != pEnd; ++p) {
dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(p->numVertices, *p, MVP, MV, MV_normal, width, height);
checkCUDAError("Vertex Processing");
hipDeviceSynchronize();
_primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> >
(p->numIndices,
curPrimitiveBeginId,
dev_primitives,
*p);
checkCUDAError("Primitive Assembly");
curPrimitiveBeginId += p->numPrimitives;
}
}
checkCUDAError("Vertex Processing and Primitive Assembly");
}
/*hipEventRecord( stop,0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&time_elapsed,start,stop);
if (counter < 100) {
time_ap += time_elapsed;
}
else if (counter == 100) {
printf("Vertex Process & primitive Assembly: %f ms\n", time_ap);
}*/
int Culled_totalNumPrimitives = totalNumPrimitives;
#if BackFaceCulling_Toggle
Primitive* dev_primitives_end = thrust::remove_if(thrust::device, dev_primitives, dev_primitives + totalNumPrimitives, BackFaceCulling_Cmp());
Culled_totalNumPrimitives = dev_primitives_end - dev_primitives;
if (Culled_totalNumPrimitives <= 0)
Culled_totalNumPrimitives = 1;
#endif
hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
initDepth << <blockCount2d, blockSize2d >> >(width, height, dev_depth);
#if K_Buffer_Toggle
initKBuffer4 << <blockCount2d, blockSize2d >> > (width, height, dev_k_buffer);
initKBufferInFrag << <blockCount2d, blockSize2d >> > (width, height, dev_fragmentBuffer);
#endif
//hipEventRecord(start, 0);
// TODO: rasterize
hipMemset(dev_mutex, 0, width * height * sizeof(int));
dim3 numThreadsPerBlock(128);
dim3 numBlocksForPrimitives((Culled_totalNumPrimitives + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
if (curr_Mode == r_Point)
rasterizer_Point << <numBlocksForPrimitives, numThreadsPerBlock >> >(dev_fragmentBuffer, dev_primitives, dev_depth, Culled_totalNumPrimitives, height, width, dev_mutex);
else if (curr_Mode == r_Line)
rasterizer_Line << <numBlocksForPrimitives, numThreadsPerBlock >> >(dev_fragmentBuffer, dev_primitives, dev_depth, Culled_totalNumPrimitives, height, width, dev_mutex);
else if (curr_Mode == r_Triangle)
rasterizer << <numBlocksForPrimitives, numThreadsPerBlock >> >(dev_fragmentBuffer, dev_primitives, dev_depth, Culled_totalNumPrimitives, height, width, dev_mutex, dev_k_buffer);
/*hipEventRecord(stop, 0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&time_elapsed, start, stop);
if (counter < 100) {
time_r += time_elapsed;
}
else if (counter == 100) {
printf("Rasterization: %f ms\n", time_r);
}*/
checkCUDAError("rasterization");
//hipEventRecord(start, 0);
// Copy depthbuffer colors into framebuffer
render << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer, dev_framebuffer, curr_Mode);
/*hipEventRecord(stop, 0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&time_elapsed, start, stop);*/
/*if (counter < 100) {
time_f += time_elapsed;
}
else if (counter == 100) {
printf("Render(Fragment Shader): %f ms\n", time_f);
}*/
//hipEventRecord(start, 0);
checkCUDAError("fragment shader");
// Copy framebuffer into OpenGL buffer for OpenGL previewing
sendImageToPBO << <blockCount2d, blockSize2d >> >(pbo, width, height, dev_framebuffer);
//hipEventRecord(stop, 0);
//hipEventSynchronize(start);
//hipEventSynchronize(stop);
//hipEventElapsedTime(&time_elapsed, start, stop);
//if (counter < 100) {
// time_s += time_elapsed;
//}
//else if (counter == 100) {
// printf("SendToPBO: %f ms\n", time_s);
//}
//counter++;
checkCUDAError("copy render result to pbo");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
// deconstruct primitives attribute/indices device buffer
auto it(mesh2PrimitivesMap.begin());
auto itEnd(mesh2PrimitivesMap.end());
for (; it != itEnd; ++it) {
for (auto p = it->second.begin(); p != it->second.end(); ++p) {
hipFree(p->dev_indices);
hipFree(p->dev_position);
hipFree(p->dev_normal);
hipFree(p->dev_texcoord0);
hipFree(p->dev_diffuseTex);
hipFree(p->dev_verticesOut);
//TODO: release other attributes and materials
}
}
////////////
hipFree(dev_primitives);
dev_primitives = NULL;
hipFree(dev_fragmentBuffer);
dev_fragmentBuffer = NULL;
hipFree(dev_framebuffer);
dev_framebuffer = NULL;
hipFree(dev_depth);
dev_depth = NULL;
#if K_Buffer_Toggle
hipFree(dev_k_buffer);
dev_k_buffer = NULL;
#endif
hipFree(dev_mutex);
dev_mutex = NULL;
checkCUDAError("rasterize Free");
}
| b31fbad805871aadeba1e14d251f56ae38ad96de.cu | /**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek)
* @date 2012-2016
* @copyright University of Pennsylvania & STUDENT
*/
#include <cmath>
#include <cstdio>
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/execution_policy.h>
#include <util/checkCUDAError.h>
#include <util/tiny_gltf_loader.h>
#include "rasterizeTools.h"
#include "rasterize.h"
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <algorithm>
#define Perspective_Correct_Toggle 1
#define BackFaceCulling_Toggle 1
#define K_Buffer_Toggle 1
#define Bilinear_Color_Filter_Toggle 1
#define Naive_Sort_Toggle 1
#define Alpha_Intensity 0.3f
RenderMode curr_Mode = r_Triangle;
//Tip: the model matrix properties can be changed in main.cpp
// Timer
//int counter = 0;
//float time_ap = 0, time_r = 0, time_f = 0, time_s = 0;
namespace {
typedef unsigned short VertexIndex;
typedef glm::vec3 VertexAttributePosition;
typedef glm::vec3 VertexAttributeNormal;
typedef glm::vec2 VertexAttributeTexcoord;
typedef unsigned char TextureData;
typedef unsigned char BufferByte;
enum PrimitiveType {
Point = 1,
Line = 2,
Triangle = 3
};
struct VertexOut {
glm::vec4 pos;
// TODO: add new attributes to your VertexOut
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor; // eye space normal used for shading, because normals are not preserved by the perspective transform
// glm::vec3 col;
glm::vec2 texcoord0;
TextureData* dev_diffuseTex = NULL;
int texWidth, texHeight;
// ...
};
struct Primitive {
PrimitiveType primitiveType = Triangle; // C++ 11 init
VertexOut v[3];
};
struct Fragment {
glm::vec3 color;
// TODO: add new attributes to your Fragment
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor;
VertexAttributeTexcoord texcoord0;
TextureData* dev_diffuseTex;
// ...
int TexWidth, TexHeight;
glm::vec4 K_buffer[4];
};
struct PrimitiveDevBufPointers {
int primitiveMode; //from tinygltfloader macro
PrimitiveType primitiveType;
int numPrimitives;
int numIndices;
int numVertices;
// Vertex In, const after loaded
VertexIndex* dev_indices;
VertexAttributePosition* dev_position;
VertexAttributeNormal* dev_normal;
VertexAttributeTexcoord* dev_texcoord0;
// Materials, add more attributes when needed
TextureData* dev_diffuseTex;
int diffuseTexWidth;
int diffuseTexHeight;
// TextureData* dev_specularTex;
// TextureData* dev_normalTex;
// ...
// Vertex Out, vertex used for rasterization, this is changing every frame
VertexOut* dev_verticesOut;
// TODO: add more attributes when needed
};
struct KBuffer4 {
float depths[4];
};
}
static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap;
static int width = 0;
static int height = 0;
static int totalNumPrimitives = 0;
static Primitive *dev_primitives = NULL;
static Fragment *dev_fragmentBuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static int *dev_mutex = NULL;
static int * dev_depth = NULL; // you might need this buffer when doing depth test
static KBuffer4 *dev_k_buffer = NULL;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0.9; // note: .w is an unsigned char, so this stores 0
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
// From Wikipedia: https://en.wikipedia.org/wiki/Bilinear_filtering Part "Sample Code"
__device__ __host__ glm::vec3 getBilinearFilteredPixelColor(TextureData* tex, glm::vec2 uv, int texWidth, int texHeight) {
float u = uv.s * texWidth - 0.5f;
float v = uv.t * texHeight - 0.5f;
int x = glm::floor(u);
int y = glm::floor(v);
float u_ratio = u - x;
float v_ratio = v - y;
float u_opposite = 1 - u_ratio;
float v_opposite = 1 - v_ratio;
int i0 = 3 * (x + y * texWidth);
int i1 = 3 * ((x + 1) + y * texWidth);
int i2 = 3 * (x + (y + 1) * texWidth);
int i3 = 3 * ((x + 1) + (y + 1) * texWidth);
float red = (tex[i0] * u_opposite + tex[i1] * u_ratio) * v_opposite + (tex[i2] * u_opposite + tex[i3] * u_ratio) * v_ratio;
float green = (tex[i0 + 1] * u_opposite + tex[i1 + 1] * u_ratio) * v_opposite + (tex[i2 + 1] * u_opposite + tex[i3 + 1] * u_ratio) * v_ratio;
float blue = (tex[i0 + 2] * u_opposite + tex[i1 + 2] * u_ratio) * v_opposite + (tex[i2 + 2] * u_opposite + tex[i3 + 2] * u_ratio) * v_ratio;
return glm::vec3(red, green, blue) / 255.0f;
}
/**
* Writes fragment colors to the framebuffer
*/
__global__
void render(int w, int h, Fragment *fragmentBuffer, glm::vec3 *framebuffer, RenderMode renderMode) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
//framebuffer[index] = fragmentBuffer[index].color;
Fragment fgbuffer = fragmentBuffer[index];
// TODO: add your fragment shader code here
glm::vec3 normal = fgbuffer.eyeNor;
glm::vec3 lightDir = glm::normalize(glm::vec3(-3.0f, 5.0f, 5.0f) - fgbuffer.eyePos);
float lambertian = glm::clamp(glm::dot(normal, lightDir), 0.0f, 1.0f);
float specular = 0.0f;
if (lambertian > 0.0f) {
glm::vec3 viewDir = glm::normalize(-fgbuffer.eyePos);
//this is blinn phong
glm::vec3 halfDir = glm::normalize(lightDir + viewDir);
float specAngle = glm::clamp(glm::dot(halfDir, normal), 0.0f, 1.0f);
specular = glm::pow(specAngle, 16.0f);
}
glm::vec3 ambientColor = glm::vec3(0.0f, 0.0f, 0.0f);
glm::vec3 diffuseColor;
glm::vec3 specColor = glm::vec3(1.0, 1.0, 1.0);
#if K_Buffer_Toggle
float a = Alpha_Intensity;
/*diffuseColor = a * glm::vec3(fgbuffer.K_buffer[3]);
diffuseColor = a * glm::vec3(fgbuffer.K_buffer[2]) + (1 - a)* diffuseColor;
diffuseColor = a * glm::vec3(fgbuffer.K_buffer[1]) + (1 - a)* diffuseColor;
diffuseColor = a * glm::vec3(fgbuffer.K_buffer[0]) + (1 - a)* diffuseColor;
*/
fgbuffer.K_buffer[3] = glm::vec4(a * glm::vec3(fgbuffer.K_buffer[3]) + (1 - a) * glm::vec3(0.0f, 0.0f, 0.0f), fgbuffer.K_buffer[3].w);
for (int i = 3; i > 0; i--) {
if (fgbuffer.K_buffer[i].w == 1.0f)
fgbuffer.K_buffer[i] = glm::vec4(0.0f,0.0f,0.0f,1.0f);
fgbuffer.K_buffer[i-1] = glm::vec4((a * glm::vec3(fgbuffer.K_buffer[i-1]) + (1 - a)*glm::vec3(fgbuffer.K_buffer[i])), fgbuffer.K_buffer[i-1].w);
}
diffuseColor = glm::vec3(fgbuffer.K_buffer[0]);
specular = 0;
lambertian = 1.0f;
#else
if (fgbuffer.dev_diffuseTex != NULL)
#if Bilinear_Color_Filter_Toggle
diffuseColor = getBilinearFilteredPixelColor(fgbuffer.dev_diffuseTex, fgbuffer.texcoord0, fgbuffer.TexWidth, fgbuffer.TexHeight);
#else
diffuseColor = fgbuffer.color;
#endif
else
diffuseColor = fgbuffer.color;
#endif
glm::vec3 colorLinear = ambientColor + lambertian * diffuseColor + specular * specColor;
framebuffer[index] = colorLinear;
if(renderMode == r_Point || renderMode == r_Line)
framebuffer[index] = diffuseColor;
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h) {
width = w;
height = h;
cudaFree(dev_fragmentBuffer);
cudaMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment));
cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
cudaFree(dev_framebuffer);
cudaMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
cudaMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
cudaFree(dev_depth);
cudaMalloc(&dev_depth, width * height * sizeof(int));
#if K_Buffer_Toggle
cudaFree(dev_k_buffer);
cudaMalloc((void**)&dev_k_buffer, width * height * sizeof(KBuffer4));
#endif
cudaFree(dev_mutex);
cudaMalloc(&dev_mutex, width * height * sizeof(int));
checkCUDAError("rasterizeInit");
}
__global__
void initDepth(int w, int h, int * depth)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
depth[index] = INT_MAX;
}
}
__global__
void initKBuffer4(int w, int h, KBuffer4 * k_buffer)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
k_buffer[index].depths[0] = 1.0f;
k_buffer[index].depths[1] = 1.0f;
k_buffer[index].depths[2] = 1.0f;
k_buffer[index].depths[3] = 1.0f;
}
}
__global__
void initKBufferInFrag(int w, int h, Fragment * fragbuffer)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
fragbuffer[index].K_buffer[0].w = 1.0f;
fragbuffer[index].K_buffer[1].w = 1.0f;
fragbuffer[index].K_buffer[2].w = 1.0f;
fragbuffer[index].K_buffer[3].w = 1.0f;
}
}
/**
* kern function with support for stride to sometimes replace cudaMemcpy
* One thread is responsible for copying one component
*/
__global__
void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) {
// Attribute (vec3 position)
// component (3 * float)
// byte (4 * byte)
// id of component
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
int count = i / n;
int offset = i - count * n; // which component of the attribute
for (int j = 0; j < componentTypeByteSize; j++) {
dev_dst[count * componentTypeByteSize * n
+ offset * componentTypeByteSize
+ j]
=
dev_src[byteOffset
+ count * (byteStride == 0 ? componentTypeByteSize * n : byteStride)
+ offset * componentTypeByteSize
+ j];
}
}
}
__global__
void _nodeMatrixTransform(
int numVertices,
VertexAttributePosition* position,
VertexAttributeNormal* normal,
glm::mat4 MV, glm::mat3 MV_normal) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f));
normal[vid] = glm::normalize(MV_normal * normal[vid]);
}
}
glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) {
glm::mat4 curMatrix(1.0);
const std::vector<double> &m = n.matrix;
if (m.size() > 0) {
// matrix, copy it
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
curMatrix[i][j] = (float)m.at(4 * i + j);
}
}
}
else {
// no matrix, use rotation, scale, translation
if (n.translation.size() > 0) {
curMatrix[3][0] = n.translation[0];
curMatrix[3][1] = n.translation[1];
curMatrix[3][2] = n.translation[2];
}
if (n.rotation.size() > 0) {
glm::mat4 R;
glm::quat q;
q[0] = n.rotation[0];
q[1] = n.rotation[1];
q[2] = n.rotation[2];
q[3] = n.rotation[3]; // glTF stores rotation as [x, y, z, w]
R = glm::mat4_cast(q);
curMatrix = curMatrix * R;
}
if (n.scale.size() > 0) {
curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2]));
}
}
return curMatrix;
}
void traverseNode(
std::map<std::string, glm::mat4> & n2m,
const tinygltf::Scene & scene,
const std::string & nodeString,
const glm::mat4 & parentMatrix
)
{
const tinygltf::Node & n = scene.nodes.at(nodeString);
glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n);
n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M));
auto it = n.children.begin();
auto itEnd = n.children.end();
for (; it != itEnd; ++it) {
traverseNode(n2m, scene, *it, M);
}
}
void rasterizeSetBuffers(const tinygltf::Scene & scene) {
totalNumPrimitives = 0;
std::map<std::string, BufferByte*> bufferViewDevPointers;
// 1. copy all `bufferViews` to device memory
{
std::map<std::string, tinygltf::BufferView>::const_iterator it(
scene.bufferViews.begin());
std::map<std::string, tinygltf::BufferView>::const_iterator itEnd(
scene.bufferViews.end());
for (; it != itEnd; it++) {
const std::string key = it->first;
const tinygltf::BufferView &bufferView = it->second;
if (bufferView.target == 0) {
continue; // Unsupported bufferView.
}
const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer);
BufferByte* dev_bufferView;
cudaMalloc(&dev_bufferView, bufferView.byteLength);
cudaMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, cudaMemcpyHostToDevice);
checkCUDAError("Set BufferView Device Mem");
bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView));
}
}
// 2. for each mesh:
// for each primitive:
// build device buffers of indices, material, and each attribute
// and store these pointers in a map
{
std::map<std::string, glm::mat4> nodeString2Matrix;
auto rootNodeNamesList = scene.scenes.at(scene.defaultScene);
{
auto it = rootNodeNamesList.begin();
auto itEnd = rootNodeNamesList.end();
for (; it != itEnd; ++it) {
traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f));
}
}
// parse through node to access mesh
auto itNode = nodeString2Matrix.begin();
auto itEndNode = nodeString2Matrix.end();
for (; itNode != itEndNode; ++itNode) {
const tinygltf::Node & N = scene.nodes.at(itNode->first);
const glm::mat4 & matrix = itNode->second;
const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix)));
auto itMeshName = N.meshes.begin();
auto itEndMeshName = N.meshes.end();
for (; itMeshName != itEndMeshName; ++itMeshName) {
const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName);
auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>()));
std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second;
// for each primitive
for (size_t i = 0; i < mesh.primitives.size(); i++) {
const tinygltf::Primitive &primitive = mesh.primitives[i];
if (primitive.indices.empty())
return;
// TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes
VertexIndex* dev_indices = NULL;
VertexAttributePosition* dev_position = NULL;
VertexAttributeNormal* dev_normal = NULL;
VertexAttributeTexcoord* dev_texcoord0 = NULL;
// ----------Indices-------------
const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView);
BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView);
// assume type is SCALAR for indices
int n = 1;
int numIndices = indexAccessor.count;
int componentTypeByteSize = sizeof(VertexIndex);
int byteLength = numIndices * n * componentTypeByteSize;
dim3 numThreadsPerBlock(128);
dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
cudaMalloc(&dev_indices, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
numIndices,
(BufferByte*)dev_indices,
dev_bufferView,
n,
indexAccessor.byteStride,
indexAccessor.byteOffset,
componentTypeByteSize);
checkCUDAError("Set Index Buffer");
// ---------Primitive Info-------
// Warning: LINE_STRIP is not supported in tinygltfloader
int numPrimitives;
PrimitiveType primitiveType;
switch (primitive.mode) {
case TINYGLTF_MODE_TRIANGLES:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices / 3;
break;
case TINYGLTF_MODE_TRIANGLE_STRIP:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_TRIANGLE_FAN:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_LINE:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices / 2;
break;
case TINYGLTF_MODE_LINE_LOOP:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices + 1;
break;
case TINYGLTF_MODE_POINTS:
primitiveType = PrimitiveType::Point;
numPrimitives = numIndices;
break;
default:
// output error
break;
};
// ----------Attributes-------------
auto it(primitive.attributes.begin());
auto itEnd(primitive.attributes.end());
int numVertices = 0;
// for each attribute
for (; it != itEnd; it++) {
const tinygltf::Accessor &accessor = scene.accessors.at(it->second);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView);
int n = 1;
if (accessor.type == TINYGLTF_TYPE_SCALAR) {
n = 1;
}
else if (accessor.type == TINYGLTF_TYPE_VEC2) {
n = 2;
}
else if (accessor.type == TINYGLTF_TYPE_VEC3) {
n = 3;
}
else if (accessor.type == TINYGLTF_TYPE_VEC4) {
n = 4;
}
BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView);
BufferByte ** dev_attribute = NULL;
numVertices = accessor.count;
int componentTypeByteSize;
// Note: since the type of our attribute array (dev_position) is static (float32)
// We assume the glTF model attribute type are 5126(FLOAT) here
if (it->first.compare("POSITION") == 0) {
componentTypeByteSize = sizeof(VertexAttributePosition) / n;
dev_attribute = (BufferByte**)&dev_position;
}
else if (it->first.compare("NORMAL") == 0) {
componentTypeByteSize = sizeof(VertexAttributeNormal) / n;
dev_attribute = (BufferByte**)&dev_normal;
}
else if (it->first.compare("TEXCOORD_0") == 0) {
componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n;
dev_attribute = (BufferByte**)&dev_texcoord0;
}
std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n';
dim3 numThreadsPerBlock(128);
dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
int byteLength = numVertices * n * componentTypeByteSize;
cudaMalloc(dev_attribute, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
n * numVertices,
*dev_attribute,
dev_bufferView,
n,
accessor.byteStride,
accessor.byteOffset,
componentTypeByteSize);
std::string msg = "Set Attribute Buffer: " + it->first;
checkCUDAError(msg.c_str());
}
// malloc for VertexOut
VertexOut* dev_vertexOut;
cudaMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut));
checkCUDAError("Malloc VertexOut Buffer");
// ----------Materials-------------
// You can only worry about this part once you started to
// implement textures for your rasterizer
TextureData* dev_diffuseTex = NULL;
int diffuseTexWidth = 0;
int diffuseTexHeight = 0;
if (!primitive.material.empty()) {
const tinygltf::Material &mat = scene.materials.at(primitive.material);
printf("material.name = %s\n", mat.name.c_str());
if (mat.values.find("diffuse") != mat.values.end()) {
std::string diffuseTexName = mat.values.at("diffuse").string_value;
if (scene.textures.find(diffuseTexName) != scene.textures.end()) {
const tinygltf::Texture &tex = scene.textures.at(diffuseTexName);
if (scene.images.find(tex.source) != scene.images.end()) {
const tinygltf::Image &image = scene.images.at(tex.source);
size_t s = image.image.size() * sizeof(TextureData);
cudaMalloc(&dev_diffuseTex, s);
cudaMemcpy(dev_diffuseTex, &image.image.at(0), s, cudaMemcpyHostToDevice);
diffuseTexWidth = image.width;
diffuseTexHeight = image.height;
checkCUDAError("Set Texture Image data");
}
}
}
// TODO: write your code for other materials
// You may have to take a look at tinygltfloader
// You can also use the diffuse-loading code above as a starting point
}
// ---------Node hierarchy transform--------
cudaDeviceSynchronize();
// Transform from local to camera
dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > (
numVertices,
dev_position,
dev_normal,
matrix,
matrixNormal);
checkCUDAError("Node hierarchy transformation");
// at the end of the for loop of primitive
// push dev pointers to map
primitiveVector.push_back(PrimitiveDevBufPointers{
primitive.mode,
primitiveType,
numPrimitives,
numIndices,
numVertices,
dev_indices,
dev_position,
dev_normal,
dev_texcoord0,
dev_diffuseTex,
diffuseTexWidth,
diffuseTexHeight,
dev_vertexOut //VertexOut
});
totalNumPrimitives += numPrimitives;
} // for each primitive
} // for each mesh
} // for each node
}
// 3. Malloc for dev_primitives
{
cudaMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive));
}
// Finally, cudaFree raw dev_bufferViews
{
std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin());
std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end());
//bufferViewDevPointers
for (; it != itEnd; it++) {
cudaFree(it->second);
}
checkCUDAError("Free BufferView Device Mem");
}
}
__global__
void _vertexTransformAndAssembly(
int numVertices,
PrimitiveDevBufPointers primitive,
glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal,
int width, int height) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
// TODO: Apply vertex transformation here
// Multiply each vertex position by the MVP matrix; this transforms it into clip space
glm::vec4 position = glm::vec4(primitive.dev_position[vid], 1.0f);
glm::vec4 clipVPosition = MVP * position;
// Then divide the pos by its w element to transform into NDC space
clipVPosition /= clipVPosition.w;
// Finally transform x and y to viewport space
clipVPosition.x = 0.5f * (float)width * (clipVPosition.x + 1.0f); // Viewport(Screen / Window) Space
clipVPosition.y = 0.5f * (float)height * (1.0f - clipVPosition.y); // Viewport(Screen / Window) Space
primitive.dev_verticesOut[vid].pos = clipVPosition;
primitive.dev_verticesOut[vid].eyeNor = glm::normalize(MV_normal * primitive.dev_normal[vid]);
glm::vec4 eyeSpacePos = (MV * glm::vec4(primitive.dev_position[vid], 1.0f));
eyeSpacePos /= eyeSpacePos.w;
primitive.dev_verticesOut[vid].eyePos = glm::vec3(eyeSpacePos);
if (primitive.dev_diffuseTex != NULL) {
primitive.dev_verticesOut[vid].dev_diffuseTex = primitive.dev_diffuseTex;
primitive.dev_verticesOut[vid].texcoord0 = primitive.dev_texcoord0[vid];
primitive.dev_verticesOut[vid].texWidth = primitive.diffuseTexWidth;
primitive.dev_verticesOut[vid].texHeight = primitive.diffuseTexHeight;
}
// TODO: Apply vertex assembly here
// Assemble all attribute arrays into the primitive array
}
}
static int curPrimitiveBeginId = 0;
__global__
void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) {
// index id
int iid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (iid < numIndices) {
// TODO: uncomment the following code for a start
// This is primitive assembly for triangles
int pid; // id for cur primitives vector
if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES) {
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
// TODO: other primitive types (point, line)
}
}
__device__ float cuda_clamp(float input, float min, float max) {
float result = input;
if (result < min)
result = min;
if (result > max)
result = max;
return result;
}
__device__ __host__ float cuda_getPerspectiveCorrectZ(glm::vec3 tri[3], glm::vec3 baryvalue) {
float inverse_Z = baryvalue.x / (tri[0].z + FLT_EPSILON) + baryvalue.y / (tri[1].z + FLT_EPSILON) + baryvalue.z / (tri[2].z + FLT_EPSILON);
return 1.0f / inverse_Z;
}
__device__ __host__ glm::vec2 cuda_getPerspectiveCorrectUV(glm::vec2 tri_uvs[3], glm::vec3 tri[3], glm::vec3 baryvalue, float Z) {
glm::vec2 correct_texcoords = Z * glm::vec2(
baryvalue.x * tri_uvs[0] / (tri[0].z + FLT_EPSILON) +
baryvalue.y * tri_uvs[1] / (tri[1].z + FLT_EPSILON) +
baryvalue.z * tri_uvs[2] / (tri[2].z + FLT_EPSILON));
return correct_texcoords;
}
__device__ __host__ glm::vec3 cuda_getPerspectiveCorrectNormal(glm::vec3 tri_normals[3], glm::vec3 tri[3], glm::vec3 baryvalue, float Z) {
glm::vec3 correct_normal = glm::normalize(Z * glm::vec3(
baryvalue.x * tri_normals[0] / (tri[0].z + FLT_EPSILON) +
baryvalue.y * tri_normals[1] / (tri[1].z + FLT_EPSILON) +
baryvalue.z * tri_normals[2] / (tri[2].z + FLT_EPSILON)));
return correct_normal;
}
__host__ __device__ void naive_sort(glm::vec4 *k_buffer4) {
for (int i = 0; i < 3; i++) {
float min = k_buffer4[i].w;
int n = i;
for (int j = i + 1; j < 4; j++) {
if (k_buffer4[j].w < min) {
n = j;
min = k_buffer4[j].w;
}
}
glm::vec4 temp = k_buffer4[i];
k_buffer4[i] = k_buffer4[n];
k_buffer4[n] = temp;
}
}
// From https://devtalk.nvidia.com/default/topic/492068/atomicmin-with-float/
__device__ static
float fatomicMin(float *addr, float value)
{
float old = *addr, assumed;
if (old <= value) return old;
do {
assumed = old;
old = __int_as_float(atomicCAS((int*)addr, __float_as_int(assumed), __float_as_int(value)));
} while (old != assumed);
return old;
}
__global__ void rasterizer(Fragment *fragmentBuffer, Primitive *primitives, int *depth, int num_primitives, int height, int width, int *mutex, KBuffer4* k_buffer) {
// index of primitives
int pid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pid < num_primitives) {
Primitive this_primitives = primitives[pid];
#if BackFaceCulling_Toggle
if (glm::dot(this_primitives.v[0].eyeNor, -this_primitives.v[0].eyePos) < 0.0f)
return;
#endif
glm::vec3 tri[3];
//tri[0] = glm::vec3(0.5f - this_primitives.v[0].pos[0] * 3.0f, 0.8f - this_primitives.v[0].pos[1] * 3.0f, this_primitives.v[0].pos[2]);
//tri[1] = glm::vec3(0.5f - this_primitives.v[1].pos[0] * 3.0f, 0.8f - this_primitives.v[1].pos[1] * 3.0f, this_primitives.v[1].pos[2]);
//tri[2] = glm::vec3(0.5f - this_primitives.v[2].pos[0] * 3.0f, 0.8f - this_primitives.v[2].pos[1] * 3.0f, this_primitives.v[2].pos[2]);
tri[0] = glm::vec3(this_primitives.v[0].pos);
tri[1] = glm::vec3(this_primitives.v[1].pos);
tri[2] = glm::vec3(this_primitives.v[2].pos);
AABB bbox;
bbox = getAABBForTriangle(tri);
/*bbox.min.x *= width;
bbox.max.x *= width;
bbox.min.y *= height;
bbox.max.y *= height;*/
//clamp inside of the screen
		bbox.min.x = glm::clamp(bbox.min.x, 0.0f, float(width - 1));
		bbox.max.x = glm::clamp(bbox.max.x, 0.0f, float(width - 1));
		bbox.min.y = glm::clamp(bbox.min.y, 0.0f, float(height - 1));
		bbox.max.y = glm::clamp(bbox.max.y, 0.0f, float(height - 1));
//scan the pixels inside of the bbox
for (int i = bbox.min.x; i <= bbox.max.x; i++)
for (int j = bbox.min.y; j <= bbox.max.y; j++) {
glm::vec2 point(i, j);
glm::vec3 baryvalue = calculateBarycentricCoordinate(tri, point);
if (isBarycentricCoordInBounds(baryvalue)) {
int pixel_index = i + j*width;
float ffragDepth;
#if Perspective_Correct_Toggle
ffragDepth = cuda_getPerspectiveCorrectZ(tri, baryvalue);
#else
ffragDepth = baryvalue[0] * this_primitives.v[0].pos[2] + baryvalue[1] * this_primitives.v[1].pos[2] + baryvalue[2] * this_primitives.v[2].pos[2];
#endif
int ifragDepth = INT_MAX * ffragDepth;
#if K_Buffer_Toggle
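				// Find the farthest depth currently stored in this pixel's k-buffer;
				// the new fragment is only inserted if it is closer than that slot.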
float maxDepth = -1.0f;
int max_id = 0;
for (int m = 3; m >0; m--) {
if (maxDepth < k_buffer[pixel_index].depths[m]) {
maxDepth = k_buffer[pixel_index].depths[m];
max_id = m;
}
}
if (ffragDepth < maxDepth) {
fatomicMin(&k_buffer[pixel_index].depths[max_id], ffragDepth);
#else
if (ifragDepth < depth[pixel_index]) {
atomicMin(&depth[pixel_index], ifragDepth);
#endif
bool isSet;
do {
isSet = (atomicCAS(&mutex[pixel_index], 0, 1) == 0);
if (isSet) {
// Critical section goes here.
// The critical section MUST be inside the wait loop;
// if it is afterward, a deadlock will occur.
//fragmentBuffer[pixel_index].color = glm::vec3(1, 1, 1);
#if Perspective_Correct_Toggle
glm::vec2 tri_uvs[3];
tri_uvs[0] = this_primitives.v[0].texcoord0;
tri_uvs[1] = this_primitives.v[1].texcoord0;
tri_uvs[2] = this_primitives.v[2].texcoord0;
fragmentBuffer[pixel_index].texcoord0 = cuda_getPerspectiveCorrectUV(tri_uvs, tri, baryvalue, ffragDepth);
//fragmentBuffer[pixel_index].texcoord0 = baryvalue[0] * this_primitives.v[0].texcoord0 + baryvalue[1] * this_primitives.v[1].texcoord0 + baryvalue[2] * this_primitives.v[2].texcoord0;
#else
fragmentBuffer[pixel_index].texcoord0 = baryvalue[0] * this_primitives.v[0].texcoord0 + baryvalue[1] * this_primitives.v[1].texcoord0 + baryvalue[2] * this_primitives.v[2].texcoord0;
#endif
#if Perspective_Correct_Toggle
glm::vec3 tri_normals[3];
tri_normals[0] = this_primitives.v[0].eyeNor;
tri_normals[1] = this_primitives.v[1].eyeNor;
tri_normals[2] = this_primitives.v[2].eyeNor;
fragmentBuffer[pixel_index].eyeNor = cuda_getPerspectiveCorrectNormal(tri_normals, tri, baryvalue, ffragDepth);
#else
fragmentBuffer[pixel_index].eyeNor = baryvalue[0] * this_primitives.v[0].eyeNor + baryvalue[1] * this_primitives.v[1].eyeNor + baryvalue[2] * this_primitives.v[2].eyeNor;
#endif
fragmentBuffer[pixel_index].eyePos = baryvalue[0] * this_primitives.v[0].eyePos + baryvalue[1] * this_primitives.v[1].eyePos + baryvalue[2] * this_primitives.v[2].eyePos;
fragmentBuffer[pixel_index].TexWidth = this_primitives.v[0].texWidth;
fragmentBuffer[pixel_index].TexHeight = this_primitives.v[0].texHeight;
#if K_Buffer_Toggle
if (this_primitives.v[0].dev_diffuseTex != NULL) {
fragmentBuffer[pixel_index].dev_diffuseTex = this_primitives.v[0].dev_diffuseTex;
#if Bilinear_Color_Filter_Toggle
fragmentBuffer[pixel_index].color = getBilinearFilteredPixelColor(this_primitives.v[0].dev_diffuseTex, fragmentBuffer[pixel_index].texcoord0, this_primitives.v[0].texWidth, this_primitives.v[0].texHeight);
#else
fragmentBuffer[pixel_index].color = glm::vec3(1.0f, 1.0f, 1.0f);
#endif
//fragmentBuffer[pixel_index].K_buffer[max_id] = glm::vec4(fragmentBuffer[pixel_index].texcoord0, 0.0f, ffragDepth);
}
else
fragmentBuffer[pixel_index].color = glm::vec3(1, 1, 1);
//K_buffer RBGZ
fragmentBuffer[pixel_index].K_buffer[max_id] = glm::vec4(fragmentBuffer[pixel_index].color, ffragDepth);
//sort fragment k-buffer
#if Naive_Sort_Toggle
naive_sort(fragmentBuffer[pixel_index].K_buffer);
#else
float keys[4] = { fragmentBuffer[pixel_index].K_buffer[0].w, fragmentBuffer[pixel_index].K_buffer[1].w, fragmentBuffer[pixel_index].K_buffer[2].w , fragmentBuffer[pixel_index].K_buffer[3].w };
thrust::sort_by_key(thrust::device, keys, keys + 4, fragmentBuffer[pixel_index].K_buffer);
#endif
for (int m = 0; m < 4; m++) {
k_buffer[pixel_index].depths[m] = fragmentBuffer[pixel_index].K_buffer[m].w;
}
#else
if (this_primitives.v[0].dev_diffuseTex != NULL) {
fragmentBuffer[pixel_index].dev_diffuseTex = this_primitives.v[0].dev_diffuseTex;
fragmentBuffer[pixel_index].color = glm::vec3(1.0f, 1.0f, 1.0f);
//fragmentBuffer[pixel_index].color = getBilinearFilteredPixelColor(this_primitives.v[0].dev_diffuseTex, fragmentBuffer[pixel_index].texcoord0, this_primitives.v[0].texWidth, this_primitives.v[0].texHeight);
}
else
fragmentBuffer[pixel_index].color = glm::vec3(1, 1, 1);
#endif
//k_max_idx[pixel_index] = 3;
}
if (isSet) {
mutex[pixel_index] = 0;
}
} while (!isSet);
}
}
}
}
}
__global__ void rasterizer_Line(Fragment *fragmentBuffer, Primitive *primitives, int *depth, int num_primitives, int height, int width, int *mutex) {
// index of primitives
int pid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pid < num_primitives) {
Primitive this_primitives = primitives[pid];
//if (glm::dot(this_primitives.v[0].eyeNor, -this_primitives.v[0].eyePos) < 0.0f)
// return;
//3 edges for each triangle
for (int i = 0; i < 3; i++) {
VertexOut v_outs[2];
v_outs[0] = this_primitives.v[i % 3];
v_outs[1] = this_primitives.v[(i + 1) % 3];
glm::vec3 v_start, v_end;
v_start = glm::vec3(v_outs[0].pos);
v_end = glm::vec3(v_outs[1].pos);
			v_start = glm::clamp(v_start, glm::vec3(0, 0, 0), glm::vec3(width - 1, height - 1, 1.0f));
			v_end = glm::clamp(v_end, glm::vec3(0, 0, 0), glm::vec3(width - 1, height - 1, 1.0f));
glm::vec3 v_dir = glm::normalize(v_end - v_start);
int j = 0;
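			// Step one unit along the edge direction from the start vertex,
			// stopping once the current sample has passed the end vertex.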
while (true) {
glm::vec3 v_curr = v_start + v_dir * float(j);
j++;
if (glm::dot(v_end - v_curr, v_dir) < 0.0f)
break;
int px, py;
px = v_curr.x;
py = v_curr.y;
int pixel_index = px + py*width;
glm::vec2 baryvalue;
baryvalue[0] = glm::length(v_curr - v_start) / glm::length(v_end - v_start);
baryvalue[1] = 1.0f - baryvalue[0];
//Get perspective Correct Z
float ffragDepth = baryvalue[0] / v_start.z + baryvalue[1] / v_end.z;
ffragDepth = 1.0f / ffragDepth;
int ifragDepth = INT_MAX * ffragDepth;
if (ifragDepth < depth[pixel_index]) {
atomicMin(&depth[pixel_index], ifragDepth);
bool isSet;
do {
isSet = (atomicCAS(&mutex[pixel_index], 0, 1) == 0);
if (isSet) {
//TexCoords(Perspective Correct)
fragmentBuffer[pixel_index].texcoord0 = (ffragDepth*(
v_outs[0].texcoord0 * baryvalue[0] / v_outs[0].pos.z +
v_outs[1].texcoord0 * baryvalue[1] / v_outs[1].pos.z
)
);
//Normals(Per. Cor.)
fragmentBuffer[pixel_index].eyeNor = glm::normalize(ffragDepth*(
v_outs[0].eyeNor * baryvalue[0] / v_outs[0].pos.z +
v_outs[1].eyeNor * baryvalue[1] / v_outs[1].pos.z
)
);
//EyePos
fragmentBuffer[pixel_index].eyePos = baryvalue[0] * v_outs[0].eyePos + baryvalue[1] * v_outs[1].eyePos;
//
fragmentBuffer[pixel_index].TexWidth = v_outs[0].texWidth;
fragmentBuffer[pixel_index].TexHeight = v_outs[0].texHeight;
//Tex
if (v_outs[0].dev_diffuseTex != NULL) {
fragmentBuffer[pixel_index].dev_diffuseTex = v_outs[0].dev_diffuseTex;
//fragmentBuffer[pixel_index].color = getBilinearFilteredPixelColor(this_primitives.v[0].dev_diffuseTex, fragmentBuffer[pixel_index].texcoord0, this_primitives.v[0].texWidth, this_primitives.v[0].texHeight);
}
else
fragmentBuffer[pixel_index].color = glm::vec3(1, 1, 1);
}
if (isSet) {
mutex[pixel_index] = 0;
}
} while (!isSet);
}
}
}
}
}
__global__ void rasterizer_Point(Fragment *fragmentBuffer, Primitive *primitives, int *depth, int num_primitives, int height, int width, int *mutex) {
// index of primitives
int pid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pid < num_primitives) {
Primitive this_primitives = primitives[pid];
//if (glm::dot(this_primitives.v[0].eyeNor, -this_primitives.v[0].eyePos) < 0.0f)
// return;
//3 points for each triangle
for (int i = 0; i < 3; i++) {
			int px = int(this_primitives.v[i].pos.x);
			int py = int(this_primitives.v[i].pos.y);
			if (px < 0 || px >= width || py < 0 || py >= height)
				continue;
			int pixel_index = px + py * width;
float ffragDepth = this_primitives.v[i].pos.z;
int ifragDepth = INT_MAX * ffragDepth;
if (ifragDepth < depth[pixel_index]) {
atomicMin(&depth[pixel_index], ifragDepth);
bool isSet;
do {
isSet = (atomicCAS(&mutex[pixel_index], 0, 1) == 0);
if (isSet) {
//TexCoords
fragmentBuffer[pixel_index].texcoord0 = this_primitives.v[i].texcoord0;
//Normals(Per. Cor.)
fragmentBuffer[pixel_index].eyeNor = this_primitives.v[i].eyeNor;
//EyePos
fragmentBuffer[pixel_index].eyePos = this_primitives.v[i].eyePos;
//
fragmentBuffer[pixel_index].TexWidth = this_primitives.v[0].texWidth;
fragmentBuffer[pixel_index].TexHeight = this_primitives.v[0].texHeight;
//Tex
if (this_primitives.v[0].dev_diffuseTex != NULL) {
fragmentBuffer[pixel_index].dev_diffuseTex = this_primitives.v[0].dev_diffuseTex;
//fragmentBuffer[pixel_index].color = getBilinearFilteredPixelColor(this_primitives.v[0].dev_diffuseTex, fragmentBuffer[pixel_index].texcoord0, this_primitives.v[0].texWidth, this_primitives.v[0].texHeight);
}
else
fragmentBuffer[pixel_index].color = glm::vec3(1, 1, 1);
}
if (isSet) {
mutex[pixel_index] = 0;
}
} while (!isSet);
}
}
}
}
struct BackFaceCulling_Cmp {
__host__ __device__ bool operator()(const Primitive &p) {
glm::vec3 face_normal = glm::cross(glm::vec3(p.v[1].pos - p.v[0].pos), glm::vec3(p.v[2].pos - p.v[0].pos));
glm::vec3 inverse_eye_dir = -p.v[0].eyePos;
		// because the NDC-to-pixel transform mirrors the vertices, the front-face winding becomes clockwise
return glm::dot(face_normal, inverse_eye_dir) > 0.0f;
}
};
/**
* Perform rasterization.
*/
void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal) {
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
// Execute your rasterization pipeline here
// (See README for rasterization pipeline outline.)
/*float time_elapsed=0;
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start,0);
*/
// Vertex Process & primitive assembly
{
curPrimitiveBeginId = 0;
dim3 numThreadsPerBlock(128);
auto it = mesh2PrimitivesMap.begin();
auto itEnd = mesh2PrimitivesMap.end();
for (; it != itEnd; ++it) {
auto p = (it->second).begin(); // each primitive
auto pEnd = (it->second).end();
for (; p != pEnd; ++p) {
dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(p->numVertices, *p, MVP, MV, MV_normal, width, height);
checkCUDAError("Vertex Processing");
cudaDeviceSynchronize();
_primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> >
(p->numIndices,
curPrimitiveBeginId,
dev_primitives,
*p);
checkCUDAError("Primitive Assembly");
curPrimitiveBeginId += p->numPrimitives;
}
}
checkCUDAError("Vertex Processing and Primitive Assembly");
}
/*cudaEventRecord( stop,0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time_elapsed,start,stop);
if (counter < 100) {
time_ap += time_elapsed;
}
else if (counter == 100) {
printf("Vertex Process & primitive Assembly: %f ms\n", time_ap);
}*/
int Culled_totalNumPrimitives = totalNumPrimitives;
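	// Optionally compact away back-facing triangles with stream compaction before rasterization.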
#if BackFaceCulling_Toggle
Primitive* dev_primitives_end = thrust::remove_if(thrust::device, dev_primitives, dev_primitives + totalNumPrimitives, BackFaceCulling_Cmp());
Culled_totalNumPrimitives = dev_primitives_end - dev_primitives;
if (Culled_totalNumPrimitives <= 0)
Culled_totalNumPrimitives = 1;
#endif
cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
initDepth << <blockCount2d, blockSize2d >> >(width, height, dev_depth);
#if K_Buffer_Toggle
initKBuffer4 << <blockCount2d, blockSize2d >> > (width, height, dev_k_buffer);
initKBufferInFrag << <blockCount2d, blockSize2d >> > (width, height, dev_fragmentBuffer);
#endif
//cudaEventRecord(start, 0);
// TODO: rasterize
cudaMemset(dev_mutex, 0, width * height * sizeof(int));
dim3 numThreadsPerBlock(128);
dim3 numBlocksForPrimitives((Culled_totalNumPrimitives + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
if (curr_Mode == r_Point)
rasterizer_Point << <numBlocksForPrimitives, numThreadsPerBlock >> >(dev_fragmentBuffer, dev_primitives, dev_depth, Culled_totalNumPrimitives, height, width, dev_mutex);
else if (curr_Mode == r_Line)
rasterizer_Line << <numBlocksForPrimitives, numThreadsPerBlock >> >(dev_fragmentBuffer, dev_primitives, dev_depth, Culled_totalNumPrimitives, height, width, dev_mutex);
else if (curr_Mode == r_Triangle)
rasterizer << <numBlocksForPrimitives, numThreadsPerBlock >> >(dev_fragmentBuffer, dev_primitives, dev_depth, Culled_totalNumPrimitives, height, width, dev_mutex, dev_k_buffer);
/*cudaEventRecord(stop, 0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time_elapsed, start, stop);
if (counter < 100) {
time_r += time_elapsed;
}
else if (counter == 100) {
printf("Rasterization: %f ms\n", time_r);
}*/
checkCUDAError("rasterization");
//cudaEventRecord(start, 0);
// Copy depthbuffer colors into framebuffer
render << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer, dev_framebuffer, curr_Mode);
/*cudaEventRecord(stop, 0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time_elapsed, start, stop);*/
/*if (counter < 100) {
time_f += time_elapsed;
}
else if (counter == 100) {
printf("Render(Fragment Shader): %f ms\n", time_f);
}*/
//cudaEventRecord(start, 0);
checkCUDAError("fragment shader");
// Copy framebuffer into OpenGL buffer for OpenGL previewing
sendImageToPBO << <blockCount2d, blockSize2d >> >(pbo, width, height, dev_framebuffer);
//cudaEventRecord(stop, 0);
//cudaEventSynchronize(start);
//cudaEventSynchronize(stop);
//cudaEventElapsedTime(&time_elapsed, start, stop);
//if (counter < 100) {
// time_s += time_elapsed;
//}
//else if (counter == 100) {
// printf("SendToPBO: %f ms\n", time_s);
//}
//counter++;
checkCUDAError("copy render result to pbo");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
// deconstruct primitives attribute/indices device buffer
auto it(mesh2PrimitivesMap.begin());
auto itEnd(mesh2PrimitivesMap.end());
for (; it != itEnd; ++it) {
for (auto p = it->second.begin(); p != it->second.end(); ++p) {
cudaFree(p->dev_indices);
cudaFree(p->dev_position);
cudaFree(p->dev_normal);
cudaFree(p->dev_texcoord0);
cudaFree(p->dev_diffuseTex);
cudaFree(p->dev_verticesOut);
//TODO: release other attributes and materials
}
}
////////////
cudaFree(dev_primitives);
dev_primitives = NULL;
cudaFree(dev_fragmentBuffer);
dev_fragmentBuffer = NULL;
cudaFree(dev_framebuffer);
dev_framebuffer = NULL;
cudaFree(dev_depth);
dev_depth = NULL;
#if K_Buffer_Toggle
cudaFree(dev_k_buffer);
dev_k_buffer = NULL;
#endif
cudaFree(dev_mutex);
dev_mutex = NULL;
checkCUDAError("rasterize Free");
}
|
66ee8d91769a94edc5b337cba6b03cb200fb081d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace crfasrnn_caffe {
template <typename Dtype>
__global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a,
const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data,
int* mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype maxval = -FLT_MAX;
int maxidx = -1;
if (bottom_data_a[index] > bottom_data_b[index]) {
// only update for very first bottom_data blob (blob_idx == 0)
if (blob_idx == 0) {
maxval = bottom_data_a[index];
top_data[index] = maxval;
maxidx = blob_idx;
mask[index] = maxidx;
}
} else {
maxval = bottom_data_b[index];
top_data[index] = maxval;
maxidx = blob_idx + 1;
mask[index] = maxidx;
}
}
}
template <typename Dtype>
void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int* mask = NULL;
const int count = top[0]->count();
Dtype* top_data = top[0]->mutable_gpu_data();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD:
caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
top_data);
for (int i = 2; i < bottom.size(); ++i) {
caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data);
}
break;
case EltwiseParameter_EltwiseOp_SUM:
caffe_gpu_set(count, Dtype(0.), top_data);
// TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1?
for (int i = 0; i < bottom.size(); ++i) {
caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data);
}
break;
case EltwiseParameter_EltwiseOp_MAX:
mask = max_idx_.mutable_gpu_data();
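    // Run a pairwise maximum across the bottom blobs, recording in `mask` which blob
    // supplied the maximum at each element so the backward pass can route gradients.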
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxForward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask);
for (int i = 2; i < bottom.size(); ++i) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask);
}
break;
default:
LOG(FATAL) << "Unknown elementwise operation.";
}
}
template <typename Dtype>
__global__ void MaxBackward(const int nthreads, const Dtype* top_diff,
const int blob_idx, const int* mask, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype gradient = 0;
if (mask[index] == blob_idx) {
gradient += top_diff[index];
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const int* mask = NULL;
const int count = top[0]->count();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
for (int i = 0; i < bottom.size(); ++i) {
if (propagate_down[i]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD:
if (stable_prod_grad_) {
bool initialized = false;
for (int j = 0; j < bottom.size(); ++j) {
if (i == j) { continue; }
if (!initialized) {
caffe_copy(count, bottom[j]->gpu_data(), bottom_diff);
initialized = true;
} else {
caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff,
bottom_diff);
}
}
} else {
caffe_gpu_div(count, top_data, bottom_data, bottom_diff);
}
caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff);
break;
case EltwiseParameter_EltwiseOp_SUM:
if (coeffs_[i] == Dtype(1.)) {
caffe_copy(count, top_diff, bottom_diff);
} else {
caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff);
}
break;
case EltwiseParameter_EltwiseOp_MAX:
mask = max_idx_.gpu_data();
        // NOLINT_NEXT_LINE(whitespace/operators)
        hipLaunchKernelGGL(( MaxBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
            count, top_diff, i, mask, bottom_diff);
break;
default:
LOG(FATAL) << "Unknown elementwise operation.";
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EltwiseLayer);
} // namespace crfasrnn_caffe
| 66ee8d91769a94edc5b337cba6b03cb200fb081d.cu | #include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace crfasrnn_caffe {
template <typename Dtype>
__global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a,
const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data,
int* mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype maxval = -FLT_MAX;
int maxidx = -1;
if (bottom_data_a[index] > bottom_data_b[index]) {
// only update for very first bottom_data blob (blob_idx == 0)
if (blob_idx == 0) {
maxval = bottom_data_a[index];
top_data[index] = maxval;
maxidx = blob_idx;
mask[index] = maxidx;
}
} else {
maxval = bottom_data_b[index];
top_data[index] = maxval;
maxidx = blob_idx + 1;
mask[index] = maxidx;
}
}
}
template <typename Dtype>
void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int* mask = NULL;
const int count = top[0]->count();
Dtype* top_data = top[0]->mutable_gpu_data();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD:
caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
top_data);
for (int i = 2; i < bottom.size(); ++i) {
caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data);
}
break;
case EltwiseParameter_EltwiseOp_SUM:
caffe_gpu_set(count, Dtype(0.), top_data);
// TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1?
for (int i = 0; i < bottom.size(); ++i) {
caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data);
}
break;
case EltwiseParameter_EltwiseOp_MAX:
mask = max_idx_.mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
MaxForward<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask);
for (int i = 2; i < bottom.size(); ++i) {
// NOLINT_NEXT_LINE(whitespace/operators)
MaxForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask);
}
break;
default:
LOG(FATAL) << "Unknown elementwise operation.";
}
}
template <typename Dtype>
__global__ void MaxBackward(const int nthreads, const Dtype* top_diff,
const int blob_idx, const int* mask, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype gradient = 0;
if (mask[index] == blob_idx) {
gradient += top_diff[index];
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const int* mask = NULL;
const int count = top[0]->count();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
for (int i = 0; i < bottom.size(); ++i) {
if (propagate_down[i]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD:
if (stable_prod_grad_) {
bool initialized = false;
for (int j = 0; j < bottom.size(); ++j) {
if (i == j) { continue; }
if (!initialized) {
caffe_copy(count, bottom[j]->gpu_data(), bottom_diff);
initialized = true;
} else {
caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff,
bottom_diff);
}
}
} else {
caffe_gpu_div(count, top_data, bottom_data, bottom_diff);
}
caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff);
break;
case EltwiseParameter_EltwiseOp_SUM:
if (coeffs_[i] == Dtype(1.)) {
caffe_copy(count, top_diff, bottom_diff);
} else {
caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff);
}
break;
case EltwiseParameter_EltwiseOp_MAX:
mask = max_idx_.gpu_data();
MaxBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, i, mask, bottom_diff);
break;
default:
LOG(FATAL) << "Unknown elementwise operation.";
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EltwiseLayer);
} // namespace crfasrnn_caffe
|
dc01e29dc7a05cc0846ed2d5a31c265741da42bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.cuh"
__global__ void filtering(
float* patches, int patch_size, float filt_sigma, float* noise_image, const int total_pixels, float* filtered_image)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int total_patch_size = patch_size * patch_size;
extern __shared__ float s[];
float* patches_self = s;
float* patches_sub = (float*)&patches_self[blockDim.x * total_patch_size];
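    // Shared memory layout: the first blockDim.x patches hold each thread's own reference
    // patch; the next blockDim.x patches hold the current tile of candidate patches.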
// grid stride loop
for (int pixel = tid; pixel < total_pixels; pixel += stride) {
        // the patch corresponding to the pixel being filtered; it will be compared with all the other patches
for (int i = 0; i < total_patch_size; i++) {
patches_self[threadIdx.x * total_patch_size + i] = patches[pixel * total_patch_size + i];
}
//__syncthreads();
float weight = 0;
float max = -1.0;
float sum_weights = 0;
// making use of register memory
float filtered_value = 0;
for (int i = 0; i < total_pixels / blockDim.x; i++) {
// each thread per block copy a patch to shared memory
for (int e = 0; e < total_patch_size; e++) {
patches_sub[threadIdx.x * total_patch_size + e] =
patches[(threadIdx.x + i * blockDim.x) * total_patch_size + e];
}
__syncthreads();
// each thread per block calculate the weights
for (int j = 0; j < blockDim.x; j++) {
weight = euclidean_distance_patch(
patches_self + (threadIdx.x) * total_patch_size, patches_sub + j * total_patch_size, patch_size);
weight = exp(-(weight * weight) / filt_sigma);
max = (weight > max && (i * blockDim.x + j) != pixel) ? weight : max;
sum_weights += weight;
float noise_pixel = *(patches_sub + j * total_patch_size + total_patch_size / 2);
filtered_value += weight * noise_pixel;
}
__syncthreads();
}
// neglect the weight of self distance
sum_weights -= 1;
sum_weights += max;
float noise_pixel_self = *(patches_self + threadIdx.x * total_patch_size + total_patch_size / 2);
filtered_value -= noise_pixel_self;
filtered_value += max * noise_pixel_self;
filtered_value /= sum_weights;
filtered_image[pixel] = filtered_value;
}
}
// take two patches and calculate their distance
__device__ float euclidean_distance_patch(float* patch1, float* patch2, int patch_size)
{
int total_patch_size = patch_size * patch_size;
float distance = 0;
for (int i = 0; i < total_patch_size; i++) {
float temp = patch1[i] - patch2[i];
distance += temp * temp; // kudos to student Christos Pavlidis, I didn't notice bad performance using pow()
}
return sqrt(distance);
}
| dc01e29dc7a05cc0846ed2d5a31c265741da42bb.cu | #include "utils.cuh"
__global__ void filtering(
float* patches, int patch_size, float filt_sigma, float* noise_image, const int total_pixels, float* filtered_image)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int total_patch_size = patch_size * patch_size;
extern __shared__ float s[];
float* patches_self = s;
float* patches_sub = (float*)&patches_self[blockDim.x * total_patch_size];
// grid stride loop
for (int pixel = tid; pixel < total_pixels; pixel += stride) {
        // the patch corresponding to the pixel being filtered; it will be compared with all the other patches
for (int i = 0; i < total_patch_size; i++) {
patches_self[threadIdx.x * total_patch_size + i] = patches[pixel * total_patch_size + i];
}
//__syncthreads();
float weight = 0;
float max = -1.0;
float sum_weights = 0;
// making use of register memory
float filtered_value = 0;
for (int i = 0; i < total_pixels / blockDim.x; i++) {
// each thread per block copy a patch to shared memory
for (int e = 0; e < total_patch_size; e++) {
patches_sub[threadIdx.x * total_patch_size + e] =
patches[(threadIdx.x + i * blockDim.x) * total_patch_size + e];
}
__syncthreads();
// each thread per block calculate the weights
for (int j = 0; j < blockDim.x; j++) {
weight = euclidean_distance_patch(
patches_self + (threadIdx.x) * total_patch_size, patches_sub + j * total_patch_size, patch_size);
weight = exp(-(weight * weight) / filt_sigma);
max = (weight > max && (i * blockDim.x + j) != pixel) ? weight : max;
sum_weights += weight;
float noise_pixel = *(patches_sub + j * total_patch_size + total_patch_size / 2);
filtered_value += weight * noise_pixel;
}
__syncthreads();
}
// neglect the weight of self distance
sum_weights -= 1;
sum_weights += max;
float noise_pixel_self = *(patches_self + threadIdx.x * total_patch_size + total_patch_size / 2);
filtered_value -= noise_pixel_self;
filtered_value += max * noise_pixel_self;
filtered_value /= sum_weights;
filtered_image[pixel] = filtered_value;
}
}
// take two patches and calculate their distance
__device__ float euclidean_distance_patch(float* patch1, float* patch2, int patch_size)
{
int total_patch_size = patch_size * patch_size;
float distance = 0;
for (int i = 0; i < total_patch_size; i++) {
float temp = patch1[i] - patch2[i];
distance += temp * temp; // kudos to student Christos Pavlidis, I didn't notice bad performance using pow()
}
return sqrt(distance);
}
|
c5a35df87c198a1f9435db254e4dafbb79f8c444.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/image/transform_gpu.h"
#include "caffe2/utils/conversions.h"
/**
*
* Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved
* Distributed under 2-clause BSD license; see accompanying LICENSE file
*
**/
namespace caffe2 {
namespace {
// input in (uint8, NHWC), output in (float or half, NCHW)
template <typename In, typename Out>
__global__ void transform_kernel(
const int N,
const int C,
const int H,
const int W,
const float* mean,
const float* std,
const In* in,
Out* out) {
const int n = blockIdx.x;
const int nStride = C*H*W;
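  // one image (batch element) per block; threadIdx.x/y stride over width/height
  // while each thread loops over the channels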
// pointers to data for this image
const In* input_ptr = &in[n*nStride];
Out* output_ptr = &out[n*nStride];
// either read or write uncoalesced - try reading
for (int c=0; c < C; ++c) {
for (int h=threadIdx.y; h < H; h += blockDim.y) {
for (int w=threadIdx.x; w < W; w += blockDim.x) {
int in_idx = c + C*w + C*W*h; // HWC
int out_idx = c*H*W + h*W + w; // CHW
output_ptr[out_idx] = convert::To<float,Out>(
(convert::To<In,float>(input_ptr[in_idx])-mean[c]) * std[c]);
}
}
}
}
}
template <typename T_IN, typename T_OUT, class Context>
bool TransformOnGPU(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
Context* context) {
// data comes in as NHWC
const int N = X.dim32(0), C = X.dim32(3), H = X.dim32(1), W = X.dim32(2);
// data goes out as NCHW
Y->Resize(std::vector<int>{N,C,H,W});
auto* input_data = X.template data<T_IN>();
auto* output_data = Y->template mutable_data<T_OUT>();
hipLaunchKernelGGL(( transform_kernel<
T_IN, T_OUT>), dim3(N), dim3(dim3(16, 16)), 0, context->cuda_stream(),
N, C, H, W, mean.template data<float>(), std.template data<float>(),
input_data, output_data);
return true;
};
template bool TransformOnGPU<uint8_t, float, CUDAContext>(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
CUDAContext* context);
template bool TransformOnGPU<uint8_t, at::Half, CUDAContext>(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
CUDAContext* context);
} // namespace caffe2
| c5a35df87c198a1f9435db254e4dafbb79f8c444.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/image/transform_gpu.h"
#include "caffe2/utils/conversions.h"
/**
*
* Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved
* Distributed under 2-clause BSD license; see accompanying LICENSE file
*
**/
namespace caffe2 {
namespace {
// input in (uint8, NHWC), output in (float or half, NCHW)
template <typename In, typename Out>
__global__ void transform_kernel(
const int N,
const int C,
const int H,
const int W,
const float* mean,
const float* std,
const In* in,
Out* out) {
const int n = blockIdx.x;
const int nStride = C*H*W;
// pointers to data for this image
const In* input_ptr = &in[n*nStride];
Out* output_ptr = &out[n*nStride];
// either read or write uncoalesced - try reading
for (int c=0; c < C; ++c) {
for (int h=threadIdx.y; h < H; h += blockDim.y) {
for (int w=threadIdx.x; w < W; w += blockDim.x) {
int in_idx = c + C*w + C*W*h; // HWC
int out_idx = c*H*W + h*W + w; // CHW
output_ptr[out_idx] = convert::To<float,Out>(
(convert::To<In,float>(input_ptr[in_idx])-mean[c]) * std[c]);
}
}
}
}
}
template <typename T_IN, typename T_OUT, class Context>
bool TransformOnGPU(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
Context* context) {
// data comes in as NHWC
const int N = X.dim32(0), C = X.dim32(3), H = X.dim32(1), W = X.dim32(2);
// data goes out as NCHW
Y->Resize(std::vector<int>{N,C,H,W});
auto* input_data = X.template data<T_IN>();
auto* output_data = Y->template mutable_data<T_OUT>();
transform_kernel<
T_IN, T_OUT><<<N, dim3(16, 16), 0, context->cuda_stream()>>>(
N, C, H, W, mean.template data<float>(), std.template data<float>(),
input_data, output_data);
return true;
};
template bool TransformOnGPU<uint8_t, float, CUDAContext>(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
CUDAContext* context);
template bool TransformOnGPU<uint8_t, at::Half, CUDAContext>(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
CUDAContext* context);
} // namespace caffe2
|
14c4eb8906715baefc53dfb03653674aac78c26d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include "headers/dsingle.h"
#include "error_functions.cu"
// Addition
// Algorithm 6 from Tight and rigorous error bounds. relative error < 3u²
__device__ dsingle operator+(dsingle a, dsingle b){
float hi, lo,thi, tlo;
// perform exact addition, with lo and tlo being the error term.
two_sum(a.hi(), b.hi(),hi,lo);
two_sum(a.lo(), b.lo(),thi,tlo);
lo = lo + thi;
quick_two_sum(hi,lo,hi,lo);
lo = lo + tlo;
quick_two_sum(hi,lo,hi,lo);
return dsingle(hi,lo);
}
__device__ dsingle operator+(dsingle a, float b){
float hi, lo;
// perform exact addition
two_sum(a.hi(),b,a.lo(),hi,lo);
return dsingle(hi,lo);
}
__device__ dsingle operator+(float a, dsingle b){
float hi, lo;
// perform exact addition
two_sum(a,b.hi(),b.lo(),hi,lo);
return dsingle(hi,lo);
}
// Subtraction
__device__ dsingle operator-(dsingle a, dsingle b){
float hi, lo;
two_diff(a.hi(), b.hi(),hi,lo);
float thi, tlo;
two_diff(a.lo(), b.lo(),thi,tlo);
lo = lo + thi;
quick_two_sum(hi,lo,hi,lo);
lo = lo + tlo;
quick_two_sum(hi,lo,hi,lo);
return dsingle(hi,lo);
}
__device__ dsingle operator-(dsingle a, float b){
float hi, lo;
two_sum(a.hi(),a.lo(),-b,hi,lo);
return dsingle(hi,lo);
}
__device__ dsingle operator-(float a, dsingle b){
float hi, lo;
two_diff(a,b.hi(),b.lo(),hi,lo);
return dsingle(hi,lo);
}
__device__ dsingle operator-(dsingle a){
return dsingle(-a.hi(),-a.lo());
}
// Multiplication
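// Multiply the high parts exactly with two_prod, then fold the cross terms and the
// low*low term into the error term with fused multiply-adds before renormalizing.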
__device__ dsingle operator*(dsingle a, dsingle b){
float hi, lo;
two_prod(a.hi(), b.hi(),hi,lo);
//float t = a.lo() * b.lo();
//t = fmaf(a.hi(),b.lo(),t);
//t = fmaf(a.lo(),b.hi(),t);
//lo = lo + t;
lo = fmaf(a.lo(),b.lo(),lo);
lo = fmaf(a.hi(),b.lo(),lo);
lo = fmaf(a.lo(),b.hi(),lo);
quick_two_sum(hi,lo,hi,lo);
return dsingle(hi,lo);
}
__device__ dsingle operator*(dsingle a, float b){
float hi, lo;
two_prod(a.hi(), b,hi,lo);
lo = fmaf(a.lo(), b, lo);
quick_two_sum(hi,lo,hi,lo);
return dsingle(hi,lo);
}
__device__ dsingle operator*(float a, dsingle b){
return (b * a);
}
// Division
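// Approximate the quotient from the high parts, form the residual with an exact
// product (two_prod), fold in the low-order terms with an fma, and renormalize.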
__device__ dsingle operator/(dsingle a, dsingle b){
float hi, lo;
hi = a.hi()/b.hi();
float thi, tlo;
two_prod(hi,b.hi(),thi,tlo);
//lo = ((((a.hi() - thi) - tlo) + a.lo()) - hi*b.lo() ) / b.hi();
//lo = fmaf(-hi,b.lo(),(((a.hi() - thi) - tlo) + a.lo())) / b.hi();
lo = fmaf(-hi,b.lo(),(a.hi() - thi) + (a.lo() - tlo )) / b.hi();
quick_two_sum(hi,lo,hi,lo);
return dsingle(hi,lo);
}
__device__ dsingle operator/(dsingle a, float b){
float hi, lo;
hi = a.hi()/b;
float thi, tlo;
two_prod(hi,b,thi,tlo);
lo = (((a.hi() - thi) - tlo) + a.lo())/b;
quick_two_sum(hi,lo,hi,lo);
return dsingle(hi,lo);
}
__device__ dsingle operator/(float a, dsingle b){
float hi, lo;
hi = a/b.hi();
float thi, tlo;
two_prod(hi,b.hi(),thi,tlo);
//lo = ( ((a - thi) - tlo) - hi*b.lo() )/b.hi();
lo = fmaf(-hi,b.lo(),((a - thi) - tlo))/b.hi();
quick_two_sum(hi,lo,hi,lo);
return dsingle(hi,lo);
}
/*
__global__ void addCUDA(dsingle *a, dsingle *b, dsingle *c){
*c = *a / *b;
}
template<typename T, typename U>
void test_addition(T const& x , U const& y){
printf("Testing extended_add\n");
dsingle a, b, c;
a = dsingle(x);
b = dsingle(y);
dsingle *da, *db, *dc;
hipMalloc((void **)&da,sizeof(dsingle));
hipMalloc((void **)&db,sizeof(dsingle));
hipMalloc((void **)&dc,sizeof(dsingle));
hipMemcpy(da, &a, sizeof(dsingle),hipMemcpyHostToDevice);
hipMemcpy(db, &b, sizeof(dsingle),hipMemcpyHostToDevice);
addCUDA<<<30,32>>>(da,db,dc);
hipMemcpy(&c, dc, sizeof(dsingle),hipMemcpyDeviceToHost);
double truev = x / y;
printf("%.16f : native double\n",truev);
printf("%.16f : extended\n", c.evaluate());
    printf("%.16f : difference\n", c.evaluate() - truev);
printf("---\n");
hipFree( da );
hipFree( db );
hipFree( dc );
}
int main(int argc, char const *argv[])
{
test_addition((1.0/3),(2.0f));
return 0;
}*/ | 14c4eb8906715baefc53dfb03653674aac78c26d.cu | #include <stdio.h>
#include <math.h>
#include "headers/dsingle.h"
#include "error_functions.cu"
// Addition
// Algorithm 6 from Tight and rigorous error bounds. relative error < 3u²
__device__ dsingle operator+(dsingle a, dsingle b){
float hi, lo,thi, tlo;
// perform exact addition, with lo and tlo being the error term.
two_sum(a.hi(), b.hi(),hi,lo);
two_sum(a.lo(), b.lo(),thi,tlo);
lo = lo + thi;
quick_two_sum(hi,lo,hi,lo);
lo = lo + tlo;
quick_two_sum(hi,lo,hi,lo);
return dsingle(hi,lo);
}
__device__ dsingle operator+(dsingle a, float b){
float hi, lo;
// perform exact addition
two_sum(a.hi(),b,a.lo(),hi,lo);
return dsingle(hi,lo);
}
__device__ dsingle operator+(float a, dsingle b){
float hi, lo;
// perform exact addition
two_sum(a,b.hi(),b.lo(),hi,lo);
return dsingle(hi,lo);
}
// Subtraction
__device__ dsingle operator-(dsingle a, dsingle b){
float hi, lo;
two_diff(a.hi(), b.hi(),hi,lo);
float thi, tlo;
two_diff(a.lo(), b.lo(),thi,tlo);
lo = lo + thi;
quick_two_sum(hi,lo,hi,lo);
lo = lo + tlo;
quick_two_sum(hi,lo,hi,lo);
return dsingle(hi,lo);
}
__device__ dsingle operator-(dsingle a, float b){
float hi, lo;
two_sum(a.hi(),a.lo(),-b,hi,lo);
return dsingle(hi,lo);
}
__device__ dsingle operator-(float a, dsingle b){
float hi, lo;
two_diff(a,b.hi(),b.lo(),hi,lo);
return dsingle(hi,lo);
}
__device__ dsingle operator-(dsingle a){
return dsingle(-a.hi(),-a.lo());
}
// Multiplication
__device__ dsingle operator*(dsingle a, dsingle b){
float hi, lo;
two_prod(a.hi(), b.hi(),hi,lo);
//float t = a.lo() * b.lo();
//t = fmaf(a.hi(),b.lo(),t);
//t = fmaf(a.lo(),b.hi(),t);
//lo = lo + t;
lo = fmaf(a.lo(),b.lo(),lo);
lo = fmaf(a.hi(),b.lo(),lo);
lo = fmaf(a.lo(),b.hi(),lo);
quick_two_sum(hi,lo,hi,lo);
return dsingle(hi,lo);
}
__device__ dsingle operator*(dsingle a, float b){
float hi, lo;
two_prod(a.hi(), b,hi,lo);
lo = fmaf(a.lo(), b, lo);
quick_two_sum(hi,lo,hi,lo);
return dsingle(hi,lo);
}
__device__ dsingle operator*(float a, dsingle b){
return (b * a);
}
// Division
__device__ dsingle operator/(dsingle a, dsingle b){
float hi, lo;
hi = a.hi()/b.hi();
float thi, tlo;
two_prod(hi,b.hi(),thi,tlo);
//lo = ((((a.hi() - thi) - tlo) + a.lo()) - hi*b.lo() ) / b.hi();
//lo = fmaf(-hi,b.lo(),(((a.hi() - thi) - tlo) + a.lo())) / b.hi();
lo = fmaf(-hi,b.lo(),(a.hi() - thi) + (a.lo() - tlo )) / b.hi();
quick_two_sum(hi,lo,hi,lo);
return dsingle(hi,lo);
}
__device__ dsingle operator/(dsingle a, float b){
float hi, lo;
hi = a.hi()/b;
float thi, tlo;
two_prod(hi,b,thi,tlo);
lo = (((a.hi() - thi) - tlo) + a.lo())/b;
quick_two_sum(hi,lo,hi,lo);
return dsingle(hi,lo);
}
__device__ dsingle operator/(float a, dsingle b){
float hi, lo;
hi = a/b.hi();
float thi, tlo;
two_prod(hi,b.hi(),thi,tlo);
//lo = ( ((a - thi) - tlo) - hi*b.lo() )/b.hi();
lo = fmaf(-hi,b.lo(),((a - thi) - tlo))/b.hi();
quick_two_sum(hi,lo,hi,lo);
return dsingle(hi,lo);
}
/*
__global__ void addCUDA(dsingle *a, dsingle *b, dsingle *c){
*c = *a / *b;
}
template<typename T, typename U>
void test_addition(T const& x , U const& y){
printf("Testing extended_add\n");
dsingle a, b, c;
a = dsingle(x);
b = dsingle(y);
dsingle *da, *db, *dc;
cudaMalloc((void **)&da,sizeof(dsingle));
cudaMalloc((void **)&db,sizeof(dsingle));
cudaMalloc((void **)&dc,sizeof(dsingle));
cudaMemcpy(da, &a, sizeof(dsingle),cudaMemcpyHostToDevice);
cudaMemcpy(db, &b, sizeof(dsingle),cudaMemcpyHostToDevice);
addCUDA<<<30,32>>>(da,db,dc);
cudaMemcpy(&c, dc, sizeof(dsingle),cudaMemcpyDeviceToHost);
double truev = x / y;
printf("%.16f : native double\n",truev);
printf("%.16f : extended\n", c.evaluate());
    printf("%.16f : difference\n", c.evaluate() - truev);
printf("---\n");
cudaFree( da );
cudaFree( db );
cudaFree( dc );
}
int main(int argc, char const *argv[])
{
test_addition((1.0/3),(2.0f));
return 0;
}*/ |
f5383b98fcae78967dda62320e762e6f689203ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Fast Lee Hologram computation using CUDA
Programmed by Shay Ohayon
DiCarlo Lab @ MIT
Revision History
Version 0.1 10/22/2014
*/
#include <stdio.h>
#include "mex.h"
#include <Windows.h>
#include <math.h>
#define MIN(a,b) (a)<(b)?(a):(b)
#define M_PI 3.14159265358979323846
const int DMDwidth = 1024;
const int DMDheight = 768;
const int effectiveDMDwidth = DMDheight;
__global__ void computeCuda(int numPatterns, double *inputPhases, bool *binaryPatterns, double *carrierWave, int patternSizeX, int patternSizeY, int numReferencePixels, int leeBlockSize) {
	int z = blockDim.x * blockIdx.x + threadIdx.x;
	if (z >= numPatterns) return; // the grid is rounded up, so extra threads must exit early
long long output_offset = DMDwidth*DMDheight*z;
long long input_offset = patternSizeX*patternSizeY*z;
double phaseAngle = 0;
for (int x = 0; x < DMDwidth; x++)
{
int sampleX = (x - numReferencePixels) / leeBlockSize;
for (int y = 0; y < DMDheight; y++)
{
phaseAngle = 0.0; //good
if (y >= numReferencePixels && y < DMDheight - numReferencePixels && x >= numReferencePixels && x < effectiveDMDwidth - numReferencePixels)
{
int sampleY = (y - numReferencePixels) / leeBlockSize;
assert(sampleX >= 0 && sampleY >= 0 && sampleX < patternSizeX && sampleY < patternSizeY);
phaseAngle = inputPhases[input_offset + sampleX*patternSizeY + sampleY];
}
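			// Lee hologram: binarize the carrier-modulated fringe 0.5*(1 + cos(carrier - phase))
			// by thresholding it at 0.5.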
binaryPatterns[output_offset + x*DMDheight + y] = (0.5 * (1 + cos(carrierWave[x*DMDheight + y] - phaseAngle))) > 0.5;
}
}
}
void compute(int z, double *inputPhases, bool *binaryPatterns, double *carrierWave, int patternSizeX, int patternSizeY, int numReferencePixels, int leeBlockSize)
{
long long output_offset = DMDwidth*DMDheight*z;
long long input_offset = patternSizeX*patternSizeY*z;
double phaseAngle = 0;
for (int x = 0; x < DMDwidth; x++)
{
int sampleX = (x - numReferencePixels) / leeBlockSize;
for (int y = 0; y < DMDheight; y++)
{
phaseAngle = 0.0;
if (y >= numReferencePixels && y < DMDheight - numReferencePixels && x >= numReferencePixels && x < effectiveDMDwidth - numReferencePixels)
{
int sampleY = (y - numReferencePixels) / leeBlockSize;
assert(sampleX >= 0 && sampleY >= 0 && sampleX < patternSizeX && sampleY < patternSizeY);
phaseAngle = inputPhases[input_offset + sampleX*patternSizeY + sampleY];
}
binaryPatterns[output_offset + x*DMDheight + y] = (0.5 * (1 + cos(carrierWave[x*DMDheight + y] - phaseAngle))) > 0.5;
}
}
}
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[]) {
	if (nrhs < 4 || nlhs != 1)
{
mexPrintf("Use: OutputBinaryPatterns = FastLeeHologram(inputPhases (NxNxM), numReferencePixels, leeBlockSize, selectedCarrier);");
return;
}
double *inputPhases = (double*) mxGetData(prhs[0]);
int numReferencePixels = *(double*)mxGetData(prhs[1]);
int leeBlockSize = *(double*)mxGetData(prhs[2]);
double selectedCarrier = *(double*) mxGetData(prhs[3]);
const int numDim = mxGetNumberOfDimensions(prhs[0]);
const int *dataSize = mxGetDimensions(prhs[0]);
int numPatterns = 1;
int patternSizeX = dataSize[0];
int patternSizeY = dataSize[1];
if (numDim > 2)
{
numPatterns = dataSize[2];
}
// allocate memory for output
const int outputDimSize[3] = { DMDheight, DMDwidth, numPatterns };
plhs[0] = mxCreateLogicalArray(3, outputDimSize);
bool* binaryPatterns = (bool*)mxGetData(plhs[0]);
// allocate memory for the reference wave
double *carrierWave = new double[DMDheight*DMDwidth];
for (int x = 0; x < DMDwidth; x++)
{
for (int y = 0; y < DMDheight; y++)
{
carrierWave[x*DMDheight +y] = 2.0 * M_PI*(x - y)*selectedCarrier;
}
}
double* d_inputPhases;
long inputSize = sizeof(double) * patternSizeX * patternSizeY * numPatterns;
hipMalloc(&d_inputPhases, inputSize);
hipMemcpy(d_inputPhases, inputPhases, inputSize, hipMemcpyHostToDevice);
int maxThreadsPerBlock = 256;
	int numBlocks = (numPatterns + maxThreadsPerBlock - 1) / maxThreadsPerBlock; // round up so every pattern is processed
	// The kernel can only read/write device memory, so stage the carrier wave and the
	// output patterns on the GPU and copy the result back into the MATLAB array.
	bool* d_binaryPatterns;
	double* d_carrierWave;
	size_t outputSize = sizeof(bool) * DMDwidth * DMDheight * (size_t)numPatterns;
	size_t carrierSize = sizeof(double) * DMDwidth * DMDheight;
	hipMalloc(&d_binaryPatterns, outputSize);
	hipMalloc(&d_carrierWave, carrierSize);
	hipMemcpy(d_carrierWave, carrierWave, carrierSize, hipMemcpyHostToDevice);
	computeCuda << <numBlocks, maxThreadsPerBlock >> >(numPatterns, d_inputPhases, d_binaryPatterns, d_carrierWave, patternSizeX, patternSizeY, numReferencePixels, leeBlockSize);
	hipMemcpy(binaryPatterns, d_binaryPatterns, outputSize, hipMemcpyDeviceToHost);
	hipFree(d_binaryPatterns);
	hipFree(d_carrierWave);
/*
for (int z = 0; z < numPatterns; z++)
{
compute(z, inputPhases, binaryPatterns, carrierWave, patternSizeX, patternSizeY, numReferencePixels, leeBlockSize);
}
*/
	hipFree(d_inputPhases);
	delete [] carrierWave;
}
| f5383b98fcae78967dda62320e762e6f689203ca.cu | /*
Fast Lee Hologram computation using CUDA
Programmed by Shay Ohayon
DiCarlo Lab @ MIT
Revision History
Version 0.1 10/22/2014
*/
#include <stdio.h>
#include "mex.h"
#include <Windows.h>
#include <math.h>
#define MIN(a,b) (a)<(b)?(a):(b)
#define M_PI 3.14159265358979323846
const int DMDwidth = 1024;
const int DMDheight = 768;
const int effectiveDMDwidth = DMDheight;
__global__ void computeCuda(int numPatterns, double *inputPhases, bool *binaryPatterns, double *carrierWave, int patternSizeX, int patternSizeY, int numReferencePixels, int leeBlockSize) {
	int z = blockDim.x * blockIdx.x + threadIdx.x;
	if (z >= numPatterns) return; // the grid is rounded up, so extra threads must exit early
long long output_offset = DMDwidth*DMDheight*z;
long long input_offset = patternSizeX*patternSizeY*z;
double phaseAngle = 0;
for (int x = 0; x < DMDwidth; x++)
{
int sampleX = (x - numReferencePixels) / leeBlockSize;
for (int y = 0; y < DMDheight; y++)
{
phaseAngle = 0.0; //good
if (y >= numReferencePixels && y < DMDheight - numReferencePixels && x >= numReferencePixels && x < effectiveDMDwidth - numReferencePixels)
{
int sampleY = (y - numReferencePixels) / leeBlockSize;
assert(sampleX >= 0 && sampleY >= 0 && sampleX < patternSizeX && sampleY < patternSizeY);
phaseAngle = inputPhases[input_offset + sampleX*patternSizeY + sampleY];
}
binaryPatterns[output_offset + x*DMDheight + y] = (0.5 * (1 + cos(carrierWave[x*DMDheight + y] - phaseAngle))) > 0.5;
}
}
}
void compute(int z, double *inputPhases, bool *binaryPatterns, double *carrierWave, int patternSizeX, int patternSizeY, int numReferencePixels, int leeBlockSize)
{
long long output_offset = DMDwidth*DMDheight*z;
long long input_offset = patternSizeX*patternSizeY*z;
double phaseAngle = 0;
for (int x = 0; x < DMDwidth; x++)
{
int sampleX = (x - numReferencePixels) / leeBlockSize;
for (int y = 0; y < DMDheight; y++)
{
phaseAngle = 0.0;
if (y >= numReferencePixels && y < DMDheight - numReferencePixels && x >= numReferencePixels && x < effectiveDMDwidth - numReferencePixels)
{
int sampleY = (y - numReferencePixels) / leeBlockSize;
assert(sampleX >= 0 && sampleY >= 0 && sampleX < patternSizeX && sampleY < patternSizeY);
phaseAngle = inputPhases[input_offset + sampleX*patternSizeY + sampleY];
}
binaryPatterns[output_offset + x*DMDheight + y] = (0.5 * (1 + cos(carrierWave[x*DMDheight + y] - phaseAngle))) > 0.5;
}
}
}
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[]) {
	if (nrhs < 4 || nlhs != 1)
{
mexPrintf("Use: OutputBinaryPatterns = FastLeeHologram(inputPhases (NxNxM), numReferencePixels, leeBlockSize, selectedCarrier);");
return;
}
double *inputPhases = (double*) mxGetData(prhs[0]);
int numReferencePixels = *(double*)mxGetData(prhs[1]);
int leeBlockSize = *(double*)mxGetData(prhs[2]);
double selectedCarrier = *(double*) mxGetData(prhs[3]);
const int numDim = mxGetNumberOfDimensions(prhs[0]);
const int *dataSize = mxGetDimensions(prhs[0]);
int numPatterns = 1;
int patternSizeX = dataSize[0];
int patternSizeY = dataSize[1];
if (numDim > 2)
{
numPatterns = dataSize[2];
}
// allocate memory for output
const int outputDimSize[3] = { DMDheight, DMDwidth, numPatterns };
plhs[0] = mxCreateLogicalArray(3, outputDimSize);
bool* binaryPatterns = (bool*)mxGetData(plhs[0]);
// allocate memory for the reference wave
double *carrierWave = new double[DMDheight*DMDwidth];
for (int x = 0; x < DMDwidth; x++)
{
for (int y = 0; y < DMDheight; y++)
{
carrierWave[x*DMDheight +y] = 2.0 * M_PI*(x - y)*selectedCarrier;
}
}
double* d_inputPhases;
long inputSize = sizeof(double) * patternSizeX * patternSizeY * numPatterns;
cudaMalloc(&d_inputPhases, inputSize);
cudaMemcpy(d_inputPhases, inputPhases, inputSize, cudaMemcpyHostToDevice);
int maxThreadsPerBlock = 256;
	int numBlocks = (numPatterns + maxThreadsPerBlock - 1) / maxThreadsPerBlock; // round up so every pattern is processed
	// The kernel can only read/write device memory, so stage the carrier wave and the
	// output patterns on the GPU and copy the result back into the MATLAB array.
	bool* d_binaryPatterns;
	double* d_carrierWave;
	size_t outputSize = sizeof(bool) * DMDwidth * DMDheight * (size_t)numPatterns;
	size_t carrierSize = sizeof(double) * DMDwidth * DMDheight;
	cudaMalloc(&d_binaryPatterns, outputSize);
	cudaMalloc(&d_carrierWave, carrierSize);
	cudaMemcpy(d_carrierWave, carrierWave, carrierSize, cudaMemcpyHostToDevice);
	computeCuda << <numBlocks, maxThreadsPerBlock >> >(numPatterns, d_inputPhases, d_binaryPatterns, d_carrierWave, patternSizeX, patternSizeY, numReferencePixels, leeBlockSize);
	cudaMemcpy(binaryPatterns, d_binaryPatterns, outputSize, cudaMemcpyDeviceToHost);
	cudaFree(d_binaryPatterns);
	cudaFree(d_carrierWave);
/*
for (int z = 0; z < numPatterns; z++)
{
compute(z, inputPhases, binaryPatterns, carrierWave, patternSizeX, patternSizeY, numReferencePixels, leeBlockSize);
}
*/
	cudaFree(d_inputPhases);
	delete [] carrierWave;
}
|
864d732ee394ea4ad6fc1186426907b14b30fa89.hip | // !!! This is a file automatically generated by hipify!!!
#include "GDALImage.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <assert.h>
#include <rocblas.h>
#include "hipError_t.h"
#include <errno.h>
#include <unistd.h>
/**
* \brief Constructor
*
* @param filename a std::string with the raster image file name
*/
GDALImage::GDALImage(std::string filename, int band, int cacheSizeInGB, int useMmap)
: _useMmap(useMmap)
{
// open the file as dataset
_poDataset = (GDALDataset *) GDALOpen(filename.c_str(), GA_ReadOnly );
// if something is wrong, throw an exception
// GDAL reports the error message
if(!_poDataset)
throw;
// check the band info
int count = _poDataset->GetRasterCount();
if(band > count)
{
        std::cout << "The desired band " << band << " exceeds the " << count << " bands available";
throw;
}
// get the desired band
_poBand = _poDataset->GetRasterBand(band);
if(!_poBand)
throw;
// get the width(x), and height(y)
_width = _poBand->GetXSize();
_height = _poBand->GetYSize();
_dataType = _poBand->GetRasterDataType();
// determine the image type
_isComplex = GDALDataTypeIsComplex(_dataType);
// determine the pixel size in bytes
_pixelSize = GDALGetDataTypeSize(_dataType);
_bufferSize = 1024*1024*cacheSizeInGB;
// checking whether using memory map
if(_useMmap) {
char **papszOptions = NULL;
// if cacheSizeInGB = 0, use default
// else set the option
if(cacheSizeInGB > 0)
papszOptions = CSLSetNameValue( papszOptions,
"CACHE_SIZE",
std::to_string(_bufferSize).c_str());
// space between two lines
GIntBig pnLineSpace;
// set up the virtual mem buffer
_poBandVirtualMem = GDALGetVirtualMemAuto(
static_cast<GDALRasterBandH>(_poBand),
GF_Read,
&_pixelSize,
&pnLineSpace,
papszOptions);
// check it
if(!_poBandVirtualMem)
throw;
// get the starting pointer
_memPtr = CPLVirtualMemGetAddr(_poBandVirtualMem);
}
else { // use a buffer
checkCudaErrors(hipHostMalloc((void **)&_memPtr, _bufferSize));
}
// make sure memPtr is not Null
if (!_memPtr)
throw;
// all done
}
/// load a tile of data h_tile x w_tile from CPU (mmap) to GPU
/// @param dArray pointer for array in device memory
/// @param h_offset Down/Height offset
/// @param w_offset Across/Width offset
/// @param h_tile Down/Height tile size
/// @param w_tile Across/Width tile size
/// @param stream CUDA stream for copying
void GDALImage::loadToDevice(void *dArray, size_t h_offset, size_t w_offset, size_t h_tile, size_t w_tile, hipStream_t stream)
{
size_t tileStartOffset = (h_offset*_width + w_offset)*_pixelSize;
char * startPtr = (char *)_memPtr ;
startPtr += tileStartOffset;
// @note
// We assume down/across directions as rows/cols. Therefore, SLC mmap and device array are both row major.
// cuBlas assumes both source and target arrays are column major.
// To use hipblasSetMatrix, we need to switch w_tile/h_tile for rows/cols
// checkCudaErrors(hipblasSetMatrixAsync(w_tile, h_tile, sizeof(float2), startPtr, width, dArray, w_tile, stream));
if (_useMmap)
checkCudaErrors(hipMemcpy2DAsync(dArray, w_tile*_pixelSize, startPtr, _width*_pixelSize,
w_tile*_pixelSize, h_tile, hipMemcpyHostToDevice,stream));
else {
// get the total tile size in bytes
size_t tileSize = h_tile*w_tile*_pixelSize;
// if the size is bigger than existing buffer, reallocate
if (tileSize > _bufferSize) {
// maybe we need to make it to fit the pagesize
_bufferSize = tileSize;
            checkCudaErrors(hipHostFree(_memPtr));
checkCudaErrors(hipHostMalloc((void **)&_memPtr, _bufferSize));
}
// copy from file to buffer
CPLErr err = _poBand->RasterIO(GF_Read, //eRWFlag
w_offset, h_offset, //nXOff, nYOff
w_tile, h_tile, // nXSize, nYSize
_memPtr, // pData
w_tile*h_tile, 1, // nBufXSize, nBufYSize
_dataType, //eBufType
0, 0, //nPixelSpace, nLineSpace in pData
NULL //psExtraArg extra resampling callback
);
if(err != CE_None)
throw;
// copy from buffer to gpu
checkCudaErrors(hipMemcpyAsync(dArray, _memPtr, tileSize, hipMemcpyHostToDevice, stream));
}
}
GDALImage::~GDALImage()
{
    if (_useMmap) {
        // free the virtual memory
        CPLVirtualMemFree(_poBandVirtualMem);
    } else {
        // free the pinned staging buffer
        checkCudaErrors(hipHostFree(_memPtr));
    }
    // free the GDAL Dataset, close the file
    delete _poDataset;
}
// end of file
| 864d732ee394ea4ad6fc1186426907b14b30fa89.cu | #include "GDALImage.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <assert.h>
#include <cublas_v2.h>
#include "cudaError.h"
#include <errno.h>
#include <unistd.h>
/**
* \brief Constructor
*
* @param filename a std::string with the raster image file name
*/
GDALImage::GDALImage(std::string filename, int band, int cacheSizeInGB, int useMmap)
: _useMmap(useMmap)
{
// open the file as dataset
_poDataset = (GDALDataset *) GDALOpen(filename.c_str(), GA_ReadOnly );
// if something is wrong, throw an exception
// GDAL reports the error message
if(!_poDataset)
throw;
// check the band info
int count = _poDataset->GetRasterCount();
if(band > count)
{
        std::cout << "The desired band " << band << " exceeds the " << count << " bands available";
throw;
}
// get the desired band
_poBand = _poDataset->GetRasterBand(band);
if(!_poBand)
throw;
// get the width(x), and height(y)
_width = _poBand->GetXSize();
_height = _poBand->GetYSize();
_dataType = _poBand->GetRasterDataType();
// determine the image type
_isComplex = GDALDataTypeIsComplex(_dataType);
// determine the pixel size in bytes
_pixelSize = GDALGetDataTypeSize(_dataType);
_bufferSize = 1024*1024*cacheSizeInGB;
// checking whether using memory map
if(_useMmap) {
char **papszOptions = NULL;
// if cacheSizeInGB = 0, use default
// else set the option
if(cacheSizeInGB > 0)
papszOptions = CSLSetNameValue( papszOptions,
"CACHE_SIZE",
std::to_string(_bufferSize).c_str());
// space between two lines
GIntBig pnLineSpace;
// set up the virtual mem buffer
_poBandVirtualMem = GDALGetVirtualMemAuto(
static_cast<GDALRasterBandH>(_poBand),
GF_Read,
&_pixelSize,
&pnLineSpace,
papszOptions);
// check it
if(!_poBandVirtualMem)
throw;
// get the starting pointer
_memPtr = CPLVirtualMemGetAddr(_poBandVirtualMem);
}
else { // use a buffer
checkCudaErrors(cudaMallocHost((void **)&_memPtr, _bufferSize));
}
// make sure memPtr is not Null
if (!_memPtr)
throw;
// all done
}
/// load a tile of data h_tile x w_tile from CPU (mmap) to GPU
/// @param dArray pointer for array in device memory
/// @param h_offset Down/Height offset
/// @param w_offset Across/Width offset
/// @param h_tile Down/Height tile size
/// @param w_tile Across/Width tile size
/// @param stream CUDA stream for copying
void GDALImage::loadToDevice(void *dArray, size_t h_offset, size_t w_offset, size_t h_tile, size_t w_tile, cudaStream_t stream)
{
size_t tileStartOffset = (h_offset*_width + w_offset)*_pixelSize;
char * startPtr = (char *)_memPtr ;
startPtr += tileStartOffset;
// @note
// We assume down/across directions as rows/cols. Therefore, SLC mmap and device array are both row major.
// cuBlas assumes both source and target arrays are column major.
// To use cublasSetMatrix, we need to switch w_tile/h_tile for rows/cols
// checkCudaErrors(cublasSetMatrixAsync(w_tile, h_tile, sizeof(float2), startPtr, width, dArray, w_tile, stream));
if (_useMmap)
checkCudaErrors(cudaMemcpy2DAsync(dArray, w_tile*_pixelSize, startPtr, _width*_pixelSize,
w_tile*_pixelSize, h_tile, cudaMemcpyHostToDevice,stream));
else {
// get the total tile size in bytes
size_t tileSize = h_tile*w_tile*_pixelSize;
// if the size is bigger than existing buffer, reallocate
if (tileSize > _bufferSize) {
// maybe we need to make it to fit the pagesize
_bufferSize = tileSize;
            checkCudaErrors(cudaFreeHost(_memPtr));
checkCudaErrors(cudaMallocHost((void **)&_memPtr, _bufferSize));
}
// copy from file to buffer
CPLErr err = _poBand->RasterIO(GF_Read, //eRWFlag
w_offset, h_offset, //nXOff, nYOff
w_tile, h_tile, // nXSize, nYSize
_memPtr, // pData
w_tile*h_tile, 1, // nBufXSize, nBufYSize
_dataType, //eBufType
0, 0, //nPixelSpace, nLineSpace in pData
NULL //psExtraArg extra resampling callback
);
if(err != CE_None)
throw;
// copy from buffer to gpu
checkCudaErrors(cudaMemcpyAsync(dArray, _memPtr, tileSize, cudaMemcpyHostToDevice, stream));
}
}
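// Hypothetical usage sketch (file name, buffer and offsets are illustrative only,
// not taken from this source):
//   GDALImage img("slc.vrt", 1, 0, 1);                  // band 1, default cache, mmap path
//   img.loadToDevice(dTile, rowOff, colOff, 512, 512, stream);
// where dTile must be a device buffer of at least 512*512*pixelSize bytes.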
GDALImage::~GDALImage()
{
// free the virtual memory
    CPLVirtualMemFree(_poBandVirtualMem);
// free the GDAL Dataset, close the file
delete _poDataset;
}
// end of file
|
3afdb48d013da6209c23f8b3c49aeb51e66d5c3b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string>
#include <fstream>
#include <cmath>
#include <string>
#include <iostream>
using namespace std;
typedef struct {
double x,y,z;
} XYZ;
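// GRIDCELL describes one voxel cell of the volume: the positions of its eight
// corners and the scalar value sampled at each corner. TRIANGLE is the output
// primitive of the marching-cubes polygonisation.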
typedef struct {
XYZ p[8];
double val[8];
} GRIDCELL;
typedef struct {
XYZ p[3]; /* Vertices */
XYZ c; /* Centroid */
XYZ n[3]; /* Normal */
} TRIANGLE;
#define ABS(x) (x < 0 ? -(x) : (x))
// Prototypes
//__global__
//int PolygoniseCube(GRIDCELL,double,TRIANGLE *);
//XYZ VertexInterp(double,XYZ,XYZ,double,double);
/*
#define NX 200
#define NY 160
#define NZ 160
*/
#define NX 68//200
#define NY 256//160
#define NZ 256//160
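// NX, NY and NZ are the hard-wired dimensions of the raw volume; the kernel
// launch below assigns roughly one thread to each (i,j,k) grid point.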
void fillMatrix(XYZ* a, int n)
{
int i;
for (i = 0; i < n; ++i)
{
a[i].x = 3;
a[i].y = 2;
a[i].z = 5;//rand()%5;
}
}
__global__
void matrixAdition(XYZ * b, XYZ *a,int n)
{
int ij = threadIdx.x + blockDim.x * blockIdx.x;
if(ij<n)
{
b[ij].x = a[ij].x+2;
b[ij].y = a[ij].y+3;
b[ij].z = a[ij].z+0;
//printf("da %d \n" , b[ij].x);
}
}
void printMatrix(string s, XYZ *a , int tam){
cout<<s;
for(int i=0;i<tam;i++)
{
if(a[i].x!=0 && a[i].y!=0 && a[i].z!=0)
{
cout<<a[i].x<<" "<<a[i].y<<" "<<a[i].z<<" ";
cout<<endl;
}
}
}
void assingMem(int *** data)
{
int i,j;
data = (int ***)malloc(NX*sizeof(short int **));
for (i=0;i<NX;i++)
data[i] = (int **)malloc(NY*sizeof(short int *));
for (i=0;i<NX;i++)
for (j=0;j<NY;j++)
data[i][j] = (int *)malloc(NZ*sizeof(short int));
}
void readFile(FILE *fptr, const char * namefile , int themin , int themax, int *** data)
{
int i,j,k,c;
fprintf(stderr,"Load data ...\n");
if ((fptr = fopen(namefile,"rb")) == NULL) {
fprintf(stderr,"Error al leer archivo\n");
exit(-1);
}
for (k=0;k<NZ;k++) {
for (j=0;j<NY;j++) {
for (i=0;i<NX;i++) {
if ((c = fgetc(fptr)) == EOF) {
fprintf(stderr,"Error en tamao\n");
exit(-1);
}
data[i][j][k] = c;
cout<<"leyendo :"<<c<<endl;
if (c > themax)
themax = c;
if (c < themin)
themin = c;
}
}
}
fclose(fptr);
fprintf(stderr,"Rango del volumen: %d -> %d\n",themin,themax);
}
void constructCubes(GRIDCELL * vectGrids, int *** data, int gtam)
{
int i,j,k;
//fprintf(stderr,"Construyendo Cubos ...\n");
int cont=0;
for (i=0;i<NX-1;i++) {
//cout<<i<<endl;
//if (i % (NX/10) == 0)
//fprintf(stderr," Slice %d de %d\n",i,NX);
for (j=0;j<NY-1;j++) {
for (k=0;k<NZ-1;k++) {
GRIDCELL grid;
grid.p[0].x = i;
grid.p[0].y = j;
grid.p[0].z = k;
grid.val[0] = data[i][j][k];
grid.p[1].x = i+1;
grid.p[1].y = j;
grid.p[1].z = k;
grid.val[1] = data[i+1][j][k];
grid.p[2].x = i+1;
grid.p[2].y = j+1;
grid.p[2].z = k;
grid.val[2] = data[i+1][j+1][k];
grid.p[3].x = i;
grid.p[3].y = j+1;
grid.p[3].z = k;
grid.val[3] = data[i][j+1][k];
grid.p[4].x = i;
grid.p[4].y = j;
grid.p[4].z = k+1;
grid.val[4] = data[i][j][k+1];
grid.p[5].x = i+1;
grid.p[5].y = j;
grid.p[5].z = k+1;
grid.val[5] = data[i+1][j][k+1];
grid.p[6].x = i+1;
grid.p[6].y = j+1;
grid.p[6].z = k+1;
grid.val[6] = data[i+1][j+1][k+1];
grid.p[7].x = i;
grid.p[7].y = j+1;
grid.p[7].z = k+1;
grid.val[7] = data[i][j+1][k+1];
vectGrids[i+j*NY+k*NY*NZ]=grid;
cont++;
//cout<<cont<<endl;
}
}
}
}
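// VertexInterp: linear interpolation of the point where the scalar field
// reaches 'isolevel' along the edge between corners p1 (value valp1) and
// p2 (value valp2); near-degenerate cases fall back to returning p1 or p2.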
__device__
XYZ VertexInterp(double isolevel,XYZ p1,XYZ p2,double valp1,double valp2)
{
double mu;
XYZ p;
if (ABS(isolevel-valp1) < 0.00001)
return(p1);
if (ABS(isolevel-valp2) < 0.00001)
return(p2);
if (ABS(valp1-valp2) < 0.00001)
return(p1);
mu = (isolevel - valp1) / (valp2 - valp1);
p.x = p1.x + mu * (p2.x - p1.x);
p.y = p1.y + mu * (p2.y - p1.y);
p.z = p1.z + mu * (p2.z - p1.z);
return p;
}
__device__
void copyXYZ(XYZ &a, XYZ &b)
{
a.x=b.x ; a.y=b.y ; a.z = b.z;
}
__device__
XYZ defect()
{
XYZ a;
a.x=300 ; a.y=300 ; a.z = 300;
return a;
}
__global__
void coyGRID(GRIDCELL * a, GRIDCELL * b, int x, int y, int z)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
/*if(i<x && j<y && k<z)
{
a[ij].p = b[ij].p;
a[ij].val = b[ij].val;
}*/
}
__global__
void copyGRID1(GRIDCELL * a, GRIDCELL * b, int x, int y, int z)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if(i<x && j<y && k<z)
{
for(int w=0;w<8;w++)
{
a[i+j*y+k*y*z].p[w] = b[i+j*y+k*y*z].p[w];
a[i+j*y+k*y*z].val[w] = b[i+j*y+k*y*z].val[w];
}
}
}
/*
__global__
void PolygoniseCube(XYZ * vertlist ,GRIDCELL * g ,double iso, int x ,int y , int z)
*/
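// PolygoniseCube: each thread takes the cell addressed by its 3-D thread
// coordinates (i,j,k), classifies the cell's corners against the isolevel and
// writes the interpolated edge-crossing vertices for that cell into vertlist.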
__global__
void PolygoniseCube(XYZ * vertlist ,GRIDCELL * g ,double iso, int x ,int y , int z)
{
//printf("g %d \n",iso);
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if(i<x && j<y && k<z)
{
//printf("thread %d \n", g[i].p[7].x);
int cubeindex;
//int tamVert=12;
//XYZ vertlist[12];
int edgeTable[256]={
0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0 };
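	// cubeindex encodes which of the 8 corners lie below the isolevel;
	// edgeTable[cubeindex] is a 12-bit mask whose bit e is set when cube
	// edge e is crossed by the isosurface for that corner configuration.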
//int i,ntri = 0;
cubeindex = 0;
if (g[i+j*y+k*y*z].val[0] < iso) cubeindex |= 1;
if (g[i+j*y+k*y*z].val[1] < iso) cubeindex |= 2;
if (g[i+j*y+k*y*z].val[2] < iso) cubeindex |= 4;
if (g[i+j*y+k*y*z].val[3] < iso) cubeindex |= 8;
if (g[i+j*y+k*y*z].val[4] < iso) cubeindex |= 16;
if (g[i+j*y+k*y*z].val[5] < iso) cubeindex |= 32;
if (g[i+j*y+k*y*z].val[6] < iso) cubeindex |= 64;
if (g[i+j*y+k*y*z].val[7] < iso) cubeindex |= 128;
//XYZ a;
//a.x=20 ; a.y=50; a,z=0;
//vertlist[i+j*y+k*y*z+0].x=g[i+j*y+k*y*z].val[6];
//vertlist[i+j*y+k*y*z+0].y=10;
//vertlist[i+j*y+k*y*z+0].z=10;*/
/* Cube is entirely in/out of the surface */
if (edgeTable[cubeindex] == 0)
return;
/* Find the vertices where the surface intersects the cube */
if (edgeTable[cubeindex] & 1) {
vertlist[i+j*y+k*y*z+0] = VertexInterp(iso,g[i+j*y+k*y*z].p[0],g[i+j*y+k*y*z].p[1],g[i+j*y+k*y*z].val[0],g[i+j*y+k*y*z].val[1]);
}
if (edgeTable[cubeindex] & 2) {
vertlist[i+j*y+k*y*z+1] = VertexInterp(iso,g[i+j*y+k*y*z].p[1],g[i+j*y+k*y*z].p[2],g[i+j*y+k*y*z].val[1],g[i+j*y+k*y*z].val[2]);
}
if (edgeTable[cubeindex] & 4) {
vertlist[i+j*y+k*y*z+2] = VertexInterp(iso,g[i+j*y+k*y*z].p[2],g[i+j*y+k*y*z].p[3],g[i+j*y+k*y*z].val[2],g[i+j*y+k*y*z].val[3]);
}
if (edgeTable[cubeindex] & 8) {
vertlist[i+j*y+k*y*z+3] = VertexInterp(iso,g[i+j*y+k*y*z].p[3],g[i+j*y+k*y*z].p[0],g[i+j*y+k*y*z].val[3],g[i+j*y+k*y*z].val[0]);
}
if (edgeTable[cubeindex] & 16) {
vertlist[i+j*y+k*y*z+4] = VertexInterp(iso,g[i+j*y+k*y*z].p[4],g[i+j*y+k*y*z].p[5],g[i+j*y+k*y*z].val[4],g[i+j*y+k*y*z].val[5]);
}
if (edgeTable[cubeindex] & 32) {
vertlist[i+j*y+k*y*z+5] = VertexInterp(iso,g[i+j*y+k*y*z].p[5],g[i+j*y+k*y*z].p[6],g[i+j*y+k*y*z].val[5],g[i+j*y+k*y*z].val[6]);
}
if (edgeTable[cubeindex] & 64) {
vertlist[i+j*y+k*y*z+6] = VertexInterp(iso,g[i+j*y+k*y*z].p[6],g[i+j*y+k*y*z].p[7],g[i+j*y+k*y*z].val[6],g[i+j*y+k*y*z].val[7]);
}
if (edgeTable[cubeindex] & 128) {
vertlist[i+j*y+k*y*z+7] = VertexInterp(iso,g[i+j*y+k*y*z].p[7],g[i+j*y+k*y*z].p[4],g[i+j*y+k*y*z].val[7],g[i+j*y+k*y*z].val[4]);
}
if (edgeTable[cubeindex] & 256) {
vertlist[i+j*y+k*y*z+8] = VertexInterp(iso,g[i+j*y+k*y*z].p[0],g[i+j*y+k*y*z].p[4],g[i+j*y+k*y*z].val[0],g[i+j*y+k*y*z].val[4]);
}
if (edgeTable[cubeindex] & 512) {
vertlist[i+j*y+k*y*z+9] = VertexInterp(iso,g[i+j*y+k*y*z].p[1],g[i+j*y+k*y*z].p[5],g[i+j*y+k*y*z].val[1],g[i+j*y+k*y*z].val[5]);
}
if (edgeTable[cubeindex] & 1024) {
vertlist[i+j*y+k*y*z+10] = VertexInterp(iso,g[i+j*y+k*y*z].p[2],g[i+j*y+k*y*z].p[6],g[i+j*y+k*y*z].val[2],g[i+j*y+k*y*z].val[6]);
}
if (edgeTable[cubeindex] & 2048) {
vertlist[i+j*y+k*y*z+11] = VertexInterp(iso,g[i+j*y+k*y*z].p[3],g[i+j*y+k*y*z].p[7],g[i+j*y+k*y*z].val[3],g[i+j*y+k*y*z].val[7]);
}
// printf("hasta aqui llega \n");
}
}
void printGrid(string a, GRIDCELL * g, int tam)
{
cout<<a;
for(int i =0; i<tam ;i++)
for(int j=0;j<8;j++)
//printf("%f %f %f \n", g[i].p[j].x ,g[i].p[j].y,g[i].p[j].z);
printf("%f \n", g[i].val[j]);
}
int main(int argc, char *argv[])
{
int i,j,k,c;
int ***data;
FILE *fptr;
int N= (NX*NY*NZ);
cout<<N<<endl; //return 1;
int THREADS_PER_BLOCK =8;
int themin=255;
int themax=0;
int isolevel=80;
//const char* FILENAME = "mri.raw";
//assingMem(data);
//readFile(fptr,FILENAME,themin, themax,data);
// Malloc the volumetric data, hardwired size!
data = (int***)malloc(NX*sizeof(int **));
for (i=0;i<NX;i++)
data[i] = (int**)malloc(NY*sizeof(int *));
for (i=0;i<NX;i++)
for (j=0;j<NY;j++)
data[i][j] = (int*)malloc(NZ*sizeof(int));
//cout<<data[199][60][0]<<endl;
// Open and read the raw data
fprintf(stderr,"Reading data ...\n");
if ((fptr = fopen(argv[argc-1],"rb")) == NULL) {
fprintf(stderr,"File open failed\n");
exit(-1);
}
cout<<"llega"<<endl;
for (k=0;k<NZ;k++) {
for (j=0;j<NY;j++) {
for (i=0;i<NX;i++) {
if ((c = fgetc(fptr)) == EOF) {
fprintf(stderr,"Unexpected end of file\n");
exit(-1);
}
data[i][j][k] = c;
//cout<<i<<" "<<j <<" "<<k <<" data : "<<data[i][j][k]<<endl;
if (c > themax)
themax = c;
if (c < themin)
themin = c;
}
}
}
fclose(fptr);
fprintf(stderr,"Volumetric data range: %d -> %d\n",themin,themax);
int sizeGRID = N*sizeof(GRIDCELL);
cout<<"pasa"<<endl;
int sizeXYZ = N*12*sizeof(XYZ);
cout<<"sizeGRID "<<sizeGRID<<endl;
cout<<"sizeXYZ "<<sizeXYZ<<endl;
//hipMalloc((void **)&d_b, size);
GRIDCELL * vectGrids;
GRIDCELL * d_vectGrid;
XYZ * d_points;
XYZ * points;
points = (XYZ *)malloc(sizeXYZ);
vectGrids = (GRIDCELL *)malloc(sizeGRID);
constructCubes(vectGrids,data,N);
/*
typedef struct {
double x,y,z;
} XYZ;
typedef struct {
XYZ p[8];
double val[8];
} GRIDCELL;
*/
XYZ * d_p; double * d_val;
size_t available, total;
hipMemGetInfo(&available, &total);
cout<<"available: " << available<<" total: "<<total <<endl;
hipMalloc((void **)&d_vectGrid, sizeGRID);
/*
for(int i=0;i<N;i++)
{
hipMalloc((void**)&d_p,8*sizeof(XYZ));
//hipMemGetInfo(&available, &total);
//cout<<"available: " << available<<" total: "<<total <<endl;
hipMalloc((void**)&d_val,8*sizeof(double));
//hipMemGetInfo(&available, &total);
//cout<<"available: " << available<<" total: "<<total <<endl;
hipMemcpy(d_p,vectGrids[i].p,8*sizeof(XYZ),hipMemcpyHostToDevice);
//for(int w=0;w<8;w++)
//{
cout<<vectGrids[i].p[w].y<<endl;
//}
//hipMemGetInfo(&available, &total);
//cout<<"available: " << available<<" total: "<<total <<endl;
hipMemcpy(d_val,vectGrids[i].val,8*sizeof(double),hipMemcpyHostToDevice);
//hipMemGetInfo(&available, &total);
//cout<<"available: " << available<<" total: "<<total <<endl
hipMemGetInfo(&available, &total);
//cout<<"available: " << available<<" total: "<<total <<endl;
hipMemcpy(d_vectGrid[i].val, d_val, 8*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(d_vectGrid[i].p, d_p, 8*sizeof(XYZ),hipMemcpyHostToDevice);
}*/
hipMemcpy(d_vectGrid,vectGrids, sizeGRID, hipMemcpyHostToDevice);
cout<<"termino de asignar memoria"<<endl;
XYZ * d_a, * d_sal;
GRIDCELL * d_res;
d_sal=(XYZ *)malloc(sizeXYZ);
hipMalloc((void **)&d_res, sizeGRID);
hipMalloc((void **)&d_a, sizeXYZ);
hipMalloc((void **)&d_points, sizeXYZ);
//cout<<"grid "<<vectGrids<<endl;
//cout<<"point "<<points<<endl;
//fillMatrix(points, N);
printMatrix("imprimiendo pruevba",points, 10);
hipMemcpy(d_points, points, sizeXYZ, hipMemcpyHostToDevice);
cout<<"grid "<<d_vectGrid<<endl;
cout<<"pointsssss "<<d_points<<endl;
//printf("dir %d \n",*d_points);
cout<<"separa memoria sin problemas"<<endl;
//printGrid("imprimiendo Grid inicial en Host \n ",vectGrids,N);
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
int x = NX; int y = NY ; int z = NZ;
int blockX= (NX + THREADS_PER_BLOCK -1)/THREADS_PER_BLOCK;
int blockY= (NY + THREADS_PER_BLOCK -1)/THREADS_PER_BLOCK;
int blockZ= (NZ + THREADS_PER_BLOCK -1)/THREADS_PER_BLOCK;
cout<<"blocks : "<<blockX<<" threds: "<<THREADS_PER_BLOCK<<endl;
cout<<"blocks : "<<blockY<<" threds: "<<THREADS_PER_BLOCK<<endl;
cout<<"blocks : "<<blockZ<<" threds: "<<THREADS_PER_BLOCK<<endl;
//int blocks= (10 + THREADS_PER_BLOCK -1)/THREADS_PER_BLOCK;
/*cout<<"blocks : \n"<<blocks<<"\n threds: \n "<<THREADS_PER_BLOCK<<endl; */
dim3 dimGrid(blockX, blockY, blockZ);
dim3 dimBlock(THREADS_PER_BLOCK,THREADS_PER_BLOCK, THREADS_PER_BLOCK);
hipEventRecord(start,0);
isolevel=10;
//copyGRID1<<<dimGrid,dimBlock>>>(d_res,d_vectGrid,x,y,z);
hipLaunchKernelGGL(( PolygoniseCube), dim3(dimGrid),dim3(dimBlock), 0, 0, d_points,d_vectGrid,isolevel,x,y,z);
//PolygoniseCube<<<blocks,THREADS_PER_BLOCK>>>(d_points,d_vectGrids,isolevel);
//matrixAdition<<<blocks,THREADS_PER_BLOCK>>>(d_a, d_points,10);
//matrixAditionCol<<<blocks2,THREADS_PER_BLOCK>>>( d_c, d_a, d_b,N);
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start,stop);
printf("Elapsed time : %f ms\n" ,elapsedTime);
hipMemcpy(points,d_points, sizeXYZ, hipMemcpyDeviceToHost);
//GRIDCELL * res;
//res = (GRIDCELL *)malloc(sizeGRID);
//hipMemcpy(res,d_vectGrid, sizeGRID, hipMemcpyDeviceToHost);
//printGrid("imprimiendo Grid final despues de la copia \n ",res,N);
//printMatrix("Printing Matrix A \n",points,N);
/*/printMatrix("Printing Matrix B \n",b,N);
//printMatrix("Printing Matrix C \n",c,N);
*/
free(points); free(vectGrids);
hipFree(d_points); hipFree(d_vectGrid);
return 0;
} | 3afdb48d013da6209c23f8b3c49aeb51e66d5c3b.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string>
#include <fstream>
#include <cmath>
#include <string>
#include <iostream>
using namespace std;
typedef struct {
double x,y,z;
} XYZ;
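// GRIDCELL describes one voxel cell of the volume: the positions of its eight
// corners and the scalar value sampled at each corner. TRIANGLE is the output
// primitive of the marching-cubes polygonisation.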
typedef struct {
XYZ p[8];
double val[8];
} GRIDCELL;
typedef struct {
XYZ p[3]; /* Vertices */
XYZ c; /* Centroid */
XYZ n[3]; /* Normal */
} TRIANGLE;
#define ABS(x) (x < 0 ? -(x) : (x))
// Prototypes
//__global__
//int PolygoniseCube(GRIDCELL,double,TRIANGLE *);
//XYZ VertexInterp(double,XYZ,XYZ,double,double);
/*
#define NX 200
#define NY 160
#define NZ 160
*/
#define NX 68//200
#define NY 256//160
#define NZ 256//160
void fillMatrix(XYZ* a, int n)
{
int i;
for (i = 0; i < n; ++i)
{
a[i].x = 3;
a[i].y = 2;
a[i].z = 5;//rand()%5;
}
}
__global__
void matrixAdition(XYZ * b, XYZ *a,int n)
{
int ij = threadIdx.x + blockDim.x * blockIdx.x;
if(ij<n)
{
b[ij].x = a[ij].x+2;
b[ij].y = a[ij].y+3;
b[ij].z = a[ij].z+0;
//printf("da %d \n" , b[ij].x);
}
}
void printMatrix(string s, XYZ *a , int tam){
cout<<s;
for(int i=0;i<tam;i++)
{
if(a[i].x!=0 && a[i].y!=0 && a[i].z!=0)
{
cout<<a[i].x<<" "<<a[i].y<<" "<<a[i].z<<" ";
cout<<endl;
}
}
}
void assingMem(int *** data)
{
int i,j;
data = (int ***)malloc(NX*sizeof(short int **));
for (i=0;i<NX;i++)
data[i] = (int **)malloc(NY*sizeof(short int *));
for (i=0;i<NX;i++)
for (j=0;j<NY;j++)
data[i][j] = (int *)malloc(NZ*sizeof(short int));
}
void readFile(FILE *fptr, const char * namefile , int themin , int themax, int *** data)
{
int i,j,k,c;
fprintf(stderr,"Load data ...\n");
if ((fptr = fopen(namefile,"rb")) == NULL) {
fprintf(stderr,"Error al leer archivo\n");
exit(-1);
}
for (k=0;k<NZ;k++) {
for (j=0;j<NY;j++) {
for (i=0;i<NX;i++) {
if ((c = fgetc(fptr)) == EOF) {
fprintf(stderr,"Error en tamaño\n");
exit(-1);
}
data[i][j][k] = c;
cout<<"leyendo :"<<c<<endl;
if (c > themax)
themax = c;
if (c < themin)
themin = c;
}
}
}
fclose(fptr);
fprintf(stderr,"Rango del volumen: %d -> %d\n",themin,themax);
}
void constructCubes(GRIDCELL * vectGrids, int *** data, int gtam)
{
int i,j,k;
//fprintf(stderr,"Construyendo Cubos ...\n");
int cont=0;
for (i=0;i<NX-1;i++) {
//cout<<i<<endl;
//if (i % (NX/10) == 0)
//fprintf(stderr," Slice %d de %d\n",i,NX);
for (j=0;j<NY-1;j++) {
for (k=0;k<NZ-1;k++) {
GRIDCELL grid;
grid.p[0].x = i;
grid.p[0].y = j;
grid.p[0].z = k;
grid.val[0] = data[i][j][k];
grid.p[1].x = i+1;
grid.p[1].y = j;
grid.p[1].z = k;
grid.val[1] = data[i+1][j][k];
grid.p[2].x = i+1;
grid.p[2].y = j+1;
grid.p[2].z = k;
grid.val[2] = data[i+1][j+1][k];
grid.p[3].x = i;
grid.p[3].y = j+1;
grid.p[3].z = k;
grid.val[3] = data[i][j+1][k];
grid.p[4].x = i;
grid.p[4].y = j;
grid.p[4].z = k+1;
grid.val[4] = data[i][j][k+1];
grid.p[5].x = i+1;
grid.p[5].y = j;
grid.p[5].z = k+1;
grid.val[5] = data[i+1][j][k+1];
grid.p[6].x = i+1;
grid.p[6].y = j+1;
grid.p[6].z = k+1;
grid.val[6] = data[i+1][j+1][k+1];
grid.p[7].x = i;
grid.p[7].y = j+1;
grid.p[7].z = k+1;
grid.val[7] = data[i][j+1][k+1];
vectGrids[i+j*NY+k*NY*NZ]=grid;
cont++;
//cout<<cont<<endl;
}
}
}
}
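// VertexInterp: linear interpolation of the point where the scalar field
// reaches 'isolevel' along the edge between corners p1 (value valp1) and
// p2 (value valp2); near-degenerate cases fall back to returning p1 or p2.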
__device__
XYZ VertexInterp(double isolevel,XYZ p1,XYZ p2,double valp1,double valp2)
{
double mu;
XYZ p;
if (ABS(isolevel-valp1) < 0.00001)
return(p1);
if (ABS(isolevel-valp2) < 0.00001)
return(p2);
if (ABS(valp1-valp2) < 0.00001)
return(p1);
mu = (isolevel - valp1) / (valp2 - valp1);
p.x = p1.x + mu * (p2.x - p1.x);
p.y = p1.y + mu * (p2.y - p1.y);
p.z = p1.z + mu * (p2.z - p1.z);
return p;
}
__device__
void copyXYZ(XYZ &a, XYZ &b)
{
a.x=b.x ; a.y=b.y ; a.z = b.z;
}
__device__
XYZ defect()
{
XYZ a;
a.x=300 ; a.y=300 ; a.z = 300;
return a;
}
__global__
void coyGRID(GRIDCELL * a, GRIDCELL * b, int x, int y, int z)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
/*if(i<x && j<y && k<z)
{
a[ij].p = b[ij].p;
a[ij].val = b[ij].val;
}*/
}
__global__
void copyGRID1(GRIDCELL * a, GRIDCELL * b, int x, int y, int z)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if(i<x && j<y && k<z)
{
for(int w=0;w<8;w++)
{
a[i+j*y+k*y*z].p[w] = b[i+j*y+k*y*z].p[w];
a[i+j*y+k*y*z].val[w] = b[i+j*y+k*y*z].val[w];
}
}
}
/*
__global__
void PolygoniseCube(XYZ * vertlist ,GRIDCELL * g ,double iso, int x ,int y , int z)
*/
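// PolygoniseCube: each thread takes the cell addressed by its 3-D thread
// coordinates (i,j,k), classifies the cell's corners against the isolevel and
// writes the interpolated edge-crossing vertices for that cell into vertlist.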
__global__
void PolygoniseCube(XYZ * vertlist ,GRIDCELL * g ,double iso, int x ,int y , int z)
{
//printf("g %d \n",iso);
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if(i<x && j<y && k<z)
{
//printf("thread %d \n", g[i].p[7].x);
int cubeindex;
//int tamVert=12;
//XYZ vertlist[12];
int edgeTable[256]={
0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0 };
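	// cubeindex encodes which of the 8 corners lie below the isolevel;
	// edgeTable[cubeindex] is a 12-bit mask whose bit e is set when cube
	// edge e is crossed by the isosurface for that corner configuration.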
//int i,ntri = 0;
cubeindex = 0;
if (g[i+j*y+k*y*z].val[0] < iso) cubeindex |= 1;
if (g[i+j*y+k*y*z].val[1] < iso) cubeindex |= 2;
if (g[i+j*y+k*y*z].val[2] < iso) cubeindex |= 4;
if (g[i+j*y+k*y*z].val[3] < iso) cubeindex |= 8;
if (g[i+j*y+k*y*z].val[4] < iso) cubeindex |= 16;
if (g[i+j*y+k*y*z].val[5] < iso) cubeindex |= 32;
if (g[i+j*y+k*y*z].val[6] < iso) cubeindex |= 64;
if (g[i+j*y+k*y*z].val[7] < iso) cubeindex |= 128;
//XYZ a;
//a.x=20 ; a.y=50; a,z=0;
//vertlist[i+j*y+k*y*z+0].x=g[i+j*y+k*y*z].val[6];
//vertlist[i+j*y+k*y*z+0].y=10;
//vertlist[i+j*y+k*y*z+0].z=10;*/
/* Cube is entirely in/out of the surface */
if (edgeTable[cubeindex] == 0)
return;
/* Find the vertices where the surface intersects the cube */
if (edgeTable[cubeindex] & 1) {
vertlist[i+j*y+k*y*z+0] = VertexInterp(iso,g[i+j*y+k*y*z].p[0],g[i+j*y+k*y*z].p[1],g[i+j*y+k*y*z].val[0],g[i+j*y+k*y*z].val[1]);
}
if (edgeTable[cubeindex] & 2) {
vertlist[i+j*y+k*y*z+1] = VertexInterp(iso,g[i+j*y+k*y*z].p[1],g[i+j*y+k*y*z].p[2],g[i+j*y+k*y*z].val[1],g[i+j*y+k*y*z].val[2]);
}
if (edgeTable[cubeindex] & 4) {
vertlist[i+j*y+k*y*z+2] = VertexInterp(iso,g[i+j*y+k*y*z].p[2],g[i+j*y+k*y*z].p[3],g[i+j*y+k*y*z].val[2],g[i+j*y+k*y*z].val[3]);
}
if (edgeTable[cubeindex] & 8) {
vertlist[i+j*y+k*y*z+3] = VertexInterp(iso,g[i+j*y+k*y*z].p[3],g[i+j*y+k*y*z].p[0],g[i+j*y+k*y*z].val[3],g[i+j*y+k*y*z].val[0]);
}
if (edgeTable[cubeindex] & 16) {
vertlist[i+j*y+k*y*z+4] = VertexInterp(iso,g[i+j*y+k*y*z].p[4],g[i+j*y+k*y*z].p[5],g[i+j*y+k*y*z].val[4],g[i+j*y+k*y*z].val[5]);
}
if (edgeTable[cubeindex] & 32) {
vertlist[i+j*y+k*y*z+5] = VertexInterp(iso,g[i+j*y+k*y*z].p[5],g[i+j*y+k*y*z].p[6],g[i+j*y+k*y*z].val[5],g[i+j*y+k*y*z].val[6]);
}
if (edgeTable[cubeindex] & 64) {
vertlist[i+j*y+k*y*z+6] = VertexInterp(iso,g[i+j*y+k*y*z].p[6],g[i+j*y+k*y*z].p[7],g[i+j*y+k*y*z].val[6],g[i+j*y+k*y*z].val[7]);
}
if (edgeTable[cubeindex] & 128) {
vertlist[i+j*y+k*y*z+7] = VertexInterp(iso,g[i+j*y+k*y*z].p[7],g[i+j*y+k*y*z].p[4],g[i+j*y+k*y*z].val[7],g[i+j*y+k*y*z].val[4]);
}
if (edgeTable[cubeindex] & 256) {
vertlist[i+j*y+k*y*z+8] = VertexInterp(iso,g[i+j*y+k*y*z].p[0],g[i+j*y+k*y*z].p[4],g[i+j*y+k*y*z].val[0],g[i+j*y+k*y*z].val[4]);
}
if (edgeTable[cubeindex] & 512) {
vertlist[i+j*y+k*y*z+9] = VertexInterp(iso,g[i+j*y+k*y*z].p[1],g[i+j*y+k*y*z].p[5],g[i+j*y+k*y*z].val[1],g[i+j*y+k*y*z].val[5]);
}
if (edgeTable[cubeindex] & 1024) {
vertlist[i+j*y+k*y*z+10] = VertexInterp(iso,g[i+j*y+k*y*z].p[2],g[i+j*y+k*y*z].p[6],g[i+j*y+k*y*z].val[2],g[i+j*y+k*y*z].val[6]);
}
if (edgeTable[cubeindex] & 2048) {
vertlist[i+j*y+k*y*z+11] = VertexInterp(iso,g[i+j*y+k*y*z].p[3],g[i+j*y+k*y*z].p[7],g[i+j*y+k*y*z].val[3],g[i+j*y+k*y*z].val[7]);
}
// printf("hasta aqui llega \n");
}
}
void printGrid(string a, GRIDCELL * g, int tam)
{
cout<<a;
for(int i =0; i<tam ;i++)
for(int j=0;j<8;j++)
//printf("%f %f %f \n", g[i].p[j].x ,g[i].p[j].y,g[i].p[j].z);
printf("%f \n", g[i].val[j]);
}
int main(int argc, char *argv[])
{
int i,j,k,c;
int ***data;
FILE *fptr;
int N= (NX*NY*NZ);
cout<<N<<endl; //return 1;
int THREADS_PER_BLOCK =8;
int themin=255;
int themax=0;
int isolevel=80;
//const char* FILENAME = "mri.raw";
//assingMem(data);
//readFile(fptr,FILENAME,themin, themax,data);
// Malloc the volumetric data, hardwired size!
data = (int***)malloc(NX*sizeof(int **));
for (i=0;i<NX;i++)
data[i] = (int**)malloc(NY*sizeof(int *));
for (i=0;i<NX;i++)
for (j=0;j<NY;j++)
data[i][j] = (int*)malloc(NZ*sizeof(int));
//cout<<data[199][60][0]<<endl;
// Open and read the raw data
fprintf(stderr,"Reading data ...\n");
if ((fptr = fopen(argv[argc-1],"rb")) == NULL) {
fprintf(stderr,"File open failed\n");
exit(-1);
}
cout<<"llega"<<endl;
for (k=0;k<NZ;k++) {
for (j=0;j<NY;j++) {
for (i=0;i<NX;i++) {
if ((c = fgetc(fptr)) == EOF) {
fprintf(stderr,"Unexpected end of file\n");
exit(-1);
}
data[i][j][k] = c;
//cout<<i<<" "<<j <<" "<<k <<" data : "<<data[i][j][k]<<endl;
if (c > themax)
themax = c;
if (c < themin)
themin = c;
}
}
}
fclose(fptr);
fprintf(stderr,"Volumetric data range: %d -> %d\n",themin,themax);
int sizeGRID = N*sizeof(GRIDCELL);
cout<<"pasa"<<endl;
int sizeXYZ = N*12*sizeof(XYZ);
cout<<"sizeGRID "<<sizeGRID<<endl;
cout<<"sizeXYZ "<<sizeXYZ<<endl;
//cudaMalloc((void **)&d_b, size);
GRIDCELL * vectGrids;
GRIDCELL * d_vectGrid;
XYZ * d_points;
XYZ * points;
points = (XYZ *)malloc(sizeXYZ);
vectGrids = (GRIDCELL *)malloc(sizeGRID);
constructCubes(vectGrids,data,N);
/*
typedef struct {
double x,y,z;
} XYZ;
typedef struct {
XYZ p[8];
double val[8];
} GRIDCELL;
*/
XYZ * d_p; double * d_val;
size_t available, total;
cudaMemGetInfo(&available, &total);
cout<<"available: " << available<<" total: "<<total <<endl;
cudaMalloc((void **)&d_vectGrid, sizeGRID);
/*
for(int i=0;i<N;i++)
{
cudaMalloc((void**)&d_p,8*sizeof(XYZ));
//cudaMemGetInfo(&available, &total);
//cout<<"available: " << available<<" total: "<<total <<endl;
cudaMalloc((void**)&d_val,8*sizeof(double));
//cudaMemGetInfo(&available, &total);
//cout<<"available: " << available<<" total: "<<total <<endl;
cudaMemcpy(d_p,vectGrids[i].p,8*sizeof(XYZ),cudaMemcpyHostToDevice);
//for(int w=0;w<8;w++)
//{
cout<<vectGrids[i].p[w].y<<endl;
//}
//cudaMemGetInfo(&available, &total);
//cout<<"available: " << available<<" total: "<<total <<endl;
cudaMemcpy(d_val,vectGrids[i].val,8*sizeof(double),cudaMemcpyHostToDevice);
//cudaMemGetInfo(&available, &total);
//cout<<"available: " << available<<" total: "<<total <<endl
cudaMemGetInfo(&available, &total);
//cout<<"available: " << available<<" total: "<<total <<endl;
cudaMemcpy(d_vectGrid[i].val, d_val, 8*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(d_vectGrid[i].p, d_p, 8*sizeof(XYZ),cudaMemcpyHostToDevice);
}*/
cudaMemcpy(d_vectGrid,vectGrids, sizeGRID, cudaMemcpyHostToDevice);
cout<<"termino de asignar memoria"<<endl;
XYZ * d_a, * d_sal;
GRIDCELL * d_res;
d_sal=(XYZ *)malloc(sizeXYZ);
cudaMalloc((void **)&d_res, sizeGRID);
cudaMalloc((void **)&d_a, sizeXYZ);
cudaMalloc((void **)&d_points, sizeXYZ);
//cout<<"grid "<<vectGrids<<endl;
//cout<<"point "<<points<<endl;
//fillMatrix(points, N);
printMatrix("imprimiendo pruevba",points, 10);
cudaMemcpy(d_points, points, sizeXYZ, cudaMemcpyHostToDevice);
cout<<"grid "<<d_vectGrid<<endl;
cout<<"pointsssss "<<d_points<<endl;
//printf("dir %d \n",*d_points);
cout<<"separa memoria sin problemas"<<endl;
//printGrid("imprimiendo Grid inicial en Host \n ",vectGrids,N);
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
int x = NX; int y = NY ; int z = NZ;
int blockX= (NX + THREADS_PER_BLOCK -1)/THREADS_PER_BLOCK;
int blockY= (NY + THREADS_PER_BLOCK -1)/THREADS_PER_BLOCK;
int blockZ= (NZ + THREADS_PER_BLOCK -1)/THREADS_PER_BLOCK;
cout<<"blocks : "<<blockX<<" threds: "<<THREADS_PER_BLOCK<<endl;
cout<<"blocks : "<<blockY<<" threds: "<<THREADS_PER_BLOCK<<endl;
cout<<"blocks : "<<blockZ<<" threds: "<<THREADS_PER_BLOCK<<endl;
//int blocks= (10 + THREADS_PER_BLOCK -1)/THREADS_PER_BLOCK;
/*cout<<"blocks : \n"<<blocks<<"\n threds: \n "<<THREADS_PER_BLOCK<<endl; */
dim3 dimGrid(blockX, blockY, blockZ);
dim3 dimBlock(THREADS_PER_BLOCK,THREADS_PER_BLOCK, THREADS_PER_BLOCK);
cudaEventRecord(start,0);
isolevel=10;
//copyGRID1<<<dimGrid,dimBlock>>>(d_res,d_vectGrid,x,y,z);
PolygoniseCube<<<dimGrid,dimBlock>>>(d_points,d_vectGrid,isolevel,x,y,z);
//PolygoniseCube<<<blocks,THREADS_PER_BLOCK>>>(d_points,d_vectGrids,isolevel);
//matrixAdition<<<blocks,THREADS_PER_BLOCK>>>(d_a, d_points,10);
//matrixAditionCol<<<blocks2,THREADS_PER_BLOCK>>>( d_c, d_a, d_b,N);
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
printf("Elapsed time : %f ms\n" ,elapsedTime);
cudaMemcpy(points,d_points, sizeXYZ, cudaMemcpyDeviceToHost);
//GRIDCELL * res;
//res = (GRIDCELL *)malloc(sizeGRID);
//cudaMemcpy(res,d_vectGrid, sizeGRID, cudaMemcpyDeviceToHost);
//printGrid("imprimiendo Grid final despues de la copia \n ",res,N);
//printMatrix("Printing Matrix A \n",points,N);
/*/printMatrix("Printing Matrix B \n",b,N);
//printMatrix("Printing Matrix C \n",c,N);
*/
free(points); free(vectGrids);
cudaFree(d_points); cudaFree(d_vectGrid);
return 0;
} |
e6e84aea09b44bbdcd9daca9463b3cb3da2a9af8.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include<stdlib.h>
#include<stdio.h>
#define min(a,b) (a<b?a:b)
const int threadsPerBlock = 256;
int blocksPerGrid = 32;
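// reduce: stage 1 of a two-stage sum. Each thread accumulates a grid-stride
// partial sum, the block folds the per-thread partials in shared memory with a
// tree reduction, and output[blockIdx.x] receives one partial per block; the
// host adds the blocksPerGrid partials in main to finish the reduction.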
__global__ void reduce(float *data, float *output, int N){
__shared__ float scratch[threadsPerBlock];
int global_index = threadIdx.x + blockIdx.x * blockDim.x;
int local_index = threadIdx.x;
//if(global_index >= N) return;
float soma_bloco = 0;
while (global_index < N) {
soma_bloco += data[global_index];
global_index += blockDim.x * gridDim.x;
}
scratch[local_index] = soma_bloco;
__syncthreads();
    //Redução paralela
int i = blockDim.x/2;
while (i != 0) {
if (local_index < i && local_index+i < N )
scratch[local_index] += scratch[local_index + i];
__syncthreads();
i /= 2;
}
if (local_index == 0)
output[blockIdx.x] = scratch[0];
}
float soma_seq(float *x, int tamanho){
int i;
float sum=0;
for(i=0;i<tamanho;i++) {
sum+=x[i];
}
return sum;
}
int main(int argc, char * argv[])
{
const int N = atoi(argv[1]);
blocksPerGrid = min(32, (N+threadsPerBlock-1) / threadsPerBlock);
float *a, *b;
int size = N;
float *dev_a, *dev_b; int *dev_size;
a = (float*)malloc(N*sizeof(float));
b = (float*)malloc(N*sizeof(float));
hipMalloc( (void**)&dev_a, N * sizeof(float) );
hipMalloc( (void**)&dev_b, N * sizeof(float) );
hipMalloc( (void**)&dev_size, sizeof(int) );
for (int i=0; i<N; i++ ){
a[i]= 1.0f; b[i] = 0;
}
float valor_seq = soma_seq(a, N);
float time;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipMemcpy (dev_a,a, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy (dev_b,b, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy (dev_size, &size, 1*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( reduce), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_a, dev_b, size);
hipMemcpy(b, dev_b, blocksPerGrid*sizeof(float), hipMemcpyDeviceToHost);
float soma = 0;
    //Segundo estágio da redução
for (int i=0; i<blocksPerGrid; i++) {
soma += b[i];
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("%.10f\n", time);
if(soma != valor_seq)
printf("Soma incorreta\n");
//printf("Soma: %f\n", soma);
hipFree(dev_a);
hipFree(dev_b);
return 0;
}
| e6e84aea09b44bbdcd9daca9463b3cb3da2a9af8.cu | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include<stdlib.h>
#include<stdio.h>
#define min(a,b) (a<b?a:b)
const int threadsPerBlock = 256;
int blocksPerGrid = 32;
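// reduce: stage 1 of a two-stage sum. Each thread accumulates a grid-stride
// partial sum, the block folds the per-thread partials in shared memory with a
// tree reduction, and output[blockIdx.x] receives one partial per block; the
// host adds the blocksPerGrid partials in main to finish the reduction.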
__global__ void reduce(float *data, float *output, int N){
__shared__ float scratch[threadsPerBlock];
int global_index = threadIdx.x + blockIdx.x * blockDim.x;
int local_index = threadIdx.x;
//if(global_index >= N) return;
float soma_bloco = 0;
while (global_index < N) {
soma_bloco += data[global_index];
global_index += blockDim.x * gridDim.x;
}
scratch[local_index] = soma_bloco;
__syncthreads();
//Redução paralela
int i = blockDim.x/2;
while (i != 0) {
if (local_index < i && local_index+i < N )
scratch[local_index] += scratch[local_index + i];
__syncthreads();
i /= 2;
}
if (local_index == 0)
output[blockIdx.x] = scratch[0];
}
float soma_seq(float *x, int tamanho){
int i;
float sum=0;
for(i=0;i<tamanho;i++) {
sum+=x[i];
}
return sum;
}
int main(int argc, char * argv[])
{
const int N = atoi(argv[1]);
blocksPerGrid = min(32, (N+threadsPerBlock-1) / threadsPerBlock);
float *a, *b;
int size = N;
float *dev_a, *dev_b; int *dev_size;
a = (float*)malloc(N*sizeof(float));
b = (float*)malloc(N*sizeof(float));
cudaMalloc( (void**)&dev_a, N * sizeof(float) );
cudaMalloc( (void**)&dev_b, N * sizeof(float) );
cudaMalloc( (void**)&dev_size, sizeof(int) );
for (int i=0; i<N; i++ ){
a[i]= 1.0f; b[i] = 0;
}
float valor_seq = soma_seq(a, N);
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaMemcpy (dev_a,a, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy (dev_b,b, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy (dev_size, &size, 1*sizeof(int), cudaMemcpyHostToDevice);
reduce<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, size);
cudaMemcpy(b, dev_b, blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost);
float soma = 0;
//Segundo estágio da redução
for (int i=0; i<blocksPerGrid; i++) {
soma += b[i];
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("%.10f\n", time);
if(soma != valor_seq)
printf("Soma incorreta\n");
//printf("Soma: %f\n", soma);
cudaFree(dev_a);
cudaFree(dev_b);
return 0;
}
|
ca5ad315162efc3ebf6568ad71cb74d28c0b49b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N (2048 * 2048)
#define THREADS_PER_BLOCK 512
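// add: element-wise vector addition with one thread per element; the index
// guard covers the final partial block when N is not a multiple of
// THREADS_PER_BLOCK.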
__global__ void add(int *a,int *b,int *c,int n) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < n) {
c[index] = a[index] + b[index];
}
}
int main(void) {
int *a,*b,*c;
int *d_a,*d_b,*d_c;
int size = N * sizeof(int);
hipMalloc((void **)&d_a,size);
hipMalloc((void **)&d_b,size);
hipMalloc((void **)&d_c,size);
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(size);
for(int i = 0; i < N;i++) {
a[i] = i+1;
b[i] = i+1;
}
hipMemcpy(d_a,a,size,hipMemcpyHostToDevice);
hipMemcpy(d_b,b,size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3((N + THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK),dim3(THREADS_PER_BLOCK), 0, 0, d_a,d_b,d_c,N);
hipMemcpy(c,d_c,size,hipMemcpyDeviceToHost);
printf("Hello world %d\n",c[100]);
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
| ca5ad315162efc3ebf6568ad71cb74d28c0b49b1.cu | #include <stdio.h>
#define N (2048 * 2048)
#define THREADS_PER_BLOCK 512
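// add: element-wise vector addition with one thread per element; the index
// guard covers the final partial block when N is not a multiple of
// THREADS_PER_BLOCK.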
__global__ void add(int *a,int *b,int *c,int n) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < n) {
c[index] = a[index] + b[index];
}
}
int main(void) {
int *a,*b,*c;
int *d_a,*d_b,*d_c;
int size = N * sizeof(int);
cudaMalloc((void **)&d_a,size);
cudaMalloc((void **)&d_b,size);
cudaMalloc((void **)&d_c,size);
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(size);
for(int i = 0; i < N;i++) {
a[i] = i+1;
b[i] = i+1;
}
cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice);
add<<<(N + THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(d_a,d_b,d_c,N);
cudaMemcpy(c,d_c,size,cudaMemcpyDeviceToHost);
printf("Hello world %d\n",c[100]);
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
7963a5509603f63d82ab56b32a8a0836ab2b0bc0.hip | // !!! This is a file automatically generated by hipify!!!
/***********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
Created by Pawan Harish.
************************************************************************************/
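// Level-synchronous BFS (frontier expansion is implemented in kernel.cu and
// kernel2.cu, included below): Kernel expands the current frontier mask and
// writes tentative costs plus an 'updating' mask, Kernel2 promotes that mask
// to the next frontier and raises d_over whenever any node was added, and the
// host loops over both kernels until the frontier is empty.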
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#ifdef BAMBOO_PROFILING
#include "record_data.hip"
#endif
#define MAX_THREADS_PER_BLOCK 512
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
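// Node stores the adjacency list in compressed form: 'starting' is the index
// of the node's first edge in the flat edge array and 'no_of_edges' its degree.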
#include <kernel.cu>
#include <kernel2.cu>
void BFSGraph(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
no_of_nodes=0;
edge_list_size=0;
BFSGraph( argc, argv);
}
void Usage(int argc, char**argv){
fprintf(stderr,"Usage: %s <input_file>\n", argv[0]);
}
////////////////////////////////////////////////////////////////////////////////
//Apply BFS on a Graph using CUDA
////////////////////////////////////////////////////////////////////////////////
void BFSGraph( int argc, char** argv)
{
char *input_f;
if(argc!=2){
Usage(argc, argv);
exit(0);
}
input_f = argv[1];
printf("Reading File\n");
//Read in Graph from a file
fp = fopen(input_f,"r");
if(!fp)
{
printf("Error Reading graph file\n");
return;
}
int source = 0;
fscanf(fp,"%d",&no_of_nodes);
int num_of_blocks = 1;
int num_of_threads_per_block = no_of_nodes;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(no_of_nodes>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
// allocate host memory
Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes);
bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes);
int start, edgeno;
// initalize the memory
for( unsigned int i = 0; i < no_of_nodes; i++)
{
fscanf(fp,"%d %d",&start,&edgeno);
h_graph_nodes[i].starting = start;
h_graph_nodes[i].no_of_edges = edgeno;
h_graph_mask[i]=false;
h_updating_graph_mask[i]=false;
h_graph_visited[i]=false;
}
//read the source node from the file
fscanf(fp,"%d",&source);
source=0;
//set the source node as true in the mask
h_graph_mask[source]=true;
h_graph_visited[source]=true;
fscanf(fp,"%d",&edge_list_size);
int id,cost;
int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size);
for(int i=0; i < edge_list_size ; i++)
{
fscanf(fp,"%d",&id);
fscanf(fp,"%d",&cost);
h_graph_edges[i] = id;
}
if(fp)
fclose(fp);
printf("Read File\n");
//Copy the Node list to device memory
Node* d_graph_nodes;
hipMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ;
hipMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, hipMemcpyHostToDevice) ;
//Copy the Edge List to device Memory
int* d_graph_edges;
hipMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size) ;
hipMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, hipMemcpyHostToDevice) ;
//Copy the Mask to device memory
bool* d_graph_mask;
hipMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes) ;
hipMemcpy( d_graph_mask, h_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ;
bool* d_updating_graph_mask;
hipMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes) ;
hipMemcpy( d_updating_graph_mask, h_updating_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ;
//Copy the Visited nodes array to device memory
bool* d_graph_visited;
hipMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes) ;
hipMemcpy( d_graph_visited, h_graph_visited, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ;
// allocate mem for the result on host side
int* h_cost = (int*) malloc( sizeof(int)*no_of_nodes);
for(int i=0;i<no_of_nodes;i++)
h_cost[i]=-1;
h_cost[source]=0;
// allocate device memory for result
int* d_cost;
hipMalloc( (void**) &d_cost, sizeof(int)*no_of_nodes);
hipMemcpy( d_cost, h_cost, sizeof(int)*no_of_nodes, hipMemcpyHostToDevice) ;
//make a bool to check if the execution is over
bool *d_over;
hipMalloc( (void**) &d_over, sizeof(bool));
printf("Copied Everything to GPU memory\n");
// setup execution parameters
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
int k=0;
printf("Start traversing the tree\n");
bool stop;
//Call the Kernel untill all the elements of Frontier are not false
do
{
//if no thread changes this value then the loop stops
stop=false;
bambooLogKernelBegin(k);
hipMemcpy( d_over, &stop, sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Kernel), dim3(grid), dim3(threads), 0 , 0, d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes);;
k++;
bambooLogRecordOff();
hipLaunchKernelGGL(( Kernel2), dim3(grid), dim3(threads), 0 , 0, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
hipMemcpy( &stop, d_over, sizeof(bool), hipMemcpyDeviceToHost) ;
}
while(stop);
bambooLogKernelEnd();
printf("Kernel Executed %d times\n",k);
// copy result from device to host
hipMemcpy( h_cost, d_cost, sizeof(int)*no_of_nodes, hipMemcpyDeviceToHost) ;
//Store the result into a file
FILE *fpo = fopen("result.txt","w");
for(int i=0;i<no_of_nodes;i++)
fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]);
fclose(fpo);
printf("Result stored in result.txt\n");
// cleanup memory
free( h_graph_nodes);
free( h_graph_edges);
free( h_graph_mask);
free( h_updating_graph_mask);
free( h_graph_visited);
free( h_cost);
hipFree(d_graph_nodes);
hipFree(d_graph_edges);
hipFree(d_graph_mask);
hipFree(d_updating_graph_mask);
hipFree(d_graph_visited);
hipFree(d_cost);
}
| 7963a5509603f63d82ab56b32a8a0836ab2b0bc0.cu | /***********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
Created by Pawan Harish.
************************************************************************************/
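// Level-synchronous BFS (frontier expansion is implemented in kernel.cu and
// kernel2.cu, included below): Kernel expands the current frontier mask and
// writes tentative costs plus an 'updating' mask, Kernel2 promotes that mask
// to the next frontier and raises d_over whenever any node was added, and the
// host loops over both kernels until the frontier is empty.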
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#ifdef BAMBOO_PROFILING
#include "record_data.cu"
#endif
#define MAX_THREADS_PER_BLOCK 512
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
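// Node stores the adjacency list in compressed form: 'starting' is the index
// of the node's first edge in the flat edge array and 'no_of_edges' its degree.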
#include <kernel.cu>
#include <kernel2.cu>
void BFSGraph(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
no_of_nodes=0;
edge_list_size=0;
BFSGraph( argc, argv);
}
void Usage(int argc, char**argv){
fprintf(stderr,"Usage: %s <input_file>\n", argv[0]);
}
////////////////////////////////////////////////////////////////////////////////
//Apply BFS on a Graph using CUDA
////////////////////////////////////////////////////////////////////////////////
void BFSGraph( int argc, char** argv)
{
char *input_f;
if(argc!=2){
Usage(argc, argv);
exit(0);
}
input_f = argv[1];
printf("Reading File\n");
//Read in Graph from a file
fp = fopen(input_f,"r");
if(!fp)
{
printf("Error Reading graph file\n");
return;
}
int source = 0;
fscanf(fp,"%d",&no_of_nodes);
int num_of_blocks = 1;
int num_of_threads_per_block = no_of_nodes;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(no_of_nodes>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
// allocate host memory
Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes);
bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes);
int start, edgeno;
// initalize the memory
for( unsigned int i = 0; i < no_of_nodes; i++)
{
fscanf(fp,"%d %d",&start,&edgeno);
h_graph_nodes[i].starting = start;
h_graph_nodes[i].no_of_edges = edgeno;
h_graph_mask[i]=false;
h_updating_graph_mask[i]=false;
h_graph_visited[i]=false;
}
//read the source node from the file
fscanf(fp,"%d",&source);
source=0;
//set the source node as true in the mask
h_graph_mask[source]=true;
h_graph_visited[source]=true;
fscanf(fp,"%d",&edge_list_size);
int id,cost;
int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size);
for(int i=0; i < edge_list_size ; i++)
{
fscanf(fp,"%d",&id);
fscanf(fp,"%d",&cost);
h_graph_edges[i] = id;
}
if(fp)
fclose(fp);
printf("Read File\n");
//Copy the Node list to device memory
Node* d_graph_nodes;
cudaMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ;
cudaMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, cudaMemcpyHostToDevice) ;
//Copy the Edge List to device Memory
int* d_graph_edges;
cudaMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size) ;
cudaMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, cudaMemcpyHostToDevice) ;
//Copy the Mask to device memory
bool* d_graph_mask;
cudaMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes) ;
cudaMemcpy( d_graph_mask, h_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ;
bool* d_updating_graph_mask;
cudaMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes) ;
cudaMemcpy( d_updating_graph_mask, h_updating_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ;
//Copy the Visited nodes array to device memory
bool* d_graph_visited;
cudaMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes) ;
cudaMemcpy( d_graph_visited, h_graph_visited, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ;
// allocate mem for the result on host side
int* h_cost = (int*) malloc( sizeof(int)*no_of_nodes);
for(int i=0;i<no_of_nodes;i++)
h_cost[i]=-1;
h_cost[source]=0;
// allocate device memory for result
int* d_cost;
cudaMalloc( (void**) &d_cost, sizeof(int)*no_of_nodes);
cudaMemcpy( d_cost, h_cost, sizeof(int)*no_of_nodes, cudaMemcpyHostToDevice) ;
//make a bool to check if the execution is over
bool *d_over;
cudaMalloc( (void**) &d_over, sizeof(bool));
printf("Copied Everything to GPU memory\n");
// setup execution parameters
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
int k=0;
printf("Start traversing the tree\n");
bool stop;
//Call the Kernel untill all the elements of Frontier are not false
do
{
//if no thread changes this value then the loop stops
stop=false;
bambooLogKernelBegin(k);
cudaMemcpy( d_over, &stop, sizeof(bool), cudaMemcpyHostToDevice);
Kernel<<< grid, threads, 0 >>>( d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes);;
k++;
bambooLogRecordOff();
Kernel2<<< grid, threads, 0 >>>( d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
cudaMemcpy( &stop, d_over, sizeof(bool), cudaMemcpyDeviceToHost) ;
}
while(stop);
bambooLogKernelEnd();
printf("Kernel Executed %d times\n",k);
// copy result from device to host
cudaMemcpy( h_cost, d_cost, sizeof(int)*no_of_nodes, cudaMemcpyDeviceToHost) ;
//Store the result into a file
FILE *fpo = fopen("result.txt","w");
for(int i=0;i<no_of_nodes;i++)
fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]);
fclose(fpo);
printf("Result stored in result.txt\n");
// cleanup memory
free( h_graph_nodes);
free( h_graph_edges);
free( h_graph_mask);
free( h_updating_graph_mask);
free( h_graph_visited);
free( h_cost);
cudaFree(d_graph_nodes);
cudaFree(d_graph_edges);
cudaFree(d_graph_mask);
cudaFree(d_updating_graph_mask);
cudaFree(d_graph_visited);
cudaFree(d_cost);
}
|
373eb54f6c1618834387b1039c9589f352f562b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**********************************************************************
Copyright ©2013 Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
• Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
• Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************/
__device__ __forceinline__
float4 convert_float4(uchar4 v) {
float4 res;
res.x = (float) v.x;
res.y = (float) v.y;
res.z = (float) v.z;
res.w = (float) v.w;
return res;
}
__device__ __forceinline__
uchar4 convert_uchar4_sat(float4 v) {
uchar4 res;
res.x = (unsigned char) ((v.x > 255.f) ? 255.f : (v.x < 0.f ? 0.f : v.x));
res.y = (unsigned char) ((v.y > 255.f) ? 255.f : (v.y < 0.f ? 0.f : v.y));
res.z = (unsigned char) ((v.z > 255.f) ? 255.f : (v.z < 0.f ? 0.f : v.z));
res.w = (unsigned char) ((v.w > 255.f) ? 255.f : (v.w < 0.f ? 0.f : v.w));
return res;
}
/* Generate uniform random deviation */
/* Park-Miller with Bays-Durham shuffle and added safeguards
Returns a uniform random deviate between (-FACTOR/2, FACTOR/2)
input seed should be negative */
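// The constants IA, IM, IQ, IR, AM, NTAB and NDIV used below are not defined in
// this file and are assumed to come from the build (e.g. -D compiler flags).
// For the classic Park-Miller "minimal standard" generator the usual reference
// values are IA=16807, IM=2147483647, IQ=127773, IR=2836, AM=1.0/IM, NTAB=32
// and NDIV=1+(IM-1)/NTAB; they are quoted here for orientation only.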
__device__
float ran1(int idum, int *iv)
{
int j;
int k;
int iy = 0;
int tid = threadIdx.x;
for(j = NTAB; j >=0; j--) //Load the shuffle
{
k = idum / IQ;
idum = IA * (idum - k * IQ) - IR * k;
if(idum < 0)
idum += IM;
if(j < NTAB)
iv[NTAB* tid + j] = idum;
}
iy = iv[NTAB* tid];
k = idum / IQ;
idum = IA * (idum - k * IQ) - IR * k;
if(idum < 0)
idum += IM;
j = iy / NDIV;
iy = iv[NTAB * tid + j];
return (AM * iy); //AM *iy will be between 0.0 and 1.0
}
__device__ __forceinline__
float4 operator+(float4 a, float b)
{
return make_float4(a.x + b, a.y + b, a.z + b, a.w + b);
}
__global__
void noise_uniform(
const uchar4*__restrict__ inputImage,
uchar4*__restrict__ outputImage,
const int factor)
{
int pos = blockIdx.x * blockDim.x + threadIdx.x;
float4 temp = convert_float4(inputImage[pos]);
/* compute average value of a pixel from its compoments */
float avg = (temp.x + temp.y + temp.z + temp.w) / 4.0f;
/* Each thread has NTAB private values */
__shared__ int iv[NTAB * GROUP_SIZE];
/* Calculate deviation from the avg value of a pixel */
float dev = ran1(-avg, iv);
dev = (dev - 0.55f) * (float)factor;
/* Saturate(clamp) the values */
outputImage[pos] = convert_uchar4_sat(temp + dev);
}
| 373eb54f6c1618834387b1039c9589f352f562b2.cu | /**********************************************************************
Copyright ©2013 Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
• Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
• Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************/
__device__ __forceinline__
float4 convert_float4(uchar4 v) {
float4 res;
res.x = (float) v.x;
res.y = (float) v.y;
res.z = (float) v.z;
res.w = (float) v.w;
return res;
}
__device__ __forceinline__
uchar4 convert_uchar4_sat(float4 v) {
uchar4 res;
res.x = (unsigned char) ((v.x > 255.f) ? 255.f : (v.x < 0.f ? 0.f : v.x));
res.y = (unsigned char) ((v.y > 255.f) ? 255.f : (v.y < 0.f ? 0.f : v.y));
res.z = (unsigned char) ((v.z > 255.f) ? 255.f : (v.z < 0.f ? 0.f : v.z));
res.w = (unsigned char) ((v.w > 255.f) ? 255.f : (v.w < 0.f ? 0.f : v.w));
return res;
}
/* Generate uniform random deviation */
/* Park-Miller with Bays-Durham shuffle and added safeguards
Returns a uniform random deviate between (-FACTOR/2, FACTOR/2)
input seed should be negative */
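// The constants IA, IM, IQ, IR, AM, NTAB and NDIV used below are not defined in
// this file and are assumed to come from the build (e.g. -D compiler flags).
// For the classic Park-Miller "minimal standard" generator the usual reference
// values are IA=16807, IM=2147483647, IQ=127773, IR=2836, AM=1.0/IM, NTAB=32
// and NDIV=1+(IM-1)/NTAB; they are quoted here for orientation only.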
__device__
float ran1(int idum, int *iv)
{
int j;
int k;
int iy = 0;
int tid = threadIdx.x;
for(j = NTAB; j >=0; j--) //Load the shuffle
{
k = idum / IQ;
idum = IA * (idum - k * IQ) - IR * k;
if(idum < 0)
idum += IM;
if(j < NTAB)
iv[NTAB* tid + j] = idum;
}
iy = iv[NTAB* tid];
k = idum / IQ;
idum = IA * (idum - k * IQ) - IR * k;
if(idum < 0)
idum += IM;
j = iy / NDIV;
iy = iv[NTAB * tid + j];
return (AM * iy); //AM *iy will be between 0.0 and 1.0
}
__device__ __forceinline__
float4 operator+(float4 a, float b)
{
return make_float4(a.x + b, a.y + b, a.z + b, a.w + b);
}
__global__
void noise_uniform(
const uchar4*__restrict__ inputImage,
uchar4*__restrict__ outputImage,
const int factor)
{
int pos = blockIdx.x * blockDim.x + threadIdx.x;
float4 temp = convert_float4(inputImage[pos]);
/* compute average value of a pixel from its compoments */
float avg = (temp.x + temp.y + temp.z + temp.w) / 4.0f;
/* Each thread has NTAB private values */
__shared__ int iv[NTAB * GROUP_SIZE];
/* Calculate deviation from the avg value of a pixel */
float dev = ran1(-avg, iv);
dev = (dev - 0.55f) * (float)factor;
/* Saturate(clamp) the values */
outputImage[pos] = convert_uchar4_sat(temp + dev);
}
|
04351a82a781de03ed7dcc99fdd175db31f34e75.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by gautam on 25/04/20.
//
#include "sql_select.cuh"
#define NUM_THREADS 512
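// Note on the kernels below: rows are partitioned statically across the NUM_THREADS threads
// of a single block, i.e. thread t handles the contiguous range
// [t * rowsPerBlock, (t + 1) * rowsPerBlock) (the variable is named rowsPerBlock but it is
// really rows per thread), so output order follows thread id rather than the original row order.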
__global__ void selectKernel(void *data, int rowSize, int *offset, int offsetSize, ColType *types, myExpr *exprs, int numRows, const int *dispCols, int numDispCols) {
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
void *res;
int resType = 1;
void *row;
bool flag;
for (unsigned int i = start; i < end; i++) {
if (i < numRows) {
row = (char *)data + i * rowSize;
eval(row, offset, types, exprs, res, resType);
flag = false;
if (resType == RESTYPE_INT) {
flag = *(int *) res != 0;
} else if (resType == RESTYPE_FLT) {
flag = *(float *) res != 0;
}
free(res);
if (!flag) continue;
// Condition is satisfied, write code here
// printf("Row id: %d", i);
printRowDevice(row, types, offsetSize, dispCols, numDispCols, offset);
}
}
}
__global__ void selectKernelRes(void *data, int rowSize, int *offset, int offsetSize, ColType *types, myExpr *exprs,
int numRows, void *resData, unsigned int *top) {
if (threadIdx.x == 0) {
*top = 0;
}
__syncthreads();
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
void *res;
int resType = 1;
void *row;
bool flag;
unsigned int old;
for (unsigned int i = start; i < end; i++) {
if (i < numRows) {
row = (char *)data + i * rowSize;
eval(row, offset, types, exprs, res, resType);
flag = false;
if (resType == RESTYPE_INT) {
flag = *(int *) res != 0;
} else if (resType == RESTYPE_FLT) {
flag = *(float *) res != 0;
}
free(res);
if (!flag) continue;
// Condition is satisfied, write code here
// printRowDevice(row, types, offsetSize);
old = atomicInc(top, numRows + 1);
memcpy((char *) resData + old * rowSize, row, rowSize);
}
}
}
__global__ void
joinKernel(void *left, void *right, void *join, myExpr *joinExpr, int *offset, int numCols, ColType *types, int rowSizeL, int rowSizeR, int numRowsL, int numRowsR,
unsigned int *numRowsRes, bool *matchedL) {
if (threadIdx.x == 0) {
*numRowsRes = 0;
}
__syncthreads();
int rowsPerThread = (numRowsL * numRowsR + NUM_THREADS - 1) / NUM_THREADS;
const unsigned start = rowsPerThread * threadIdx.x;
const unsigned end = rowsPerThread * (threadIdx.x + 1);
const int rowSizeRes = rowSizeL + rowSizeR;
void *res;
int resType = 0;
void *row;
bool flag;
unsigned old;
unsigned l_prev = numRowsL + 1;
unsigned l, r;
    // Allocate the scratch row once per thread and reuse it for every candidate pair
    // (the original allocated a new row on every iteration and never freed it).
    row = malloc(rowSizeRes);
    for (unsigned i = start; i < end; ++i) {
        // row i in join is obtained from i / numRowsR from left and i % numRowsR in right
        l = i / numRowsR;
        r = i % numRowsR;
        if (l >= numRowsL || r >= numRowsR) break;
        // printf("[%d, %d, (%d, %d)]\n", threadIdx.x, i, l, r);
memcpy(row, (char *)left + l * rowSizeL, rowSizeL);
memcpy((char *) row + rowSizeL, (char *)right + r * rowSizeR, rowSizeR);
eval(row, offset, types, joinExpr, res, resType);
flag = false;
if (resType == RESTYPE_INT) {
flag = *(int *)res != 0;
} else if (resType == RESTYPE_FLT) {
flag = *(float *)res != 0;
}
free(res);
if (!flag) continue;
if (l != l_prev) {
matchedL[l] = true;
l_prev = l;
}
old = atomicInc(numRowsRes, numRowsL * numRowsR);
memcpy((char *) join + old * rowSizeRes, row, rowSizeRes);
// printRowDevice(row, types, numCols);
    }
    free(row);
}
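// getLeft (below) completes a LEFT/RIGHT OUTER JOIN: for every left-side row that joinKernel
// did not match, it emits the row padded with NULL values for the right-side columns.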
__global__ void
getLeft(void *data, bool *matched, int numRows, ColType *typesNew, const int numColsOld, const int numColsNew,
const int rowSizeOld, const int rowSizeNew, void *resData, unsigned int *top) {
if (threadIdx.x == 0) {
*top = 0;
}
__syncthreads();
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
void *rowOld, *row, *cell;
row = malloc(rowSizeNew);
cell = (char *)row + rowSizeOld;
for (int j = numColsOld; j < numColsNew; ++j) {
switch (typesNew[j].type) {
case TYPE_INT: {
int *x = (int *) cell;
*x = getNullInt();
break;
}
case TYPE_FLOAT: {
float *x = (float *) cell;
*x = getNullFlt();
break;
}
case TYPE_VARCHAR: {
getNullStr((char *)cell, typesNew[j].size);
break;
}
default:
printf("Not implemented\n");
return;
}
cell = (char *) cell + typesNew[j].size;
}
unsigned int old;
for (int i = start; i < end; ++i) {
if (i >= numRows) break;
if (!matched[i]) {
rowOld = (char *) data + rowSizeOld * i;
memcpy(row, rowOld, rowSizeOld);
// printRowDevice(row, typesNew, numColsNew);
old = atomicInc(top, numRows + 1);
memcpy((char *) resData + old * rowSizeNew, row, rowSizeNew);
}
}
free(row);
}
void sql_select::execute(std::string &query) {
hsql::SQLParserResult *result = hsql::SQLParser::parseSQLString(query);
std::vector<int> columnNames;
if(!result->isValid()) {
utils::invalidQuery(result->errorMsg());
return;
}
hipDeviceReset();
const auto *stmt = (const hsql::SelectStatement *) result->getStatement(0);
Data *d = getData(stmt->fromTable);
for (hsql::Expr* expr : *stmt->selectList){
switch (expr->type) {
case hsql::kExprStar:
// columnNames.emplace_back("*");
columnNames.push_back(-1);
break;
case hsql::kExprColumnRef:
// columnNames.emplace_back(expr->name);
columnNames.push_back(d->mdata.colMap[expr->name]);
break;
// case hsql::kExprTableColumnRef:
// inprint(expr->table, expr->name, numIndent);
// break;
case hsql::kExprLiteralFloat:
// columnNames.push_back(std::to_string(expr->fval));
columnNames.push_back(-2);
break;
case hsql::kExprLiteralInt:
// columnNames.push_back(std::to_string(expr->ival));
columnNames.push_back(-2);
break;
case hsql::kExprLiteralString:
// columnNames.emplace_back(expr->name);
columnNames.push_back(-2);
break;
// TODO: kExprFunctionRef (Distinct ?), kExprOperator (col1 + col2 ?)
// case hsql::kExprFunctionRef:
// inprint(expr->name, numIndent);
// inprint(expr->expr->name, numIndent + 1);
// break;
// case hsql::kExprOperator:
// printOperatorExpression(expr, numIndent);
// break;
default:
fprintf(stderr, "Unrecognized expression type %d\n", expr->type);
return;
}
}
std::vector<myExpr> whereExpr;
if (stmt->whereClause != nullptr) {
exprToVec(stmt->whereClause, whereExpr, d->mdata.columns, *d);
} else {
whereExpr.push_back(newExpr(CONSTANT_INT, (long) 1));
}
int rowSize = d->mdata.rowSize;
void *data = malloc(d->chunkSize * rowSize);
void *data_d;
int numCols = d->mdata.columns.size();
ColType *type_d;
hipMalloc(&type_d, sizeof(ColType) * numCols);
hipMemcpy(type_d, &d->mdata.datatypes[0], sizeof(ColType) * numCols, hipMemcpyHostToDevice);
myExpr *where_d;
hipMalloc(&where_d, sizeof(myExpr) * whereExpr.size());
hipMemcpy(where_d, &whereExpr[0], sizeof(myExpr) * whereExpr.size(), hipMemcpyHostToDevice);
std::vector<int> offsets(numCols + 1);
offsets[0] = 0;
for (int i = 1; i <= numCols; ++i) {
offsets[i] = offsets[i - 1] + d->mdata.datatypes[i - 1].size;
}
int *offsets_d;
hipMalloc(&offsets_d, sizeof(int) * (numCols + 1));
hipMemcpy(offsets_d, &offsets[0], sizeof(int) * (numCols + 1), hipMemcpyHostToDevice);
int *dispCols_d;
hipMalloc(&dispCols_d, sizeof(int) * columnNames.size());
hipMemcpy(dispCols_d, &columnNames[0], sizeof(int) * columnNames.size(), hipMemcpyHostToDevice);
int rowsRead = d->read(data);
// printf("HERE____________________%d\n", rowSize);
// utils::printMultiple(data, d->mdata.datatypes, d->mdata.rowSize, d->mdata.rowCount);
hipMalloc(&data_d, d->chunkSize * rowSize);
while (rowsRead > 0) {
hipMemcpy(data_d, data, rowSize * rowsRead, hipMemcpyHostToDevice);
// hipMemcpy(data, data_d, rowSize * rowsRead, hipMemcpyDeviceToHost);
// printf("HERE____________________%d\n", rowSize);
// utils::printMultiple(data, d->mdata.datatypes, d->mdata.rowSize, d->mdata.rowCount);
hipLaunchKernelGGL(( selectKernel), dim3(1), dim3(NUM_THREADS), 0, 0, data_d, rowSize, offsets_d, numCols, type_d, where_d, rowsRead, dispCols_d,
columnNames.size());
rowsRead = d->read(data);
hipDeviceSynchronize();
}
d->~Data();
free(d);
free(data);
hipFree(data_d);
hipFree(type_d);
hipFree(where_d);
hipFree(offsets_d);
}
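// getData (below) resolves the FROM clause: a plain table name is opened directly, a
// sub-select is materialised through selectData(), and a join is evaluated chunk by chunk
// with joinKernel (plus getLeft for outer joins) and written into a new Data object that is
// then switched to read mode.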
Data *sql_select::getData(hsql::TableRef *fromTable) {
switch (fromTable->type) {
case hsql::kTableName:
return new Data(fromTable->name);
case hsql::kTableSelect:
return selectData(fromTable->select);
case hsql::kTableJoin: {
Data *d, *dL, *dR;
if (fromTable->join->type != hsql::kJoinRight) {
dL = getData(fromTable->join->left);
dR = getData(fromTable->join->right);
} else {
dL = getData(fromTable->join->right);
dR = getData(fromTable->join->left);
}
d = new Data(dL, dR);
dL->chunkSize = dR->chunkSize = d->chunkSize;
std::vector<myExpr> joinCondition;
exprToVec(fromTable->join->condition, joinCondition, d->mdata.columns, *d);
myExpr *joinCondition_d;
hipMalloc(&joinCondition_d, joinCondition.size() * sizeof(myExpr));
hipMemcpy(joinCondition_d, &joinCondition[0], sizeof(myExpr) * joinCondition.size(),
hipMemcpyHostToDevice);
std::vector<int> offsets(d->mdata.columns.size() + 1);
offsets[0] = 0;
for (int i = 1; i <= d->mdata.columns.size(); ++i) {
offsets[i] = offsets[i - 1] + d->mdata.datatypes[i - 1].size;
}
int *offsets_d;
hipMalloc(&offsets_d, sizeof(int) * (d->mdata.columns.size() + 1));
hipMemcpy(offsets_d, &offsets[0], sizeof(int) * (d->mdata.columns.size() + 1), hipMemcpyHostToDevice);
ColType *type_d;
hipMalloc(&type_d, sizeof(ColType) * d->mdata.columns.size());
hipMemcpy(type_d, &d->mdata.datatypes[0], sizeof(ColType) * d->mdata.columns.size(),
hipMemcpyHostToDevice);
void *join = malloc(d->chunkSize * d->chunkSize * d->mdata.rowSize);
void *dataL = malloc(d->chunkSize * dL->mdata.rowSize), *dataL_d;
void *dataR = malloc(d->chunkSize * dR->mdata.rowSize), *dataR_d;
void *join_d; // Upto n^2 rows can be stored
hipMalloc(&join_d, d->chunkSize * d->chunkSize * d->mdata.rowSize);
hipMalloc(&dataL_d, dL->chunkSize * dL->mdata.rowSize);
hipMalloc(&dataR_d, dR->chunkSize * dR->mdata.rowSize);
unsigned int numRowsJoin = 0;
unsigned int *numRowsJoin_d;
hipMalloc(&numRowsJoin_d, sizeof(unsigned int));
std::vector<myExpr> whereClause;
int rowsReadL = dL->read(dataL), rowsReadR;
hipMemcpy(dataL_d, dataL, rowsReadL * dL->mdata.rowSize, hipMemcpyHostToDevice);
bool *matched_d;
hipMalloc(&matched_d, sizeof(bool) * dL->chunkSize);
while (rowsReadL > 0) {
hipMemset(matched_d, 0, sizeof(bool) * d->chunkSize);
dR->restartRead();
rowsReadR = dR->read(dataR);
hipMemcpy(dataR_d, dataR, rowsReadR * dR->mdata.rowSize, hipMemcpyHostToDevice);
while (rowsReadR > 0) {
hipLaunchKernelGGL(( joinKernel), dim3(1), dim3(NUM_THREADS), 0, 0, dataL_d, dataR_d, join_d, joinCondition_d, offsets_d,
d->mdata.columns.size(),
type_d, dL->mdata.rowSize, dR->mdata.rowSize, rowsReadL, rowsReadR,
numRowsJoin_d, matched_d);
rowsReadR = dR->read(dataR);
hipDeviceSynchronize();
hipMemcpy(&numRowsJoin, numRowsJoin_d, sizeof(unsigned int), hipMemcpyDeviceToHost);
hipMemcpy(join, join_d, numRowsJoin * d->mdata.rowSize, hipMemcpyDeviceToHost);
d->write(join, numRowsJoin * d->mdata.rowSize);
fflush(stdout);
//hipLaunchKernelGGL(( selectKernel), dim3(1), dim3(NUM_THREADS), 0, 0, join_d, d->mdata.rowSize, offsets_d, d->mdata.columns.size(),
// type_d, whereClause_d, numRowsJoin);
hipDeviceSynchronize();
hipMemcpy(dataR_d, dataR, rowsReadR * dR->mdata.rowSize, hipMemcpyHostToDevice);
}
if (fromTable->join->type == hsql::kJoinLeft || fromTable->join->type == hsql::kJoinRight) {
hipLaunchKernelGGL(( getLeft), dim3(1), dim3(NUM_THREADS), 0, 0, dataL_d, matched_d, rowsReadL, type_d, dL->mdata.columns.size(),
d->mdata.columns.size(), dL->mdata.rowSize, d->mdata.rowSize, join_d,
numRowsJoin_d);
}
rowsReadL = dL->read(dataL);
hipDeviceSynchronize();
if (fromTable->join->type == hsql::kJoinLeft || fromTable->join->type == hsql::kJoinRight) {
hipMemcpy(&numRowsJoin, numRowsJoin_d, sizeof(unsigned int), hipMemcpyDeviceToHost);
hipMemcpy(join, join_d, numRowsJoin * d->mdata.rowSize, hipMemcpyDeviceToHost);
d->write(join, numRowsJoin * d->mdata.rowSize);
}
hipMemcpy(dataL_d, dataL, rowsReadL * dL->mdata.rowSize, hipMemcpyHostToDevice);
}
dL->~Data();
dR->~Data();
free(dL);
free(dR);
free(dataL);
free(dataR);
free(join);
hipFree(dataL_d);
hipFree(dataR_d);
hipFree(join_d);
hipFree(joinCondition_d);
hipFree(offsets_d);
hipFree(type_d);
hipFree(numRowsJoin_d);
hipFree(matched_d);
d->switchToRead();
return d;
}
case hsql::kTableCrossProduct:
utils::invalidQuery("Cross Product is not Implemented.");
return nullptr;
}
return nullptr;
}
Data *sql_select::selectData(hsql::SelectStatement *stmt) {
std::vector<std::string> columnNames;
for (hsql::Expr* expr : *stmt->selectList){
switch (expr->type) {
case hsql::kExprStar:
columnNames.emplace_back("*");
break;
case hsql::kExprColumnRef:
columnNames.emplace_back(expr->name);
break;
// case hsql::kExprTableColumnRef:
// inprint(expr->table, expr->name, numIndent);
// break;
case hsql::kExprLiteralFloat:
columnNames.push_back(std::to_string(expr->fval));
break;
case hsql::kExprLiteralInt:
columnNames.push_back(std::to_string(expr->ival));
break;
case hsql::kExprLiteralString:
columnNames.emplace_back(expr->name);
break;
// TODO: kExprFunctionRef (Distinct ?), kExprOperator (col1 + col2 ?)
// case hsql::kExprFunctionRef:
// inprint(expr->name, numIndent);
// inprint(expr->expr->name, numIndent + 1);
// break;
// case hsql::kExprOperator:
// printOperatorExpression(expr, numIndent);
// break;
default:
fprintf(stderr, "Unrecognized expression type %d\n", expr->type);
return nullptr;
}
}
Data *d = getData(stmt->fromTable);
Data *result = new Data(d);
std::vector<myExpr> whereExpr;
if (stmt->whereClause != nullptr) {
exprToVec(stmt->whereClause, whereExpr, d->mdata.columns, *d);
} else {
whereExpr.push_back(newExpr(CONSTANT_INT, (long) 1));
}
int rowSize = d->mdata.rowSize;
void *data = malloc(d->chunkSize * rowSize);
void *resData = malloc(d->chunkSize * rowSize);
void *data_d, *resData_d;
int numCols = d->mdata.columns.size();
ColType *type_d;
hipMalloc(&type_d, sizeof(ColType) * numCols);
hipMemcpy(type_d, &d->mdata.datatypes[0], sizeof(ColType) * numCols, hipMemcpyHostToDevice);
myExpr *where_d;
hipMalloc(&where_d, sizeof(myExpr) * whereExpr.size());
hipMemcpy(where_d, &whereExpr[0], sizeof(myExpr) * whereExpr.size(), hipMemcpyHostToDevice);
std::vector<int> offsets(numCols + 1);
offsets[0] = 0;
for (int i = 1; i <= numCols; ++i) {
offsets[i] = offsets[i - 1] + d->mdata.datatypes[i - 1].size;
}
int *offsets_d;
hipMalloc(&offsets_d, sizeof(int) * (numCols + 1));
hipMemcpy(offsets_d, &offsets[0], sizeof(int) * (numCols + 1), hipMemcpyHostToDevice);
int rowsRead = d->read(data);
hipMalloc(&data_d, d->chunkSize * rowSize);
hipMalloc(&resData_d, d->chunkSize * rowSize);
unsigned int numMatches, *numMatches_d;
hipMalloc(&numMatches_d, sizeof(unsigned int));
while (rowsRead > 0) {
hipMemcpy(data_d, data, rowSize * rowsRead, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( selectKernelRes), dim3(1), dim3(NUM_THREADS), 0, 0, data_d, rowSize, offsets_d, numCols, type_d, where_d, rowsRead, resData_d,
numMatches_d);
rowsRead = d->read(data);
hipDeviceSynchronize();
hipMemcpy(&numMatches, numMatches_d, sizeof(unsigned int), hipMemcpyDeviceToHost);
hipMemcpy(resData, resData_d, rowSize * numMatches, hipMemcpyDeviceToHost);
result->write(resData, rowSize * numMatches);
// utils::printMultiple(resData, d->mdata.datatypes, rowSize, numMatches);
}
d->~Data();
free(d);
free(data);
free(resData);
hipFree(data_d);
hipFree(resData_d);
hipFree(type_d);
hipFree(where_d);
hipFree(offsets_d);
result->switchToRead();
result->restartRead();
return result;
}
// void sql_select::execute(std::string &query) {
//
// hsql::SQLParserResult *result;
// std::vector<std::string> columnNames;
//
// result = hsql::SQLParser::parseSQLString(query);
// columnNames = std::vector<std::string>();
//
// if(result->isValid()){
// const auto *stmt = (const hsql::SelectStatement *) result->getStatement(0);
// // Get column names
// for (hsql::Expr* expr : *stmt->selectList){
// switch (expr->type) {
// case hsql::kExprStar:
// columnNames.emplace_back("*");
// break;
// case hsql::kExprColumnRef:
// columnNames.emplace_back(expr->name);
// break;
// // case hsql::kExprTableColumnRef:
// // inprint(expr->table, expr->name, numIndent);
// // break;
// case hsql::kExprLiteralFloat:
// columnNames.push_back(std::to_string(expr->fval));
// break;
// case hsql::kExprLiteralInt:
// columnNames.push_back(std::to_string(expr->ival));
// break;
// case hsql::kExprLiteralString:
// columnNames.emplace_back(expr->name);
// break;
// // TODO: kExprFunctionRef (Distinct ?), kExprOperator (col1 + col2 ?)
// // case hsql::kExprFunctionRef:
// // inprint(expr->name, numIndent);
// // inprint(expr->expr->name, numIndent + 1);
// // break;
// // case hsql::kExprOperator:
// // printOperatorExpression(expr, numIndent);
// // break;
// default:
// fprintf(stderr, "Unrecognized expression type %d\n", expr->type);
// return;
// }
// }
// // Get tables reference
// auto table = stmt->fromTable;
// Data *d;
// switch (table->type) {
// case hsql::kTableName:
// // inprint(table->name, numIndent);
// d = new Data(table->name);
// break;
// // case hsql::kTableSelect:
// // // printSelectStatementInfo(table->select, numIndent);
// // break;
// case hsql::kTableJoin: {
// std::string leftTable, rightTable;
// if (table->join->type != hsql::kJoinRight) {
// d = new Data(table->join->left->name, table->join->right->name);
// leftTable = table->join->left->name;
// rightTable = table->join->right->name;
// } else {
// d = new Data(table->join->right->name, table->join->left->name);
// leftTable = table->join->right->name;
// rightTable = table->join->left->name;
// }
// Data dL(leftTable);
// dL.chunkSize = d->chunkSize;
// Data dR(rightTable);
// dR.chunkSize = d->chunkSize;
//
// std::vector<myExpr> joinCondition;
// exprToVec(table->join->condition, joinCondition, d->mdata.columns, *d);
// myExpr *joinCondition_d;
// hipMalloc(&joinCondition_d, joinCondition.size() * sizeof(myExpr));
// hipMemcpy(joinCondition_d, &joinCondition[0], sizeof(myExpr) * joinCondition.size(),
// hipMemcpyHostToDevice);
//
// std::vector<int> offsets(d->mdata.columns.size() + 1);
// offsets[0] = 0;
// for (int i = 1; i <= d->mdata.columns.size(); ++i) {
// offsets[i] = offsets[i - 1] + d->mdata.datatypes[i - 1].size;
// }
// int *offsets_d;
// hipMalloc(&offsets_d, sizeof(int) * (d->mdata.columns.size() + 1));
// hipMemcpy(offsets_d, &offsets[0], sizeof(int) * (d->mdata.columns.size() + 1), hipMemcpyHostToDevice);
//
// ColType *type_d;
// hipMalloc(&type_d, sizeof(ColType) * d->mdata.columns.size());
// hipMemcpy(type_d, &d->mdata.datatypes[0], sizeof(ColType) * d->mdata.columns.size(),
// hipMemcpyHostToDevice);
//
// void *join = malloc(d->chunkSize * d->chunkSize * d->mdata.rowSize);
// void *dataL = malloc(d->chunkSize * dL.mdata.rowSize), *dataL_d;
// void *dataR = malloc(d->chunkSize * dR.mdata.rowSize), *dataR_d;
// void *join_d; // Upto n^2 rows can be stored
// hipMalloc(&join_d, d->chunkSize * d->chunkSize * d->mdata.rowSize);
// hipMalloc(&dataL_d, dL.chunkSize * dL.mdata.rowSize);
// hipMalloc(&dataR_d, dR.chunkSize * dR.mdata.rowSize);
// unsigned int numRowsJoin = 0;
// unsigned int *numRowsJoin_d;
// hipMalloc(&numRowsJoin_d, sizeof(unsigned int));
//
// std::vector<myExpr> whereClause;
//
// int rowsReadL = dL.read(dataL), rowsReadR;
// hipMemcpy(dataL_d, dataL, rowsReadL * dL.mdata.rowSize, hipMemcpyHostToDevice);
//
// bool *matched_d;
// hipMalloc(&matched_d, sizeof(bool) * dL.chunkSize);
// while (rowsReadL > 0) {
// hipMemset(matched_d, 0, sizeof(bool) * dL.chunkSize);
// dR.restartRead();
// rowsReadR = dR.read(dataR);
// hipMemcpy(dataR_d, dataR, rowsReadR * dR.mdata.rowSize, hipMemcpyHostToDevice);
// while (rowsReadR > 0) {
// hipLaunchKernelGGL(( joinKernel), dim3(1), dim3(NUM_THREADS), 0, 0, dataL_d, dataR_d, join_d, joinCondition_d, offsets_d,
// d->mdata.columns.size(),
// type_d, dL.mdata.rowSize, dR.mdata.rowSize, rowsReadL, rowsReadR,
// numRowsJoin_d, matched_d);
// rowsReadR = dR.read(dataR);
// hipDeviceSynchronize();
//
// hipMemcpy(&numRowsJoin, numRowsJoin_d, sizeof(unsigned int), hipMemcpyDeviceToHost);
// hipMemcpy(join, join_d, numRowsJoin * d->mdata.rowSize, hipMemcpyDeviceToHost);
// d->write(join, numRowsJoin * d->mdata.rowSize);
// fflush(stdout);
// //hipLaunchKernelGGL(( selectKernel), dim3(1), dim3(NUM_THREADS), 0, 0, join_d, d->mdata.rowSize, offsets_d, d->mdata.columns.size(),
// // type_d, whereClause_d, numRowsJoin);
// hipDeviceSynchronize();
// hipMemcpy(dataR_d, dataR, rowsReadR * dR.mdata.rowSize, hipMemcpyHostToDevice);
// }
// if (table->join->type == hsql::kJoinLeft || table->join->type == hsql::kJoinRight) {
// hipLaunchKernelGGL(( getLeft), dim3(1), dim3(NUM_THREADS), 0, 0, dataL_d, matched_d, rowsReadL, type_d, dL.mdata.columns.size(),
// d->mdata.columns.size(), dL.mdata.rowSize, d->mdata.rowSize);
//
// }
// rowsReadL = dL.read(dataL);
// hipDeviceSynchronize();
// hipMemcpy(dataL_d, dataL, rowsReadL * dL.mdata.rowSize, hipMemcpyHostToDevice);
// }
//
// myExpr *whereClause_d;
// if (stmt->whereClause != nullptr) {
// exprToVec(stmt->whereClause, whereClause, d->mdata.columns, *d);
// hipMalloc(&whereClause_d, sizeof(myExpr) * whereClause.size());
// hipMemcpy(whereClause_d, &whereClause[0], sizeof(myExpr) * whereClause.size(),
// hipMemcpyHostToDevice);
// }
//
// // change chunk size before select
// // d->chunkSize = 500 * 1024 * 1024 / d->mdata.rowSize;
// // if chunksize is changed, join and join_d might need to be reallocated
// d->chunkSize *= d->chunkSize;
//
// // printf("____________________________________________________\n");
// d->switchToRead();
// int numRowsRead;
// numRowsRead = d->read(join);
// while (numRowsRead > 0) {
// hipMemcpy(join_d, join, numRowsRead * d->mdata.rowSize, hipMemcpyHostToDevice);
// hipLaunchKernelGGL(( selectKernel), dim3(1), dim3(NUM_THREADS), 0, 0, join_d, d->mdata.rowSize, offsets_d, d->mdata.columns.size(),
// type_d, whereClause_d, numRowsRead);
// hipDeviceSynchronize();
// numRowsRead = d->read(join);
// }
//
// d->~Data();
// free(d);
// free(dataL);
// free(dataR);
//
// hipFree(dataL_d);
// hipFree(dataR_d);
// hipFree(join_d);
// hipFree(joinCondition_d);
// hipFree(offsets_d);
// hipFree(type_d);
// hipFree(numRowsJoin_d);
// hipFree(whereClause_d);
// hipFree(offsets_d);
// hipFree(matched_d);
// return;
// break;
// }
// // case hsql::kTableCrossProduct:
// // // for (TableRef* tbl : *table->list) printTableRefInfo(tbl, numIndent);
// // break;
// default:
// printf("Will be handled later\n");
// return;
// }
// if (stmt->whereClause != nullptr) {
// // Get where
// std::vector<myExpr> tree;
//
// auto expr = stmt->whereClause;
// exprToVec(expr, tree, d->mdata.columns, *d);
// free(expr);
//
// int rowSize = d->mdata.rowSize;
// void *data = malloc(d->chunkSize * rowSize);
// void *data_d;
// int numCols = d->mdata.columns.size();
// hipSetDevice(0);
// hipDeviceReset();
//
// ColType *type_d;
// hipMalloc(&type_d, sizeof(ColType) * numCols);
// hipMemcpy(type_d, &d->mdata.datatypes[0], sizeof(ColType) * numCols, hipMemcpyHostToDevice);
// myExpr *where_d;
// hipMalloc(&where_d, sizeof(myExpr) * tree.size());
// hipMemcpy(where_d, &tree[0], sizeof(myExpr) * tree.size(), hipMemcpyHostToDevice);
// int *offsets = (int *) malloc(sizeof(int) * (numCols + 1));
// offsets[0] = 0;
// for (int i = 1; i <= numCols; i++) {
// offsets[i] = offsets[i - 1] + d->mdata.datatypes[i - 1].size;
// }
// int *offsets_d;
// hipMalloc(&offsets_d, sizeof(int) * (numCols + 1));
// hipMemcpy(offsets_d, offsets, sizeof(int) * (numCols + 1), hipMemcpyHostToDevice);
// int numRows = d->read(data);
//
// // printing data in table
// // utils::printMultiple(data, d.mdata.datatypes, d.mdata.rowSize, d.mdata.rowCount);
//
// hipMalloc(&data_d, d->chunkSize * rowSize);
// while (numRows > 0) {
// hipMemcpy(data_d, data, rowSize * numRows, hipMemcpyHostToDevice);
// hipLaunchKernelGGL(( selectKernel), dim3(1), dim3(NUM_THREADS), 0, 0, data_d, rowSize, offsets_d, numCols, type_d, where_d, numRows);
// numRows = d->read(data);
// hipDeviceSynchronize();
// }
//
// // Free all the data
// d->~Data();
// free(d);
// free(data);
// free(offsets);
// hipFree(data_d);
// hipFree(type_d);
// hipFree(where_d);
// hipFree(offsets_d);
// } else {
// // RETURN ALL ROWS
// }
// } else {
// fprintf(stderr, "Given string is not a valid SQL query.\n");
// fprintf(stderr, "%s (L%d:%d)\n",
// result->errorMsg(),
// result->errorLine(),
// result->errorColumn());
// }
// free(result);
// } | 04351a82a781de03ed7dcc99fdd175db31f34e75.cu | //
// Created by gautam on 25/04/20.
//
#include "sql_select.cuh"
#define NUM_THREADS 512
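// Note on the kernels below: rows are partitioned statically across the NUM_THREADS threads
// of a single block, i.e. thread t handles the contiguous range
// [t * rowsPerBlock, (t + 1) * rowsPerBlock) (the variable is named rowsPerBlock but it is
// really rows per thread), so output order follows thread id rather than the original row order.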
__global__ void selectKernel(void *data, int rowSize, int *offset, int offsetSize, ColType *types, myExpr *exprs, int numRows, const int *dispCols, int numDispCols) {
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
void *res;
int resType = 1;
void *row;
bool flag;
for (unsigned int i = start; i < end; i++) {
if (i < numRows) {
row = (char *)data + i * rowSize;
eval(row, offset, types, exprs, res, resType);
flag = false;
if (resType == RESTYPE_INT) {
flag = *(int *) res != 0;
} else if (resType == RESTYPE_FLT) {
flag = *(float *) res != 0;
}
free(res);
if (!flag) continue;
// Condition is satisfied, write code here
// printf("Row id: %d", i);
printRowDevice(row, types, offsetSize, dispCols, numDispCols, offset);
}
}
}
__global__ void selectKernelRes(void *data, int rowSize, int *offset, int offsetSize, ColType *types, myExpr *exprs,
int numRows, void *resData, unsigned int *top) {
if (threadIdx.x == 0) {
*top = 0;
}
__syncthreads();
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
void *res;
int resType = 1;
void *row;
bool flag;
unsigned int old;
for (unsigned int i = start; i < end; i++) {
if (i < numRows) {
row = (char *)data + i * rowSize;
eval(row, offset, types, exprs, res, resType);
flag = false;
if (resType == RESTYPE_INT) {
flag = *(int *) res != 0;
} else if (resType == RESTYPE_FLT) {
flag = *(float *) res != 0;
}
free(res);
if (!flag) continue;
// Condition is satisfied, write code here
// printRowDevice(row, types, offsetSize);
old = atomicInc(top, numRows + 1);
memcpy((char *) resData + old * rowSize, row, rowSize);
}
}
}
__global__ void
joinKernel(void *left, void *right, void *join, myExpr *joinExpr, int *offset, int numCols, ColType *types, int rowSizeL, int rowSizeR, int numRowsL, int numRowsR,
unsigned int *numRowsRes, bool *matchedL) {
if (threadIdx.x == 0) {
*numRowsRes = 0;
}
__syncthreads();
int rowsPerThread = (numRowsL * numRowsR + NUM_THREADS - 1) / NUM_THREADS;
const unsigned start = rowsPerThread * threadIdx.x;
const unsigned end = rowsPerThread * (threadIdx.x + 1);
const int rowSizeRes = rowSizeL + rowSizeR;
void *res;
int resType = 0;
void *row;
bool flag;
unsigned old;
unsigned l_prev = numRowsL + 1;
unsigned l, r;
    // Allocate the scratch row once per thread and reuse it for every candidate pair
    // (the original allocated a new row on every iteration and never freed it).
    row = malloc(rowSizeRes);
    for (unsigned i = start; i < end; ++i) {
        // row i in join is obtained from i / numRowsR from left and i % numRowsR in right
        l = i / numRowsR;
        r = i % numRowsR;
        if (l >= numRowsL || r >= numRowsR) break;
        // printf("[%d, %d, (%d, %d)]\n", threadIdx.x, i, l, r);
memcpy(row, (char *)left + l * rowSizeL, rowSizeL);
memcpy((char *) row + rowSizeL, (char *)right + r * rowSizeR, rowSizeR);
eval(row, offset, types, joinExpr, res, resType);
flag = false;
if (resType == RESTYPE_INT) {
flag = *(int *)res != 0;
} else if (resType == RESTYPE_FLT) {
flag = *(float *)res != 0;
}
free(res);
if (!flag) continue;
if (l != l_prev) {
matchedL[l] = true;
l_prev = l;
}
old = atomicInc(numRowsRes, numRowsL * numRowsR);
memcpy((char *) join + old * rowSizeRes, row, rowSizeRes);
// printRowDevice(row, types, numCols);
    }
    free(row);
}
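// getLeft (below) completes a LEFT/RIGHT OUTER JOIN: for every left-side row that joinKernel
// did not match, it emits the row padded with NULL values for the right-side columns.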
__global__ void
getLeft(void *data, bool *matched, int numRows, ColType *typesNew, const int numColsOld, const int numColsNew,
const int rowSizeOld, const int rowSizeNew, void *resData, unsigned int *top) {
if (threadIdx.x == 0) {
*top = 0;
}
__syncthreads();
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
void *rowOld, *row, *cell;
row = malloc(rowSizeNew);
cell = (char *)row + rowSizeOld;
for (int j = numColsOld; j < numColsNew; ++j) {
switch (typesNew[j].type) {
case TYPE_INT: {
int *x = (int *) cell;
*x = getNullInt();
break;
}
case TYPE_FLOAT: {
float *x = (float *) cell;
*x = getNullFlt();
break;
}
case TYPE_VARCHAR: {
getNullStr((char *)cell, typesNew[j].size);
break;
}
default:
printf("Not implemented\n");
return;
}
cell = (char *) cell + typesNew[j].size;
}
unsigned int old;
for (int i = start; i < end; ++i) {
if (i >= numRows) break;
if (!matched[i]) {
rowOld = (char *) data + rowSizeOld * i;
memcpy(row, rowOld, rowSizeOld);
// printRowDevice(row, typesNew, numColsNew);
old = atomicInc(top, numRows + 1);
memcpy((char *) resData + old * rowSizeNew, row, rowSizeNew);
}
}
free(row);
}
void sql_select::execute(std::string &query) {
hsql::SQLParserResult *result = hsql::SQLParser::parseSQLString(query);
std::vector<int> columnNames;
if(!result->isValid()) {
utils::invalidQuery(result->errorMsg());
return;
}
cudaDeviceReset();
const auto *stmt = (const hsql::SelectStatement *) result->getStatement(0);
Data *d = getData(stmt->fromTable);
for (hsql::Expr* expr : *stmt->selectList){
switch (expr->type) {
case hsql::kExprStar:
// columnNames.emplace_back("*");
columnNames.push_back(-1);
break;
case hsql::kExprColumnRef:
// columnNames.emplace_back(expr->name);
columnNames.push_back(d->mdata.colMap[expr->name]);
break;
// case hsql::kExprTableColumnRef:
// inprint(expr->table, expr->name, numIndent);
// break;
case hsql::kExprLiteralFloat:
// columnNames.push_back(std::to_string(expr->fval));
columnNames.push_back(-2);
break;
case hsql::kExprLiteralInt:
// columnNames.push_back(std::to_string(expr->ival));
columnNames.push_back(-2);
break;
case hsql::kExprLiteralString:
// columnNames.emplace_back(expr->name);
columnNames.push_back(-2);
break;
// TODO: kExprFunctionRef (Distinct ?), kExprOperator (col1 + col2 ?)
// case hsql::kExprFunctionRef:
// inprint(expr->name, numIndent);
// inprint(expr->expr->name, numIndent + 1);
// break;
// case hsql::kExprOperator:
// printOperatorExpression(expr, numIndent);
// break;
default:
fprintf(stderr, "Unrecognized expression type %d\n", expr->type);
return;
}
}
std::vector<myExpr> whereExpr;
if (stmt->whereClause != nullptr) {
exprToVec(stmt->whereClause, whereExpr, d->mdata.columns, *d);
} else {
whereExpr.push_back(newExpr(CONSTANT_INT, (long) 1));
}
int rowSize = d->mdata.rowSize;
void *data = malloc(d->chunkSize * rowSize);
void *data_d;
int numCols = d->mdata.columns.size();
ColType *type_d;
cudaMalloc(&type_d, sizeof(ColType) * numCols);
cudaMemcpy(type_d, &d->mdata.datatypes[0], sizeof(ColType) * numCols, cudaMemcpyHostToDevice);
myExpr *where_d;
cudaMalloc(&where_d, sizeof(myExpr) * whereExpr.size());
cudaMemcpy(where_d, &whereExpr[0], sizeof(myExpr) * whereExpr.size(), cudaMemcpyHostToDevice);
std::vector<int> offsets(numCols + 1);
offsets[0] = 0;
for (int i = 1; i <= numCols; ++i) {
offsets[i] = offsets[i - 1] + d->mdata.datatypes[i - 1].size;
}
int *offsets_d;
cudaMalloc(&offsets_d, sizeof(int) * (numCols + 1));
cudaMemcpy(offsets_d, &offsets[0], sizeof(int) * (numCols + 1), cudaMemcpyHostToDevice);
int *dispCols_d;
cudaMalloc(&dispCols_d, sizeof(int) * columnNames.size());
cudaMemcpy(dispCols_d, &columnNames[0], sizeof(int) * columnNames.size(), cudaMemcpyHostToDevice);
int rowsRead = d->read(data);
// printf("HERE____________________%d\n", rowSize);
// utils::printMultiple(data, d->mdata.datatypes, d->mdata.rowSize, d->mdata.rowCount);
cudaMalloc(&data_d, d->chunkSize * rowSize);
while (rowsRead > 0) {
cudaMemcpy(data_d, data, rowSize * rowsRead, cudaMemcpyHostToDevice);
// cudaMemcpy(data, data_d, rowSize * rowsRead, cudaMemcpyDeviceToHost);
// printf("HERE____________________%d\n", rowSize);
// utils::printMultiple(data, d->mdata.datatypes, d->mdata.rowSize, d->mdata.rowCount);
selectKernel<<<1, NUM_THREADS>>>(data_d, rowSize, offsets_d, numCols, type_d, where_d, rowsRead, dispCols_d,
columnNames.size());
rowsRead = d->read(data);
cudaDeviceSynchronize();
}
d->~Data();
free(d);
free(data);
cudaFree(data_d);
cudaFree(type_d);
cudaFree(where_d);
cudaFree(offsets_d);
}
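// getData (below) resolves the FROM clause: a plain table name is opened directly, a
// sub-select is materialised through selectData(), and a join is evaluated chunk by chunk
// with joinKernel (plus getLeft for outer joins) and written into a new Data object that is
// then switched to read mode.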
Data *sql_select::getData(hsql::TableRef *fromTable) {
switch (fromTable->type) {
case hsql::kTableName:
return new Data(fromTable->name);
case hsql::kTableSelect:
return selectData(fromTable->select);
case hsql::kTableJoin: {
Data *d, *dL, *dR;
if (fromTable->join->type != hsql::kJoinRight) {
dL = getData(fromTable->join->left);
dR = getData(fromTable->join->right);
} else {
dL = getData(fromTable->join->right);
dR = getData(fromTable->join->left);
}
d = new Data(dL, dR);
dL->chunkSize = dR->chunkSize = d->chunkSize;
std::vector<myExpr> joinCondition;
exprToVec(fromTable->join->condition, joinCondition, d->mdata.columns, *d);
myExpr *joinCondition_d;
cudaMalloc(&joinCondition_d, joinCondition.size() * sizeof(myExpr));
cudaMemcpy(joinCondition_d, &joinCondition[0], sizeof(myExpr) * joinCondition.size(),
cudaMemcpyHostToDevice);
std::vector<int> offsets(d->mdata.columns.size() + 1);
offsets[0] = 0;
for (int i = 1; i <= d->mdata.columns.size(); ++i) {
offsets[i] = offsets[i - 1] + d->mdata.datatypes[i - 1].size;
}
int *offsets_d;
cudaMalloc(&offsets_d, sizeof(int) * (d->mdata.columns.size() + 1));
cudaMemcpy(offsets_d, &offsets[0], sizeof(int) * (d->mdata.columns.size() + 1), cudaMemcpyHostToDevice);
ColType *type_d;
cudaMalloc(&type_d, sizeof(ColType) * d->mdata.columns.size());
cudaMemcpy(type_d, &d->mdata.datatypes[0], sizeof(ColType) * d->mdata.columns.size(),
cudaMemcpyHostToDevice);
void *join = malloc(d->chunkSize * d->chunkSize * d->mdata.rowSize);
void *dataL = malloc(d->chunkSize * dL->mdata.rowSize), *dataL_d;
void *dataR = malloc(d->chunkSize * dR->mdata.rowSize), *dataR_d;
void *join_d; // Upto n^2 rows can be stored
cudaMalloc(&join_d, d->chunkSize * d->chunkSize * d->mdata.rowSize);
cudaMalloc(&dataL_d, dL->chunkSize * dL->mdata.rowSize);
cudaMalloc(&dataR_d, dR->chunkSize * dR->mdata.rowSize);
unsigned int numRowsJoin = 0;
unsigned int *numRowsJoin_d;
cudaMalloc(&numRowsJoin_d, sizeof(unsigned int));
std::vector<myExpr> whereClause;
int rowsReadL = dL->read(dataL), rowsReadR;
cudaMemcpy(dataL_d, dataL, rowsReadL * dL->mdata.rowSize, cudaMemcpyHostToDevice);
bool *matched_d;
cudaMalloc(&matched_d, sizeof(bool) * dL->chunkSize);
while (rowsReadL > 0) {
cudaMemset(matched_d, 0, sizeof(bool) * d->chunkSize);
dR->restartRead();
rowsReadR = dR->read(dataR);
cudaMemcpy(dataR_d, dataR, rowsReadR * dR->mdata.rowSize, cudaMemcpyHostToDevice);
while (rowsReadR > 0) {
joinKernel<<<1, NUM_THREADS>>>(dataL_d, dataR_d, join_d, joinCondition_d, offsets_d,
d->mdata.columns.size(),
type_d, dL->mdata.rowSize, dR->mdata.rowSize, rowsReadL, rowsReadR,
numRowsJoin_d, matched_d);
rowsReadR = dR->read(dataR);
cudaDeviceSynchronize();
cudaMemcpy(&numRowsJoin, numRowsJoin_d, sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpy(join, join_d, numRowsJoin * d->mdata.rowSize, cudaMemcpyDeviceToHost);
d->write(join, numRowsJoin * d->mdata.rowSize);
fflush(stdout);
// selectKernel<<<1, NUM_THREADS>>>(join_d, d->mdata.rowSize, offsets_d, d->mdata.columns.size(),
// type_d, whereClause_d, numRowsJoin);
cudaDeviceSynchronize();
cudaMemcpy(dataR_d, dataR, rowsReadR * dR->mdata.rowSize, cudaMemcpyHostToDevice);
}
if (fromTable->join->type == hsql::kJoinLeft || fromTable->join->type == hsql::kJoinRight) {
getLeft<<<1, NUM_THREADS>>>(dataL_d, matched_d, rowsReadL, type_d, dL->mdata.columns.size(),
d->mdata.columns.size(), dL->mdata.rowSize, d->mdata.rowSize, join_d,
numRowsJoin_d);
}
rowsReadL = dL->read(dataL);
cudaDeviceSynchronize();
if (fromTable->join->type == hsql::kJoinLeft || fromTable->join->type == hsql::kJoinRight) {
cudaMemcpy(&numRowsJoin, numRowsJoin_d, sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpy(join, join_d, numRowsJoin * d->mdata.rowSize, cudaMemcpyDeviceToHost);
d->write(join, numRowsJoin * d->mdata.rowSize);
}
cudaMemcpy(dataL_d, dataL, rowsReadL * dL->mdata.rowSize, cudaMemcpyHostToDevice);
}
dL->~Data();
dR->~Data();
free(dL);
free(dR);
free(dataL);
free(dataR);
free(join);
cudaFree(dataL_d);
cudaFree(dataR_d);
cudaFree(join_d);
cudaFree(joinCondition_d);
cudaFree(offsets_d);
cudaFree(type_d);
cudaFree(numRowsJoin_d);
cudaFree(matched_d);
d->switchToRead();
return d;
}
case hsql::kTableCrossProduct:
utils::invalidQuery("Cross Product is not Implemented.");
return nullptr;
}
return nullptr;
}
Data *sql_select::selectData(hsql::SelectStatement *stmt) {
std::vector<std::string> columnNames;
for (hsql::Expr* expr : *stmt->selectList){
switch (expr->type) {
case hsql::kExprStar:
columnNames.emplace_back("*");
break;
case hsql::kExprColumnRef:
columnNames.emplace_back(expr->name);
break;
// case hsql::kExprTableColumnRef:
// inprint(expr->table, expr->name, numIndent);
// break;
case hsql::kExprLiteralFloat:
columnNames.push_back(std::to_string(expr->fval));
break;
case hsql::kExprLiteralInt:
columnNames.push_back(std::to_string(expr->ival));
break;
case hsql::kExprLiteralString:
columnNames.emplace_back(expr->name);
break;
// TODO: kExprFunctionRef (Distinct ?), kExprOperator (col1 + col2 ?)
// case hsql::kExprFunctionRef:
// inprint(expr->name, numIndent);
// inprint(expr->expr->name, numIndent + 1);
// break;
// case hsql::kExprOperator:
// printOperatorExpression(expr, numIndent);
// break;
default:
fprintf(stderr, "Unrecognized expression type %d\n", expr->type);
return nullptr;
}
}
Data *d = getData(stmt->fromTable);
Data *result = new Data(d);
std::vector<myExpr> whereExpr;
if (stmt->whereClause != nullptr) {
exprToVec(stmt->whereClause, whereExpr, d->mdata.columns, *d);
} else {
whereExpr.push_back(newExpr(CONSTANT_INT, (long) 1));
}
int rowSize = d->mdata.rowSize;
void *data = malloc(d->chunkSize * rowSize);
void *resData = malloc(d->chunkSize * rowSize);
void *data_d, *resData_d;
int numCols = d->mdata.columns.size();
ColType *type_d;
cudaMalloc(&type_d, sizeof(ColType) * numCols);
cudaMemcpy(type_d, &d->mdata.datatypes[0], sizeof(ColType) * numCols, cudaMemcpyHostToDevice);
myExpr *where_d;
cudaMalloc(&where_d, sizeof(myExpr) * whereExpr.size());
cudaMemcpy(where_d, &whereExpr[0], sizeof(myExpr) * whereExpr.size(), cudaMemcpyHostToDevice);
std::vector<int> offsets(numCols + 1);
offsets[0] = 0;
for (int i = 1; i <= numCols; ++i) {
offsets[i] = offsets[i - 1] + d->mdata.datatypes[i - 1].size;
}
int *offsets_d;
cudaMalloc(&offsets_d, sizeof(int) * (numCols + 1));
cudaMemcpy(offsets_d, &offsets[0], sizeof(int) * (numCols + 1), cudaMemcpyHostToDevice);
int rowsRead = d->read(data);
cudaMalloc(&data_d, d->chunkSize * rowSize);
cudaMalloc(&resData_d, d->chunkSize * rowSize);
unsigned int numMatches, *numMatches_d;
cudaMalloc(&numMatches_d, sizeof(unsigned int));
while (rowsRead > 0) {
cudaMemcpy(data_d, data, rowSize * rowsRead, cudaMemcpyHostToDevice);
selectKernelRes<<<1, NUM_THREADS>>>(data_d, rowSize, offsets_d, numCols, type_d, where_d, rowsRead, resData_d,
numMatches_d);
rowsRead = d->read(data);
cudaDeviceSynchronize();
cudaMemcpy(&numMatches, numMatches_d, sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpy(resData, resData_d, rowSize * numMatches, cudaMemcpyDeviceToHost);
result->write(resData, rowSize * numMatches);
// utils::printMultiple(resData, d->mdata.datatypes, rowSize, numMatches);
}
d->~Data();
free(d);
free(data);
free(resData);
cudaFree(data_d);
cudaFree(resData_d);
cudaFree(type_d);
cudaFree(where_d);
cudaFree(offsets_d);
result->switchToRead();
result->restartRead();
return result;
}
// void sql_select::execute(std::string &query) {
//
// hsql::SQLParserResult *result;
// std::vector<std::string> columnNames;
//
// result = hsql::SQLParser::parseSQLString(query);
// columnNames = std::vector<std::string>();
//
// if(result->isValid()){
// const auto *stmt = (const hsql::SelectStatement *) result->getStatement(0);
// // Get column names
// for (hsql::Expr* expr : *stmt->selectList){
// switch (expr->type) {
// case hsql::kExprStar:
// columnNames.emplace_back("*");
// break;
// case hsql::kExprColumnRef:
// columnNames.emplace_back(expr->name);
// break;
// // case hsql::kExprTableColumnRef:
// // inprint(expr->table, expr->name, numIndent);
// // break;
// case hsql::kExprLiteralFloat:
// columnNames.push_back(std::to_string(expr->fval));
// break;
// case hsql::kExprLiteralInt:
// columnNames.push_back(std::to_string(expr->ival));
// break;
// case hsql::kExprLiteralString:
// columnNames.emplace_back(expr->name);
// break;
// // TODO: kExprFunctionRef (Distinct ?), kExprOperator (col1 + col2 ?)
// // case hsql::kExprFunctionRef:
// // inprint(expr->name, numIndent);
// // inprint(expr->expr->name, numIndent + 1);
// // break;
// // case hsql::kExprOperator:
// // printOperatorExpression(expr, numIndent);
// // break;
// default:
// fprintf(stderr, "Unrecognized expression type %d\n", expr->type);
// return;
// }
// }
// // Get tables reference
// auto table = stmt->fromTable;
// Data *d;
// switch (table->type) {
// case hsql::kTableName:
// // inprint(table->name, numIndent);
// d = new Data(table->name);
// break;
// // case hsql::kTableSelect:
// // // printSelectStatementInfo(table->select, numIndent);
// // break;
// case hsql::kTableJoin: {
// std::string leftTable, rightTable;
// if (table->join->type != hsql::kJoinRight) {
// d = new Data(table->join->left->name, table->join->right->name);
// leftTable = table->join->left->name;
// rightTable = table->join->right->name;
// } else {
// d = new Data(table->join->right->name, table->join->left->name);
// leftTable = table->join->right->name;
// rightTable = table->join->left->name;
// }
// Data dL(leftTable);
// dL.chunkSize = d->chunkSize;
// Data dR(rightTable);
// dR.chunkSize = d->chunkSize;
//
// std::vector<myExpr> joinCondition;
// exprToVec(table->join->condition, joinCondition, d->mdata.columns, *d);
// myExpr *joinCondition_d;
// cudaMalloc(&joinCondition_d, joinCondition.size() * sizeof(myExpr));
// cudaMemcpy(joinCondition_d, &joinCondition[0], sizeof(myExpr) * joinCondition.size(),
// cudaMemcpyHostToDevice);
//
// std::vector<int> offsets(d->mdata.columns.size() + 1);
// offsets[0] = 0;
// for (int i = 1; i <= d->mdata.columns.size(); ++i) {
// offsets[i] = offsets[i - 1] + d->mdata.datatypes[i - 1].size;
// }
// int *offsets_d;
// cudaMalloc(&offsets_d, sizeof(int) * (d->mdata.columns.size() + 1));
// cudaMemcpy(offsets_d, &offsets[0], sizeof(int) * (d->mdata.columns.size() + 1), cudaMemcpyHostToDevice);
//
// ColType *type_d;
// cudaMalloc(&type_d, sizeof(ColType) * d->mdata.columns.size());
// cudaMemcpy(type_d, &d->mdata.datatypes[0], sizeof(ColType) * d->mdata.columns.size(),
// cudaMemcpyHostToDevice);
//
// void *join = malloc(d->chunkSize * d->chunkSize * d->mdata.rowSize);
// void *dataL = malloc(d->chunkSize * dL.mdata.rowSize), *dataL_d;
// void *dataR = malloc(d->chunkSize * dR.mdata.rowSize), *dataR_d;
// void *join_d; // Upto n^2 rows can be stored
// cudaMalloc(&join_d, d->chunkSize * d->chunkSize * d->mdata.rowSize);
// cudaMalloc(&dataL_d, dL.chunkSize * dL.mdata.rowSize);
// cudaMalloc(&dataR_d, dR.chunkSize * dR.mdata.rowSize);
// unsigned int numRowsJoin = 0;
// unsigned int *numRowsJoin_d;
// cudaMalloc(&numRowsJoin_d, sizeof(unsigned int));
//
// std::vector<myExpr> whereClause;
//
// int rowsReadL = dL.read(dataL), rowsReadR;
// cudaMemcpy(dataL_d, dataL, rowsReadL * dL.mdata.rowSize, cudaMemcpyHostToDevice);
//
// bool *matched_d;
// cudaMalloc(&matched_d, sizeof(bool) * dL.chunkSize);
// while (rowsReadL > 0) {
// cudaMemset(matched_d, 0, sizeof(bool) * dL.chunkSize);
// dR.restartRead();
// rowsReadR = dR.read(dataR);
// cudaMemcpy(dataR_d, dataR, rowsReadR * dR.mdata.rowSize, cudaMemcpyHostToDevice);
// while (rowsReadR > 0) {
// joinKernel<<<1, NUM_THREADS>>>(dataL_d, dataR_d, join_d, joinCondition_d, offsets_d,
// d->mdata.columns.size(),
// type_d, dL.mdata.rowSize, dR.mdata.rowSize, rowsReadL, rowsReadR,
// numRowsJoin_d, matched_d);
// rowsReadR = dR.read(dataR);
// cudaDeviceSynchronize();
//
// cudaMemcpy(&numRowsJoin, numRowsJoin_d, sizeof(unsigned int), cudaMemcpyDeviceToHost);
// cudaMemcpy(join, join_d, numRowsJoin * d->mdata.rowSize, cudaMemcpyDeviceToHost);
// d->write(join, numRowsJoin * d->mdata.rowSize);
// fflush(stdout);
// // selectKernel<<<1, NUM_THREADS>>>(join_d, d->mdata.rowSize, offsets_d, d->mdata.columns.size(),
// // type_d, whereClause_d, numRowsJoin);
// cudaDeviceSynchronize();
// cudaMemcpy(dataR_d, dataR, rowsReadR * dR.mdata.rowSize, cudaMemcpyHostToDevice);
// }
// if (table->join->type == hsql::kJoinLeft || table->join->type == hsql::kJoinRight) {
// getLeft<<<1, NUM_THREADS>>>(dataL_d, matched_d, rowsReadL, type_d, dL.mdata.columns.size(),
// d->mdata.columns.size(), dL.mdata.rowSize, d->mdata.rowSize);
//
// }
// rowsReadL = dL.read(dataL);
// cudaDeviceSynchronize();
// cudaMemcpy(dataL_d, dataL, rowsReadL * dL.mdata.rowSize, cudaMemcpyHostToDevice);
// }
//
// myExpr *whereClause_d;
// if (stmt->whereClause != nullptr) {
// exprToVec(stmt->whereClause, whereClause, d->mdata.columns, *d);
// cudaMalloc(&whereClause_d, sizeof(myExpr) * whereClause.size());
// cudaMemcpy(whereClause_d, &whereClause[0], sizeof(myExpr) * whereClause.size(),
// cudaMemcpyHostToDevice);
// }
//
// // change chunk size before select
// // d->chunkSize = 500 * 1024 * 1024 / d->mdata.rowSize;
// // if chunksize is changed, join and join_d might need to be reallocated
// d->chunkSize *= d->chunkSize;
//
// // printf("____________________________________________________\n");
// d->switchToRead();
// int numRowsRead;
// numRowsRead = d->read(join);
// while (numRowsRead > 0) {
// cudaMemcpy(join_d, join, numRowsRead * d->mdata.rowSize, cudaMemcpyHostToDevice);
// selectKernel<<<1, NUM_THREADS>>>(join_d, d->mdata.rowSize, offsets_d, d->mdata.columns.size(),
// type_d, whereClause_d, numRowsRead);
// cudaDeviceSynchronize();
// numRowsRead = d->read(join);
// }
//
// d->~Data();
// free(d);
// free(dataL);
// free(dataR);
//
// cudaFree(dataL_d);
// cudaFree(dataR_d);
// cudaFree(join_d);
// cudaFree(joinCondition_d);
// cudaFree(offsets_d);
// cudaFree(type_d);
// cudaFree(numRowsJoin_d);
// cudaFree(whereClause_d);
// cudaFree(offsets_d);
// cudaFree(matched_d);
// return;
// break;
// }
// // case hsql::kTableCrossProduct:
// // // for (TableRef* tbl : *table->list) printTableRefInfo(tbl, numIndent);
// // break;
// default:
// printf("Will be handled later\n");
// return;
// }
// if (stmt->whereClause != nullptr) {
// // Get where
// std::vector<myExpr> tree;
//
// auto expr = stmt->whereClause;
// exprToVec(expr, tree, d->mdata.columns, *d);
// free(expr);
//
// int rowSize = d->mdata.rowSize;
// void *data = malloc(d->chunkSize * rowSize);
// void *data_d;
// int numCols = d->mdata.columns.size();
// cudaSetDevice(0);
// cudaDeviceReset();
//
// ColType *type_d;
// cudaMalloc(&type_d, sizeof(ColType) * numCols);
// cudaMemcpy(type_d, &d->mdata.datatypes[0], sizeof(ColType) * numCols, cudaMemcpyHostToDevice);
// myExpr *where_d;
// cudaMalloc(&where_d, sizeof(myExpr) * tree.size());
// cudaMemcpy(where_d, &tree[0], sizeof(myExpr) * tree.size(), cudaMemcpyHostToDevice);
// int *offsets = (int *) malloc(sizeof(int) * (numCols + 1));
// offsets[0] = 0;
// for (int i = 1; i <= numCols; i++) {
// offsets[i] = offsets[i - 1] + d->mdata.datatypes[i - 1].size;
// }
// int *offsets_d;
// cudaMalloc(&offsets_d, sizeof(int) * (numCols + 1));
// cudaMemcpy(offsets_d, offsets, sizeof(int) * (numCols + 1), cudaMemcpyHostToDevice);
// int numRows = d->read(data);
//
// // printing data in table
// // utils::printMultiple(data, d.mdata.datatypes, d.mdata.rowSize, d.mdata.rowCount);
//
// cudaMalloc(&data_d, d->chunkSize * rowSize);
// while (numRows > 0) {
// cudaMemcpy(data_d, data, rowSize * numRows, cudaMemcpyHostToDevice);
// selectKernel<<<1, NUM_THREADS>>>(data_d, rowSize, offsets_d, numCols, type_d, where_d, numRows);
// numRows = d->read(data);
// cudaDeviceSynchronize();
// }
//
// // Free all the data
// d->~Data();
// free(d);
// free(data);
// free(offsets);
// cudaFree(data_d);
// cudaFree(type_d);
// cudaFree(where_d);
// cudaFree(offsets_d);
// } else {
// // RETURN ALL ROWS
// }
// } else {
// fprintf(stderr, "Given string is not a valid SQL query.\n");
// fprintf(stderr, "%s (L%d:%d)\n",
// result->errorMsg(),
// result->errorLine(),
// result->errorColumn());
// }
// free(result);
// } |
2fe26a3807c00a34d17633994e35528116b6a89b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <climits>
#include <vector>
//Include other CUDA files
#include "main.cuh"
#include "features.cuh"
#include "haarfinder.cuh"
#include "sat.cuh"
#include "common_hip.cuh"
using namespace std;
#define NUM_EXAMPLES 60000
#define BLOCK_SIZE 1024
//Define constant memory for reading from features and labels
__constant__ feature f[numFeatures];
__constant__ unsigned char l[NUM_EXAMPLES];
//Weak classifier entry: per-feature threshold and its weighted classification error.
//Defined at namespace scope so that both the kernel and main() can name the type.
struct _weak_classifier
{
	uint16_t threshold;
	float e;
};
//Strong classifier class
class _strong_classifier
{
	public:
_weak_classifier weak_classifier[numFeatures];
float* alpha;
void classify(int i)
{
cout << "Hello World! " << i << endl;
}
};
//CUDA weak_classifier
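// Each thread owns one Haar feature: it sweeps the threshold theta downwards from `range`,
// accumulates the weighted classification error over all training examples for every theta,
// and stores the best (threshold, error) pair in weak_classifier[i].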
__global__ void weak_classifier(float* w, _weak_classifier* weak_classifier, uint16_t range, uint32_t num_examples)
{
	int i = blockDim.x*blockIdx.x + threadIdx.x;
	//Guard against the partial last block
	if(i >= numFeatures)
		return;
	uint16_t theta = range;
	uint16_t min_theta = 0;
	uint32_t minimum = UINT_MAX;
	uint32_t error = 0;
	float e = 0;
	float min_error = 0;
int32_t perfect_haar = 255*0.5*(f[i].x2 - f[i].x1)*(f[i].y2 - f[i].y1);
while(theta > 0)
{
error = 0;
e = 0;
for(int j = 0; j < num_examples; j++)
{
if(abs(f[i].mag - perfect_haar) < theta)
{
				if(l[j] == 0)
				{
					e += w[j];
error++;
}
}
else
{
				if(l[j] == 1)
				{
					e += w[j];
error++;
}
}
}
		//No __syncthreads() needed here: each thread only touches its own private data
//Track the minimum error
if(error < minimum)
{
minimum = error;
min_error = e;
min_theta = theta;
}
theta--;
}
//Set the weak classifier
	weak_classifier[i].threshold = min_theta;
	weak_classifier[i].e = min_error;
}
int main(int argc, char *argv[])
{
//Initialize error rates and targets
float error_rate = 0;
float error_target = atof(argv[1]);
	float min_error = 0;
//Initialize example weights
float w[NUM_EXAMPLES];
float tot_w;
//Classifier threshold range
uint16_t theta = 5000;
	//Strong classifier (host side)
	_strong_classifier strong_classifier_host;
	//Allocate space for CUDA
	dim3 dimGrid((numFeatures + BLOCK_SIZE - 1)/BLOCK_SIZE, 1, 1);
	dim3 dimBlock(BLOCK_SIZE, 1, 1);
	//Device buffers for the per-feature weak classifiers and the per-example weights
	_weak_classifier* weak_classifier_dev;
	float* w_dev;
	hipError_t cuda_error[2];
	cuda_error[0] = hipMalloc((void**) &weak_classifier_dev, numFeatures*sizeof(_weak_classifier));
	cuda_error[1] = hipMalloc((void**) &w_dev, NUM_EXAMPLES*sizeof(float));
for(int i = 0; i < 2; i++)
{
if(cuda_error[i] != 0)
{
cout << "hipMalloc error (" << (i == 0)?("error"):("weights") << "): " << hipGetErrorString(cuda_error[i]) << endl;
}
}
//Copy weak classifier structure to GPU
	cuda_error[0] = hipMemcpy(weak_classifier_dev, strong_classifier_host.weak_classifier, numFeatures*sizeof(_weak_classifier), hipMemcpyHostToDevice);
if(cuda_error[0] != 0)
{
cout << "hipMemcpy error (weak_classifier): " << hipGetErrorString(cuda_error[0]) << endl;
}
//Copy constant memory to GPU
cuda_error[0] = hipMemcpyToSymbol(f, features, numFeatures*sizeof(feature));
cuda_error[1] = hipMemcpyToSymbol(l, labels, numLabels*sizeof(unsigned char));
for(int i = 0; i < 2; i++)
{
if(cuda_error[i] != 0)
{
cout << "hipMemcpyToSymbol error (" << (i == 0)?("f"):("l") << "): " << hipGetErrorString(cuda_error[i]) << endl;
}
}
//Training stage. Loop T stages as input from user
for(int t = 0; t < T; t++)
{
//Initialize weights
while(error_rate > error_target)
{
for(int i = 0; i < NUM_EXAMPLES; i++)
{
				w[i] = 1.0f/NUM_EXAMPLES;
}
//Train weak classifier h_j for each feature j
			cuda_error[0] = hipMemcpy(weak_classifier_dev, strong_classifier_host.weak_classifier, numFeatures*sizeof(_weak_classifier), hipMemcpyHostToDevice);
			cuda_error[1] = hipMemcpy(w_dev, w, NUM_EXAMPLES*sizeof(float), hipMemcpyHostToDevice);
for(int i = 0; i < 2; i++)
{
if(cuda_error[i] != 0)
{
cout << "hipMemcpyToSymbol error (" << (i == 0)?("weak_classifier"):("weights") << "): " << hipGetErrorString(cuda_error[i]) << endl;
}
}
			hipLaunchKernelGGL(( weak_classifier), dim3(dimGrid), dim3(dimBlock), 0, 0, w_dev, weak_classifier_dev, theta, NUM_EXAMPLES);
hipDeviceSynchronize();
			cuda_error[0] = hipMemcpy(strong_classifier_host.weak_classifier, weak_classifier_dev, numFeatures*sizeof(_weak_classifier), hipMemcpyDeviceToHost);
if(cuda_error[0] != 0)
{
cout << "hipMemcpy error (error): " << hipGetErrorString(error[0]) << endl;
}
			//Pick feature with minimized weighted error
			min_error = strong_classifier_host.weak_classifier[0].e;
			int best_feature = 0;
			for(int i = 0; i < numFeatures; i++)
			{
				if(strong_classifier_host.weak_classifier[i].e < min_error)
				{
					min_error = strong_classifier_host.weak_classifier[i].e;
					best_feature = i;
				}
			}
//Update error and weights
for(int i = 0; i < NUM_EXAMPLES; i++)
{
				if(features[i].mag < strong_classifier_host.weak_classifier[best_feature].threshold)
{
if(labels[i] == 1)
{
						w[i] *= min_error/(1-min_error);
}
}
}
//Normalize weights
for(int i = 0; i < NUM_EXAMPLES; i++)
{
tot_w += w[i];
}
for(int i = 0; i < NUM_EXAMPLES; i++)
{
w[i] /= tot_w;
}
			strong_classifier_host.alpha[t] = log((1 - min_error)/min_error);
}
}
return 0;
} | 2fe26a3807c00a34d17633994e35528116b6a89b.cu | #include <iostream>
#include <cstdlib>
#include <cmath>
#include <climits>
#include <vector>
//Include other CUDA files
#include "main.cuh"
#include "features.cuh"
#include "haarfinder.cuh"
#include "sat.cuh"
#include "common.cuh"
using namespace std;
#define NUM_EXAMPLES 60000
#define BLOCK_SIZE 1024
//Define constant memory for reading from features and labels
__constant__ feature f[numFeatures];
__constant__ unsigned char l[NUM_EXAMPLES];
//Weak classifier entry: per-feature threshold and its weighted classification error.
//Defined at namespace scope so that both the kernel and main() can name the type.
struct _weak_classifier
{
	uint16_t threshold;
	float e;
};
//Strong classifier class
class _strong_classifier
{
	public:
_weak_classifier weak_classifier[numFeatures];
float* alpha;
void classify(int i)
{
cout << "Hello World! " << i << endl;
}
};
//CUDA weak_classifier
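// Each thread owns one Haar feature: it sweeps the threshold theta downwards from `range`,
// accumulates the weighted classification error over all training examples for every theta,
// and stores the best (threshold, error) pair in weak_classifier[i].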
__global__ void weak_classifier(float* w, _weak_classifier* weak_classifier, uint16_t range, uint32_t num_examples)
{
	int i = blockDim.x*blockIdx.x + threadIdx.x;
	//Guard against the partial last block
	if(i >= numFeatures)
		return;
	uint16_t theta = range;
	uint16_t min_theta = 0;
	uint32_t minimum = UINT_MAX;
	uint32_t error = 0;
	float e = 0;
	float min_error = 0;
int32_t perfect_haar = 255*0.5*(f[i].x2 - f[i].x1)*(f[i].y2 - f[i].y1);
while(theta > 0)
{
error = 0;
e = 0;
for(int j = 0; j < num_examples; j++)
{
if(abs(f[i].mag - perfect_haar) < theta)
{
				if(l[j] == 0)
				{
					e += w[j];
error++;
}
}
else
{
				if(l[j] == 1)
				{
					e += w[j];
error++;
}
}
}
		//No __syncthreads() needed here: each thread only touches its own private data
//Track the minimum error
if(error < minimum)
{
minimum = error;
min_error = e;
min_theta = theta;
}
theta--;
}
//Set the weak classifier
	weak_classifier[i].threshold = min_theta;
	weak_classifier[i].e = min_error;
}
int main(int argc, char *argv[])
{
//Initialize error rates and targets
float error_rate = 0;
float error_target = atof(argv[1]);
	float min_error = 0;
//Initialize example weights
float w[NUM_EXAMPLES];
float tot_w;
//Classifier threshold range
uint16_t theta = 5000;
	//Strong classifier (host side)
	_strong_classifier strong_classifier_host;
	//Allocate space for CUDA
	dim3 dimGrid((numFeatures + BLOCK_SIZE - 1)/BLOCK_SIZE, 1, 1);
	dim3 dimBlock(BLOCK_SIZE, 1, 1);
	//Device buffers for the per-feature weak classifiers and the per-example weights
	_weak_classifier* weak_classifier_dev;
	float* w_dev;
	cudaError_t cuda_error[2];
	cuda_error[0] = cudaMalloc((void**) &weak_classifier_dev, numFeatures*sizeof(_weak_classifier));
	cuda_error[1] = cudaMalloc((void**) &w_dev, NUM_EXAMPLES*sizeof(float));
for(int i = 0; i < 2; i++)
{
if(cuda_error[i] != 0)
{
cout << "cudaMalloc error (" << (i == 0)?("error"):("weights") << "): " << cudaGetErrorString(cuda_error[i]) << endl;
}
}
//Copy weak classifier structure to GPU
	cuda_error[0] = cudaMemcpy(weak_classifier_dev, strong_classifier_host.weak_classifier, numFeatures*sizeof(_weak_classifier), cudaMemcpyHostToDevice);
if(cuda_error[0] != 0)
{
cout << "cudaMemcpy error (weak_classifier): " << cudaGetErrorString(cuda_error[0]) << endl;
}
//Copy constant memory to GPU
cuda_error[0] = cudaMemcpyToSymbol(f, features, numFeatures*sizeof(feature));
cuda_error[1] = cudaMemcpyToSymbol(l, labels, numLabels*sizeof(unsigned char));
for(int i = 0; i < 2; i++)
{
if(cuda_error[i] != 0)
{
cout << "cudaMemcpyToSymbol error (" << (i == 0)?("f"):("l") << "): " << cudaGetErrorString(cuda_error[i]) << endl;
}
}
//Training stage. Loop T stages as input from user
for(int t = 0; t < T; t++)
{
//Initialize weights
while(error_rate > error_target)
{
for(int i = 0; i < NUM_EXAMPLES; i++)
{
w[i] = 1.0f/NUM_EXAMPLES;
}
//Train weak classifier h_j for each feature j
cuda_error[0] = cudaMemcpy(weak_classifier_dev, strong_classifier_host.weak_classifier, numFeatures*sizeof(_weak_classifier), cudaMemcpyHostToDevice);
cuda_error[1] = cudaMemcpy(w_dev, w, NUM_EXAMPLES*sizeof(float), cudaMemcpyHostToDevice);
for(int i = 0; i < 2; i++)
{
if(cuda_error[i] != 0)
{
cout << "cudaMemcpyToSymbol error (" << (i == 0)?("weak_classifier"):("weights") << "): " << cudaGetErrorString(cuda_error[i]) << endl;
}
}
weak_classifier<<<dimGrid, dimBlock>>>(w_dev, weak_classifier_dev, theta, NUM_EXAMPLES);
cudaDeviceSynchronize();
cuda_error[0] = cudaMemcpy(strong_classifier_host.weak_classifier, weak_classifier_dev, numFeatures*sizeof(_weak_classifier), cudaMemcpyDeviceToHost);
if(cuda_error[0] != 0)
{
cout << "cudaMemcpy error (error): " << cudaGetErrorString(error[0]) << endl;
}
//Pick feature with minimized weighted error
int best_feature = 0;
min_error = strong_classifier_host.weak_classifier[0].e;
for(int i = 0; i < numFeatures; i++)
{
if(strong_classifier_host.weak_classifier[i].e < min_error)
{
min_error = strong_classifier_host.weak_classifier[i].e;
best_feature = i;
}
}
//Update error and weights
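//AdaBoost update: examples the selected weak classifier handled correctly are down-weighted by
//beta = e/(1-e), the weights are renormalized, and the stage weight is alpha = log((1-e)/e)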
for(int i = 0; i < NUM_EXAMPLES; i++)
{
if(features[i].mag < strong_classifier_host.weak_classifier[best_feature].threshold)
{
if(labels[i] == 1)
{
w[i] *= min_error/(1-min_error);
}
}
}
//Normalize weights
tot_w = 0;
for(int i = 0; i < NUM_EXAMPLES; i++)
{
tot_w += w[i];
}
for(int i = 0; i < NUM_EXAMPLES; i++)
{
w[i] /= tot_w;
}
strong_classifier_host.alpha[t] = log((1 - min_error)/min_error);
}
}
return 0;
} |
7f47a6097cd2b796e8c2fa091a9820251599b439.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* deformable convolution
*/
#include "cuda_kernels.h"
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = (blockIdx.x * blockDim.x) + threadIdx.x; i < (n); i += (blockDim.x * gridDim.x))
__device__ inline float sigmoid(float data) {
return 1.0f / (1.0f + expf(-data));
}
template<typename T>
__global__ void add_bias(T *x, const T *bias, int n) {
const int bid = blockIdx.x;
auto b = bias[bid];
for (int tid = threadIdx.x; tid < n; tid += blockDim.x)
x[bid * n + tid] += b;
}
// [channel, batch, H, W] x + [channel] bias
template<typename T>
void add_bias_kernelLauncher(T *x, const T *bias, int channel, int batch, int H, int W, hipStream_t stream) {
dim3 grid(channel);
int n = W * H * batch;
int blockSize = n;
if (blockSize > 1024)
blockSize = 1024;
hipLaunchKernelGGL(( add_bias), dim3(grid), dim3(blockSize), 0, stream, x, bias, n);
}
template<typename T>
__global__ void transpose(T *output, const T *input, int n) {
int c = blockIdx.y;
int bs = blockIdx.x;
for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
output[(bs * gridDim.y + c) * n + tid] = input[(c * gridDim.x + bs) * n + tid];
}
}
// [c, n, h, w] => [n, c, h, w]
template<typename T>
void transpose_kernelLauncher(T *output, const T *input, int bs, int c, int h, int w, hipStream_t stream) {
dim3 grid(bs, c);
int n = w * h;
int blockSize = n;
if (std::is_same<T, half>::value && n % 2 == 0) {
blockSize /= 2;
if (blockSize > 1024) {
blockSize = 1024;
}
hipLaunchKernelGGL(( transpose), dim3(grid), dim3(blockSize), 0, stream, (half2 *) output, (const half2 *) input, n / 2);
} else {
if (blockSize > 1024) {
blockSize = 1024;
}
hipLaunchKernelGGL(( transpose), dim3(grid), dim3(blockSize), 0, stream, output, input, n);
}
}
template<typename T>
__device__ T bilinear_interpolate(const T *in, int height, int width, T h, T w) {
if (h <= T(-1) || T(height) <= h || w <= T(-1) || T(width) <= w) {
return T(0);
}
int h_low = floor((float) h);
int w_low = floor((float) w);
int h_high = h_low + 1;
int w_high = w_low + 1;
T lh = h - T(h_low);
T lw = w - T(w_low);
T hh = T(1) - lh, hw = T(1) - lw;
T v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = in[h_low * width + w_low];
T v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = in[h_low * width + w_high];
T v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = in[h_high * width + w_low];
T v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = in[h_high * width + w_high];
T w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template<typename T>
__global__ void deformable_im2col_kernel(
int n,
const T *input_ptr,
const T *offset_ptr,
const T *mask_ptr,
int height,
int width,
int weight_h,
int weight_w,
int pad_h,
int pad_w,
int stride_h,
int stride_w,
int dilation_h,
int dilation_w,
int batch_sz,
int n_in_channels,
int n_offset_grps,
int out_h,
int out_w,
bool use_mask,
T *columns_ptr) {
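// Each thread handles one (input channel, batch, out_y, out_x) position: it reads the
// weight_h*weight_w offset (and optional mask) values for its offset group, samples the input
// bilinearly at the displaced locations, and writes the results into the im2col buffer,
// which is laid out as [in_c*weight_h*weight_w, batch, out_h, out_w].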
for (int index = (blockIdx.x * blockDim.x) + threadIdx.x; index < (n); index += (blockDim.x * gridDim.x)) {
const int out_x = index % out_w;
const int out_y = (index / out_w) % out_h;
const int out_b = (index / (out_w * out_h)) % batch_sz;
const int in_c = index / (out_w * out_h * batch_sz);
const int out_c = in_c * weight_h * weight_w;
int c_per_offset_grp = n_in_channels / n_offset_grps;
const int grp_idx = in_c / c_per_offset_grp;
columns_ptr += (out_c * (batch_sz * out_h * out_w) + out_b * (out_h * out_w) + out_y * out_w + out_x);
input_ptr += (out_b * (n_in_channels * height * width) + in_c * (height * width));
offset_ptr += (out_b * n_offset_grps + grp_idx) * 3 * weight_h * weight_w * out_h * out_w;
if (use_mask) {
// mask_ptr += (out_b * n_offset_grps + grp_idx) * weight_h * weight_w * out_h * out_w;
mask_ptr = offset_ptr + 2 * weight_h * weight_w * out_h * out_w;
}
for (int i = 0; i < weight_h; ++i) {
for (int j = 0; j < weight_w; ++j) {
const int mask_idx = i * weight_w + j;
const int offset_idx = 2 * mask_idx;
T mask_value = 1;
if (use_mask) {
mask_value = mask_ptr[mask_idx * (out_h * out_w) + out_y * out_w + out_x];
mask_value = sigmoid(mask_value);
}
const T offset_h = offset_ptr[offset_idx * (out_h * out_w) + out_y * out_w + out_x];
const T offset_w = offset_ptr[(offset_idx + 1) * (out_h * out_w) + out_y * out_w + out_x];
const T y = T(out_y * stride_h - pad_h) + T(i * dilation_h) + offset_h;
const T x = T(out_x * stride_w - pad_w) + T(j * dilation_w) + offset_w;
*columns_ptr = mask_value * bilinear_interpolate(input_ptr, height, width, y, x);
columns_ptr += batch_sz * out_h * out_w;
}
}
}
}
// input, weight, output are row-major
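// hipBLAS/cuBLAS use column-major storage, so a row-major [r, c] buffer can be passed directly as
// its column-major c-by-r transpose. The launcher below exploits this by calling gemm() with the
// operands swapped (columns as A, weight as B, m = bs*out_h*out_w, n = out_c), which makes the
// column-major result exactly the row-major [out_c, bs*out_h*out_w] buffer we want, with no explicit transpose.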
template<typename T>
void gemm(
T *C,
const T *A,
const T *B,
const int m,
const int n,
const int k,
const int lda,
const int ldb,
const int ldc,
hipblasOperation_t trans_a,
hipblasOperation_t trans_b,
hipblasHandle_t cublas_handle,
float scale = 1.0f) {
hipDataType Atype, Btype, Ctype, computeType;
float alpha_float = scale;
float beta_float = 0.0f;
half alpha_half = half(scale);
half beta_half = half(0.0f);
void *alpha, *beta;
int cublasAlgo;
if (std::is_same<T, float>::value) {
computeType = HIP_R_32F;
Atype = HIP_R_32F;
Btype = HIP_R_32F;
Ctype = HIP_R_32F;
alpha = &alpha_float;
beta = &beta_float;
cublasAlgo = HIPBLAS_GEMM_DEFAULT;
} else {
computeType = HIP_R_16F;
Atype = HIP_R_16F;
Btype = HIP_R_16F;
Ctype = HIP_R_16F;
alpha = &alpha_half;
beta = &beta_half;
cublasAlgo = CUBLAS_GEMM_DEFAULT_TENSOR_OP;
}
hipblasGemmEx(
cublas_handle,
trans_a,
trans_b,
m,
n,
k,
alpha,
A,
Atype,
lda,
B,
Btype,
ldb,
beta,
C,
Ctype,
ldc,
computeType,
static_cast<hipblasGemmAlgo_t>(cublasAlgo));
}
template<typename T>
void deform_conv2d_kernel_launcher(T *output_ptr, T *tmp_output_ptr, T *columns_ptr, const T *input_ptr, const T *offset_ptr, const T *mask_ptr, const T *weight_ptr, const T *bias_ptr, int bs, int in_h, int in_w, int out_c, int in_c, int kernel_h, int kernel_w, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, int offset_groups, int out_h, int out_w, bool use_mask, bool use_bias, hipblasHandle_t mCublas, hipStream_t stream) {
int num_kernels = in_c * bs * out_h * out_w;
const unsigned int threads = 512;
const unsigned int blocks = (num_kernels + threads - 1) / threads;
hipLaunchKernelGGL(( deformable_im2col_kernel), dim3(blocks), dim3(threads), 0, stream,
num_kernels,
(const T *) input_ptr,
(const T *) offset_ptr,
(const T *) mask_ptr,
in_h,
in_w,
kernel_h,
kernel_w,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
bs,
in_c,
offset_groups,
out_h,
out_w,
use_mask,
(T *) columns_ptr);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in deformable_im2col: %s\n", hipGetErrorString(err));
}
int m = out_c;
int n = bs * out_h * out_w;
int k = in_c * kernel_h * kernel_w;
gemm((T *) tmp_output_ptr, (T *) columns_ptr, (T *) weight_ptr, n, m, k, n, k, n, HIPBLAS_OP_N, HIPBLAS_OP_N, mCublas);
hipError_t gemm_err = hipGetLastError();
if (gemm_err != hipSuccess) {
printf("error in gemm: %s\n", hipGetErrorString(gemm_err));
}
if (use_bias) {
//output [out_c, bs, out_h, out_w]
add_bias_kernelLauncher((T *) tmp_output_ptr, (const T *) bias_ptr, out_c, bs, out_h, out_w, stream);
hipError_t bias_err = hipGetLastError();
if (bias_err != hipSuccess) {
printf("error in add_bias_kernelLauncher: %s\n", hipGetErrorString(bias_err));
}
}
// transpose [b, c, h, w]
transpose_kernelLauncher((T *) output_ptr, (const T *) tmp_output_ptr, bs, out_c, out_h, out_w, stream);
hipError_t transpose_err = hipGetLastError();
if (transpose_err != hipSuccess) {
printf("error in transpose_kernelLauncher: %s\n", hipGetErrorString(transpose_err));
}
}
template void deform_conv2d_kernel_launcher(
float *output_ptr,
float *tmp_output_ptr,
float *columns_ptr,
const float *input_ptr,
const float *offset_ptr,
const float *mask_ptr,
const float *weight_ptr,
const float *bias_ptr,
int bs,
int in_h,
int in_w,
int out_c,
int in_c,
int kernel_h,
int kernel_w,
int pad_h,
int pad_w,
int stride_h,
int stride_w,
int dilation_h,
int dilation_w,
int offset_groups,
int out_h,
int out_w,
bool use_mask,
bool use_bias,
hipblasHandle_t mCublas,
hipStream_t stream);
| 7f47a6097cd2b796e8c2fa091a9820251599b439.cu | /**
* deformable convolution
*/
#include "cuda_kernels.h"
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = (blockIdx.x * blockDim.x) + threadIdx.x; i < (n); i += (blockDim.x * gridDim.x))
__device__ inline float sigmoid(float data) {
return 1.0f / (1.0f + expf(-data));
}
template<typename T>
__global__ void add_bias(T *x, const T *bias, int n) {
const int bid = blockIdx.x;
auto b = bias[bid];
for (int tid = threadIdx.x; tid < n; tid += blockDim.x)
x[bid * n + tid] += b;
}
// [channel, batch, H, W] x + [channel] bias
template<typename T>
void add_bias_kernelLauncher(T *x, const T *bias, int channel, int batch, int H, int W, cudaStream_t stream) {
dim3 grid(channel);
int n = W * H * batch;
int blockSize = n;
if (blockSize > 1024)
blockSize = 1024;
add_bias<<<grid, blockSize, 0, stream>>>(x, bias, n);
}
template<typename T>
__global__ void transpose(T *output, const T *input, int n) {
int c = blockIdx.y;
int bs = blockIdx.x;
for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
output[(bs * gridDim.y + c) * n + tid] = input[(c * gridDim.x + bs) * n + tid];
}
}
// [c, n, h, w] => [n, c, h, w]
template<typename T>
void transpose_kernelLauncher(T *output, const T *input, int bs, int c, int h, int w, cudaStream_t stream) {
dim3 grid(bs, c);
int n = w * h;
int blockSize = n;
if (std::is_same<T, half>::value && n % 2 == 0) {
blockSize /= 2;
if (blockSize > 1024) {
blockSize = 1024;
}
transpose<<<grid, blockSize, 0, stream>>>((half2 *) output, (const half2 *) input, n / 2);
} else {
if (blockSize > 1024) {
blockSize = 1024;
}
transpose<<<grid, blockSize, 0, stream>>>(output, input, n);
}
}
template<typename T>
__device__ T bilinear_interpolate(const T *in, int height, int width, T h, T w) {
if (h <= T(-1) || T(height) <= h || w <= T(-1) || T(width) <= w) {
return T(0);
}
int h_low = floor((float) h);
int w_low = floor((float) w);
int h_high = h_low + 1;
int w_high = w_low + 1;
T lh = h - T(h_low);
T lw = w - T(w_low);
T hh = T(1) - lh, hw = T(1) - lw;
T v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = in[h_low * width + w_low];
T v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = in[h_low * width + w_high];
T v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = in[h_high * width + w_low];
T v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = in[h_high * width + w_high];
T w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template<typename T>
__global__ void deformable_im2col_kernel(
int n,
const T *input_ptr,
const T *offset_ptr,
const T *mask_ptr,
int height,
int width,
int weight_h,
int weight_w,
int pad_h,
int pad_w,
int stride_h,
int stride_w,
int dilation_h,
int dilation_w,
int batch_sz,
int n_in_channels,
int n_offset_grps,
int out_h,
int out_w,
bool use_mask,
T *columns_ptr) {
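// Each thread handles one (input channel, batch, out_y, out_x) position: it reads the
// weight_h*weight_w offset (and optional mask) values for its offset group, samples the input
// bilinearly at the displaced locations, and writes the results into the im2col buffer,
// which is laid out as [in_c*weight_h*weight_w, batch, out_h, out_w].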
for (int index = (blockIdx.x * blockDim.x) + threadIdx.x; index < (n); index += (blockDim.x * gridDim.x)) {
const int out_x = index % out_w;
const int out_y = (index / out_w) % out_h;
const int out_b = (index / (out_w * out_h)) % batch_sz;
const int in_c = index / (out_w * out_h * batch_sz);
const int out_c = in_c * weight_h * weight_w;
int c_per_offset_grp = n_in_channels / n_offset_grps;
const int grp_idx = in_c / c_per_offset_grp;
columns_ptr += (out_c * (batch_sz * out_h * out_w) + out_b * (out_h * out_w) + out_y * out_w + out_x);
input_ptr += (out_b * (n_in_channels * height * width) + in_c * (height * width));
offset_ptr += (out_b * n_offset_grps + grp_idx) * 3 * weight_h * weight_w * out_h * out_w;
if (use_mask) {
// mask_ptr += (out_b * n_offset_grps + grp_idx) * weight_h * weight_w * out_h * out_w;
mask_ptr = offset_ptr + 2 * weight_h * weight_w * out_h * out_w;
}
for (int i = 0; i < weight_h; ++i) {
for (int j = 0; j < weight_w; ++j) {
const int mask_idx = i * weight_w + j;
const int offset_idx = 2 * mask_idx;
T mask_value = 1;
if (use_mask) {
mask_value = mask_ptr[mask_idx * (out_h * out_w) + out_y * out_w + out_x];
mask_value = sigmoid(mask_value);
}
const T offset_h = offset_ptr[offset_idx * (out_h * out_w) + out_y * out_w + out_x];
const T offset_w = offset_ptr[(offset_idx + 1) * (out_h * out_w) + out_y * out_w + out_x];
const T y = T(out_y * stride_h - pad_h) + T(i * dilation_h) + offset_h;
const T x = T(out_x * stride_w - pad_w) + T(j * dilation_w) + offset_w;
*columns_ptr = mask_value * bilinear_interpolate(input_ptr, height, width, y, x);
columns_ptr += batch_sz * out_h * out_w;
}
}
}
}
// input, weight, output are row-major
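// cuBLAS uses column-major storage, so a row-major [r, c] buffer can be passed directly as
// its column-major c-by-r transpose. The launcher below exploits this by calling gemm() with the
// operands swapped (columns as A, weight as B, m = bs*out_h*out_w, n = out_c), which makes the
// column-major result exactly the row-major [out_c, bs*out_h*out_w] buffer we want, with no explicit transpose.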
template<typename T>
void gemm(
T *C,
const T *A,
const T *B,
const int m,
const int n,
const int k,
const int lda,
const int ldb,
const int ldc,
cublasOperation_t trans_a,
cublasOperation_t trans_b,
cublasHandle_t cublas_handle,
float scale = 1.0f) {
cudaDataType_t Atype, Btype, Ctype, computeType;
float alpha_float = scale;
float beta_float = 0.0f;
half alpha_half = half(scale);
half beta_half = half(0.0f);
void *alpha, *beta;
int cublasAlgo;
if (std::is_same<T, float>::value) {
computeType = CUDA_R_32F;
Atype = CUDA_R_32F;
Btype = CUDA_R_32F;
Ctype = CUDA_R_32F;
alpha = &alpha_float;
beta = &beta_float;
cublasAlgo = CUBLAS_GEMM_DEFAULT;
} else {
computeType = CUDA_R_16F;
Atype = CUDA_R_16F;
Btype = CUDA_R_16F;
Ctype = CUDA_R_16F;
alpha = &alpha_half;
beta = &beta_half;
cublasAlgo = CUBLAS_GEMM_DEFAULT_TENSOR_OP;
}
cublasGemmEx(
cublas_handle,
trans_a,
trans_b,
m,
n,
k,
alpha,
A,
Atype,
lda,
B,
Btype,
ldb,
beta,
C,
Ctype,
ldc,
computeType,
static_cast<cublasGemmAlgo_t>(cublasAlgo));
}
template<typename T>
void deform_conv2d_kernel_launcher(T *output_ptr, T *tmp_output_ptr, T *columns_ptr, const T *input_ptr, const T *offset_ptr, const T *mask_ptr, const T *weight_ptr, const T *bias_ptr, int bs, int in_h, int in_w, int out_c, int in_c, int kernel_h, int kernel_w, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, int offset_groups, int out_h, int out_w, bool use_mask, bool use_bias, cublasHandle_t mCublas, cudaStream_t stream) {
int num_kernels = in_c * bs * out_h * out_w;
const unsigned int threads = 512;
const unsigned int blocks = (num_kernels + threads - 1) / threads;
deformable_im2col_kernel<<<blocks, threads, 0, stream>>>(
num_kernels,
(const T *) input_ptr,
(const T *) offset_ptr,
(const T *) mask_ptr,
in_h,
in_w,
kernel_h,
kernel_w,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
bs,
in_c,
offset_groups,
out_h,
out_w,
use_mask,
(T *) columns_ptr);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in deformable_im2col: %s\n", cudaGetErrorString(err));
}
int m = out_c;
int n = bs * out_h * out_w;
int k = in_c * kernel_h * kernel_w;
gemm((T *) tmp_output_ptr, (T *) columns_ptr, (T *) weight_ptr, n, m, k, n, k, n, CUBLAS_OP_N, CUBLAS_OP_N, mCublas);
cudaError_t gemm_err = cudaGetLastError();
if (gemm_err != cudaSuccess) {
printf("error in gemm: %s\n", cudaGetErrorString(gemm_err));
}
if (use_bias) {
//output [out_c, bs, out_h, out_w]
add_bias_kernelLauncher((T *) tmp_output_ptr, (const T *) bias_ptr, out_c, bs, out_h, out_w, stream);
cudaError_t bias_err = cudaGetLastError();
if (bias_err != cudaSuccess) {
printf("error in add_bias_kernelLauncher: %s\n", cudaGetErrorString(bias_err));
}
}
// transpose [b, c, h, w]
transpose_kernelLauncher((T *) output_ptr, (const T *) tmp_output_ptr, bs, out_c, out_h, out_w, stream);
cudaError_t transpose_err = cudaGetLastError();
if (transpose_err != cudaSuccess) {
printf("error in transpose_kernelLauncher: %s\n", cudaGetErrorString(transpose_err));
}
}
template void deform_conv2d_kernel_launcher(
float *output_ptr,
float *tmp_output_ptr,
float *columns_ptr,
const float *input_ptr,
const float *offset_ptr,
const float *mask_ptr,
const float *weight_ptr,
const float *bias_ptr,
int bs,
int in_h,
int in_w,
int out_c,
int in_c,
int kernel_h,
int kernel_w,
int pad_h,
int pad_w,
int stride_h,
int stride_w,
int dilation_h,
int dilation_w,
int offset_groups,
int out_h,
int out_w,
bool use_mask,
bool use_bias,
cublasHandle_t mCublas,
cudaStream_t stream);
|
125273597059affb2e75e3ddeb6b56394375b0a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cmath>
#include <cstdio>
#include <sys/time.h>
using namespace std;
#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
#define BLOCK_SIZE 32
#define ERROR 1.0e-9
typedef unsigned long long int LONG;
void printArr(double *A, LONG N)
{
for(int i=0;i<N;i++)
{
for(int j=0;j<N;j++)
cout << A[i*N+j] << " ";
cout << endl;
}
}
void safe_call(hipError_t ret, int line)
{
if(ret!=hipSuccess)
{
cout << "Error at line " << line << " : " << hipGetErrorString(ret) << endl;
exit(-1);
}
}
void printMat(double *A, LONG N)
{
LONG i,j;
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
cout << A[i*N+j] << " ";
cout<<endl;
}
}
__global__ void gpuMM(double *A, double *B, double *C, LONG N)
{
// Matrix multiplication for NxN matrices C=A*B
// Each thread computes a single element of C
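// Only a slab of S rows of A (and C) is resident on the device per launch, so the row index is
// just threadIdx.y (blockDim.y == S); addressing still uses the full row length N.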
LONG row = threadIdx.y;
LONG col = blockIdx.x*blockDim.x + threadIdx.x;
double sum = 0.f;
for (LONG n = 0; n < N; n++)
sum += A[row*N+n]*B[n*N+col];
C[row*N+col] = sum;
}
int main(int argc, char *argv[])
{
struct timeval t1,t2, tp;
double tt, gflops;
// Perform matrix multiplication C = A*B
// where A, B and C are NxN matrices
// Restricted to matrices where N = K*BLOCK_SIZE;
LONG N,K,S;
cin >> K >> S;
N = K*BLOCK_SIZE;
if(N%S)
{
cout << N << " should be divisible by " << S << endl;
return 0;
}
CUDA_SAFE_CALL(hipSetDevice(0));
cout << "Executing Matrix Multiplcation" << endl;
cout << "Matrix size: " << N << "x" << N << endl;
// Allocate memory on the host
double *hA,*hB,*hC;
hA = new double[N*N];
hB = new double[N*N];
hC = new double[N*N];
// Initialize matrices on the host
srand(time(NULL));
for (LONG j=0; j<N; j++){
for (LONG i=0; i<N; i++){
hA[j*N+i] = drand48();
hB[j*N+i] = drand48();
}
}
// Allocate memory on the device
LONG size = N*N*sizeof(double); // Size of the memory in bytes
double *dA,*dB,*dC,*dAT,*dCT;
// Allocate memory to store the GPU answer on the host
double *C;
C = new double[N*N];
CUDA_SAFE_CALL(hipHostMalloc(&dB,size));
CUDA_SAFE_CALL(hipHostMalloc(&dA,(S*size/N)));
CUDA_SAFE_CALL(hipHostMalloc(&dC,(S*size/N)));
CUDA_SAFE_CALL(hipHostMalloc(&dAT,(S*size/N)));
CUDA_SAFE_CALL(hipHostMalloc(&dCT,(S*size/N)));
dim3 threadBlock(BLOCK_SIZE,S);
dim3 grid(K);
hipStream_t * str = (hipStream_t *) malloc((N/S) * sizeof(hipStream_t));
hipEvent_t * evt = (hipEvent_t *) malloc((N/S) * sizeof(hipEvent_t));
for(int i = 0; i < (N/S); i++)
{
CUDA_SAFE_CALL(hipStreamCreate(&(str[i])));
CUDA_SAFE_CALL(hipEventCreate(&(evt[i])));
}
gettimeofday(&t1,0);
// Copy matrices from the host to device
CUDA_SAFE_CALL(hipMemcpyAsync(dB,hB,size,hipMemcpyHostToDevice,str[0]));
CUDA_SAFE_CALL(hipMemcpyAsync(dA,hA,S*(size/N),hipMemcpyHostToDevice,str[0]));
hipLaunchKernelGGL(( gpuMM), dim3(grid),dim3(threadBlock),0,str[0], dA,dB,dC,N);
CUDA_SAFE_CALL(hipEventRecord(evt[0],str[0]));
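// Pipeline the remaining slabs of A: even-numbered streams use the dA/dC buffers and odd-numbered
// streams use dAT/dCT. Each stream waits on the event recorded by the previous user of its buffer
// pair (stream i-2) before overwriting it, prefetches its slab, launches its kernel, and copies the
// previous stream's finished slab of C back to the host, overlapping transfers with compute.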
for(LONG i=1; i< (N/S); i++){
if(i%2 == 0)
{
//Wait for previous stream to finish executing the kernel
CUDA_SAFE_CALL(hipStreamWaitEvent(str[i],evt[i-2],0));
// Prefetch the next set of rows
CUDA_SAFE_CALL(hipMemcpyAsync(dA,hA+i*N*S,(S*size/N),hipMemcpyHostToDevice,str[i]));
CUDA_SAFE_CALL(hipStreamSynchronize(str[i-2]));
//Execute the matrix multiplication kernel
hipLaunchKernelGGL(( gpuMM), dim3(grid),dim3(threadBlock),0,str[i], dA,dB,dC,N);
CUDA_SAFE_CALL(hipEventRecord(evt[i],str[i]));
// Now copy the GPU result back to CPU
CUDA_SAFE_CALL(hipMemcpyAsync(C+(i-1)*N*S,dCT,(S*size/N),hipMemcpyDeviceToHost,str[i-1]));
}
else
{
//Wait for previous stream to finish executing the kernel
if(i>1)
CUDA_SAFE_CALL(hipStreamWaitEvent(str[i],evt[i-2],0));
// Prefetch the next set of rows
CUDA_SAFE_CALL(hipMemcpyAsync(dAT,hA+i*N*S,(S*size/N),hipMemcpyHostToDevice,str[i]));
if(i>1)
CUDA_SAFE_CALL(hipStreamSynchronize(str[i-2]));
//Execute the matrix multiplication kernel
hipLaunchKernelGGL(( gpuMM), dim3(grid),dim3(threadBlock),0,str[i], dAT,dB,dCT,N);
CUDA_SAFE_CALL(hipEventRecord(evt[i],str[i]));
// Now copy the GPU result back to CPU
CUDA_SAFE_CALL(hipMemcpyAsync(C+(i-1)*N*S,dC,(S*size/N),hipMemcpyDeviceToHost,str[i-1]));
}
}
CUDA_SAFE_CALL(hipStreamSynchronize(str[(N/S)-1]));
if(((N/S)-1)%2 == 0)
CUDA_SAFE_CALL(hipMemcpyAsync(C+((N/S)-1)*N*S,dC,(S*size/N),hipMemcpyDeviceToHost,str[(N/S)-1]));
else
CUDA_SAFE_CALL(hipMemcpyAsync(C+((N/S)-1)*N*S,dCT,(S*size/N),hipMemcpyDeviceToHost,str[(N/S)-1]));
CUDA_SAFE_CALL(hipDeviceSynchronize());
gettimeofday(&t2,0);
timersub(&t2,&t1,&tp);
tt = (double) tp.tv_sec + ((double) tp.tv_usec/1.0e6);
gflops = ( 1.0e-9 * 2.0 * N * N * N ) / tt;
cout << "Prefetch : " << gflops << endl;
for(int i = 0; i < (N/S); i++)
{
CUDA_SAFE_CALL(hipStreamDestroy(str[i]));
CUDA_SAFE_CALL(hipEventDestroy(evt[i]));
}
#if 0
// Now do the matrix multiplication on the CPU
double sum;
for (LONG row=0; row<N; row++){
for (LONG col=0; col<N; col++){
sum = 0.f;
for (LONG n=0; n<N; n++){
sum += hA[row*N+n]*hB[n*N+col];
}
hC[row*N+col] = sum;
}
}
// Check the result and make sure it is correct
for (LONG row=0; row<N; row++){
for (LONG col=0; col<N; col++){
if ( fabs(C[row*N+col] - hC[row*N+col]) > ERROR ){
cout << "Wrong answer!" << row << " " << col << endl;
row = col = N;
}
}
}
printArr(C,N);
cout<<endl;
printArr(hC,N);
#endif
CUDA_SAFE_CALL(hipHostFree(dB));
CUDA_SAFE_CALL(hipHostFree(dA));
CUDA_SAFE_CALL(hipHostFree(dC));
CUDA_SAFE_CALL(hipHostFree(dAT));
CUDA_SAFE_CALL(hipHostFree(dCT));
cout << "Finished." << endl;
return 0;
}
| 125273597059affb2e75e3ddeb6b56394375b0a9.cu | #include <iostream>
#include <cmath>
#include <cstdio>
#include <sys/time.h>
using namespace std;
#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
#define BLOCK_SIZE 32
#define ERROR 1.0e-9
typedef unsigned long long int LONG;
void printArr(double *A, LONG N)
{
for(int i=0;i<N;i++)
{
for(int j=0;j<N;j++)
cout << A[i*N+j] << " ";
cout << endl;
}
}
void safe_call(cudaError_t ret, int line)
{
if(ret!=cudaSuccess)
{
cout << "Error at line " << line << " : " << cudaGetErrorString(ret) << endl;
exit(-1);
}
}
void printMat(double *A, LONG N)
{
LONG i,j;
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
cout << A[i*N+j] << " ";
cout<<endl;
}
}
__global__ void gpuMM(double *A, double *B, double *C, LONG N)
{
// Matrix multiplication for NxN matrices C=A*B
// Each thread computes a single element of C
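// Only a slab of S rows of A (and C) is resident on the device per launch, so the row index is
// just threadIdx.y (blockDim.y == S); addressing still uses the full row length N.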
LONG row = threadIdx.y;
LONG col = blockIdx.x*blockDim.x + threadIdx.x;
double sum = 0.f;
for (LONG n = 0; n < N; n++)
sum += A[row*N+n]*B[n*N+col];
C[row*N+col] = sum;
}
int main(int argc, char *argv[])
{
struct timeval t1,t2, tp;
double tt, gflops;
// Perform matrix multiplication C = A*B
// where A, B and C are NxN matrices
// Restricted to matrices where N = K*BLOCK_SIZE;
LONG N,K,S;
cin >> K >> S;
N = K*BLOCK_SIZE;
if(N%S)
{
cout << N << " should be divisible by " << S << endl;
return 0;
}
CUDA_SAFE_CALL(cudaSetDevice(0));
cout << "Executing Matrix Multiplcation" << endl;
cout << "Matrix size: " << N << "x" << N << endl;
// Allocate memory on the host
double *hA,*hB,*hC;
hA = new double[N*N];
hB = new double[N*N];
hC = new double[N*N];
// Initialize matrices on the host
srand(time(NULL));
for (LONG j=0; j<N; j++){
for (LONG i=0; i<N; i++){
hA[j*N+i] = drand48();
hB[j*N+i] = drand48();
}
}
// Allocate memory on the device
LONG size = N*N*sizeof(double); // Size of the memory in bytes
double *dA,*dB,*dC,*dAT,*dCT;
// Allocate memory to store the GPU answer on the host
double *C;
C = new double[N*N];
CUDA_SAFE_CALL(cudaMallocHost(&dB,size));
CUDA_SAFE_CALL(cudaMallocHost(&dA,(S*size/N)));
CUDA_SAFE_CALL(cudaMallocHost(&dC,(S*size/N)));
CUDA_SAFE_CALL(cudaMallocHost(&dAT,(S*size/N)));
CUDA_SAFE_CALL(cudaMallocHost(&dCT,(S*size/N)));
dim3 threadBlock(BLOCK_SIZE,S);
dim3 grid(K);
cudaStream_t * str = (cudaStream_t *) malloc((N/S) * sizeof(cudaStream_t));
cudaEvent_t * evt = (cudaEvent_t *) malloc((N/S) * sizeof(cudaEvent_t));
for(int i = 0; i < (N/S); i++)
{
CUDA_SAFE_CALL(cudaStreamCreate(&(str[i])));
CUDA_SAFE_CALL(cudaEventCreate(&(evt[i])));
}
gettimeofday(&t1,0);
// Copy matrices from the host to device
CUDA_SAFE_CALL(cudaMemcpyAsync(dB,hB,size,cudaMemcpyHostToDevice,str[0]));
CUDA_SAFE_CALL(cudaMemcpyAsync(dA,hA,S*(size/N),cudaMemcpyHostToDevice,str[0]));
gpuMM<<<grid,threadBlock,0,str[0]>>>(dA,dB,dC,N);
CUDA_SAFE_CALL(cudaEventRecord(evt[0],str[0]));
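// Pipeline the remaining slabs of A: even-numbered streams use the dA/dC buffers and odd-numbered
// streams use dAT/dCT. Each stream waits on the event recorded by the previous user of its buffer
// pair (stream i-2) before overwriting it, prefetches its slab, launches its kernel, and copies the
// previous stream's finished slab of C back to the host, overlapping transfers with compute.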
for(LONG i=1; i< (N/S); i++){
if(i%2 == 0)
{
//Wait for previous stream to finish executing the kernel
CUDA_SAFE_CALL(cudaStreamWaitEvent(str[i],evt[i-2],0));
// Prefetch the next set of rows
CUDA_SAFE_CALL(cudaMemcpyAsync(dA,hA+i*N*S,(S*size/N),cudaMemcpyHostToDevice,str[i]));
CUDA_SAFE_CALL(cudaStreamSynchronize(str[i-2]));
//Execute the matrix multiplication kernel
gpuMM<<<grid,threadBlock,0,str[i]>>>(dA,dB,dC,N);
CUDA_SAFE_CALL(cudaEventRecord(evt[i],str[i]));
// Now copy the GPU result back to CPU
CUDA_SAFE_CALL(cudaMemcpyAsync(C+(i-1)*N*S,dCT,(S*size/N),cudaMemcpyDeviceToHost,str[i-1]));
}
else
{
//Wait for previous stream to finish executing the kernel
if(i>1)
CUDA_SAFE_CALL(cudaStreamWaitEvent(str[i],evt[i-2],0));
// Prefetch the next set of rows
CUDA_SAFE_CALL(cudaMemcpyAsync(dAT,hA+i*N*S,(S*size/N),cudaMemcpyHostToDevice,str[i]));
if(i>1)
CUDA_SAFE_CALL(cudaStreamSynchronize(str[i-2]));
//Execute the matrix multiplication kernel
gpuMM<<<grid,threadBlock,0,str[i]>>>(dAT,dB,dCT,N);
CUDA_SAFE_CALL(cudaEventRecord(evt[i],str[i]));
// Now copy the GPU result back to CPU
CUDA_SAFE_CALL(cudaMemcpyAsync(C+(i-1)*N*S,dC,(S*size/N),cudaMemcpyDeviceToHost,str[i-1]));
}
}
CUDA_SAFE_CALL(cudaStreamSynchronize(str[(N/S)-1]));
if(((N/S)-1)%2 == 0)
CUDA_SAFE_CALL(cudaMemcpyAsync(C+((N/S)-1)*N*S,dC,(S*size/N),cudaMemcpyDeviceToHost,str[(N/S)-1]));
else
CUDA_SAFE_CALL(cudaMemcpyAsync(C+((N/S)-1)*N*S,dCT,(S*size/N),cudaMemcpyDeviceToHost,str[(N/S)-1]));
CUDA_SAFE_CALL(cudaDeviceSynchronize());
gettimeofday(&t2,0);
timersub(&t2,&t1,&tp);
tt = (double) tp.tv_sec + ((double) tp.tv_usec/1.0e6);
gflops = ( 1.0e-9 * 2.0 * N * N * N ) / tt;
cout << "Prefetch : " << gflops << endl;
for(int i = 0; i < (N/S); i++)
{
CUDA_SAFE_CALL(cudaStreamDestroy(str[i]));
CUDA_SAFE_CALL(cudaEventDestroy(evt[i]));
}
#if 0
// Now do the matrix multiplication on the CPU
double sum;
for (LONG row=0; row<N; row++){
for (LONG col=0; col<N; col++){
sum = 0.f;
for (LONG n=0; n<N; n++){
sum += hA[row*N+n]*hB[n*N+col];
}
hC[row*N+col] = sum;
}
}
// Check the result and make sure it is correct
for (LONG row=0; row<N; row++){
for (LONG col=0; col<N; col++){
if ( fabs(C[row*N+col] - hC[row*N+col]) > ERROR ){
cout << "Wrong answer!" << row << " " << col << endl;
row = col = N;
}
}
}
printArr(C,N);
cout<<endl;
printArr(hC,N);
#endif
CUDA_SAFE_CALL(cudaFreeHost(dB));
CUDA_SAFE_CALL(cudaFreeHost(dA));
CUDA_SAFE_CALL(cudaFreeHost(dC));
CUDA_SAFE_CALL(cudaFreeHost(dAT));
CUDA_SAFE_CALL(cudaFreeHost(dCT));
cout << "Finished." << endl;
return 0;
}
|
8c6d55e22d1bb34509f7cdefed1454a262d83c7c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <Environment.h>
#include <loops/transform_any.h>
#include <types/types.h>
#include <op_boilerplate.h>
#include <loops/legacy_ops.h>
#include <helpers/DebugHelper.h>
using namespace simdOps;
template <typename X, typename Z, typename OpType>
__global__ void transformAnySimple(void *x, Nd4jLong *xShapeInfo, int xRank,
void *params,
void *z, Nd4jLong *zShapeInfo, int zRank,
int *allocationPointer,
void *reductionPointer,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
functions::transform::TransformAny<X,Z>::template transformCuda<OpType>(x,xShapeInfo,params,z,zShapeInfo,allocationPointer,reductionPointer,tadShapeInfo, tadOffsets);
}
namespace functions {
namespace transform {
template<typename X, typename Y>
_CUDA_H void TransformAny<X,Y>::executeTransformShaped(dim3 launchDims, hipStream_t *stream, int opNum, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_ANY_OPS);
DEBUG_KERNEL(stream, opNum);
}
template<typename X, typename Z>
template <typename OpType>
__device__ void TransformAny<X,Z>::transformCuda(void *vx, Nd4jLong *xShapeInfo,
void *vparams,
void *vz, Nd4jLong *zShapeInfo,
int *allocationPointer, void *vreductionPointer,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
auto x = reinterpret_cast<X*>(vx);
auto z = reinterpret_cast<Z*>(vz);
auto params = reinterpret_cast<X*>(vparams);
auto reductionPointer = reinterpret_cast<Z*>(vreductionPointer);
__shared__ Nd4jLong xEws;
__shared__ Nd4jLong zEws;
__shared__ char xOrder;
__shared__ char zOrder;
__shared__ Nd4jLong length;
if (threadIdx.x == 0) {
xEws = shape::elementWiseStride(xShapeInfo);
zEws = shape::elementWiseStride(zShapeInfo);
xOrder = shape::order(xShapeInfo);
zOrder = shape::order(zShapeInfo);
length = shape::length(xShapeInfo);
}
__syncthreads();
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
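// Fast path: when both buffers have a positive element-wise stride and the same memory order, the
// linear index maps to offsets directly; otherwise fall back to shape::getIndexOffset, reusing the
// x offset when the op runs in place (vx == vz).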
if(xEws > 0 && zEws > 0 && xOrder == zOrder) {
for (int i = tid; i < length; i += totalThreads)
z[i * zEws] = OpType::op(x[i * xEws], params);
}
else {
if(vx == vz) {
for (Nd4jLong i = tid; i < length; i+= totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo, length);
z[xOffset] = OpType::op(x[xOffset], params);
}
}
else {
for (Nd4jLong i = tid; i < length; i+= totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo, length);
auto zOffset = shape::getIndexOffset(i, zShapeInfo, length);
z[zOffset] = OpType::op(x[xOffset], params);
}
}
}
};
template<typename X, typename Z>
template <typename OpType>
_CUDA_H void TransformAny<X,Z>::intermediateShaped(dim3 launchDims, hipStream_t *stream, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
hipLaunchKernelGGL(( transformAnySimple<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets);
nd4j::DebugHelper::checkErrorCode(stream, "transformAny(...) failed");
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT TransformAny, , LIBND4J_TYPES, LIBND4J_TYPES);
}
}
| 8c6d55e22d1bb34509f7cdefed1454a262d83c7c.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <Environment.h>
#include <loops/transform_any.h>
#include <types/types.h>
#include <op_boilerplate.h>
#include <loops/legacy_ops.h>
#include <helpers/DebugHelper.h>
using namespace simdOps;
template <typename X, typename Z, typename OpType>
__global__ void transformAnySimple(void *x, Nd4jLong *xShapeInfo, int xRank,
void *params,
void *z, Nd4jLong *zShapeInfo, int zRank,
int *allocationPointer,
void *reductionPointer,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
functions::transform::TransformAny<X,Z>::template transformCuda<OpType>(x,xShapeInfo,params,z,zShapeInfo,allocationPointer,reductionPointer,tadShapeInfo, tadOffsets);
}
namespace functions {
namespace transform {
template<typename X, typename Y>
_CUDA_H void TransformAny<X,Y>::executeTransformShaped(dim3 launchDims, cudaStream_t *stream, int opNum, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_ANY_OPS);
DEBUG_KERNEL(stream, opNum);
}
template<typename X, typename Z>
template <typename OpType>
__device__ void TransformAny<X,Z>::transformCuda(void *vx, Nd4jLong *xShapeInfo,
void *vparams,
void *vz, Nd4jLong *zShapeInfo,
int *allocationPointer, void *vreductionPointer,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
auto x = reinterpret_cast<X*>(vx);
auto z = reinterpret_cast<Z*>(vz);
auto params = reinterpret_cast<X*>(vparams);
auto reductionPointer = reinterpret_cast<Z*>(vreductionPointer);
__shared__ Nd4jLong xEws;
__shared__ Nd4jLong zEws;
__shared__ char xOrder;
__shared__ char zOrder;
__shared__ Nd4jLong length;
if (threadIdx.x == 0) {
xEws = shape::elementWiseStride(xShapeInfo);
zEws = shape::elementWiseStride(zShapeInfo);
xOrder = shape::order(xShapeInfo);
zOrder = shape::order(zShapeInfo);
length = shape::length(xShapeInfo);
}
__syncthreads();
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
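// Fast path: when both buffers have a positive element-wise stride and the same memory order, the
// linear index maps to offsets directly; otherwise fall back to shape::getIndexOffset, reusing the
// x offset when the op runs in place (vx == vz).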
if(xEws > 0 && zEws > 0 && xOrder == zOrder) {
for (int i = tid; i < length; i += totalThreads)
z[i * zEws] = OpType::op(x[i * xEws], params);
}
else {
if(vx == vz) {
for (Nd4jLong i = tid; i < length; i+= totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo, length);
z[xOffset] = OpType::op(x[xOffset], params);
}
}
else {
for (Nd4jLong i = tid; i < length; i+= totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo, length);
auto zOffset = shape::getIndexOffset(i, zShapeInfo, length);
z[zOffset] = OpType::op(x[xOffset], params);
}
}
}
};
template<typename X, typename Z>
template <typename OpType>
_CUDA_H void TransformAny<X,Z>::intermediateShaped(dim3 launchDims, cudaStream_t *stream, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
transformAnySimple<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets);
nd4j::DebugHelper::checkErrorCode(stream, "transformAny(...) failed");
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT TransformAny, , LIBND4J_TYPES, LIBND4J_TYPES);
}
}
|
191da5f8fa230c38f2a56cde8e4e786c4de7e41b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define PI 3.1415926535897932
#define MAXEQNS 10 // maximum number of differential equations in the system
const int itermax10 = 2; // number of iterations to use for rk10
const int itermax12 = 1; // number of additional iterations to use for rk12
const int neqns = 2; // number of differential equations in the system
const double tol = 1.0e-10; // the error tolerance
const double tol10 = tol / 10;
const bool sho = true; // set sho to true if you want the simple harmonic oscillator results
// set sho to false, if you want the predator - prey results
// the following constants are the 10th order method's coefficients
const double a0 = 0;
__constant__ double a1 = 0.11747233803526765;
__constant__ double a2 = 0.35738424175967745;
__constant__ double a3 = 0.64261575824032255;
__constant__ double a4 = 0.88252766196473235;
const double a5 = 1.0000000000000000;
__constant__ double b10 = 0.047323231137709573;
__constant__ double b11 = 0.077952072407795078;
__constant__ double b12 = -0.010133421269900587;
__constant__ double b13 = 0.0028864915990617097;
__constant__ double b14 = -0.00055603583939812082;
__constant__ double b20 = 0.021779075831486075;
__constant__ double b21 = 0.22367959757928498;
__constant__ double b22 = 0.12204792759220492;
__constant__ double b23 = -0.012091266674498959;
__constant__ double b24 = 0.0019689074312004371;
__constant__ double b30 = 0.044887590835180592;
__constant__ double b31 = 0.15973856856089786;
__constant__ double b32 = 0.32285378852557547;
__constant__ double b33 = 0.12204792759220492;
__constant__ double b34 = -0.0069121172735362915;
__constant__ double b40 = 0.019343435528957094;
__constant__ double b41 = 0.22312684732165494;
__constant__ double b42 = 0.23418268877986459;
__constant__ double b43 = 0.32792261792646064;
__constant__ double b44 = 0.077952072407795078;
const double b50 = 0.066666666666666667;
const double b51 = 0.10981508874708385;
const double b52 = 0.37359383699761912;
const double b53 = 0.18126454003786724;
const double b54 = 0.26865986755076313;
const double c0 = 0.033333333333333333;
const double c1 = 0.18923747814892349;
const double c2 = 0.27742918851774318;
const double c3 = 0.27742918851774318;
const double c4 = 0.18923747814892349;
const double c5 = 0.033333333333333333;
// the following coefficients allow us to get rk12 internal xk values from rk10 fk values
__constant__ double g10 = 0.043407276098971173;
__constant__ double g11 = 0.049891561330903419;
__constant__ double g12 = -0.012483721919363355;
__constant__ double g13 = 0.0064848904066894701;
__constant__ double g14 = -0.0038158693974615597;
__constant__ double g15 = 0.0014039153409773882;
__constant__ double g20 = 0.030385164419638569;
__constant__ double g21 = 0.19605322645426044;
__constant__ double g22 = 0.047860687574395354;
__constant__ double g23 = -0.012887249003100515;
__constant__ double g24 = 0.0064058521980400821;
__constant__ double g25 = -0.0022420783785910372;
__constant__ double g30 = 0.032291666666666667;
__constant__ double g31 = 0.19311806292811784;
__constant__ double g32 = 0.25797759963091718;
__constant__ double g33 = 0.019451588886825999;
__constant__ double g34 = -0.0038805847791943522;
__constant__ double g35 = 0.0010416666666666667;
__constant__ double g40 = 0.035575411711924371;
__constant__ double g41 = 0.18283162595088341;
__constant__ double g42 = 0.29031643752084369;
__constant__ double g43 = 0.22956850094334782;
__constant__ double g44 = -0.0068157483053369507;
__constant__ double g45 = 0.0029481689136947641;
__constant__ double g50 = 0.031929417992355945;
__constant__ double g51 = 0.19305334754638505;
__constant__ double g52 = 0.27094429811105371;
__constant__ double g53 = 0.28991291043710653;
__constant__ double g54 = 0.13934591681802007;
__constant__ double g55 = -0.010073942765637839;
const double g60 = 0.033333333333333333;
const double g61 = 0.18923747814892349;
const double g62 = 0.27742918851774318;
const double g63 = 0.27742918851774318;
const double g64 = 0.18923747814892349;
const double g65 = 0.033333333333333333;
// the following constants are the 12th order method's coefficients
const double ah0 = 0.0;
const double ah1 = 0.084888051860716535;
const double ah2 = 0.26557560326464289;
const double ah3 = 0.50000000000000000;
const double ah4 = 0.73442439673535711;
const double ah5 = 0.91511194813928346;
const double ah6 = 1.0000000000000000;
__constant__ double bh10 = 0.033684534770907752;
__constant__ double bh11 = 0.057301749935629582;
__constant__ double bh12 = -0.0082444880936983822;
__constant__ double bh13 = 0.0029151263642014432;
__constant__ double bh14 = -0.00096482361331657787;
__constant__ double bh15 = 0.00019595249699271744;
__constant__ double bh20 = 0.015902242088596380;
__constant__ double bh21 = 0.16276437062291593;
__constant__ double bh22 = 0.096031583397703751;
__constant__ double bh23 = -0.011758319711158930;
__constant__ double bh24 = 0.0032543514515832418;
__constant__ double bh25 = -0.00061862458499748489;
__constant__ double bh30 = 0.031250000000000000;
__constant__ double bh31 = 0.11881843285766042;
__constant__ double bh32 = 0.24868761828096535;
__constant__ double bh33 = 0.11000000000000000;
__constant__ double bh34 = -0.010410996557394222;
__constant__ double bh35 = 0.0016549454187684515;
__constant__ double bh40 = 0.015902242088596380;
__constant__ double bh41 = 0.15809680304274781;
__constant__ double bh42 = 0.18880881534382426;
__constant__ double bh43 = 0.28087114502765051;
__constant__ double bh44 = 0.096031583397703751;
__constant__ double bh45 = -0.0052861921651656089;
__constant__ double bh50 = 0.033684534770907752;
__constant__ double bh51 = 0.11440754737426645;
__constant__ double bh52 = 0.24657204460460206;
__constant__ double bh53 = 0.20929436236889375;
__constant__ double bh54 = 0.25385170908498387;
__constant__ double bh55 = 0.057301749935629582;
const double bh60 = 0;
const double bh61 = 0.19581988897471611;
const double bh62 = 0.14418011102528389;
const double bh63 = 0.32000000000000000;
const double bh64 = 0.14418011102528389;
const double bh65 = 0.19581988897471611;
const double ch0 = 0.023809523809523810;
const double ch1 = 0.13841302368078297;
const double ch2 = 0.21587269060493131;
const double ch3 = 0.24380952380952381;
const double ch4 = 0.21587269060493131;
const double ch5 = 0.13841302368078297;
const double ch6 = 0.023809523809523810;
__global__ void guessKernel(double*device_X_Total, double* device_X_Not,double* device_F_Not, double h){
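// One thread per equation of the system (neqns == 2): the initial iterate at each interior
// abscissa a1..a4 is the Euler predictor x0 + a_i*h*f(x0), stored consecutively in device_X_Total.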
device_X_Total[threadIdx.x] = device_X_Not[threadIdx.x] + a1 * h * device_F_Not[threadIdx.x];
device_X_Total[threadIdx.x +2] = device_X_Not[threadIdx.x] + a2 * h * device_F_Not[threadIdx.x];
device_X_Total[threadIdx.x +4] = device_X_Not[threadIdx.x] + a3 * h * device_F_Not[threadIdx.x];
device_X_Total[threadIdx.x +6] = device_X_Not[threadIdx.x] + a4 * h * device_F_Not[threadIdx.x];
} | 191da5f8fa230c38f2a56cde8e4e786c4de7e41b.cu | #include "includes.h"
#define PI 3.1415926535897932
#define MAXEQNS 10 // maximum number of differential equations in the system
const int itermax10 = 2; // number of iterations to use for rk10
const int itermax12 = 1; // number of additional iterations to use for rk12
const int neqns = 2; // number of differential equations in the system
const double tol = 1.0e-10; // the error tolerance
const double tol10 = tol / 10;
const bool sho = true; // set sho to true if you want the simple harmonic oscillator results
// set sho to false, if you want the predator - prey results
// the following constants are the 10th order method's coefficients
const double a0 = 0;
__constant__ double a1 = 0.11747233803526765;
__constant__ double a2 = 0.35738424175967745;
__constant__ double a3 = 0.64261575824032255;
__constant__ double a4 = 0.88252766196473235;
const double a5 = 1.0000000000000000;
__constant__ double b10 = 0.047323231137709573;
__constant__ double b11 = 0.077952072407795078;
__constant__ double b12 = -0.010133421269900587;
__constant__ double b13 = 0.0028864915990617097;
__constant__ double b14 = -0.00055603583939812082;
__constant__ double b20 = 0.021779075831486075;
__constant__ double b21 = 0.22367959757928498;
__constant__ double b22 = 0.12204792759220492;
__constant__ double b23 = -0.012091266674498959;
__constant__ double b24 = 0.0019689074312004371;
__constant__ double b30 = 0.044887590835180592;
__constant__ double b31 = 0.15973856856089786;
__constant__ double b32 = 0.32285378852557547;
__constant__ double b33 = 0.12204792759220492;
__constant__ double b34 = -0.0069121172735362915;
__constant__ double b40 = 0.019343435528957094;
__constant__ double b41 = 0.22312684732165494;
__constant__ double b42 = 0.23418268877986459;
__constant__ double b43 = 0.32792261792646064;
__constant__ double b44 = 0.077952072407795078;
const double b50 = 0.066666666666666667;
const double b51 = 0.10981508874708385;
const double b52 = 0.37359383699761912;
const double b53 = 0.18126454003786724;
const double b54 = 0.26865986755076313;
const double c0 = 0.033333333333333333;
const double c1 = 0.18923747814892349;
const double c2 = 0.27742918851774318;
const double c3 = 0.27742918851774318;
const double c4 = 0.18923747814892349;
const double c5 = 0.033333333333333333;
// the following coefficients allow us to get rk12 internal xk values from rk10 fk values
__constant__ double g10 = 0.043407276098971173;
__constant__ double g11 = 0.049891561330903419;
__constant__ double g12 = -0.012483721919363355;
__constant__ double g13 = 0.0064848904066894701;
__constant__ double g14 = -0.0038158693974615597;
__constant__ double g15 = 0.0014039153409773882;
__constant__ double g20 = 0.030385164419638569;
__constant__ double g21 = 0.19605322645426044;
__constant__ double g22 = 0.047860687574395354;
__constant__ double g23 = -0.012887249003100515;
__constant__ double g24 = 0.0064058521980400821;
__constant__ double g25 = -0.0022420783785910372;
__constant__ double g30 = 0.032291666666666667;
__constant__ double g31 = 0.19311806292811784;
__constant__ double g32 = 0.25797759963091718;
__constant__ double g33 = 0.019451588886825999;
__constant__ double g34 = -0.0038805847791943522;
__constant__ double g35 = 0.0010416666666666667;
__constant__ double g40 = 0.035575411711924371;
__constant__ double g41 = 0.18283162595088341;
__constant__ double g42 = 0.29031643752084369;
__constant__ double g43 = 0.22956850094334782;
__constant__ double g44 = -0.0068157483053369507;
__constant__ double g45 = 0.0029481689136947641;
__constant__ double g50 = 0.031929417992355945;
__constant__ double g51 = 0.19305334754638505;
__constant__ double g52 = 0.27094429811105371;
__constant__ double g53 = 0.28991291043710653;
__constant__ double g54 = 0.13934591681802007;
__constant__ double g55 = -0.010073942765637839;
const double g60 = 0.033333333333333333;
const double g61 = 0.18923747814892349;
const double g62 = 0.27742918851774318;
const double g63 = 0.27742918851774318;
const double g64 = 0.18923747814892349;
const double g65 = 0.033333333333333333;
// the following constants are the 12th order method's coefficients
const double ah0 = 0.0;
const double ah1 = 0.084888051860716535;
const double ah2 = 0.26557560326464289;
const double ah3 = 0.50000000000000000;
const double ah4 = 0.73442439673535711;
const double ah5 = 0.91511194813928346;
const double ah6 = 1.0000000000000000;
__constant__ double bh10 = 0.033684534770907752;
__constant__ double bh11 = 0.057301749935629582;
__constant__ double bh12 = -0.0082444880936983822;
__constant__ double bh13 = 0.0029151263642014432;
__constant__ double bh14 = -0.00096482361331657787;
__constant__ double bh15 = 0.00019595249699271744;
__constant__ double bh20 = 0.015902242088596380;
__constant__ double bh21 = 0.16276437062291593;
__constant__ double bh22 = 0.096031583397703751;
__constant__ double bh23 = -0.011758319711158930;
__constant__ double bh24 = 0.0032543514515832418;
__constant__ double bh25 = -0.00061862458499748489;
__constant__ double bh30 = 0.031250000000000000;
__constant__ double bh31 = 0.11881843285766042;
__constant__ double bh32 = 0.24868761828096535;
__constant__ double bh33 = 0.11000000000000000;
__constant__ double bh34 = -0.010410996557394222;
__constant__ double bh35 = 0.0016549454187684515;
__constant__ double bh40 = 0.015902242088596380;
__constant__ double bh41 = 0.15809680304274781;
__constant__ double bh42 = 0.18880881534382426;
__constant__ double bh43 = 0.28087114502765051;
__constant__ double bh44 = 0.096031583397703751;
__constant__ double bh45 = -0.0052861921651656089;
__constant__ double bh50 = 0.033684534770907752;
__constant__ double bh51 = 0.11440754737426645;
__constant__ double bh52 = 0.24657204460460206;
__constant__ double bh53 = 0.20929436236889375;
__constant__ double bh54 = 0.25385170908498387;
__constant__ double bh55 = 0.057301749935629582;
const double bh60 = 0;
const double bh61 = 0.19581988897471611;
const double bh62 = 0.14418011102528389;
const double bh63 = 0.32000000000000000;
const double bh64 = 0.14418011102528389;
const double bh65 = 0.19581988897471611;
const double ch0 = 0.023809523809523810;
const double ch1 = 0.13841302368078297;
const double ch2 = 0.21587269060493131;
const double ch3 = 0.24380952380952381;
const double ch4 = 0.21587269060493131;
const double ch5 = 0.13841302368078297;
const double ch6 = 0.023809523809523810;
__global__ void guessKernel(double*device_X_Total, double* device_X_Not,double* device_F_Not, double h){
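// One thread per equation of the system (neqns == 2): the initial iterate at each interior
// abscissa a1..a4 is the Euler predictor x0 + a_i*h*f(x0), stored consecutively in device_X_Total.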
device_X_Total[threadIdx.x] = device_X_Not[threadIdx.x] + a1 * h * device_F_Not[threadIdx.x];
device_X_Total[threadIdx.x +2] = device_X_Not[threadIdx.x] + a2 * h * device_F_Not[threadIdx.x];
device_X_Total[threadIdx.x +4] = device_X_Not[threadIdx.x] + a3 * h * device_F_Not[threadIdx.x];
device_X_Total[threadIdx.x +6] = device_X_Not[threadIdx.x] + a4 * h * device_F_Not[threadIdx.x];
} |
a5c3986d0bb54ce434b2f42a5beb340903e8b59b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <cfloat>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/margin_inner_product_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Weight_norm_gpu(int nthreads, const int K_,
Dtype* weight) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype sum_square = 0.;
for (int i = 0; i < K_; i++) {
sum_square += weight[index * K_ + i] * weight[index * K_ + i];
}
sum_square = sqrt(sum_square);
for (int i = 0; i < K_; i++) {
weight[index * K_ + i] = weight[index * K_ + i] / sum_square;
}
}
}
}
template <typename Dtype>
__global__ void Compute_bottom_norm_gpu(int nthreads, const int K_,
const Dtype* bottom, Dtype* x_norm) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype sum_square = 0.;
for (int i = 0; i < K_; i++) {
sum_square += bottom[index * K_ + i] * bottom[index * K_ + i];
}
x_norm[index] = sqrt(sum_square);
}
}
template <typename Dtype>
__global__ void Compute_cos_theta_gpu(int nthreads, const int N_,
const Dtype* x_norm, Dtype* cos_theta) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index / N_;
cos_theta[index] = cos_theta[index] / x_norm[i];
}
}
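// The sign_* buffers below implement the A-Softmax (SphereFace) angular margin
// psi(theta) = (-1)^k * cos(m*theta) - 2k for m = 2/3/4 (double/triple/quadruple margin):
// sign_0 is sign(cos(theta)), and sign_1..sign_4 supply the (-1)^k factors and -2k offsets that
// keep psi monotonically decreasing in theta over [0, pi].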
template <typename Dtype>
__global__ void Compute_sign_1_gpu(int nthreads, const Dtype* cos_theta, Dtype* sign_1) {
CUDA_KERNEL_LOOP(index, nthreads) {
sign_1[index] = abs(cos_theta[index]) - (Dtype)0.5;
}
}
template <typename Dtype>
__global__ void Compute_sign_2_gpu(int nthreads, const Dtype* sign_0,
const Dtype* sign_1, Dtype* sign_2) {
CUDA_KERNEL_LOOP(index, nthreads) {
sign_2[index] = sign_0[index] * ((Dtype)1. + sign_1[index]) - (Dtype)2.;
}
}
template <typename Dtype>
__global__ void Compute_sign_3_gpu(int nthreads, const Dtype* sign_0,
const Dtype* cos_theta_quadratic, Dtype* sign_3) {
CUDA_KERNEL_LOOP(index, nthreads) {
sign_3[index] = sign_0[index] * ((Dtype)2. * cos_theta_quadratic[index] - (Dtype)1.);
}
}
template <typename Dtype>
__global__ void Compute_sign_4_gpu(int nthreads, const Dtype* sign_0,
const Dtype* sign_3, Dtype* sign_4) {
CUDA_KERNEL_LOOP(index, nthreads) {
sign_4[index] = (Dtype)2. * sign_0[index] + sign_3[index] - (Dtype)3.;
}
}
template <typename Dtype>
__global__ void Margin_double_forward_gpu(int nthreads, const int N_, Dtype lambda,
const Dtype* label, const Dtype* x_norm, const Dtype* sign_0,
const Dtype* cos_theta_quadratic, Dtype* top) {
CUDA_KERNEL_LOOP(index, nthreads) {
// the label[i]_th top_data
const int i = index / N_;
const int j = index % N_;
const int label_value = static_cast<int>(label[i]);
if (label_value == j) {
top[index] *= lambda;
top[index] += x_norm[i] * ((Dtype)2. * sign_0[index] * cos_theta_quadratic[index] -
(Dtype)1.);
top[index] /= ((Dtype)1. + lambda);
}
}
}
template <typename Dtype>
__global__ void Margin_triple_forward_gpu(int nthreads, const int N_, Dtype lambda,
const Dtype* label, const Dtype* x_norm, const Dtype* sign_1, const Dtype* sign_2,
const Dtype* cos_theta, const Dtype* cos_theta_cubic,
Dtype* top) {
CUDA_KERNEL_LOOP(index, nthreads) {
// the label[i]_th top_data
const int i = index / N_;
const int j = index % N_;
const int label_value = static_cast<int>(label[i]);
if (label_value == j) {
top[index] *= lambda;
top[index] += x_norm[i] * (sign_1[index] * ((Dtype)4. * cos_theta_cubic[index] -
(Dtype)3. * cos_theta[index]) + sign_2[index]);
top[index] /= ((Dtype)1. + lambda);
}
}
}
template <typename Dtype>
__global__ void Margin_quadruple_forward_gpu(int nthreads, const int N_, Dtype lambda,
const Dtype* label, const Dtype* x_norm, const Dtype* sign_3, const Dtype* sign_4,
const Dtype* cos_theta_quadratic, const Dtype* cos_theta_quartic,
Dtype* top) {
CUDA_KERNEL_LOOP(index, nthreads) {
// the label[i]_th top_data
const int i = index / N_;
const int j = index % N_;
const int label_value = static_cast<int>(label[i]);
if (label_value == j) {
top[index] *= lambda;
top[index] += x_norm[i] * (sign_3[index] * ((Dtype)8. * cos_theta_quartic[index] -
(Dtype)8. * cos_theta_quadratic[index] + (Dtype)1.) + sign_4[index]);
top[index] /= ((Dtype)1. + lambda);
}
}
}
template <typename Dtype>
__global__ void Margin_bottom_double_backward_gpu(int nthreads, const int N_, const int K_, Dtype lambda,
const Dtype* bottom, const Dtype* weight, const Dtype* top_diff, const Dtype* label,
const Dtype* x_norm, const Dtype* sign_0, const Dtype* cos_theta,
const Dtype* cos_theta_quadratic, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index / K_;
const int j = index % K_;
bottom_diff[index] = (Dtype)0.;
const int label_value = static_cast<int>(label[i]);
for (int n = 0; n < N_; n++) {
if (label_value != n) {
bottom_diff[index] += top_diff[i * N_ + n] * weight[n * K_ + j];
} else {
Dtype coeff_w = (Dtype)4. * sign_0[i * N_ + n] * cos_theta[i * N_ + n];
Dtype coeff_x = - (Dtype)1./ x_norm[i] * ((Dtype)2. * sign_0[i * N_ + n] *
cos_theta_quadratic[i * N_ + n] + (Dtype)1.);
Dtype coeff_norm = sqrt(coeff_w * coeff_w + coeff_x * coeff_x);
coeff_w = coeff_w / coeff_norm;
coeff_x = coeff_x / coeff_norm;
bottom_diff[index] += (Dtype)1./ ((Dtype)1. + lambda) * top_diff[i * N_ + n] *
(coeff_w * weight[n * K_ + j] + coeff_x * bottom[index]);
bottom_diff[index] += lambda / ((Dtype)1. + lambda) * top_diff[i * N_ + n] * weight[n * K_ + j];
}
}
}
}
template <typename Dtype>
__global__ void Margin_bottom_triple_backward_gpu(int nthreads, const int N_, const int K_, Dtype lambda,
const Dtype* bottom, const Dtype* weight, const Dtype* top_diff, const Dtype* label,
const Dtype* x_norm, const Dtype* sign_1, const Dtype* sign_2, const Dtype* cos_theta_quadratic,
const Dtype* cos_theta_cubic, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index / K_;
const int j = index % K_;
bottom_diff[index] = (Dtype)0.;
const int label_value = static_cast<int>(label[i]);
for (int n = 0; n < N_; n++) {
if (label_value != n) {
bottom_diff[index] += top_diff[i * N_ + n] * weight[n * K_ + j];
} else {
Dtype coeff_w = sign_1[i * N_ + n] * ((Dtype)12. * cos_theta_quadratic[i * N_ + n] - (Dtype)3.);
Dtype coeff_x = - (Dtype)1./ x_norm[i] * ((Dtype)8. * sign_1[i * N_ + n] * cos_theta_cubic[i * N_ + n] -
sign_2[i * N_ + n]);
Dtype coeff_norm = sqrt(coeff_w * coeff_w + coeff_x * coeff_x);
coeff_w = coeff_w / coeff_norm;
coeff_x = coeff_x / coeff_norm;
bottom_diff[index] += (Dtype)1./ ((Dtype)1. + lambda) * top_diff[i * N_ + n] *
(coeff_w * weight[n * K_ + j] + coeff_x * bottom[index]);
bottom_diff[index] += lambda / ((Dtype)1. + lambda) * top_diff[i * N_ + n] * weight[n * K_ + j];
}
}
}
}
template <typename Dtype>
__global__ void Margin_bottom_quadruple_backward_gpu(int nthreads, const int N_, const int K_, Dtype lambda,
const Dtype* bottom, const Dtype* weight, const Dtype* top_diff, const Dtype* label,
const Dtype* x_norm, const Dtype* sign_3, const Dtype* sign_4,
const Dtype* cos_theta, const Dtype* cos_theta_quadratic,
const Dtype* cos_theta_cubic, const Dtype* cos_theta_quartic, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index / K_;
const int j = index % K_;
bottom_diff[index] = (Dtype)0.;
const int label_value = static_cast<int>(label[i]);
for (int n = 0; n < N_; n++) {
if (label_value != n) {
bottom_diff[index] += top_diff[i * N_ + n] * weight[n * K_ + j];
} else {
Dtype coeff_w = sign_3[i * N_ + n] * ((Dtype)32. * cos_theta_cubic[i * N_ + n] - (Dtype)16. * cos_theta[i * N_ + n]);
Dtype coeff_x = - (Dtype)1./ x_norm[i] * (sign_3[i * N_ + n] * ((Dtype)24. * cos_theta_quartic[i * N_ + n] -
(Dtype)8. * cos_theta_quadratic[i * N_ + n] - 1) - sign_4[i * N_ + n]);
Dtype coeff_norm = sqrt(coeff_w * coeff_w + coeff_x * coeff_x);
coeff_w = coeff_w / coeff_norm;
coeff_x = coeff_x / coeff_norm;
bottom_diff[index] += (Dtype)1./ ((Dtype)1. + lambda) * top_diff[i * N_ + n] *
(coeff_w * weight[n * K_ + j] + coeff_x * bottom[index]);
bottom_diff[index] += lambda / ((Dtype)1. + lambda) * top_diff[i * N_ + n] * weight[n * K_ + j];
}
}
}
}
template <typename Dtype>
void MarginInnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
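  // lambda is annealed from base_ towards lambda_min_ as training progresses:
  // lambda = max(base * (1 + gamma * iter)^(-power), lambda_min). It blends the plain
  // inner product (weight lambda) with the margin term and is reported through top[1].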
iter_ += (Dtype)1.;
Dtype base_ = this->layer_param_.margin_inner_product_param().base();
Dtype gamma_ = this->layer_param_.margin_inner_product_param().gamma();
Dtype power_ = this->layer_param_.margin_inner_product_param().power();
Dtype lambda_min_ = this->layer_param_.margin_inner_product_param().lambda_min();
lambda_ = base_ * powf(((Dtype)1. + gamma_ * iter_), -power_);
lambda_ = max(lambda_, lambda_min_);
top[1]->mutable_cpu_data()[0] = lambda_;
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* label = bottom[1]->gpu_data();
/************************* normalize weight *************************/
int nthreads = N_;
hipLaunchKernelGGL(( Weight_norm_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, K_,
this->blobs_[0]->mutable_gpu_data());
/************************* common variables *************************/
// x_norm_ = |x|
nthreads = M_;
hipLaunchKernelGGL(( Compute_bottom_norm_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, K_, bottom_data,
x_norm_.mutable_gpu_data());
nthreads = M_ * N_;
// cos_theta = x'w / |x|
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
bottom_data, weight, (Dtype)0., cos_theta_.mutable_gpu_data());
hipLaunchKernelGGL(( Compute_cos_theta_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, x_norm_.gpu_data(), cos_theta_.mutable_gpu_data());
// sign_0
caffe_gpu_sign(M_ * N_, cos_theta_.gpu_data(), sign_0_.mutable_gpu_data());
/************************* optional variables *************************/
switch (type_) {
case MarginInnerProductParameter_MarginType_SINGLE:
break;
case MarginInnerProductParameter_MarginType_DOUBLE:
// cos_theta_quadratic
caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)2., cos_theta_quadratic_.mutable_gpu_data());
break;
case MarginInnerProductParameter_MarginType_TRIPLE:
// cos_theta_quadratic && cos_theta_cubic
caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)2., cos_theta_quadratic_.mutable_gpu_data());
caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)3., cos_theta_cubic_.mutable_gpu_data());
// sign_1 = sign(abs(cos_theta) - 0.5)
hipLaunchKernelGGL(( Compute_sign_1_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, cos_theta_.gpu_data(), sign_1_.mutable_gpu_data());
caffe_gpu_sign(M_ * N_, sign_1_.gpu_data(), sign_1_.mutable_gpu_data());
// sign_2 = sign_0 * (1 + sign_1) - 2
hipLaunchKernelGGL(( Compute_sign_2_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, sign_0_.gpu_data(),
sign_1_.gpu_data(), sign_2_.mutable_gpu_data());
break;
case MarginInnerProductParameter_MarginType_QUADRUPLE:
// cos_theta_quadratic && cos_theta_cubic && cos_theta_quartic
caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)2., cos_theta_quadratic_.mutable_gpu_data());
caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)3., cos_theta_cubic_.mutable_gpu_data());
caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)4., cos_theta_quartic_.mutable_gpu_data());
// sign_3 = sign_0 * sign(2 * cos_theta_quadratic_ - 1)
hipLaunchKernelGGL(( Compute_sign_3_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, sign_0_.gpu_data(), cos_theta_quadratic_.gpu_data(),
sign_3_.mutable_gpu_data());
caffe_gpu_sign(M_ * N_, sign_3_.gpu_data(), sign_3_.mutable_gpu_data());
// sign_4 = 2 * sign_0 + sign_3 - 3
hipLaunchKernelGGL(( Compute_sign_4_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, sign_0_.gpu_data(),
sign_3_.gpu_data(), sign_4_.mutable_gpu_data());
break;
default:
LOG(FATAL) << "Unknown margin type.";
}
/************************* Forward *************************/
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
bottom_data, weight, (Dtype)0., top_data);
switch (type_) {
case MarginInnerProductParameter_MarginType_SINGLE:
break;
case MarginInnerProductParameter_MarginType_DOUBLE:
// caffe_gpu_memcpy(M_ * N_, cos_theta_.gpu_data(), top_data);
hipLaunchKernelGGL(( Margin_double_forward_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, lambda_, label, x_norm_.gpu_data(),
sign_0_.gpu_data(), cos_theta_quadratic_.gpu_data(), top_data);
break;
case MarginInnerProductParameter_MarginType_TRIPLE:
hipLaunchKernelGGL(( Margin_triple_forward_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, lambda_, label, x_norm_.gpu_data(), sign_1_.gpu_data(),
sign_2_.gpu_data(), cos_theta_.gpu_data(),
cos_theta_cubic_.gpu_data(), top_data);
break;
case MarginInnerProductParameter_MarginType_QUADRUPLE:
hipLaunchKernelGGL(( Margin_quadruple_forward_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, lambda_, label, x_norm_.gpu_data(), sign_3_.gpu_data(),
sign_4_.gpu_data(), cos_theta_quadratic_.gpu_data(),
cos_theta_quartic_.gpu_data(), top_data);
break;
default:
LOG(FATAL) << "Unknown margin type.";
}
}
template <typename Dtype>
void MarginInnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
if (this->param_propagate_down_[0]) {
// Gradient with respect to weight
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff());
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
// Gradient with respect to bottom data
int nthreads = M_ * K_;
switch (type_) {
case MarginInnerProductParameter_MarginType_SINGLE:
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
top_diff, this->blobs_[0]->gpu_data(), (Dtype)0.,
bottom[0]->mutable_gpu_diff());
break;
case MarginInnerProductParameter_MarginType_DOUBLE:
hipLaunchKernelGGL(( Margin_bottom_double_backward_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, K_, lambda_, bottom_data, weight, top_diff, label,
x_norm_.gpu_data(), sign_0_.gpu_data(),
cos_theta_.gpu_data(), cos_theta_quadratic_.gpu_data(),
bottom_diff);
break;
case MarginInnerProductParameter_MarginType_TRIPLE:
hipLaunchKernelGGL(( Margin_bottom_triple_backward_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, K_, lambda_, bottom_data, weight, top_diff, label,
x_norm_.gpu_data(), sign_1_.gpu_data(), sign_2_.gpu_data(),
cos_theta_quadratic_.gpu_data(), cos_theta_cubic_.gpu_data(),
bottom_diff);
break;
case MarginInnerProductParameter_MarginType_QUADRUPLE:
hipLaunchKernelGGL(( Margin_bottom_quadruple_backward_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, K_, lambda_, bottom_data, weight, top_diff, label,
x_norm_.gpu_data(), sign_3_.gpu_data(), sign_4_.gpu_data(),
cos_theta_.gpu_data(), cos_theta_quadratic_.gpu_data(),
cos_theta_cubic_.gpu_data(), cos_theta_quartic_.gpu_data(),
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown margin type.";
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(MarginInnerProductLayer);
} // namespace caffe
| a5c3986d0bb54ce434b2f42a5beb340903e8b59b.cu | #include <vector>
#include <cfloat>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/margin_inner_product_layer.hpp"
namespace caffe {
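// Normalize each of the N_ class weight vectors (rows of length K_) to unit L2 norm,
// in place, so that x.w = |x| * cos(theta).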
template <typename Dtype>
__global__ void Weight_norm_gpu(int nthreads, const int K_,
Dtype* weight) {
CUDA_KERNEL_LOOP(index, nthreads) {
    Dtype sum_square = 0.;
    for (int i = 0; i < K_; i++) {
      sum_square += weight[index * K_ + i] * weight[index * K_ + i];
    }
    sum_square = sqrt(sum_square);
    for (int i = 0; i < K_; i++) {
      weight[index * K_ + i] = weight[index * K_ + i] / sum_square;
}
}
}
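// x_norm[i] = ||x_i||_2, the L2 norm of each input sample over its K_ features.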
template <typename Dtype>
__global__ void Compute_bottom_norm_gpu(int nthreads, const int K_,
const Dtype* bottom, Dtype* x_norm) {
CUDA_KERNEL_LOOP(index, nthreads) {
    Dtype sum_square = 0.;
    for (int i = 0; i < K_; i++) {
      sum_square += bottom[index * K_ + i] * bottom[index * K_ + i];
    }
    x_norm[index] = sqrt(sum_square);
}
}
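// Divide each entry of the GEMM result x.w^T by the corresponding sample norm;
// with unit-norm weights this yields cos(theta).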
template <typename Dtype>
__global__ void Compute_cos_theta_gpu(int nthreads, const int N_,
const Dtype* x_norm, Dtype* cos_theta) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index / N_;
cos_theta[index] = cos_theta[index] / x_norm[i];
}
}
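// Computes |cos(theta)| - 0.5; the caller applies caffe_gpu_sign afterwards, so
// sign_1 ends up as sign(|cos(theta)| - 0.5).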
template <typename Dtype>
__global__ void Compute_sign_1_gpu(int nthreads, const Dtype* cos_theta, Dtype* sign_1) {
CUDA_KERNEL_LOOP(index, nthreads) {
sign_1[index] = abs(cos_theta[index]) - (Dtype)0.5;
}
}
template <typename Dtype>
__global__ void Compute_sign_2_gpu(int nthreads, const Dtype* sign_0,
const Dtype* sign_1, Dtype* sign_2) {
CUDA_KERNEL_LOOP(index, nthreads) {
sign_2[index] = sign_0[index] * ((Dtype)1. + sign_1[index]) - (Dtype)2.;
}
}
template <typename Dtype>
__global__ void Compute_sign_3_gpu(int nthreads, const Dtype* sign_0,
const Dtype* cos_theta_quadratic, Dtype* sign_3) {
CUDA_KERNEL_LOOP(index, nthreads) {
sign_3[index] = sign_0[index] * ((Dtype)2. * cos_theta_quadratic[index] - (Dtype)1.);
}
}
template <typename Dtype>
__global__ void Compute_sign_4_gpu(int nthreads, const Dtype* sign_0,
const Dtype* sign_3, Dtype* sign_4) {
CUDA_KERNEL_LOOP(index, nthreads) {
sign_4[index] = (Dtype)2. * sign_0[index] + sign_3[index] - (Dtype)3.;
}
}
template <typename Dtype>
__global__ void Margin_double_forward_gpu(int nthreads, const int N_, Dtype lambda,
const Dtype* label, const Dtype* x_norm, const Dtype* sign_0,
const Dtype* cos_theta_quadratic, Dtype* top) {
CUDA_KERNEL_LOOP(index, nthreads) {
// the label[i]_th top_data
const int i = index / N_;
const int j = index % N_;
const int label_value = static_cast<int>(label[i]);
if (label_value == j) {
top[index] *= lambda;
top[index] += x_norm[i] * ((Dtype)2. * sign_0[index] * cos_theta_quadratic[index] -
(Dtype)1.);
top[index] /= ((Dtype)1. + lambda);
}
}
}
template <typename Dtype>
__global__ void Margin_triple_forward_gpu(int nthreads, const int N_, Dtype lambda,
const Dtype* label, const Dtype* x_norm, const Dtype* sign_1, const Dtype* sign_2,
const Dtype* cos_theta, const Dtype* cos_theta_cubic,
Dtype* top) {
CUDA_KERNEL_LOOP(index, nthreads) {
// the label[i]_th top_data
const int i = index / N_;
const int j = index % N_;
const int label_value = static_cast<int>(label[i]);
if (label_value == j) {
top[index] *= lambda;
top[index] += x_norm[i] * (sign_1[index] * ((Dtype)4. * cos_theta_cubic[index] -
(Dtype)3. * cos_theta[index]) + sign_2[index]);
top[index] /= ((Dtype)1. + lambda);
}
}
}
template <typename Dtype>
__global__ void Margin_quadruple_forward_gpu(int nthreads, const int N_, Dtype lambda,
const Dtype* label, const Dtype* x_norm, const Dtype* sign_3, const Dtype* sign_4,
const Dtype* cos_theta_quadratic, const Dtype* cos_theta_quartic,
Dtype* top) {
CUDA_KERNEL_LOOP(index, nthreads) {
// the label[i]_th top_data
const int i = index / N_;
const int j = index % N_;
const int label_value = static_cast<int>(label[i]);
if (label_value == j) {
top[index] *= lambda;
top[index] += x_norm[i] * (sign_3[index] * ((Dtype)8. * cos_theta_quartic[index] -
(Dtype)8. * cos_theta_quadratic[index] + (Dtype)1.) + sign_4[index]);
top[index] /= ((Dtype)1. + lambda);
}
}
}
template <typename Dtype>
__global__ void Margin_bottom_double_backward_gpu(int nthreads, const int N_, const int K_, Dtype lambda,
const Dtype* bottom, const Dtype* weight, const Dtype* top_diff, const Dtype* label,
const Dtype* x_norm, const Dtype* sign_0, const Dtype* cos_theta,
const Dtype* cos_theta_quadratic, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index / K_;
const int j = index % K_;
bottom_diff[index] = (Dtype)0.;
const int label_value = static_cast<int>(label[i]);
for (int n = 0; n < N_; n++) {
if (label_value != n) {
bottom_diff[index] += top_diff[i * N_ + n] * weight[n * K_ + j];
} else {
Dtype coeff_w = (Dtype)4. * sign_0[i * N_ + n] * cos_theta[i * N_ + n];
Dtype coeff_x = - (Dtype)1./ x_norm[i] * ((Dtype)2. * sign_0[i * N_ + n] *
cos_theta_quadratic[i * N_ + n] + (Dtype)1.);
Dtype coeff_norm = sqrt(coeff_w * coeff_w + coeff_x * coeff_x);
coeff_w = coeff_w / coeff_norm;
coeff_x = coeff_x / coeff_norm;
bottom_diff[index] += (Dtype)1./ ((Dtype)1. + lambda) * top_diff[i * N_ + n] *
(coeff_w * weight[n * K_ + j] + coeff_x * bottom[index]);
bottom_diff[index] += lambda / ((Dtype)1. + lambda) * top_diff[i * N_ + n] * weight[n * K_ + j];
}
}
}
}
template <typename Dtype>
__global__ void Margin_bottom_triple_backward_gpu(int nthreads, const int N_, const int K_, Dtype lambda,
const Dtype* bottom, const Dtype* weight, const Dtype* top_diff, const Dtype* label,
const Dtype* x_norm, const Dtype* sign_1, const Dtype* sign_2, const Dtype* cos_theta_quadratic,
const Dtype* cos_theta_cubic, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index / K_;
const int j = index % K_;
bottom_diff[index] = (Dtype)0.;
const int label_value = static_cast<int>(label[i]);
for (int n = 0; n < N_; n++) {
if (label_value != n) {
bottom_diff[index] += top_diff[i * N_ + n] * weight[n * K_ + j];
} else {
Dtype coeff_w = sign_1[i * N_ + n] * ((Dtype)12. * cos_theta_quadratic[i * N_ + n] - (Dtype)3.);
Dtype coeff_x = - (Dtype)1./ x_norm[i] * ((Dtype)8. * sign_1[i * N_ + n] * cos_theta_cubic[i * N_ + n] -
sign_2[i * N_ + n]);
Dtype coeff_norm = sqrt(coeff_w * coeff_w + coeff_x * coeff_x);
coeff_w = coeff_w / coeff_norm;
coeff_x = coeff_x / coeff_norm;
bottom_diff[index] += (Dtype)1./ ((Dtype)1. + lambda) * top_diff[i * N_ + n] *
(coeff_w * weight[n * K_ + j] + coeff_x * bottom[index]);
bottom_diff[index] += lambda / ((Dtype)1. + lambda) * top_diff[i * N_ + n] * weight[n * K_ + j];
}
}
}
}
template <typename Dtype>
__global__ void Margin_bottom_quadruple_backward_gpu(int nthreads, const int N_, const int K_, Dtype lambda,
const Dtype* bottom, const Dtype* weight, const Dtype* top_diff, const Dtype* label,
const Dtype* x_norm, const Dtype* sign_3, const Dtype* sign_4,
const Dtype* cos_theta, const Dtype* cos_theta_quadratic,
const Dtype* cos_theta_cubic, const Dtype* cos_theta_quartic, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index / K_;
const int j = index % K_;
bottom_diff[index] = (Dtype)0.;
const int label_value = static_cast<int>(label[i]);
for (int n = 0; n < N_; n++) {
if (label_value != n) {
bottom_diff[index] += top_diff[i * N_ + n] * weight[n * K_ + j];
} else {
Dtype coeff_w = sign_3[i * N_ + n] * ((Dtype)32. * cos_theta_cubic[i * N_ + n] - (Dtype)16. * cos_theta[i * N_ + n]);
Dtype coeff_x = - (Dtype)1./ x_norm[i] * (sign_3[i * N_ + n] * ((Dtype)24. * cos_theta_quartic[i * N_ + n] -
(Dtype)8. * cos_theta_quadratic[i * N_ + n] - 1) - sign_4[i * N_ + n]);
Dtype coeff_norm = sqrt(coeff_w * coeff_w + coeff_x * coeff_x);
coeff_w = coeff_w / coeff_norm;
coeff_x = coeff_x / coeff_norm;
bottom_diff[index] += (Dtype)1./ ((Dtype)1. + lambda) * top_diff[i * N_ + n] *
(coeff_w * weight[n * K_ + j] + coeff_x * bottom[index]);
bottom_diff[index] += lambda / ((Dtype)1. + lambda) * top_diff[i * N_ + n] * weight[n * K_ + j];
}
}
}
}
template <typename Dtype>
void MarginInnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
iter_ += (Dtype)1.;
Dtype base_ = this->layer_param_.margin_inner_product_param().base();
Dtype gamma_ = this->layer_param_.margin_inner_product_param().gamma();
Dtype power_ = this->layer_param_.margin_inner_product_param().power();
Dtype lambda_min_ = this->layer_param_.margin_inner_product_param().lambda_min();
lambda_ = base_ * powf(((Dtype)1. + gamma_ * iter_), -power_);
lambda_ = max(lambda_, lambda_min_);
top[1]->mutable_cpu_data()[0] = lambda_;
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* label = bottom[1]->gpu_data();
/************************* normalize weight *************************/
int nthreads = N_;
Weight_norm_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_,
this->blobs_[0]->mutable_gpu_data());
/************************* common variables *************************/
// x_norm_ = |x|
nthreads = M_;
Compute_bottom_norm_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, bottom_data,
x_norm_.mutable_gpu_data());
nthreads = M_ * N_;
// cos_theta = x'w / |x|
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
bottom_data, weight, (Dtype)0., cos_theta_.mutable_gpu_data());
Compute_cos_theta_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, x_norm_.gpu_data(), cos_theta_.mutable_gpu_data());
// sign_0
caffe_gpu_sign(M_ * N_, cos_theta_.gpu_data(), sign_0_.mutable_gpu_data());
/************************* optional variables *************************/
switch (type_) {
case MarginInnerProductParameter_MarginType_SINGLE:
break;
case MarginInnerProductParameter_MarginType_DOUBLE:
// cos_theta_quadratic
caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)2., cos_theta_quadratic_.mutable_gpu_data());
break;
case MarginInnerProductParameter_MarginType_TRIPLE:
// cos_theta_quadratic && cos_theta_cubic
caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)2., cos_theta_quadratic_.mutable_gpu_data());
caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)3., cos_theta_cubic_.mutable_gpu_data());
// sign_1 = sign(abs(cos_theta) - 0.5)
Compute_sign_1_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, cos_theta_.gpu_data(), sign_1_.mutable_gpu_data());
caffe_gpu_sign(M_ * N_, sign_1_.gpu_data(), sign_1_.mutable_gpu_data());
// sign_2 = sign_0 * (1 + sign_1) - 2
Compute_sign_2_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, sign_0_.gpu_data(),
sign_1_.gpu_data(), sign_2_.mutable_gpu_data());
break;
case MarginInnerProductParameter_MarginType_QUADRUPLE:
// cos_theta_quadratic && cos_theta_cubic && cos_theta_quartic
caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)2., cos_theta_quadratic_.mutable_gpu_data());
caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)3., cos_theta_cubic_.mutable_gpu_data());
caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)4., cos_theta_quartic_.mutable_gpu_data());
// sign_3 = sign_0 * sign(2 * cos_theta_quadratic_ - 1)
Compute_sign_3_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, sign_0_.gpu_data(), cos_theta_quadratic_.gpu_data(),
sign_3_.mutable_gpu_data());
caffe_gpu_sign(M_ * N_, sign_3_.gpu_data(), sign_3_.mutable_gpu_data());
// sign_4 = 2 * sign_0 + sign_3 - 3
Compute_sign_4_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, sign_0_.gpu_data(),
sign_3_.gpu_data(), sign_4_.mutable_gpu_data());
break;
default:
LOG(FATAL) << "Unknown margin type.";
}
/************************* Forward *************************/
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
bottom_data, weight, (Dtype)0., top_data);
switch (type_) {
case MarginInnerProductParameter_MarginType_SINGLE:
break;
case MarginInnerProductParameter_MarginType_DOUBLE:
// caffe_gpu_memcpy(M_ * N_, cos_theta_.gpu_data(), top_data);
Margin_double_forward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, lambda_, label, x_norm_.gpu_data(),
sign_0_.gpu_data(), cos_theta_quadratic_.gpu_data(), top_data);
break;
case MarginInnerProductParameter_MarginType_TRIPLE:
Margin_triple_forward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, lambda_, label, x_norm_.gpu_data(), sign_1_.gpu_data(),
sign_2_.gpu_data(), cos_theta_.gpu_data(),
cos_theta_cubic_.gpu_data(), top_data);
break;
case MarginInnerProductParameter_MarginType_QUADRUPLE:
Margin_quadruple_forward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, lambda_, label, x_norm_.gpu_data(), sign_3_.gpu_data(),
sign_4_.gpu_data(), cos_theta_quadratic_.gpu_data(),
cos_theta_quartic_.gpu_data(), top_data);
break;
default:
LOG(FATAL) << "Unknown margin type.";
}
}
template <typename Dtype>
void MarginInnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
if (this->param_propagate_down_[0]) {
// Gradient with respect to weight
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff());
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
// Gradient with respect to bottom data
int nthreads = M_ * K_;
switch (type_) {
case MarginInnerProductParameter_MarginType_SINGLE:
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
top_diff, this->blobs_[0]->gpu_data(), (Dtype)0.,
bottom[0]->mutable_gpu_diff());
break;
case MarginInnerProductParameter_MarginType_DOUBLE:
Margin_bottom_double_backward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, K_, lambda_, bottom_data, weight, top_diff, label,
x_norm_.gpu_data(), sign_0_.gpu_data(),
cos_theta_.gpu_data(), cos_theta_quadratic_.gpu_data(),
bottom_diff);
break;
case MarginInnerProductParameter_MarginType_TRIPLE:
Margin_bottom_triple_backward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, K_, lambda_, bottom_data, weight, top_diff, label,
x_norm_.gpu_data(), sign_1_.gpu_data(), sign_2_.gpu_data(),
cos_theta_quadratic_.gpu_data(), cos_theta_cubic_.gpu_data(),
bottom_diff);
break;
case MarginInnerProductParameter_MarginType_QUADRUPLE:
Margin_bottom_quadruple_backward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, K_, lambda_, bottom_data, weight, top_diff, label,
x_norm_.gpu_data(), sign_3_.gpu_data(), sign_4_.gpu_data(),
cos_theta_.gpu_data(), cos_theta_quadratic_.gpu_data(),
cos_theta_cubic_.gpu_data(), cos_theta_quartic_.gpu_data(),
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown margin type.";
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(MarginInnerProductLayer);
} // namespace caffe
|
be28dbb1f271288827e265756e3a95db42c1bca1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "convolution_3d_layer_hessian_cuda_kepler.h"
#include <hip/hip_runtime.h>
#include <boost/format.hpp>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "cuda_texture.h"
#include "packed_config.h"
#include "space_filling_curve.h"
#include "../convolution_layer.h"
#define FEATURE_MAP_BLOCK_SIZE 4
#define WINDOW_WIDTH_LOCAL 4
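// FEATURE_MAP_BLOCK_SIZE: number of consecutive output feature maps each thread accumulates.
// WINDOW_WIDTH_LOCAL: register-tile width along x used by the kernels whose window width is
// not a compile-time constant.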
namespace nnforge
{
namespace cuda
{
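		// Forward pass: each thread computes BLOCK_SIZE consecutive output columns (x) for
		// FEATURE_MAP_BLOCK_SIZE consecutive output feature maps. packed_config<3> packs
		// (output y, output z, output feature map block); inputs and weights are read through
		// texture objects. The "_exact_" variant below additionally fixes the window width at compile time.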
template<int BLOCK_SIZE>
__global__ void convolution_3d_tex_blocked_hess_kernel_kepler(
float * __restrict output,
hipTextureObject_t input_tex,
hipTextureObject_t weights_tex,
const float * __restrict biases,
const packed_config<3> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int packed_config_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE;
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < output_width) && (packed_config_id < packed_config_count);
if (in_bounds)
{
int weight_count_per_output_feature_map = (window_width * window_height) * (window_depth * input_feature_map_count);
packed_config<3> conf = packed_config_list[packed_config_id];
int y = conf.get_val(0);
int z = conf.get_val(1);
int output_feature_map_id = conf.get_val(2);
int input_elem_id = ((entry_id * input_feature_map_count * input_depth + z) * input_height + y) * input_width + x;
int weights_offset = weight_count_per_output_feature_map * output_feature_map_id;
float bias_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
bias_list[i] = biases[output_feature_map_id + i];
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = bias_list[i];
for(int input_layer_id = 0; input_layer_id < input_feature_map_count; ++input_layer_id)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll 4
for(int input_x = 0; input_x < window_width; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_output_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch<float>(input_tex, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
weights_offset++;
input_elem_id++;
} // for input_x
input_elem_id += input_width - window_width;
} // for input_y
input_elem_id += input_width * (input_height - window_height);
} // for input_z
input_elem_id += input_height * input_width * (input_depth - window_depth);
}
float * base_output = output + (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_depth * output_height * output_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[j + output_neuron_count_per_feature_map * i] = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
template<int WINDOW_WIDTH, int BLOCK_SIZE>
__global__ void convolution_3d_tex_exact_blocked_hess_kernel_kepler(
float * __restrict output,
hipTextureObject_t input_tex,
hipTextureObject_t weights_tex,
const float * __restrict biases,
const packed_config<3> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int packed_config_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE;
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < output_width) && (packed_config_id < packed_config_count);
if (in_bounds)
{
int weight_count_per_output_feature_map = (WINDOW_WIDTH * window_height) * (window_depth * input_feature_map_count);
packed_config<3> conf = packed_config_list[packed_config_id];
int y = conf.get_val(0);
int z = conf.get_val(1);
int output_feature_map_id = conf.get_val(2);
int input_elem_id = ((entry_id * input_feature_map_count * input_depth + z) * input_height + y) * input_width + x;
int weights_offset = weight_count_per_output_feature_map * output_feature_map_id;
float bias_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
bias_list[i] = biases[output_feature_map_id + i];
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = bias_list[i];
for(int input_layer_id = 0; input_layer_id < input_feature_map_count; ++input_layer_id)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_output_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch<float>(input_tex, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
weights_offset++;
input_elem_id++;
} // for input_x
input_elem_id += input_width - WINDOW_WIDTH;
} // for input_y
input_elem_id += input_width * (input_height - window_height);
} // for input_z
input_elem_id += input_height * input_width * (input_depth - window_depth);
}
float * base_output = output + (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_depth * output_height * output_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[j + output_neuron_count_per_feature_map * i] = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
extern __shared__ float arr[];
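		// Bias Hessian update: per output feature map, sums output errors over the entries and
		// spatial positions handled by this thread block, reduces the partial sums in shared
		// memory (arr), and atomically adds the block total into hessian_biases.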
__global__ void convolution_3d_update_biases_hess_kernel_kepler(
float * __restrict hessian_biases,
const float * __restrict output_errors,
int block_size,
int output_elem_count_per_feature_map,
int output_feature_map_count,
int entry_count)
{
int output_neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int output_feature_map_id = blockIdx.y;
int block_id = blockIdx.z * blockDim.z + threadIdx.z;
int base_entry_id = block_size * block_id;
int thread_id = blockDim.x * threadIdx.z + threadIdx.x;
int threadblock_size = blockDim.x * blockDim.z;
float sum = 0.0F;
int iteration_count = min(entry_count - base_entry_id, block_size);
if (output_neuron_id < output_elem_count_per_feature_map)
{
const float * current_error = output_errors + (base_entry_id * output_feature_map_count + output_feature_map_id) * output_elem_count_per_feature_map + output_neuron_id;
int output_elem_count_per_entry = output_elem_count_per_feature_map * output_feature_map_count;
for(int i = 0; i < iteration_count; ++i)
{
sum += *current_error;
current_error += output_elem_count_per_entry;
}
}
arr[thread_id] = sum;
__syncthreads();
int t_add_elems = threadblock_size >> 1;
int t_working_elems = (threadblock_size + 1) >> 1;
while (t_add_elems > 0)
{
if (thread_id < t_add_elems)
arr[thread_id] += arr[thread_id + t_working_elems];
t_add_elems = t_working_elems >> 1;
t_working_elems = (t_working_elems + 1) >> 1;
__syncthreads();
}
if (thread_id == 0)
atomicAdd(hessian_biases + output_feature_map_id, arr[0]);
}
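		// Backward pass of the Hessian estimate: input errors are obtained by propagating the
		// output errors through the element-wise *squared* weights (a diagonal second-order
		// approximation), mirroring the blocking scheme of the forward kernel.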
template<int BLOCK_SIZE>
__global__ void convolution_3d_square_deriviative_tex_hess_kernel_kepler(
float * __restrict input_errors,
hipTextureObject_t output_tex,
hipTextureObject_t weights_squared_tex,
const packed_config<3> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int packed_config_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE + (BLOCK_SIZE - 1);
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < input_width + (BLOCK_SIZE - 1)) && (packed_config_id < packed_config_count);
if (in_bounds)
{
int weight_count_per_input_feature_map = window_depth * window_height * window_width;
packed_config<3> conf = packed_config_list[packed_config_id];
int y = conf.get_val(0);
int z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_elem_id = ((entry_id * output_feature_map_count * output_depth + z) * output_height + y) * output_width + x;
int weights_offset = weight_count_per_input_feature_map * input_feature_map_id;
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int min_z_exclusive = z - output_depth;
int max_z_inclusive = z;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
for(int output_layer_id = 0; output_layer_id < output_feature_map_count; ++output_layer_id)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
bool b_fit_z = (input_z > min_z_exclusive) && (input_z <= max_z_inclusive);
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit_y = b_fit_z && (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
int input_x = 0;
#pragma unroll 1
for(; input_x < (window_width - (WINDOW_WIDTH_LOCAL - 1)); input_x += WINDOW_WIDTH_LOCAL)
{
float output_vals[BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1; ++i)
{
								bool b_fit_x = b_fit_y && (i > min_x_exclusive) && (i <= max_x_inclusive);
if (b_fit_x)
output_vals[i] = tex1Dfetch<float>(output_tex, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
output_elem_id -= WINDOW_WIDTH_LOCAL;
#pragma unroll
for(int input_x_local = 0; input_x_local < WINDOW_WIDTH_LOCAL; ++input_x_local)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_squared_tex, weights_offset + weight_count_per_input_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x_local + j] * weight_list[i];
}
weights_offset++;
}
}
#pragma unroll 1
for(; input_x < window_width; ++input_x)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
bool b_fit_x = b_fit_y && (input_x + j > min_x_exclusive) && (input_x + j <= max_x_inclusive);
if (b_fit_x)
{
float inp = tex1Dfetch<float>(output_tex, output_elem_id - j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * tex1Dfetch<float>(weights_squared_tex, weights_offset + weight_count_per_input_feature_map * i);
}
}
weights_offset++;
output_elem_id--;
}
output_elem_id += window_width - output_width;
} // for input_y
output_elem_id += output_width * (window_height - output_height);
} // for input_z
output_elem_id += output_width * output_height * (output_depth + window_depth);
weights_offset += weight_count_per_input_feature_map * (input_feature_map_count - 1);
}
float * base_input = input_errors + (((entry_id * input_feature_map_count + input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_depth * input_height * input_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
template<int WINDOW_WIDTH, int BLOCK_SIZE>
__global__ void convolution_3d_square_deriviative_tex_exact_hess_kernel_kepler(
float * __restrict input_errors,
hipTextureObject_t output_tex,
hipTextureObject_t weights_squared_tex,
const packed_config<3> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int packed_config_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE + (BLOCK_SIZE - 1);
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < input_width + (BLOCK_SIZE - 1)) && (packed_config_id < packed_config_count);
if (in_bounds)
{
int weight_count_per_input_feature_map = window_depth * window_height * WINDOW_WIDTH;
packed_config<3> conf = packed_config_list[packed_config_id];
int y = conf.get_val(0);
int z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_elem_id = ((entry_id * output_feature_map_count * output_depth + z) * output_height + y) * output_width + x;
int weights_offset = weight_count_per_input_feature_map * input_feature_map_id;
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int min_z_exclusive = z - output_depth;
int max_z_inclusive = z;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
unsigned int mask = 0;
#pragma unroll
for(int i = BLOCK_SIZE + WINDOW_WIDTH - 2; i >= 0; --i)
mask = mask << 1 | (((i > min_x_exclusive) && (i <= max_x_inclusive)) ? 1 : 0);
for(int output_layer_id = 0; output_layer_id < output_feature_map_count; ++output_layer_id)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
bool b_fit_z = (input_z > min_z_exclusive) && (input_z <= max_z_inclusive);
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit_y = b_fit_z && (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
float output_vals[BLOCK_SIZE + WINDOW_WIDTH - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH - 1; ++i)
{
bool b_fit_x = b_fit_y && (((1 << i) & mask) != 0);
if (b_fit_x)
output_vals[i] = tex1Dfetch<float>(output_tex, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_squared_tex, weights_offset + weight_count_per_input_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x + j] * weight_list[i];
}
weights_offset++;
}
output_elem_id -= output_width;
} // for input_y
output_elem_id += output_width * (window_height - output_height);
} // for input_z
output_elem_id += output_width * output_height * (output_depth + window_depth);
weights_offset += weight_count_per_input_feature_map * (input_feature_map_count - 1);
}
float * base_input = input_errors + (((entry_id * input_feature_map_count + input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_depth * input_height * input_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
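		// Weight Hessian update: for each weight position, accumulates output errors times the
		// element-wise squared inputs over a block of entries, keeping WINDOW_WIDTH_LOCAL squared
		// inputs in a sliding register buffer along x, and atomically adds the sums into hessian_weights.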
__global__ void convolution_3d_update_weights_hess_kernel_kepler(
float * __restrict hessian_weights,
hipTextureObject_t input_squared_tex,
hipTextureObject_t output_tex,
const packed_config<6> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int block_size,
int packed_config_count)
{
int weight_x = (blockIdx.x * blockDim.x + threadIdx.x) * WINDOW_WIDTH_LOCAL;
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int base_entry_id = (blockIdx.z * blockDim.z + threadIdx.z) * block_size;
bool in_bounds = (packed_config_id < packed_config_count) && (weight_x < window_width) && (base_entry_id < entry_count);
if (in_bounds)
{
int output_neuron_count_per_feature_map = output_depth * output_width * output_height;
packed_config<6> conf = packed_config_list[packed_config_id];
int weight_y = conf.get_val(0);
int weight_z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_y = conf.get_val(3);
int output_z = conf.get_val(4);
int output_feature_map_id = conf.get_val(5);
int iteration_count = min(block_size, entry_count - base_entry_id);
int output_errors_offset = (((base_entry_id * output_feature_map_count + output_feature_map_id) * output_depth + output_z) * output_height + output_y) * output_width;
int input_elem_id = (((base_entry_id * input_feature_map_count + input_feature_map_id) * input_depth + output_z + weight_z) * input_height + output_y + weight_y) * input_width + weight_x;
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL; ++i)
sums[i] = 0.0F;
for(int t = 0; t < iteration_count; ++t)
{
float input_squared_buf[WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH_LOCAL; ++i)
{
input_squared_buf[i] = tex1Dfetch<float>(input_squared_tex, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = tex1Dfetch<float>(output_tex, output_errors_offset + output_neuron_count_per_feature_map * i);
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH_LOCAL - 1; ++i)
input_squared_buf[i] = input_squared_buf[i + 1];
input_squared_buf[WINDOW_WIDTH_LOCAL - 1] = tex1Dfetch<float>(input_squared_tex, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
sums[i * WINDOW_WIDTH_LOCAL + j] += output_error_list[i] * input_squared_buf[j];
output_errors_offset++;
input_elem_id++;
}
output_errors_offset += (output_feature_map_count * output_height * output_depth - 1) * output_width;
input_elem_id += (input_feature_map_count * input_height * input_depth - 1) * input_width + (window_width - WINDOW_WIDTH_LOCAL);
}
float * base_weights = hessian_weights + (((output_feature_map_id * input_feature_map_count + input_feature_map_id) * window_depth + weight_z) * window_height + weight_y) * window_width + weight_x;
int weight_count_per_output_feature_map = input_feature_map_count * window_depth * window_height * window_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
if (j < window_width - weight_x)
atomicAdd(base_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH_LOCAL + j]);
}
}
}
}
template<int WINDOW_WIDTH>
__global__ void convolution_3d_update_weights_exact_hess_kernel_kepler(
float * __restrict hessian_weights,
hipTextureObject_t input_squared_tex,
hipTextureObject_t output_tex,
const packed_config<6> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int block_size,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int base_entry_id = (blockIdx.y * blockDim.y + threadIdx.y) * block_size;
bool in_bounds = (packed_config_id < packed_config_count) && (base_entry_id < entry_count);
if (in_bounds)
{
int output_neuron_count_per_feature_map = output_depth * output_width * output_height;
packed_config<6> conf = packed_config_list[packed_config_id];
int weight_y = conf.get_val(0);
int weight_z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_y = conf.get_val(3);
int output_z = conf.get_val(4);
int output_feature_map_id = conf.get_val(5);
int iteration_count = min(block_size, entry_count - base_entry_id);
int output_errors_offset = (((base_entry_id * output_feature_map_count + output_feature_map_id) * output_depth + output_z) * output_height + output_y) * output_width;
int input_elem_id = (((base_entry_id * input_feature_map_count + input_feature_map_id) * input_depth + output_z + weight_z) * input_height + output_y + weight_y) * input_width;
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH; ++i)
sums[i] = 0.0F;
for(int t = 0; t < iteration_count; ++t)
{
float input_squared_buf[WINDOW_WIDTH];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH; ++i)
{
input_squared_buf[i] = tex1Dfetch<float>(input_squared_tex, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = tex1Dfetch<float>(output_tex, output_errors_offset + output_neuron_count_per_feature_map * i);
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH - 1; ++i)
input_squared_buf[i] = input_squared_buf[i + 1];
input_squared_buf[WINDOW_WIDTH - 1] = tex1Dfetch<float>(input_squared_tex, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
sums[i * WINDOW_WIDTH + j] += output_error_list[i] * input_squared_buf[j];
output_errors_offset++;
input_elem_id++;
}
output_errors_offset += (output_feature_map_count * output_height * output_depth - 1) * output_width;
input_elem_id += (input_feature_map_count * input_height * input_depth - 1) * input_width;
}
float * base_weights = hessian_weights + (((output_feature_map_id * input_feature_map_count + input_feature_map_id) * window_depth + weight_z) * window_height + weight_y) * WINDOW_WIDTH;
int weight_count_per_output_feature_map = input_feature_map_count * window_depth * window_height * WINDOW_WIDTH;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
atomicAdd(base_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH + j]);
}
}
}
}
convolution_3d_layer_hessian_cuda_kepler::convolution_3d_layer_hessian_cuda_kepler()
{
}
convolution_3d_layer_hessian_cuda_kepler::~convolution_3d_layer_hessian_cuda_kepler()
{
}
#define MAX_BLOCK_SIZE 5
#define MAX_WINDOW_WIDTH 10
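		// The launch_* macros below dispatch to template instantiations of the kernels for every
		// supported compile-time window width (1..MAX_WINDOW_WIDTH) and x block size
		// (1..MAX_BLOCK_SIZE); larger window widths fall back to the generic (non-exact) kernels.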
#define launch_exact_kernel_const_const(window_width_const, block_size_const) \
hipLaunchKernelGGL(( convolution_3d_tex_exact_blocked_hess_kernel_kepler<window_width_const,block_size_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_neurons_buffer, input_tex, weights_tex, *data[1], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, packed_config_count);
#define launch_exact_kernel_const(window_width, block_size_const) \
switch (window_width) \
{ \
case 1: \
launch_exact_kernel_const_const(1, block_size_const); \
break; \
case 2: \
launch_exact_kernel_const_const(2, block_size_const); \
break; \
case 3: \
launch_exact_kernel_const_const(3, block_size_const); \
break; \
case 4: \
launch_exact_kernel_const_const(4, block_size_const); \
break; \
case 5: \
launch_exact_kernel_const_const(5, block_size_const); \
break; \
case 6: \
launch_exact_kernel_const_const(6, block_size_const); \
break; \
case 7: \
launch_exact_kernel_const_const(7, block_size_const); \
break; \
case 8: \
launch_exact_kernel_const_const(8, block_size_const); \
break; \
case 9: \
launch_exact_kernel_const_const(9, block_size_const); \
break; \
case 10: \
launch_exact_kernel_const_const(10, block_size_const); \
break; \
};
#define launch_exact_kernel(window_width, block_size) \
switch (block_size) \
{ \
case 1: \
launch_exact_kernel_const(window_width, 1); \
break; \
case 2: \
launch_exact_kernel_const(window_width, 2); \
break; \
case 3: \
launch_exact_kernel_const(window_width, 3); \
break; \
case 4: \
launch_exact_kernel_const(window_width, 4); \
break; \
case 5: \
launch_exact_kernel_const(window_width, 5); \
break; \
};
#define launch_kernel_const(block_size_const) \
hipLaunchKernelGGL(( convolution_3d_tex_blocked_hess_kernel_kepler<block_size_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_neurons_buffer, input_tex, weights_tex, *data[1], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, packed_config_count);
#define launch_kernel(block_size) \
switch (block_size) \
{ \
case 1: \
launch_kernel_const(1); \
break; \
case 2: \
launch_kernel_const(2); \
break; \
case 3: \
launch_kernel_const(3); \
break; \
case 4: \
launch_kernel_const(4); \
break; \
case 5: \
launch_kernel_const(5); \
break; \
};
#define launch_backprop_exact_kernel_const_const(window_width_const, block_size_const) \
hipLaunchKernelGGL(( convolution_3d_square_deriviative_tex_exact_hess_kernel_kepler<window_width_const,block_size_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, output_tex, weights_squared_tex, packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, packed_config_count);
#define launch_backprop_exact_kernel_const(window_width, block_size_const) \
switch (window_width) \
{ \
case 1: \
launch_backprop_exact_kernel_const_const(1, block_size_const); \
break; \
case 2: \
launch_backprop_exact_kernel_const_const(2, block_size_const); \
break; \
case 3: \
launch_backprop_exact_kernel_const_const(3, block_size_const); \
break; \
case 4: \
launch_backprop_exact_kernel_const_const(4, block_size_const); \
break; \
case 5: \
launch_backprop_exact_kernel_const_const(5, block_size_const); \
break; \
case 6: \
launch_backprop_exact_kernel_const_const(6, block_size_const); \
break; \
case 7: \
launch_backprop_exact_kernel_const_const(7, block_size_const); \
break; \
case 8: \
launch_backprop_exact_kernel_const_const(8, block_size_const); \
break; \
case 9: \
launch_backprop_exact_kernel_const_const(9, block_size_const); \
break; \
case 10: \
launch_backprop_exact_kernel_const_const(10, block_size_const); \
break; \
};
#define launch_backprop_exact_kernel(window_width, block_size) \
switch (block_size) \
{ \
case 1: \
launch_backprop_exact_kernel_const(window_width, 1); \
break; \
case 2: \
launch_backprop_exact_kernel_const(window_width, 2); \
break; \
case 3: \
launch_backprop_exact_kernel_const(window_width, 3); \
break; \
case 4: \
launch_backprop_exact_kernel_const(window_width, 4); \
break; \
case 5: \
launch_backprop_exact_kernel_const(window_width, 5); \
break; \
};
#define launch_backprop_kernel_const(block_size_const) \
hipLaunchKernelGGL(( convolution_3d_square_deriviative_tex_hess_kernel_kepler<block_size_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, output_tex, weights_squared_tex, packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, packed_config_count);
#define launch_backprop_kernel(block_size) \
switch (block_size) \
{ \
case 1: \
launch_backprop_kernel_const(1); \
break; \
case 2: \
launch_backprop_kernel_const(2); \
break; \
case 3: \
launch_backprop_kernel_const(3); \
break; \
case 4: \
launch_backprop_kernel_const(4); \
break; \
case 5: \
launch_backprop_kernel_const(5); \
break; \
};
#define launch_update_weights_exact_kernel_const(window_width_const) \
hipLaunchKernelGGL(( convolution_3d_update_weights_exact_hess_kernel_kepler<window_width_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *hessian_data[0], input_squared_tex, output_tex, packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, block_size, packed_config_count);
#define launch_update_weights_exact_kernel(window_width) \
switch (window_width) \
{ \
case 1: \
launch_update_weights_exact_kernel_const(1); \
break; \
case 2: \
launch_update_weights_exact_kernel_const(2); \
break; \
case 3: \
launch_update_weights_exact_kernel_const(3); \
break; \
case 4: \
launch_update_weights_exact_kernel_const(4); \
break; \
case 5: \
launch_update_weights_exact_kernel_const(5); \
break; \
case 6: \
launch_update_weights_exact_kernel_const(6); \
break; \
case 7: \
launch_update_weights_exact_kernel_const(7); \
break; \
case 8: \
launch_update_weights_exact_kernel_const(8); \
break; \
case 9: \
launch_update_weights_exact_kernel_const(9); \
break; \
case 10: \
launch_update_weights_exact_kernel_const(10); \
break; \
};
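// Forward pass launcher: wraps the weights and input neurons in textures, sizes the
// grid from the packed (y, z, output-feature-map-block) config count, and dispatches
// the exact-width kernel when the window fits MAX_WINDOW_WIDTH, otherwise the generic one.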
void convolution_3d_layer_hessian_cuda_kepler::enqueue_test(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
cuda_texture weights_tex(data[0]);
cuda_texture input_tex(input_neurons_buffer);
int packed_config_count = output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * forward_output_feature_map_block_count;
const packed_config<3> * packed_config_list = static_cast<const packed_config<3> *>((const void *)*additional_buffers[1]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
forward_x_block_count,
packed_config_count,
entry_count);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
launch_exact_kernel(window_sizes[0], forward_x_block_size);
}
else
{
launch_kernel(forward_x_block_size);
}
}
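// Backprop launcher: same dispatch pattern over the input-side packed configs,
// propagating output errors through the squared-weights texture.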
void convolution_3d_layer_hessian_cuda_kepler::enqueue_backprop(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_squared,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
cuda_texture output_tex(output_errors_buffer);
cuda_texture weights_squared_tex(data_squared[0]);
int packed_config_count = input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2]* backward_input_feature_map_block_count;
const packed_config<3> * packed_config_list = static_cast<const packed_config<3> *>((const void *)*additional_buffers[3]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
backward_x_block_count,
packed_config_count,
entry_count);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size);
}
else
{
launch_backprop_kernel(backward_x_block_size);
}
}
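// Hessian accumulation: squares the input neurons into additional_buffers[0], runs the
// weight-update kernel (exact-width or generic), then the shared-memory bias-update reduction.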
void convolution_3d_layer_hessian_cuda_kepler::enqueue_update_hessian(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& hessian_data,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
// Update weights
{
// Store input neurons multiplied element-wise by themselves
cuda_util::multiply_by_itself(
*cuda_config,
*input_neurons_buffer,
*additional_buffers[0],
input_elem_count_per_entry * entry_count,
stream_id);
cuda_texture input_squared_tex(additional_buffers[0]);
cuda_texture output_tex(output_errors_buffer);
int block_size = get_weights_update_block_size(entry_count);
int block_count = (entry_count + block_size - 1) / block_size;
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
int packed_config_count = window_sizes[1] * window_sizes[2] * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count;
const packed_config<6> * packed_config_list = static_cast<const packed_config<6> *>((const void *)*additional_buffers[2]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
packed_config_count,
block_count,
1);
launch_update_weights_exact_kernel(window_sizes[0]);
}
else
{
int packed_config_count = window_sizes[1] * window_sizes[2] * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count;
const packed_config<6> * packed_config_list = static_cast<const packed_config<6> *>((const void *)*additional_buffers[2]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
updater_window_x_block_count,
packed_config_count,
block_count);
hipLaunchKernelGGL(( convolution_3d_update_weights_hess_kernel_kepler), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*hessian_data[0],
input_squared_tex,
output_tex,
packed_config_list,
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
output_configuration_specific.dimension_sizes[2],
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
input_configuration_specific.dimension_sizes[2],
window_sizes[0],
window_sizes[1],
window_sizes[2],
input_configuration_specific.feature_map_count,
output_configuration_specific.feature_map_count,
entry_count,
block_size,
packed_config_count);
}
}
// Update biases
{
int block_size = get_bias_update_block_size(entry_count);
int block_count = (entry_count + block_size - 1) / block_size;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_feature_map,
1,
block_count);
kernel_dims.first.y = output_configuration_specific.feature_map_count;
int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
int smem_size = threadblock_size * sizeof(float);
hipLaunchKernelGGL(( convolution_3d_update_biases_hess_kernel_kepler), dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id,
*hessian_data[1],
*output_errors_buffer,
block_size,
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count);
}
}
int convolution_3d_layer_hessian_cuda_kepler::get_block_size(int width)
{
int block_count = (width + MAX_BLOCK_SIZE - 1) / MAX_BLOCK_SIZE;
int block_size = (width + block_count - 1) / block_count;
return block_size;
}
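// Precomputes per-dimension block sizes/counts and the space-filling-curve ordering of
// the weight-update configs once the layer geometry is known.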
void convolution_3d_layer_hessian_cuda_kepler::hessian_configured()
{
std::tr1::shared_ptr<const convolution_layer> layer_derived = std::tr1::dynamic_pointer_cast<const convolution_layer>(layer_schema);
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
window_sizes.push_back(static_cast<int>(*it));
forward_x_block_size = get_block_size(output_configuration_specific.dimension_sizes[0]);
forward_x_block_count = (output_configuration_specific.dimension_sizes[0] + forward_x_block_size - 1) / forward_x_block_size;
forward_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_window_x_block_count = (window_sizes[0] <= MAX_WINDOW_WIDTH) ? 1 : (window_sizes[0] + WINDOW_WIDTH_LOCAL - 1) / WINDOW_WIDTH_LOCAL;
{
std::tr1::array<int, 6> size_list;
size_list[0] = window_sizes[1];
size_list[1] = window_sizes[2];
size_list[2] = input_configuration_specific.feature_map_count;
size_list[3] = output_configuration_specific.dimension_sizes[1];
size_list[4] = output_configuration_specific.dimension_sizes[2];
size_list[5] = updater_output_feature_map_block_count;
space_filling_curve<6>::fill_pattern(size_list, updater_config_ordered_list);
}
if (backprop_required)
{
backward_x_block_size = get_block_size(input_configuration_specific.dimension_sizes[0]);
backward_x_block_count = (input_configuration_specific.dimension_sizes[0] + backward_x_block_size - 1) / backward_x_block_size;
backward_input_feature_map_block_count = (input_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
}
}
bool convolution_3d_layer_hessian_cuda_kepler::is_in_place_backprop() const
{
return false;
}
std::vector<size_t> convolution_3d_layer_hessian_cuda_kepler::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(input_elem_count_per_entry * sizeof(float));
return res;
}
std::vector<unsigned int> convolution_3d_layer_hessian_cuda_kepler::get_linear_addressing_through_texture_per_entry() const
{
std::vector<unsigned int> res;
res.push_back(input_elem_count_per_entry);
res.push_back(output_elem_count_per_entry);
return res;
}
int convolution_3d_layer_hessian_cuda_kepler::get_bias_update_block_size(int entry_count)
{
int block_size = std::min<int>(std::max<int>(static_cast<int>(sqrtf(static_cast<float>(entry_count))), 1), entry_count);
return block_size;
}
int convolution_3d_layer_hessian_cuda_kepler::get_weights_update_block_size(int entry_count)
{
int block_size = std::min<int>(std::max<int>(static_cast<int>(sqrtf(static_cast<float>(entry_count))), 1), entry_count);
return block_size;
}
std::vector<size_t> convolution_3d_layer_hessian_cuda_kepler::get_sizes_of_additional_buffers_fixed() const
{
std::vector<size_t> res;
res.push_back(sizeof(packed_config<3>) * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * forward_output_feature_map_block_count);
res.push_back(sizeof(packed_config<6>) * window_sizes[1] * window_sizes[2] * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count);
if (backprop_required)
{
res.push_back(sizeof(packed_config<3>) * input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2] * backward_input_feature_map_block_count);
}
return res;
}
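// Uploads the packed-config task lists: (y, z, feature-map-block) triples for the forward
// and backward passes, and the curve-ordered 6-tuples for the weight update.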
void convolution_3d_layer_hessian_cuda_kepler::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
{
{
std::vector<packed_config<3> > task_list;
packed_config<3> new_elem;
for(int output_feature_map_block_id = 0; output_feature_map_block_id < forward_output_feature_map_block_count; ++output_feature_map_block_id)
{
new_elem.set_val(2, output_feature_map_block_id * FEATURE_MAP_BLOCK_SIZE);
for(int z = 0; z < output_configuration_specific.dimension_sizes[2]; ++z)
{
new_elem.set_val(1, z);
for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y)
{
new_elem.set_val(0, y);
task_list.push_back(new_elem);
}
}
}
cuda_safe_call(hipMemcpy(*additional_buffers[1], &(*task_list.begin()), sizeof(packed_config<3>) * task_list.size(), hipMemcpyHostToDevice));
}
{
std::vector<packed_config<6> > task_list;
packed_config<6> new_elem;
for(std::vector<std::tr1::array<int, 6> >::const_iterator it = updater_config_ordered_list.begin(); it != updater_config_ordered_list.end(); ++it)
{
new_elem.set_val(0, it->at(0));
new_elem.set_val(1, it->at(1));
new_elem.set_val(2, it->at(2));
new_elem.set_val(3, it->at(3));
new_elem.set_val(4, it->at(4));
new_elem.set_val(5, it->at(5) * FEATURE_MAP_BLOCK_SIZE);
task_list.push_back(new_elem);
}
cuda_safe_call(hipMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(packed_config<6>) * task_list.size(), hipMemcpyHostToDevice));
}
if (backprop_required)
{
std::vector<packed_config<3> > task_list;
packed_config<3> new_elem;
for(int input_feature_map_block_id = 0; input_feature_map_block_id < backward_input_feature_map_block_count; ++input_feature_map_block_id)
{
new_elem.set_val(2, input_feature_map_block_id * FEATURE_MAP_BLOCK_SIZE);
for(int z = 0; z < input_configuration_specific.dimension_sizes[2]; ++z)
{
new_elem.set_val(1, z);
for(int y = 0; y < input_configuration_specific.dimension_sizes[1]; ++y)
{
new_elem.set_val(0, y);
task_list.push_back(new_elem);
}
}
}
cuda_safe_call(hipMemcpy(*additional_buffers[3], &(*task_list.begin()), sizeof(packed_config<3>) * task_list.size(), hipMemcpyHostToDevice));
}
}
}
}
| be28dbb1f271288827e265756e3a95db42c1bca1.cu | /*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "convolution_3d_layer_hessian_cuda_kepler.h"
#include <cuda_runtime.h>
#include <boost/format.hpp>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "cuda_texture.h"
#include "packed_config.h"
#include "space_filling_curve.h"
#include "../convolution_layer.h"
#define FEATURE_MAP_BLOCK_SIZE 4
#define WINDOW_WIDTH_LOCAL 4
namespace nnforge
{
namespace cuda
{
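// Forward convolution, generic window width: each thread accumulates BLOCK_SIZE consecutive
// output columns for FEATURE_MAP_BLOCK_SIZE consecutive output feature maps, reading inputs
// and weights through texture objects.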
template<int BLOCK_SIZE>
__global__ void convolution_3d_tex_blocked_hess_kernel_kepler(
float * __restrict output,
cudaTextureObject_t input_tex,
cudaTextureObject_t weights_tex,
const float * __restrict biases,
const packed_config<3> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int packed_config_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE;
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < output_width) && (packed_config_id < packed_config_count);
if (in_bounds)
{
int weight_count_per_output_feature_map = (window_width * window_height) * (window_depth * input_feature_map_count);
packed_config<3> conf = packed_config_list[packed_config_id];
int y = conf.get_val(0);
int z = conf.get_val(1);
int output_feature_map_id = conf.get_val(2);
int input_elem_id = ((entry_id * input_feature_map_count * input_depth + z) * input_height + y) * input_width + x;
int weights_offset = weight_count_per_output_feature_map * output_feature_map_id;
float bias_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
bias_list[i] = biases[output_feature_map_id + i];
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = bias_list[i];
for(int input_layer_id = 0; input_layer_id < input_feature_map_count; ++input_layer_id)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll 4
for(int input_x = 0; input_x < window_width; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_output_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch<float>(input_tex, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
weights_offset++;
input_elem_id++;
} // for input_x
input_elem_id += input_width - window_width;
} // for input_y
input_elem_id += input_width * (input_height - window_height);
} // for input_z
input_elem_id += input_height * input_width * (input_depth - window_depth);
}
float * base_output = output + (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_depth * output_height * output_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[j + output_neuron_count_per_feature_map * i] = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
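// Forward convolution with the window width fixed at compile time (WINDOW_WIDTH),
// letting the innermost x-loop be fully unrolled.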
template<int WINDOW_WIDTH, int BLOCK_SIZE>
__global__ void convolution_3d_tex_exact_blocked_hess_kernel_kepler(
float * __restrict output,
cudaTextureObject_t input_tex,
cudaTextureObject_t weights_tex,
const float * __restrict biases,
const packed_config<3> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int packed_config_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE;
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < output_width) && (packed_config_id < packed_config_count);
if (in_bounds)
{
int weight_count_per_output_feature_map = (WINDOW_WIDTH * window_height) * (window_depth * input_feature_map_count);
packed_config<3> conf = packed_config_list[packed_config_id];
int y = conf.get_val(0);
int z = conf.get_val(1);
int output_feature_map_id = conf.get_val(2);
int input_elem_id = ((entry_id * input_feature_map_count * input_depth + z) * input_height + y) * input_width + x;
int weights_offset = weight_count_per_output_feature_map * output_feature_map_id;
float bias_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
bias_list[i] = biases[output_feature_map_id + i];
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = bias_list[i];
for(int input_layer_id = 0; input_layer_id < input_feature_map_count; ++input_layer_id)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_output_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch<float>(input_tex, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
weights_offset++;
input_elem_id++;
} // for input_x
input_elem_id += input_width - WINDOW_WIDTH;
} // for input_y
input_elem_id += input_width * (input_height - window_height);
} // for input_z
input_elem_id += input_height * input_width * (input_depth - window_depth);
}
float * base_output = output + (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_depth * output_height * output_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[j + output_neuron_count_per_feature_map * i] = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
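// Bias part of the Hessian: sums output errors per feature map over a block of entries,
// reduces the partial sums in shared memory, and adds the result to hessian_biases with atomicAdd.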
extern __shared__ float arr[];
__global__ void convolution_3d_update_biases_hess_kernel_kepler(
float * __restrict hessian_biases,
const float * __restrict output_errors,
int block_size,
int output_elem_count_per_feature_map,
int output_feature_map_count,
int entry_count)
{
int output_neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int output_feature_map_id = blockIdx.y;
int block_id = blockIdx.z * blockDim.z + threadIdx.z;
int base_entry_id = block_size * block_id;
int thread_id = blockDim.x * threadIdx.z + threadIdx.x;
int threadblock_size = blockDim.x * blockDim.z;
float sum = 0.0F;
int iteration_count = min(entry_count - base_entry_id, block_size);
if (output_neuron_id < output_elem_count_per_feature_map)
{
const float * current_error = output_errors + (base_entry_id * output_feature_map_count + output_feature_map_id) * output_elem_count_per_feature_map + output_neuron_id;
int output_elem_count_per_entry = output_elem_count_per_feature_map * output_feature_map_count;
for(int i = 0; i < iteration_count; ++i)
{
sum += *current_error;
current_error += output_elem_count_per_entry;
}
}
arr[thread_id] = sum;
__syncthreads();
int t_add_elems = threadblock_size >> 1;
int t_working_elems = (threadblock_size + 1) >> 1;
while (t_add_elems > 0)
{
if (thread_id < t_add_elems)
arr[thread_id] += arr[thread_id + t_working_elems];
t_add_elems = t_working_elems >> 1;
t_working_elems = (t_working_elems + 1) >> 1;
__syncthreads();
}
if (thread_id == 0)
atomicAdd(hessian_biases + output_feature_map_id, arr[0]);
}
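// Backward pass, generic window width: propagates output errors through the squared weights
// to produce input errors; each thread covers BLOCK_SIZE input columns and
// FEATURE_MAP_BLOCK_SIZE input feature maps.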
template<int BLOCK_SIZE>
__global__ void convolution_3d_square_deriviative_tex_hess_kernel_kepler(
float * __restrict input_errors,
cudaTextureObject_t output_tex,
cudaTextureObject_t weights_squared_tex,
const packed_config<3> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int packed_config_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE + (BLOCK_SIZE - 1);
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < input_width + (BLOCK_SIZE - 1)) && (packed_config_id < packed_config_count);
if (in_bounds)
{
int weight_count_per_input_feature_map = window_depth * window_height * window_width;
packed_config<3> conf = packed_config_list[packed_config_id];
int y = conf.get_val(0);
int z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_elem_id = ((entry_id * output_feature_map_count * output_depth + z) * output_height + y) * output_width + x;
int weights_offset = weight_count_per_input_feature_map * input_feature_map_id;
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int min_z_exclusive = z - output_depth;
int max_z_inclusive = z;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
for(int output_layer_id = 0; output_layer_id < output_feature_map_count; ++output_layer_id)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
bool b_fit_z = (input_z > min_z_exclusive) && (input_z <= max_z_inclusive);
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit_y = b_fit_z && (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
int input_x = 0;
#pragma unroll 1
for(; input_x < (window_width - (WINDOW_WIDTH_LOCAL - 1)); input_x += WINDOW_WIDTH_LOCAL)
{
float output_vals[BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1; ++i)
{
bool b_fit_x = b_fit_y && (i > min_x_exclusive) && (i <= max_x_inclusive);
if (b_fit_x)
output_vals[i] = tex1Dfetch<float>(output_tex, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
output_elem_id -= WINDOW_WIDTH_LOCAL;
#pragma unroll
for(int input_x_local = 0; input_x_local < WINDOW_WIDTH_LOCAL; ++input_x_local)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_squared_tex, weights_offset + weight_count_per_input_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x_local + j] * weight_list[i];
}
weights_offset++;
}
}
#pragma unroll 1
for(; input_x < window_width; ++input_x)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
bool b_fit_x = b_fit_y && (input_x + j > min_x_exclusive) && (input_x + j <= max_x_inclusive);
if (b_fit_x)
{
float inp = tex1Dfetch<float>(output_tex, output_elem_id - j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * tex1Dfetch<float>(weights_squared_tex, weights_offset + weight_count_per_input_feature_map * i);
}
}
weights_offset++;
output_elem_id--;
}
output_elem_id += window_width - output_width;
} // for input_y
output_elem_id += output_width * (window_height - output_height);
} // for input_z
output_elem_id += output_width * output_height * (output_depth + window_depth);
weights_offset += weight_count_per_input_feature_map * (input_feature_map_count - 1);
}
float * base_input = input_errors + (((entry_id * input_feature_map_count + input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_depth * input_height * input_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
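// Backward pass with compile-time window width; the per-column validity test is
// precomputed into a bit mask before the unrolled loops.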
template<int WINDOW_WIDTH, int BLOCK_SIZE>
__global__ void convolution_3d_square_deriviative_tex_exact_hess_kernel_kepler(
float * __restrict input_errors,
cudaTextureObject_t output_tex,
cudaTextureObject_t weights_squared_tex,
const packed_config<3> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int packed_config_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE + (BLOCK_SIZE - 1);
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < input_width + (BLOCK_SIZE - 1)) && (packed_config_id < packed_config_count);
if (in_bounds)
{
int weight_count_per_input_feature_map = window_depth * window_height * WINDOW_WIDTH;
packed_config<3> conf = packed_config_list[packed_config_id];
int y = conf.get_val(0);
int z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_elem_id = ((entry_id * output_feature_map_count * output_depth + z) * output_height + y) * output_width + x;
int weights_offset = weight_count_per_input_feature_map * input_feature_map_id;
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int min_z_exclusive = z - output_depth;
int max_z_inclusive = z;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
unsigned int mask = 0;
#pragma unroll
for(int i = BLOCK_SIZE + WINDOW_WIDTH - 2; i >= 0; --i)
mask = mask << 1 | (((i > min_x_exclusive) && (i <= max_x_inclusive)) ? 1 : 0);
for(int output_layer_id = 0; output_layer_id < output_feature_map_count; ++output_layer_id)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
bool b_fit_z = (input_z > min_z_exclusive) && (input_z <= max_z_inclusive);
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit_y = b_fit_z && (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
float output_vals[BLOCK_SIZE + WINDOW_WIDTH - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH - 1; ++i)
{
bool b_fit_x = b_fit_y && (((1 << i) & mask) != 0);
if (b_fit_x)
output_vals[i] = tex1Dfetch<float>(output_tex, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_squared_tex, weights_offset + weight_count_per_input_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x + j] * weight_list[i];
}
weights_offset++;
}
output_elem_id -= output_width;
} // for input_y
output_elem_id += output_width * (window_height - output_height);
} // for input_z
output_elem_id += output_width * output_height * (output_depth + window_depth);
weights_offset += weight_count_per_input_feature_map * (input_feature_map_count - 1);
}
float * base_input = input_errors + (((entry_id * input_feature_map_count + input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_depth * input_height * input_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
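// Weight part of the Hessian, generic window width: each thread covers a WINDOW_WIDTH_LOCAL
// strip of weights, accumulating squared inputs times output errors over a block of entries
// and flushing the sums with atomicAdd.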
__global__ void convolution_3d_update_weights_hess_kernel_kepler(
float * __restrict hessian_weights,
cudaTextureObject_t input_squared_tex,
cudaTextureObject_t output_tex,
const packed_config<6> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int block_size,
int packed_config_count)
{
int weight_x = (blockIdx.x * blockDim.x + threadIdx.x) * WINDOW_WIDTH_LOCAL;
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int base_entry_id = (blockIdx.z * blockDim.z + threadIdx.z) * block_size;
bool in_bounds = (packed_config_id < packed_config_count) && (weight_x < window_width) && (base_entry_id < entry_count);
if (in_bounds)
{
int output_neuron_count_per_feature_map = output_depth * output_width * output_height;
packed_config<6> conf = packed_config_list[packed_config_id];
int weight_y = conf.get_val(0);
int weight_z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_y = conf.get_val(3);
int output_z = conf.get_val(4);
int output_feature_map_id = conf.get_val(5);
int iteration_count = min(block_size, entry_count - base_entry_id);
int output_errors_offset = (((base_entry_id * output_feature_map_count + output_feature_map_id) * output_depth + output_z) * output_height + output_y) * output_width;
int input_elem_id = (((base_entry_id * input_feature_map_count + input_feature_map_id) * input_depth + output_z + weight_z) * input_height + output_y + weight_y) * input_width + weight_x;
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL; ++i)
sums[i] = 0.0F;
for(int t = 0; t < iteration_count; ++t)
{
float input_squared_buf[WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH_LOCAL; ++i)
{
input_squared_buf[i] = tex1Dfetch<float>(input_squared_tex, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = tex1Dfetch<float>(output_tex, output_errors_offset + output_neuron_count_per_feature_map * i);
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH_LOCAL - 1; ++i)
input_squared_buf[i] = input_squared_buf[i + 1];
input_squared_buf[WINDOW_WIDTH_LOCAL - 1] = tex1Dfetch<float>(input_squared_tex, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
sums[i * WINDOW_WIDTH_LOCAL + j] += output_error_list[i] * input_squared_buf[j];
output_errors_offset++;
input_elem_id++;
}
output_errors_offset += (output_feature_map_count * output_height * output_depth - 1) * output_width;
input_elem_id += (input_feature_map_count * input_height * input_depth - 1) * input_width + (window_width - WINDOW_WIDTH_LOCAL);
}
float * base_weights = hessian_weights + (((output_feature_map_id * input_feature_map_count + input_feature_map_id) * window_depth + weight_z) * window_height + weight_y) * window_width + weight_x;
int weight_count_per_output_feature_map = input_feature_map_count * window_depth * window_height * window_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
if (j < window_width - weight_x)
atomicAdd(base_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH_LOCAL + j]);
}
}
}
}
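// Weight update with compile-time window width: a single thread covers the full width of the window.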
template<int WINDOW_WIDTH>
__global__ void convolution_3d_update_weights_exact_hess_kernel_kepler(
float * __restrict hessian_weights,
cudaTextureObject_t input_squared_tex,
cudaTextureObject_t output_tex,
const packed_config<6> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int block_size,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int base_entry_id = (blockIdx.y * blockDim.y + threadIdx.y) * block_size;
bool in_bounds = (packed_config_id < packed_config_count) && (base_entry_id < entry_count);
if (in_bounds)
{
int output_neuron_count_per_feature_map = output_depth * output_width * output_height;
packed_config<6> conf = packed_config_list[packed_config_id];
int weight_y = conf.get_val(0);
int weight_z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_y = conf.get_val(3);
int output_z = conf.get_val(4);
int output_feature_map_id = conf.get_val(5);
int iteration_count = min(block_size, entry_count - base_entry_id);
int output_errors_offset = (((base_entry_id * output_feature_map_count + output_feature_map_id) * output_depth + output_z) * output_height + output_y) * output_width;
int input_elem_id = (((base_entry_id * input_feature_map_count + input_feature_map_id) * input_depth + output_z + weight_z) * input_height + output_y + weight_y) * input_width;
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH; ++i)
sums[i] = 0.0F;
for(int t = 0; t < iteration_count; ++t)
{
float input_squared_buf[WINDOW_WIDTH];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH; ++i)
{
input_squared_buf[i] = tex1Dfetch<float>(input_squared_tex, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = tex1Dfetch<float>(output_tex, output_errors_offset + output_neuron_count_per_feature_map * i);
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH - 1; ++i)
input_squared_buf[i] = input_squared_buf[i + 1];
input_squared_buf[WINDOW_WIDTH - 1] = tex1Dfetch<float>(input_squared_tex, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
sums[i * WINDOW_WIDTH + j] += output_error_list[i] * input_squared_buf[j];
output_errors_offset++;
input_elem_id++;
}
output_errors_offset += (output_feature_map_count * output_height * output_depth - 1) * output_width;
input_elem_id += (input_feature_map_count * input_height * input_depth - 1) * input_width;
}
float * base_weights = hessian_weights + (((output_feature_map_id * input_feature_map_count + input_feature_map_id) * window_depth + weight_z) * window_height + weight_y) * WINDOW_WIDTH;
int weight_count_per_output_feature_map = input_feature_map_count * window_depth * window_height * WINDOW_WIDTH;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
atomicAdd(base_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH + j]);
}
}
}
}
convolution_3d_layer_hessian_cuda_kepler::convolution_3d_layer_hessian_cuda_kepler()
{
}
convolution_3d_layer_hessian_cuda_kepler::~convolution_3d_layer_hessian_cuda_kepler()
{
}
#define MAX_BLOCK_SIZE 5
#define MAX_WINDOW_WIDTH 10
#define launch_exact_kernel_const_const(window_width_const, block_size_const) \
convolution_3d_tex_exact_blocked_hess_kernel_kepler<window_width_const,block_size_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*output_neurons_buffer, input_tex, weights_tex, *data[1], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, packed_config_count);
#define launch_exact_kernel_const(window_width, block_size_const) \
switch (window_width) \
{ \
case 1: \
launch_exact_kernel_const_const(1, block_size_const); \
break; \
case 2: \
launch_exact_kernel_const_const(2, block_size_const); \
break; \
case 3: \
launch_exact_kernel_const_const(3, block_size_const); \
break; \
case 4: \
launch_exact_kernel_const_const(4, block_size_const); \
break; \
case 5: \
launch_exact_kernel_const_const(5, block_size_const); \
break; \
case 6: \
launch_exact_kernel_const_const(6, block_size_const); \
break; \
case 7: \
launch_exact_kernel_const_const(7, block_size_const); \
break; \
case 8: \
launch_exact_kernel_const_const(8, block_size_const); \
break; \
case 9: \
launch_exact_kernel_const_const(9, block_size_const); \
break; \
case 10: \
launch_exact_kernel_const_const(10, block_size_const); \
break; \
};
#define launch_exact_kernel(window_width, block_size) \
switch (block_size) \
{ \
case 1: \
launch_exact_kernel_const(window_width, 1); \
break; \
case 2: \
launch_exact_kernel_const(window_width, 2); \
break; \
case 3: \
launch_exact_kernel_const(window_width, 3); \
break; \
case 4: \
launch_exact_kernel_const(window_width, 4); \
break; \
case 5: \
launch_exact_kernel_const(window_width, 5); \
break; \
};
#define launch_kernel_const(block_size_const) \
convolution_3d_tex_blocked_hess_kernel_kepler<block_size_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*output_neurons_buffer, input_tex, weights_tex, *data[1], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, packed_config_count);
#define launch_kernel(block_size) \
switch (block_size) \
{ \
case 1: \
launch_kernel_const(1); \
break; \
case 2: \
launch_kernel_const(2); \
break; \
case 3: \
launch_kernel_const(3); \
break; \
case 4: \
launch_kernel_const(4); \
break; \
case 5: \
launch_kernel_const(5); \
break; \
};
#define launch_backprop_exact_kernel_const_const(window_width_const, block_size_const) \
convolution_3d_square_deriviative_tex_exact_hess_kernel_kepler<window_width_const,block_size_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*input_errors_buffer, output_tex, weights_squared_tex, packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, packed_config_count);
#define launch_backprop_exact_kernel_const(window_width, block_size_const) \
switch (window_width) \
{ \
case 1: \
launch_backprop_exact_kernel_const_const(1, block_size_const); \
break; \
case 2: \
launch_backprop_exact_kernel_const_const(2, block_size_const); \
break; \
case 3: \
launch_backprop_exact_kernel_const_const(3, block_size_const); \
break; \
case 4: \
launch_backprop_exact_kernel_const_const(4, block_size_const); \
break; \
case 5: \
launch_backprop_exact_kernel_const_const(5, block_size_const); \
break; \
case 6: \
launch_backprop_exact_kernel_const_const(6, block_size_const); \
break; \
case 7: \
launch_backprop_exact_kernel_const_const(7, block_size_const); \
break; \
case 8: \
launch_backprop_exact_kernel_const_const(8, block_size_const); \
break; \
case 9: \
launch_backprop_exact_kernel_const_const(9, block_size_const); \
break; \
case 10: \
launch_backprop_exact_kernel_const_const(10, block_size_const); \
break; \
};
#define launch_backprop_exact_kernel(window_width, block_size) \
switch (block_size) \
{ \
case 1: \
launch_backprop_exact_kernel_const(window_width, 1); \
break; \
case 2: \
launch_backprop_exact_kernel_const(window_width, 2); \
break; \
case 3: \
launch_backprop_exact_kernel_const(window_width, 3); \
break; \
case 4: \
launch_backprop_exact_kernel_const(window_width, 4); \
break; \
case 5: \
launch_backprop_exact_kernel_const(window_width, 5); \
break; \
};
#define launch_backprop_kernel_const(block_size_const) \
convolution_3d_square_deriviative_tex_hess_kernel_kepler<block_size_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*input_errors_buffer, output_tex, weights_squared_tex, packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, packed_config_count);
#define launch_backprop_kernel(block_size) \
switch (block_size) \
{ \
case 1: \
launch_backprop_kernel_const(1); \
break; \
case 2: \
launch_backprop_kernel_const(2); \
break; \
case 3: \
launch_backprop_kernel_const(3); \
break; \
case 4: \
launch_backprop_kernel_const(4); \
break; \
case 5: \
launch_backprop_kernel_const(5); \
break; \
};
#define launch_update_weights_exact_kernel_const(window_width_const) \
convolution_3d_update_weights_exact_hess_kernel_kepler<window_width_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*hessian_data[0], input_squared_tex, output_tex, packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, block_size, packed_config_count);
#define launch_update_weights_exact_kernel(window_width) \
switch (window_width) \
{ \
case 1: \
launch_update_weights_exact_kernel_const(1); \
break; \
case 2: \
launch_update_weights_exact_kernel_const(2); \
break; \
case 3: \
launch_update_weights_exact_kernel_const(3); \
break; \
case 4: \
launch_update_weights_exact_kernel_const(4); \
break; \
case 5: \
launch_update_weights_exact_kernel_const(5); \
break; \
case 6: \
launch_update_weights_exact_kernel_const(6); \
break; \
case 7: \
launch_update_weights_exact_kernel_const(7); \
break; \
case 8: \
launch_update_weights_exact_kernel_const(8); \
break; \
case 9: \
launch_update_weights_exact_kernel_const(9); \
break; \
case 10: \
launch_update_weights_exact_kernel_const(10); \
break; \
};
void convolution_3d_layer_hessian_cuda_kepler::enqueue_test(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
cuda_texture weights_tex(data[0]);
cuda_texture input_tex(input_neurons_buffer);
int packed_config_count = output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * forward_output_feature_map_block_count;
const packed_config<3> * packed_config_list = static_cast<const packed_config<3> *>((const void *)*additional_buffers[1]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
forward_x_block_count,
packed_config_count,
entry_count);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
launch_exact_kernel(window_sizes[0], forward_x_block_size);
}
else
{
launch_kernel(forward_x_block_size);
}
}
void convolution_3d_layer_hessian_cuda_kepler::enqueue_backprop(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_squared,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
cuda_texture output_tex(output_errors_buffer);
cuda_texture weights_squared_tex(data_squared[0]);
int packed_config_count = input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2]* backward_input_feature_map_block_count;
const packed_config<3> * packed_config_list = static_cast<const packed_config<3> *>((const void *)*additional_buffers[3]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
backward_x_block_count,
packed_config_count,
entry_count);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size);
}
else
{
launch_backprop_kernel(backward_x_block_size);
}
}
void convolution_3d_layer_hessian_cuda_kepler::enqueue_update_hessian(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& hessian_data,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
// Update weights
{
// Store input neurons multiplied element-wise by themselves
cuda_util::multiply_by_itself(
*cuda_config,
*input_neurons_buffer,
*additional_buffers[0],
input_elem_count_per_entry * entry_count,
stream_id);
cuda_texture input_squared_tex(additional_buffers[0]);
cuda_texture output_tex(output_errors_buffer);
int block_size = get_weights_update_block_size(entry_count);
int block_count = (entry_count + block_size - 1) / block_size;
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
int packed_config_count = window_sizes[1] * window_sizes[2] * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count;
const packed_config<6> * packed_config_list = static_cast<const packed_config<6> *>((const void *)*additional_buffers[2]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
packed_config_count,
block_count,
1);
launch_update_weights_exact_kernel(window_sizes[0]);
}
else
{
int packed_config_count = window_sizes[1] * window_sizes[2] * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count;
const packed_config<6> * packed_config_list = static_cast<const packed_config<6> *>((const void *)*additional_buffers[2]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
updater_window_x_block_count,
packed_config_count,
block_count);
convolution_3d_update_weights_hess_kernel_kepler<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*hessian_data[0],
input_squared_tex,
output_tex,
packed_config_list,
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
output_configuration_specific.dimension_sizes[2],
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
input_configuration_specific.dimension_sizes[2],
window_sizes[0],
window_sizes[1],
window_sizes[2],
input_configuration_specific.feature_map_count,
output_configuration_specific.feature_map_count,
entry_count,
block_size,
packed_config_count);
}
}
// Update biases
{
int block_size = get_bias_update_block_size(entry_count);
int block_count = (entry_count + block_size - 1) / block_size;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_feature_map,
1,
block_count);
kernel_dims.first.y = output_configuration_specific.feature_map_count;
int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
int smem_size = threadblock_size * sizeof(float);
convolution_3d_update_biases_hess_kernel_kepler<<<kernel_dims.first, kernel_dims.second, smem_size, stream_id>>>(
*hessian_data[1],
*output_errors_buffer,
block_size,
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count);
}
}
int convolution_3d_layer_hessian_cuda_kepler::get_block_size(int width)
{
int block_count = (width + MAX_BLOCK_SIZE - 1) / MAX_BLOCK_SIZE;
int block_size = (width + block_count - 1) / block_count;
return block_size;
}
void convolution_3d_layer_hessian_cuda_kepler::hessian_configured()
{
std::tr1::shared_ptr<const convolution_layer> layer_derived = std::tr1::dynamic_pointer_cast<const convolution_layer>(layer_schema);
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
window_sizes.push_back(static_cast<int>(*it));
forward_x_block_size = get_block_size(output_configuration_specific.dimension_sizes[0]);
forward_x_block_count = (output_configuration_specific.dimension_sizes[0] + forward_x_block_size - 1) / forward_x_block_size;
forward_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_window_x_block_count = (window_sizes[0] <= MAX_WINDOW_WIDTH) ? 1 : (window_sizes[0] + WINDOW_WIDTH_LOCAL - 1) / WINDOW_WIDTH_LOCAL;
{
std::tr1::array<int, 6> size_list;
size_list[0] = window_sizes[1];
size_list[1] = window_sizes[2];
size_list[2] = input_configuration_specific.feature_map_count;
size_list[3] = output_configuration_specific.dimension_sizes[1];
size_list[4] = output_configuration_specific.dimension_sizes[2];
size_list[5] = updater_output_feature_map_block_count;
space_filling_curve<6>::fill_pattern(size_list, updater_config_ordered_list);
}
if (backprop_required)
{
backward_x_block_size = get_block_size(input_configuration_specific.dimension_sizes[0]);
backward_x_block_count = (input_configuration_specific.dimension_sizes[0] + backward_x_block_size - 1) / backward_x_block_size;
backward_input_feature_map_block_count = (input_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
}
}
bool convolution_3d_layer_hessian_cuda_kepler::is_in_place_backprop() const
{
return false;
}
std::vector<size_t> convolution_3d_layer_hessian_cuda_kepler::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(input_elem_count_per_entry * sizeof(float));
return res;
}
std::vector<unsigned int> convolution_3d_layer_hessian_cuda_kepler::get_linear_addressing_through_texture_per_entry() const
{
std::vector<unsigned int> res;
res.push_back(input_elem_count_per_entry);
res.push_back(output_elem_count_per_entry);
return res;
}
int convolution_3d_layer_hessian_cuda_kepler::get_bias_update_block_size(int entry_count)
{
int block_size = std::min<int>(std::max<int>(static_cast<int>(sqrtf(static_cast<float>(entry_count))), 1), entry_count);
return block_size;
}
int convolution_3d_layer_hessian_cuda_kepler::get_weights_update_block_size(int entry_count)
{
int block_size = std::min<int>(std::max<int>(static_cast<int>(sqrtf(static_cast<float>(entry_count))), 1), entry_count);
return block_size;
}
std::vector<size_t> convolution_3d_layer_hessian_cuda_kepler::get_sizes_of_additional_buffers_fixed() const
{
std::vector<size_t> res;
res.push_back(sizeof(packed_config<3>) * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * forward_output_feature_map_block_count);
res.push_back(sizeof(packed_config<6>) * window_sizes[1] * window_sizes[2] * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count);
if (backprop_required)
{
res.push_back(sizeof(packed_config<3>) * input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2] * backward_input_feature_map_block_count);
}
return res;
}
void convolution_3d_layer_hessian_cuda_kepler::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
{
{
std::vector<packed_config<3> > task_list;
packed_config<3> new_elem;
for(int output_feature_map_block_id = 0; output_feature_map_block_id < forward_output_feature_map_block_count; ++output_feature_map_block_id)
{
new_elem.set_val(2, output_feature_map_block_id * FEATURE_MAP_BLOCK_SIZE);
for(int z = 0; z < output_configuration_specific.dimension_sizes[2]; ++z)
{
new_elem.set_val(1, z);
for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y)
{
new_elem.set_val(0, y);
task_list.push_back(new_elem);
}
}
}
cuda_safe_call(cudaMemcpy(*additional_buffers[1], &(*task_list.begin()), sizeof(packed_config<3>) * task_list.size(), cudaMemcpyHostToDevice));
}
{
std::vector<packed_config<6> > task_list;
packed_config<6> new_elem;
for(std::vector<std::tr1::array<int, 6> >::const_iterator it = updater_config_ordered_list.begin(); it != updater_config_ordered_list.end(); ++it)
{
new_elem.set_val(0, it->at(0));
new_elem.set_val(1, it->at(1));
new_elem.set_val(2, it->at(2));
new_elem.set_val(3, it->at(3));
new_elem.set_val(4, it->at(4));
new_elem.set_val(5, it->at(5) * FEATURE_MAP_BLOCK_SIZE);
task_list.push_back(new_elem);
}
cuda_safe_call(cudaMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(packed_config<6>) * task_list.size(), cudaMemcpyHostToDevice));
}
if (backprop_required)
{
std::vector<packed_config<3> > task_list;
packed_config<3> new_elem;
for(int input_feature_map_block_id = 0; input_feature_map_block_id < backward_input_feature_map_block_count; ++input_feature_map_block_id)
{
new_elem.set_val(2, input_feature_map_block_id * FEATURE_MAP_BLOCK_SIZE);
for(int z = 0; z < input_configuration_specific.dimension_sizes[2]; ++z)
{
new_elem.set_val(1, z);
for(int y = 0; y < input_configuration_specific.dimension_sizes[1]; ++y)
{
new_elem.set_val(0, y);
task_list.push_back(new_elem);
}
}
}
cuda_safe_call(cudaMemcpy(*additional_buffers[3], &(*task_list.begin()), sizeof(packed_config<3>) * task_list.size(), cudaMemcpyHostToDevice));
}
}
}
}
|
3ed1e04032acfe604e96efd2f25b6b9363ae3139.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <hiprand/hiprand.h> // rand lib for host
#include <hiprand/hiprand_kernel.h> // rand lib for device
#include <stdio.h>
#include <iostream>
using namespace std;
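// Minimal example: copy an object holding a fixed-size array to the device, let each GPU thread add increase() to one element, then copy the object back to the host.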
const int N = 10;
class Add
{
public:
int a[N];
Add();
Add(const int n);
__host__ __device__ int increase();
int randNum(int n) { return rand() % n; };
void show() const;
~Add();
};
Add::Add()
{
for (size_t i = 0; i < N; i++)
{
a[i] = 0;
}
}
Add::Add(const int n)
{
for (size_t i = 0; i < N; i++)
{
a[i] = this->randNum(n);
cout << a[i] << ", ";
}
cout << endl;
}
__host__ __device__ int Add::increase()
{
return 10;
}
void Add::show() const
{
for (size_t i = 0; i < N; i++)
{
cout << a[i] << ", ";
}
cout << endl;
}
Add::~Add()
{
}
__global__ void add(Add *ad, const int N)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < N)
{
ad->a[i] = ad->a[i] + ad->increase();
}
}
void example(Add *h_a)
{
dim3 blocks(2);
dim3 threads(5);
/*int *d_a;
int size = sizeof(int)*N;*/
Add *d_a;
size_t size = sizeof(Add);
hipMalloc(&d_a, size);
hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add) , dim3(blocks), dim3(threads) , 0, 0, d_a, N);
hipDeviceSynchronize();
hipMemcpy(h_a, d_a, size, hipMemcpyDeviceToHost);
hipFree(d_a);
}
int main()
{
Add *d = new Add(N);
cout << d->increase() << endl;
example(d);
d->show();
//cout << d->a[0] << endl;
system("pause");
delete d;
return 0;
} | 3ed1e04032acfe604e96efd2f25b6b9363ae3139.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <curand.h> // rand lib for host
#include <curand_kernel.h> // rand lib for device
#include <stdio.h>
#include <iostream>
using namespace std;
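// Minimal example: copy an object holding a fixed-size array to the device, let each GPU thread add increase() to one element, then copy the object back to the host.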
const int N = 10;
class Add
{
public:
int a[N];
Add();
Add(const int n);
__host__ __device__ int increase();
int randNum(int n) { return rand() % n; };
void show() const;
~Add();
};
Add::Add()
{
for (size_t i = 0; i < N; i++)
{
a[i] = 0;
}
}
Add::Add(const int n)
{
for (size_t i = 0; i < N; i++)
{
a[i] = this->randNum(n);
cout << a[i] << ", ";
}
cout << endl;
}
__host__ __device__ int Add::increase()
{
return 10;
}
void Add::show() const
{
for (size_t i = 0; i < N; i++)
{
cout << a[i] << ", ";
}
cout << endl;
}
Add::~Add()
{
}
__global__ void add(Add *ad, const int N)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < N)
{
ad->a[i] = ad->a[i] + ad->increase();
}
}
void example(Add *h_a)
{
dim3 blocks(2);
dim3 threads(5);
/*int *d_a;
int size = sizeof(int)*N;*/
Add *d_a;
size_t size = sizeof(Add);
cudaMalloc(&d_a, size);
cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
add <<<blocks, threads >>> (d_a, N);
cudaDeviceSynchronize();
cudaMemcpy(h_a, d_a, size, cudaMemcpyDeviceToHost);
cudaFree(d_a);
}
int main()
{
Add *d = new Add(N);
cout << d->increase() << endl;
example(d);
d->show();
//cout << d->a[0] << endl;
system("pause");
delete d;
return 0;
} |
aa4b668cbba3ae40526171ae44b5f96e1891d8ff.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/extrema.h>
#include <thrust/count.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "NVStringsImpl.h"
#include "custring_view.cuh"
#include "regex/regex.cuh"
#include "Timing.h"
#ifdef __INTELLISENSE__
void __syncthreads();
#endif
//
unsigned int NVStrings::compare( const char* str, int* results, bool todevice )
{
unsigned int count = size();
if( str==0 || results==0 || count==0 )
return 0;
unsigned int bytes = (unsigned int)strlen(str);
if( bytes==0 )
return 0;
auto execpol = rmm::exec_policy(0);
char* d_str = 0;
RMM_ALLOC(&d_str,bytes,0);
hipMemcpy(d_str,str,bytes,hipMemcpyHostToDevice);
int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,sizeof(int)*count,0);
double st = GetTime();
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, bytes, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->compare(d_str,bytes);
else
d_rtn[idx] = (d_str ? -1: 0);
});
//
hipError_t err = hipDeviceSynchronize();
double et = GetTime();
if( err != hipSuccess )
{
fprintf(stderr,"nvs-compare(%s,%p,%d)\n",str,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("compare",0.0,(et-st));
RMM_FREE(d_str,0);
//
if( !todevice )
{ // copy result back to host
hipMemcpy(results,d_rtn,sizeof(int)*count,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return count;
}
// searches from the beginning of each string
unsigned int NVStrings::find( const char* str, int start, int end, int* results, bool todevice )
{
unsigned int count = size();
if( str==0 || count==0 || results==0 )
return 0;
unsigned int bytes = (unsigned int)strlen(str)+1; // the +1 allows searching for empty string
if( start < 0 )
start = 0;
auto execpol = rmm::exec_policy(0);
char* d_str = 0;
RMM_ALLOC(&d_str,bytes,0);
hipMemcpy(d_str,str,bytes,hipMemcpyHostToDevice);
int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(int),0);
custring_view** d_strings = pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, bytes, start, end, d_rtn] __device__(unsigned int idx){
//__shared__ char tgt[24];
char* dtgt = d_str;
//if( bytes<24 )
//{
// dtgt = tgt;
// if( threadIdx.x==0 )
// memcpy(dtgt,d_str,bytes);
//}
//__syncthreads();
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->find(dtgt,bytes-1,start,end-start);
else
d_rtn[idx] = -2; // indicate null to caller
});
//
hipError_t err = hipDeviceSynchronize();
double et = GetTime();
if( err != hipSuccess )
{
fprintf(stderr,"nvs-find(%s,%d,%d,%p,%d)\n",str,start,end,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("find",0.0,(et-st));
RMM_FREE(d_str,0);
// count the number of successful finds
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(int val){ return val!=-1; });
if( !todevice )
{ // copy result back to host
hipMemcpy(results,d_rtn,sizeof(int)*count,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return rtn;
}
// searches from the beginning of each string and specified individual starting positions
unsigned int NVStrings::find_from( const char* str, int* starts, int* ends, int* results, bool todevice )
{
unsigned int count = size();
if( str==0 || count==0 || results==0 )
return 0;
unsigned int bytes = (unsigned int)strlen(str)+1; // the +1 allows searching for empty string
auto execpol = rmm::exec_policy(0);
char* d_str = 0;
RMM_ALLOC(&d_str,bytes,0);
hipMemcpy(d_str,str,bytes,hipMemcpyHostToDevice);
int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(int),0);
custring_view_array d_strings = pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, bytes, starts, ends, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
int pos = (starts ? starts[idx] : 0);
int len = (ends ? (ends[idx]-pos) : -1);
d_rtn[idx] = dstr->find(d_str,bytes-1,pos,len);
}
else
d_rtn[idx] = -2; // indicate null to caller
});
//
hipError_t err = hipDeviceSynchronize();
double et = GetTime();
if( err != hipSuccess )
{
fprintf(stderr,"nvs-find_from(%s,%p,%p,%p,%d)\n",str,starts,ends,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("find_from",0.0,(et-st));
RMM_FREE(d_str,0);
// count the number of successful finds
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(int val){ return val!=-1; });
if( !todevice )
{ // copy result back to host
hipMemcpy(results,d_rtn,sizeof(int)*count,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return rtn;
}
// searches from the end of each string
unsigned int NVStrings::rfind( const char* str, int start, int end, int* results, bool todevice )
{
unsigned int count = size();
if( str==0 || count==0 || results==0 )
return 0;
unsigned int bytes = (unsigned int)strlen(str)+1;
if( start < 0 )
start = 0;
auto execpol = rmm::exec_policy(0);
char* d_str = 0;
RMM_ALLOC(&d_str,bytes,0);
hipMemcpy(d_str,str,bytes,hipMemcpyHostToDevice);
int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(int),0);
custring_view** d_strings = pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, bytes, start, end, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->rfind(d_str,bytes-1,start,end-start);
else
d_rtn[idx] = -2; // indicate null to caller
});
//
hipError_t err = hipDeviceSynchronize();
double et = GetTime();
if( err != hipSuccess )
{
fprintf(stderr,"nvs-rfind(%s,%d,%d,%p,%d)\n",str,start,end,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("rfind",0.0,(et-st));
RMM_FREE(d_str,0);
// count the number of successful finds
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(int val){ return val!=-1; });
if( !todevice )
{ // copy result back to host
hipMemcpy(results,d_rtn,sizeof(int)*count,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return rtn;
}
//
unsigned int NVStrings::find_multiple( NVStrings& strs, int* results, bool todevice )
{
unsigned int count = size();
unsigned int tcount = strs.size();
if( results==0 || count==0 || tcount==0 )
return 0;
auto execpol = rmm::exec_policy(0);
int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,tcount*count*sizeof(int),0);
//
custring_view_array d_strings = pImpl->getStringsPtr();
custring_view_array d_targets = strs.pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_targets, tcount, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
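// Result layout is row-major: d_rtn[idx*tcount + jdx] holds the find position, or -2 when either string is null.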
for( int jdx=0; jdx < tcount; ++jdx )
{
custring_view* dtgt = d_targets[jdx];
d_rtn[(idx*tcount)+jdx] = ( (dstr && dtgt) ? dstr->find(*dtgt) : -2 );
}
});
//
hipError_t err = hipDeviceSynchronize();
double et = GetTime();
if( err != hipSuccess )
{
fprintf(stderr,"nvs-find_multiple(%u,%p,%d)\n",tcount,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("find_multiple",0.0,(et-st));
//
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(int val){ return val!=-1; });
if( !todevice )
{ // copy result back to host
hipMemcpy(results,d_rtn,sizeof(int)*count*tcount,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return rtn;
}
// for each string, return substring(s) which match specified pattern
int NVStrings::findall_record( const char* pattern, std::vector<NVStrings*>& results )
{
if( pattern==0 )
return -1;
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags(),count);
delete ptn32;
// compute counts of each match and size of the buffers
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<int> sizes(count,0);
int* d_sizes = sizes.data().get();
rmm::device_vector<int> counts(count,0);
int* d_counts = counts.data().get();
double st1 = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[prog, d_strings, d_counts, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
unsigned int tsize = 0;
int fnd = 0, end = (int)dstr->chars_count();
int spos = 0, epos = end;
int result = prog->find(idx,dstr,spos,epos);
while(result > 0)
{
unsigned int bytes = (dstr->byte_offset_for(epos)-dstr->byte_offset_for(spos));
unsigned int nchars = (epos-spos);
unsigned int size = custring_view::alloc_size(bytes,nchars);
tsize += ALIGN_SIZE(size);
spos = epos;
epos = end;
++fnd;
result = prog->find(idx,dstr,spos,epos); // next one
}
d_sizes[idx] = tsize;
d_counts[idx] = fnd;
});
hipDeviceSynchronize();
//
// create rows of buffers
thrust::host_vector<int> hcounts(counts); // copies counts from device
thrust::host_vector<custring_view_array> hrows(count,nullptr);
thrust::host_vector<char*> hbuffers(count,nullptr);
for( unsigned int idx=0; idx < count; ++idx )
{
int rcount = hcounts[idx];
NVStrings* row = new NVStrings(rcount);
results.push_back(row);
if( rcount==0 )
continue;
hrows[idx] = row->pImpl->getStringsPtr();
int size = sizes[idx];
char* d_buffer = 0;
RMM_ALLOC(&d_buffer,size,0);
row->pImpl->setMemoryBuffer(d_buffer,size);
hbuffers[idx] = d_buffer;
}
// copy substrings into buffers
rmm::device_vector<custring_view_array> rows(hrows); // copies hrows to device
custring_view_array* d_rows = rows.data().get();
rmm::device_vector<char*> buffers(hbuffers); // copies hbuffers to device
char** d_buffers = buffers.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[prog, d_strings, d_counts, d_buffers, d_sizes, d_rows] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int dcount = d_counts[idx];
if( dcount < 1 )
return;
char* buffer = (char*)d_buffers[idx];
custring_view_array drow = d_rows[idx];
int spos = 0, nchars = (int)dstr->chars_count();
for( int i=0; i < dcount; ++i )
{
int epos = nchars;
prog->find(idx,dstr,spos,epos);
custring_view* str = dstr->substr((unsigned)spos,(unsigned)(epos-spos),1,buffer);
drow[i] = str;
buffer += ALIGN_SIZE(str->alloc_size());
spos = epos;
}
});
//
printCudaError(hipDeviceSynchronize(),"nvs-findall_record");
dreprog::destroy(prog);
return count;
}
// same as findall but strings are returned organized in column-major
int NVStrings::findall( const char* pattern, std::vector<NVStrings*>& results )
{
if( pattern==0 )
return -1;
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags(),count);
delete ptn32;
// compute counts of each match and size of the buffers
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<int> counts(count,0);
int* d_counts = counts.data().get();
double st1 = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[prog, d_strings, d_counts] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int fnd = 0, nchars = (int)dstr->chars_count();
int begin = 0, end = nchars;
int result = prog->find(idx,dstr,begin,end);
while(result > 0)
{
++fnd;
begin = end;
end = nchars;
result = prog->find(idx,dstr,begin,end); // next one
}
d_counts[idx] = fnd;
});
int columns = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() );
// boundary case: if no columns, return one null column (issue #119)
if( columns==0 )
results.push_back(new NVStrings(count));
// create columns of nvstrings
for( int col=0; col < columns; ++col )
{
// build index for each string -- collect pointers and lengths
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[prog, d_strings, d_counts, col, d_indexes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
d_indexes[idx].first = 0; // initialize to
d_indexes[idx].second = 0; // null string
if( !dstr || (col >= d_counts[idx]) )
return;
int spos = 0, nchars = (int)dstr->chars_count();
int epos = nchars;
prog->find(idx,dstr,spos,epos);
for( int c=0; c < col; ++c )
{
spos = epos; // update
epos = nchars; // parameters
prog->find(idx,dstr,spos,epos);
}
// this will be the string for this column
if( spos < epos )
{
spos = dstr->byte_offset_for(spos); // convert char pos
epos = dstr->byte_offset_for(epos); // to byte offset
d_indexes[idx].first = dstr->data() + spos;
d_indexes[idx].second = (epos-spos);
}
else
{ // create empty string instead of a null one
d_indexes[idx].first = dstr->data();
}
});
hipError_t err = hipDeviceSynchronize();
if( err != hipSuccess )
{
fprintf(stderr,"nvs-findall(%s): col=%d\n",pattern,col);
printCudaError(err);
}
// build new instance from the index
NVStrings* column = NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count);
results.push_back(column);
}
dreprog::destroy(prog);
return (unsigned int)results.size();
}
// does specified string occur in each string
int NVStrings::contains( const char* str, bool* results, bool todevice )
{
if( str==0 || results==0 )
return -1;
unsigned int count = size();
if( count==0 )
return 0;
unsigned int bytes = (unsigned int)strlen(str)+1; // the +1 allows searching for empty string
auto execpol = rmm::exec_policy(0);
char* d_str = 0;
RMM_ALLOC(&d_str,bytes,0);
hipMemcpy(d_str,str,bytes,hipMemcpyHostToDevice);
bool* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(bool),0);
custring_view_array d_strings = pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, bytes, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->find(d_str,bytes-1)>=0;
else
d_rtn[idx] = false;
});
//
hipError_t err = hipDeviceSynchronize();
double et = GetTime();
if( err != hipSuccess )
{
fprintf(stderr,"nvs-contains(%s,%p,%d)\n",str,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("contains",0.0,(et-st));
RMM_FREE(d_str,0);
// count the number of successful finds
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(bool val) {return val;} );
if( !todevice )
{ // copy result back to host
hipMemcpy(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)rtn;
}
// regex version of contain() above
int NVStrings::contains_re( const char* pattern, bool* results, bool todevice )
{
if( pattern==0 || results==0 )
return -1;
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags(),count);
delete ptn32;
bool* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(bool),0);
custring_view_array d_strings = pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, prog, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = prog->contains(idx,dstr)==1;
else
d_rtn[idx] = false;
});
//
hipError_t err = hipDeviceSynchronize();
double et = GetTime();
if( err != hipSuccess )
{
fprintf(stderr,"nvs-contains_re(%s,%p,%d)\n",pattern,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("contains_re",0.0,(et-st));
// destroy compiled regex
dreprog::destroy(prog);
// count the number of successful finds
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(bool val){ return val; });
if( !todevice )
{ // copy result back to host
hipMemcpy(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)rtn;
}
// match is like contains() except the pattern must match the beginning of the string only
int NVStrings::match( const char* pattern, bool* results, bool todevice )
{
if( pattern==0 || results==0 )
return -1;
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags(),count);
delete ptn32;
bool* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(bool),0);
custring_view_array d_strings = pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, prog, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = prog->match(idx,dstr)==1;
else
d_rtn[idx] = false;
});
//
hipError_t err = hipDeviceSynchronize();
double et = GetTime();
if( err != hipSuccess )
{
fprintf(stderr,"nvs-match(%s,%p,%d)\n",pattern,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("match",0.0,(et-st));
dreprog::destroy(prog);
// count the number of successful finds
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(bool val){ return val; });
if( !todevice )
{ // copy result back to host
hipMemcpy(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)rtn;
}
// counts number of times the regex pattern matches a string within each string
int NVStrings::count_re( const char* pattern, int* results, bool todevice )
{
if( pattern==0 || results==0 )
return -1;
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags(),count);
delete ptn32;
int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(int),0);
custring_view_array d_strings = pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, prog, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
int fnd = -1;
if( dstr )
{
fnd = 0;
int nchars = (int)dstr->chars_count();
int begin = 0, end = nchars;
int result = prog->find(idx,dstr,begin,end);
while(result > 0)
{
++fnd; // count how many we find
begin = end;
end = nchars;
result = prog->find(idx,dstr,begin,end);
}
}
d_rtn[idx] = fnd;
});
//
hipError_t err = hipDeviceSynchronize();
double et = GetTime();
if( err != hipSuccess )
{
fprintf(stderr,"nvs-count_re(%s,%p,%d)\n",pattern,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("count_re",0.0,(et-st));
dreprog::destroy(prog);
// count the number of successful finds
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(int val){ return val>0; });
if( !todevice )
{ // copy result back to host
hipMemcpy(results,d_rtn,sizeof(int)*count,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)rtn;
}
//
unsigned int NVStrings::startswith( const char* str, bool* results, bool todevice )
{
unsigned int count = size();
if( str==0 || count==0 || results==0 )
return 0;
unsigned int bytes = (unsigned int)strlen(str)+1; // the +1 allows searching for empty string
auto execpol = rmm::exec_policy(0);
char* d_str = 0;
RMM_ALLOC(&d_str,bytes,0);
hipMemcpy(d_str,str,bytes,hipMemcpyHostToDevice);
bool* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(bool),0);
custring_view_array d_strings = pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, bytes, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->starts_with(d_str,bytes-1);
else
d_rtn[idx] = false;
});
//
hipError_t err = hipDeviceSynchronize();
double et = GetTime();
if( err != hipSuccess )
{
fprintf(stderr,"nvs-startswith(%s,%p,%d)\n",str,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("startswith",0.0,(et-st));
RMM_FREE(d_str,0);
// count the number of successful finds
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(bool val) {return val;} );
if( !todevice )
{ // copy result back to host
hipMemcpy(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return rtn;
}
//
unsigned int NVStrings::endswith( const char* str, bool* results, bool todevice )
{
unsigned int count = size();
if( str==0 || count==0 || results==0 )
return 0;
unsigned int bytes = (unsigned int)strlen(str)+1; // the +1 allows searching for empty string
auto execpol = rmm::exec_policy(0);
char* d_str = 0;
RMM_ALLOC(&d_str,bytes,0);
hipMemcpy(d_str,str,bytes,hipMemcpyHostToDevice);
bool* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(bool),0);
custring_view_array d_strings = pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, bytes, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->ends_with(d_str,bytes-1);
else
d_rtn[idx] = false;
});
//
hipError_t err = hipDeviceSynchronize();
double et = GetTime();
if( err != hipSuccess )
{
fprintf(stderr,"nvs-endswith(%s,%p,%d)\n",str,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("endswith",0.0,(et-st));
RMM_FREE(d_str,0);
// count the number of successful finds
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(bool val) {return val;} );
if( !todevice )
{ // copy result back to host
hipMemcpy(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return rtn;
}
| aa4b668cbba3ae40526171ae44b5f96e1891d8ff.cu |
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/extrema.h>
#include <thrust/count.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "NVStringsImpl.h"
#include "custring_view.cuh"
#include "regex/regex.cuh"
#include "Timing.h"
#ifdef __INTELLISENSE__
void __syncthreads();
#endif
//
unsigned int NVStrings::compare( const char* str, int* results, bool todevice )
{
unsigned int count = size();
if( str==0 || results==0 || count==0 )
return 0;
unsigned int bytes = (unsigned int)strlen(str);
if( bytes==0 )
return 0;
auto execpol = rmm::exec_policy(0);
char* d_str = 0;
RMM_ALLOC(&d_str,bytes,0);
cudaMemcpy(d_str,str,bytes,cudaMemcpyHostToDevice);
int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,sizeof(int)*count,0);
double st = GetTime();
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, bytes, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->compare(d_str,bytes);
else
d_rtn[idx] = (d_str ? -1: 0);
});
//
cudaError_t err = cudaDeviceSynchronize();
double et = GetTime();
if( err != cudaSuccess )
{
fprintf(stderr,"nvs-compare(%s,%p,%d)\n",str,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("compare",0.0,(et-st));
RMM_FREE(d_str,0);
//
if( !todevice )
{ // copy result back to host
cudaMemcpy(results,d_rtn,sizeof(int)*count,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return count;
}
// searches from the beginning of each string
unsigned int NVStrings::find( const char* str, int start, int end, int* results, bool todevice )
{
unsigned int count = size();
if( str==0 || count==0 || results==0 )
return 0;
unsigned int bytes = (unsigned int)strlen(str)+1; // the +1 allows searching for empty string
if( start < 0 )
start = 0;
auto execpol = rmm::exec_policy(0);
char* d_str = 0;
RMM_ALLOC(&d_str,bytes,0);
cudaMemcpy(d_str,str,bytes,cudaMemcpyHostToDevice);
int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(int),0);
custring_view** d_strings = pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, bytes, start, end, d_rtn] __device__(unsigned int idx){
//__shared__ char tgt[24];
char* dtgt = d_str;
//if( bytes<24 )
//{
// dtgt = tgt;
// if( threadIdx.x==0 )
// memcpy(dtgt,d_str,bytes);
//}
//__syncthreads();
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->find(dtgt,bytes-1,start,end-start);
else
d_rtn[idx] = -2; // indicate null to caller
});
//
cudaError_t err = cudaDeviceSynchronize();
double et = GetTime();
if( err != cudaSuccess )
{
fprintf(stderr,"nvs-find(%s,%d,%d,%p,%d)\n",str,start,end,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("find",0.0,(et-st));
RMM_FREE(d_str,0);
// count the number of successful finds
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(int val){ return val!=-1; });
if( !todevice )
{ // copy result back to host
cudaMemcpy(results,d_rtn,sizeof(int)*count,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return rtn;
}
// searches from the beginning of each string and specified individual starting positions
unsigned int NVStrings::find_from( const char* str, int* starts, int* ends, int* results, bool todevice )
{
unsigned int count = size();
if( str==0 || count==0 || results==0 )
return 0;
unsigned int bytes = (unsigned int)strlen(str)+1; // the +1 allows searching for empty string
auto execpol = rmm::exec_policy(0);
char* d_str = 0;
RMM_ALLOC(&d_str,bytes,0);
cudaMemcpy(d_str,str,bytes,cudaMemcpyHostToDevice);
int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(int),0);
custring_view_array d_strings = pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, bytes, starts, ends, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
int pos = (starts ? starts[idx] : 0);
int len = (ends ? (ends[idx]-pos) : -1);
d_rtn[idx] = dstr->find(d_str,bytes-1,pos,len);
}
else
d_rtn[idx] = -2; // indicate null to caller
});
//
cudaError_t err = cudaDeviceSynchronize();
double et = GetTime();
if( err != cudaSuccess )
{
fprintf(stderr,"nvs-find_from(%s,%p,%p,%p,%d)\n",str,starts,ends,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("find_from",0.0,(et-st));
RMM_FREE(d_str,0);
// count the number of successful finds
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(int val){ return val!=-1; });
if( !todevice )
{ // copy result back to host
cudaMemcpy(results,d_rtn,sizeof(int)*count,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return rtn;
}
// searches from the end of each string
unsigned int NVStrings::rfind( const char* str, int start, int end, int* results, bool todevice )
{
unsigned int count = size();
if( str==0 || count==0 || results==0 )
return 0;
unsigned int bytes = (unsigned int)strlen(str)+1;
if( start < 0 )
start = 0;
auto execpol = rmm::exec_policy(0);
char* d_str = 0;
RMM_ALLOC(&d_str,bytes,0);
cudaMemcpy(d_str,str,bytes,cudaMemcpyHostToDevice);
int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(int),0);
custring_view** d_strings = pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, bytes, start, end, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->rfind(d_str,bytes-1,start,end-start);
else
d_rtn[idx] = -2; // indicate null to caller
});
//
cudaError_t err = cudaDeviceSynchronize();
double et = GetTime();
if( err != cudaSuccess )
{
fprintf(stderr,"nvs-rfind(%s,%d,%d,%p,%d)\n",str,start,end,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("rfind",0.0,(et-st));
RMM_FREE(d_str,0);
// count the number of successful finds
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(int val){ return val!=-1; });
if( !todevice )
{ // copy result back to host
cudaMemcpy(results,d_rtn,sizeof(int)*count,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return rtn;
}
//
unsigned int NVStrings::find_multiple( NVStrings& strs, int* results, bool todevice )
{
unsigned int count = size();
unsigned int tcount = strs.size();
if( results==0 || count==0 || tcount==0 )
return 0;
auto execpol = rmm::exec_policy(0);
int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,tcount*count*sizeof(int),0);
//
custring_view_array d_strings = pImpl->getStringsPtr();
custring_view_array d_targets = strs.pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_targets, tcount, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
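// Result layout is row-major: d_rtn[idx*tcount + jdx] holds the find position, or -2 when either string is null.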
for( int jdx=0; jdx < tcount; ++jdx )
{
custring_view* dtgt = d_targets[jdx];
d_rtn[(idx*tcount)+jdx] = ( (dstr && dtgt) ? dstr->find(*dtgt) : -2 );
}
});
//
cudaError_t err = cudaDeviceSynchronize();
double et = GetTime();
if( err != cudaSuccess )
{
fprintf(stderr,"nvs-find_multiple(%u,%p,%d)\n",tcount,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("find_multiple",0.0,(et-st));
//
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(int val){ return val!=-1; });
if( !todevice )
{ // copy result back to host
cudaMemcpy(results,d_rtn,sizeof(int)*count*tcount,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return rtn;
}
// for each string, return substring(s) which match specified pattern
int NVStrings::findall_record( const char* pattern, std::vector<NVStrings*>& results )
{
if( pattern==0 )
return -1;
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags(),count);
delete ptn32;
// compute counts of each match and size of the buffers
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<int> sizes(count,0);
int* d_sizes = sizes.data().get();
rmm::device_vector<int> counts(count,0);
int* d_counts = counts.data().get();
double st1 = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[prog, d_strings, d_counts, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
unsigned int tsize = 0;
int fnd = 0, end = (int)dstr->chars_count();
int spos = 0, epos = end;
int result = prog->find(idx,dstr,spos,epos);
while(result > 0)
{
unsigned int bytes = (dstr->byte_offset_for(epos)-dstr->byte_offset_for(spos));
unsigned int nchars = (epos-spos);
unsigned int size = custring_view::alloc_size(bytes,nchars);
tsize += ALIGN_SIZE(size);
spos = epos;
epos = end;
++fnd;
result = prog->find(idx,dstr,spos,epos); // next one
}
d_sizes[idx] = tsize;
d_counts[idx] = fnd;
});
cudaDeviceSynchronize();
//
// create rows of buffers
thrust::host_vector<int> hcounts(counts); // copies counts from device
thrust::host_vector<custring_view_array> hrows(count,nullptr);
thrust::host_vector<char*> hbuffers(count,nullptr);
for( unsigned int idx=0; idx < count; ++idx )
{
int rcount = hcounts[idx];
NVStrings* row = new NVStrings(rcount);
results.push_back(row);
if( rcount==0 )
continue;
hrows[idx] = row->pImpl->getStringsPtr();
int size = sizes[idx];
char* d_buffer = 0;
RMM_ALLOC(&d_buffer,size,0);
row->pImpl->setMemoryBuffer(d_buffer,size);
hbuffers[idx] = d_buffer;
}
// copy substrings into buffers
rmm::device_vector<custring_view_array> rows(hrows); // copies hrows to device
custring_view_array* d_rows = rows.data().get();
rmm::device_vector<char*> buffers(hbuffers); // copies hbuffers to device
char** d_buffers = buffers.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[prog, d_strings, d_counts, d_buffers, d_sizes, d_rows] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int dcount = d_counts[idx];
if( dcount < 1 )
return;
char* buffer = (char*)d_buffers[idx];
custring_view_array drow = d_rows[idx];
int spos = 0, nchars = (int)dstr->chars_count();
for( int i=0; i < dcount; ++i )
{
int epos = nchars;
prog->find(idx,dstr,spos,epos);
custring_view* str = dstr->substr((unsigned)spos,(unsigned)(epos-spos),1,buffer);
drow[i] = str;
buffer += ALIGN_SIZE(str->alloc_size());
spos = epos;
}
});
//
printCudaError(cudaDeviceSynchronize(),"nvs-findall_record");
dreprog::destroy(prog);
return count;
}
// same as findall but strings are returned organized in column-major
int NVStrings::findall( const char* pattern, std::vector<NVStrings*>& results )
{
if( pattern==0 )
return -1;
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags(),count);
delete ptn32;
// compute counts of each match and size of the buffers
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<int> counts(count,0);
int* d_counts = counts.data().get();
double st1 = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[prog, d_strings, d_counts] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int fnd = 0, nchars = (int)dstr->chars_count();
int begin = 0, end = nchars;
int result = prog->find(idx,dstr,begin,end);
while(result > 0)
{
++fnd;
begin = end;
end = nchars;
result = prog->find(idx,dstr,begin,end); // next one
}
d_counts[idx] = fnd;
});
int columns = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() );
// boundary case: if no columns, return one null column (issue #119)
if( columns==0 )
results.push_back(new NVStrings(count));
// create columns of nvstrings
for( int col=0; col < columns; ++col )
{
// build index for each string -- collect pointers and lengths
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[prog, d_strings, d_counts, col, d_indexes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
d_indexes[idx].first = 0; // initialize to
d_indexes[idx].second = 0; // null string
if( !dstr || (col >= d_counts[idx]) )
return;
int spos = 0, nchars = (int)dstr->chars_count();
int epos = nchars;
prog->find(idx,dstr,spos,epos);
for( int c=0; c < col; ++c )
{
spos = epos; // update
epos = nchars; // parameters
prog->find(idx,dstr,spos,epos);
}
// this will be the string for this column
if( spos < epos )
{
spos = dstr->byte_offset_for(spos); // convert char pos
epos = dstr->byte_offset_for(epos); // to byte offset
d_indexes[idx].first = dstr->data() + spos;
d_indexes[idx].second = (epos-spos);
}
else
{ // create empty string instead of a null one
d_indexes[idx].first = dstr->data();
}
});
cudaError_t err = cudaDeviceSynchronize();
if( err != cudaSuccess )
{
fprintf(stderr,"nvs-findall(%s): col=%d\n",pattern,col);
printCudaError(err);
}
// build new instance from the index
NVStrings* column = NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count);
results.push_back(column);
}
dreprog::destroy(prog);
return (unsigned int)results.size();
}
// does specified string occur in each string
int NVStrings::contains( const char* str, bool* results, bool todevice )
{
if( str==0 || results==0 )
return -1;
unsigned int count = size();
if( count==0 )
return 0;
unsigned int bytes = (unsigned int)strlen(str)+1; // the +1 allows searching for empty string
auto execpol = rmm::exec_policy(0);
char* d_str = 0;
RMM_ALLOC(&d_str,bytes,0);
cudaMemcpy(d_str,str,bytes,cudaMemcpyHostToDevice);
bool* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(bool),0);
custring_view_array d_strings = pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, bytes, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->find(d_str,bytes-1)>=0;
else
d_rtn[idx] = false;
});
//
cudaError_t err = cudaDeviceSynchronize();
double et = GetTime();
if( err != cudaSuccess )
{
fprintf(stderr,"nvs-contains(%s,%p,%d)\n",str,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("contains",0.0,(et-st));
RMM_FREE(d_str,0);
// count the number of successful finds
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(bool val) {return val;} );
if( !todevice )
{ // copy result back to host
cudaMemcpy(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)rtn;
}
// regex version of contain() above
int NVStrings::contains_re( const char* pattern, bool* results, bool todevice )
{
if( pattern==0 || results==0 )
return -1;
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags(),count);
delete ptn32;
bool* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(bool),0);
custring_view_array d_strings = pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, prog, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = prog->contains(idx,dstr)==1;
else
d_rtn[idx] = false;
});
//
cudaError_t err = cudaDeviceSynchronize();
double et = GetTime();
if( err != cudaSuccess )
{
fprintf(stderr,"nvs-contains_re(%s,%p,%d)\n",pattern,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("contains_re",0.0,(et-st));
// destroy compiled regex
dreprog::destroy(prog);
// count the number of successful finds
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(bool val){ return val; });
if( !todevice )
{ // copy result back to host
cudaMemcpy(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)rtn;
}
// match is like contains() except the pattern must match the beginning of the string only
int NVStrings::match( const char* pattern, bool* results, bool todevice )
{
if( pattern==0 || results==0 )
return -1;
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags(),count);
delete ptn32;
bool* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(bool),0);
custring_view_array d_strings = pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, prog, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = prog->match(idx,dstr)==1;
else
d_rtn[idx] = false;
});
//
cudaError_t err = cudaDeviceSynchronize();
double et = GetTime();
if( err != cudaSuccess )
{
fprintf(stderr,"nvs-match(%s,%p,%d)\n",pattern,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("match",0.0,(et-st));
dreprog::destroy(prog);
// count the number of successful finds
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(bool val){ return val; });
if( !todevice )
{ // copy result back to host
cudaMemcpy(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)rtn;
}
// counts number of times the regex pattern matches a string within each string
int NVStrings::count_re( const char* pattern, int* results, bool todevice )
{
if( pattern==0 || results==0 )
return -1;
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags(),count);
delete ptn32;
int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(int),0);
custring_view_array d_strings = pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, prog, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
int fnd = -1;
if( dstr )
{
fnd = 0;
int nchars = (int)dstr->chars_count();
int begin = 0, end = nchars;
int result = prog->find(idx,dstr,begin,end);
while(result > 0)
{
++fnd; // count how many we find
begin = end;
end = nchars;
result = prog->find(idx,dstr,begin,end);
}
}
d_rtn[idx] = fnd;
});
//
cudaError_t err = cudaDeviceSynchronize();
double et = GetTime();
if( err != cudaSuccess )
{
fprintf(stderr,"nvs-count_re(%s,%p,%d)\n",pattern,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("count_re",0.0,(et-st));
dreprog::destroy(prog);
// count the number of successful finds
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(int val){ return val>0; });
if( !todevice )
{ // copy result back to host
cudaMemcpy(results,d_rtn,sizeof(int)*count,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)rtn;
}
//
unsigned int NVStrings::startswith( const char* str, bool* results, bool todevice )
{
unsigned int count = size();
if( str==0 || count==0 || results==0 )
return 0;
unsigned int bytes = (unsigned int)strlen(str)+1; // the +1 allows searching for empty string
auto execpol = rmm::exec_policy(0);
char* d_str = 0;
RMM_ALLOC(&d_str,bytes,0);
cudaMemcpy(d_str,str,bytes,cudaMemcpyHostToDevice);
bool* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(bool),0);
custring_view_array d_strings = pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, bytes, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->starts_with(d_str,bytes-1);
else
d_rtn[idx] = false;
});
//
cudaError_t err = cudaDeviceSynchronize();
double et = GetTime();
if( err != cudaSuccess )
{
fprintf(stderr,"nvs-startswith(%s,%p,%d)\n",str,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("startswith",0.0,(et-st));
RMM_FREE(d_str,0);
// count the number of successful finds
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(bool val) {return val;} );
if( !todevice )
{ // copy result back to host
cudaMemcpy(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return rtn;
}
//
unsigned int NVStrings::endswith( const char* str, bool* results, bool todevice )
{
unsigned int count = size();
if( str==0 || count==0 || results==0 )
return 0;
unsigned int bytes = (unsigned int)strlen(str)+1; // the +1 allows searching for empty string
auto execpol = rmm::exec_policy(0);
char* d_str = 0;
RMM_ALLOC(&d_str,bytes,0);
cudaMemcpy(d_str,str,bytes,cudaMemcpyHostToDevice);
bool* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(bool),0);
custring_view_array d_strings = pImpl->getStringsPtr();
double st = GetTime();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, bytes, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->ends_with(d_str,bytes-1);
else
d_rtn[idx] = false;
});
//
cudaError_t err = cudaDeviceSynchronize();
double et = GetTime();
if( err != cudaSuccess )
{
fprintf(stderr,"nvs-endswith(%s,%p,%d)\n",str,results,(int)todevice);
printCudaError(err);
}
pImpl->addOpTimes("endswith",0.0,(et-st));
RMM_FREE(d_str,0);
// count the number of successful finds
unsigned int rtn = thrust::count_if(execpol->on(0), d_rtn, d_rtn+count, [] __device__(bool val) {return val;} );
if( !todevice )
{ // copy result back to host
cudaMemcpy(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return rtn;
}
|
53cf568a5147249c113cb5c0e0a7a3bdb8ce33ff.hip | // !!! This is a file automatically generated by hipify!!!
// Given a list (lst) of length n
// Output its sum = lst[0] + lst[1] + ... + lst[n-1];
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <wb.h>
#define BLOCK_SIZE 512 // TODO: You can change this
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while (0)
__global__ void total(float *input, float *output, int len) {
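// Each block loads up to 2*BLOCK_SIZE elements into shared memory, reduces them with a tree of strided additions, and writes one partial sum per block.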
// TODO: Load a segment of the input vector into shared memory
__shared__ float partialSum[2 * BLOCK_SIZE];
unsigned int t = threadIdx.x, start = 2 * blockIdx.x * BLOCK_SIZE;
if (start + t < len)
partialSum[t] = input[start + t];
else
partialSum[t] = 0;
if (start + BLOCK_SIZE + t < len)
partialSum[BLOCK_SIZE + t] = input[start + BLOCK_SIZE + t];
else
partialSum[BLOCK_SIZE + t] = 0;
// TODO: Traverse the reduction tree
for (unsigned int stride = BLOCK_SIZE; stride >= 1; stride >>= 1) {
__syncthreads();
if (t < stride)
partialSum[t] += partialSum[t + stride];
}
// TODO: Write the computed sum of the block to the output vector at the
// correct index
if (t == 0)
output[blockIdx.x] = partialSum[0];
}
int main(int argc, char **argv) {
int ii;
wbArg_t args;
float *hostInput; // The input 1D list
float *hostOutput; // The output list
float *deviceInput;
float *deviceOutput;
int numInputElements; // number of elements in the input list
int numOutputElements; // number of elements in the output list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput =
(float *)wbImport(wbArg_getInputFile(args, 0), &numInputElements);
numOutputElements = numInputElements / (BLOCK_SIZE << 1);
if (numInputElements % (BLOCK_SIZE << 1)) {
numOutputElements++;
}
hostOutput = (float *)malloc(numOutputElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ",
numInputElements);
wbLog(TRACE, "The number of output elements in the input is ",
numOutputElements);
wbTime_start(GPU, "Allocating GPU memory.");
// TODO: Allocate GPU memory here
hipMalloc(&deviceInput, sizeof(float) * numInputElements);
hipMalloc(&deviceOutput, sizeof(float) * numOutputElements);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
// TODO: Copy memory to the GPU here
hipMemcpy(deviceInput, hostInput, sizeof(float) * numInputElements,
hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
// TODO: Initialize the grid and block dimensions here
dim3 dimGrid(numOutputElements, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
wbTime_start(Compute, "Performing CUDA computation");
// TODO: Launch the GPU Kernel here
total<<<dimGrid, dimBlock>>>(deviceInput, deviceOutput, numInputElements);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
// TODO: Copy the GPU memory back to the CPU here
hipMemcpy(hostOutput, deviceOutput, sizeof(float) * numOutputElements,
hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
// TODO:
/********************************************************************
* Reduce output vector on the host
* NOTE: One could also perform the reduction of the output vector
* recursively and support any size input. For simplicity, we do not
* require that for this lab.
********************************************************************/
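/* A sketch of that recursive variant (hypothetical, not required for this lab):
 * keep launching the same kernel on its previous output until one value
 * remains, ping-ponging between two buffers so reads and writes never alias.
 * "deviceScratch" below is an assumed extra device buffer of the same size.
 *
 *   float *in = deviceOutput, *out = deviceScratch;
 *   for (int len = numOutputElements; len > 1; ) {
 *     int blocks = (len + (BLOCK_SIZE << 1) - 1) / (BLOCK_SIZE << 1);
 *     total<<<blocks, BLOCK_SIZE>>>(in, out, len);
 *     len = blocks;
 *     float *tmp = in; in = out; out = tmp;
 *   }
 *   // the grand total is now in in[0]
 */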
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
wbTime_start(GPU, "Freeing GPU Memory");
// TODO: Free the GPU memory here
hipFree(deviceInput);
hipFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, 1);
free(hostInput);
free(hostOutput);
#if LAB_DEBUG
system("pause");
#endif
return 0;
}
| 53cf568a5147249c113cb5c0e0a7a3bdb8ce33ff.cu | // Given a list (lst) of length n
// Output its sum = lst[0] + lst[1] + ... + lst[n-1];
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <wb.h>
#define BLOCK_SIZE 512 // TODO: You can change this
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while (0)
__global__ void total(float *input, float *output, int len) {
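// Each block loads up to 2*BLOCK_SIZE elements into shared memory, reduces them with a tree of strided additions, and writes one partial sum per block.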
// TODO: Load a segment of the input vector into shared memory
__shared__ float partialSum[2 * BLOCK_SIZE];
unsigned int t = threadIdx.x, start = 2 * blockIdx.x * BLOCK_SIZE;
if (start + t < len)
partialSum[t] = input[start + t];
else
partialSum[t] = 0;
if (start + BLOCK_SIZE + t < len)
partialSum[BLOCK_SIZE + t] = input[start + BLOCK_SIZE + t];
else
partialSum[BLOCK_SIZE + t] = 0;
// TODO: Traverse the reduction tree
for (unsigned int stride = BLOCK_SIZE; stride >= 1; stride >>= 1) {
__syncthreads();
if (t < stride)
partialSum[t] += partialSum[t + stride];
}
// TODO: Write the computed sum of the block to the output vector at the
// correct index
if (t == 0)
output[blockIdx.x] = partialSum[0];
}
int main(int argc, char **argv) {
int ii;
wbArg_t args;
float *hostInput; // The input 1D list
float *hostOutput; // The output list
float *deviceInput;
float *deviceOutput;
int numInputElements; // number of elements in the input list
int numOutputElements; // number of elements in the output list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput =
(float *)wbImport(wbArg_getInputFile(args, 0), &numInputElements);
numOutputElements = numInputElements / (BLOCK_SIZE << 1);
if (numInputElements % (BLOCK_SIZE << 1)) {
numOutputElements++;
}
hostOutput = (float *)malloc(numOutputElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ",
numInputElements);
wbLog(TRACE, "The number of output elements in the input is ",
numOutputElements);
wbTime_start(GPU, "Allocating GPU memory.");
// TODO: Allocate GPU memory here
cudaMalloc(&deviceInput, sizeof(float) * numInputElements);
cudaMalloc(&deviceOutput, sizeof(float) * numOutputElements);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
// TODO: Copy memory to the GPU here
cudaMemcpy(deviceInput, hostInput, sizeof(float) * numInputElements,
cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
// TODO: Initialize the grid and block dimensions here
dim3 dimGrid(numOutputElements, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
wbTime_start(Compute, "Performing CUDA computation");
// TODO: Launch the GPU Kernel here
total<<<dimGrid, dimBlock>>>(deviceInput, deviceOutput, numInputElements);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
// TODO: Copy the GPU memory back to the CPU here
cudaMemcpy(hostOutput, deviceOutput, sizeof(float) * numOutputElements,
cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
// TODO:
/********************************************************************
* Reduce output vector on the host
* NOTE: One could also perform the reduction of the output vector
* recursively and support any size input. For simplicity, we do not
* require that for this lab.
********************************************************************/
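/* A sketch of that recursive variant (hypothetical, not required for this lab):
 * keep launching the same kernel on its previous output until one value
 * remains, ping-ponging between two buffers so reads and writes never alias.
 * "deviceScratch" below is an assumed extra device buffer of the same size.
 *
 *   float *in = deviceOutput, *out = deviceScratch;
 *   for (int len = numOutputElements; len > 1; ) {
 *     int blocks = (len + (BLOCK_SIZE << 1) - 1) / (BLOCK_SIZE << 1);
 *     total<<<blocks, BLOCK_SIZE>>>(in, out, len);
 *     len = blocks;
 *     float *tmp = in; in = out; out = tmp;
 *   }
 *   // the grand total is now in in[0]
 */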
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
wbTime_start(GPU, "Freeing GPU Memory");
// TODO: Free the GPU memory here
cudaFree(deviceInput);
cudaFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, 1);
free(hostInput);
free(hostOutput);
#if LAB_DEBUG
system("pause");
#endif
return 0;
}
|
101f7dfe23bc308f3d9efe70ff53ade33f961780.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <tests/utilities/legacy/cudf_test_utils.cuh>
#include <tests/utilities/legacy/cudf_test_fixtures.h>
#include <bitmask/legacy/bit_mask.cuh>
#include <bitmask/legacy/BitMask.cuh>
#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include <hip/hip_runtime_api.h>
#include <gtest/gtest.h>
#include <chrono>
struct BitMaskTest : public GdfTest {};
//
// Kernel to count bits set in the bit mask
//
__global__ void count_bits_g(int *counter, BitMask bits) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int local_counter = 0;
int i;
for (i = index ; i < (bits.num_elements() - 1) ; i += stride) {
local_counter += __popc(bits.get_element_device(i));
}
if (i == (bits.num_elements() - 1)) {
//
// Special case... last word is only partial
//
int bits_used = bits.length() % bit_mask::bits_per_element;
if (bits_used == 0) {
//
// The whole word is used
//
local_counter += __popc(bits.get_element_device(i));
} else {
local_counter += __popc(bits.get_element_device(i) & ((bit_mask_t{1} << bits_used) - 1));
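      // Worked example: with length() = 100 and (say) 32-bit mask elements,
      // bits_used = 100 % 32 = 4, so the mask (1 << 4) - 1 = 0xF keeps only
      // the four valid bits of the last word before counting.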
}
}
atomicAdd(counter, local_counter);
}
//
// Testing function, will set a bit in a container. This assumes <1,1>
// for simplicity - all of the tests are small.
//
__global__ void set_bit(cudf::size_type bit, BitMask bits) {
bits.set_bit_unsafe(bit);
}
//
// Kernel to do safe bit set/clear
//
__global__ void test_safe_set_clear_g(BitMask bits) {
int index = threadIdx.x;
if ((index % 2) == 0) {
for (int i = index ; i < bits.length() ; i += bit_mask::bits_per_element) {
bits.set_bit(i);
}
}
for (int i = index ; i < bits.length() ; i += bit_mask::bits_per_element) {
bits.clear_bit(i);
}
if ((index % 2) == 0) {
for (int i = index ; i < bits.length() ; i += bit_mask::bits_per_element) {
bits.set_bit(i);
}
}
}
__host__ gdf_error count_bits(cudf::size_type *count, const BitMask &bit_mask, int a = 1, int b = 1) {
int *count_d;
CUDA_TRY(hipMalloc(&count_d, sizeof(int)));
CUDA_TRY(hipMemset(count_d, 0, sizeof(int)));
hipLaunchKernelGGL(( count_bits_g), dim3(a),dim3(b), 0, 0, count_d, bit_mask);
CUDA_TRY(hipMemcpy(count, count_d, sizeof(int), hipMemcpyDeviceToHost));
CUDA_TRY(hipFree(count_d));
return GDF_SUCCESS;
}
TEST_F(BitMaskTest, NoValids)
{
const int num_rows = 100;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(cudf::size_type{0}, local_count);
}
TEST_F(BitMaskTest, AllValids)
{
const int num_rows = 100;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 1));
BitMask bit_mask(bits, num_rows);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(cudf::size_type{100}, local_count);
}
TEST_F(BitMaskTest, FirstRowValid)
{
const int num_rows = 4;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 0, bit_mask);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(cudf::size_type{1}, local_count);
bit_mask_t temp = 0;
bit_mask.get_element_host(0, temp);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(temp, bit_mask_t{0x1});
}
TEST_F(BitMaskTest, EveryOtherBit)
{
const int num_rows = 8;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 0, bit_mask);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 2, bit_mask);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 4, bit_mask);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 6, bit_mask);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(cudf::size_type{4}, local_count);
bit_mask_t temp = 0;
bit_mask.get_element_host(0, temp);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(temp, bit_mask_t{0x55});
}
TEST_F(BitMaskTest, OtherEveryOtherBit)
{
const int num_rows = 8;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 1, bit_mask);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 3, bit_mask);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 5, bit_mask);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 7, bit_mask);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(cudf::size_type{4}, local_count);
bit_mask_t temp = 0;
bit_mask.get_element_host(0, temp);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(temp, bit_mask_t{0xAA});
}
TEST_F(BitMaskTest, 15rows)
{
const int num_rows = 15;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 0, bit_mask);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 8, bit_mask);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(cudf::size_type{2}, local_count);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
}
TEST_F(BitMaskTest, 5rows)
{
const int num_rows = 5;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 0, bit_mask);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(cudf::size_type{1}, local_count);
}
TEST_F(BitMaskTest, 10ValidRows)
{
const int num_rows = 10;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 1));
BitMask bit_mask(bits, num_rows);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(cudf::size_type{10}, local_count);
}
TEST_F(BitMaskTest, MultipleOfEight)
{
const int num_rows = 1024;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
for (int i = 0 ; i < num_rows ; i += 8) {
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, i, bit_mask);
}
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(cudf::size_type{128}, local_count);
}
TEST_F(BitMaskTest, NotMultipleOfEight)
{
const int num_rows = 1023;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
for (int i = 7 ; i < num_rows ; i += 8) {
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, i, bit_mask);
}
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(cudf::size_type{127}, local_count);
}
TEST_F(BitMaskTest, TenThousandRows)
{
const int num_rows = 10000;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 1));
BitMask bit_mask(bits, num_rows);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(cudf::size_type{10000}, local_count);
}
TEST_F(BitMaskTest, PerformanceTest)
{
const int num_rows = 100000000;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
int num_elements = bit_mask::num_elements(num_rows);
int block_size = 256;
int grid_size = (num_elements + block_size - 1)/block_size;
uint32_t *local_valid = (uint32_t *) malloc(num_elements * sizeof(uint32_t));
for (int i = 0 ; i < num_elements ; ++i) {
local_valid[i] = 0x55555555U;
}
EXPECT_EQ(GDF_SUCCESS, bit_mask::copy_bit_mask(bit_mask.get_valid(), local_valid, num_rows, hipMemcpyHostToDevice));
auto start = std::chrono::system_clock::now();
hipProfilerStart();
for(int i = 0; i < 1000; ++i) {
cudf::size_type local_count = 0;
count_bits(&local_count, bit_mask, grid_size, block_size);
}
hipProfilerStop();
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed_seconds = end-start;
std::cout << "Elapsed time (ms): " << elapsed_seconds.count()*1000 << std::endl;
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
free(local_valid);
}
TEST_F(BitMaskTest, CudaThreadingTest)
{
const int num_rows = 100000;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
hipLaunchKernelGGL(( test_safe_set_clear_g), dim3(1),dim3(bit_mask::bits_per_element), 0, 0, bit_mask);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ((cudf::size_type) (num_rows/2), local_count);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
}
TEST_F(BitMaskTest, PaddingTest)
{
//
// Set the number of rows to 32, we'll try padding to
// 256 bytes.
//
const int num_rows = 32;
const int padding_bytes = 256;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 1, padding_bytes));
BitMask bit_mask(bits, num_rows);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ((cudf::size_type) num_rows, local_count);
//
// To test this, we should be able to access the last element
//
int last_element = (padding_bytes / sizeof(bit_mask_t)) - 1;
bit_mask_t temp = bit_mask_t{0};
bit_mask.get_element_host(last_element, temp);
EXPECT_EQ(~bit_mask_t{0}, temp);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
}
| 101f7dfe23bc308f3d9efe70ff53ade33f961780.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <tests/utilities/legacy/cudf_test_utils.cuh>
#include <tests/utilities/legacy/cudf_test_fixtures.h>
#include <bitmask/legacy/bit_mask.cuh>
#include <bitmask/legacy/BitMask.cuh>
#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include <cuda_profiler_api.h>
#include <gtest/gtest.h>
#include <chrono>
struct BitMaskTest : public GdfTest {};
//
// Kernel to count bits set in the bit mask
//
__global__ void count_bits_g(int *counter, BitMask bits) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int local_counter = 0;
int i;
for (i = index ; i < (bits.num_elements() - 1) ; i += stride) {
local_counter += __popc(bits.get_element_device(i));
}
if (i == (bits.num_elements() - 1)) {
//
// Special case... last word is only partial
//
int bits_used = bits.length() % bit_mask::bits_per_element;
if (bits_used == 0) {
//
// The whole word is used
//
local_counter += __popc(bits.get_element_device(i));
} else {
local_counter += __popc(bits.get_element_device(i) & ((bit_mask_t{1} << bits_used) - 1));
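      // Worked example: with length() = 100 and (say) 32-bit mask elements,
      // bits_used = 100 % 32 = 4, so the mask (1 << 4) - 1 = 0xF keeps only
      // the four valid bits of the last word before counting.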
}
}
atomicAdd(counter, local_counter);
}
//
// Testing function, will set a bit in a container. This assumes <1,1>
// for simplicity - all of the tests are small.
//
__global__ void set_bit(cudf::size_type bit, BitMask bits) {
bits.set_bit_unsafe(bit);
}
//
// Kernel to do safe bit set/clear
//
__global__ void test_safe_set_clear_g(BitMask bits) {
int index = threadIdx.x;
if ((index % 2) == 0) {
for (int i = index ; i < bits.length() ; i += bit_mask::bits_per_element) {
bits.set_bit(i);
}
}
for (int i = index ; i < bits.length() ; i += bit_mask::bits_per_element) {
bits.clear_bit(i);
}
if ((index % 2) == 0) {
for (int i = index ; i < bits.length() ; i += bit_mask::bits_per_element) {
bits.set_bit(i);
}
}
}
__host__ gdf_error count_bits(cudf::size_type *count, const BitMask &bit_mask, int a = 1, int b = 1) {
int *count_d;
CUDA_TRY(cudaMalloc(&count_d, sizeof(int)));
CUDA_TRY(cudaMemset(count_d, 0, sizeof(int)));
count_bits_g<<<a,b>>>(count_d, bit_mask);
CUDA_TRY(cudaMemcpy(count, count_d, sizeof(int), cudaMemcpyDeviceToHost));
CUDA_TRY(cudaFree(count_d));
return GDF_SUCCESS;
}
TEST_F(BitMaskTest, NoValids)
{
const int num_rows = 100;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(cudf::size_type{0}, local_count);
}
TEST_F(BitMaskTest, AllValids)
{
const int num_rows = 100;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 1));
BitMask bit_mask(bits, num_rows);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(cudf::size_type{100}, local_count);
}
TEST_F(BitMaskTest, FirstRowValid)
{
const int num_rows = 4;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
set_bit<<<1,1>>>(0, bit_mask);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(cudf::size_type{1}, local_count);
bit_mask_t temp = 0;
bit_mask.get_element_host(0, temp);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(temp, bit_mask_t{0x1});
}
TEST_F(BitMaskTest, EveryOtherBit)
{
const int num_rows = 8;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
set_bit<<<1,1>>>(0, bit_mask);
set_bit<<<1,1>>>(2, bit_mask);
set_bit<<<1,1>>>(4, bit_mask);
set_bit<<<1,1>>>(6, bit_mask);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(cudf::size_type{4}, local_count);
bit_mask_t temp = 0;
bit_mask.get_element_host(0, temp);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(temp, bit_mask_t{0x55});
}
TEST_F(BitMaskTest, OtherEveryOtherBit)
{
const int num_rows = 8;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
set_bit<<<1,1>>>(1, bit_mask);
set_bit<<<1,1>>>(3, bit_mask);
set_bit<<<1,1>>>(5, bit_mask);
set_bit<<<1,1>>>(7, bit_mask);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(cudf::size_type{4}, local_count);
bit_mask_t temp = 0;
bit_mask.get_element_host(0, temp);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(temp, bit_mask_t{0xAA});
}
TEST_F(BitMaskTest, 15rows)
{
const int num_rows = 15;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
set_bit<<<1,1>>>(0, bit_mask);
set_bit<<<1,1>>>(8, bit_mask);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(cudf::size_type{2}, local_count);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
}
TEST_F(BitMaskTest, 5rows)
{
const int num_rows = 5;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
set_bit<<<1,1>>>(0, bit_mask);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(cudf::size_type{1}, local_count);
}
TEST_F(BitMaskTest, 10ValidRows)
{
const int num_rows = 10;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 1));
BitMask bit_mask(bits, num_rows);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(cudf::size_type{10}, local_count);
}
TEST_F(BitMaskTest, MultipleOfEight)
{
const int num_rows = 1024;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
for (int i = 0 ; i < num_rows ; i += 8) {
set_bit<<<1,1>>>(i, bit_mask);
}
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(cudf::size_type{128}, local_count);
}
TEST_F(BitMaskTest, NotMultipleOfEight)
{
const int num_rows = 1023;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
for (int i = 7 ; i < num_rows ; i += 8) {
set_bit<<<1,1>>>(i, bit_mask);
}
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(cudf::size_type{127}, local_count);
}
TEST_F(BitMaskTest, TenThousandRows)
{
const int num_rows = 10000;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 1));
BitMask bit_mask(bits, num_rows);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(cudf::size_type{10000}, local_count);
}
TEST_F(BitMaskTest, PerformanceTest)
{
const int num_rows = 100000000;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
int num_elements = bit_mask::num_elements(num_rows);
int block_size = 256;
int grid_size = (num_elements + block_size - 1)/block_size;
uint32_t *local_valid = (uint32_t *) malloc(num_elements * sizeof(uint32_t));
for (int i = 0 ; i < num_elements ; ++i) {
local_valid[i] = 0x55555555U;
}
EXPECT_EQ(GDF_SUCCESS, bit_mask::copy_bit_mask(bit_mask.get_valid(), local_valid, num_rows, cudaMemcpyHostToDevice));
auto start = std::chrono::system_clock::now();
cudaProfilerStart();
for(int i = 0; i < 1000; ++i) {
cudf::size_type local_count = 0;
count_bits(&local_count, bit_mask, grid_size, block_size);
}
cudaProfilerStop();
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed_seconds = end-start;
std::cout << "Elapsed time (ms): " << elapsed_seconds.count()*1000 << std::endl;
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
free(local_valid);
}
TEST_F(BitMaskTest, CudaThreadingTest)
{
const int num_rows = 100000;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
test_safe_set_clear_g<<<1,bit_mask::bits_per_element>>>(bit_mask);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ((cudf::size_type) (num_rows/2), local_count);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
}
TEST_F(BitMaskTest, PaddingTest)
{
//
// Set the number of rows to 32, we'll try padding to
// 256 bytes.
//
const int num_rows = 32;
const int padding_bytes = 256;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 1, padding_bytes));
BitMask bit_mask(bits, num_rows);
cudf::size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ((cudf::size_type) num_rows, local_count);
//
// To test this, we should be able to access the last element
//
int last_element = (padding_bytes / sizeof(bit_mask_t)) - 1;
bit_mask_t temp = bit_mask_t{0};
bit_mask.get_element_host(last_element, temp);
EXPECT_EQ(~bit_mask_t{0}, temp);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
}
|
95c2837b6ba3882a2b6cb5bf628d58e9a16f1756.hip | // !!! This is a file automatically generated by hipify!!!
/*---------------------------------------------------------------------------------------------------------------*/
/// bpl.c
/// For CSU CS475 Fall 2016
/// Instructor: Sanjay Rajopadhye
/// GTA: Swetha Varadarajan
/// Based on code Created by Paul Tero at Existor Ltd as part of a neural networks tutorial
/// Modified by Swetha Varadarajan
/// Created: 2016-11-16
/*---------------------------------------------------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "timer.h"
#include "util.h"
#include "bplKernel.h"
#define X(i, j) X[((i) * (cmdLineArgs.N + 1)) + (j)]
#define H(i, j) H[((i) * (cmdLineArgs.M + 1)) + (j)]
int main(int argc, char * * argv) {
/*---------------------------------------------------------------------------------------------------------------*/
/*-----------------------------------------Command line parsing--------------------------------------------------*/
/*---------------------------------------------------------------------------------------------------------------*/
Params cmdLineArgs;
parseCmdLineArgs( & cmdLineArgs, argc, argv);
/*---------------------------------------------------------------------------------------------------------------*/
/*-------------------------------------------Variable Declaration------------------------------------------------*/
/*---------------------------------------------------------------------------------------------------------------*/
/*Array description and its size in the comments next to its declation*/
double * inputs; //Given inputs = total number of samples(S)*number of inputs per sample(N)
double * outputs; //Expected outputs = total number of samples(S)*number of outputs per sample(P)
double * X; //Input for a given iteration = bunch size(I)*number of inputs per sample(N+1(bias))
double * Y; //Output for a given iteration = bunch size(I)*number of outputs per sample(P)
double * Wxh; //Weights in between input and hidden layer = (N+1)*M
double * Why; //Weights in between hidden and output layer = (M+1)*P
double * dWxh; //Error Weights in between input and hidden layer = (N+1)*M
double * dWhy; //Error Weights in between hidden and output layer = (M+1)*P
double * Zh; //Weighted sum for hidden layer=I*M
double * H; // Activation values = I*(M+1)
double * Zy; //Weighted sum for output layer=I*P
double * E; //Calculated Errors = I*P
double * P1; //Predicted output = I*P
double * P; // (exp(Zy)) = I*P
double * sum; //(summation of the P[i]s) = I
double * d_X; //Input for a given iteration = bunch size(I)*number of inputs per sample(N+1(bias))
double * d_Y; //Output for a given iteration = bunch size(I)*number of outputs per sample(P)
double * d_Wxh; //Weights in between input and hidden layer = (N+1)*M
double * d_Why; //Weights in between hidden and output layer = (M+1)*P
double * d_dWxh; //Error Weights in between input and hidden layer = (N+1)*M
double * d_dWhy; //Error Weights in between hidden and output layer = (M+1)*P
double * d_Zh; //Weighted sum for hidden layer=I*M
double * d_H; // Activation values = I*(M+1)
double * d_Zy; //Weighted sum for output layer=I*P
double * d_E; //Calculated Errors = I*P
double * d_P1; //Predicted output = I*P
double * d_P; // (exp(Zy)) = I*P
double * d_sum; //(summation of the P[i]s) = I
double learningrate = 0.0001; /*learning rate */
long b = cmdLineArgs.sample_per_iter;
long k2 = cmdLineArgs.sample_total / b; /*number of full bunches */
long k3 = cmdLineArgs.sample_total - (k2 * b); /* size of the partial bunch */
/*---------------------------------------------------------------------------------------------------------------*/
/*-------------------------------------------Memory allocations--------------------------------------------------*/
/*---------------------------------------------------------------------------------------------------------------*/
inputs = (double * ) malloc(cmdLineArgs.sample_total * sizeof(double) * cmdLineArgs.N);
outputs = (double * ) malloc(cmdLineArgs.sample_total * sizeof(double) * cmdLineArgs.P);
sum = (double * ) malloc((b) * sizeof(double));
Wxh = (double * ) malloc((cmdLineArgs.N + 1) * sizeof(double) * cmdLineArgs.M);
Why = (double * ) malloc((cmdLineArgs.M + 1) * sizeof(double) * cmdLineArgs.P);
dWxh = (double * ) malloc((cmdLineArgs.N + 1) * sizeof(double) * cmdLineArgs.M);
dWhy = (double * ) malloc((cmdLineArgs.M + 1) * sizeof(double) * cmdLineArgs.P);
X = (double * ) malloc(b * sizeof(double) * (cmdLineArgs.N + 1));
Y = (double * ) malloc((b) * sizeof(double) * cmdLineArgs.P); // note: Y is re-pointed into outputs[] in the training loop, so this buffer is never actually used
E = (double * ) malloc(b * sizeof(double) * (cmdLineArgs.P));
P = (double * ) malloc(b * sizeof(double) * (cmdLineArgs.P));
P1 = (double * ) malloc(b * sizeof(double) * (cmdLineArgs.P));
H = (double * ) malloc(b * sizeof(double) * (cmdLineArgs.M + 1));
Zh = (double * ) malloc(b * sizeof(double) * (cmdLineArgs.M));
Zy = (double * ) malloc(b * sizeof(double) * (cmdLineArgs.P));
d_sum = (double * ) myCudaMalloc((b) * sizeof(double));
d_Wxh = (double * ) myCudaMalloc((cmdLineArgs.N + 1) * sizeof(double) * cmdLineArgs.M);
d_Why = (double * ) myCudaMalloc((cmdLineArgs.M + 1) * sizeof(double) * cmdLineArgs.P);
d_dWxh = (double * ) myCudaMalloc((cmdLineArgs.N + 1) * sizeof(double) * cmdLineArgs.M);
d_dWhy = (double * ) myCudaMalloc((cmdLineArgs.M + 1) * sizeof(double) * cmdLineArgs.P);
d_X = (double * ) myCudaMalloc(b * sizeof(double) * (cmdLineArgs.N + 1));
d_Y = (double * ) myCudaMalloc((b) * sizeof(double)* cmdLineArgs.P);
d_E = (double * ) myCudaMalloc(b * sizeof(double) * (cmdLineArgs.P));
d_P = (double * ) myCudaMalloc(b * sizeof(double) * (cmdLineArgs.P));
d_P1 = (double * ) myCudaMalloc(b * sizeof(double) * (cmdLineArgs.P));
d_H = (double * ) myCudaMalloc(b * sizeof(double) * (cmdLineArgs.M + 1));
d_Zh = (double * ) myCudaMalloc(b * sizeof(double) * (cmdLineArgs.M));
d_Zy = (double * ) myCudaMalloc(b * sizeof(double) * (cmdLineArgs.P));
if (inputs == NULL || outputs == NULL || X == NULL || H == NULL || dWxh == NULL || dWhy == NULL || Zh == NULL || Zy == NULL || Wxh == NULL || Why == NULL || E == NULL || P == NULL || P1 == NULL || sum == NULL) {
printf("Could not allocate memory\n");
exit(0);
}
/*---------------------------------------------------------------------------------------------------------------*/
/*----------------------------------------------Initializations--------------------------------------------------*/
/*---------------------------------------------------------------------------------------------------------------*/
initializeW(Wxh, (cmdLineArgs.N + 1), cmdLineArgs.M);
initializeW(Why, (cmdLineArgs.M + 1), cmdLineArgs.P);
initializeI(inputs, cmdLineArgs.sample_total, cmdLineArgs.N);
initializeO(outputs, cmdLineArgs.sample_total, cmdLineArgs.P);
//displayMatrix1("outputs", outputs, cmdLineArgs.P, cmdLineArgs.sample_total);
HANDLE_ERROR(hipMemcpy(d_Wxh, Wxh, (cmdLineArgs.N + 1) * sizeof(double) * cmdLineArgs.M, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_Why, Why, (cmdLineArgs.M + 1) * sizeof(double) * cmdLineArgs.P, hipMemcpyHostToDevice));
/*---------------------------------------------------------------------------------------------------------------*/
/*------------------------------------------------Training-------------------------------------------------------*/
/*---------------------------------------------------------------------------------------------------------------*/
initialize_timer();
start_timer();
for (long t = 0; t < cmdLineArgs.iter; t++) //Time loop
{
for (long s = 0; s < k2; s++) //Bunch loop
{
HANDLE_ERROR(hipMemcpy(H, d_H, b * sizeof(double) * (cmdLineArgs.M+1), hipMemcpyDeviceToHost)); //
for (long i = 0; i < b; i++) {
X(i, 0) = H(i, 0) = 1; //bias setting
//required input/output are copied from inputs/outputs to X and Y
memcpy( & X(i, 1), & inputs[cmdLineArgs.N * ((s * b) + i)], cmdLineArgs.N * sizeof(double));
}
Y = & outputs[s * b * cmdLineArgs.P];
hipMemcpy(d_X, X, b * sizeof(double) * (cmdLineArgs.N + 1), hipMemcpyHostToDevice);
hipMemcpy(d_Y, Y, b * sizeof(double) * cmdLineArgs.P, hipMemcpyHostToDevice);
hipMemcpy(d_H, H, b * sizeof(double) * (cmdLineArgs.M+1), hipMemcpyHostToDevice); //set bias on device
/*Forward Phase*/
mm(d_Zh, d_X, d_Wxh, b, cmdLineArgs.N + 1, cmdLineArgs.M); //Zh=X*Wxh
func(d_H, d_Zh, b, cmdLineArgs.M, 1); //H=f1(Zh)
mm(d_Zy, d_H, d_Why, b, cmdLineArgs.M + 1, cmdLineArgs.P); //Zy=H*Why
func(d_P, d_Zy, b, cmdLineArgs.P, 0); //P=fn(Zy)
reduction(d_P, d_sum, b, cmdLineArgs.P); //summation of probabilities for each training sample
prob(d_P, d_P1, d_sum, b, cmdLineArgs.P); //P1=fn(P,sum)
error(d_E, d_P1, d_Y, b, cmdLineArgs.P); //E=P1-Y
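/* Taken together, func/reduction/prob implement a softmax over the P outputs
   (P1 = exp(Zy) / sum(exp(Zy))), and E = P1 - Y matches the output-layer
   gradient one gets when the loss is cross-entropy. */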
/*Backpropagation Phase*/
mtm(d_dWhy, d_H, d_E, cmdLineArgs.M + 1, b, cmdLineArgs.P); //dWhy=H'*E ('->transpose)
delta(d_Why, d_dWhy, cmdLineArgs.M + 1, cmdLineArgs.P, learningrate); //Why=fn(dwhy)
mmt(d_H, d_Why, d_E, b, cmdLineArgs.M + 1, cmdLineArgs.P); //H=Why*E'
gradient_func(d_Zh, d_H, b, cmdLineArgs.M); //Zh=f1"(H) ("->gradient of f1)
mtm(d_dWxh, d_X, d_Zh, cmdLineArgs.N + 1, b, cmdLineArgs.M); //dWxh=X'Zh
delta(d_Wxh, d_dWxh, cmdLineArgs.N + 1, cmdLineArgs.M, learningrate); //Wxh=fn(dWxh)
}
if (k3) {
//printf("k3: %ld\n", k3);
HANDLE_ERROR(hipMemcpy(H, d_H, k3 * sizeof(double) * (cmdLineArgs.M+1), hipMemcpyDeviceToHost)); //
for (long i = 0; i < k3; i++) {
X(i, 0) = H(i, 0) = 1;
memcpy( & X(i, 1), & inputs[cmdLineArgs.N * ((k2 * b) + i)], cmdLineArgs.N * sizeof(double));
}
Y = & outputs[k2 * b * cmdLineArgs.P];
hipMemcpy(d_X, X, k3 * sizeof(double) * (cmdLineArgs.N + 1), hipMemcpyHostToDevice);
hipMemcpy(d_Y, Y, k3 * sizeof(double) * cmdLineArgs.P, hipMemcpyHostToDevice);
hipMemcpy(d_H, H, k3 * sizeof(double) * (cmdLineArgs.M+1), hipMemcpyHostToDevice); //set bias on device
//Forward Phase
mm(d_Zh, d_X, d_Wxh, k3, cmdLineArgs.N + 1, cmdLineArgs.M);
func(d_H, d_Zh, k3, cmdLineArgs.M, 1);
mm(d_Zy, d_H, d_Why, k3, cmdLineArgs.M + 1, cmdLineArgs.P);
func(d_P, d_Zy, k3, cmdLineArgs.P, 0);
reduction(d_P, d_sum, k3, cmdLineArgs.P);
prob(d_P, d_P1, d_sum, k3, cmdLineArgs.P);
error(d_E, d_P1, d_Y, k3, cmdLineArgs.P);
//Backpropagation Phase
mtm(d_dWhy, d_H, d_E, cmdLineArgs.M + 1, k3, cmdLineArgs.P);
delta(d_Why, d_dWhy, cmdLineArgs.M + 1, cmdLineArgs.P, learningrate);
mmt(d_H, d_Why, d_E, k3, cmdLineArgs.M + 1, cmdLineArgs.P);
gradient_func(d_Zh, d_H, k3, cmdLineArgs.M);
mtm(d_dWxh, d_X, d_Zh, cmdLineArgs.N + 1, k3, cmdLineArgs.M);
delta(d_Wxh, d_dWxh, cmdLineArgs.N + 1, cmdLineArgs.M, learningrate);
}
}
stop_timer();
double time = elapsed_time();
double nFlops = b* (cmdLineArgs.N + 1)* cmdLineArgs.M*2;
nFlops += b* cmdLineArgs.M;
nFlops += b *(cmdLineArgs.M + 1) * cmdLineArgs.P*2;
nFlops += b * cmdLineArgs.P * 4; //for 4 different kernels;
nFlops += (cmdLineArgs.M + 1)* b * cmdLineArgs.P*2;
nFlops += (cmdLineArgs.M + 1) * cmdLineArgs.P *2;
nFlops += b* (cmdLineArgs.M + 1)* cmdLineArgs.P *2;
nFlops += b * cmdLineArgs.M;
nFlops += (cmdLineArgs.N + 1)* b * cmdLineArgs.M *2;
nFlops += (cmdLineArgs.N + 1)* cmdLineArgs.M * 2;
nFlops *= cmdLineArgs.iter * k2;
nFlops += k3* (cmdLineArgs.N + 1)* cmdLineArgs.M*2;
nFlops += k3* cmdLineArgs.M;
nFlops += k3 *(cmdLineArgs.M + 1) * cmdLineArgs.P*2;
nFlops += k3 * cmdLineArgs.P * 4; //for 4 different kernels;
nFlops += (cmdLineArgs.M + 1)* k3 * cmdLineArgs.P*2;
nFlops += (cmdLineArgs.M + 1) * cmdLineArgs.P *2;
nFlops += k3* (cmdLineArgs.M + 1)* cmdLineArgs.P *2;
nFlops += k3 * cmdLineArgs.M;
nFlops += (cmdLineArgs.N + 1)* k3 * cmdLineArgs.M *2;
nFlops += (cmdLineArgs.N + 1)* cmdLineArgs.M * 2;
double nFlopsPerSec = nFlops/time;
double nGFlopsPerSec = nFlopsPerSec*1e-9;
printf("Time: %lf\n", time);
//printf("Time: %lf GFlops: %lf\n", time, nGFlopsPerSec);
hipDeviceSynchronize();
//get results from the device to host
HANDLE_ERROR(hipMemcpy(Wxh, d_Wxh, (cmdLineArgs.N + 1) * sizeof(double) * cmdLineArgs.M, hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(Why, d_Why, (cmdLineArgs.M + 1) * sizeof(double) * cmdLineArgs.P, hipMemcpyDeviceToHost));
/*---------------------------------------------------------------------------------------------------------------*/
/*----------------------------------------------Print outputs----------------------------------------------------*/
/*---------------------------------------------------------------------------------------------------------------*/
if (cmdLineArgs.V) {
/*Need the following 2 statements for Testing*/
displayMatrix1("input/hidden weights", Wxh, cmdLineArgs.N + 1, cmdLineArgs.M);
displayMatrix1("hidden/output weights", Why, cmdLineArgs.M + 1, cmdLineArgs.P);
/* Useful for analyzing the accuracy of prediction */
/*if(k3)
{
displayVector ("last input", &X[k3-1][1], cmdLineArgs.N);
displayVector ("last output", Y[k3-1], cmdLineArgs.P);
displayVector ("predicted output",P1[k3-1], cmdLineArgs.P);
}
else
{
displayVector ("last input", &X[b-1][1], cmdLineArgs.N);
displayVector ("last output", Y[b-1], cmdLineArgs.P);
displayVector ("predicted output",P1[b-1], cmdLineArgs.P);
}*/
}
/*---------------------------------------------------------------------------------------------------------------*/
/*----------------------------------------------Free Memory------------------------------------------------------*/
/*---------------------------------------------------------------------------------------------------------------*/
free(inputs);
free(outputs);
free(X);
free(Zh);
free(Zy);
free(H);
free(E);
free(P);
free(P1);
free(sum);
free(Wxh);
free(Why);
free(dWxh);
free(dWhy);
hipFree(d_X);
hipFree(d_Y);
hipFree(d_Zh);
hipFree(d_Zy);
hipFree(d_H);
hipFree(d_E);
hipFree(d_P);
hipFree(d_P1);
hipFree(d_sum);
hipFree(d_Wxh);
hipFree(d_Why);
hipFree(d_dWxh);
hipFree(d_dWhy);
/*-------------------------------------------------------END-----------------------------------------------------*/
return 0;
}
| 95c2837b6ba3882a2b6cb5bf628d58e9a16f1756.cu | /*---------------------------------------------------------------------------------------------------------------*/
/// bpl.c
/// For CSU CS475 Fall 2016
/// Instructor: Sanjay Rajopadhye
/// GTA: Swetha Varadarajan
/// Based on code Created by Paul Tero at Existor Ltd as part of a neural networks tutorial
/// Modified by Swetha Varadarajan
/// Created: 2016-11-16
/*---------------------------------------------------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "timer.h"
#include "util.h"
#include "bplKernel.h"
#define X(i, j) X[((i) * (cmdLineArgs.N + 1)) + (j)]
#define H(i, j) H[((i) * (cmdLineArgs.M + 1)) + (j)]
int main(int argc, char * * argv) {
/*---------------------------------------------------------------------------------------------------------------*/
/*-----------------------------------------Command line parsing--------------------------------------------------*/
/*---------------------------------------------------------------------------------------------------------------*/
Params cmdLineArgs;
parseCmdLineArgs( & cmdLineArgs, argc, argv);
/*---------------------------------------------------------------------------------------------------------------*/
/*-------------------------------------------Variable Declaration------------------------------------------------*/
/*---------------------------------------------------------------------------------------------------------------*/
/*Array description and its size in the comments next to its declation*/
double * inputs; //Given inputs = total number of samples(S)*number of inputs per sample(N)
double * outputs; //Expected outputs = total number of samples(S)*number of outputs per sample(P)
double * X; //Input for a given iteration = bunch size(I)*number of inputs per sample(N+1(bias))
double * Y; //Output for a given iteration = bunch size(I)*number of outputs per sample(P)
double * Wxh; //Weights in between input and hidden layer = (N+1)*M
double * Why; //Weights in between hidden and output layer = (M+1)*P
double * dWxh; //Error Weights in between input and hidden layer = (N+1)*M
double * dWhy; //Error Weights in between hidden and output layer = (M+1)*P
double * Zh; //Weighted sum for hidden layer=I*M
double * H; // Activation values = I*(M+1)
double * Zy; //Weighted sum for output layer=I*P
double * E; //Calculated Errors = I*P
double * P1; //Predicted output = I*P
double * P; // (exp(Zy)) = I*P
double * sum; //(summation of the P[i]s) = I
double * d_X; //Input for a given iteration = bunch size(I)*number of inputs per sample(N+1(bias))
double * d_Y; //Output for a given iteration = bunch size(I)*number of outputs per sample(P)
double * d_Wxh; //Weights in between input and hidden layer = (N+1)*M
double * d_Why; //Weights in between hidden and output layer = (M+1)*P
double * d_dWxh; //Error Weights in between input and hidden layer = (N+1)*M
double * d_dWhy; //Error Weights in between hidden and output layer = (M+1)*P
double * d_Zh; //Weighted sum for hidden layer=I*M
double * d_H; // Activation values = I*(M+1)
double * d_Zy; //Weighted sum for output layer=I*P
double * d_E; //Calculated Errors = I*P
double * d_P1; //Predicted output = I*P
double * d_P; // (exp(Zy)) = I*P
double * d_sum; //(summation of the P[i]s) = I
double learningrate = 0.0001; /*learning rate */
long b = cmdLineArgs.sample_per_iter;
long k2 = cmdLineArgs.sample_total / b; /*number of full bunches */
long k3 = cmdLineArgs.sample_total - (k2 * b); /* size of the partial bunch */
/*---------------------------------------------------------------------------------------------------------------*/
/*-------------------------------------------Memory allocations--------------------------------------------------*/
/*---------------------------------------------------------------------------------------------------------------*/
inputs = (double * ) malloc(cmdLineArgs.sample_total * sizeof(double) * cmdLineArgs.N);
outputs = (double * ) malloc(cmdLineArgs.sample_total * sizeof(double) * cmdLineArgs.P);
sum = (double * ) malloc((b) * sizeof(double));
Wxh = (double * ) malloc((cmdLineArgs.N + 1) * sizeof(double) * cmdLineArgs.M);
Why = (double * ) malloc((cmdLineArgs.M + 1) * sizeof(double) * cmdLineArgs.P);
dWxh = (double * ) malloc((cmdLineArgs.N + 1) * sizeof(double) * cmdLineArgs.M);
dWhy = (double * ) malloc((cmdLineArgs.M + 1) * sizeof(double) * cmdLineArgs.P);
X = (double * ) malloc(b * sizeof(double) * (cmdLineArgs.N + 1));
Y = (double * ) malloc((b) * sizeof(double) * cmdLineArgs.P); // note: Y is re-pointed into outputs[] in the training loop, so this buffer is never actually used
E = (double * ) malloc(b * sizeof(double) * (cmdLineArgs.P));
P = (double * ) malloc(b * sizeof(double) * (cmdLineArgs.P));
P1 = (double * ) malloc(b * sizeof(double) * (cmdLineArgs.P));
H = (double * ) malloc(b * sizeof(double) * (cmdLineArgs.M + 1));
Zh = (double * ) malloc(b * sizeof(double) * (cmdLineArgs.M));
Zy = (double * ) malloc(b * sizeof(double) * (cmdLineArgs.P));
d_sum = (double * ) myCudaMalloc((b) * sizeof(double));
d_Wxh = (double * ) myCudaMalloc((cmdLineArgs.N + 1) * sizeof(double) * cmdLineArgs.M);
d_Why = (double * ) myCudaMalloc((cmdLineArgs.M + 1) * sizeof(double) * cmdLineArgs.P);
d_dWxh = (double * ) myCudaMalloc((cmdLineArgs.N + 1) * sizeof(double) * cmdLineArgs.M);
d_dWhy = (double * ) myCudaMalloc((cmdLineArgs.M + 1) * sizeof(double) * cmdLineArgs.P);
d_X = (double * ) myCudaMalloc(b * sizeof(double) * (cmdLineArgs.N + 1));
d_Y = (double * ) myCudaMalloc((b) * sizeof(double)* cmdLineArgs.P);
d_E = (double * ) myCudaMalloc(b * sizeof(double) * (cmdLineArgs.P));
d_P = (double * ) myCudaMalloc(b * sizeof(double) * (cmdLineArgs.P));
d_P1 = (double * ) myCudaMalloc(b * sizeof(double) * (cmdLineArgs.P));
d_H = (double * ) myCudaMalloc(b * sizeof(double) * (cmdLineArgs.M + 1));
d_Zh = (double * ) myCudaMalloc(b * sizeof(double) * (cmdLineArgs.M));
d_Zy = (double * ) myCudaMalloc(b * sizeof(double) * (cmdLineArgs.P));
if (inputs == NULL || outputs == NULL || X == NULL || H == NULL || dWxh == NULL || dWhy == NULL || Zh == NULL || Zy == NULL || Wxh == NULL || Why == NULL || E == NULL || P == NULL || P1 == NULL || sum == NULL) {
printf("Could not allocate memory\n");
exit(0);
}
/*---------------------------------------------------------------------------------------------------------------*/
/*----------------------------------------------Initializations--------------------------------------------------*/
/*---------------------------------------------------------------------------------------------------------------*/
initializeW(Wxh, (cmdLineArgs.N + 1), cmdLineArgs.M);
initializeW(Why, (cmdLineArgs.M + 1), cmdLineArgs.P);
initializeI(inputs, cmdLineArgs.sample_total, cmdLineArgs.N);
initializeO(outputs, cmdLineArgs.sample_total, cmdLineArgs.P);
//displayMatrix1("outputs", outputs, cmdLineArgs.P, cmdLineArgs.sample_total);
HANDLE_ERROR(cudaMemcpy(d_Wxh, Wxh, (cmdLineArgs.N + 1) * sizeof(double) * cmdLineArgs.M, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_Why, Why, (cmdLineArgs.M + 1) * sizeof(double) * cmdLineArgs.P, cudaMemcpyHostToDevice));
/*---------------------------------------------------------------------------------------------------------------*/
/*------------------------------------------------Training-------------------------------------------------------*/
/*---------------------------------------------------------------------------------------------------------------*/
initialize_timer();
start_timer();
for (long t = 0; t < cmdLineArgs.iter; t++) //Time loop
{
for (long s = 0; s < k2; s++) //Bunch loop
{
HANDLE_ERROR(cudaMemcpy(H, d_H, b * sizeof(double) * (cmdLineArgs.M+1), cudaMemcpyDeviceToHost)); //
for (long i = 0; i < b; i++) {
X(i, 0) = H(i, 0) = 1; //bias setting
//required input/output are copied from inputs/outputs to X and Y
memcpy( & X(i, 1), & inputs[cmdLineArgs.N * ((s * b) + i)], cmdLineArgs.N * sizeof(double));
}
Y = & outputs[s * b * cmdLineArgs.P];
cudaMemcpy(d_X, X, b * sizeof(double) * (cmdLineArgs.N + 1), cudaMemcpyHostToDevice);
cudaMemcpy(d_Y, Y, b * sizeof(double) * cmdLineArgs.P, cudaMemcpyHostToDevice);
cudaMemcpy(d_H, H, b * sizeof(double) * (cmdLineArgs.M+1), cudaMemcpyHostToDevice); //set bias on device
/*Forward Phase*/
mm(d_Zh, d_X, d_Wxh, b, cmdLineArgs.N + 1, cmdLineArgs.M); //Zh=X*Wxh
func(d_H, d_Zh, b, cmdLineArgs.M, 1); //H=f1(Zh)
mm(d_Zy, d_H, d_Why, b, cmdLineArgs.M + 1, cmdLineArgs.P); //Zy=H*Why
func(d_P, d_Zy, b, cmdLineArgs.P, 0); //P=fn(Zy)
reduction(d_P, d_sum, b, cmdLineArgs.P); //summation of probabilities for each training sample
prob(d_P, d_P1, d_sum, b, cmdLineArgs.P); //P1=fn(P,sum)
error(d_E, d_P1, d_Y, b, cmdLineArgs.P); //E=P1-Y
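/* Taken together, func/reduction/prob implement a softmax over the P outputs
   (P1 = exp(Zy) / sum(exp(Zy))), and E = P1 - Y matches the output-layer
   gradient one gets when the loss is cross-entropy. */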
/*Backpropagation Phase*/
mtm(d_dWhy, d_H, d_E, cmdLineArgs.M + 1, b, cmdLineArgs.P); //dWhy=H'*E ('->transpose)
delta(d_Why, d_dWhy, cmdLineArgs.M + 1, cmdLineArgs.P, learningrate); //Why=fn(dwhy)
mmt(d_H, d_Why, d_E, b, cmdLineArgs.M + 1, cmdLineArgs.P); //H=Why*E'
gradient_func(d_Zh, d_H, b, cmdLineArgs.M); //Zh=f1"(H) ("->gradient of f1)
mtm(d_dWxh, d_X, d_Zh, cmdLineArgs.N + 1, b, cmdLineArgs.M); //dWxh=X'Zh
delta(d_Wxh, d_dWxh, cmdLineArgs.N + 1, cmdLineArgs.M, learningrate); //Wxh=fn(dWxh)
}
if (k3) {
//printf("k3: %ld\n", k3);
HANDLE_ERROR(cudaMemcpy(H, d_H, k3 * sizeof(double) * (cmdLineArgs.M+1), cudaMemcpyDeviceToHost)); //
for (long i = 0; i < k3; i++) {
X(i, 0) = H(i, 0) = 1;
memcpy( & X(i, 1), & inputs[cmdLineArgs.N * ((k2 * b) + i)], cmdLineArgs.N * sizeof(double));
}
Y = & outputs[k2 * b * cmdLineArgs.P];
cudaMemcpy(d_X, X, k3 * sizeof(double) * (cmdLineArgs.N + 1), cudaMemcpyHostToDevice);
cudaMemcpy(d_Y, Y, k3 * sizeof(double) * cmdLineArgs.P, cudaMemcpyHostToDevice);
cudaMemcpy(d_H, H, k3 * sizeof(double) * (cmdLineArgs.M+1), cudaMemcpyHostToDevice); //set bias on device
//Forward Phase
mm(d_Zh, d_X, d_Wxh, k3, cmdLineArgs.N + 1, cmdLineArgs.M);
func(d_H, d_Zh, k3, cmdLineArgs.M, 1);
mm(d_Zy, d_H, d_Why, k3, cmdLineArgs.M + 1, cmdLineArgs.P);
func(d_P, d_Zy, k3, cmdLineArgs.P, 0);
reduction(d_P, d_sum, k3, cmdLineArgs.P);
prob(d_P, d_P1, d_sum, k3, cmdLineArgs.P);
error(d_E, d_P1, d_Y, k3, cmdLineArgs.P);
//Backpropagation Phase
mtm(d_dWhy, d_H, d_E, cmdLineArgs.M + 1, k3, cmdLineArgs.P);
delta(d_Why, d_dWhy, cmdLineArgs.M + 1, cmdLineArgs.P, learningrate);
mmt(d_H, d_Why, d_E, k3, cmdLineArgs.M + 1, cmdLineArgs.P);
gradient_func(d_Zh, d_H, k3, cmdLineArgs.M);
mtm(d_dWxh, d_X, d_Zh, cmdLineArgs.N + 1, k3, cmdLineArgs.M);
delta(d_Wxh, d_dWxh, cmdLineArgs.N + 1, cmdLineArgs.M, learningrate);
}
}
stop_timer();
double time = elapsed_time();
double nFlops = b* (cmdLineArgs.N + 1)* cmdLineArgs.M*2;
nFlops += b* cmdLineArgs.M;
nFlops += b *(cmdLineArgs.M + 1) * cmdLineArgs.P*2;
nFlops += b * cmdLineArgs.P * 4; //for 4 different kernels;
nFlops += (cmdLineArgs.M + 1)* b * cmdLineArgs.P*2;
nFlops += (cmdLineArgs.M + 1) * cmdLineArgs.P *2;
nFlops += b* (cmdLineArgs.M + 1)* cmdLineArgs.P *2;
nFlops += b * cmdLineArgs.M;
nFlops += (cmdLineArgs.N + 1)* b * cmdLineArgs.M *2;
nFlops += (cmdLineArgs.N + 1)* cmdLineArgs.M * 2;
nFlops *= cmdLineArgs.iter * k2;
nFlops += k3* (cmdLineArgs.N + 1)* cmdLineArgs.M*2;
nFlops += k3* cmdLineArgs.M;
nFlops += k3 *(cmdLineArgs.M + 1) * cmdLineArgs.P*2;
nFlops += k3 * cmdLineArgs.P * 4; //for 4 different kernels;
nFlops += (cmdLineArgs.M + 1)* k3 * cmdLineArgs.P*2;
nFlops += (cmdLineArgs.M + 1) * cmdLineArgs.P *2;
nFlops += k3* (cmdLineArgs.M + 1)* cmdLineArgs.P *2;
nFlops += k3 * cmdLineArgs.M;
nFlops += (cmdLineArgs.N + 1)* k3 * cmdLineArgs.M *2;
nFlops += (cmdLineArgs.N + 1)* cmdLineArgs.M * 2;
double nFlopsPerSec = nFlops/time;
double nGFlopsPerSec = nFlopsPerSec*1e-9;
printf("Time: %lf\n", time);
//printf("Time: %lf GFlops: %lf\n", time, nGFlopsPerSec);
cudaDeviceSynchronize();
//get results from the device to host
HANDLE_ERROR(cudaMemcpy(Wxh, d_Wxh, (cmdLineArgs.N + 1) * sizeof(double) * cmdLineArgs.M, cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(Why, d_Why, (cmdLineArgs.M + 1) * sizeof(double) * cmdLineArgs.P, cudaMemcpyDeviceToHost));
/*---------------------------------------------------------------------------------------------------------------*/
/*----------------------------------------------Print outputs----------------------------------------------------*/
/*---------------------------------------------------------------------------------------------------------------*/
if (cmdLineArgs.V) {
/*Need the following 2 statements for Testing*/
displayMatrix1("input/hidden weights", Wxh, cmdLineArgs.N + 1, cmdLineArgs.M);
displayMatrix1("hidden/output weights", Why, cmdLineArgs.M + 1, cmdLineArgs.P);
/* Useful for analyzing the accuracy of prediction */
/*if(k3)
{
displayVector ("last input", &X[k3-1][1], cmdLineArgs.N);
displayVector ("last output", Y[k3-1], cmdLineArgs.P);
displayVector ("predicted output",P1[k3-1], cmdLineArgs.P);
}
else
{
displayVector ("last input", &X[b-1][1], cmdLineArgs.N);
displayVector ("last output", Y[b-1], cmdLineArgs.P);
displayVector ("predicted output",P1[b-1], cmdLineArgs.P);
}*/
}
/*---------------------------------------------------------------------------------------------------------------*/
/*----------------------------------------------Free Memory------------------------------------------------------*/
/*---------------------------------------------------------------------------------------------------------------*/
free(inputs);
free(outputs);
free(X);
free(Zh);
free(Zy);
free(H);
free(E);
free(P);
free(P1);
free(sum);
free(Wxh);
free(Why);
free(dWxh);
free(dWhy);
cudaFree(d_X);
cudaFree(d_Y);
cudaFree(d_Zh);
cudaFree(d_Zy);
cudaFree(d_H);
cudaFree(d_E);
cudaFree(d_P);
cudaFree(d_P1);
cudaFree(d_sum);
cudaFree(d_Wxh);
cudaFree(d_Why);
cudaFree(d_dWxh);
cudaFree(d_dWhy);
/*-------------------------------------------------------END-----------------------------------------------------*/
return 0;
}
|
44c47c14849a34c137e3a1d332d57a5cf1a43b40.hip | // !!! This is a file automatically generated by hipify!!!
/* Dexter Barrows
dbarrows.github.io
McMaster University
2016
*/
#include <hip/hip_runtime.h>
#include <iostream>
#include <fstream>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <string>
#include <sstream>
#include <cmath>
#include "timer.h"
#include "rand.h"
#include "readdata.h"
#define NP (2*2500) // number of particles
#define N 500.0 // population size
#define R0true 3.0 // infectiousness
#define rtrue 0.1 // recovery rate
#define etatrue 0.5 // real drift attraction strength
#define berrtrue 0.5 // real beta drift noise
#define phitrue 0.5 // real connectivity strength
#define merr 10.0 // expected measurement error
#define I0 5.0 // Initial infected individuals
#define PSC 0.5 // sensitive parameter perturbation scaling
#define NLOC 10
#define PI 3.141592654f
// Wrapper for CUDA calls, from CUDA API
// Modified to also print the error code and string
# define CUDA_CALL(x) do { if ((x) != hipSuccess ) { \
std::cout << " Error at " << __FILE__ << ":" << __LINE__ << std::endl; \
std::cout << " Error was " << x << " " << hipGetErrorString(x) << std::endl; \
return EXIT_FAILURE ;}} while (0) \
typedef struct {
float R0;
float r;
float sigma;
float eta;
float berr;
float phi;
float S[NLOC];
float I[NLOC];
float R[NLOC];
float B[NLOC];
float Iinit[NLOC];
hiprandState_t randState; // PRNG state
} Particle;
__host__ std::string getHRmemsize (size_t memsize);
__host__ std::string getHRtime (float runtime);
__device__ void exp_euler_SSIR(float h, float t0, float tn, Particle * particle, int * neinum, int * neibmat, int nloc);
__device__ void copyParticle(Particle * dst, Particle * src, int nloc);
/* Initialize all PRNG states, get starting state vector using initial distribution
*/
__global__ void initializeParticles (Particle * particles, int nloc) {
int id = blockIdx.x*blockDim.x + threadIdx.x; // global thread ID
if (id < NP) {
// initialize PRNG state
hiprandState_t state;
hiprand_init(id, 0, 0, &state);
float R0can, rcan, sigmacan, Iinitcan, etacan, berrcan, phican;
do {
R0can = R0true + R0true*hiprand_normal(&state);
} while (R0can < 0);
particles[id].R0 = R0can;
do {
rcan = rtrue + rtrue*hiprand_normal(&state);
} while (rcan < 0);
particles[id].r = rcan;
for (int loc = 0; loc < nloc; loc++)
particles[id].B[loc] = (float) R0can * rcan / N;
do {
sigmacan = merr + merr*hiprand_normal(&state);
} while (sigmacan < 0);
particles[id].sigma = sigmacan;
do {
etacan = etatrue + PSC*etatrue*hiprand_normal(&state);
} while (etacan < 0 || etacan > 1);
particles[id].eta = etacan;
do {
berrcan = berrtrue + PSC*berrtrue*hiprand_normal(&state);
} while (berrcan < 0);
particles[id].berr = berrcan;
do {
phican = phitrue + PSC*phitrue*hiprand_normal(&state);
} while (phican <= 0 || phican >= 1);
particles[id].phi = phican;
for (int loc = 0; loc < nloc; loc++) {
do {
Iinitcan = I0 + I0*hiprand_normal(&state);
} while (Iinitcan < 0 || N < Iinitcan);
particles[id].Iinit[loc] = Iinitcan;
}
particles[id].randState = state;
}
}
__global__ void resetStates (Particle * particles, int nloc) {
int id = blockIdx.x*blockDim.x + threadIdx.x; // global thread ID
if (id < NP) {
for (int loc = 0; loc < nloc; loc++) {
particles[id].S[loc] = N - particles[id].Iinit[loc];
particles[id].I[loc] = particles[id].Iinit[loc];
particles[id].R[loc] = 0.0;
}
}
}
__global__ void clobberParams (Particle * particles, int nloc) {
int id = blockIdx.x*blockDim.x + threadIdx.x; // global thread ID
if (id < NP) {
particles[id].R0 = R0true;
particles[id].r = rtrue;
particles[id].sigma = merr;
particles[id].eta = etatrue;
particles[id].berr = berrtrue;
particles[id].phi = phitrue;
for (int loc = 0; loc < nloc; loc++) {
particles[id].Iinit[loc] = I0;
}
}
}
/* Project particles forward one observation interval; perturbation and
   weighting against the data are handled by the separate kernels below
   int t - time step number (1,...,T), used by weight()
*/
__global__ void project (Particle * particles, int * neinum, int * neibmat, int nloc) {
int id = blockIdx.x*blockDim.x + threadIdx.x; // global id
if (id < NP) {
// project forward
exp_euler_SSIR(1.0/7.0, 0.0, 1.0, &particles[id], neinum, neibmat, nloc);
}
}
__global__ void weight(float * data, Particle * particles, double * w, int t, int T, int nloc) {
int id = blockIdx.x*blockDim.x + threadIdx.x; // global id
if (id < NP) {
float merr_par = particles[id].sigma;
// Get weight and save
double w_local = 1.0;
for (int loc = 0; loc < nloc; loc++) {
float y_diff = data[loc*T + t] - particles[id].I[loc];
w_local *= 1.0/(merr_par*sqrt(2.0*PI)) * exp( - y_diff*y_diff / (2.0*merr_par*merr_par) );
}
w[id] = w_local;
}
}
__global__ void stashParticles (Particle * particles, Particle * particles_old, int nloc) {
int id = blockIdx.x*blockDim.x + threadIdx.x; // global id
if (id < NP) {
// COPY PARTICLE
copyParticle(&particles_old[id], &particles[id], nloc);
}
}
/* The 0th thread will perform cumulative sum on the weights.
There may be a faster way to do this, will investigate.
*/
__global__ void cumsumWeights (double * w) {
int id = blockIdx.x*blockDim.x + threadIdx.x; // global thread ID
// compute cumulative weights
if (id == 0) {
for (int i = 1; i < NP; i++)
w[i] += w[i-1];
}
}
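/* Editorial note: a faster alternative to the single-thread scan above is a
   parallel prefix sum. Assuming Thrust (rocThrust under HIP) is available,
   the same cumulative weights could be computed from host code with:
     #include <thrust/scan.h>
     #include <thrust/execution_policy.h>
     thrust::inclusive_scan(thrust::device, w, w + NP, w);
   The simple kernel above is kept for clarity.
*/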
/* Resample from all particle states within cell
*/
__global__ void resample (Particle * particles, Particle * particles_old, double * w, int nloc) {
int id = blockIdx.x*blockDim.x + threadIdx.x;
if (id < NP) {
// resampling proportional to weights
double w_r = hiprand_uniform(&particles[id].randState) * w[NP-1];
int i = 0;
while (w_r > w[i]) {
i++;
}
// i is now the index of the particle to copy from
copyParticle(&particles[id], &particles_old[i], nloc);
}
}
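/* Editorial note: since w holds cumulative (sorted) weights, the linear search
   above costs O(NP) per particle; a lower-bound binary search gives the same
   index in O(log NP). A minimal sketch using only what is already in scope:
     int lo = 0, hi = NP - 1;
     while (lo < hi) {
       int mid = (lo + hi) / 2;
       if (w_r > w[mid]) lo = mid + 1; else hi = mid;
     }
     // lo is now the index of the particle to copy from
*/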
// launch this with probably just nloc threads... block structure/size probably not important
__global__ void reduceStates (Particle * particles, float * countmeans, int t, int T, int nloc) {
int id = blockIdx.x*blockDim.x + threadIdx.x;
if (id < nloc) {
int loc = id;
double countmean_local = 0.0;
for (int n = 0; n < NP; n++) {
countmean_local += particles[n].I[loc] / NP;
}
countmeans[loc*T + t] = (float) countmean_local;
}
}
__global__ void perturbParticles(Particle * particles, int nloc, int passnum, double coolrate) {
//double coolcoef = exp( - (double) passnum / coolrate );
double coolcoef = pow(coolrate, passnum);
double spreadR0 = coolcoef * R0true / 10.0;
double spreadr = coolcoef * rtrue / 10.0;
double spreadsigma = coolcoef * merr / 10.0;
double spreadIinit = coolcoef * I0 / 10.0;
double spreadeta = coolcoef * etatrue / 10.0;
double spreadberr = coolcoef * berrtrue / 10.0;
double spreadphi = coolcoef * phitrue / 10.0;
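  // For illustration (coolrate value assumed): with coolrate = 0.9, pass 10
  // scales every spread above by 0.9^10, roughly 0.35 of its initial size,
  // so the perturbations shrink geometrically across filtering passes.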
double R0can, rcan, sigmacan, Iinitcan, etacan, berrcan, phican;
int id = blockIdx.x*blockDim.x + threadIdx.x;
if (id < NP) {
do {
R0can = particles[id].R0 + spreadR0*hiprand_normal(&particles[id].randState);
} while (R0can < 0);
particles[id].R0 = R0can;
do {
rcan = particles[id].r + spreadr*hiprand_normal(&particles[id].randState);
} while (rcan < 0);
particles[id].r = rcan;
do {
sigmacan = particles[id].sigma + spreadsigma*hiprand_normal(&particles[id].randState);
} while (sigmacan < 0);
particles[id].sigma = sigmacan;
do {
etacan = particles[id].eta + PSC*spreadeta*hiprand_normal(&particles[id].randState);
} while (etacan < 0 || etacan > 1);
particles[id].eta = etacan;
do {
berrcan = particles[id].berr + PSC*spreadberr*hiprand_normal(&particles[id].randState);
} while (berrcan < 0);
particles[id].berr = berrcan;
do {
phican = particles[id].phi + PSC*spreadphi*hiprand_normal(&particles[id].randState);
} while (phican <= 0 || phican >= 1);
particles[id].phi = phican;
for (int loc = 0; loc < nloc; loc++) {
do {
Iinitcan = particles[id].Iinit[loc] + spreadIinit*hiprand_normal(&particles[id].randState);
} while (Iinitcan < 0 || Iinitcan > 500);
particles[id].Iinit[loc] = Iinitcan;
}
}
}
int main (int argc, char *argv[]) {
int T, nloc;
double restime;
struct timeval tdr0, tdr1, tdrMaster;
// Parse arguments **********************************************
if (argc < 5) {
std::cout << "Not enough arguments" << std::endl;
return 0;
}
std::string arg1(argv[1]); // infection counts
std::string arg2(argv[2]); // neighbour counts
std::string arg3(argv[3]); // neighbour indices
std::string arg4(argv[4]); // outfile: params + runtime
std::cout << "Arguments:" << std::endl;
std::cout << "Infection data: " << arg1 << std::endl;
std::cout << "Neighbour counts: " << arg2 << std::endl;
std::cout << "Neighbour indices: " << arg3 << std::endl;
std::cout << "Outfile " << arg4 << std::endl;
// **************************************************************
// Read count data **********************************************
std::cout << "Getting count data" << std::endl;
float * data = getDataFloat(arg1, &T, &nloc);
size_t datasize = nloc*T*sizeof(float);
// **************************************************************
// Read neinum matrix data **************************************
std::cout << "Getting neighbour count data" << std::endl;
int * neinum = getDataInt(arg2, NULL, NULL);
size_t neinumsize = nloc * sizeof(int);
// **************************************************************
// Read neibmat matrix data *************************************
std::cout << "Getting neighbour count data" << std::endl;
int * neibmat = getDataInt(arg3, NULL, NULL);
size_t neibmatsize = nloc * nloc * sizeof(int);
// **************************************************************
// *****************************************************************************************************
// start timing
gettimeofday (&tdr0, NULL);
// CUDA data ****************************************************
std::cout << "Allocating device storage" << std::endl;
float * d_data; // device copy of data
Particle * particles; // particles
Particle * particles_old; // intermediate particle states
double * w; // weights
int * d_neinum; // device copy of adjacency matrix
int * d_neibmat; // device copy of neighbour counts matrix
float * countmeans; // host copy of reduced infection count means from last pass
float * d_countmeans; // device copy of reduced infection count means from last pass
CUDA_CALL( hipMalloc( (void**) &d_data , datasize ) );
CUDA_CALL( hipMalloc( (void**) &particles , NP*sizeof(Particle)) );
CUDA_CALL( hipMalloc( (void**) &particles_old , NP*sizeof(Particle)) );
CUDA_CALL( hipMalloc( (void**) &w , NP*sizeof(double)) );
CUDA_CALL( hipMalloc( (void**) &d_neinum , neinumsize) );
CUDA_CALL( hipMalloc( (void**) &d_neibmat , neibmatsize) );
CUDA_CALL( hipMalloc( (void**) &d_countmeans , nloc*T*sizeof(float)) );
gettimeofday (&tdr1, NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
std::cout << "\t" << getHRtime(restime) << std::endl;
size_t avail, total;
hipMemGetInfo( &avail, &total );
size_t used = total - avail;
std::cout << "\t[" << getHRmemsize(used) << "] used of [" << getHRmemsize(total) << "]" <<std::endl;
std::cout << "Copying data to device" << std::endl;
gettimeofday (&tdr0, NULL);
CUDA_CALL( hipMemcpy(d_data , data , datasize , hipMemcpyHostToDevice) );
CUDA_CALL( hipMemcpy(d_neinum , neinum , neinumsize , hipMemcpyHostToDevice) );
CUDA_CALL( hipMemcpy(d_neibmat , neibmat , neibmatsize , hipMemcpyHostToDevice) );
gettimeofday (&tdr1, NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
std::cout << "\t" << getHRtime(restime) << std::endl;
// **************************************************************
// Initialize particles *****************************************
std::cout << "Initializing particles" << std::endl;
//gettimeofday (&tdr0, NULL);
int nThreads = 32;
int nBlocks = ceil( (float) NP / nThreads);
hipLaunchKernelGGL(( initializeParticles) , dim3(nBlocks), dim3(nThreads) , 0, 0, particles, nloc);
CUDA_CALL( hipGetLastError() );
CUDA_CALL( hipDeviceSynchronize() );
hipLaunchKernelGGL(( initializeParticles) , dim3(nBlocks), dim3(nThreads) , 0, 0, particles_old, nloc);
CUDA_CALL( hipGetLastError() );
CUDA_CALL( hipDeviceSynchronize() );
//gettimeofday (&tdr1, NULL);
//timeval_subtract (&restime, &tdr1, &tdr0);
//std::cout << "\t" << getHRtime(restime) << std::endl;
hipMemGetInfo( &avail, &total );
used = total - avail;
std::cout << "\t[" << getHRmemsize(used) << "] used of [" << getHRmemsize(total) << "]" <<std::endl;
// **************************************************************
// Starting filtering *******************************************
for (int pass = 0; pass < 50; pass++) {
nThreads = 32;
nBlocks = ceil( (float) NP / nThreads);
hipLaunchKernelGGL(( resetStates) , dim3(nBlocks), dim3(nThreads) , 0, 0, particles, nloc);
CUDA_CALL( hipGetLastError() );
CUDA_CALL( hipDeviceSynchronize() );
nThreads = 1;
nBlocks = 10;
if (pass == 49) {
hipLaunchKernelGGL(( reduceStates) , dim3(nBlocks), dim3(nThreads) , 0, 0, particles, d_countmeans, 0, T, nloc);
CUDA_CALL( hipGetLastError() );
CUDA_CALL( hipDeviceSynchronize() );
}
int Tlim = T;
for (int t = 1; t < Tlim; t++) {
// Projection ************************************************
nThreads = 32;
nBlocks = ceil( (float) NP / nThreads);
hipLaunchKernelGGL(( project) , dim3(nBlocks), dim3(nThreads) , 0, 0, particles, d_neinum, d_neibmat, nloc);
CUDA_CALL( hipGetLastError() );
CUDA_CALL( hipDeviceSynchronize() );
// Weighting *************************************************
nThreads = 32;
nBlocks = ceil( (float) NP / nThreads);
hipLaunchKernelGGL(( weight) , dim3(nBlocks), dim3(nThreads) , 0, 0, d_data, particles, w, t, T, nloc);
CUDA_CALL( hipGetLastError() );
CUDA_CALL( hipDeviceSynchronize() );
// Cumulative sum ********************************************
nThreads = 1;
nBlocks = 1;
hipLaunchKernelGGL(( cumsumWeights) , dim3(nBlocks), dim3(nThreads) , 0, 0, w);
CUDA_CALL( hipGetLastError() );
CUDA_CALL( hipDeviceSynchronize() );
// Save particles for resampling from *************************
nThreads = 32;
nBlocks = ceil( (float) NP / nThreads);
hipLaunchKernelGGL(( stashParticles) , dim3(nBlocks), dim3(nThreads) , 0, 0, particles, particles_old, nloc);
CUDA_CALL( hipGetLastError() );
CUDA_CALL( hipDeviceSynchronize() );
// Resampling *************************************************
nThreads = 32;
nBlocks = ceil( (float) NP/ nThreads);
hipLaunchKernelGGL(( resample) , dim3(nBlocks), dim3(nThreads) , 0, 0, particles, particles_old, w, nloc);
CUDA_CALL( hipGetLastError() );
CUDA_CALL( hipDeviceSynchronize() );
// Reduction **************************************************
if (pass == 49) {
nThreads = 1;
nBlocks = 10;
hipLaunchKernelGGL(( reduceStates) , dim3(nBlocks), dim3(nThreads) , 0, 0, particles, d_countmeans, t, T, nloc);
CUDA_CALL( hipGetLastError() );
CUDA_CALL( hipDeviceSynchronize() );
}
// Perturb particles ******************************************
nThreads = 32;
nBlocks = ceil( (float) NP/ nThreads);
hipLaunchKernelGGL(( perturbParticles) , dim3(nBlocks), dim3(nThreads) , 0, 0, particles, nloc, pass, 0.975);
CUDA_CALL( hipGetLastError() );
CUDA_CALL( hipDeviceSynchronize() );
} // end time
} // end pass
std::cout.precision(10);
countmeans = (float*) malloc (nloc*T*sizeof(float));
hipMemcpy(countmeans, d_countmeans, nloc*T*sizeof(float), hipMemcpyDeviceToHost);
// stop master timer and print
gettimeofday (&tdrMaster, NULL);
timeval_subtract(&restime, &tdrMaster, &tdr0);
std::cout << "Time: " << getHRtime(restime) << std::endl;
std::cout << "Rawtime: " << restime << std::endl;
// Write results out
std::string filename = arg4;
std::cout << "Writing results to file '" << filename << "' ..." << std::endl;
std::ofstream outfile;
outfile.open(filename.c_str());
for(int loc = 0; loc < nloc; loc++) {
for (int t = 0; t < T; t++) {
outfile << countmeans[loc*T + t] << " ";
}
outfile << std::endl;
}
outfile.close();
hipFree(d_data);
hipFree(particles);
hipFree(particles_old);
hipFree(w);
hipFree(d_neinum);
hipFree(d_neibmat);
hipFree(d_countmeans);
exit (EXIT_SUCCESS);
}
/* Use the Explicit Euler integration scheme to integrate the spatial SIR model forward in time
   float h              - time step size
   float t0             - start time
   float tn             - stop time
   Particle * particle  - particle whose per-location S/I/R/B state vectors are advanced in place
   int * neinum         - number of neighbours of each location
   int * neibmat        - neighbour index matrix (nloc x nloc, 1-based location indices)
   int nloc             - number of locations
   The transmission rate B[loc] follows a mean-reverting random walk in log space
   (attraction towards log(B0) with strength eta, noise scale berr).
*/
__device__ void exp_euler_SSIR(float h, float t0, float tn, Particle * particle, int * neinum, int * neibmat, int nloc) {
int num_steps = floor( (tn-t0) / h );
float * S = particle->S;
float * I = particle->I;
float * R = particle->R;
float * B = particle->B;
// create last state vectors
float * S_last = (float*) malloc (nloc*sizeof(float));
float * I_last = (float*) malloc (nloc*sizeof(float));
float * R_last = (float*) malloc (nloc*sizeof(float));
float * B_last = (float*) malloc (nloc*sizeof(float));
float R0 = particle->R0;
float r = particle->r;
float B0 = R0 * r / N;
float eta = particle->eta;
float berr = particle->berr;
float phi = particle->phi;
for(int t = 0; t < num_steps; t++) {
for (int loc = 0; loc < nloc; loc++) {
S_last[loc] = S[loc];
I_last[loc] = I[loc];
R_last[loc] = R[loc];
B_last[loc] = B[loc];
}
for (int loc = 0; loc < nloc; loc++) {
B[loc] = exp( log(B_last[loc]) + eta*(log(B0) - log(B_last[loc])) + berr*hiprand_normal(&(particle->randState)) );
int n = neinum[loc];
float sphi = 1.0 - phi*( (float) n/(n+1.0) );
float ophi = phi/(n+1.0);
float nBIsum = 0.0;
for (int j = 0; j < n; j++)
nBIsum += B_last[neibmat[nloc*loc + j]-1] * I_last[neibmat[nloc*loc + j]-1];
float BSI = S_last[loc]*( sphi*B_last[loc]*I_last[loc] + ophi*nBIsum );
float rI = r*I_last[loc];
// get derivatives
float dS = - BSI;
float dI = BSI - rI;
float dR = rI;
// step forward by h
S[loc] += h*dS;
I[loc] += h*dI;
R[loc] += h*dR;
}
}
free(S_last);
free(I_last);
free(R_last);
free(B_last);
}
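/* Editorial note: S_last/I_last/R_last/B_last are allocated with in-kernel malloc on every
   call, which uses the (slow) device heap; since the Particle state arrays are sized NLOC,
   fixed-size local buffers (e.g. float S_last[NLOC]) would likely be cheaper. Noted as a
   comment only, the original behaviour is unchanged. */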
/* Convenience function for the particle resampling process
*/
__device__ void copyParticle(Particle * dst, Particle * src, int nloc) {
dst->R0 = src->R0;
dst->r = src->r;
dst->sigma = src->sigma;
dst->eta = src->eta;
dst->berr = src->berr;
dst->phi = src->phi;
for (int n = 0; n < nloc; n++) {
dst->S[n] = src->S[n];
dst->I[n] = src->I[n];
dst->R[n] = src->R[n];
dst->B[n] = src->B[n];
dst->Iinit[n] = src->Iinit[n];
}
}
/* Convert memory size in bytes to human-readable format
*/
std::string getHRmemsize (size_t memsize) {
std::stringstream ss;
std::string valstring;
int kb = 1024;
int mb = kb*1024;
int gb = mb*1024;
if (memsize <= kb)
ss << memsize << " B";
else if (memsize > kb && memsize <= mb)
ss << (float) memsize/ kb << " KB";
else if (memsize > mb && memsize <= gb)
ss << (float) memsize/ mb << " MB";
else
ss << (float) memsize/ gb << " GB";
valstring = ss.str();
return valstring;
}
/* Convert time in seconds to human readable format
*/
std::string getHRtime (float runtime) {
std::stringstream ss;
std::string valstring;
int mt = 60;
int ht = mt*60;
int dt = ht*24;
if (runtime <= mt)
ss << runtime << " s";
else if (runtime > mt && runtime <= ht)
ss << runtime/mt << " m";
    else if (runtime > ht && runtime <= dt)
        ss << runtime/ht << " h";
    else
        ss << runtime/dt << " d";
valstring = ss.str();
return valstring;
} | 44c47c14849a34c137e3a1d332d57a5cf1a43b40.cu | /* Dexter Barrows
dbarrows.github.io
McMaster University
2016
*/
#include <cuda.h>
#include <iostream>
#include <fstream>
#include <curand.h>
#include <curand_kernel.h>
#include <string>
#include <sstream>
#include <cmath>
#include "timer.h"
#include "rand.h"
#include "readdata.h"
#define NP (2*2500) // number of particles
#define N 500.0 // population size
#define R0true 3.0 // infectiousness
#define rtrue 0.1 // recovery rate
#define etatrue 0.5 // real drift attraction strength
#define berrtrue 0.5 // real beta drift noise
#define phitrue 0.5 // real connectivity strength
#define merr 10.0 // expected measurement error
#define I0 5.0 // Initial infected individuals
#define PSC 0.5 // sensitive parameter perturbation scaling
#define NLOC 10
#define PI 3.141592654f
// Wrapper for CUDA calls, from CUDA API
// Modified to also print the error code and string
# define CUDA_CALL(x) do { if ((x) != cudaSuccess ) { \
std::cout << " Error at " << __FILE__ << ":" << __LINE__ << std::endl; \
std::cout << " Error was " << x << " " << cudaGetErrorString(x) << std::endl; \
return EXIT_FAILURE ;}} while (0) \
typedef struct {
float R0;
float r;
float sigma;
float eta;
float berr;
float phi;
float S[NLOC];
float I[NLOC];
float R[NLOC];
float B[NLOC];
float Iinit[NLOC];
curandState randState; // PRNG state
} Particle;
__host__ std::string getHRmemsize (size_t memsize);
__host__ std::string getHRtime (float runtime);
__device__ void exp_euler_SSIR(float h, float t0, float tn, Particle * particle, int * neinum, int * neibmat, int nloc);
__device__ void copyParticle(Particle * dst, Particle * src, int nloc);
/* Initialize all PRNG states, get starting state vector using initial distribution
*/
__global__ void initializeParticles (Particle * particles, int nloc) {
int id = blockIdx.x*blockDim.x + threadIdx.x; // global thread ID
if (id < NP) {
// initialize PRNG state
curandState state;
curand_init(id, 0, 0, &state);
float R0can, rcan, sigmacan, Iinitcan, etacan, berrcan, phican;
do {
R0can = R0true + R0true*curand_normal(&state);
} while (R0can < 0);
particles[id].R0 = R0can;
do {
rcan = rtrue + rtrue*curand_normal(&state);
} while (rcan < 0);
particles[id].r = rcan;
for (int loc = 0; loc < nloc; loc++)
particles[id].B[loc] = (float) R0can * rcan / N;
do {
sigmacan = merr + merr*curand_normal(&state);
} while (sigmacan < 0);
particles[id].sigma = sigmacan;
do {
etacan = etatrue + PSC*etatrue*curand_normal(&state);
} while (etacan < 0 || etacan > 1);
particles[id].eta = etacan;
do {
berrcan = berrtrue + PSC*berrtrue*curand_normal(&state);
} while (berrcan < 0);
particles[id].berr = berrcan;
do {
phican = phitrue + PSC*phitrue*curand_normal(&state);
} while (phican <= 0 || phican >= 1);
particles[id].phi = phican;
for (int loc = 0; loc < nloc; loc++) {
do {
Iinitcan = I0 + I0*curand_normal(&state);
} while (Iinitcan < 0 || N < Iinitcan);
particles[id].Iinit[loc] = Iinitcan;
}
particles[id].randState = state;
}
}
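/* Editorial note: each do/while above is rejection sampling from a Gaussian centred on the
   true parameter value, discarding draws outside the admissible range, i.e. a truncated-normal
   prior for the initial particle cloud (PSC scales down the spread of the more sensitive
   parameters eta, berr and phi). */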
__global__ void resetStates (Particle * particles, int nloc) {
int id = blockIdx.x*blockDim.x + threadIdx.x; // global thread ID
if (id < NP) {
for (int loc = 0; loc < nloc; loc++) {
particles[id].S[loc] = N - particles[id].Iinit[loc];
particles[id].I[loc] = particles[id].Iinit[loc];
particles[id].R[loc] = 0.0;
}
}
}
__global__ void clobberParams (Particle * particles, int nloc) {
int id = blockIdx.x*blockDim.x + threadIdx.x; // global thread ID
if (id < NP) {
particles[id].R0 = R0true;
particles[id].r = rtrue;
particles[id].sigma = merr;
particles[id].eta = etatrue;
particles[id].berr = berrtrue;
particles[id].phi = phitrue;
for (int loc = 0; loc < nloc; loc++) {
particles[id].Iinit[loc] = I0;
}
}
}
/* Project each particle's state forward one observation interval by integrating the
   stochastic spatial SIR model (perturbation and weighting are done in separate kernels).
*/
__global__ void project (Particle * particles, int * neinum, int * neibmat, int nloc) {
int id = blockIdx.x*blockDim.x + threadIdx.x; // global id
if (id < NP) {
// project forward
exp_euler_SSIR(1.0/7.0, 0.0, 1.0, &particles[id], neinum, neibmat, nloc);
}
}
__global__ void weight(float * data, Particle * particles, double * w, int t, int T, int nloc) {
int id = blockIdx.x*blockDim.x + threadIdx.x; // global id
if (id < NP) {
float merr_par = particles[id].sigma;
// Get weight and save
double w_local = 1.0;
for (int loc = 0; loc < nloc; loc++) {
float y_diff = data[loc*T + t] - particles[id].I[loc];
w_local *= 1.0/(merr_par*sqrt(2.0*PI)) * exp( - y_diff*y_diff / (2.0*merr_par*merr_par) );
}
w[id] = w_local;
}
}
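/* Editorial note: a particle's weight is the product over locations of Gaussian likelihoods
   N(observed count ; simulated I[loc], sigma^2), i.e. independent Gaussian observation error
   at every location with the particle's own measurement-error parameter sigma. */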
__global__ void stashParticles (Particle * particles, Particle * particles_old, int nloc) {
int id = blockIdx.x*blockDim.x + threadIdx.x; // global id
if (id < NP) {
// COPY PARTICLE
copyParticle(&particles_old[id], &particles[id], nloc);
}
}
/* The 0th thread will perform cumulative sum on the weights.
There may be a faster way to do this, will investigate.
*/
__global__ void cumsumWeights (double * w) {
int id = blockIdx.x*blockDim.x + threadIdx.x; // global thread ID
// compute cumulative weights
if (id == 0) {
for (int i = 1; i < NP; i++)
w[i] += w[i-1];
}
}
/* Resample from all particle states within cell
*/
__global__ void resample (Particle * particles, Particle * particles_old, double * w, int nloc) {
int id = blockIdx.x*blockDim.x + threadIdx.x;
if (id < NP) {
// resampling proportional to weights
double w_r = curand_uniform(&particles[id].randState) * w[NP-1];
int i = 0;
while (w_r > w[i]) {
i++;
}
// i is now the index of the particle to copy from
copyParticle(&particles[id], &particles_old[i], nloc);
}
}
// launch this with probably just nloc threads... block structure/size probably not important
__global__ void reduceStates (Particle * particles, float * countmeans, int t, int T, int nloc) {
int id = blockIdx.x*blockDim.x + threadIdx.x;
if (id < nloc) {
int loc = id;
double countmean_local = 0.0;
for (int n = 0; n < NP; n++) {
countmean_local += particles[n].I[loc] / NP;
}
countmeans[loc*T + t] = (float) countmean_local;
}
}
__global__ void perturbParticles(Particle * particles, int nloc, int passnum, double coolrate) {
//double coolcoef = exp( - (double) passnum / coolrate );
double coolcoef = pow(coolrate, passnum);
double spreadR0 = coolcoef * R0true / 10.0;
double spreadr = coolcoef * rtrue / 10.0;
double spreadsigma = coolcoef * merr / 10.0;
double spreadIinit = coolcoef * I0 / 10.0;
double spreadeta = coolcoef * etatrue / 10.0;
double spreadberr = coolcoef * berrtrue / 10.0;
double spreadphi = coolcoef * phitrue / 10.0;
double R0can, rcan, sigmacan, Iinitcan, etacan, berrcan, phican;
int id = blockIdx.x*blockDim.x + threadIdx.x;
if (id < NP) {
do {
R0can = particles[id].R0 + spreadR0*curand_normal(&particles[id].randState);
} while (R0can < 0);
particles[id].R0 = R0can;
do {
rcan = particles[id].r + spreadr*curand_normal(&particles[id].randState);
} while (rcan < 0);
particles[id].r = rcan;
do {
sigmacan = particles[id].sigma + spreadsigma*curand_normal(&particles[id].randState);
} while (sigmacan < 0);
particles[id].sigma = sigmacan;
do {
etacan = particles[id].eta + PSC*spreadeta*curand_normal(&particles[id].randState);
} while (etacan < 0 || etacan > 1);
particles[id].eta = etacan;
do {
berrcan = particles[id].berr + PSC*spreadberr*curand_normal(&particles[id].randState);
} while (berrcan < 0);
particles[id].berr = berrcan;
do {
phican = particles[id].phi + PSC*spreadphi*curand_normal(&particles[id].randState);
} while (phican <= 0 || phican >= 1);
particles[id].phi = phican;
for (int loc = 0; loc < nloc; loc++) {
do {
Iinitcan = particles[id].Iinit[loc] + spreadIinit*curand_normal(&particles[id].randState);
} while (Iinitcan < 0 || Iinitcan > 500);
particles[id].Iinit[loc] = Iinitcan;
}
}
}
int main (int argc, char *argv[]) {
int T, nloc;
double restime;
struct timeval tdr0, tdr1, tdrMaster;
// Parse arguments **********************************************
    if (argc < 5) { // four command-line arguments are required (argv[1]..argv[4])
std::cout << "Not enough arguments" << std::endl;
return 0;
}
std::string arg1(argv[1]); // infection counts
std::string arg2(argv[2]); // neighbour counts
std::string arg3(argv[3]); // neighbour indices
std::string arg4(argv[4]); // outfile: params + runtime
std::cout << "Arguments:" << std::endl;
std::cout << "Infection data: " << arg1 << std::endl;
std::cout << "Neighbour counts: " << arg2 << std::endl;
std::cout << "Neighbour indices: " << arg3 << std::endl;
std::cout << "Outfile " << arg4 << std::endl;
// **************************************************************
// Read count data **********************************************
std::cout << "Getting count data" << std::endl;
float * data = getDataFloat(arg1, &T, &nloc);
size_t datasize = nloc*T*sizeof(float);
// **************************************************************
// Read neinum matrix data **************************************
std::cout << "Getting neighbour count data" << std::endl;
int * neinum = getDataInt(arg2, NULL, NULL);
size_t neinumsize = nloc * sizeof(int);
// **************************************************************
// Read neibmat matrix data *************************************
std::cout << "Getting neighbour count data" << std::endl;
int * neibmat = getDataInt(arg3, NULL, NULL);
size_t neibmatsize = nloc * nloc * sizeof(int);
// **************************************************************
// *****************************************************************************************************
// start timing
gettimeofday (&tdr0, NULL);
// CUDA data ****************************************************
std::cout << "Allocating device storage" << std::endl;
float * d_data; // device copy of data
Particle * particles; // particles
Particle * particles_old; // intermediate particle states
double * w; // weights
int * d_neinum; // device copy of adjacency matrix
int * d_neibmat; // device copy of neighbour counts matrix
float * countmeans; // host copy of reduced infection count means from last pass
float * d_countmeans; // device copy of reduced infection count means from last pass
CUDA_CALL( cudaMalloc( (void**) &d_data , datasize ) );
CUDA_CALL( cudaMalloc( (void**) &particles , NP*sizeof(Particle)) );
CUDA_CALL( cudaMalloc( (void**) &particles_old , NP*sizeof(Particle)) );
CUDA_CALL( cudaMalloc( (void**) &w , NP*sizeof(double)) );
CUDA_CALL( cudaMalloc( (void**) &d_neinum , neinumsize) );
CUDA_CALL( cudaMalloc( (void**) &d_neibmat , neibmatsize) );
CUDA_CALL( cudaMalloc( (void**) &d_countmeans , nloc*T*sizeof(float)) );
gettimeofday (&tdr1, NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
std::cout << "\t" << getHRtime(restime) << std::endl;
size_t avail, total;
cudaMemGetInfo( &avail, &total );
size_t used = total - avail;
std::cout << "\t[" << getHRmemsize(used) << "] used of [" << getHRmemsize(total) << "]" <<std::endl;
std::cout << "Copying data to device" << std::endl;
gettimeofday (&tdr0, NULL);
CUDA_CALL( cudaMemcpy(d_data , data , datasize , cudaMemcpyHostToDevice) );
CUDA_CALL( cudaMemcpy(d_neinum , neinum , neinumsize , cudaMemcpyHostToDevice) );
CUDA_CALL( cudaMemcpy(d_neibmat , neibmat , neibmatsize , cudaMemcpyHostToDevice) );
gettimeofday (&tdr1, NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
std::cout << "\t" << getHRtime(restime) << std::endl;
// **************************************************************
// Initialize particles *****************************************
std::cout << "Initializing particles" << std::endl;
//gettimeofday (&tdr0, NULL);
int nThreads = 32;
int nBlocks = ceil( (float) NP / nThreads);
initializeParticles <<< nBlocks, nThreads >>> (particles, nloc);
CUDA_CALL( cudaGetLastError() );
CUDA_CALL( cudaDeviceSynchronize() );
initializeParticles <<< nBlocks, nThreads >>> (particles_old, nloc);
CUDA_CALL( cudaGetLastError() );
CUDA_CALL( cudaDeviceSynchronize() );
//gettimeofday (&tdr1, NULL);
//timeval_subtract (&restime, &tdr1, &tdr0);
//std::cout << "\t" << getHRtime(restime) << std::endl;
cudaMemGetInfo( &avail, &total );
used = total - avail;
std::cout << "\t[" << getHRmemsize(used) << "] used of [" << getHRmemsize(total) << "]" <<std::endl;
// **************************************************************
// Starting filtering *******************************************
for (int pass = 0; pass < 50; pass++) {
nThreads = 32;
nBlocks = ceil( (float) NP / nThreads);
resetStates <<< nBlocks, nThreads >>> (particles, nloc);
CUDA_CALL( cudaGetLastError() );
CUDA_CALL( cudaDeviceSynchronize() );
nThreads = 1;
nBlocks = 10;
if (pass == 49) {
reduceStates <<< nBlocks, nThreads >>> (particles, d_countmeans, 0, T, nloc);
CUDA_CALL( cudaGetLastError() );
CUDA_CALL( cudaDeviceSynchronize() );
}
int Tlim = T;
for (int t = 1; t < Tlim; t++) {
// Projection ************************************************
nThreads = 32;
nBlocks = ceil( (float) NP / nThreads);
project <<< nBlocks, nThreads >>> (particles, d_neinum, d_neibmat, nloc);
CUDA_CALL( cudaGetLastError() );
CUDA_CALL( cudaDeviceSynchronize() );
// Weighting *************************************************
nThreads = 32;
nBlocks = ceil( (float) NP / nThreads);
weight <<< nBlocks, nThreads >>>(d_data, particles, w, t, T, nloc);
CUDA_CALL( cudaGetLastError() );
CUDA_CALL( cudaDeviceSynchronize() );
// Cumulative sum ********************************************
nThreads = 1;
nBlocks = 1;
cumsumWeights <<< nBlocks, nThreads >>> (w);
CUDA_CALL( cudaGetLastError() );
CUDA_CALL( cudaDeviceSynchronize() );
// Save particles for resampling from *************************
nThreads = 32;
nBlocks = ceil( (float) NP / nThreads);
stashParticles <<< nBlocks, nThreads >>> (particles, particles_old, nloc);
CUDA_CALL( cudaGetLastError() );
CUDA_CALL( cudaDeviceSynchronize() );
// Resampling *************************************************
nThreads = 32;
nBlocks = ceil( (float) NP/ nThreads);
resample <<< nBlocks, nThreads >>> (particles, particles_old, w, nloc);
CUDA_CALL( cudaGetLastError() );
CUDA_CALL( cudaDeviceSynchronize() );
// Reduction **************************************************
if (pass == 49) {
nThreads = 1;
nBlocks = 10;
reduceStates <<< nBlocks, nThreads >>> (particles, d_countmeans, t, T, nloc);
CUDA_CALL( cudaGetLastError() );
CUDA_CALL( cudaDeviceSynchronize() );
}
// Perturb particles ******************************************
nThreads = 32;
nBlocks = ceil( (float) NP/ nThreads);
perturbParticles <<< nBlocks, nThreads >>> (particles, nloc, pass, 0.975);
CUDA_CALL( cudaGetLastError() );
CUDA_CALL( cudaDeviceSynchronize() );
} // end time
} // end pass
std::cout.precision(10);
countmeans = (float*) malloc (nloc*T*sizeof(float));
cudaMemcpy(countmeans, d_countmeans, nloc*T*sizeof(float), cudaMemcpyDeviceToHost);
// stop master timer and print
gettimeofday (&tdrMaster, NULL);
timeval_subtract(&restime, &tdrMaster, &tdr0);
std::cout << "Time: " << getHRtime(restime) << std::endl;
std::cout << "Rawtime: " << restime << std::endl;
// Write results out
std::string filename = arg4;
std::cout << "Writing results to file '" << filename << "' ..." << std::endl;
std::ofstream outfile;
outfile.open(filename.c_str());
for(int loc = 0; loc < nloc; loc++) {
for (int t = 0; t < T; t++) {
outfile << countmeans[loc*T + t] << " ";
}
outfile << std::endl;
}
outfile.close();
cudaFree(d_data);
cudaFree(particles);
cudaFree(particles_old);
cudaFree(w);
cudaFree(d_neinum);
cudaFree(d_neibmat);
cudaFree(d_countmeans);
exit (EXIT_SUCCESS);
}
/* Use the Explicit Euler integration scheme to integrate the spatial SIR model forward in time
   float h              - time step size
   float t0             - start time
   float tn             - stop time
   Particle * particle  - particle whose per-location S/I/R/B state vectors are advanced in place
   int * neinum         - number of neighbours of each location
   int * neibmat        - neighbour index matrix (nloc x nloc, 1-based location indices)
   int nloc             - number of locations
   The transmission rate B[loc] follows a mean-reverting random walk in log space
   (attraction towards log(B0) with strength eta, noise scale berr).
*/
__device__ void exp_euler_SSIR(float h, float t0, float tn, Particle * particle, int * neinum, int * neibmat, int nloc) {
int num_steps = floor( (tn-t0) / h );
float * S = particle->S;
float * I = particle->I;
float * R = particle->R;
float * B = particle->B;
// create last state vectors
float * S_last = (float*) malloc (nloc*sizeof(float));
float * I_last = (float*) malloc (nloc*sizeof(float));
float * R_last = (float*) malloc (nloc*sizeof(float));
float * B_last = (float*) malloc (nloc*sizeof(float));
float R0 = particle->R0;
float r = particle->r;
float B0 = R0 * r / N;
float eta = particle->eta;
float berr = particle->berr;
float phi = particle->phi;
for(int t = 0; t < num_steps; t++) {
for (int loc = 0; loc < nloc; loc++) {
S_last[loc] = S[loc];
I_last[loc] = I[loc];
R_last[loc] = R[loc];
B_last[loc] = B[loc];
}
for (int loc = 0; loc < nloc; loc++) {
B[loc] = exp( log(B_last[loc]) + eta*(log(B0) - log(B_last[loc])) + berr*curand_normal(&(particle->randState)) );
int n = neinum[loc];
float sphi = 1.0 - phi*( (float) n/(n+1.0) );
float ophi = phi/(n+1.0);
float nBIsum = 0.0;
for (int j = 0; j < n; j++)
nBIsum += B_last[neibmat[nloc*loc + j]-1] * I_last[neibmat[nloc*loc + j]-1];
float BSI = S_last[loc]*( sphi*B_last[loc]*I_last[loc] + ophi*nBIsum );
float rI = r*I_last[loc];
// get derivatives
float dS = - BSI;
float dI = BSI - rI;
float dR = rI;
// step forward by h
S[loc] += h*dS;
I[loc] += h*dI;
R[loc] += h*dR;
}
}
free(S_last);
free(I_last);
free(R_last);
free(B_last);
}
/* Convenience function for the particle resampling process
*/
__device__ void copyParticle(Particle * dst, Particle * src, int nloc) {
dst->R0 = src->R0;
dst->r = src->r;
dst->sigma = src->sigma;
dst->eta = src->eta;
dst->berr = src->berr;
dst->phi = src->phi;
for (int n = 0; n < nloc; n++) {
dst->S[n] = src->S[n];
dst->I[n] = src->I[n];
dst->R[n] = src->R[n];
dst->B[n] = src->B[n];
dst->Iinit[n] = src->Iinit[n];
}
}
/* Convert memory size in bytes to human-readable format
*/
std::string getHRmemsize (size_t memsize) {
std::stringstream ss;
std::string valstring;
int kb = 1024;
int mb = kb*1024;
int gb = mb*1024;
if (memsize <= kb)
ss << memsize << " B";
else if (memsize > kb && memsize <= mb)
ss << (float) memsize/ kb << " KB";
else if (memsize > mb && memsize <= gb)
ss << (float) memsize/ mb << " MB";
else
ss << (float) memsize/ gb << " GB";
valstring = ss.str();
return valstring;
}
/* Convert time in seconds to human readable format
*/
std::string getHRtime (float runtime) {
std::stringstream ss;
std::string valstring;
int mt = 60;
int ht = mt*60;
int dt = ht*24;
if (runtime <= mt)
ss << runtime << " s";
else if (runtime > mt && runtime <= ht)
ss << runtime/mt << " m";
    else if (runtime > ht && runtime <= dt)
        ss << runtime/ht << " h";
    else
        ss << runtime/dt << " d";
valstring = ss.str();
return valstring;
} |
4a62796d97f1997d9e4f8514f75d236825903839.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/* This file is copied from https://github.com/jzbonter/mc-cnn */
extern "C" {
}
#define TB 128
#define DISP_MAX 256
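/* Editorial note: for every (dim0, dim23) position this kernel sums the squares of input over
   the dim1 (feature) axis and stores sum + 1e-5 in norm, i.e. the squared L2 norm plus a small
   epsilon, presumably used afterwards to normalize the feature vectors. */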
__global__ void Normalize_get_norm_(float *input, float *norm, int size1, int size23, int size023)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size023) {
int dim23 = id % size23;
int dim0 = id / size23;
float sum = 0.0;
for (int dim1 = 0; dim1 < size1; dim1++) {
float x = input[(dim0 * size1 + dim1) * size23 + dim23];
sum += x * x;
}
norm[dim0 * size23 + dim23] = sum + 1e-5;
}
} | 4a62796d97f1997d9e4f8514f75d236825903839.cu | #include "includes.h"
/* This file is copied from https://github.com/jzbonter/mc-cnn */
extern "C" {
}
#define TB 128
#define DISP_MAX 256
__global__ void Normalize_get_norm_(float *input, float *norm, int size1, int size23, int size023)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size023) {
int dim23 = id % size23;
int dim0 = id / size23;
float sum = 0.0;
for (int dim1 = 0; dim1 < size1; dim1++) {
float x = input[(dim0 * size1 + dim1) * size23 + dim23];
sum += x * x;
}
norm[dim0 * size23 + dim23] = sum + 1e-5;
}
} |
85ba00da890178b63a023c57baff2cac4942d0a6.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.h"
#include "paddle/phi/kernels/funcs/pooling.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
nvinfer1::Dims PoolPlugin::getOutputDimensions(int index,
const nvinfer1::Dims *inputDims,
int nbInputs) TRT_NOEXCEPT {
assert(nbInputs == 1);
assert(index == 0);
assert(inputDims[0].nbDims == 3);
nvinfer1::Dims const &input_dims = inputDims[0];
nvinfer1::Dims output_dims = input_dims;
output_dims.d[1] = output_shape_[1];
output_dims.d[2] = output_shape_[2];
return output_dims;
}
size_t PoolPlugin::getSerializationSize() const TRT_NOEXCEPT {
return getBaseSerializationSize() + SerializedSize(ceil_mode_) +
SerializedSize(pool_type_) + SerializedSize(adaptive_) +
SerializedSize(exclusive_) + SerializedSize(ksize_) +
SerializedSize(strides_) + SerializedSize(paddings_) +
SerializedSize(real_paddings_) + SerializedSize(input_shape_) +
SerializedSize(output_shape_);
}
// TRT will call this func when we need to serialize the configuration of
// tensorrt.
void PoolPlugin::serialize(void *buffer) const TRT_NOEXCEPT {
serializeBase(buffer);
SerializeValue(&buffer, ceil_mode_);
SerializeValue(&buffer, pool_type_);
SerializeValue(&buffer, adaptive_);
SerializeValue(&buffer, exclusive_);
SerializeValue(&buffer, ksize_);
SerializeValue(&buffer, strides_);
SerializeValue(&buffer, paddings_);
SerializeValue(&buffer, real_paddings_);
SerializeValue(&buffer, input_shape_);
SerializeValue(&buffer, output_shape_);
}
PoolPlugin *PoolPlugin::clone() const TRT_NOEXCEPT {
return new PoolPlugin(ceil_mode_, pool_type_, adaptive_, exclusive_, ksize_,
strides_, paddings_, input_shape_, real_paddings_);
}
int PoolPlugin::enqueue(int batchSize, const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs, void *workspace,
hipStream_t stream) TRT_NOEXCEPT {
#else
void *const *outputs, void *workspace,
hipStream_t stream) TRT_NOEXCEPT {
#endif
auto const &input_dims = this->getInputDims(0);
int input_size = 0;
float const *idata = reinterpret_cast<float const *>(inputs[0]);
float *const *odatas = reinterpret_cast<float *const *>(outputs);
std::vector<int> input_shape = input_shape_;
std::vector<int> output_shape = output_shape_;
input_shape.insert(input_shape.begin(), batchSize);
output_shape.insert(output_shape.begin(), batchSize);
if (pool_type_ == PoolType::max) {
phi::funcs::MaxPool<float> pool_process;
phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::MaxPool<float>, float>
pool2d_forward;
pool2d_forward(idata, input_shape, output_shape, ksize_, strides_,
paddings_, true, false, odatas[0], stream, pool_process);
} else if (pool_type_ == PoolType::avg) {
phi::funcs::AvgPool<float> pool_process;
phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::AvgPool<float>, float>
pool2d_forward;
pool2d_forward(idata, input_shape, output_shape, ksize_, strides_,
paddings_, exclusive_, adaptive_, odatas[0], stream,
pool_process);
}
return hipGetLastError() != hipSuccess;
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
PoolPluginDynamic::PoolPluginDynamic(void const *serialData,
size_t serialLength) {
DeserializeValue(&serialData, &serialLength, &ceil_mode_);
const char *pool_type;
DeserializeValue(&serialData, &serialLength, &pool_type);
pool_type_ = std::string(pool_type);
DeserializeValue(&serialData, &serialLength, &adaptive_);
DeserializeValue(&serialData, &serialLength, &exclusive_);
DeserializeValue(&serialData, &serialLength, &ksize_);
DeserializeValue(&serialData, &serialLength, &strides_);
DeserializeValue(&serialData, &serialLength, &paddings_);
DeserializeValue(&serialData, &serialLength, &is_global_);
}
size_t PoolPluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
return SerializedSize(ceil_mode_) + SerializedSize(pool_type_.c_str()) +
SerializedSize(adaptive_) + SerializedSize(exclusive_) +
SerializedSize(ksize_) + SerializedSize(strides_) +
SerializedSize(paddings_) + SerializedSize(is_global_);
}
void PoolPluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, ceil_mode_);
SerializeValue(&buffer, pool_type_.c_str());
SerializeValue(&buffer, adaptive_);
SerializeValue(&buffer, exclusive_);
SerializeValue(&buffer, ksize_);
SerializeValue(&buffer, strides_);
SerializeValue(&buffer, paddings_);
SerializeValue(&buffer, is_global_);
}
nvinfer1::IPluginV2DynamicExt *PoolPluginDynamic::clone() const TRT_NOEXCEPT {
return new PoolPluginDynamic(ceil_mode_, pool_type_, adaptive_, exclusive_,
ksize_, strides_, paddings_, is_global_);
}
nvinfer1::DimsExprs PoolPluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(nb_inputs, 1,
platform::errors::InvalidArgument(
"The Split plugin should be only one input."));
PADDLE_ENFORCE_EQ(
inputs[0].d[1]->isConstant(), true,
platform::errors::InvalidArgument("The channel dimension should be "
"static, but we found it's dynamic."));
nvinfer1::DimsExprs output(inputs[0]);
if (is_global_ && !adaptive_) {
output.d[2] = expr_builder.constant(1);
output.d[3] = expr_builder.constant(1);
return output;
}
if (is_global_ && adaptive_) {
return inputs[0];
}
if (adaptive_) {
output.d[2] = expr_builder.constant(ksize_[0]);
output.d[3] = expr_builder.constant(ksize_[1]);
return output;
}
auto stri_0 = expr_builder.constant(strides_[0]);
auto stri_1 = expr_builder.constant(strides_[1]);
auto one_value = expr_builder.constant(1);
auto v0_tmp = expr_builder.constant(-ksize_[0] + 2 * paddings_[0]);
auto v1_tmp = expr_builder.constant(-ksize_[1] + 2 * paddings_[1]);
auto ceil_tmp =
expr_builder.constant(-ksize_[0] + 2 * paddings_[0] + strides_[0] - 1);
auto ceil1_tmp =
expr_builder.constant(-ksize_[1] + 2 * paddings_[1] + strides_[1] - 1);
if (!ceil_mode_) {
output.d[2] = expr_builder.operation(
nvinfer1::DimensionOperation::kSUM,
*expr_builder.operation(
nvinfer1::DimensionOperation::kFLOOR_DIV,
*expr_builder.operation(nvinfer1::DimensionOperation::kSUM,
*inputs[0].d[2], *v0_tmp),
*stri_0),
*one_value);
output.d[3] = expr_builder.operation(
nvinfer1::DimensionOperation::kSUM,
*expr_builder.operation(
nvinfer1::DimensionOperation::kFLOOR_DIV,
*expr_builder.operation(nvinfer1::DimensionOperation::kSUM,
*inputs[0].d[3], *v1_tmp),
*stri_1),
*one_value);
} else {
output.d[2] = expr_builder.operation(
nvinfer1::DimensionOperation::kSUM,
*expr_builder.operation(
nvinfer1::DimensionOperation::kFLOOR_DIV,
*expr_builder.operation(nvinfer1::DimensionOperation::kSUM,
*inputs[0].d[2], *ceil_tmp),
*stri_0),
*one_value);
output.d[3] = expr_builder.operation(
nvinfer1::DimensionOperation::kSUM,
*expr_builder.operation(
nvinfer1::DimensionOperation::kFLOOR_DIV,
*expr_builder.operation(nvinfer1::DimensionOperation::kSUM,
*inputs[0].d[3], *ceil1_tmp),
*stri_1),
*one_value);
}
return output;
}
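// Editorial note: the expression-builder arithmetic above encodes the usual pooling output size,
//   H_out = (H_in + 2*pad - ksize) / stride + 1   (floor division),
// with ceil_mode_ adding stride - 1 to the numerator so the division rounds up instead;
// v0_tmp/v1_tmp and ceil_tmp/ceil1_tmp hold the constant parts of those numerators for the
// H and W dimensions respectively.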
bool PoolPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
(in_out && pos < (nb_inputs + nb_outputs));
return ((in_out[pos].type == nvinfer1::DataType::kFLOAT) &&
in_out[pos].format == nvinfer1::PluginFormat::kLINEAR);
}
nvinfer1::DataType PoolPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index, 0,
platform::errors::InvalidArgument(
"The Pool Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT), true,
platform::errors::InvalidArgument(
"The input type should be half or float"));
return input_types[0];
}
int PoolPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs, void *const *outputs,
void *workspace,
hipStream_t stream) TRT_NOEXCEPT {
auto input_dims = input_desc[0].dims;
int n = input_dims.d[0];
int c = input_dims.d[1];
int h = input_dims.d[2];
int w = input_dims.d[3];
const float *input = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
std::vector<int> input_shape, output_shape;
for (int i = 0; i < input_dims.nbDims; i++)
input_shape.push_back(input_dims.d[i]);
output_shape = input_shape;
std::vector<int> ksize = ksize_;
std::vector<int> paddings = paddings_;
if (is_global_) {
ksize[0] = h;
ksize[1] = w;
paddings[0] = 0;
paddings[1] = 0;
output_shape[2] = 1;
output_shape[3] = 1;
} else {
auto data_dim = CalcOutputSize({h, w}, ceil_mode_, adaptive_, ksize_,
strides_, paddings_);
output_shape[2] = data_dim[0];
output_shape[3] = data_dim[1];
}
if (adaptive_) {
output_shape[2] = h;
output_shape[3] = w;
}
if (pool_type_ == "max") {
phi::funcs::MaxPool<float> pool_process;
phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::MaxPool<float>, float>
pool2d_forward;
pool2d_forward(input, input_shape, output_shape, ksize, strides_, paddings,
true, false, output, stream, pool_process);
} else if (pool_type_ == "avg") {
phi::funcs::AvgPool<float> pool_process;
phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::AvgPool<float>, float>
pool2d_forward;
pool2d_forward(input, input_shape, output_shape, ksize, strides_, paddings,
exclusive_, adaptive_, output, stream, pool_process);
}
return hipGetLastError() != hipSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| 85ba00da890178b63a023c57baff2cac4942d0a6.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.h"
#include "paddle/phi/kernels/funcs/pooling.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
nvinfer1::Dims PoolPlugin::getOutputDimensions(int index,
const nvinfer1::Dims *inputDims,
int nbInputs) TRT_NOEXCEPT {
assert(nbInputs == 1);
assert(index == 0);
assert(inputDims[0].nbDims == 3);
nvinfer1::Dims const &input_dims = inputDims[0];
nvinfer1::Dims output_dims = input_dims;
output_dims.d[1] = output_shape_[1];
output_dims.d[2] = output_shape_[2];
return output_dims;
}
size_t PoolPlugin::getSerializationSize() const TRT_NOEXCEPT {
return getBaseSerializationSize() + SerializedSize(ceil_mode_) +
SerializedSize(pool_type_) + SerializedSize(adaptive_) +
SerializedSize(exclusive_) + SerializedSize(ksize_) +
SerializedSize(strides_) + SerializedSize(paddings_) +
SerializedSize(real_paddings_) + SerializedSize(input_shape_) +
SerializedSize(output_shape_);
}
// TRT will call this func when we need to serialize the configuration of
// tensorrt.
void PoolPlugin::serialize(void *buffer) const TRT_NOEXCEPT {
serializeBase(buffer);
SerializeValue(&buffer, ceil_mode_);
SerializeValue(&buffer, pool_type_);
SerializeValue(&buffer, adaptive_);
SerializeValue(&buffer, exclusive_);
SerializeValue(&buffer, ksize_);
SerializeValue(&buffer, strides_);
SerializeValue(&buffer, paddings_);
SerializeValue(&buffer, real_paddings_);
SerializeValue(&buffer, input_shape_);
SerializeValue(&buffer, output_shape_);
}
PoolPlugin *PoolPlugin::clone() const TRT_NOEXCEPT {
return new PoolPlugin(ceil_mode_, pool_type_, adaptive_, exclusive_, ksize_,
strides_, paddings_, input_shape_, real_paddings_);
}
int PoolPlugin::enqueue(int batchSize, const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs, void *workspace,
cudaStream_t stream) TRT_NOEXCEPT {
#else
void *const *outputs, void *workspace,
cudaStream_t stream) TRT_NOEXCEPT {
#endif
auto const &input_dims = this->getInputDims(0);
int input_size = 0;
float const *idata = reinterpret_cast<float const *>(inputs[0]);
float *const *odatas = reinterpret_cast<float *const *>(outputs);
std::vector<int> input_shape = input_shape_;
std::vector<int> output_shape = output_shape_;
input_shape.insert(input_shape.begin(), batchSize);
output_shape.insert(output_shape.begin(), batchSize);
if (pool_type_ == PoolType::max) {
phi::funcs::MaxPool<float> pool_process;
phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::MaxPool<float>, float>
pool2d_forward;
pool2d_forward(idata, input_shape, output_shape, ksize_, strides_,
paddings_, true, false, odatas[0], stream, pool_process);
} else if (pool_type_ == PoolType::avg) {
phi::funcs::AvgPool<float> pool_process;
phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::AvgPool<float>, float>
pool2d_forward;
pool2d_forward(idata, input_shape, output_shape, ksize_, strides_,
paddings_, exclusive_, adaptive_, odatas[0], stream,
pool_process);
}
return cudaGetLastError() != cudaSuccess;
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
PoolPluginDynamic::PoolPluginDynamic(void const *serialData,
size_t serialLength) {
DeserializeValue(&serialData, &serialLength, &ceil_mode_);
const char *pool_type;
DeserializeValue(&serialData, &serialLength, &pool_type);
pool_type_ = std::string(pool_type);
DeserializeValue(&serialData, &serialLength, &adaptive_);
DeserializeValue(&serialData, &serialLength, &exclusive_);
DeserializeValue(&serialData, &serialLength, &ksize_);
DeserializeValue(&serialData, &serialLength, &strides_);
DeserializeValue(&serialData, &serialLength, &paddings_);
DeserializeValue(&serialData, &serialLength, &is_global_);
}
size_t PoolPluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
return SerializedSize(ceil_mode_) + SerializedSize(pool_type_.c_str()) +
SerializedSize(adaptive_) + SerializedSize(exclusive_) +
SerializedSize(ksize_) + SerializedSize(strides_) +
SerializedSize(paddings_) + SerializedSize(is_global_);
}
void PoolPluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, ceil_mode_);
SerializeValue(&buffer, pool_type_.c_str());
SerializeValue(&buffer, adaptive_);
SerializeValue(&buffer, exclusive_);
SerializeValue(&buffer, ksize_);
SerializeValue(&buffer, strides_);
SerializeValue(&buffer, paddings_);
SerializeValue(&buffer, is_global_);
}
nvinfer1::IPluginV2DynamicExt *PoolPluginDynamic::clone() const TRT_NOEXCEPT {
return new PoolPluginDynamic(ceil_mode_, pool_type_, adaptive_, exclusive_,
ksize_, strides_, paddings_, is_global_);
}
nvinfer1::DimsExprs PoolPluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(nb_inputs, 1,
platform::errors::InvalidArgument(
"The Split plugin should be only one input."));
PADDLE_ENFORCE_EQ(
inputs[0].d[1]->isConstant(), true,
platform::errors::InvalidArgument("The channel dimension should be "
"static, but we found it's dynamic."));
nvinfer1::DimsExprs output(inputs[0]);
if (is_global_ && !adaptive_) {
output.d[2] = expr_builder.constant(1);
output.d[3] = expr_builder.constant(1);
return output;
}
if (is_global_ && adaptive_) {
return inputs[0];
}
if (adaptive_) {
output.d[2] = expr_builder.constant(ksize_[0]);
output.d[3] = expr_builder.constant(ksize_[1]);
return output;
}
auto stri_0 = expr_builder.constant(strides_[0]);
auto stri_1 = expr_builder.constant(strides_[1]);
auto one_value = expr_builder.constant(1);
auto v0_tmp = expr_builder.constant(-ksize_[0] + 2 * paddings_[0]);
auto v1_tmp = expr_builder.constant(-ksize_[1] + 2 * paddings_[1]);
auto ceil_tmp =
expr_builder.constant(-ksize_[0] + 2 * paddings_[0] + strides_[0] - 1);
auto ceil1_tmp =
expr_builder.constant(-ksize_[1] + 2 * paddings_[1] + strides_[1] - 1);
if (!ceil_mode_) {
output.d[2] = expr_builder.operation(
nvinfer1::DimensionOperation::kSUM,
*expr_builder.operation(
nvinfer1::DimensionOperation::kFLOOR_DIV,
*expr_builder.operation(nvinfer1::DimensionOperation::kSUM,
*inputs[0].d[2], *v0_tmp),
*stri_0),
*one_value);
output.d[3] = expr_builder.operation(
nvinfer1::DimensionOperation::kSUM,
*expr_builder.operation(
nvinfer1::DimensionOperation::kFLOOR_DIV,
*expr_builder.operation(nvinfer1::DimensionOperation::kSUM,
*inputs[0].d[3], *v1_tmp),
*stri_1),
*one_value);
} else {
output.d[2] = expr_builder.operation(
nvinfer1::DimensionOperation::kSUM,
*expr_builder.operation(
nvinfer1::DimensionOperation::kFLOOR_DIV,
*expr_builder.operation(nvinfer1::DimensionOperation::kSUM,
*inputs[0].d[2], *ceil_tmp),
*stri_0),
*one_value);
output.d[3] = expr_builder.operation(
nvinfer1::DimensionOperation::kSUM,
*expr_builder.operation(
nvinfer1::DimensionOperation::kFLOOR_DIV,
*expr_builder.operation(nvinfer1::DimensionOperation::kSUM,
*inputs[0].d[3], *ceil1_tmp),
*stri_1),
*one_value);
}
return output;
}
bool PoolPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
(in_out && pos < (nb_inputs + nb_outputs));
return ((in_out[pos].type == nvinfer1::DataType::kFLOAT) &&
in_out[pos].format == nvinfer1::PluginFormat::kLINEAR);
}
nvinfer1::DataType PoolPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index, 0,
platform::errors::InvalidArgument(
"The Pool Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT), true,
platform::errors::InvalidArgument(
"The input type should be half or float"));
return input_types[0];
}
int PoolPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs, void *const *outputs,
void *workspace,
cudaStream_t stream) TRT_NOEXCEPT {
auto input_dims = input_desc[0].dims;
int n = input_dims.d[0];
int c = input_dims.d[1];
int h = input_dims.d[2];
int w = input_dims.d[3];
const float *input = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
std::vector<int> input_shape, output_shape;
for (int i = 0; i < input_dims.nbDims; i++)
input_shape.push_back(input_dims.d[i]);
output_shape = input_shape;
std::vector<int> ksize = ksize_;
std::vector<int> paddings = paddings_;
if (is_global_) {
ksize[0] = h;
ksize[1] = w;
paddings[0] = 0;
paddings[1] = 0;
output_shape[2] = 1;
output_shape[3] = 1;
} else {
auto data_dim = CalcOutputSize({h, w}, ceil_mode_, adaptive_, ksize_,
strides_, paddings_);
output_shape[2] = data_dim[0];
output_shape[3] = data_dim[1];
}
if (adaptive_) {
output_shape[2] = h;
output_shape[3] = w;
}
if (pool_type_ == "max") {
phi::funcs::MaxPool<float> pool_process;
phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::MaxPool<float>, float>
pool2d_forward;
pool2d_forward(input, input_shape, output_shape, ksize, strides_, paddings,
true, false, output, stream, pool_process);
} else if (pool_type_ == "avg") {
phi::funcs::AvgPool<float> pool_process;
phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::AvgPool<float>, float>
pool2d_forward;
pool2d_forward(input, input_shape, output_shape, ksize, strides_, paddings,
exclusive_, adaptive_, output, stream, pool_process);
}
return cudaGetLastError() != cudaSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
98d15f9eee9bbbf1260d8b4acd3421bea2343343.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <complex>
#include <cmath>
#include <SFML/Graphics.hpp>
#include <vector>
#include <regex>
using namespace std;
#include "Ensemble.cu"
__global__ void lance_calcul(Mandelbrot m, sf::Uint8 *p,int w, int h,int *b)
{
m.calcul(p,w,h,b);
}
__global__ void lance_calcul(Julia j, sf::Uint8 *p,int w, int h,int *b)
{
j.calcul(p,w,h,b);
}
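// Editorial note: savePicture below packs the per-pixel iteration counts b[] into an RGBA
// buffer (only the blue channel carries data, alpha is 255) and delegates writing the image
// to Ensemble::saveImage.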
void savePicture(Ensemble *e,int w, int h, sf::Uint8* pixels, int*b)
{
int index_b = 0;
for (int i = 0; i < w*h*4 ; i+=4)
{
pixels[i] = 0;
pixels[i+1] = 0;
pixels[i+2] = b[index_b];
pixels[i+3] = 255;
++index_b;
}
e->saveImage(w,h,pixels);
}
vector<string> split(const string& input, const string& regex) {
std::regex re(regex);
std::sregex_token_iterator
first{input.begin(), input.end(), re, -1},
last;
return {first, last};
}
fstream f;
template <typename T>
void lectureFichier(int& w, int&h, vector<T>& v, int it_max)
{
vector<T> result;
string line;
string delimiter ="=";
getline (f,line);
w = stoi(split(line,delimiter)[1]);
getline (f,line);
h = stoi(split(line,delimiter)[1]);
int id_frame,i;
float x,y,zoom;
zoom = 3;
i = 0;
while ( getline (f,line) )
{
string s(line);
if( !s.empty() )
{
if (i==0) { id_frame = stoi(split(s,delimiter)[1]); ++i;}
else if (i==1) { x = stof(split(s,delimiter)[1]); ++i; }
else if (i==2) { y = stof(split(s,delimiter)[1]); ++i; }
}
else
{
i = 0;
v.push_back(T(x,y,it_max,zoom,id_frame));
zoom-=0.2f;
}
}
if ( f.eof() )
{
v.push_back(T(x,y,it_max,zoom,id_frame));
}
}
int main(void)
{
//const int width = 1024;
//const int height = 1024;
const int iteration_max = 1000;
const string file_m ="config_mandelbrot.txt";
const string file_j ="config_julia.txt";
int width = 0;
int height = 0;
    // read the Mandelbrot config file
f.open(file_m);
if ( !f.good()) throw runtime_error("impossible ouvrir fichier config");
vector<Mandelbrot> list_mandelbrot{};
lectureFichier<Mandelbrot>(width,height,list_mandelbrot,iteration_max);
f.close();
    // read the Julia config file
f.open(file_j);
if ( !f.good()) throw runtime_error("impossible ouvrir fichier config");
vector<Julia> list_julia;
lectureFichier<Julia>(width,height,list_julia,iteration_max);
f.close();
sf::Uint8 *pixels,*d_pixels;
int *b,*d_b;
pixels = new sf::Uint8[width*height*4];
b = new int[width * height];
hipMalloc(&d_pixels,sizeof(sf::Uint8) * width * height);
hipMalloc(&d_b,sizeof(int) * width * height);
dim3 bloc(16, 16);
dim3 grille(width / bloc.x, height / bloc.y);
// JULIA //
for ( int i = 0 ; i < list_julia.size() ; ++i)
{
hipLaunchKernelGGL(( lance_calcul), dim3(grille),dim3(bloc), 0, 0, list_julia[i],d_pixels,width,height,d_b);
printf("CUDA error: %s\n", hipGetErrorString(hipGetLastError()));
hipDeviceSynchronize();
//hipMemcpy(pixels,d_pixels,sizeof(sf::Uint8) * width * height,hipMemcpyDeviceToHost);
hipMemcpy(b,d_b,sizeof(int) * width * height,hipMemcpyDeviceToHost);
savePicture(&list_julia[i],width,height,pixels,b);
}
    // create the video
system("ffmpeg -y -r 1 -i Resultat/Julia%d.png julia.mp4");
// MANDELBROT //
for ( int i = 0 ; i < list_mandelbrot.size() ; ++i)
{
hipLaunchKernelGGL(( lance_calcul), dim3(grille),dim3(bloc), 0, 0, list_mandelbrot[i],d_pixels,width,height,d_b);
printf("CUDA error: %s\n", hipGetErrorString(hipGetLastError()));
hipDeviceSynchronize();
hipMemcpy(b,d_b,sizeof(int) * width * height,hipMemcpyDeviceToHost);
savePicture(&list_mandelbrot[i],width,height,pixels,b);
}
    // create the video
system("ffmpeg -y -r 1 -i Resultat/Mandelbrot%d.png mandelbrot.mp4");
hipDeviceReset();
delete [] b;
delete [] pixels;
hipFree(d_pixels);
hipFree(d_b);
cout << "\n\nDeux vidos ont t gnrs dans le dossier courant:\n-mandelbrot.mp4 avec un zoom\n-julia.mp4 avec diffrentes valeurs de c\n\n" << endl;
return 0;
}
| 98d15f9eee9bbbf1260d8b4acd3421bea2343343.cu | #include <iostream>
#include <fstream>
#include <complex>
#include <cmath>
#include <SFML/Graphics.hpp>
#include <vector>
#include <regex>
using namespace std;
#include "Ensemble.cu"
__global__ void lance_calcul(Mandelbrot m, sf::Uint8 *p,int w, int h,int *b)
{
m.calcul(p,w,h,b);
}
__global__ void lance_calcul(Julia j, sf::Uint8 *p,int w, int h,int *b)
{
j.calcul(p,w,h,b);
}
void savePicture(Ensemble *e,int w, int h, sf::Uint8* pixels, int*b)
{
int index_b = 0;
for (int i = 0; i < w*h*4 ; i+=4)
{
pixels[i] = 0;
pixels[i+1] = 0;
pixels[i+2] = b[index_b];
pixels[i+3] = 255;
++index_b;
}
e->saveImage(w,h,pixels);
}
vector<string> split(const string& input, const string& regex) {
std::regex re(regex);
std::sregex_token_iterator
first{input.begin(), input.end(), re, -1},
last;
return {first, last};
}
fstream f;
template <typename T>
void lectureFichier(int& w, int&h, vector<T>& v, int it_max)
{
vector<T> result;
string line;
string delimiter ="=";
getline (f,line);
w = stoi(split(line,delimiter)[1]);
getline (f,line);
h = stoi(split(line,delimiter)[1]);
int id_frame,i;
float x,y,zoom;
zoom = 3;
i = 0;
while ( getline (f,line) )
{
string s(line);
if( !s.empty() )
{
if (i==0) { id_frame = stoi(split(s,delimiter)[1]); ++i;}
else if (i==1) { x = stof(split(s,delimiter)[1]); ++i; }
else if (i==2) { y = stof(split(s,delimiter)[1]); ++i; }
}
else
{
i = 0;
v.push_back(T(x,y,it_max,zoom,id_frame));
zoom-=0.2f;
}
}
if ( f.eof() )
{
v.push_back(T(x,y,it_max,zoom,id_frame));
}
}
int main(void)
{
//const int width = 1024;
//const int height = 1024;
const int iteration_max = 1000;
const string file_m ="config_mandelbrot.txt";
const string file_j ="config_julia.txt";
int width = 0;
int height = 0;
    // read the Mandelbrot config file
f.open(file_m);
if ( !f.good()) throw runtime_error("impossible ouvrir fichier config");
vector<Mandelbrot> list_mandelbrot{};
lectureFichier<Mandelbrot>(width,height,list_mandelbrot,iteration_max);
f.close();
    // read the Julia config file
f.open(file_j);
if ( !f.good()) throw runtime_error("impossible ouvrir fichier config");
vector<Julia> list_julia;
lectureFichier<Julia>(width,height,list_julia,iteration_max);
f.close();
sf::Uint8 *pixels,*d_pixels;
int *b,*d_b;
pixels = new sf::Uint8[width*height*4];
b = new int[width * height];
cudaMalloc(&d_pixels,sizeof(sf::Uint8) * width * height);
cudaMalloc(&d_b,sizeof(int) * width * height);
dim3 bloc(16, 16);
dim3 grille(width / bloc.x, height / bloc.y);
// JULIA //
for ( int i = 0 ; i < list_julia.size() ; ++i)
{
lance_calcul<<<grille,bloc>>>(list_julia[i],d_pixels,width,height,d_b);
printf("CUDA error: %s\n", cudaGetErrorString(cudaGetLastError()));
cudaDeviceSynchronize();
//cudaMemcpy(pixels,d_pixels,sizeof(sf::Uint8) * width * height,cudaMemcpyDeviceToHost);
cudaMemcpy(b,d_b,sizeof(int) * width * height,cudaMemcpyDeviceToHost);
savePicture(&list_julia[i],width,height,pixels,b);
}
// creation video
system("ffmpeg -y -r 1 -i Resultat/Julia%d.png julia.mp4");
// MANDELBROT //
for ( int i = 0 ; i < list_mandelbrot.size() ; ++i)
{
lance_calcul<<<grille,bloc>>>(list_mandelbrot[i],d_pixels,width,height,d_b);
printf("CUDA error: %s\n", cudaGetErrorString(cudaGetLastError()));
cudaDeviceSynchronize();
cudaMemcpy(b,d_b,sizeof(int) * width * height,cudaMemcpyDeviceToHost);
savePicture(&list_mandelbrot[i],width,height,pixels,b);
}
// creation video
system("ffmpeg -y -r 1 -i Resultat/Mandelbrot%d.png mandelbrot.mp4");
cudaDeviceReset();
delete [] b;
delete [] pixels;
cudaFree(d_pixels);
cudaFree(d_b);
cout << "\n\nDeux vidéos ont été générés dans le dossier courant:\n-mandelbrot.mp4 avec un zoom\n-julia.mp4 avec différentes valeurs de c\n\n" << endl;
return 0;
}
|
9a570b328cf4a3030906cf1a02425824d4a7aa9b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "SOR_kernel.hu"
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<sys/time.h>
#include<sys/stat.h>
#include<fcntl.h>
#include<string.h>
#include<errno.h>
const int n1 = 4096, n2 = 4096;
const int nn1 = 4108, nn2 = 4108;
void SOR(int len1, int len2, int arr1[nn1][nn2], int arr2[nn1][nn2], int padd, int trial){
struct timeval tbegin, tend;
gettimeofday(&tbegin, NULL);
#define ppcg_min(x,y) ({ __typeof__(x) _x = (x); __typeof__(y) _y = (y); _x < _y ? _x : _y; })
#define ppcg_fdiv_q(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))
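/* ppcg_fdiv_q is floor division: integer division that rounds toward negative infinity, so it stays correct when the numerator is negative. */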
if (trial >= 1 && len1 >= padd + 1 && len2 >= padd + 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
int *dev_arr1;
int *dev_arr2;
cudaCheckReturn(hipMalloc((void **) &dev_arr1, (len1 + 3) * (4108) * sizeof(int)));
cudaCheckReturn(hipMalloc((void **) &dev_arr2, (len1 + 3) * (4108) * sizeof(int)));
if (padd <= 4110) {
cudaCheckReturn(hipMemcpy(dev_arr1, arr1, (len1 + 3) * (4108) * sizeof(int), hipMemcpyHostToDevice));
cudaCheckReturn(hipMemcpy(dev_arr2, arr2, (len1 + 3) * (4108) * sizeof(int), hipMemcpyHostToDevice));
}
struct timeval t1, t2;
gettimeofday(&t1, NULL);
for (int c0 = padd; c0 < 6 * trial + len1 - 3; c0 += 1) {
if (6 * trial + len1 >= c0 + 7 && c0 + 6 * ppcg_fdiv_q(len1 - c0 - 1, 6) >= padd)
{
dim3 k0_dimBlock(16, 31);
dim3 k0_dimGrid(len2 + 30 >= ((len2 + 31) % 8192) + padd ? 256 : (len2 + 31) / 32 - 256 * ((len2 + 31) / 8192), ppcg_min(256, (trial + 31) / 32));
hipLaunchKernelGGL(( kernel0) , dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_arr1, dev_arr2, trial, padd, len1, len2, c0);
cudaCheckKernel();
}
hipDeviceSynchronize();
if (c0 >= padd + 3 && c0 + 6 * ppcg_fdiv_q(len1 - c0 + 2, 6) >= padd + 3)
{
dim3 k1_dimBlock(16, 31);
dim3 k1_dimGrid(len2 + 30 >= ((len2 + 31) % 8192) + padd ? 256 : (len2 + 31) / 32 - 256 * ((len2 + 31) / 8192), ppcg_min(256, (trial + 31) / 32));
hipLaunchKernelGGL(( kernel1) , dim3(k1_dimGrid), dim3(k1_dimBlock), 0, 0, dev_arr1, dev_arr2, trial, padd, len1, len2, c0);
cudaCheckKernel();
}
}
hipDeviceSynchronize();
gettimeofday(&t2, NULL);
double t3 = (double)(t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;
printf("execution time: %lf\n", t3);
if (padd <= 4110) {
cudaCheckReturn(hipMemcpy(arr1, dev_arr1, (len1 + 3) * (4108) * sizeof(int), hipMemcpyDeviceToHost));
cudaCheckReturn(hipMemcpy(arr2, dev_arr2, (len1 + 3) * (4108) * sizeof(int), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_arr1));
cudaCheckReturn(hipFree(dev_arr2));
}
gettimeofday(&tend, NULL);
double tt = (double)(tend.tv_sec - tbegin.tv_sec) + (double)(tend.tv_usec - tbegin.tv_usec) / 1000000.0;
printf("execution time: %lf s\n", tt);
}
int main(){
int trial = 64;
int padd = 6;
static int arr1[nn1][nn2];
static int arr2[nn1][nn2];
for (int row = 0; row < nn1; row++){
for (int col = 0; col < nn2; col++){
arr1[row][col] = rand() % 100;
arr2[row][col] = arr1[row][col];
}
}
SOR(n1 + padd, n2 + padd, arr1, arr2, padd, trial);
return 0;
}
| 9a570b328cf4a3030906cf1a02425824d4a7aa9b.cu | #include <assert.h>
#include <stdio.h>
#include "SOR_kernel.hu"
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<sys/time.h>
#include<sys/stat.h>
#include<fcntl.h>
#include<string.h>
#include<errno.h>
const int n1 = 4096, n2 = 4096;
const int nn1 = 4108, nn2 = 4108;
void SOR(int len1, int len2, int arr1[nn1][nn2], int arr2[nn1][nn2], int padd, int trial){
struct timeval tbegin, tend;
gettimeofday(&tbegin, NULL);
#define ppcg_min(x,y) ({ __typeof__(x) _x = (x); __typeof__(y) _y = (y); _x < _y ? _x : _y; })
#define ppcg_fdiv_q(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))
if (trial >= 1 && len1 >= padd + 1 && len2 >= padd + 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
int *dev_arr1;
int *dev_arr2;
cudaCheckReturn(cudaMalloc((void **) &dev_arr1, (len1 + 3) * (4108) * sizeof(int)));
cudaCheckReturn(cudaMalloc((void **) &dev_arr2, (len1 + 3) * (4108) * sizeof(int)));
if (padd <= 4110) {
cudaCheckReturn(cudaMemcpy(dev_arr1, arr1, (len1 + 3) * (4108) * sizeof(int), cudaMemcpyHostToDevice));
cudaCheckReturn(cudaMemcpy(dev_arr2, arr2, (len1 + 3) * (4108) * sizeof(int), cudaMemcpyHostToDevice));
}
struct timeval t1, t2;
gettimeofday(&t1, NULL);
for (int c0 = padd; c0 < 6 * trial + len1 - 3; c0 += 1) {
if (6 * trial + len1 >= c0 + 7 && c0 + 6 * ppcg_fdiv_q(len1 - c0 - 1, 6) >= padd)
{
dim3 k0_dimBlock(16, 31);
dim3 k0_dimGrid(len2 + 30 >= ((len2 + 31) % 8192) + padd ? 256 : (len2 + 31) / 32 - 256 * ((len2 + 31) / 8192), ppcg_min(256, (trial + 31) / 32));
kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_arr1, dev_arr2, trial, padd, len1, len2, c0);
cudaCheckKernel();
}
cudaDeviceSynchronize();
if (c0 >= padd + 3 && c0 + 6 * ppcg_fdiv_q(len1 - c0 + 2, 6) >= padd + 3)
{
dim3 k1_dimBlock(16, 31);
dim3 k1_dimGrid(len2 + 30 >= ((len2 + 31) % 8192) + padd ? 256 : (len2 + 31) / 32 - 256 * ((len2 + 31) / 8192), ppcg_min(256, (trial + 31) / 32));
kernel1 <<<k1_dimGrid, k1_dimBlock>>> (dev_arr1, dev_arr2, trial, padd, len1, len2, c0);
cudaCheckKernel();
}
}
cudaDeviceSynchronize();
gettimeofday(&t2, NULL);
double t3 = (double)(t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;
printf("execution time: %lf\n", t3);
if (padd <= 4110) {
cudaCheckReturn(cudaMemcpy(arr1, dev_arr1, (len1 + 3) * (4108) * sizeof(int), cudaMemcpyDeviceToHost));
cudaCheckReturn(cudaMemcpy(arr2, dev_arr2, (len1 + 3) * (4108) * sizeof(int), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_arr1));
cudaCheckReturn(cudaFree(dev_arr2));
}
gettimeofday(&tend, NULL);
double tt = (double)(tend.tv_sec - tbegin.tv_sec) + (double)(tend.tv_usec - tbegin.tv_usec) / 1000000.0;
printf("execution time: %lf s\n", tt);
}
int main(){
int trial = 64;
int padd = 6;
static int arr1[nn1][nn2];
static int arr2[nn1][nn2];
for (int row = 0; row < nn1; row++){
for (int col = 0; col < nn2; col++){
arr1[row][col] = rand() % 100;
arr2[row][col] = arr1[row][col];
}
}
SOR(n1 + padd, n2 + padd, arr1, arr2, padd, trial);
return 0;
}
|
c29c6d0542e57aa3df52fc7b960d9cf4f678c6bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
int BLOCK_SIZE;
__global__ void pivot_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE){
extern __shared__ int dist_block[];
int i = threadIdx.x;
int j = threadIdx.y;
int x = i + k * BLOCK_SIZE, y = j + k * BLOCK_SIZE;
dist_block[i * BLOCK_SIZE + j] = dist_matrix[x * N + y];
__syncthreads();
int s;
for (s = 0; s < BLOCK_SIZE; ++s){
dist_block[i * BLOCK_SIZE + j] =
min(dist_block[i * BLOCK_SIZE + j],
dist_block[i * BLOCK_SIZE + s] + dist_block[s * BLOCK_SIZE + j]);
__syncthreads();
}
dist_matrix[x * N + y] = dist_block[i * BLOCK_SIZE + j];
}
__global__ void pivot_col_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE){
extern __shared__ int sdata[];
int* dist_pivot = sdata;
int* dist_block = &sdata[BLOCK_SIZE * BLOCK_SIZE];
int i = threadIdx.x;
int j = threadIdx.y;
int r = blockIdx.x;
r += (r >= k);
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int x = i + r * BLOCK_SIZE;
dist_block[i * BLOCK_SIZE + j] = dist_matrix[x * N + pivot_y];
dist_pivot[j * BLOCK_SIZE + i] = dist_matrix[pivot_x * N + pivot_y];
__syncthreads();
int s;
for (s = 0; s < BLOCK_SIZE; ++s){
dist_block[i * BLOCK_SIZE + j] =
min(dist_block[i * BLOCK_SIZE + j],
dist_block[i * BLOCK_SIZE + s] + dist_pivot[j * BLOCK_SIZE + s]);
__syncthreads();
}
dist_matrix[x * N + pivot_y] = dist_block[i * BLOCK_SIZE + j];
}
__global__ void pivot_row_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE){
extern __shared__ int sdata[];
int* dist_pivot = sdata;
int* dist_block = &sdata[BLOCK_SIZE * BLOCK_SIZE];
int i = threadIdx.x;
int j = threadIdx.y;
int c = blockIdx.x;
c += (c >= k);
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int y = j + c * BLOCK_SIZE;
dist_block[i * BLOCK_SIZE + j] = dist_matrix[pivot_x * N + y];
dist_pivot[i * BLOCK_SIZE + j] = dist_matrix[pivot_x * N + pivot_y];
__syncthreads();
int s;
for (s = 0; s < BLOCK_SIZE; ++s){
dist_block[i * BLOCK_SIZE + j] =
min(dist_block[i * BLOCK_SIZE + j],
dist_pivot[i * BLOCK_SIZE + s] + dist_block[s * BLOCK_SIZE + j]);
__syncthreads();
}
dist_matrix[pivot_x * N + y] = dist_block[i * BLOCK_SIZE + j];
}
__global__ void pivot_col_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE, int *pivot_col_matrix){
extern __shared__ int sdata[];
int* dist_pivot = sdata;
int* dist_block = &sdata[BLOCK_SIZE * BLOCK_SIZE];
int i = threadIdx.x;
int j = threadIdx.y;
int r = blockIdx.x;
r += (r >= k);
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int x = i + r * BLOCK_SIZE;
dist_block[i * BLOCK_SIZE + j] = dist_matrix[x * N + pivot_y];
dist_pivot[j * BLOCK_SIZE + i] = dist_matrix[pivot_x * N + pivot_y];
__syncthreads();
int s;
for (s = 0; s < BLOCK_SIZE; ++s){
dist_block[i * BLOCK_SIZE + j] =
min(dist_block[i * BLOCK_SIZE + j],
dist_block[i * BLOCK_SIZE + s] + dist_pivot[j * BLOCK_SIZE + s]);
__syncthreads();
}
dist_matrix[x * N + pivot_y] = dist_block[i * BLOCK_SIZE + j];
pivot_col_matrix[k * BLOCK_SIZE * BLOCK_SIZE + i * BLOCK_SIZE + j] = dist_pivot[i * BLOCK_SIZE + j];
pivot_col_matrix[r * BLOCK_SIZE * BLOCK_SIZE + i * BLOCK_SIZE + j] = dist_block[i * BLOCK_SIZE + j];
}
__global__ void pivot_row_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE, int *pivot_row_matrix){
extern __shared__ int sdata[];
int* dist_pivot = sdata;
int* dist_block = &sdata[BLOCK_SIZE * BLOCK_SIZE];
int i = threadIdx.x;
int j = threadIdx.y;
int c = blockIdx.x;
c += (c >= k);
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int y = j + c * BLOCK_SIZE;
dist_block[i * BLOCK_SIZE + j] = dist_matrix[pivot_x * N + y];
dist_pivot[i * BLOCK_SIZE + j] = dist_matrix[pivot_x * N + pivot_y];
__syncthreads();
int s;
for (s = 0; s < BLOCK_SIZE; ++s){
dist_block[i * BLOCK_SIZE + j] =
min(dist_block[i * BLOCK_SIZE + j],
dist_pivot[i * BLOCK_SIZE + s] + dist_block[s * BLOCK_SIZE + j]);
__syncthreads();
}
dist_matrix[pivot_x * N + y] = dist_block[i * BLOCK_SIZE + j];
pivot_row_matrix[k * BLOCK_SIZE * BLOCK_SIZE + i * BLOCK_SIZE + j] = dist_pivot[i * BLOCK_SIZE + j];
pivot_row_matrix[c * BLOCK_SIZE * BLOCK_SIZE + i * BLOCK_SIZE + j] = dist_block[i * BLOCK_SIZE + j];
}
__global__ void res_floyed_full(int *dist_matrix, int N, int k, int BLOCK_SIZE){
extern __shared__ int sdata[];
int* dist_pivot_row = sdata;
int* dist_pivot_col = &sdata[(BLOCK_SIZE) * (BLOCK_SIZE)];
int i = threadIdx.x;
int j = threadIdx.y;
int r = blockIdx.x;
int c = blockIdx.y;
r += (r >= k);
c += (c >= k);
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int x = i + r * BLOCK_SIZE, y = j + c * BLOCK_SIZE;
dist_pivot_row[j * (BLOCK_SIZE) + i] = dist_matrix[x * N + pivot_y];
dist_pivot_col[i * (BLOCK_SIZE) + j] = dist_matrix[pivot_x * N + y];
__syncthreads();
int s;
int res = 101 * N, cur;
for (s = 0; s < BLOCK_SIZE; ++s){
cur = dist_pivot_row[s * (BLOCK_SIZE) + i] + dist_pivot_col[s * (BLOCK_SIZE) + j];
if (cur < res) res = cur;
}
dist_matrix[x * N + y] = min(dist_matrix[x * N + y], res);
}
__global__ void res_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE, int divide_line){
extern __shared__ int sdata[];
int* dist_pivot_row = sdata;
int* dist_pivot_col = &sdata[(BLOCK_SIZE) * (BLOCK_SIZE)];
int i = threadIdx.x;
int j = threadIdx.y;
int r = blockIdx.x;
int c = blockIdx.y;
if (r < divide_line && c < divide_line)
return;
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int x = i + r * BLOCK_SIZE, y = j + c * BLOCK_SIZE;
dist_pivot_row[j * (BLOCK_SIZE) + i] = dist_matrix[x * N + pivot_y];
dist_pivot_col[i * (BLOCK_SIZE) + j] = dist_matrix[pivot_x * N + y];
__syncthreads();
int s;
int res = 101 * N, cur;
for (s = 0; s < BLOCK_SIZE; ++s){
cur = dist_pivot_row[s * (BLOCK_SIZE) + i] + dist_pivot_col[s * (BLOCK_SIZE) + j];
if (cur < res) res = cur;
}
dist_matrix[x * N + y] = min(dist_matrix[x * N + y], res);
}
__global__ void res_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE, int row_offset, int col_offset){
extern __shared__ int sdata[];
int* dist_pivot_row = sdata;
int* dist_pivot_col = &sdata[(BLOCK_SIZE) * (BLOCK_SIZE)];
int i = threadIdx.x;
int j = threadIdx.y;
int r = blockIdx.x + row_offset;
int c = blockIdx.y + col_offset;
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int x = i + r * BLOCK_SIZE, y = j + c * BLOCK_SIZE;
dist_pivot_row[j * (BLOCK_SIZE) + i] = dist_matrix[x * N + pivot_y];
dist_pivot_col[i * (BLOCK_SIZE) + j] = dist_matrix[pivot_x * N + y];
__syncthreads();
int s;
int res = 101 * N, cur;
for (s = 0; s < BLOCK_SIZE; ++s){
cur = dist_pivot_row[s * (BLOCK_SIZE) + i] + dist_pivot_col[s * (BLOCK_SIZE) + j];
if (cur < res) res = cur;
}
dist_matrix[x * N + y] = min(dist_matrix[x * N + y], res);
}
__global__ void res_floyed_slave_ul(int *dist_matrix, int N, int k, int BLOCK_SIZE, int row_offset,
int *pivot_row_matrix, int *pivot_col_matrix){
extern __shared__ int sdata[];
int* dist_pivot_row = sdata;
int* dist_pivot_col = &sdata[(BLOCK_SIZE) * (BLOCK_SIZE)];
int i = threadIdx.x;
int j = threadIdx.y;
int r = blockIdx.x;
int c = blockIdx.y;
int x = i + r * BLOCK_SIZE, y = j + c * BLOCK_SIZE;
dist_pivot_row[j * (BLOCK_SIZE) + i] = pivot_col_matrix[r * (BLOCK_SIZE * BLOCK_SIZE) + i * BLOCK_SIZE + j];
dist_pivot_col[i * (BLOCK_SIZE) + j] = pivot_row_matrix[c * (BLOCK_SIZE * BLOCK_SIZE) + i * BLOCK_SIZE + j];
__syncthreads();
if (r == k){
dist_matrix[x * N + y] = dist_pivot_col[i * (BLOCK_SIZE) + j];
return;
}
else if (c == k){
dist_matrix[x * N + y] = dist_pivot_row[j * (BLOCK_SIZE) + i];
return;
}
int s;
int res = 101 * N, cur;
for (s = 0; s < BLOCK_SIZE; ++s){
cur = dist_pivot_row[s * (BLOCK_SIZE) + i] + dist_pivot_col[s * (BLOCK_SIZE) + j];
if (cur < res) res = cur;
}
dist_matrix[x * N + y] = min(res, dist_matrix[x * N + y]);
}
__global__ void res_floyed_slave(int *dist_matrix, int N, int k, int BLOCK_SIZE, int row_offset,
int *pivot_row_matrix, int *pivot_col_matrix){
extern __shared__ int sdata[];
int* dist_pivot_row = sdata;
int* dist_pivot_col = &sdata[(BLOCK_SIZE) * (BLOCK_SIZE)];
int i = threadIdx.x;
int j = threadIdx.y;
int r = blockIdx.x + row_offset;
int c = blockIdx.y;
int x = i + r * BLOCK_SIZE, y = j + c * BLOCK_SIZE;
dist_pivot_row[j * (BLOCK_SIZE) + i] = pivot_col_matrix[r * (BLOCK_SIZE * BLOCK_SIZE) + i * BLOCK_SIZE + j];
dist_pivot_col[i * (BLOCK_SIZE) + j] = pivot_row_matrix[c * (BLOCK_SIZE * BLOCK_SIZE) + i * BLOCK_SIZE + j];
__syncthreads();
if (r == k){
dist_matrix[x * N + y] = dist_pivot_col[i * (BLOCK_SIZE) + j];
return;
}
else if (c == k){
dist_matrix[x * N + y] = dist_pivot_row[j * (BLOCK_SIZE) + i];
return;
}
int s;
int res = 101 * N, cur;
for (s = 0; s < BLOCK_SIZE; ++s){
cur = dist_pivot_row[s * (BLOCK_SIZE) + i] + dist_pivot_col[s * (BLOCK_SIZE) + j];
if (cur < res) res = cur;
}
dist_matrix[x * N + y] = res;
}
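/* checkmin: element-wise merge of two partial results, keeping the smaller entry of the two distance matrices. */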
__global__ void checkmin(int *dist_matrix, int *dist_matrix2, int N, int BLOCK_SIZE){
int id = (threadIdx.x + (blockIdx.x) * BLOCK_SIZE) * N + (blockIdx.y * BLOCK_SIZE + threadIdx.y);
int res = dist_matrix2[id];
if (res < dist_matrix[id])
dist_matrix[id] = res;
}
void output(int n, int N, int* dist_matrix){
int i, j;
printf("=====\n");
for (i = 0; i < n; ++i)
for (j = 0; j < n; ++j)
if (dist_matrix[i * N + j] < N * 101)
printf((j + 1 < n) ? "%d " : "%d\n", dist_matrix[i * N + j]);
else
printf((j + 1 < n) ? "INF " : "INF\n");
}
void output_pivot(int n, int N, int BLOCK_SIZE, int* pivot_matrix){
int i, j, k;
printf("-----\n");
for (k = 0; k < N / BLOCK_SIZE; ++k){
for (i = 0; i < BLOCK_SIZE; ++i){
for (j = 0; j < BLOCK_SIZE; ++j)
printf("%d ", pivot_matrix[k * BLOCK_SIZE * BLOCK_SIZE + i * BLOCK_SIZE + j]);
printf("\n");
}
}
}
#define INPUT_BUF_SIZE 1000000000
#define OUTPUT_BUF_SIZE 1000000000
char input_buf[INPUT_BUF_SIZE], output_buf[OUTPUT_BUF_SIZE];
int input_cur_pt, output_cur_pt;
void bufReRead(){
printf("new read\n");
int len = fread(input_buf, 1, INPUT_BUF_SIZE, stdin);
if (len < INPUT_BUF_SIZE)
input_buf[len] = '\0';
input_cur_pt = 0;
}
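/* getIntFromBuf: skip non-digit characters and parse the next non-negative integer, refilling the input buffer whenever it runs out. */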
int getIntFromBuf(){
char x = ' ';
while (!(x >= '0' && x <= '9')){
x = input_buf[input_cur_pt ++];
if (input_cur_pt == INPUT_BUF_SIZE)
bufReRead();
}
int ret = 0;
while (x >= '0' && x <= '9'){
ret = ret * 10 + x - '0';
x = input_buf[input_cur_pt ++];
if (input_cur_pt == INPUT_BUF_SIZE)
bufReRead();
}
return ret;
}
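/* putIntToBuf: append the decimal representation of x to the output buffer (digits are collected least-significant first, then emitted in reverse). */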
void putIntToBuf(int x){
if (x == 0){
output_buf[output_cur_pt++] = '0';
return;
}
int len = 0;
int out[8];
memset(out, 0, sizeof out);
for (; ; ){
int t = x / 10;
out[++len] = x - t * 10;
x = t;
if (x == 0) break;
}
for (int i = len; i >= 1; --i)
output_buf[output_cur_pt++] = out[i] + '0';
}
int main(int argc, char** argv){
char *input_filename = argv[1];
char *output_filename = argv[2];
BLOCK_SIZE = atoi(argv[3]);
BLOCK_SIZE = min(BLOCK_SIZE, 32);
/* input & output device */
input_cur_pt = 0;
output_cur_pt = 0;
freopen(input_filename, "r", stdin);
int len = fread(input_buf, 1, INPUT_BUF_SIZE, stdin);
if (len < INPUT_BUF_SIZE)
input_buf[len] = '\0';
/*
FOR CUDA
if (BLOCK_SIZE < 32 && BLOCK_SIZE >= 24) BLOCK_SIZE = 24;
if (BLOCK_SIZE < 24 && BLOCK_SIZE >= 16) BLOCK_SIZE = 16;
if (BLOCK_SIZE < 16 && BLOCK_SIZE >= 8) BLOCK_SIZE = 8;
if (BLOCK_SIZE < 8) BLOCK_SIZE = 8;
*/
int i, j;
int n, m;
/*scanf("%d%d", &n, &m);*/
n = getIntFromBuf();
m = getIntFromBuf();
/* Padding */
int num_blocks = n / BLOCK_SIZE;
if (num_blocks * BLOCK_SIZE < n)
num_blocks ++;
int N = num_blocks * BLOCK_SIZE;
int* dist_matrix = (int*)malloc(sizeof(int) * N * N);
/* read in data */
for (i = 0; i < N * N; ++i)
dist_matrix[i] = N * 101;
for (i = 0; i < N; ++i)
dist_matrix[i * N + i] = 0;
for (i = 0; i < m; ++i){
int x, y, w;
/*scanf("%d%d%d", &x, &y, &w);*/
x = getIntFromBuf();
y = getIntFromBuf();
w = getIntFromBuf();
x--;
y--;
if (dist_matrix[x * N + y] > w)
dist_matrix[x * N + y] = w;
}
int* d_dist_matrix;
int* d_pivot_row;
int* d_pivot_col;
int* vd_dist_matrix;
int* vd_pivot_row;
int* vd_pivot_col;
int* d_bak_matrix;
int size = sizeof(int) * N * N;
int pivot_line_size = sizeof(int) * N * BLOCK_SIZE;
int *pivot_row = (int*)malloc(pivot_line_size);
int *pivot_col = (int*)malloc(pivot_line_size);
/* blocked APSP (all-pairs shortest paths) */
hipStream_t stream[2];
hipEvent_t fin, fin0[num_blocks];
hipSetDevice(0);
hipStreamCreate(&stream[0]);
for (i = 0; i < num_blocks; ++i)
hipEventCreate(&fin0[i]);
hipMalloc((void**)&d_dist_matrix, size);
hipMalloc((void**)&d_pivot_row, pivot_line_size);
hipMalloc((void**)&d_pivot_col, pivot_line_size);
hipMalloc((void**)&d_bak_matrix, size);
hipMemcpy(d_dist_matrix, dist_matrix, size, hipMemcpyHostToDevice);
hipSetDevice(1);
hipStreamCreate(&stream[1]);
hipEventCreate(&fin);
hipMalloc((void**)&vd_dist_matrix, size);
hipMalloc((void**)&vd_pivot_row, pivot_line_size);
hipMalloc((void**)&vd_pivot_col, pivot_line_size);
hipMemcpy(vd_dist_matrix, dist_matrix, size, hipMemcpyHostToDevice);
hipDeviceEnablePeerAccess(0, 0);
hipSetDevice(0);
hipDeviceEnablePeerAccess(1, 0);
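/*
 * Blocked Floyd-Warshall: each outer iteration picks pivot block k and runs
 * phase 1 (relax the pivot block itself), phase 2 (relax the pivot row and
 * pivot column of blocks) and phase 3 (relax the remaining blocks using the
 * updated pivot row/column). For num_blocks > 4, phase 3 is split across the
 * two GPUs: device 1 updates the upper-left square of blocks (bounded by
 * divide_line) from the pivot row/column copied over peer-to-peer, device 0
 * updates everything else, and the partial matrices are merged with checkmin.
 */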
if (num_blocks > 4){
int divide_line = num_blocks / sqrt(2);
for (i = 0; i < num_blocks; ++i){
/* phase #1: self dependent blocks */
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
int master_task = (num_blocks - 1) * 0.70;
int num_row_blocks = master_task + (i <= master_task);
int slave_task = num_blocks - num_row_blocks;
dim3 blockPerGrid_master(num_blocks - 1, num_blocks - 1);
hipSetDevice(0);
hipLaunchKernelGGL(( pivot_floyed), dim3(1), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE), stream[0], d_dist_matrix, N, i, BLOCK_SIZE);
/* phase #2: pivot row & col blocks */
hipLaunchKernelGGL(( pivot_row_floyed), dim3(num_blocks - 1), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[0], d_dist_matrix, N, i, BLOCK_SIZE, d_pivot_row);
hipLaunchKernelGGL(( pivot_col_floyed), dim3(num_blocks - 1), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[0], d_dist_matrix, N, i, BLOCK_SIZE, d_pivot_col);
hipMemcpyPeerAsync(vd_pivot_col, 1, d_pivot_col, 0, pivot_line_size, stream[0]);
hipMemcpyPeerAsync(vd_pivot_row, 1, d_pivot_row, 0, pivot_line_size, stream[0]);
hipEventRecord(fin0[i], stream[0]);
/* phase #3: other blocks */
if (i + 1 < num_blocks){
hipLaunchKernelGGL(( res_floyed), dim3(dim3(num_blocks - i - 1, num_blocks - i - 1)), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[0], d_dist_matrix, N, i, BLOCK_SIZE, i + 1, i + 1);
if (i > 0){
hipLaunchKernelGGL(( res_floyed), dim3(dim3(i, num_blocks - i - 1)), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[0], d_dist_matrix, N, i, BLOCK_SIZE, 0, i + 1);
hipLaunchKernelGGL(( res_floyed), dim3(dim3(num_blocks - i - 1, i)), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[0], d_dist_matrix, N, i, BLOCK_SIZE, i + 1, 0);
}
}
if (i > divide_line && i > 0){
hipLaunchKernelGGL(( res_floyed), dim3(dim3(i, i)), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[0], d_dist_matrix, N, i, BLOCK_SIZE, divide_line);
}
hipSetDevice(1);
hipStreamWaitEvent(stream[1], fin0[i], 0);
if (i > 0){
dim3 blockPerGrid_slave(min(i, divide_line), min(i, divide_line));
hipLaunchKernelGGL(( res_floyed_slave_ul), dim3(blockPerGrid_slave), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[1], vd_dist_matrix, N, i, BLOCK_SIZE, num_row_blocks,
vd_pivot_row, vd_pivot_col);
}
}
hipSetDevice(1);
hipMemcpyPeerAsync(d_bak_matrix, 0, vd_dist_matrix, 1, size, stream[1]);
hipEventRecord(fin, stream[1]);
hipSetDevice(0);
dim3 blockPerGrid_slave(num_blocks, num_blocks);
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
hipStreamWaitEvent(stream[0], fin, 0);
hipLaunchKernelGGL(( checkmin), dim3(blockPerGrid_slave), dim3(threadsPerBlock), 0, stream[0], d_dist_matrix, d_bak_matrix, N, BLOCK_SIZE);
}
else
for (i = 0; i < num_blocks; ++i){
/* phase #1: self dependent blocks */
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 blockPerGrid(num_blocks - 1, num_blocks - 1);
hipLaunchKernelGGL(( pivot_floyed), dim3(1), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE), 0, d_dist_matrix, N, i, BLOCK_SIZE);
if (num_blocks > 1){
/* phase #2: pivot row & col blocks */
hipLaunchKernelGGL(( pivot_row_floyed), dim3(num_blocks - 1), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, 0, d_dist_matrix, N, i, BLOCK_SIZE);
hipLaunchKernelGGL(( pivot_col_floyed), dim3(num_blocks - 1), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, 0, d_dist_matrix, N, i, BLOCK_SIZE);
/* phase #3: other blocks */
hipLaunchKernelGGL(( res_floyed_full), dim3(blockPerGrid), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, 0, d_dist_matrix, N, i, BLOCK_SIZE);
}
}
hipMemcpy(dist_matrix, d_dist_matrix, size, hipMemcpyDeviceToHost);
freopen(output_filename, "w", stdout);
for (i = 0; i < n; ++i){
for (j = 0; j < n; ++j){
if (dist_matrix[i * N + j] < N * 101)
putIntToBuf(dist_matrix[i * N + j]);
else{
output_buf[output_cur_pt++] = 'I';
output_buf[output_cur_pt++] = 'N';
output_buf[output_cur_pt++] = 'F';
}
output_buf[output_cur_pt++] = ' ';
}
output_buf[output_cur_pt++] = '\n';
}
fwrite(output_buf, 1, output_cur_pt, stdout);
}
| c29c6d0542e57aa3df52fc7b960d9cf4f678c6bb.cu | #include <stdio.h>
#include <stdlib.h>
int BLOCK_SIZE;
__global__ void pivot_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE){
extern __shared__ int dist_block[];
int i = threadIdx.x;
int j = threadIdx.y;
int x = i + k * BLOCK_SIZE, y = j + k * BLOCK_SIZE;
dist_block[i * BLOCK_SIZE + j] = dist_matrix[x * N + y];
__syncthreads();
int s;
for (s = 0; s < BLOCK_SIZE; ++s){
dist_block[i * BLOCK_SIZE + j] =
min(dist_block[i * BLOCK_SIZE + j],
dist_block[i * BLOCK_SIZE + s] + dist_block[s * BLOCK_SIZE + j]);
__syncthreads();
}
dist_matrix[x * N + y] = dist_block[i * BLOCK_SIZE + j];
}
__global__ void pivot_col_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE){
extern __shared__ int sdata[];
int* dist_pivot = sdata;
int* dist_block = &sdata[BLOCK_SIZE * BLOCK_SIZE];
int i = threadIdx.x;
int j = threadIdx.y;
int r = blockIdx.x;
r += (r >= k);
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int x = i + r * BLOCK_SIZE;
dist_block[i * BLOCK_SIZE + j] = dist_matrix[x * N + pivot_y];
dist_pivot[j * BLOCK_SIZE + i] = dist_matrix[pivot_x * N + pivot_y];
__syncthreads();
int s;
for (s = 0; s < BLOCK_SIZE; ++s){
dist_block[i * BLOCK_SIZE + j] =
min(dist_block[i * BLOCK_SIZE + j],
dist_block[i * BLOCK_SIZE + s] + dist_pivot[j * BLOCK_SIZE + s]);
__syncthreads();
}
dist_matrix[x * N + pivot_y] = dist_block[i * BLOCK_SIZE + j];
}
__global__ void pivot_row_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE){
extern __shared__ int sdata[];
int* dist_pivot = sdata;
int* dist_block = &sdata[BLOCK_SIZE * BLOCK_SIZE];
int i = threadIdx.x;
int j = threadIdx.y;
int c = blockIdx.x;
c += (c >= k);
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int y = j + c * BLOCK_SIZE;
dist_block[i * BLOCK_SIZE + j] = dist_matrix[pivot_x * N + y];
dist_pivot[i * BLOCK_SIZE + j] = dist_matrix[pivot_x * N + pivot_y];
__syncthreads();
int s;
for (s = 0; s < BLOCK_SIZE; ++s){
dist_block[i * BLOCK_SIZE + j] =
min(dist_block[i * BLOCK_SIZE + j],
dist_pivot[i * BLOCK_SIZE + s] + dist_block[s * BLOCK_SIZE + j]);
__syncthreads();
}
dist_matrix[pivot_x * N + y] = dist_block[i * BLOCK_SIZE + j];
}
__global__ void pivot_col_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE, int *pivot_col_matrix){
extern __shared__ int sdata[];
int* dist_pivot = sdata;
int* dist_block = &sdata[BLOCK_SIZE * BLOCK_SIZE];
int i = threadIdx.x;
int j = threadIdx.y;
int r = blockIdx.x;
r += (r >= k);
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int x = i + r * BLOCK_SIZE;
dist_block[i * BLOCK_SIZE + j] = dist_matrix[x * N + pivot_y];
dist_pivot[j * BLOCK_SIZE + i] = dist_matrix[pivot_x * N + pivot_y];
__syncthreads();
int s;
for (s = 0; s < BLOCK_SIZE; ++s){
dist_block[i * BLOCK_SIZE + j] =
min(dist_block[i * BLOCK_SIZE + j],
dist_block[i * BLOCK_SIZE + s] + dist_pivot[j * BLOCK_SIZE + s]);
__syncthreads();
}
dist_matrix[x * N + pivot_y] = dist_block[i * BLOCK_SIZE + j];
pivot_col_matrix[k * BLOCK_SIZE * BLOCK_SIZE + i * BLOCK_SIZE + j] = dist_pivot[i * BLOCK_SIZE + j];
pivot_col_matrix[r * BLOCK_SIZE * BLOCK_SIZE + i * BLOCK_SIZE + j] = dist_block[i * BLOCK_SIZE + j];
}
__global__ void pivot_row_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE, int *pivot_row_matrix){
extern __shared__ int sdata[];
int* dist_pivot = sdata;
int* dist_block = &sdata[BLOCK_SIZE * BLOCK_SIZE];
int i = threadIdx.x;
int j = threadIdx.y;
int c = blockIdx.x;
c += (c >= k);
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int y = j + c * BLOCK_SIZE;
dist_block[i * BLOCK_SIZE + j] = dist_matrix[pivot_x * N + y];
dist_pivot[i * BLOCK_SIZE + j] = dist_matrix[pivot_x * N + pivot_y];
__syncthreads();
int s;
for (s = 0; s < BLOCK_SIZE; ++s){
dist_block[i * BLOCK_SIZE + j] =
min(dist_block[i * BLOCK_SIZE + j],
dist_pivot[i * BLOCK_SIZE + s] + dist_block[s * BLOCK_SIZE + j]);
__syncthreads();
}
dist_matrix[pivot_x * N + y] = dist_block[i * BLOCK_SIZE + j];
pivot_row_matrix[k * BLOCK_SIZE * BLOCK_SIZE + i * BLOCK_SIZE + j] = dist_pivot[i * BLOCK_SIZE + j];
pivot_row_matrix[c * BLOCK_SIZE * BLOCK_SIZE + i * BLOCK_SIZE + j] = dist_block[i * BLOCK_SIZE + j];
}
__global__ void res_floyed_full(int *dist_matrix, int N, int k, int BLOCK_SIZE){
extern __shared__ int sdata[];
int* dist_pivot_row = sdata;
int* dist_pivot_col = &sdata[(BLOCK_SIZE) * (BLOCK_SIZE)];
int i = threadIdx.x;
int j = threadIdx.y;
int r = blockIdx.x;
int c = blockIdx.y;
r += (r >= k);
c += (c >= k);
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int x = i + r * BLOCK_SIZE, y = j + c * BLOCK_SIZE;
dist_pivot_row[j * (BLOCK_SIZE) + i] = dist_matrix[x * N + pivot_y];
dist_pivot_col[i * (BLOCK_SIZE) + j] = dist_matrix[pivot_x * N + y];
__syncthreads();
int s;
int res = 101 * N, cur;
for (s = 0; s < BLOCK_SIZE; ++s){
cur = dist_pivot_row[s * (BLOCK_SIZE) + i] + dist_pivot_col[s * (BLOCK_SIZE) + j];
if (cur < res) res = cur;
}
dist_matrix[x * N + y] = min(dist_matrix[x * N + y], res);
}
__global__ void res_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE, int divide_line){
extern __shared__ int sdata[];
int* dist_pivot_row = sdata;
int* dist_pivot_col = &sdata[(BLOCK_SIZE) * (BLOCK_SIZE)];
int i = threadIdx.x;
int j = threadIdx.y;
int r = blockIdx.x;
int c = blockIdx.y;
if (r < divide_line && c < divide_line)
return;
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int x = i + r * BLOCK_SIZE, y = j + c * BLOCK_SIZE;
dist_pivot_row[j * (BLOCK_SIZE) + i] = dist_matrix[x * N + pivot_y];
dist_pivot_col[i * (BLOCK_SIZE) + j] = dist_matrix[pivot_x * N + y];
__syncthreads();
int s;
int res = 101 * N, cur;
for (s = 0; s < BLOCK_SIZE; ++s){
cur = dist_pivot_row[s * (BLOCK_SIZE) + i] + dist_pivot_col[s * (BLOCK_SIZE) + j];
if (cur < res) res = cur;
}
dist_matrix[x * N + y] = min(dist_matrix[x * N + y], res);
}
__global__ void res_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE, int row_offset, int col_offset){
extern __shared__ int sdata[];
int* dist_pivot_row = sdata;
int* dist_pivot_col = &sdata[(BLOCK_SIZE) * (BLOCK_SIZE)];
int i = threadIdx.x;
int j = threadIdx.y;
int r = blockIdx.x + row_offset;
int c = blockIdx.y + col_offset;
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int x = i + r * BLOCK_SIZE, y = j + c * BLOCK_SIZE;
dist_pivot_row[j * (BLOCK_SIZE) + i] = dist_matrix[x * N + pivot_y];
dist_pivot_col[i * (BLOCK_SIZE) + j] = dist_matrix[pivot_x * N + y];
__syncthreads();
int s;
int res = 101 * N, cur;
for (s = 0; s < BLOCK_SIZE; ++s){
cur = dist_pivot_row[s * (BLOCK_SIZE) + i] + dist_pivot_col[s * (BLOCK_SIZE) + j];
if (cur < res) res = cur;
}
dist_matrix[x * N + y] = min(dist_matrix[x * N + y], res);
}
__global__ void res_floyed_slave_ul(int *dist_matrix, int N, int k, int BLOCK_SIZE, int row_offset,
int *pivot_row_matrix, int *pivot_col_matrix){
extern __shared__ int sdata[];
int* dist_pivot_row = sdata;
int* dist_pivot_col = &sdata[(BLOCK_SIZE) * (BLOCK_SIZE)];
int i = threadIdx.x;
int j = threadIdx.y;
int r = blockIdx.x;
int c = blockIdx.y;
int x = i + r * BLOCK_SIZE, y = j + c * BLOCK_SIZE;
dist_pivot_row[j * (BLOCK_SIZE) + i] = pivot_col_matrix[r * (BLOCK_SIZE * BLOCK_SIZE) + i * BLOCK_SIZE + j];
dist_pivot_col[i * (BLOCK_SIZE) + j] = pivot_row_matrix[c * (BLOCK_SIZE * BLOCK_SIZE) + i * BLOCK_SIZE + j];
__syncthreads();
if (r == k){
dist_matrix[x * N + y] = dist_pivot_col[i * (BLOCK_SIZE) + j];
return;
}
else if (c == k){
dist_matrix[x * N + y] = dist_pivot_row[j * (BLOCK_SIZE) + i];
return;
}
int s;
int res = 101 * N, cur;
for (s = 0; s < BLOCK_SIZE; ++s){
cur = dist_pivot_row[s * (BLOCK_SIZE) + i] + dist_pivot_col[s * (BLOCK_SIZE) + j];
if (cur < res) res = cur;
}
dist_matrix[x * N + y] = min(res, dist_matrix[x * N + y]);
}
__global__ void res_floyed_slave(int *dist_matrix, int N, int k, int BLOCK_SIZE, int row_offset,
int *pivot_row_matrix, int *pivot_col_matrix){
extern __shared__ int sdata[];
int* dist_pivot_row = sdata;
int* dist_pivot_col = &sdata[(BLOCK_SIZE) * (BLOCK_SIZE)];
int i = threadIdx.x;
int j = threadIdx.y;
int r = blockIdx.x + row_offset;
int c = blockIdx.y;
int x = i + r * BLOCK_SIZE, y = j + c * BLOCK_SIZE;
dist_pivot_row[j * (BLOCK_SIZE) + i] = pivot_col_matrix[r * (BLOCK_SIZE * BLOCK_SIZE) + i * BLOCK_SIZE + j];
dist_pivot_col[i * (BLOCK_SIZE) + j] = pivot_row_matrix[c * (BLOCK_SIZE * BLOCK_SIZE) + i * BLOCK_SIZE + j];
__syncthreads();
if (r == k){
dist_matrix[x * N + y] = dist_pivot_col[i * (BLOCK_SIZE) + j];
return;
}
else if (c == k){
dist_matrix[x * N + y] = dist_pivot_row[j * (BLOCK_SIZE) + i];
return;
}
int s;
int res = 101 * N, cur;
for (s = 0; s < BLOCK_SIZE; ++s){
cur = dist_pivot_row[s * (BLOCK_SIZE) + i] + dist_pivot_col[s * (BLOCK_SIZE) + j];
if (cur < res) res = cur;
}
dist_matrix[x * N + y] = res;
}
__global__ void checkmin(int *dist_matrix, int *dist_matrix2, int N, int BLOCK_SIZE){
int id = (threadIdx.x + (blockIdx.x) * BLOCK_SIZE) * N + (blockIdx.y * BLOCK_SIZE + threadIdx.y);
int res = dist_matrix2[id];
if (res < dist_matrix[id])
dist_matrix[id] = res;
}
void output(int n, int N, int* dist_matrix){
int i, j;
printf("=====\n");
for (i = 0; i < n; ++i)
for (j = 0; j < n; ++j)
if (dist_matrix[i * N + j] < N * 101)
printf((j + 1 < n) ? "%d " : "%d\n", dist_matrix[i * N + j]);
else
printf((j + 1 < n) ? "INF " : "INF\n");
}
void output_pivot(int n, int N, int BLOCK_SIZE, int* pivot_matrix){
int i, j, k;
printf("-----\n");
for (k = 0; k < N / BLOCK_SIZE; ++k){
for (i = 0; i < BLOCK_SIZE; ++i){
for (j = 0; j < BLOCK_SIZE; ++j)
printf("%d ", pivot_matrix[k * BLOCK_SIZE * BLOCK_SIZE + i * BLOCK_SIZE + j]);
printf("\n");
}
}
}
#define INPUT_BUF_SIZE 1000000000
#define OUTPUT_BUF_SIZE 1000000000
char input_buf[INPUT_BUF_SIZE], output_buf[OUTPUT_BUF_SIZE];
int input_cur_pt, output_cur_pt;
void bufReRead(){
printf("new read\n");
int len = fread(input_buf, 1, INPUT_BUF_SIZE, stdin);
if (len < INPUT_BUF_SIZE)
input_buf[len] = '\0';
input_cur_pt = 0;
}
int getIntFromBuf(){
char x = ' ';
while (!(x >= '0' && x <= '9')){
x = input_buf[input_cur_pt ++];
if (input_cur_pt == INPUT_BUF_SIZE)
bufReRead();
}
int ret = 0;
while (x >= '0' && x <= '9'){
ret = ret * 10 + x - '0';
x = input_buf[input_cur_pt ++];
if (input_cur_pt == INPUT_BUF_SIZE)
bufReRead();
}
return ret;
}
void putIntToBuf(int x){
if (x == 0){
output_buf[output_cur_pt++] = '0';
return;
}
int len = 0;
int out[8];
memset(out, 0, sizeof out);
for (; ; ){
int t = x / 10;
out[++len] = x - t * 10;
x = t;
if (x == 0) break;
}
for (int i = len; i >= 1; --i)
output_buf[output_cur_pt++] = out[i] + '0';
}
int main(int argc, char** argv){
char *input_filename = argv[1];
char *output_filename = argv[2];
BLOCK_SIZE = atoi(argv[3]);
BLOCK_SIZE = min(BLOCK_SIZE, 32);
/* input & output device */
input_cur_pt = 0;
output_cur_pt = 0;
freopen(input_filename, "r", stdin);
int len = fread(input_buf, 1, INPUT_BUF_SIZE, stdin);
if (len < INPUT_BUF_SIZE)
input_buf[len] = '\0';
/*
FOR CUDA
if (BLOCK_SIZE < 32 && BLOCK_SIZE >= 24) BLOCK_SIZE = 24;
if (BLOCK_SIZE < 24 && BLOCK_SIZE >= 16) BLOCK_SIZE = 16;
if (BLOCK_SIZE < 16 && BLOCK_SIZE >= 8) BLOCK_SIZE = 8;
if (BLOCK_SIZE < 8) BLOCK_SIZE = 8;
*/
int i, j;
int n, m;
/*scanf("%d%d", &n, &m);*/
n = getIntFromBuf();
m = getIntFromBuf();
/* Padding */
int num_blocks = n / BLOCK_SIZE;
if (num_blocks * BLOCK_SIZE < n)
num_blocks ++;
int N = num_blocks * BLOCK_SIZE;
int* dist_matrix = (int*)malloc(sizeof(int) * N * N);
/* read in data */
for (i = 0; i < N * N; ++i)
dist_matrix[i] = N * 101;
for (i = 0; i < N; ++i)
dist_matrix[i * N + i] = 0;
for (i = 0; i < m; ++i){
int x, y, w;
/*scanf("%d%d%d", &x, &y, &w);*/
x = getIntFromBuf();
y = getIntFromBuf();
w = getIntFromBuf();
x--;
y--;
if (dist_matrix[x * N + y] > w)
dist_matrix[x * N + y] = w;
}
int* d_dist_matrix;
int* d_pivot_row;
int* d_pivot_col;
int* vd_dist_matrix;
int* vd_pivot_row;
int* vd_pivot_col;
int* d_bak_matrix;
int size = sizeof(int) * N * N;
int pivot_line_size = sizeof(int) * N * BLOCK_SIZE;
int *pivot_row = (int*)malloc(pivot_line_size);
int *pivot_col = (int*)malloc(pivot_line_size);
/* blocked APSP (all-pairs shortest paths) */
cudaStream_t stream[2];
cudaEvent_t fin, fin0[num_blocks];
cudaSetDevice(0);
cudaStreamCreate(&stream[0]);
for (i = 0; i < num_blocks; ++i)
cudaEventCreate(&fin0[i]);
cudaMalloc((void**)&d_dist_matrix, size);
cudaMalloc((void**)&d_pivot_row, pivot_line_size);
cudaMalloc((void**)&d_pivot_col, pivot_line_size);
cudaMalloc((void**)&d_bak_matrix, size);
cudaMemcpy(d_dist_matrix, dist_matrix, size, cudaMemcpyHostToDevice);
cudaSetDevice(1);
cudaStreamCreate(&stream[1]);
cudaEventCreate(&fin);
cudaMalloc((void**)&vd_dist_matrix, size);
cudaMalloc((void**)&vd_pivot_row, pivot_line_size);
cudaMalloc((void**)&vd_pivot_col, pivot_line_size);
cudaMemcpy(vd_dist_matrix, dist_matrix, size, cudaMemcpyHostToDevice);
cudaDeviceEnablePeerAccess(0, 0);
cudaSetDevice(0);
cudaDeviceEnablePeerAccess(1, 0);
if (num_blocks > 4){
int divide_line = num_blocks / sqrt(2);
for (i = 0; i < num_blocks; ++i){
/* phase #1: self dependent blocks */
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
int master_task = (num_blocks - 1) * 0.70;
int num_row_blocks = master_task + (i <= master_task);
int slave_task = num_blocks - num_row_blocks;
dim3 blockPerGrid_master(num_blocks - 1, num_blocks - 1);
cudaSetDevice(0);
pivot_floyed<<<1, threadsPerBlock,
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE), stream[0]>>>(d_dist_matrix, N, i, BLOCK_SIZE);
/* phase #2: pivot row & col blocks */
pivot_row_floyed<<<num_blocks - 1, threadsPerBlock,
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[0]>>>(d_dist_matrix, N, i, BLOCK_SIZE, d_pivot_row);
pivot_col_floyed<<<num_blocks - 1, threadsPerBlock,
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[0]>>>(d_dist_matrix, N, i, BLOCK_SIZE, d_pivot_col);
cudaMemcpyPeerAsync(vd_pivot_col, 1, d_pivot_col, 0, pivot_line_size, stream[0]);
cudaMemcpyPeerAsync(vd_pivot_row, 1, d_pivot_row, 0, pivot_line_size, stream[0]);
cudaEventRecord(fin0[i], stream[0]);
/* phase #3: other blocks */
if (i + 1 < num_blocks){
res_floyed<<<dim3(num_blocks - i - 1, num_blocks - i - 1), threadsPerBlock,
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[0]>>>(d_dist_matrix, N, i, BLOCK_SIZE, i + 1, i + 1);
if (i > 0){
res_floyed<<<dim3(i, num_blocks - i - 1), threadsPerBlock,
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[0]>>>(d_dist_matrix, N, i, BLOCK_SIZE, 0, i + 1);
res_floyed<<<dim3(num_blocks - i - 1, i), threadsPerBlock,
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[0]>>>(d_dist_matrix, N, i, BLOCK_SIZE, i + 1, 0);
}
}
if (i > divide_line && i > 0){
res_floyed<<<dim3(i, i), threadsPerBlock,
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[0]>>>(d_dist_matrix, N, i, BLOCK_SIZE, divide_line);
}
cudaSetDevice(1);
cudaStreamWaitEvent(stream[1], fin0[i], 0);
if (i > 0){
dim3 blockPerGrid_slave(min(i, divide_line), min(i, divide_line));
res_floyed_slave_ul<<<blockPerGrid_slave, threadsPerBlock,
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[1]>>>(vd_dist_matrix, N, i, BLOCK_SIZE, num_row_blocks,
vd_pivot_row, vd_pivot_col);
}
}
cudaSetDevice(1);
cudaMemcpyPeerAsync(d_bak_matrix, 0, vd_dist_matrix, 1, size, stream[1]);
cudaEventRecord(fin, stream[1]);
cudaSetDevice(0);
dim3 blockPerGrid_slave(num_blocks, num_blocks);
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
cudaStreamWaitEvent(stream[0], fin, 0);
checkmin<<<blockPerGrid_slave, threadsPerBlock, 0, stream[0]>>>(d_dist_matrix, d_bak_matrix, N, BLOCK_SIZE);
}
else
for (i = 0; i < num_blocks; ++i){
/* phase #1: self dependent blocks */
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 blockPerGrid(num_blocks - 1, num_blocks - 1);
pivot_floyed<<<1, threadsPerBlock,
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE)>>>(d_dist_matrix, N, i, BLOCK_SIZE);
if (num_blocks > 1){
/* phase #2: pivot row & col blocks */
pivot_row_floyed<<<num_blocks - 1, threadsPerBlock,
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2>>>(d_dist_matrix, N, i, BLOCK_SIZE);
pivot_col_floyed<<<num_blocks - 1, threadsPerBlock,
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2>>>(d_dist_matrix, N, i, BLOCK_SIZE);
/* phase #3: other blocks */
res_floyed_full<<<blockPerGrid, threadsPerBlock,
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2>>>(d_dist_matrix, N, i, BLOCK_SIZE);
}
}
cudaMemcpy(dist_matrix, d_dist_matrix, size, cudaMemcpyDeviceToHost);
freopen(output_filename, "w", stdout);
for (i = 0; i < n; ++i){
for (j = 0; j < n; ++j){
if (dist_matrix[i * N + j] < N * 101)
putIntToBuf(dist_matrix[i * N + j]);
else{
output_buf[output_cur_pt++] = 'I';
output_buf[output_cur_pt++] = 'N';
output_buf[output_cur_pt++] = 'F';
}
output_buf[output_cur_pt++] = ' ';
}
output_buf[output_cur_pt++] = '\n';
}
fwrite(output_buf, 1, output_cur_pt, stdout);
}
|
2c97a2409e0d883395dfb13b4bfce43dab0476fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Kernels For Training ANN
* Optimized for One Hidden Layer
* IMPORTANT: Arbitrary Layer Connectivity Requires Nesting in additional loop. Use NVCC loop unrolling flags to guarantee good performance.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <nn.h>
__device__ void initActMat(float * ins , float * actMatrix)
{
for(int i=0 ; i < IN ; i++)
{
actMatrix[i] = ins[i]; // ins is already this block's input slice of length IN
}
}
__device__ void sliceData(float* glob_data , float* dev_data, bool input)
{
if(input == true) //Slice Inputs
{
for(int i=0 ; i < IN ; i++)
{
dev_data[i] = glob_data[blockIdx.x*IN + i];
}
}
else //Slice Outputs
{
for(int i=0 ; i < ON ; i++)
{
dev_data[i] = glob_data[blockIdx.x*ON + i];
}
}
}
__global__ void kernBackProp(float* ins, float* outs, float* weights, float* grossUpdates, float* prevNetUp, float* outerrors)
{
/*
* Weights are flattened in the following form:
* Weight array is subdivided into (Layers-1) partitions of size HN*(IN+1) that correspond to layers
* Each of these partitions is subdivided into HN partitions of size IN+1 (up to IN inputs and 1 bias)
*
*/
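/*
 * Index sketch under that layout: the weight from source unit i (i == IN being
 * the bias) into unit h of layer l sits at weights[l*HN*(IN+1) + h*(IN+1) + i],
 * so the input->hidden weights start at offset 0 and the hidden->output
 * weights start at offset HN*(IN+1), matching the indexing used below.
 */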
__shared__ float inputs[IN]; //on-chip subsection of inputs
__shared__ float outputs[ON];
__shared__ float activations[IN*LAYERS];
__shared__ float dev_weights[(IN+1)*HN*(LAYERS-1)];
__shared__ float partSums[IN*LAYERS];
__shared__ float deltas[(IN+1)*HN*(LAYERS-1)];
__shared__ float outdeltas[ON];
for(int w = 0; w < (IN+1)*HN*(LAYERS-1); w++) //on-chip copy of weights
{
	dev_weights[w] = weights[w];
}
sliceData(ins, inputs,true);
sliceData(outs, outputs,false);
initActMat(inputs,activations);
//Initialize Inputs
for(int i = 0; i < IN ; i++)
{
activations[IN+i] = sigmoid(&activations[i]);
}
//Weighted Sum to Hidden Neuron
__syncthreads();
float hnSum = 0.0;
for(int i = 0; i < IN ; i++)
{
hnSum += dev_weights[(IN+1)*threadIdx.x + i] * activations[IN+i];
}
hnSum += dev_weights[(IN+1) * threadIdx.x + IN]; //hidden bias
//Store Output from Hidden Neuron
activations[2*IN + threadIdx.x] = sigmoid(&hnSum);
//Weighted Sum to Output Neuron
__syncthreads();
if(threadIdx.x < ON)
{
float onSum = 0.0;
for(int i = 0; i < HN ; i++)
{
onSum += dev_weights[HN*(IN+1) + (IN+1)*threadIdx.x + i] * activations[2*IN+i];
}
onSum += dev_weights[HN*(IN+1) + (IN+1)*threadIdx.x + IN]; //output bias
//Output Neuron Activations
activations[3*IN + threadIdx.x] = sigmoid(&onSum);
//Output Deltas
outdeltas[threadIdx.x] = outputs[threadIdx.x]-activations[3*IN + threadIdx.x];
//Weight Changes Hidden:Output
for(int i = 0 ; i < (HN+1) ; i++)
{
deltas[HN*(IN+1) + (IN+1)*threadIdx.x + i] = dev_weights[HN*(IN+1) + (IN+1)*threadIdx.x + i]*outdeltas[threadIdx.x]*DSigmoid(&activations[3*IN + threadIdx.x]);
//No getting around the insane indices. Apologies to the brave soul who reads this.
//Basically... change = -learningrate*delta*activation + momentum*previousweightchange
grossUpdates[blockIdx.x*((IN+1)*HN*(LAYERS-1))+(HN*(IN+1) + (IN+1)*threadIdx.x + i)]=(-1.0)*(LR*activations[HN*(IN+1) + (IN+1)*threadIdx.x + i]*deltas[HN*(IN+1) + (IN+1)*threadIdx.x + i] +(prevNetUp[HN*(IN+1) + (IN+1)*threadIdx.x + i]*MOM));
}
}
__syncthreads();
//Weight Changes From Hidden to Input Layer
for(int i = 0 ; i < IN+1 ; i++)
{
for(int i = 0; i < HN ; i++) //sum over all hidden neurons
{
deltas[(IN+1)*threadIdx.x + i] = dev_weights[(IN+1)*threadIdx.x + i]*deltas[HN*(IN+1) + (IN+1)*threadIdx.x + i]*DSigmoid(&activations[2*IN + threadIdx.x]);
grossUpdates[blockIdx.x*((IN+1)*HN*(LAYERS-1))+((IN+1)*threadIdx.x + i)] = (-1.0)*(LR*activations[2*IN + threadIdx.x]*deltas[(IN+1)*threadIdx.x + i])+(MOM*prevNetUp[(IN+1)*threadIdx.x + i]);
}
}
//We store output sum squared at end of kernel to mitigate warp divergence
if(threadIdx.x == 0)
{
	float errSum = 0.0f;
	for(int i = 0 ; i < ON ; i++)
	{
		errSum += (outputs[i]-activations[3*IN + i])*(outputs[i]-activations[3*IN + i]);
	}
	outerrors[blockIdx.x] = errSum; //accumulated squared error for this sample
}
}
| 2c97a2409e0d883395dfb13b4bfce43dab0476fd.cu | /*
* Kernels For Training ANN
* Optimized for One Hidden Layer
* IMPORTANT: Arbitrary Layer Connectivity Requires Nesting in additional loop. Use NVCC loop unrolling flags to guarantee good performance.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <nn.h>
__device__ void initActMat(float * ins , float * actMatrix)
{
for(int i=0 ; i < IN ; i++)
{
actMatrix[i] = ins[i]; // ins is already this block's input slice of length IN
}
}
__device__ void sliceData(float* glob_data , float* dev_data, bool input)
{
if(input == true) //Slice Inputs
{
for(int i=0 ; i < IN ; i++)
{
dev_data[i] = glob_data[blockIdx.x*IN + i];
}
}
else //Slice Outputs
{
for(int i=0 ; i < ON ; i++)
{
dev_data[i] = glob_data[blockIdx.x*ON + i];
}
}
}
__global__ void kernBackProp(float* ins, float* outs, float* weights, float* grossUpdates, float* prevNetUp, float* outerrors)
{
/*
* Weights are flattened in the following form:
* Weight array is subdivided into (Layers-1) partitions of size HN*(IN+1) that correspond to layers
* Each of these partitions is subdivided into HN partitions of size IN+1 (up to IN inputs and 1 bias)
*
*/
__shared__ float inputs[IN]; //on-chip subsection of inputs
__shared__ float outputs[ON];
__shared__ float activations[IN*LAYERS];
__shared__ float dev_weights[(IN+1)*HN*(LAYERS-1)];
__shared__ float partSums[IN*LAYERS];
__shared__ float deltas[(IN+1)*HN*(LAYERS-1)];
__shared__ float outdeltas[ON];
for(int w = 0; w < (IN+1)*HN*(LAYERS-1); w++) //on-chip copy of weights
{
	dev_weights[w] = weights[w];
}
sliceData(ins, inputs,true);
sliceData(outs, outputs,false);
initActMat(inputs,activations);
//Initialize Inputs
for(int i = 0; i < IN ; i++)
{
activations[IN+i] = sigmoid(&activations[i]);
}
//Weighted Sum to Hidden Neuron
__syncthreads();
float hnSum = 0.0;
for(int i = 0; i < IN ; i++)
{
hnSum += dev_weights[(IN+1)*threadIdx.x + i] * activations[IN+i];
}
hnSum += dev_weights[(IN+1) * threadIdx.x + IN]; //hidden bias
//Store Output from Hidden Neuron
activations[2*IN + threadIdx.x] = sigmoid(&hnSum);
//Weighted Sum to Output Neuron
__syncthreads();
if(threadIdx.x < ON)
{
float onSum = 0.0;
for(int i = 0; i < HN ; i++)
{
onSum += dev_weights[HN*(IN+1) + (IN+1)*threadIdx.x + i] * activations[2*IN+i];
}
onSum += dev_weights[HN*(IN+1) + (IN+1)*threadIdx.x + IN]; //output bias
//Output Neuron Activations
activations[3*IN + threadIdx.x] = sigmoid(&onSum);
//Output Deltas
outdeltas[threadIdx.x] = outputs[threadIdx.x]-activations[3*IN + threadIdx.x];
//Weight Changes Hidden:Output
for(int i = 0 ; i < (HN+1) ; i++)
{
deltas[HN*(IN+1) + (IN+1)*threadIdx.x + i] = dev_weights[HN*(IN+1) + (IN+1)*threadIdx.x + i]*outdeltas[threadIdx.x]*DSigmoid(&activations[3*IN + threadIdx.x]);
//No getting around the insane indices. Apologies to the brave soul who reads this.
//Basically... change = -learningrate*delta*activation + momentum*previousweightchange
grossUpdates[blockIdx.x*((IN+1)*HN*(LAYERS-1))+(HN*(IN+1) + (IN+1)*threadIdx.x + i)]=(-1.0)*(LR*activations[HN*(IN+1) + (IN+1)*threadIdx.x + i]*deltas[HN*(IN+1) + (IN+1)*threadIdx.x + i] +(prevNetUp[HN*(IN+1) + (IN+1)*threadIdx.x + i]*MOM));
}
}
__syncthreads();
//Weight Changes From Hidden to Input Layer
for(int i = 0 ; i < IN+1 ; i++)
{
for(int i = 0; i < HN ; i++) //sum over all hidden neurons
{
deltas[(IN+1)*threadIdx.x + i] = dev_weights[(IN+1)*threadIdx.x + i]*deltas[HN*(IN+1) + (IN+1)*threadIdx.x + i]*DSigmoid(&activations[2*IN + threadIdx.x]);
grossUpdates[blockIdx.x*((IN+1)*HN*(LAYERS-1))+((IN+1)*threadIdx.x + i)] = (-1.0)*(LR*activations[2*IN + threadIdx.x]*deltas[(IN+1)*threadIdx.x + i])+(MOM*prevNetUp[(IN+1)*threadIdx.x + i]);
}
}
//We store output sum squared at end of kernel to mitigate warp divergence
if(threadIdx.x == 0)
{
	float errSum = 0.0f;
	for(int i = 0 ; i < ON ; i++)
	{
		errSum += (outputs[i]-activations[3*IN + i])*(outputs[i]-activations[3*IN + i]);
	}
	outerrors[blockIdx.x] = errSum; //accumulated squared error for this sample
}
}
|
b8f5776cfb7cd768431fb1759462fc7703f07532.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <vector>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void matAdd(int * m0_d, int * m1_d, std::size_t w, std::size_t h){
auto tidx = blockIdx.x * blockDim.x + threadIdx.x;
auto tidy = blockIdx.y * blockDim.y + threadIdx.y;
if(tidx<w && tidy<h){
m0_d[tidy * w + tidx] += m1_d[tidy * w + tidx];
}
}
int main(){
std::size_t w = 10;
std::size_t h = 10;
std::size_t size = w*h;
std::vector<int> m0_h(size);
std::vector<int> m1_h(size);
int * m0_d = nullptr;
int * m1_d = nullptr;
for(std::size_t i = 0; i < size; i++){
m0_h[i] = m1_h[i] = i;
}
hipError_t err;
err = hipMalloc(&m0_d, m0_h.size() * sizeof(int));
if(err != hipSuccess){
std::cerr << hipGetErrorString(err) << std::endl;
return 1;
}
err = hipMalloc(&m1_d, m1_h.size() * sizeof(int));
hipMemcpy(m0_d,m0_h.data(),m0_h.size() * sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(m1_d,m1_h.data(),m1_h.size() * sizeof(int),hipMemcpyHostToDevice);
dim3 block(32,32);
dim3 grid((w-1)/ block.x +1, (h-1)/ block.y +1);
hipLaunchKernelGGL(( matAdd), dim3(grid),dim3(block), 0, 0, m0_d,m1_d,w,h);
hipDeviceSynchronize();
err = hipGetLastError();
if(err != hipSuccess){
std::cerr << hipGetErrorString(err) << std::endl;
return 1;
}
hipMemcpy(m0_h.data(),m0_d,m0_h.size() * sizeof(int),hipMemcpyDeviceToHost);
for(std::size_t i = 0; i < m0_h.size(); i++){
printf("%d\n",m0_h[i] );
}
hipFree(m0_d);
hipFree(m1_d);
}
| b8f5776cfb7cd768431fb1759462fc7703f07532.cu | #include <iostream>
#include <vector>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void matAdd(int * m0_d, int * m1_d, std::size_t w, std::size_t h){
auto tidx = blockIdx.x * blockDim.x + threadIdx.x;
auto tidy = blockIdx.y * blockDim.y + threadIdx.y;
if(tidx<w && tidy<h){
m0_d[tidy * w + tidx] += m1_d[tidy * w + tidx];
}
}
int main(){
std::size_t w = 10;
std::size_t h = 10;
std::size_t size = w*h;
std::vector<int> m0_h(size);
std::vector<int> m1_h(size);
int * m0_d = nullptr;
int * m1_d = nullptr;
for(std::size_t i = 0; i < size; i++){
m0_h[i] = m1_h[i] = i;
}
cudaError_t err;
err = cudaMalloc(&m0_d, m0_h.size() * sizeof(int));
if(err != cudaSuccess){
std::cerr << cudaGetErrorString(err) << std::endl;
return 1;
}
err = cudaMalloc(&m1_d, m1_h.size() * sizeof(int));
cudaMemcpy(m0_d,m0_h.data(),m0_h.size() * sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(m1_d,m1_h.data(),m1_h.size() * sizeof(int),cudaMemcpyHostToDevice);
dim3 block(32,32);
dim3 grid((w-1)/ block.x +1, (h-1)/ block.y +1);
matAdd<<<grid,block>>>(m0_d,m1_d,w,h);
cudaDeviceSynchronize();
err = cudaGetLastError();
if(err != cudaSuccess){
std::cerr << cudaGetErrorString(err) << std::endl;
return 1;
}
cudaMemcpy(m0_h.data(),m0_d,m0_h.size() * sizeof(int),cudaMemcpyDeviceToHost);
for(std::size_t i = 0; i < m0_h.size(); i++){
printf("%d\n",m0_h[i] );
}
cudaFree(m0_d);
cudaFree(m1_d);
}
|
889f32d11235788ff836949d4cae9979a900e938.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// @file pooling_gpu.cu
// @brief Pooling block implementation (GPU)
// @author Andrea Vedaldi
// @author Karel Lenc
/*
Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "pooling.hpp"
#include "../datacu.hpp"
#include <assert.h>
#include <float.h>
#include <sm_20_atomic_functions.h>
#include <math_constants.h>
/* ---------------------------------------------------------------- */
/* pooling_max_switches_forward */
/* ---------------------------------------------------------------- */
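/* Besides the pooled maxima, this variant also records in poolSwitches the linear index (stored 1-based) of the input element that produced each maximum. */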
template<typename T> __global__ void
pooling_max_switches_kernel
(T* pooled,
uint32_t* poolSwitches,
const T* data,
const int pooledWidth,
const int pooledHeight,
const int pooledVolume,
const int width,
const int height,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (pooledIndex < pooledVolume) {
int px = pooledIndex ;
int py = px / pooledWidth ;
int pz = py / pooledHeight ;
px %= pooledWidth ;
py %= pooledHeight ;
data += pz * (width*height) ;
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int x2 = min(x1 + poolWidth, width) ;
int y2 = min(y1 + poolHeight, height) ;
x1 = max(x1, 0) ;
y1 = max(y1, 0) ;
T bestValue = data[y1 * width + x1] ;
uint32_t switchLocation = (pz * height + y1) * width + (x1 + 1) ;
for (int y = y1 ; y < y2 ; ++y) {
for (int x = x1 ; x < x2 ; ++x) {
if(bestValue < data[y * width + x]) {
bestValue = data[y * width + x] ;
switchLocation = (pz * height + y) * width + (x+1) ;
}
}
}
pooled[pooledIndex] = bestValue ;
poolSwitches[pooledIndex] = switchLocation;
}
}
/* ---------------------------------------------------------------- */
/* pooling_max_forward */
/* ---------------------------------------------------------------- */
template<typename T> __global__ void
pooling_max_kernel
(T* pooled,
const T* data,
const int pooledWidth,
const int pooledHeight,
const int pooledVolume,
const int width,
const int height,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (pooledIndex < pooledVolume) {
int px = pooledIndex ;
int py = px / pooledWidth ;
int pz = py / pooledHeight ;
px %= pooledWidth ;
py %= pooledHeight ;
data += pz * (width*height) ;
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int x2 = min(x1 + poolWidth, width) ;
int y2 = min(y1 + poolHeight, height) ;
x1 = max(x1, 0) ;
y1 = max(y1, 0) ;
T bestValue = data[y1 * width + x1] ;
for (int y = y1 ; y < y2 ; ++y) {
for (int x = x1 ; x < x2 ; ++x) {
bestValue = max(bestValue, data[y * width + x]) ;
}
}
pooled[pooledIndex] = bestValue ;
}
}
/* ---------------------------------------------------------------- */
/* pooling_average_forward */
/* ---------------------------------------------------------------- */
template<typename T> __global__ void
pooling_average_kernel
(T* pooled,
const T* data,
const int pooledWidth,
const int pooledHeight,
const int pooledVolume,
const int width,
const int height,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
/* pooledIndex = x + y * pooledWidth + z * (pooledWidth * pooledHeight) */
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (pooledIndex < pooledVolume) {
int px = pooledIndex ;
int py = px / pooledWidth ;
int pz = py / pooledHeight ;
px %= pooledWidth ;
py %= pooledHeight ;
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int x2 = min(x1 + poolWidth, width) ;
int y2 = min(y1 + poolHeight, height) ;
x1 = max(x1, 0) ;
y1 = max(y1, 0) ;
data += pz * (width*height) ;
T accum = 0;
T poolSize = (y2 - y1)*(x2 - x1);
for (int y = y1 ; y < y2 ; ++y) {
for (int x = x1 ; x < x2 ; ++x) {
accum += data[y * width + x] ;
}
}
pooled[pooledIndex] = accum / poolSize ;
}
}
/* ---------------------------------------------------------------- */
/* pooling_max_backward */
/* ---------------------------------------------------------------- */
#ifdef VLNN_CAFFELIKE_BPPOOL
// In order to be able to use this, BP would need to have access to both
// bottom data and pooled data (currently only passed bottom data...)
template <typename T> __global__ void
pooling_max_backward_with_pooled_data
(T* derData,
const T* data,
const T* pooled,
const T* derPooled,
const int nthreads,
const int pooledWidth,
const int pooledHeight,
const int width,
const int height,
const int depth,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
// find out the local index
// find out the local offset
int x = index % width;
int y = (index / width) % height;
int z = (index / width / height) % depth;
int py1 = (y < poolHeight) ? 0 : (y - poolHeight) / strideY + 1;
int py2 = min(y / strideY + 1, pooledHeight);
int px1 = (x < poolWidth) ? 0 : (x - poolWidth) / strideX + 1;
int px2 = min(x / strideX + 1, pooledWidth);
T gradient = 0;
T datum = data[(z * height + y) * width + x];
pooled += z * pooledHeight * pooledWidth;
derPooled += z * pooledHeight * pooledWidth;
for (int py = py1; py < py2; ++py) {
for (int px = px1; px < px2; ++px) {
gradient += derPooled[py * pooledWidth + px] *
(datum == pooled[py * pooledWidth + px]);
}
}
derData[index] = gradient;
}
}
#endif
// an implementation of atomicAdd() for double (really slow)
__device__ double atomicAdd(double* address, double val)
{
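/* Emulate a double-precision atomic add with a compare-and-swap loop:
   reinterpret the destination as a 64-bit integer, attempt to swap in
   old+val, and retry whenever another thread updated the value in the
   meantime. */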
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
template<typename T> __global__ void
pooling_max_backward_kernel
(T* derData,
const T* data,
const T* derPooled,
const int pooledWidth,
const int pooledHeight,
const int pooledVolume,
const int width,
const int height,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (pooledIndex < pooledVolume) {
int px = pooledIndex ;
int py = px / pooledWidth ;
int pz = py / pooledHeight ;
px %= pooledWidth ;
py %= pooledHeight ;
data += pz * (width*height) ;
derData += pz * (width*height) ;
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int x2 = min(x1 + poolWidth, width) ;
int y2 = min(y1 + poolHeight, height) ;
x1 = max(x1, 0) ;
y1 = max(y1, 0) ;
int bestIndex = y1 * width + x1 ;
T bestValue = data[bestIndex] ;
for (int y = y1 ; y < y2 ; ++y) {
for (int x = x1 ; x < x2 ; ++x) {
int index = y * width + x ;
T value = data[index] ;
if (value > bestValue) {
bestValue = value ;
bestIndex = index ;
}
}
}
/*
This is bad, but required to eliminate a race condition when writing
to bottom_diff.
Caffe goes the other way around, but requires remembering the layer
output, or the maximal indexes.
atomicAdd(addr, val)
*/
atomicAdd(derData + bestIndex, derPooled[pooledIndex]) ;
}
}
/* ---------------------------------------------------------------- */
/* pooling_average_backward */
/* ---------------------------------------------------------------- */
template <typename T> __global__ void
pooling_average_backward_kernel(T* derData,
const T* derPooled,
const int nthreads,
const int pooledWidth,
const int pooledHeight,
const int width,
const int height,
const int depth,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
/* To understand the logic of this piece of code see the
comments of the row2im backward kernel */
int x_data = index ;
int y_data = x_data / width ;
int z = y_data / height ;
x_data %= width ;
y_data %= height ;
int dx = x_data + padLeft - poolWidth ;
int dy = y_data + padTop - poolHeight ;
int px1 = (dx >= 0) ? dx/strideX + 1 : 0 ;
int py1 = (dy >= 0) ? dy/strideY + 1 : 0 ;
int px2 = min((x_data + padLeft) / strideX, pooledWidth - 1) ;
int py2 = min((y_data + padTop) / strideY, pooledHeight - 1) ;
T accumulator = 0 ;
derPooled += z * pooledHeight * pooledWidth;
for (int py = py1 ; py <= py2 ; ++py) {
for (int px = px1 ; px <= px2 ; ++px) {
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int x2 = min(x1 + poolWidth, width) ;
int y2 = min(y1 + poolHeight, height) ;
x1 = max(x1, 0) ;
y1 = max(y1, 0) ;
T poolSize = (y2 - y1) * (x2 - x1);
accumulator += derPooled[py * pooledWidth + px] / poolSize ;
}
}
derData[index] = accumulator ;
}
}
/* ---------------------------------------------------------------- */
/* unpooling_max_forward */
/* ---------------------------------------------------------------- */
template <typename T> __global__ void
unpooling_max_forward
(T* unpooled,
const T* data,
const uint32_t* poolSwitches,
const int nthreads,
const int pooledWidth,
const int pooledHeight,
const int width,
const int height,
const int depth,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
int x_data = index ;
int y_data = x_data / width ;
int z = y_data / height ;
x_data %= width ;
y_data %= height ;
int dx = x_data + padLeft - poolWidth ;
int dy = y_data + padTop - poolHeight ;
int px1 = (dx >= 0) ? dx/strideX + 1 : 0 ;
int py1 = (dy >= 0) ? dy/strideY + 1 : 0 ;
int px2 = min((x_data + padLeft) / strideX, pooledWidth - 1) ;
int py2 = min((y_data + padTop) / strideY, pooledHeight - 1) ;
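/* (px1..px2) x (py1..py2) enumerates every pooling window whose receptive
   field can contain the input location (x_data, y_data); only those windows
   may have recorded a switch pointing at this element. */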
T unpoolValue = (T)(-CUDART_INF_F);
poolSwitches += z * pooledHeight * pooledWidth ;
data += z * pooledHeight * pooledWidth ;
for (int py = py1; py <= py2; ++py) {
for (int px = px1; px <= px2; ++px) {
uint32_t maxIndex = poolSwitches[py * pooledWidth + px] - 1;
if (maxIndex == index) {
if (data[py * pooledWidth + px] > unpoolValue) {
unpoolValue = data[py * pooledWidth + px];
}
}
}
}
unpooled[index] = unpoolValue;
}
}
/* ---------------------------------------------------------------- */
/* unpooling_max_backward */
/* ---------------------------------------------------------------- */
template <typename T> __global__ void
unpooling_max_backward
(T* derData,
const T* data,
const uint32_t* poolSwitches,
const T* derUnpooled,
const int nthreads,
const int pooledWidth,
const int pooledHeight,
const int width,
const int height,
const int depth,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
int x_data = index ;
int y_data = x_data / width ;
int z = y_data / height ;
x_data %= width ;
y_data %= height ;
int dx = x_data + padLeft - poolWidth ;
int dy = y_data + padTop - poolHeight ;
int px1 = (dx >= 0) ? dx/strideX + 1 : 0 ;
int py1 = (dy >= 0) ? dy/strideY + 1 : 0 ;
int px2 = min((x_data + padLeft) / strideX, pooledWidth - 1) ;
int py2 = min((y_data + padTop) / strideY, pooledHeight - 1) ;
T unpoolValue = (T)(-CUDART_INF_F);
T derValue;
poolSwitches += z * pooledHeight * pooledWidth ;
derData += z * pooledHeight * pooledWidth ;
data += z * pooledHeight * pooledWidth ;
int derDataIndex = -1 ;
for (int py = py1; py <= py2; ++py) {
for (int px = px1; px <= px2; ++px) {
uint32_t maxIndex = poolSwitches[py * pooledWidth + px] - 1;
if (maxIndex == index) {
if (data[py * pooledWidth + px] > unpoolValue) {
unpoolValue = data[py * pooledWidth + px];
derDataIndex = py * pooledWidth + px;
derValue = derUnpooled[index];
}
}
}
}
if (derDataIndex != -1) {
derData[derDataIndex] = derValue;
}
}
}
/* ---------------------------------------------------------------- */
/* Interface */
/* ---------------------------------------------------------------- */
namespace vl { namespace impl {
template <typename type>
struct pooling_max_switches<vl::GPU, type>
{
static vl::Error
forward(type* pooled,
uint32_t* poolSwitches,
type const* data,
size_t height, size_t width, size_t depth,
size_t poolHeight, size_t poolWidth,
size_t strideY, size_t strideX,
size_t padTop, size_t padBottom,
size_t padLeft, size_t padRight)
{
int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ;
int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ;
int pooledVolume = pooledWidth * pooledHeight * depth ;
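/* Standard output-size formula: floor((padded extent - window) / stride) + 1.
   One thread is launched per pooled element. */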
hipLaunchKernelGGL(( pooling_max_switches_kernel<type>)
, dim3(divideUpwards(pooledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
pooled, poolSwitches, data,
pooledHeight, pooledWidth, pooledVolume,
height, width,
poolHeight, poolWidth,
strideY, strideX,
padTop, padLeft);
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
} ; // pooling_max_switches
template <typename type>
struct pooling_max<vl::GPU, type>
{
static vl::Error
forward(type* pooled,
type const* data,
size_t height, size_t width, size_t depth,
size_t poolHeight, size_t poolWidth,
size_t strideY, size_t strideX,
size_t padTop, size_t padBottom,
size_t padLeft, size_t padRight)
{
int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ;
int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ;
int pooledVolume = pooledWidth * pooledHeight * depth ;
hipLaunchKernelGGL(( pooling_max_kernel<type>)
, dim3(divideUpwards(pooledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
pooled, data,
pooledHeight, pooledWidth, pooledVolume,
height, width,
poolHeight, poolWidth,
strideY, strideX,
padTop, padLeft);
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
static vl::Error
backward(type* derData,
type const* data,
type const* derOutput,
size_t height, size_t width, size_t depth,
size_t poolHeight, size_t poolWidth,
size_t strideY, size_t strideX,
size_t padTop, size_t padBottom,
size_t padLeft, size_t padRight)
{
int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ;
int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ;
int pooledVolume = pooledWidth * pooledHeight * depth ;
hipLaunchKernelGGL(( pooling_max_backward_kernel<type>)
, dim3(divideUpwards(pooledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
derData, data, derOutput,
pooledHeight, pooledWidth, pooledVolume,
height, width,
poolHeight, poolWidth,
strideY, strideX,
padTop, padLeft);
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
} ; // pooling_max
template <typename type>
struct pooling_average<vl::GPU, type>
{
static vl::Error
forward(type* pooled,
type const* data,
size_t height, size_t width, size_t depth,
size_t poolHeight, size_t poolWidth,
size_t strideY, size_t strideX,
size_t padTop, size_t padBottom, size_t padLeft, size_t padRight)
{
int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ;
int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ;
int pooledVolume = pooledWidth * pooledHeight * depth ;
hipLaunchKernelGGL(( pooling_average_kernel<type>)
, dim3(divideUpwards(pooledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
pooled, data,
pooledHeight, pooledWidth, pooledVolume,
height, width,
poolHeight, poolWidth,
strideY, strideX,
padTop, padLeft);
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
static vl::Error
backward(type* derData,
type const* derPooled,
size_t height, size_t width, size_t depth,
size_t poolHeight, size_t poolWidth,
size_t strideY, size_t strideX,
size_t padTop, size_t padBottom,
size_t padLeft, size_t padRight)
{
int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ;
int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ;
int dataVolume = width * height * depth ;
hipLaunchKernelGGL(( pooling_average_backward_kernel<type>)
, dim3(divideUpwards(dataVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
derData, derPooled,
dataVolume,
pooledHeight, pooledWidth,
height, width, dataVolume,
poolHeight, poolWidth,
strideY, strideX,
padTop, padLeft);
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
} ; // pooling_average
template <typename type>
struct unpooling_max<vl::GPU, type>
{
static vl::Error
forward(type* unpooled,
uint32_t* poolSwitches,
type const* data,
size_t height, size_t width, size_t depth,
size_t poolHeight, size_t poolWidth,
size_t strideY, size_t strideX,
size_t padTop, size_t padBottom, size_t padLeft, size_t padRight)
{
int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ;
int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ;
int nthreads = width * height * depth ;
hipLaunchKernelGGL(( unpooling_max_forward<type>)
, dim3(divideUpwards(nthreads, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
unpooled, data, poolSwitches,
nthreads,
pooledHeight, pooledWidth,
height, width, depth,
poolHeight, poolWidth, strideY, strideX, padTop, padLeft);
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
static vl::Error
backward(type* derData,
uint32_t* poolSwitches,
type const* data,
type const* derOutput,
size_t height, size_t width, size_t depth,
size_t poolHeight, size_t poolWidth,
size_t strideY, size_t strideX,
size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) {
int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ;
int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ;
int nthreads = width * height * depth ;
hipLaunchKernelGGL(( unpooling_max_backward<type>)
, dim3(divideUpwards(nthreads, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
derData, data, poolSwitches, derOutput,
nthreads,
pooledHeight, pooledWidth,
height, width, depth,
poolHeight, poolWidth, strideY, strideX, padTop, padLeft);
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
} ; // unpooling_max
} } ; // namespace vl::impl
// Instantiations
template struct vl::impl::pooling_max<vl::GPU, float> ;
template struct vl::impl::pooling_max_switches<vl::GPU, float> ;
template struct vl::impl::pooling_average<vl::GPU, float> ;
template struct vl::impl::unpooling_max<vl::GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::pooling_max<vl::GPU, double> ;
template struct vl::impl::pooling_max_switches<vl::GPU, double> ;
template struct vl::impl::pooling_average<vl::GPU, double> ;
template struct vl::impl::unpooling_max<vl::GPU, double> ;
#endif
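/* Illustrative usage sketch (not part of the original file): driving the
   max-pooling interface above from host code. The device buffers and their
   sizes are assumed to exist already, the buffer names are hypothetical, and
   error handling is omitted. With an 8x8x1 input, a 2x2 window, stride 2 and
   no padding the output is 4x4x1, per the size formula used in forward(). */
#if 0
static void example_max_pooling(float* pooled_d, float const* data_d)
{
  vl::Error err = vl::impl::pooling_max<vl::GPU, float>::forward(
      pooled_d, data_d,
      8, 8, 1,      /* height, width, depth */
      2, 2,         /* poolHeight, poolWidth */
      2, 2,         /* strideY, strideX */
      0, 0, 0, 0) ; /* padTop, padBottom, padLeft, padRight */
  (void)err ;
}
#endif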
| 889f32d11235788ff836949d4cae9979a900e938.cu | // @file pooling_gpu.cu
// @brief Pooling block implementation (GPU)
// @author Andrea Vedaldi
// @author Karel Lenc
/*
Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "pooling.hpp"
#include "../datacu.hpp"
#include <assert.h>
#include <float.h>
#include <sm_20_atomic_functions.h>
#include <math_constants.h>
/* ---------------------------------------------------------------- */
/* pooling_max_switches_forward */
/* ---------------------------------------------------------------- */
template<typename T> __global__ void
pooling_max_switches_kernel
(T* pooled,
uint32_t* poolSwitches,
const T* data,
const int pooledWidth,
const int pooledHeight,
const int pooledVolume,
const int width,
const int height,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (pooledIndex < pooledVolume) {
int px = pooledIndex ;
int py = px / pooledWidth ;
int pz = py / pooledHeight ;
px %= pooledWidth ;
py %= pooledHeight ;
data += pz * (width*height) ;
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int x2 = min(x1 + poolWidth, width) ;
int y2 = min(y1 + poolHeight, height) ;
x1 = max(x1, 0) ;
y1 = max(y1, 0) ;
T bestValue = data[y1 * width + x1] ;
uint32_t switchLocation = (pz * height + y1) * width + (x1 + 1) ;
for (int y = y1 ; y < y2 ; ++y) {
for (int x = x1 ; x < x2 ; ++x) {
if(bestValue < data[y * width + x]) {
bestValue = data[y * width + x] ;
switchLocation = (pz * height + y) * width + (x+1) ;
}
}
}
pooled[pooledIndex] = bestValue ;
poolSwitches[pooledIndex] = switchLocation;
}
}
/* ---------------------------------------------------------------- */
/* pooling_max_forward */
/* ---------------------------------------------------------------- */
template<typename T> __global__ void
pooling_max_kernel
(T* pooled,
const T* data,
const int pooledWidth,
const int pooledHeight,
const int pooledVolume,
const int width,
const int height,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (pooledIndex < pooledVolume) {
int px = pooledIndex ;
int py = px / pooledWidth ;
int pz = py / pooledHeight ;
px %= pooledWidth ;
py %= pooledHeight ;
data += pz * (width*height) ;
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int x2 = min(x1 + poolWidth, width) ;
int y2 = min(y1 + poolHeight, height) ;
x1 = max(x1, 0) ;
y1 = max(y1, 0) ;
T bestValue = data[y1 * width + x1] ;
for (int y = y1 ; y < y2 ; ++y) {
for (int x = x1 ; x < x2 ; ++x) {
bestValue = max(bestValue, data[y * width + x]) ;
}
}
pooled[pooledIndex] = bestValue ;
}
}
/* ---------------------------------------------------------------- */
/* pooling_average_forward */
/* ---------------------------------------------------------------- */
template<typename T> __global__ void
pooling_average_kernel
(T* pooled,
const T* data,
const int pooledWidth,
const int pooledHeight,
const int pooledVolume,
const int width,
const int height,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
/* pooledIndex = x + y * pooledWidth + z * (pooledWidth * pooledHeight) */
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (pooledIndex < pooledVolume) {
int px = pooledIndex ;
int py = px / pooledWidth ;
int pz = py / pooledHeight ;
px %= pooledWidth ;
py %= pooledHeight ;
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int x2 = min(x1 + poolWidth, width) ;
int y2 = min(y1 + poolHeight, height) ;
x1 = max(x1, 0) ;
y1 = max(y1, 0) ;
data += pz * (width*height) ;
T accum = 0;
T poolSize = (y2 - y1)*(x2 - x1);
for (int y = y1 ; y < y2 ; ++y) {
for (int x = x1 ; x < x2 ; ++x) {
accum += data[y * width + x] ;
}
}
pooled[pooledIndex] = accum / poolSize ;
}
}
/* ---------------------------------------------------------------- */
/* pooling_max_backward */
/* ---------------------------------------------------------------- */
#ifdef VLNN_CAFFELIKE_BPPOOL
// In order to be able to use this, BP would need to have access to both
// bottom data and pooled data (currently only passed bottom data...)
template <typename T> __global__ void
pooling_max_backward_with_pooled_data
(T* derData,
const T* data,
const T* pooled,
const T* derPooled,
const int nthreads,
const int pooledWidth,
const int pooledHeight,
const int width,
const int height,
const int depth,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
// find out the local index
// find out the local offset
int x = index % width;
int y = (index / width) % height;
int z = (index / width / height) % depth;
int py1 = (y < poolHeight) ? 0 : (y - poolHeight) / strideY + 1;
int py2 = min(y / strideY + 1, pooledHeight);
int px1 = (x < poolWidth) ? 0 : (x - poolWidth) / strideX + 1;
int px2 = min(x / strideX + 1, pooledWidth);
T gradient = 0;
T datum = data[(z * height + y) * width + x];
pooled += z * pooledHeight * pooledWidth;
derPooled += z * pooledHeight * pooledWidth;
for (int py = py1; py < py2; ++py) {
for (int px = px1; px < px2; ++px) {
gradient += derPooled[py * pooledWidth + px] *
(datum == pooled[py * pooledWidth + px]);
}
}
derData[index] = gradient;
}
}
#endif
// an implementation of atomicAdd() for double (really slow)
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
template<typename T> __global__ void
pooling_max_backward_kernel
(T* derData,
const T* data,
const T* derPooled,
const int pooledWidth,
const int pooledHeight,
const int pooledVolume,
const int width,
const int height,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (pooledIndex < pooledVolume) {
int px = pooledIndex ;
int py = px / pooledWidth ;
int pz = py / pooledHeight ;
px %= pooledWidth ;
py %= pooledHeight ;
data += pz * (width*height) ;
derData += pz * (width*height) ;
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int x2 = min(x1 + poolWidth, width) ;
int y2 = min(y1 + poolHeight, height) ;
x1 = max(x1, 0) ;
y1 = max(y1, 0) ;
int bestIndex = y1 * width + x1 ;
T bestValue = data[bestIndex] ;
for (int y = y1 ; y < y2 ; ++y) {
for (int x = x1 ; x < x2 ; ++x) {
int index = y * width + x ;
T value = data[index] ;
if (value > bestValue) {
bestValue = value ;
bestIndex = index ;
}
}
}
/*
This is bad, but required to eliminate a race condition when writing
to bottom_diff.
Caffe goes the other way around, but requires remembering the layer
output, or the maximal indexes.
atomicAdd(addr, val)
*/
atomicAdd(derData + bestIndex, derPooled[pooledIndex]) ;
}
}
/* ---------------------------------------------------------------- */
/* pooling_average_backward */
/* ---------------------------------------------------------------- */
template <typename T> __global__ void
pooling_average_backward_kernel(T* derData,
const T* derPooled,
const int nthreads,
const int pooledWidth,
const int pooledHeight,
const int width,
const int height,
const int depth,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
/* To understand the logic of this piece of code see the
comments of the row2im backward kernel */
int x_data = index ;
int y_data = x_data / width ;
int z = y_data / height ;
x_data %= width ;
y_data %= height ;
int dx = x_data + padLeft - poolWidth ;
int dy = y_data + padTop - poolHeight ;
int px1 = (dx >= 0) ? dx/strideX + 1 : 0 ;
int py1 = (dy >= 0) ? dy/strideY + 1 : 0 ;
int px2 = min((x_data + padLeft) / strideX, pooledWidth - 1) ;
int py2 = min((y_data + padTop) / strideY, pooledHeight - 1) ;
T accumulator = 0 ;
derPooled += z * pooledHeight * pooledWidth;
for (int py = py1 ; py <= py2 ; ++py) {
for (int px = px1 ; px <= px2 ; ++px) {
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int x2 = min(x1 + poolWidth, width) ;
int y2 = min(y1 + poolHeight, height) ;
x1 = max(x1, 0) ;
y1 = max(y1, 0) ;
T poolSize = (y2 - y1) * (x2 - x1);
accumulator += derPooled[py * pooledWidth + px] / poolSize ;
}
}
derData[index] = accumulator ;
}
}
/* ---------------------------------------------------------------- */
/* unpooling_max_forward */
/* ---------------------------------------------------------------- */
template <typename T> __global__ void
unpooling_max_forward
(T* unpooled,
const T* data,
const uint32_t* poolSwitches,
const int nthreads,
const int pooledWidth,
const int pooledHeight,
const int width,
const int height,
const int depth,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
int x_data = index ;
int y_data = x_data / width ;
int z = y_data / height ;
x_data %= width ;
y_data %= height ;
int dx = x_data + padLeft - poolWidth ;
int dy = y_data + padTop - poolHeight ;
int px1 = (dx >= 0) ? dx/strideX + 1 : 0 ;
int py1 = (dy >= 0) ? dy/strideY + 1 : 0 ;
int px2 = min((x_data + padLeft) / strideX, pooledWidth - 1) ;
int py2 = min((y_data + padTop) / strideY, pooledHeight - 1) ;
T unpoolValue = (T)(-CUDART_INF_F);
poolSwitches += z * pooledHeight * pooledWidth ;
data += z * pooledHeight * pooledWidth ;
for (int py = py1; py <= py2; ++py) {
for (int px = px1; px <= px2; ++px) {
uint32_t maxIndex = poolSwitches[py * pooledWidth + px] - 1;
if (maxIndex == index) {
if (data[py * pooledWidth + px] > unpoolValue) {
unpoolValue = data[py * pooledWidth + px];
}
}
}
}
unpooled[index] = unpoolValue;
}
}
/* ---------------------------------------------------------------- */
/* unpooling_max_backward */
/* ---------------------------------------------------------------- */
template <typename T> __global__ void
unpooling_max_backward
(T* derData,
const T* data,
const uint32_t* poolSwitches,
const T* derUnpooled,
const int nthreads,
const int pooledWidth,
const int pooledHeight,
const int width,
const int height,
const int depth,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
int x_data = index ;
int y_data = x_data / width ;
int z = y_data / height ;
x_data %= width ;
y_data %= height ;
int dx = x_data + padLeft - poolWidth ;
int dy = y_data + padTop - poolHeight ;
int px1 = (dx >= 0) ? dx/strideX + 1 : 0 ;
int py1 = (dy >= 0) ? dy/strideY + 1 : 0 ;
int px2 = min((x_data + padLeft) / strideX, pooledWidth - 1) ;
int py2 = min((y_data + padTop) / strideY, pooledHeight - 1) ;
T unpoolValue = (T)(-CUDART_INF_F);
T derValue;
poolSwitches += z * pooledHeight * pooledWidth ;
derData += z * pooledHeight * pooledWidth ;
data += z * pooledHeight * pooledWidth ;
int derDataIndex = -1 ;
for (int py = py1; py <= py2; ++py) {
for (int px = px1; px <= px2; ++px) {
uint32_t maxIndex = poolSwitches[py * pooledWidth + px] - 1;
if (maxIndex == index) {
if (data[py * pooledWidth + px] > unpoolValue) {
unpoolValue = data[py * pooledWidth + px];
derDataIndex = py * pooledWidth + px;
derValue = derUnpooled[index];
}
}
}
}
if (derDataIndex != -1) {
derData[derDataIndex] = derValue;
}
}
}
/* ---------------------------------------------------------------- */
/* Interface */
/* ---------------------------------------------------------------- */
namespace vl { namespace impl {
template <typename type>
struct pooling_max_switches<vl::GPU, type>
{
static vl::Error
forward(type* pooled,
uint32_t* poolSwitches,
type const* data,
size_t height, size_t width, size_t depth,
size_t poolHeight, size_t poolWidth,
size_t strideY, size_t strideX,
size_t padTop, size_t padBottom,
size_t padLeft, size_t padRight)
{
int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ;
int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ;
int pooledVolume = pooledWidth * pooledHeight * depth ;
pooling_max_switches_kernel<type>
<<< divideUpwards(pooledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(pooled, poolSwitches, data,
pooledHeight, pooledWidth, pooledVolume,
height, width,
poolHeight, poolWidth,
strideY, strideX,
padTop, padLeft);
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
} ; // pooling_max_switches
template <typename type>
struct pooling_max<vl::GPU, type>
{
static vl::Error
forward(type* pooled,
type const* data,
size_t height, size_t width, size_t depth,
size_t poolHeight, size_t poolWidth,
size_t strideY, size_t strideX,
size_t padTop, size_t padBottom,
size_t padLeft, size_t padRight)
{
int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ;
int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ;
int pooledVolume = pooledWidth * pooledHeight * depth ;
pooling_max_kernel<type>
<<< divideUpwards(pooledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(pooled, data,
pooledHeight, pooledWidth, pooledVolume,
height, width,
poolHeight, poolWidth,
strideY, strideX,
padTop, padLeft);
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
static vl::Error
backward(type* derData,
type const* data,
type const* derOutput,
size_t height, size_t width, size_t depth,
size_t poolHeight, size_t poolWidth,
size_t strideY, size_t strideX,
size_t padTop, size_t padBottom,
size_t padLeft, size_t padRight)
{
int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ;
int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ;
int pooledVolume = pooledWidth * pooledHeight * depth ;
pooling_max_backward_kernel<type>
<<< divideUpwards(pooledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(derData, data, derOutput,
pooledHeight, pooledWidth, pooledVolume,
height, width,
poolHeight, poolWidth,
strideY, strideX,
padTop, padLeft);
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
} ; // pooling_max
template <typename type>
struct pooling_average<vl::GPU, type>
{
static vl::Error
forward(type* pooled,
type const* data,
size_t height, size_t width, size_t depth,
size_t poolHeight, size_t poolWidth,
size_t strideY, size_t strideX,
size_t padTop, size_t padBottom, size_t padLeft, size_t padRight)
{
int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ;
int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ;
int pooledVolume = pooledWidth * pooledHeight * depth ;
pooling_average_kernel<type>
<<< divideUpwards(pooledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(pooled, data,
pooledHeight, pooledWidth, pooledVolume,
height, width,
poolHeight, poolWidth,
strideY, strideX,
padTop, padLeft);
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
static vl::Error
backward(type* derData,
type const* derPooled,
size_t height, size_t width, size_t depth,
size_t poolHeight, size_t poolWidth,
size_t strideY, size_t strideX,
size_t padTop, size_t padBottom,
size_t padLeft, size_t padRight)
{
int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ;
int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ;
int dataVolume = width * height * depth ;
pooling_average_backward_kernel<type>
<<< divideUpwards(dataVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(derData, derPooled,
dataVolume,
pooledHeight, pooledWidth,
height, width, dataVolume,
poolHeight, poolWidth,
strideY, strideX,
padTop, padLeft);
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
} ; // pooling_average
template <typename type>
struct unpooling_max<vl::GPU, type>
{
static vl::Error
forward(type* unpooled,
uint32_t* poolSwitches,
type const* data,
size_t height, size_t width, size_t depth,
size_t poolHeight, size_t poolWidth,
size_t strideY, size_t strideX,
size_t padTop, size_t padBottom, size_t padLeft, size_t padRight)
{
int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ;
int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ;
int nthreads = width * height * depth ;
unpooling_max_forward<type>
<<< divideUpwards(nthreads, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(unpooled, data, poolSwitches,
nthreads,
pooledHeight, pooledWidth,
height, width, depth,
poolHeight, poolWidth, strideY, strideX, padTop, padLeft);
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
static vl::Error
backward(type* derData,
uint32_t* poolSwitches,
type const* data,
type const* derOutput,
size_t height, size_t width, size_t depth,
size_t poolHeight, size_t poolWidth,
size_t strideY, size_t strideX,
size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) {
int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ;
int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ;
int nthreads = width * height * depth ;
unpooling_max_backward<type>
<<< divideUpwards(nthreads, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(derData, data, poolSwitches, derOutput,
nthreads,
pooledHeight, pooledWidth,
height, width, depth,
poolHeight, poolWidth, strideY, strideX, padTop, padLeft);
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
} ; // unpooling_max
} } ; // namespace vl::impl
// Instantiations
template struct vl::impl::pooling_max<vl::GPU, float> ;
template struct vl::impl::pooling_max_switches<vl::GPU, float> ;
template struct vl::impl::pooling_average<vl::GPU, float> ;
template struct vl::impl::unpooling_max<vl::GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::pooling_max<vl::GPU, double> ;
template struct vl::impl::pooling_max_switches<vl::GPU, double> ;
template struct vl::impl::pooling_average<vl::GPU, double> ;
template struct vl::impl::unpooling_max<vl::GPU, double> ;
#endif
|
47cce75e05da93e5381d459d74cd119a6c13ef48.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@author Raffaele Solca
@author Mark Gates
@precisions normal z -> s d c
*/
#include "common_magma.h"
#define NB 64
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting the k-1 super-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil((m+k-1)/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread k-1 does the main diagonal, thread k-2 the first super-diagonal, etc.
block 0 block 1
0 => skip above matrix
1 0 => skip above matrix
2 1 0 => skip above matrix
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ | 3 2 1 0 ]
[ | 3 2 1 ]
| 3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=10, n=12, k=4, nb=8. Each column is done in parallel.
*/
__global__
void zlaset_band_upper(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *A, int lda)
{
int k = blockDim.x;
int ibx = blockIdx.x * NB;
int ind = ibx + threadIdx.x - k + 1;
A += ind + ibx*lda;
magmaDoubleComplex value = offdiag;
if (threadIdx.x == k-1)
value = diag;
#pragma unroll
for (int j=0; j < NB; j++) {
if (ibx + j < n && ind + j >= 0 && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting the k-1 sub-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil(m/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread 0 does the main diagonal, thread 1 the first sub-diagonal, etc.
block 0 block 1
[ 0 | ]
[ 1 0 | ]
[ 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ 3 2 1 0 ]
[ 3 2 1 ]
3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=13, n=12, k=4, nb=8. Each column is done in parallel.
*/
__global__
void zlaset_band_lower(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *A, int lda)
{
//int k = blockDim.x;
int ibx = blockIdx.x * NB;
int ind = ibx + threadIdx.x;
A += ind + ibx*lda;
magmaDoubleComplex value = offdiag;
if (threadIdx.x == 0)
value = diag;
#pragma unroll
for (int j=0; j < NB; j++) {
if (ibx + j < n && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/**
Purpose
-------
ZLASET_BAND_STREAM initializes the main diagonal of dA to DIAG,
and the K-1 sub- or super-diagonals to OFFDIAG.
This is the same as ZLASET_BAND, but adds a queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
k INTEGER
The number of diagonals to set, including the main diagonal. K >= 0.
Currently, K <= 1024 due to CUDA restrictions (max. number of threads per block).
@param[in]
offdiag COMPLEX_16
Off-diagonal elements in the band are set to OFFDIAG.
@param[in]
diag COMPLEX_16
All the main diagonal elements are set to DIAG.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n where i != j, abs(i-j) < k;
A(i,i) = DIAG, 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Stream to execute ZLASET in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlaset_band_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( k < 0 || k > 1024 )
info = -4;
else if ( ldda < max(1,m) )
info = -6;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if (uplo == MagmaUpper) {
dim3 threads( min(k,n) );
dim3 grid( (min(m+k-1,n) - 1)/NB + 1 );
hipLaunchKernelGGL(( zlaset_band_upper), dim3(grid), dim3(threads), 0, queue , m, n, offdiag, diag, dA, ldda);
}
else if (uplo == MagmaLower) {
dim3 threads( min(k,m) );
dim3 grid( (min(m,n) - 1)/NB + 1 );
hipLaunchKernelGGL(( zlaset_band_lower), dim3(grid), dim3(threads), 0, queue , m, n, offdiag, diag, dA, ldda);
}
}
/**
@see magmablas_zlaset_band_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlaset_band(
magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex_ptr dA, magma_int_t ldda)
{
magmablas_zlaset_band_q(uplo, m, n, k, offdiag, diag, dA, ldda, magma_stream);
}
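/* Illustrative usage sketch (not part of MAGMA): set the main diagonal of an
   m-by-n device matrix to 1 and its two super-diagonals to 0.5 using the
   routine above. dA is assumed to be an already-allocated device matrix with
   leading dimension ldda; error handling is omitted. */
#if 0
extern "C" void
example_zlaset_band(magma_int_t m, magma_int_t n,
                    magmaDoubleComplex_ptr dA, magma_int_t ldda)
{
    magmaDoubleComplex diag    = MAGMA_Z_MAKE( 1.0, 0.0 );
    magmaDoubleComplex offdiag = MAGMA_Z_MAKE( 0.5, 0.0 );
    magmablas_zlaset_band( MagmaUpper, m, n, 3, offdiag, diag, dA, ldda );
}
#endif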
| 47cce75e05da93e5381d459d74cd119a6c13ef48.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@author Raffaele Solca
@author Mark Gates
@precisions normal z -> s d c
*/
#include "common_magma.h"
#define NB 64
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting the k-1 super-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil((m+k-1)/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread k-1 does the main diagonal, thread k-2 the first super-diagonal, etc.
block 0 block 1
0 => skip above matrix
1 0 => skip above matrix
2 1 0 => skip above matrix
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ | 3 2 1 0 ]
[ | 3 2 1 ]
| 3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=10, n=12, k=4, nb=8. Each column is done in parallel.
*/
__global__
void zlaset_band_upper(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *A, int lda)
{
int k = blockDim.x;
int ibx = blockIdx.x * NB;
int ind = ibx + threadIdx.x - k + 1;
A += ind + ibx*lda;
magmaDoubleComplex value = offdiag;
if (threadIdx.x == k-1)
value = diag;
#pragma unroll
for (int j=0; j < NB; j++) {
if (ibx + j < n && ind + j >= 0 && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting the k-1 sub-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil(m/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread 0 does the main diagonal, thread 1 the first sub-diagonal, etc.
block 0 block 1
[ 0 | ]
[ 1 0 | ]
[ 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ 3 2 1 0 ]
[ 3 2 1 ]
3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=13, n=12, k=4, nb=8. Each column is done in parallel.
*/
__global__
void zlaset_band_lower(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *A, int lda)
{
//int k = blockDim.x;
int ibx = blockIdx.x * NB;
int ind = ibx + threadIdx.x;
A += ind + ibx*lda;
magmaDoubleComplex value = offdiag;
if (threadIdx.x == 0)
value = diag;
#pragma unroll
for (int j=0; j < NB; j++) {
if (ibx + j < n && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/**
Purpose
-------
ZLASET_BAND_STREAM initializes the main diagonal of dA to DIAG,
and the K-1 sub- or super-diagonals to OFFDIAG.
This is the same as ZLASET_BAND, but adds a queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
k INTEGER
The number of diagonals to set, including the main diagonal. K >= 0.
Currently, K <= 1024 due to CUDA restrictions (max. number of threads per block).
@param[in]
offdiag COMPLEX_16
Off-diagonal elements in the band are set to OFFDIAG.
@param[in]
diag COMPLEX_16
All the main diagonal elements are set to DIAG.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n where i != j, abs(i-j) < k;
A(i,i) = DIAG, 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Stream to execute ZLASET in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlaset_band_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( k < 0 || k > 1024 )
info = -4;
else if ( ldda < max(1,m) )
info = -6;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if (uplo == MagmaUpper) {
dim3 threads( min(k,n) );
dim3 grid( (min(m+k-1,n) - 1)/NB + 1 );
zlaset_band_upper<<< grid, threads, 0, queue >>> (m, n, offdiag, diag, dA, ldda);
}
else if (uplo == MagmaLower) {
dim3 threads( min(k,m) );
dim3 grid( (min(m,n) - 1)/NB + 1 );
zlaset_band_lower<<< grid, threads, 0, queue >>> (m, n, offdiag, diag, dA, ldda);
}
}
/**
@see magmablas_zlaset_band_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlaset_band(
magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex_ptr dA, magma_int_t ldda)
{
magmablas_zlaset_band_q(uplo, m, n, k, offdiag, diag, dA, ldda, magma_stream);
}
|
c99c40706832b47d9c5ea558c80fd1b63fe9f25e.hip | // !!! This is a file automatically generated by hipify!!!
/*******
The code below is the original code, edited so that it would run on CUDA
Compute Capability 6.1 hardware (EVGA/NVIDIA GTX 1070) with CUDA v9.0.176.
The display driver being used is NVIDIA 384.111. The OS is Debian Linux v9
('Sid').
Charles W Johnson
April, 2018
*******/
///////////////////////////////////////
///////////////////////////////// SSSP5
/////////////////////// usando texturas
///////////////////////////////////////
/* CWJ includes */
#include <hip/hip_runtime.h>
#include "comun.cu"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#ifndef _SSSP5_Texture_AllOfAll
#define _SSSP5_Texture_AllOfAll
//////////////////////////////////////////
bool ejecutarIteracion_SSSP5_tex_allOfAll(
const unsigned int nVuelta,
const dim3 grid, const dim3 threads,
const unsigned int nv, const unsigned int na,
const unsigned int mem_size_V, const unsigned int mem_size_A,
const unsigned int mem_size_C, const unsigned int mem_size_F,
const unsigned int infinito,
bool* p_h, bool* f_h, unsigned int* c_h ,
bool* p_d, bool* f_d, unsigned int* c_d,
unsigned int* chi, unsigned int* cho, unsigned int* cdi, unsigned int* cdo)
{
//RECUERDA: mem_size_V= (nv+1)*sizeof(unsigned int)
#ifdef DEBUG
printf("\n\n*******************\n");
printf("\nVUELTA %i\n",nVuelta);
mostrarUI(c_h, nv, "c_h");
mostrarB(f_h, nv, "f_h");
mostrarB(p_h, nv, "p_h");
printf("\nEJECUCION KERNEL 1\n");
printf("num_threadsInBlock= %i\n", threads.x);
printf("num_blocksInGrid= %i\n", grid.x);
#endif // DEBUG
//ejecutar último kernel
hipGetLastError(); // reset the runtime error variable to hipSuccess
// ACTUALIZANDO CAMINOS MINIMOS ESPECIALES: kernel1
hipLaunchKernelGGL(( kernel1_SSSP5_tex_all), dim3(grid),dim3(threads),threads.x*sizeof(unsigned int), 0, c_d);
// check if kernel execution generated and error
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipPeekAtLastError());
hipDeviceSynchronize();
#ifdef DEBUG
copiarD2H((void*)c_h, (void*)c_d, mem_size_C);
mostrarUI(c_h, nv, "c_h");
printf("\nEJECUCION KERNEL 2\n");
#endif // DEBUG
//MINIMIZANDO LOS COSTES RECIEN ACTUALIZADOS
unsigned int min= infinito;
minimizar(nv, c_d, p_d, threads, infinito, chi, cho, cdi, cdo, min);
#ifdef DEBUG
printf("\n\nEl minimo es %i\n", min);
printf("\nEJECUCION KERNEL 3\n");
printf("num_threadsInBlock= %i\n", threads.x);
printf("num_blocksInGrid= %i\n", grid.x);
#endif // DEBUG
//ejecutar último kernel
hipGetLastError(); // reset the runtime error variable to hipSuccess
//ACTUALIZANDO LA FRONTERA: Kernel3
hipLaunchKernelGGL(( kernel3_tex), dim3(grid),dim3(threads), 0, 0, p_d, f_d, min);
// check if kernel execution generated and error
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipPeekAtLastError());
hipDeviceSynchronize();
#ifdef DEBUG
copiarD2H( (void*) p_h, (void*)p_d, mem_size_F);
mostrarB(p_h, nv, "p_h");
copiarD2H( (void*) f_h, (void*)f_d, mem_size_F);
mostrarB(f_h, nv, "f_h");
#endif // DEBUG
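// min==infinito means no pending vertex was reachable in this sweep, so the
// caller treats it as the last iteration and stops.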
return (min==infinito);
}
//////////////////////////////////
void testGraph_SSSP5_tex_allOfAll(
const unsigned int nv, const unsigned int mem_size_V,
const unsigned int na, const unsigned int mem_size_A,
const unsigned int infinito, const unsigned int* v_h,
const unsigned int* a_h, const unsigned int* w_h,
const unsigned int* reference)
{
//RECUERDA: mem_size_V= (nv+1)*sizeof(unsigned int)
unsigned int* v_d; //array de vértices device
unsigned int* a_d; //array de aristas device
unsigned int* w_d; //array de pesos device
//copiar grafo de host a device
inicializar_Grafo_Device(v_h, mem_size_V, v_d,
a_h, mem_size_A, a_d,
w_h, w_d);
//enlazar las texturas
hipBindTexture(0, textura_v, v_d, mem_size_V);
hipBindTexture(0, textura_a, a_d, mem_size_A);
hipBindTexture(0, textura_w, w_d, mem_size_A);
unsigned int* c_h; //solución en el host
unsigned int* c_d; //solución en el device
unsigned int mem_size_C= mem_size_V-sizeof(unsigned int); //Descontar el tapon -4
inicializar_Sol(c_h, c_d, nv, mem_size_C, infinito);
bool* f_h; //frontera en el host
bool* f_d; //frontera en el device
unsigned int mem_size_F= sizeof(bool) * nv;
inicializar_Frontera(f_h, f_d, nv, mem_size_F);
bool* p_h; //pendientes por procesar
bool* p_d; //pendientes por procesar
inicializar_Pendientes(p_h, p_d, nv, mem_size_F);
//enlazar las texturas del algoritmo
hipBindTexture(0, textura_c, c_d, mem_size_C);
hipBindTexture(0, textura_p, p_d, mem_size_F);
hipBindTexture(0, textura_f, f_d, mem_size_F);
#ifdef DEBUG
//DEPURACION
printf("\nnv= %i\n", nv);
printf("na= %i\n", na);
printf("mem_size_V= %i\n", mem_size_V);
printf("mem_size_A= %i\n", mem_size_A);
printf("mem_size_F= %i\n\n", mem_size_F);
#endif // DEBUG
// setup execution parameters
unsigned int num_threadsInBlock= NUM_THREADS_IN_BLOCK;
//unsigned int num_blocksInGrid= nv/num_threadsInBlock; // original code, but the line below is better
unsigned int num_blocksInGrid = (nv + (num_threadsInBlock-1)) / num_threadsInBlock;
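// Ceiling division: rounds up so every vertex gets a thread even when nv is
// not a multiple of the block size.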
dim3 grid(num_blocksInGrid, 1, 1);
dim3 threads(num_threadsInBlock, 1, 1);
//RESERVAR ESPACIO PARA LA MINIMIZACION
unsigned int nvi = nv/(2*num_threadsInBlock);
unsigned int nvo = nvi/(2*num_threadsInBlock);
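// Presumably the intermediate (nvi) and final (nvo) buffer sizes for the
// two-pass min-reduction performed by minimizar(): each pass lets one block
// of num_threadsInBlock threads reduce 2*num_threadsInBlock values.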
unsigned int* cdi;
unsigned int* cdo;
hipMalloc((void**) &cdi, nvi*sizeof(unsigned int));
hipMalloc((void**) &cdo, nvo*sizeof(unsigned int));
unsigned int* chi = (unsigned int*) malloc(nvi*sizeof(unsigned int));
unsigned int* cho = (unsigned int*) malloc(nvo*sizeof(unsigned int));
/* Updated timer code for CUDA 9 */
hipEvent_t timerStart, timerStop;
float time;
//EJECUTAR VUELTAS
bool ultima= false;
unsigned int i= 0;
// start things
hipEventCreate(&timerStart);
hipEventCreate(&timerStop);
hipEventRecord(timerStart, 0);
while(!ultima){
i++;
ultima= ejecutarIteracion_SSSP5_tex_allOfAll( i,
grid, threads,
nv, na,
mem_size_V, mem_size_A, mem_size_C, mem_size_F,
infinito,
p_h, f_h, c_h,
p_d, f_d, c_d,
chi, cho, cdi, cdo);
}//while
// end things
hipEventRecord(timerStop, 0);
hipEventSynchronize(timerStop);
hipEventElapsedTime(&time, timerStart, timerStop);
hipEventDestroy(timerStart);
hipEventDestroy(timerStop);
printf("Runtime for SSSP5_Texture_AllOfAll algorithm is: %.6f ms\n", time);
copiarD2H((void*)c_h, (void*)c_d, mem_size_C);
//desenlazar las texturas
hipUnbindTexture(textura_v);
hipUnbindTexture(textura_a);
hipUnbindTexture(textura_w);
// cleanup memory
hipFree(v_d);
hipFree(a_d);
hipFree(w_d);
free(f_h);
free(p_h);
//desenlazar las texturas
hipUnbindTexture(textura_c);
//hipUnbindTexture(textura_p);
//hipUnbindTexture(textura_f);
hipFree(c_d);
hipFree(f_d);
hipFree(p_d);
free(chi);
free(cho);
hipFree(cdi);
hipFree(cdo);
// check result
//CUTBoolean res = cutComparei( (int*)reference, (int*)c_h, nv);
//printf( "%s\t", (1 == res) ? "OK" : "FAILED");
//mostrarUI(c_h, nv, "c_h");
//mostrarUI(reference, nv, "reference");
// cleanup memory
free(c_h);
}
#endif //#ifndef _SSSP5_Texture_AllOfAll
| c99c40706832b47d9c5ea558c80fd1b63fe9f25e.cu |
/*******
The code below is the original code, edited so that it would run on CUDA
Compute Capability 6.1 hardware (EVGA/NVIDIA GTX 1070) with CUDA v9.0.176.
The display driver being used is NVIDIA 384.111. The OS is Debian Linux v9
('Sid').
Charles W Johnson
April, 2018
*******/
///////////////////////////////////////
///////////////////////////////// SSSP5
/////////////////////// usando texturas
///////////////////////////////////////
/* CWJ includes */
#include <cuda.h>
#include "comun.cu"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#ifndef _SSSP5_Texture_AllOfAll
#define _SSSP5_Texture_AllOfAll
//////////////////////////////////////////
bool ejecutarIteracion_SSSP5_tex_allOfAll(
const unsigned int nVuelta,
const dim3 grid, const dim3 threads,
const unsigned int nv, const unsigned int na,
const unsigned int mem_size_V, const unsigned int mem_size_A,
const unsigned int mem_size_C, const unsigned int mem_size_F,
const unsigned int infinito,
bool* p_h, bool* f_h, unsigned int* c_h ,
bool* p_d, bool* f_d, unsigned int* c_d,
unsigned int* chi, unsigned int* cho, unsigned int* cdi, unsigned int* cdo)
{
//RECUERDA: mem_size_V= (nv+1)*sizeof(unsigned int)
#ifdef DEBUG
printf("\n\n*******************\n");
printf("\nVUELTA %i\n",nVuelta);
mostrarUI(c_h, nv, "c_h");
mostrarB(f_h, nv, "f_h");
mostrarB(p_h, nv, "p_h");
printf("\nEJECUCION KERNEL 1\n");
printf("num_threadsInBlock= %i\n", threads.x);
printf("num_blocksInGrid= %i\n", grid.x);
#endif // DEBUG
//ejecutar último kernel
cudaGetLastError(); // reset the runtime error variable to cudaSuccess
// ACTUALIZANDO CAMINOS MINIMOS ESPECIALES: kernel1
kernel1_SSSP5_tex_all<<<grid,threads,threads.x*sizeof(unsigned int)>>>( c_d);
// check if kernel execution generated and error
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaPeekAtLastError());
cudaThreadSynchronize();
#ifdef DEBUG
copiarD2H((void*)c_h, (void*)c_d, mem_size_C);
mostrarUI(c_h, nv, "c_h");
printf("\nEJECUCION KERNEL 2\n");
#endif // DEBUG
//MINIMIZANDO LOS COSTES RECIEN ACTUALIZADOS
unsigned int min= infinito;
minimizar(nv, c_d, p_d, threads, infinito, chi, cho, cdi, cdo, min);
#ifdef DEBUG
printf("\n\nEl minimo es %i\n", min);
printf("\nEJECUCION KERNEL 3\n");
printf("num_threadsInBlock= %i\n", threads.x);
printf("num_blocksInGrid= %i\n", grid.x);
#endif // DEBUG
//ejecutar último kernel
cudaGetLastError(); // reset the runtime error variable to cudaSuccess
//ACTUALIZANDO LA FRONTERA: Kernel3
kernel3_tex<<<grid,threads>>>( p_d, f_d, min);
// check if kernel execution generated and error
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaPeekAtLastError());
cudaThreadSynchronize();
#ifdef DEBUG
copiarD2H( (void*) p_h, (void*)p_d, mem_size_F);
mostrarB(p_h, nv, "p_h");
copiarD2H( (void*) f_h, (void*)f_d, mem_size_F);
mostrarB(f_h, nv, "f_h");
#endif // DEBUG
return (min==infinito);
}
//////////////////////////////////
void testGraph_SSSP5_tex_allOfAll(
const unsigned int nv, const unsigned int mem_size_V,
const unsigned int na, const unsigned int mem_size_A,
const unsigned int infinito, const unsigned int* v_h,
const unsigned int* a_h, const unsigned int* w_h,
const unsigned int* reference)
{
//RECUERDA: mem_size_V= (nv+1)*sizeof(unsigned int)
unsigned int* v_d; //array de vértices device
unsigned int* a_d; //array de aristas device
unsigned int* w_d; //array de pesos device
//copiar grafo de host a device
inicializar_Grafo_Device(v_h, mem_size_V, v_d,
a_h, mem_size_A, a_d,
w_h, w_d);
//enlazar las texturas
cudaBindTexture(0, textura_v, v_d, mem_size_V);
cudaBindTexture(0, textura_a, a_d, mem_size_A);
cudaBindTexture(0, textura_w, w_d, mem_size_A);
unsigned int* c_h; //solución en el host
unsigned int* c_d; //solución en el device
unsigned int mem_size_C= mem_size_V-sizeof(unsigned int); //Descontar el tapon -4
inicializar_Sol(c_h, c_d, nv, mem_size_C, infinito);
bool* f_h; //frontera en el host
bool* f_d; //frontera en el device
unsigned int mem_size_F= sizeof(bool) * nv;
inicializar_Frontera(f_h, f_d, nv, mem_size_F);
bool* p_h; //pendientes por procesar
bool* p_d; //pendientes por procesar
inicializar_Pendientes(p_h, p_d, nv, mem_size_F);
//enlazar las texturas del algoritmo
cudaBindTexture(0, textura_c, c_d, mem_size_C);
cudaBindTexture(0, textura_p, p_d, mem_size_F);
cudaBindTexture(0, textura_f, f_d, mem_size_F);
#ifdef DEBUG
//DEPURACION
printf("\nnv= %i\n", nv);
printf("na= %i\n", na);
printf("mem_size_V= %i\n", mem_size_V);
printf("mem_size_A= %i\n", mem_size_A);
printf("mem_size_F= %i\n\n", mem_size_F);
#endif // DEBUG
// setup execution parameters
unsigned int num_threadsInBlock= NUM_THREADS_IN_BLOCK;
//unsigned int num_blocksInGrid= nv/num_threadsInBlock; // original code, but the line below is better
unsigned int num_blocksInGrid = (nv + (num_threadsInBlock-1)) / num_threadsInBlock;
dim3 grid(num_blocksInGrid, 1, 1);
dim3 threads(num_threadsInBlock, 1, 1);
//RESERVAR ESPACIO PARA LA MINIMIZACION
unsigned int nvi = nv/(2*num_threadsInBlock);
unsigned int nvo = nvi/(2*num_threadsInBlock);
unsigned int* cdi;
unsigned int* cdo;
cudaMalloc((void**) &cdi, nvi*sizeof(unsigned int));
cudaMalloc((void**) &cdo, nvo*sizeof(unsigned int));
unsigned int* chi = (unsigned int*) malloc(nvi*sizeof(unsigned int));
unsigned int* cho = (unsigned int*) malloc(nvo*sizeof(unsigned int));
/* Updated timer code for CUDA 9 */
cudaEvent_t timerStart, timerStop;
float time;
//RUN THE ITERATIONS
bool ultima= false;
unsigned int i= 0;
// start things
cudaEventCreate(&timerStart);
cudaEventCreate(&timerStop);
cudaEventRecord(timerStart, 0);
while(!ultima){
i++;
ultima= ejecutarIteracion_SSSP5_tex_allOfAll( i,
grid, threads,
nv, na,
mem_size_V, mem_size_A, mem_size_C, mem_size_F,
infinito,
p_h, f_h, c_h,
p_d, f_d, c_d,
chi, cho, cdi, cdo);
}//while
// end things
cudaEventRecord(timerStop, 0);
cudaEventSynchronize(timerStop);
cudaEventElapsedTime(&time, timerStart, timerStop);
cudaEventDestroy(timerStart);
cudaEventDestroy(timerStop);
printf("Runtime for SSSP5_Texture_AllOfAll algorithm is: %.6f ms\n", time);
copiarD2H((void*)c_h, (void*)c_d, mem_size_C);
//unbind the textures
cudaUnbindTexture(textura_v);
cudaUnbindTexture(textura_a);
cudaUnbindTexture(textura_w);
// cleanup memory
cudaFree(v_d);
cudaFree(a_d);
cudaFree(w_d);
free(f_h);
free(p_h);
//unbind the textures
cudaUnbindTexture(textura_c);
//cudaUnbindTexture(textura_p);
//cudaUnbindTexture(textura_f);
cudaFree(c_d);
cudaFree(f_d);
cudaFree(p_d);
free(chi);
free(cho);
cudaFree(cdi);
cudaFree(cdo);
// check result
//CUTBoolean res = cutComparei( (int*)reference, (int*)c_h, nv);
//printf( "%s\t", (1 == res) ? "OK" : "FAILED");
//mostrarUI(c_h, nv, "c_h");
//mostrarUI(reference, nv, "reference");
// cleanup memory
free(c_h);
}
#endif //#ifndef _SSSP5_Texture_AllOfAll
|
d50d5c85d472af17c6dd36e26636ef7415d0b007.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cstdio>
#include<cstdlib>
#include<cuda.h>
#include"vector.h"
#include <iostream>
#include<sys/time.h>
using namespace std;
/*
*/
//Bilinear interpolation
#define INTERPOLATE_IMAGE 0
#define INTERPOLATE_NORM 1
#define EPLISION 0.0000001f
/*__device__ int halfWidth_d;*/
/*__device__ float* kernel_d = NULL;*/
__device__
float cu_dabs( float d){
return d>0?d:-d;
}
__device__
float2 abs2( float2 c )
{
float2 a = { cu_dabs( c.x ), cu_dabs( c.y ) };
return a;
}
__device__
float length3( float3 c){
return sqrtf ( c.x * c.x + c.y * c.y + c.z * c.z );
}
__device__
float3 normalize( float3 c ){
float len = length3(c);
float3 n;
if(fabs(len)>EPLISION){
n.x = c.x / len;
n.y = c.y / len;
n.z = c.z / len;
}else{
n.x = n.y = n.z = 0.f; //avoid returning an uninitialized vector
}
return n;
}
__device__
float dclamp(float c, float min = 0.0 , float max = 1.0 ){
return c < min ? min : ( c > max ? max : c );
}
__device__
float3 clamp(float3 c, float min = 0.0 , float max = 1.0 ){
float3 tmp;
tmp.x = c.x < min ? min : ( c.x > max ? max : c.x );
tmp.y = c.y < min ? min : ( c.y > max ? max : c.y );
tmp.z = c.z < min ? min : ( c.z > max ? max : c.z );
return tmp;
}
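//Bilinearly interpolates a float3 image at the fractional position p; p is clamped so the
//2x2 neighbourhood stays inside the image. Both flag values currently compute the same thing.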
__device__
float3 bilinear_interpolate( float3* img, float2 p, int width, int height, int flag = INTERPOLATE_IMAGE )
{
// const unsigned int x = blockIdx.x;
// const unsigned int y = threadIdx.x;
float2 q = { dclamp( p.x, 0.0, width - 1.1 ), dclamp( p.y, 0.0, height - 1.1 ) };//clamp to keep the sample inside the image
int qx = int ( q.x );
int qy = int ( q.y );
float tx = qx + 1 -q.x;
float ty = qy + 1 -q.y;
float3 result;
switch (flag)
{
case INTERPOLATE_IMAGE:
result.x = ( img[ qy * width + qx ].x * tx + img[ qy * width + qx + 1 ].x * ( 1 - tx ) ) * ty +
( img[ ( qy + 1 ) * width + qx ].x * tx + img[ ( qy + 1 ) * width + qx + 1 ].x * ( 1 - tx ) ) * ( 1 - ty );
result.y = ( img[ qy * width + qx ].y * tx + img[ qy * width + qx + 1 ].y * ( 1 - tx ) ) * ty +
( img[ ( qy + 1 ) * width + qx ].y * tx + img[ ( qy + 1 ) * width + qx + 1 ].y * ( 1 - tx ) ) * ( 1 - ty );
result.z = ( img[ qy * width + qx ].z * tx + img[ qy * width + qx + 1 ].z * ( 1 - tx ) ) * ty +
( img[ ( qy + 1 ) * width + qx ].z * tx + img[ ( qy + 1 ) * width + qx + 1 ].z * ( 1 - tx ) ) * ( 1 - ty );
break;
case INTERPOLATE_NORM:
result.x = ( img[ qy * width + qx ].x * tx + img[ qy * width + qx + 1 ].x * ( 1 - tx ) ) * ty +
( img[ ( qy + 1 ) * width + qx ].x * tx + img[ ( qy + 1 ) * width + qx + 1 ].x * ( 1 - tx ) ) * ( 1 - ty );
result.y = ( img[ qy * width + qx ].y * tx + img[ qy * width + qx + 1 ].y * ( 1 - tx ) ) * ty +
( img[ ( qy + 1 ) * width + qx ].y * tx + img[ ( qy + 1 ) * width + qx + 1 ].y * ( 1 - tx ) ) * ( 1 - ty );
result.z = ( img[ qy * width + qx ].z * tx + img[ qy * width + qx + 1 ].z * ( 1 - tx ) ) * ty +
( img[ ( qy + 1 ) * width + qx ].z * tx + img[ ( qy + 1 ) * width + qx + 1 ].z * ( 1 - tx ) ) * ( 1 - ty );
}
return result;
}
// __device__
// float length( float3 c )
// {
// return sqrtf ( c.x * c.x + c.y * c.y + c.z * c.z );
// }
__global__
void cu_rgb2lab(float3* src, float3* dest, int width, int height){
const int x = blockIdx.x*blockDim.x+threadIdx.x;
const int y = blockIdx.y*blockDim.y+threadIdx.y;
if(x>=width || y>=height) return;
float3 c, tmp, xyz, n, v, lab;
c = src[ y * width + x];
tmp.x = ( c.x > 0.04045 ) ? powf( ( c.x + 0.055 ) / 1.055, 2.4 ) : c.x / 12.92;
tmp.y = ( c.y > 0.04045 ) ? powf( ( c.y + 0.055 ) / 1.055, 2.4 ) : c.y / 12.92,
tmp.z = ( c.z > 0.04045 ) ? powf( ( c.z + 0.055 ) / 1.055, 2.4 ) : c.z / 12.92;
xyz.x = 100.0 * ( tmp.x * 0.4124 + tmp.y * 0.3576 + tmp.z * 0.1805 ) ;
xyz.y = 100.0 * ( tmp.x * 0.2126 + tmp.y * 0.7152 + tmp.z * 0.0722 ) ;
xyz.z = 100.0 * ( tmp.x * 0.0193 + tmp.y * 0.1192 + tmp.z * 0.9505 ) ;
n.x = xyz.x / 95.047;
n.y = xyz.y / 100;
n.z = xyz.z / 108.883;
v.x = ( n.x > 0.008856 ) ? powf( n.x, 1.0 / 3.0 ) : ( 7.787 * n.x ) + ( 16.0 / 116.0 );
v.y = ( n.y > 0.008856 ) ? powf( n.y, 1.0 / 3.0 ) : ( 7.787 * n.y ) + ( 16.0 / 116.0 );
v.z = ( n.z > 0.008856 ) ? powf( n.z, 1.0 / 3.0 ) : ( 7.787 * n.z ) + ( 16.0 / 116.0 );
lab.x = ( 116.0 * v.y ) - 16.0;
lab.y = 500.0 * ( v.x - v.y );
lab.z = 200.0 * ( v.y - v.z );
dest[ y * width + x ].x = lab.x / 100.0;
dest[ y * width + x ].y = 0.5 + 0.5 * ( lab.y / 127.0 );
dest[ y * width + x ].z = 0.5 + 0.5 * ( lab.z / 127.0 );
}
__global__
void cu_lab2rgb(float3* src, float3* dest, int width, int height){
// const int x = blockIdx.x;
// const int y = threadIdx.x;
const int x = blockIdx.x*blockDim.x+threadIdx.x;
const int y = blockIdx.y*blockDim.y+threadIdx.y;
if(x>=width || y>=height) return;
float3 c, tmp, xyz, r, v, lab;
c = clamp ( src[ y * width + x] );
lab.x = 100.0 * c.x;
lab.y = 2.0 * 127.0 * ( c.y - 0.5 );
lab.z = 2.0 * 127.0 * ( c.z - 0.5 );
tmp.y = ( lab.x + 16.0 ) / 116.0;
tmp.x = lab.y / 500.0 + tmp.y;
tmp.z = tmp.y - lab.z / 200.0;
xyz.x = 95.047 * (( tmp.x > 0.206897 ) ? tmp.x * tmp.x * tmp.x : ( tmp.x -16.0 / 116.0 ) / 7.787);
xyz.y = 100.000 * (( tmp.y > 0.206897 ) ? tmp.y * tmp.y * tmp.y : ( tmp.y -16.0 / 116.0 ) / 7.787);
xyz.z = 108.883 * (( tmp.z > 0.206897 ) ? tmp.z * tmp.z * tmp.z : ( tmp.z -16.0 / 116.0 ) / 7.787);
v.x = ( xyz.x * 3.2406 + xyz.y * -1.5372 + xyz.z * -0.4986 ) / 100.0;
v.y = ( xyz.x * -0.9689 + xyz.y * 1.8758 + xyz.z * 0.0415 ) / 100.0;
v.z = ( xyz.x * 0.0557 + xyz.y * -0.2040 + xyz.z * 1.0570 ) / 100.0;
r.x = ( v.x > 0.0031308 ) ? (( 1.055 * powf( v.x, (1.0 / 2.4 ))) - 0.055 ) : 12.92 * v.x;
r.y = ( v.y > 0.0031308 ) ? (( 1.055 * powf( v.y, (1.0 / 2.4 ))) - 0.055 ) : 12.92 * v.y;
r.z = ( v.z > 0.0031308 ) ? (( 1.055 * powf( v.z, (1.0 / 2.4 ))) - 0.055 ) : 12.92 * v.z;
dest[ y * width + x ] = clamp( r );
}
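//Computes the per-pixel structure tensor from 3x3 Sobel x/y gradients of the RGB image:
//st = (gx.gx, gy.gy, gx.gy), with the dot products taken over the three color channels.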
__global__
void cu_structure_tensor(float3* src, float3* st, int width, int height){
const int x = blockIdx.x*blockDim.x+threadIdx.x;
const int y = blockIdx.y*blockDim.y+threadIdx.y;
if(x>=width || y>=height || x<0 || y<0) return;
float3 u,v;
int ym1 = ( y - 1 ) < 0 ? 0 : ( y - 1 );
int xm1 = ( x - 1 ) < 0 ? 0 : ( x - 1 );
int yp1 = ( y + 1 ) > ( height - 1 ) ? ( height - 1 ) : ( y + 1 );
int xp1 = ( x + 1 ) > ( width - 1 ) ? ( width - 1 ) : ( x + 1 );
// x gradient
u.x = (
-1.0 * src[ ym1 * width + xm1 ].x +
-2.0 * src[ y * width + xm1 ].x +
-1.0 * src[ yp1 * width + xm1 ].x +
+1.0 * src[ ym1 * width + xp1 ].x +
+2.0 * src[ y * width + xp1 ].x +
+1.0 * src[ yp1 * width + xp1 ].x ) / 4.0;
u.y = (
-1.0 * src[ ym1 * width + xm1 ].y +
-2.0 * src[ y * width + xm1 ].y +
-1.0 * src[ yp1 * width + xm1 ].y +
+1.0 * src[ ym1 * width + xp1 ].y +
+2.0 * src[ y * width + xp1 ].y +
+1.0 * src[ yp1 * width + xp1 ].y ) / 4.0;
u.z = (
-1.0 * src[ ym1 * width + xm1 ].z +
-2.0 * src[ y * width + xm1 ].z +
-1.0 * src[ yp1 * width + xm1 ].z +
+1.0 * src[ ym1 * width + xp1 ].z +
+2.0 * src[ y * width + xp1 ].z +
+1.0 * src[ yp1 * width + xp1 ].z ) / 4.0;
//y gradient
v.x = (
-1.0 * src[ ym1 * width + xm1 ].x +
-2.0 * src[ ym1 * width + x ].x +
-1.0 * src[ ym1 * width + xp1 ].x +
+1.0 * src[ yp1 * width + xm1 ].x +
+2.0 * src[ yp1 * width + x ].x +
+1.0 * src[ yp1 * width + xp1 ].x ) / 4.0;
v.y = (
-1.0 * src[ ym1 * width + xm1 ].y +
-2.0 * src[ ym1 * width + x ].y +
-1.0 * src[ ym1 * width + xp1 ].y +
+1.0 * src[ yp1 * width + xm1 ].y +
+2.0 * src[ yp1 * width + x ].y +
+1.0 * src[ yp1 * width + xp1 ].y ) / 4.0;
v.z = (
-1.0 * src[ ym1 * width + xm1 ].z +
-2.0 * src[ ym1 * width + x ].z +
-1.0 * src[ ym1 * width + xp1 ].z +
+1.0 * src[ yp1 * width + xm1 ].z +
+2.0 * src[ yp1 * width + x ].z +
+1.0 * src[ yp1 * width + xp1 ].z ) / 4.0;
//structure tensor
st[ y * width + x ].x = u.x * u.x + u.y * u.y + u.z * u.z;
st[ y * width + x ].y = v.x * v.x + v.y * v.y + v.z * v.z;
st[ y * width + x ].z = u.x * v.x + u.y * v.y + u.z * v.z;
}
__global__
void cu_gauss_filter(float3* src, float sigma, float3* dest, int width, int height, int halfWidth_d, float *kernel_d){
const int x = blockIdx.x*blockDim.x+threadIdx.x;
const int y = blockIdx.y*blockDim.y+threadIdx.y;
if(x>=width || y>=height || x<0 || y<0) return;
float3 result = {0.,0.,0.};
float norm = 0.;
// parallel
for ( int i = -halfWidth_d; i<= halfWidth_d; ++i )
{
for ( int j = -halfWidth_d; j<= halfWidth_d; ++j )
{
//if (blockIdx.x == 0 && blockIdx.y == 0)
/*printf("id = %d, (%f, %f, %f), norm = %f\n", y * width + x, dest[ y * width + x ].x, dest[ y * width + x ].y, dest[ y * width + x ].z, norm);*/
/*printf("bidx.x = %d, bidx.y = %d\n", blockIdx.x, blockIdx.y);*/
if ( ( y + i ) >= 0 && ( x + j ) >= 0 && ( y + i ) < height && ( x + j ) < width )// skip taps that fall outside the image
{
result.x += src[ ( y + i ) * width + x + j ].x * kernel_d[ ( i + halfWidth_d ) * ( halfWidth_d * 2 + 1 ) + j + halfWidth_d ];
result.y += src[ ( y + i ) * width + x + j ].y * kernel_d[ ( i + halfWidth_d ) * ( halfWidth_d * 2 + 1 ) + j + halfWidth_d ];
result.z += src[ ( y + i ) * width + x + j ].z * kernel_d[ ( i + halfWidth_d ) * ( halfWidth_d * 2 + 1 ) + j + halfWidth_d ];
norm += kernel_d[ ( i + halfWidth_d ) * ( halfWidth_d * 2 + 1 ) + j + halfWidth_d ];
}
}
}
if(fabs(norm)>EPLISION){
dest[ y * width + x ].x = result.x / norm;
dest[ y * width + x ].y = result.y / norm;
dest[ y * width + x ].z = result.z / norm;
}else{
dest[ y * width + x ].x = 0.;
dest[ y * width + x ].y = 0.;
dest[ y * width + x ].z = 0.;
}
/*if (blockIdx.x == 0 && blockIdx.y == 0)*/
/*{*/
/*printf("id = %d, (%f, %f, %f), norm = %f\n", y * width + x, dest[ y * width + x ].x, dest[ y * width + x ].y, dest[ y * width + x ].z, norm);*/
/*}*/
//
/*dest[ y * width + x ].x = 0.123;*/
/*dest[ y * width + x ].y = 0.123;*/
/*dest[ y * width + x ].z = 0.123;*/
}
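//Builds the edge tangent flow from the smoothed structure tensor: lambda1 is the larger
//eigenvalue of the 2x2 tensor, and the stored xy vector is the normalized eigenvector of the
//smaller eigenvalue (the direction of least contrast change, i.e. along edges);
//sqrt(lambda1) is kept in z as an edge-strength weight.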
__global__
void cu_tangent_flow_map(float3* sst, float sigma, float3* tfm, int width, int height ){
const int x = blockIdx.x*blockDim.x+threadIdx.x;
const int y = blockIdx.y*blockDim.y+threadIdx.y;
if(x>=width || y>=height) return;
float3 g = sst[ y * width + x ];
float lambda1 = 0.5 * ( g.y + g.x + sqrt( g.y * g.y - 2.0 * g.x * g.y + g.x * g.x + 4.0 * g.z * g.z ) );
float3 v = { g.x - lambda1, g.z, 0.0 };
if ( length3( v ) > 0.0 )
{
tfm[ y * width + x ] = normalize( v );
tfm[ y * width + x ].z = sqrt( lambda1 );//sqrt of the major eigenvalue; can serve as an edge-strength weight
}
else//fall back to a fixed unit vector so the flow is never zero
{
tfm[ y * width + x ].x = 0.0;
tfm[ y * width + x ].y = 1.0;
tfm[ y * width + x ].z = 0.0;
}
}
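//Runs n iterations of a flow-guided separable bilateral filter: pass 0 filters along the
//direction perpendicular to the tangent (the gradient direction), pass 1 along the tangent
//itself, sampling the intermediate image with bilinear interpolation. sigma_d controls the
//spatial falloff, sigma_r the range falloff.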
__global__
void cu_orientation_aligned_bilateral_filter( float3*src, float3* tfm, float3*dest, float3* tmp, int n, float sigma_d, float sigma_r, int width, int height){
const int x = blockIdx.x*blockDim.x+threadIdx.x;
const int y = blockIdx.y*blockDim.y+threadIdx.y;
if(x>=width || y>=height) return;
float twoSigmaD2 = 2.0 * sigma_d * sigma_d;
float twoSigmaR2 = 2.0 * sigma_r * sigma_r;
for(int i = 0; i < n; ++i)
{
for(int pass = 0; pass < 2; ++pass)
{
float2 t = { tfm[ y * width + x ].x, tfm[ y * width + x ].y };
float2 tt = { t.y, -t.x };
float2 dir = ( pass == 0 ) ? tt : t;
float2 dabs = abs2( dir );
float ds = 1.0 / ( ( dabs.x > dabs.y ) ? dabs.x : dabs.y );
float3* midsrc = ( i == 0 ? ( pass == 0 ? src : tmp ) : ( pass == 0 ? dest : tmp ) );
float3 center = midsrc[ y * width + x ];
float3 sum = center;
float norm = 1.0;
float halfWidth = 2.0 * sigma_d;
for ( float d = ds; d <= halfWidth; d += ds )
{
float2 p0 = { x + d * dir.x, y + d * dir.y };
float3 c0 = bilinear_interpolate( midsrc, p0, width, height );
float2 p1 = { x - d * dir.x, y - d * dir.y };
float3 c1 = bilinear_interpolate( midsrc, p1, width, height );
float3 d0 = { c0.x -center.x, c0.y - center.y, c0.z - center.z };
float3 d1 = { c1.x -center.x, c1.y - center.y, c1.z - center.z };
float e0 = length3( d0 );
float e1 = length3( d1 );
float kerneld = expf( -d * d / twoSigmaD2 );
float kernele0 = expf( - e0 * e0 / twoSigmaR2 );
float kernele1 = expf( - e1 * e1 / twoSigmaR2 );
norm += kerneld * kernele0;
norm += kerneld * kernele1;
//printf("%lf, %lf, %lf\n", kerneld, kernele0, kernele1);
sum.x += kerneld * kernele0 * c0.x;
sum.x += kerneld * kernele1 * c1.x;
sum.y += kerneld * kernele0 * c0.y;
sum.y += kerneld * kernele1 * c1.y;
sum.z += kerneld * kernele0 * c0.z;
sum.z += kerneld * kernele1 * c1.z;
}
if(fabs(norm)>EPLISION){
sum.x /= norm;
sum.y /= norm;
sum.z /= norm;
}else{
sum.x /= 1;
sum.y /= 1;
sum.z /= 1;
}
( pass == 0 ? tmp : dest )[ y * width + x ] = sum ;
}
}
}
void cudaSafeFree(void** p){
if(*p){
hipFree(*p);
*p = NULL;
}
}
void cleanup(float3* fsrc, vec3* dest, int size){
if(fsrc&&dest){
printf("clean up\n");
for(int i = 0; i < size; ++i)
{
dest[i].r = fsrc[i].x;
dest[i].g = fsrc[i].y;
dest[i].b = fsrc[i].z;
}
}else{
printf("ZERO\n");
}
}
void print(float3* fsrc, int width, int height){
if(fsrc){
for(int i = 0; i < height; ++i)
{
for(int j = 0; j < width; ++j)
{
fprintf(stdout, "(%.6f %.6f %.6f) ", fsrc[i*width+j].x, fsrc[i*width+j].y, fsrc[i*width+j].z);
}
fprintf(stdout, "\n");
}
}
}
void printd(float* d,int size){
if(d){
printf("%d\n",size);
for(int i = 0; i < size; ++i)
{
fprintf(stdout, "%.6f ", d[i]);
}
}else{
printf("ZERO\n");
}
}
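//Builds a (2*halfWidth+1) x (2*halfWidth+1) Gaussian kernel with halfWidth = ceil(2*sigma);
//the weights are normalized to sum to 1 and returned through *ker (the caller frees them).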
void creat_gauss_kernel(float** ker, float sigma, int* half){
float twoSigma2 = 2.0 * sigma * sigma;
int halfWidth = int ( ceil( 2.0 * sigma ) );
cout << "halfwidth = " << halfWidth << endl;
*half = halfWidth;
float* kernel = new float[(2*halfWidth+1)*(2*halfWidth+1)];
*ker = kernel;
float norm = 0.0;
for ( int i = -halfWidth; i <= halfWidth; i++ )
{
for ( int j = -halfWidth; j <= halfWidth; j++)
{
norm += kernel[ ( i + halfWidth ) * ( halfWidth * 2 + 1 ) + j + halfWidth ] = exp( - ( i * i + j * j ) / twoSigma2 );
}
}
for ( int i = -halfWidth; i <= halfWidth; i++ )
{
for ( int j = -halfWidth; j <= halfWidth; j++)
{
kernel[ ( i + halfWidth ) * ( halfWidth * 2 + 1 ) + j + halfWidth ] /= norm ;
}
}
//printd(kernel, (2*halfWidth+1)*(2*halfWidth+1));
}
#define NO 900
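//Flow-based image abstraction pipeline: structure tensor -> Gaussian smoothing -> edge
//tangent flow -> RGB-to-Lab -> orientation-aligned bilateral filtering (4 iterations) ->
//Lab-to-RGB, with per-step error checks and timing printed along the way.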
void cu_flowabs(vec3* src, vec3* dest, float sigma, vec3* tfm, int width, int height){
if(!(src && dest && tfm)){
return;
}
if(width*height>1024*1024){
printf("Too Large Image\n");
return;
}
struct timeval ts;
struct timezone tz;
gettimeofday (&ts , &tz);
long sec = ts.tv_sec;
long usec = ts.tv_usec;
dim3 dimBlock(16,16);
dim3 dimGrid((width+dimBlock.x-1)/dimBlock.x, (height+dimBlock.y-1)/dimBlock.y);
cout << "dim = (" << dimGrid.x << ", " << dimGrid.y << ")" << endl;
float3* st;
hipMalloc((void**)&st, sizeof(float3)*width*height); // structure tensor
hipError_t err = hipGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
float3* tmp;
hipMalloc((void**)&tmp, sizeof(float3)*width*height);
err = hipGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
float3* sst; // smoothed structure tensor
hipMalloc((void**)&sst, sizeof(float3)*width*height);
err = hipGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
float3* ftfm; //
hipMalloc((void**)&ftfm, sizeof(float3)*width*height);
err = hipGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
float3* lab;
hipMalloc((void**)&lab, sizeof(float3)*width*height);
err = hipGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
float3* midlab;
hipMalloc((void**)&midlab, sizeof(float3)*width*height);
err = hipGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
float3* rgb;
hipMalloc((void**)&rgb, sizeof(float3)*width*height);
err = hipGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
float3* fsrc = 0;
fsrc = new float3[width*height];
//cout << "fsrc = " << (long)fsrc << endl;
for(int i = 0; i < width*height; ++i)
{
fsrc[i].x = src[i].r;
fsrc[i].y = src[i].g;
fsrc[i].z = src[i].b;
}
//printf("%.6f %.6f %.6f\n", fsrc[NO].x,fsrc[NO].y, fsrc[NO].z);
hipMemcpy(tmp, fsrc, sizeof(float3)*width*height, hipMemcpyHostToDevice);
err = hipGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
// structure_tensor
hipLaunchKernelGGL(( cu_structure_tensor), dim3(dimGrid), dim3(dimBlock), 0, 0, tmp, st, width, height);
err = hipGetLastError();
if(err)
printf("line : %d, %s\n", __LINE__, hipGetErrorString(err));
//
hipMemcpy(fsrc, st, sizeof(float3)*width*height, hipMemcpyDeviceToHost);
//printf("%.6f %.6f %.6f\n", fsrc[NO].x,fsrc[NO].y, fsrc[NO].z);
// print(fsrc, width, height);
err = hipGetLastError();
if(err)
printf("%d %s\n", err, hipGetErrorString(err));
// Create the Gaussian kernel
float* kernel = NULL;
int halfWidth;
creat_gauss_kernel(&kernel, sigma, &halfWidth);
//
//printd(kernel, (2*halfWidth+1)*(2*halfWidth+1));
//halfWidth_d = halfWidth;
float *kernel_d;
hipMalloc((void**)&kernel_d, sizeof(float)*(2*halfWidth+1)*(2*halfWidth+1));
err = hipGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
hipMemcpy(kernel_d, kernel, sizeof(float)*(2*halfWidth+1)*(2*halfWidth+1), hipMemcpyHostToDevice);
err = hipGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
printf("line : %d, dimGrid %d %d \n", __LINE__ , dimGrid.x, dimGrid.y);
hipLaunchKernelGGL(( cu_gauss_filter), dim3(dimGrid), dim3(dimBlock), 0, 0, st, sigma, sst, width, height, halfWidth, kernel_d);
err = hipGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
hipMemcpy(fsrc, sst, sizeof(float3)*width*height, hipMemcpyDeviceToHost);
err = hipGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
printf("line : %d, error = %d %.6f %.6f %.6f\n", __LINE__, err, fsrc[NO].x,fsrc[NO].y, fsrc[NO].z);
hipLaunchKernelGGL(( cu_tangent_flow_map), dim3(dimGrid), dim3(dimBlock), 0, 0, sst, sigma, ftfm, width, height);
err = hipGetLastError();
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
hipMemcpy(fsrc, ftfm, sizeof(float3)*width*height, hipMemcpyDeviceToHost);
err = hipGetLastError();
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
printf("%.6f %.6f %.6f\n", fsrc[NO].x,fsrc[NO].y, fsrc[NO].z);
// rgb
hipLaunchKernelGGL(( cu_rgb2lab), dim3(dimGrid), dim3(dimBlock), 0, 0, tmp,lab,width, height);
err = hipGetLastError();
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
hipMemcpy(fsrc, lab, sizeof(float3)*width*height, hipMemcpyDeviceToHost);
err = hipGetLastError();
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
printf("%.6f %.6f %.6f\n", fsrc[NO].x,fsrc[NO].y, fsrc[NO].z);
hipLaunchKernelGGL(( cu_orientation_aligned_bilateral_filter), dim3(dimGrid), dim3(dimBlock), 0, 0, lab, ftfm, midlab, tmp, 4, 3.0f, 0.0425f, width, height);
err = hipGetLastError();
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
hipMemcpy(fsrc, midlab, sizeof(float3)*width*height, hipMemcpyDeviceToHost);
err = hipGetLastError();
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
printf(" %.6f %.6f %.6f\n", fsrc[NO].x,fsrc[NO].y, fsrc[NO].z);
hipLaunchKernelGGL(( cu_lab2rgb), dim3(dimGrid), dim3(dimBlock), 0, 0, midlab,rgb,width, height);
err = hipGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
hipMemcpy(fsrc, rgb, sizeof(float3)*width*height, hipMemcpyDeviceToHost);
//err = hipGetLastError();
//cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << hipGetErrorString(err) << endl;
printf(" %.6f %.6f %.6f\n", fsrc[NO].x,fsrc[NO].y, fsrc[NO].z);
// clean up
hipMemcpy(fsrc, rgb, sizeof(float3)*width*height, hipMemcpyDeviceToHost);
cleanup(fsrc, dest,width*height);
printf("%.6f %.6f %.6f\n", fsrc[512].x,fsrc[512].y, fsrc[512].z);
// release
FREE:
cudaSafeFree((void**)&st);
cudaSafeFree((void**)&lab);
cudaSafeFree((void**)&rgb);
cudaSafeFree((void**)&midlab);
cudaSafeFree((void**)&tmp);
cudaSafeFree((void**)&sst);
cudaSafeFree((void**)&ftfm);
cudaSafeFree((void**)&kernel_d);
delete[] fsrc;
delete[] kernel;
gettimeofday (&ts , &tz);
printf("sec; %ld\n", ts.tv_sec - sec);
printf("usec; %ld\n",ts.tv_usec - usec);
}
| d50d5c85d472af17c6dd36e26636ef7415d0b007.cu | #include<cstdio>
#include<cstdlib>
#include<cuda.h>
#include"vector.h"
#include <iostream>
#include<sys/time.h>
using namespace std;
/*
*/
//Bilinear interpolation
#define INTERPOLATE_IMAGE 0
#define INTERPOLATE_NORM 1
#define EPLISION 0.0000001f
/*__device__ int halfWidth_d;*/
/*__device__ float* kernel_d = NULL;*/
__device__
float cu_dabs( float d){
return d>0?d:-d;
}
__device__
float2 abs2( float2 c )
{
float2 a = { cu_dabs( c.x ), cu_dabs( c.y ) };
return a;
}
__device__
float length3( float3 c){
return sqrtf ( c.x * c.x + c.y * c.y + c.z * c.z );
}
__device__
float3 normalize( float3 c ){
float len = length3(c);
float3 n;
if(fabs(len)>EPLISION){
n.x = c.x / len;
n.y = c.y / len;
n.z = c.z / len;
}else{
n.x = n.y = n.z = 0.f; //avoid returning an uninitialized vector
}
return n;
}
__device__
float dclamp(float c, float min = 0.0 , float max = 1.0 ){
return c < min ? min : ( c > max ? max : c );
}
__device__
float3 clamp(float3 c, float min = 0.0 , float max = 1.0 ){
float3 tmp;
tmp.x = c.x < min ? min : ( c.x > max ? max : c.x );
tmp.y = c.y < min ? min : ( c.y > max ? max : c.y );
tmp.z = c.z < min ? min : ( c.z > max ? max : c.z );
return tmp;
}
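//Bilinearly interpolates a float3 image at the fractional position p; p is clamped so the
//2x2 neighbourhood stays inside the image. Both flag values currently compute the same thing.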
__device__
float3 bilinear_interpolate( float3* img, float2 p, int width, int height, int flag = INTERPOLATE_IMAGE )
{
// const unsigned int x = blockIdx.x;
// const unsigned int y = threadIdx.x;
float2 q = { dclamp( p.x, 0.0, width - 1.1 ), dclamp( p.y, 0.0, height - 1.1 ) };//clamp to keep the sample inside the image
int qx = int ( q.x );
int qy = int ( q.y );
float tx = qx + 1 -q.x;
float ty = qy + 1 -q.y;
float3 result;
switch (flag)
{
case INTERPOLATE_IMAGE:
result.x = ( img[ qy * width + qx ].x * tx + img[ qy * width + qx + 1 ].x * ( 1 - tx ) ) * ty +
( img[ ( qy + 1 ) * width + qx ].x * tx + img[ ( qy + 1 ) * width + qx + 1 ].x * ( 1 - tx ) ) * ( 1 - ty );
result.y = ( img[ qy * width + qx ].y * tx + img[ qy * width + qx + 1 ].y * ( 1 - tx ) ) * ty +
( img[ ( qy + 1 ) * width + qx ].y * tx + img[ ( qy + 1 ) * width + qx + 1 ].y * ( 1 - tx ) ) * ( 1 - ty );
result.z = ( img[ qy * width + qx ].z * tx + img[ qy * width + qx + 1 ].z * ( 1 - tx ) ) * ty +
( img[ ( qy + 1 ) * width + qx ].z * tx + img[ ( qy + 1 ) * width + qx + 1 ].z * ( 1 - tx ) ) * ( 1 - ty );
break;
case INTERPOLATE_NORM:
result.x = ( img[ qy * width + qx ].x * tx + img[ qy * width + qx + 1 ].x * ( 1 - tx ) ) * ty +
( img[ ( qy + 1 ) * width + qx ].x * tx + img[ ( qy + 1 ) * width + qx + 1 ].x * ( 1 - tx ) ) * ( 1 - ty );
result.y = ( img[ qy * width + qx ].y * tx + img[ qy * width + qx + 1 ].y * ( 1 - tx ) ) * ty +
( img[ ( qy + 1 ) * width + qx ].y * tx + img[ ( qy + 1 ) * width + qx + 1 ].y * ( 1 - tx ) ) * ( 1 - ty );
result.z = ( img[ qy * width + qx ].z * tx + img[ qy * width + qx + 1 ].z * ( 1 - tx ) ) * ty +
( img[ ( qy + 1 ) * width + qx ].z * tx + img[ ( qy + 1 ) * width + qx + 1 ].z * ( 1 - tx ) ) * ( 1 - ty );
}
return result;
}
// __device__
// float length( float3 c )
// {
// return sqrtf ( c.x * c.x + c.y * c.y + c.z * c.z );
// }
__global__
void cu_rgb2lab(float3* src, float3* dest, int width, int height){
const int x = blockIdx.x*blockDim.x+threadIdx.x;
const int y = blockIdx.y*blockDim.y+threadIdx.y;
if(x>=width || y>=height) return;
float3 c, tmp, xyz, n, v, lab;
c = src[ y * width + x];
tmp.x = ( c.x > 0.04045 ) ? powf( ( c.x + 0.055 ) / 1.055, 2.4 ) : c.x / 12.92;
tmp.y = ( c.y > 0.04045 ) ? powf( ( c.y + 0.055 ) / 1.055, 2.4 ) : c.y / 12.92,
tmp.z = ( c.z > 0.04045 ) ? powf( ( c.z + 0.055 ) / 1.055, 2.4 ) : c.z / 12.92;
xyz.x = 100.0 * ( tmp.x * 0.4124 + tmp.y * 0.3576 + tmp.z * 0.1805 ) ;
xyz.y = 100.0 * ( tmp.x * 0.2126 + tmp.y * 0.7152 + tmp.z * 0.0722 ) ;
xyz.z = 100.0 * ( tmp.x * 0.0193 + tmp.y * 0.1192 + tmp.z * 0.9505 ) ;
n.x = xyz.x / 95.047;
n.y = xyz.y / 100;
n.z = xyz.z / 108.883;
v.x = ( n.x > 0.008856 ) ? powf( n.x, 1.0 / 3.0 ) : ( 7.787 * n.x ) + ( 16.0 / 116.0 );
v.y = ( n.y > 0.008856 ) ? powf( n.y, 1.0 / 3.0 ) : ( 7.787 * n.y ) + ( 16.0 / 116.0 );
v.z = ( n.z > 0.008856 ) ? powf( n.z, 1.0 / 3.0 ) : ( 7.787 * n.z ) + ( 16.0 / 116.0 );
lab.x = ( 116.0 * v.y ) - 16.0;
lab.y = 500.0 * ( v.x - v.y );
lab.z = 200.0 * ( v.y - v.z );
dest[ y * width + x ].x = lab.x / 100.0;
dest[ y * width + x ].y = 0.5 + 0.5 * ( lab.y / 127.0 );
dest[ y * width + x ].z = 0.5 + 0.5 * ( lab.z / 127.0 );
}
__global__
void cu_lab2rgb(float3* src, float3* dest, int width, int height){
// const int x = blockIdx.x;
// const int y = threadIdx.x;
const int x = blockIdx.x*blockDim.x+threadIdx.x;
const int y = blockIdx.y*blockDim.y+threadIdx.y;
if(x>=width || y>=height) return;
float3 c, tmp, xyz, r, v, lab;
c = clamp ( src[ y * width + x] );
lab.x = 100.0 * c.x;
lab.y = 2.0 * 127.0 * ( c.y - 0.5 );
lab.z = 2.0 * 127.0 * ( c.z - 0.5 );
tmp.y = ( lab.x + 16.0 ) / 116.0;
tmp.x = lab.y / 500.0 + tmp.y;
tmp.z = tmp.y - lab.z / 200.0;
xyz.x = 95.047 * (( tmp.x > 0.206897 ) ? tmp.x * tmp.x * tmp.x : ( tmp.x -16.0 / 116.0 ) / 7.787);
xyz.y = 100.000 * (( tmp.y > 0.206897 ) ? tmp.y * tmp.y * tmp.y : ( tmp.y -16.0 / 116.0 ) / 7.787);
xyz.z = 108.883 * (( tmp.z > 0.206897 ) ? tmp.z * tmp.z * tmp.z : ( tmp.z -16.0 / 116.0 ) / 7.787);
v.x = ( xyz.x * 3.2406 + xyz.y * -1.5372 + xyz.z * -0.4986 ) / 100.0;
v.y = ( xyz.x * -0.9689 + xyz.y * 1.8758 + xyz.z * 0.0415 ) / 100.0;
v.z = ( xyz.x * 0.0557 + xyz.y * -0.2040 + xyz.z * 1.0570 ) / 100.0;
r.x = ( v.x > 0.0031308 ) ? (( 1.055 * powf( v.x, (1.0 / 2.4 ))) - 0.055 ) : 12.92 * v.x;
r.y = ( v.y > 0.0031308 ) ? (( 1.055 * powf( v.y, (1.0 / 2.4 ))) - 0.055 ) : 12.92 * v.y;
r.z = ( v.z > 0.0031308 ) ? (( 1.055 * powf( v.z, (1.0 / 2.4 ))) - 0.055 ) : 12.92 * v.z;
dest[ y * width + x ] = clamp( r );
}
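//Computes the per-pixel structure tensor from 3x3 Sobel x/y gradients of the RGB image:
//st = (gx.gx, gy.gy, gx.gy), with the dot products taken over the three color channels.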
__global__
void cu_structure_tensor(float3* src, float3* st, int width, int height){
const int x = blockIdx.x*blockDim.x+threadIdx.x;
const int y = blockIdx.y*blockDim.y+threadIdx.y;
if(x>=width || y>=height || x<0 || y<0) return;
float3 u,v;
int ym1 = ( y - 1 ) < 0 ? 0 : ( y - 1 );
int xm1 = ( x - 1 ) < 0 ? 0 : ( x - 1 );
int yp1 = ( y + 1 ) > ( height - 1 ) ? ( height - 1 ) : ( y + 1 );
int xp1 = ( x + 1 ) > ( width - 1 ) ? ( width - 1 ) : ( x + 1 );
// x gradient
u.x = (
-1.0 * src[ ym1 * width + xm1 ].x +
-2.0 * src[ y * width + xm1 ].x +
-1.0 * src[ yp1 * width + xm1 ].x +
+1.0 * src[ ym1 * width + xp1 ].x +
+2.0 * src[ y * width + xp1 ].x +
+1.0 * src[ yp1 * width + xp1 ].x ) / 4.0;
u.y = (
-1.0 * src[ ym1 * width + xm1 ].y +
-2.0 * src[ y * width + xm1 ].y +
-1.0 * src[ yp1 * width + xm1 ].y +
+1.0 * src[ ym1 * width + xp1 ].y +
+2.0 * src[ y * width + xp1 ].y +
+1.0 * src[ yp1 * width + xp1 ].y ) / 4.0;
u.z = (
-1.0 * src[ ym1 * width + xm1 ].z +
-2.0 * src[ y * width + xm1 ].z +
-1.0 * src[ yp1 * width + xm1 ].z +
+1.0 * src[ ym1 * width + xp1 ].z +
+2.0 * src[ y * width + xp1 ].z +
+1.0 * src[ yp1 * width + xp1 ].z ) / 4.0;
//y gradient
v.x = (
-1.0 * src[ ym1 * width + xm1 ].x +
-2.0 * src[ ym1 * width + x ].x +
-1.0 * src[ ym1 * width + xp1 ].x +
+1.0 * src[ yp1 * width + xm1 ].x +
+2.0 * src[ yp1 * width + x ].x +
+1.0 * src[ yp1 * width + xp1 ].x ) / 4.0;
v.y = (
-1.0 * src[ ym1 * width + xm1 ].y +
-2.0 * src[ ym1 * width + x ].y +
-1.0 * src[ ym1 * width + xp1 ].y +
+1.0 * src[ yp1 * width + xm1 ].y +
+2.0 * src[ yp1 * width + x ].y +
+1.0 * src[ yp1 * width + xp1 ].y ) / 4.0;
v.z = (
-1.0 * src[ ym1 * width + xm1 ].z +
-2.0 * src[ ym1 * width + x ].z +
-1.0 * src[ ym1 * width + xp1 ].z +
+1.0 * src[ yp1 * width + xm1 ].z +
+2.0 * src[ yp1 * width + x ].z +
+1.0 * src[ yp1 * width + xp1 ].z ) / 4.0;
//structure tensor
st[ y * width + x ].x = u.x * u.x + u.y * u.y + u.z * u.z;
st[ y * width + x ].y = v.x * v.x + v.y * v.y + v.z * v.z;
st[ y * width + x ].z = u.x * v.x + u.y * v.y + u.z * v.z;
}
__global__
void cu_gauss_filter(float3* src, float sigma, float3* dest, int width, int height, int halfWidth_d, float *kernel_d){
const int x = blockIdx.x*blockDim.x+threadIdx.x;
const int y = blockIdx.y*blockDim.y+threadIdx.y;
if(x>=width || y>=height || x<0 || y<0) return;
float3 result = {0.,0.,0.};
float norm = 0.;
// parallel
for ( int i = -halfWidth_d; i<= halfWidth_d; ++i )
{
for ( int j = -halfWidth_d; j<= halfWidth_d; ++j )
{
//if (blockIdx.x == 0 && blockIdx.y == 0)
/*printf("id = %d, (%f, %f, %f), norm = %f\n", y * width + x, dest[ y * width + x ].x, dest[ y * width + x ].y, dest[ y * width + x ].z, norm);*/
/*printf("bidx.x = %d, bidx.y = %d\n", blockIdx.x, blockIdx.y);*/
if ( ( y + i ) >= 0 && ( x + j ) >= 0 && ( y + i ) < height && ( x + j ) < width )// skip taps that fall outside the image
{
result.x += src[ ( y + i ) * width + x + j ].x * kernel_d[ ( i + halfWidth_d ) * ( halfWidth_d * 2 + 1 ) + j + halfWidth_d ];
result.y += src[ ( y + i ) * width + x + j ].y * kernel_d[ ( i + halfWidth_d ) * ( halfWidth_d * 2 + 1 ) + j + halfWidth_d ];
result.z += src[ ( y + i ) * width + x + j ].z * kernel_d[ ( i + halfWidth_d ) * ( halfWidth_d * 2 + 1 ) + j + halfWidth_d ];
norm += kernel_d[ ( i + halfWidth_d ) * ( halfWidth_d * 2 + 1 ) + j + halfWidth_d ];
}
}
}
if(fabs(norm)>EPLISION){
dest[ y * width + x ].x = result.x / norm;
dest[ y * width + x ].y = result.y / norm;
dest[ y * width + x ].z = result.z / norm;
}else{
dest[ y * width + x ].x = 0.;
dest[ y * width + x ].y = 0.;
dest[ y * width + x ].z = 0.;
}
/*if (blockIdx.x == 0 && blockIdx.y == 0)*/
/*{*/
/*printf("id = %d, (%f, %f, %f), norm = %f\n", y * width + x, dest[ y * width + x ].x, dest[ y * width + x ].y, dest[ y * width + x ].z, norm);*/
/*}*/
//
/*dest[ y * width + x ].x = 0.123;*/
/*dest[ y * width + x ].y = 0.123;*/
/*dest[ y * width + x ].z = 0.123;*/
}
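//Builds the edge tangent flow from the smoothed structure tensor: lambda1 is the larger
//eigenvalue of the 2x2 tensor, and the stored xy vector is the normalized eigenvector of the
//smaller eigenvalue (the direction of least contrast change, i.e. along edges);
//sqrt(lambda1) is kept in z as an edge-strength weight.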
__global__
void cu_tangent_flow_map(float3* sst, float sigma, float3* tfm, int width, int height ){
const int x = blockIdx.x*blockDim.x+threadIdx.x;
const int y = blockIdx.y*blockDim.y+threadIdx.y;
if(x>=width || y>=height) return;
float3 g = sst[ y * width + x ];
float lambda1 = 0.5 * ( g.y + g.x + sqrt( g.y * g.y - 2.0 * g.x * g.y + g.x * g.x + 4.0 * g.z * g.z ) );
float3 v = { g.x - lambda1, g.z, 0.0 };
if ( length3( v ) > 0.0 )
{
tfm[ y * width + x ] = normalize( v );
tfm[ y * width + x ].z = sqrt( lambda1 );//sqrt of the major eigenvalue; can serve as an edge-strength weight
}
else//fall back to a fixed unit vector so the flow is never zero
{
tfm[ y * width + x ].x = 0.0;
tfm[ y * width + x ].y = 1.0;
tfm[ y * width + x ].z = 0.0;
}
}
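//Runs n iterations of a flow-guided separable bilateral filter: pass 0 filters along the
//direction perpendicular to the tangent (the gradient direction), pass 1 along the tangent
//itself, sampling the intermediate image with bilinear interpolation. sigma_d controls the
//spatial falloff, sigma_r the range falloff.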
__global__
void cu_orientation_aligned_bilateral_filter( float3*src, float3* tfm, float3*dest, float3* tmp, int n, float sigma_d, float sigma_r, int width, int height){
const int x = blockIdx.x*blockDim.x+threadIdx.x;
const int y = blockIdx.y*blockDim.y+threadIdx.y;
if(x>=width || y>=height) return;
float twoSigmaD2 = 2.0 * sigma_d * sigma_d;
float twoSigmaR2 = 2.0 * sigma_r * sigma_r;
for(int i = 0; i < n; ++i)
{
for(int pass = 0; pass < 2; ++pass)
{
float2 t = { tfm[ y * width + x ].x, tfm[ y * width + x ].y };
float2 tt = { t.y, -t.x };
float2 dir = ( pass == 0 ) ? tt : t;
float2 dabs = abs2( dir );
float ds = 1.0 / ( ( dabs.x > dabs.y ) ? dabs.x : dabs.y );
float3* midsrc = ( i == 0 ? ( pass == 0 ? src : tmp ) : ( pass == 0 ? dest : tmp ) );
float3 center = midsrc[ y * width + x ];
float3 sum = center;
float norm = 1.0;
float halfWidth = 2.0 * sigma_d;
for ( float d = ds; d <= halfWidth; d += ds )
{
float2 p0 = { x + d * dir.x, y + d * dir.y };
float3 c0 = bilinear_interpolate( midsrc, p0, width, height );
float2 p1 = { x - d * dir.x, y - d * dir.y };
float3 c1 = bilinear_interpolate( midsrc, p1, width, height );
float3 d0 = { c0.x -center.x, c0.y - center.y, c0.z - center.z };
float3 d1 = { c1.x -center.x, c1.y - center.y, c1.z - center.z };
float e0 = length3( d0 );
float e1 = length3( d1 );
float kerneld = expf( -d * d / twoSigmaD2 );
float kernele0 = expf( - e0 * e0 / twoSigmaR2 );
float kernele1 = expf( - e1 * e1 / twoSigmaR2 );
norm += kerneld * kernele0;
norm += kerneld * kernele1;
//printf("%lf, %lf, %lf\n", kerneld, kernele0, kernele1);
sum.x += kerneld * kernele0 * c0.x;
sum.x += kerneld * kernele1 * c1.x;
sum.y += kerneld * kernele0 * c0.y;
sum.y += kerneld * kernele1 * c1.y;
sum.z += kerneld * kernele0 * c0.z;
sum.z += kerneld * kernele1 * c1.z;
}
if(fabs(norm)>EPLISION){
sum.x /= norm;
sum.y /= norm;
sum.z /= norm;
}else{
sum.x /= 1;
sum.y /= 1;
sum.z /= 1;
}
( pass == 0 ? tmp : dest )[ y * width + x ] = sum ;
}
}
}
void cudaSafeFree(void** p){
if(*p){
cudaFree(*p);
*p = NULL;
}
}
void cleanup(float3* fsrc, vec3* dest, int size){
if(fsrc&&dest){
printf("clean up\n");
for(int i = 0; i < size; ++i)
{
dest[i].r = fsrc[i].x;
dest[i].g = fsrc[i].y;
dest[i].b = fsrc[i].z;
}
}else{
printf("ZERO\n");
}
}
void print(float3* fsrc, int width, int height){
if(fsrc){
for(int i = 0; i < height; ++i)
{
for(int j = 0; j < width; ++j)
{
fprintf(stdout, "(%.6f %.6f %.6f) ", fsrc[i*width+j].x, fsrc[i*width+j].y, fsrc[i*width+j].z);
}
fprintf(stdout, "\n");
}
}
}
void printd(float* d,int size){
if(d){
printf("%d\n",size);
for(int i = 0; i < size; ++i)
{
fprintf(stdout, "%.6f ", d[i]);
}
}else{
printf("ZERO\n");
}
}
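//Builds a (2*halfWidth+1) x (2*halfWidth+1) Gaussian kernel with halfWidth = ceil(2*sigma);
//the weights are normalized to sum to 1 and returned through *ker (the caller frees them).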
void creat_gauss_kernel(float** ker, float sigma, int* half){
float twoSigma2 = 2.0 * sigma * sigma;
int halfWidth = int ( ceil( 2.0 * sigma ) );
cout << "halfwidth = " << halfWidth << endl;
*half = halfWidth;
float* kernel = new float[(2*halfWidth+1)*(2*halfWidth+1)];
*ker = kernel;
float norm = 0.0;
for ( int i = -halfWidth; i <= halfWidth; i++ )
{
for ( int j = -halfWidth; j <= halfWidth; j++)
{
norm += kernel[ ( i + halfWidth ) * ( halfWidth * 2 + 1 ) + j + halfWidth ] = exp( - ( i * i + j * j ) / twoSigma2 );
}
}
for ( int i = -halfWidth; i <= halfWidth; i++ )
{
for ( int j = -halfWidth; j <= halfWidth; j++)
{
kernel[ ( i + halfWidth ) * ( halfWidth * 2 + 1 ) + j + halfWidth ] /= norm ;
}
}
//printd(kernel, (2*halfWidth+1)*(2*halfWidth+1));
}
#define NO 900
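//Flow-based image abstraction pipeline: structure tensor -> Gaussian smoothing -> edge
//tangent flow -> RGB-to-Lab -> orientation-aligned bilateral filtering (4 iterations) ->
//Lab-to-RGB, with per-step error checks and timing printed along the way.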
void cu_flowabs(vec3* src, vec3* dest, float sigma, vec3* tfm, int width, int height){
if(!(src && dest && tfm)){
return;
}
if(width*height>1024*1024){
printf("Too Large Image\n");
return;
}
struct timeval ts;
struct timezone tz;
gettimeofday (&ts , &tz);
long sec = ts.tv_sec;
long usec = ts.tv_usec;
dim3 dimBlock(16,16);
dim3 dimGrid((width+dimBlock.x-1)/dimBlock.x, (height+dimBlock.y-1)/dimBlock.y);
cout << "dim = (" << dimGrid.x << ", " << dimGrid.y << ")" << endl;
float3* st;
cudaMalloc((void**)&st, sizeof(float3)*width*height); // structure tensor
cudaError_t err = cudaGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
float3* tmp;
cudaMalloc((void**)&tmp, sizeof(float3)*width*height);
err = cudaGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
float3* sst; // smoothed structure tensor
cudaMalloc((void**)&sst, sizeof(float3)*width*height);
err = cudaGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
float3* ftfm; //
cudaMalloc((void**)&ftfm, sizeof(float3)*width*height);
err = cudaGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
float3* lab;
cudaMalloc((void**)&lab, sizeof(float3)*width*height);
err = cudaGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
float3* midlab;
cudaMalloc((void**)&midlab, sizeof(float3)*width*height);
err = cudaGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
float3* rgb;
cudaMalloc((void**)&rgb, sizeof(float3)*width*height);
err = cudaGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
float3* fsrc = 0;
fsrc = new float3[width*height];
//cout << "fsrc = " << (long)fsrc << endl;
for(int i = 0; i < width*height; ++i)
{
fsrc[i].x = src[i].r;
fsrc[i].y = src[i].g;
fsrc[i].z = src[i].b;
}
//printf("%.6f %.6f %.6f\n", fsrc[NO].x,fsrc[NO].y, fsrc[NO].z);
cudaMemcpy(tmp, fsrc, sizeof(float3)*width*height, cudaMemcpyHostToDevice);
err = cudaGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
// structure_tensor
cu_structure_tensor<<<dimGrid, dimBlock>>>(tmp, st, width, height);
err = cudaGetLastError();
if(err)
printf("line : %d, %s\n", __LINE__, cudaGetErrorString(err));
//
cudaMemcpy(fsrc, st, sizeof(float3)*width*height, cudaMemcpyDeviceToHost);
//printf("%.6f %.6f %.6f\n", fsrc[NO].x,fsrc[NO].y, fsrc[NO].z);
// print(fsrc, width, height);
err = cudaGetLastError();
if(err)
printf("%d %s\n", err, cudaGetErrorString(err));
// Create the Gaussian kernel
float* kernel = NULL;
int halfWidth;
creat_gauss_kernel(&kernel, sigma, &halfWidth);
//
//printd(kernel, (2*halfWidth+1)*(2*halfWidth+1));
//halfWidth_d = halfWidth;
float *kernel_d;
cudaMalloc((void**)&kernel_d, sizeof(float)*(2*halfWidth+1)*(2*halfWidth+1));
err = cudaGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
cudaMemcpy(kernel_d, kernel, sizeof(float)*(2*halfWidth+1)*(2*halfWidth+1), cudaMemcpyHostToDevice);
err = cudaGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
printf("line : %d, dimGrid %d %d \n", __LINE__ , dimGrid.x, dimGrid.y);
cu_gauss_filter<<<dimGrid, dimBlock>>>(st, sigma, sst, width, height, halfWidth, kernel_d);
err = cudaGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
cudaMemcpy(fsrc, sst, sizeof(float3)*width*height, cudaMemcpyDeviceToHost);
err = cudaGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
printf("line : %d, error = %d %.6f %.6f %.6f\n", __LINE__, err, fsrc[NO].x,fsrc[NO].y, fsrc[NO].z);
cu_tangent_flow_map<<<dimGrid, dimBlock>>>(sst, sigma, ftfm, width, height);
err = cudaGetLastError();
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
cudaMemcpy(fsrc, ftfm, sizeof(float3)*width*height, cudaMemcpyDeviceToHost);
err = cudaGetLastError();
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
printf("%.6f %.6f %.6f\n", fsrc[NO].x,fsrc[NO].y, fsrc[NO].z);
// rgb
cu_rgb2lab<<<dimGrid, dimBlock>>>(tmp,lab,width, height);
err = cudaGetLastError();
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
cudaMemcpy(fsrc, lab, sizeof(float3)*width*height, cudaMemcpyDeviceToHost);
err = cudaGetLastError();
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
printf("%.6f %.6f %.6f\n", fsrc[NO].x,fsrc[NO].y, fsrc[NO].z);
cu_orientation_aligned_bilateral_filter<<<dimGrid, dimBlock>>>(lab, ftfm, midlab, tmp, 4, 3.0f, 0.0425f, width, height);
err = cudaGetLastError();
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
cudaMemcpy(fsrc, midlab, sizeof(float3)*width*height, cudaMemcpyDeviceToHost);
err = cudaGetLastError();
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
printf(" %.6f %.6f %.6f\n", fsrc[NO].x,fsrc[NO].y, fsrc[NO].z);
cu_lab2rgb<<<dimGrid, dimBlock>>>(midlab,rgb,width, height);
err = cudaGetLastError();
if(err)
cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
cudaMemcpy(fsrc, rgb, sizeof(float3)*width*height, cudaMemcpyDeviceToHost);
//err = cudaGetLastError();
//cout << "LINE = " << __LINE__ << ", err No = " << err << ", " << cudaGetErrorString(err) << endl;
printf(" %.6f %.6f %.6f\n", fsrc[NO].x,fsrc[NO].y, fsrc[NO].z);
// clean up
cudaMemcpy(fsrc, rgb, sizeof(float3)*width*height, cudaMemcpyDeviceToHost);
cleanup(fsrc, dest,width*height);
printf("%.6f %.6f %.6f\n", fsrc[512].x,fsrc[512].y, fsrc[512].z);
// release
FREE:
cudaSafeFree((void**)&st);
cudaSafeFree((void**)&lab);
cudaSafeFree((void**)&rgb);
cudaSafeFree((void**)&midlab);
cudaSafeFree((void**)&tmp);
cudaSafeFree((void**)&sst);
cudaSafeFree((void**)&ftfm);
cudaSafeFree((void**)&kernel_d);
delete[] fsrc;
delete[] kernel;
gettimeofday (&ts , &tz);
printf("sec; %ld\n", ts.tv_sec - sec);
printf("usec; %ld\n",ts.tv_usec - usec);
}
|
bdc49580c405401f518051a72b33c20a562ee27c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void hello_kernel()
{
int threadInBlock = threadIdx.x;
int blockIndex = blockIdx.x;
int blockSize = blockDim.x;
int threadIndex = blockIndex * blockSize + threadInBlock;
printf("Hello from GPU thread %d = %d * %d + %d\n", threadIndex, blockIndex, blockSize, threadInBlock);
}
int main()
{
int numThreadsInBlock = 32;
int numBlocks = 3;
hipLaunchKernelGGL(( hello_kernel), dim3(numBlocks), dim3(numThreadsInBlock), 0, 0, );
hipDeviceSynchronize();
return 0;
}
| bdc49580c405401f518051a72b33c20a562ee27c.cu | #include <stdio.h>
__global__ void hello_kernel()
{
int threadInBlock = threadIdx.x;
int blockIndex = blockIdx.x;
int blockSize = blockDim.x;
int threadIndex = blockIndex * blockSize + threadInBlock;
printf("Hello from GPU thread %d = %d * %d + %d\n", threadIndex, blockIndex, blockSize, threadInBlock);
}
int main()
{
int numThreadsInBlock = 32;
int numBlocks = 3;
hello_kernel<<<numBlocks, numThreadsInBlock>>>();
cudaDeviceSynchronize();
return 0;
}
|
72e58f482bc45afccbd125ad0c697d59e724ab60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "advection.h"
#ifndef BLOCK_SIZE_3D
#define BLOCK_SIZE_3D 8
#endif
#ifndef BLOCK_SIZE_1D
#define BLOCK_SIZE_1D 256
#endif
__device__ unsigned int roundUp(float val) {
return (unsigned int)(val/LATTICE_SIZE)+1;
}
__device__ unsigned int roundDown(float val) {
return (unsigned int)(val/LATTICE_SIZE);
}
__device__ bool checkBound(unsigned int x0,unsigned int x1,unsigned int y0,unsigned int y1,unsigned int z0,unsigned int z1,unsigned int size_x,unsigned int size_y,unsigned int size_z) {
return (x0<size_x&&x1<size_x)&&
(y0<size_y&&y1<size_y)&&
(z0<size_z&&z1<size_z);
}
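//Samples the 3-component field at each backtraced position by interpolating the 8
//surrounding lattice values; positions that fall outside the lattice produce zero.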
__global__ void interpolate(float *pos, float *field, float *val, unsigned int n, unsigned int size_x, unsigned int size_y, unsigned int size_z) {
int i=blockIdx.x*blockDim.x+threadIdx.x;
if(i<n) {
unsigned int x_0=roundDown(pos[3*i]), x_1=roundUp(pos[3*i]);
unsigned int y_0=roundDown(pos[3*i+1]), y_1=roundUp(pos[3*i+1]);
unsigned int z_0=roundDown(pos[3*i+2]), z_1=roundUp(pos[3*i+2]);
if(checkBound(x_0,x_1,y_0,y_1,z_0,z_1,size_x,size_y,size_z)) {
float x_d = (pos[3*i]-x_0)/(x_1-pos[3*i]);
float y_d = (pos[3*i+1]-y_0)/(y_1-pos[3*i+1]);
float z_d = (pos[3*i+2]-z_0)/(z_1-pos[3*i+2]);
float c8[8];
float c4[4];
float c2[2];
for(int l=0;l<3;l++) {
c8[0] = field[((z_0*size_y+y_0)*size_x+x_1)*3+l];
c8[1] = field[((z_1*size_y+y_0)*size_x+x_1)*3+l];
c8[2] = field[((z_0*size_y+y_0)*size_x+x_0)*3+l];
c8[3] = field[((z_1*size_y+y_0)*size_x+x_0)*3+l];
c8[4] = field[((z_0*size_y+y_1)*size_x+x_1)*3+l];
c8[5] = field[((z_1*size_y+y_1)*size_x+x_1)*3+l];
c8[6] = field[((z_0*size_y+y_1)*size_x+x_0)*3+l];
c8[7] = field[((z_1*size_y+y_1)*size_x+x_0)*3+l];
for(int j=0;j<4;j++)
c4[j] = c8[j]*(1-x_d)+c8[j+4]*x_d;
for(int j=0;j<2;j++)
c2[j] = c4[j]*(1-y_d)+c4[j+2]*y_d;
val[3*i+l] = c2[0]*(1-z_d)+c2[1]*z_d;
}
} else {
val[3*i]=0;
val[3*i+1]=0;
val[3*i+2]=0;
}
}
}
__global__ void reverseStep(float *field, float *pos, unsigned int n, unsigned int size_x, unsigned int size_y, unsigned int size_z) {
int i=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int z = i/(size_x*size_y);
unsigned int y = (i-z*(size_x*size_y))/size_x;
unsigned int x = i-(z*size_y+y)*size_x;
if(i<n) {
pos[3*i] = x*LATTICE_SIZE - field[((z*size_y+y)*size_x+x)*3];
pos[3*i+1] = y*LATTICE_SIZE - field[((z*size_y+y)*size_x+x)*3+1];
pos[3*i+2] = z*LATTICE_SIZE - field[((z*size_y+y)*size_x+x)*3+2];
}
}
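//Semi-Lagrangian self-advection of the velocity field: backtrace every lattice point by its
//local velocity (reverseStep), resample the field at the departure points (interpolate),
//re-apply the velocity boundary conditions, and swap the result back into vec_field.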
void advection(float *&vec_field, unsigned int size_x, unsigned int size_y, unsigned int size_z) {
unsigned int n = size_x*size_y*size_z;
float *pos, *temp;
hipMalloc((void**) &pos, n*3*sizeof(float));
hipMalloc((void**) &temp, n*3*sizeof(float));
hipDeviceSynchronize();
dim3 dimBlock(BLOCK_SIZE_1D,1,1);
dim3 dimGrid((n-1)/BLOCK_SIZE_1D+1,1,1);
hipLaunchKernelGGL(( reverseStep), dim3(dimGrid), dim3(dimBlock), 0, 0, vec_field, pos, n, size_x, size_y, size_z);
hipDeviceSynchronize();
hipLaunchKernelGGL(( interpolate), dim3(dimGrid), dim3(dimBlock), 0, 0, pos, vec_field, temp, n, size_x, size_y, size_z);
hipDeviceSynchronize();
hipLaunchKernelGGL(( applyVelocityBoundary), dim3(dimGrid), dim3(dimBlock), 0, 0, temp, size_x, size_y, size_z);
float *temp_swap = vec_field;
vec_field = temp;
temp = temp_swap;
//vectorCopy<<<1000,(n*3-1)/1000+1>>>(temp, vec_field, n*3);
hipError_t cuda_ret = hipFree(pos);
if(cuda_ret != hipSuccess)
printf("%s\n",hipGetErrorString(cuda_ret)); fflush(stdout);
cuda_ret = hipFree(temp);
if(cuda_ret != hipSuccess)
printf("%s\n",hipGetErrorString(cuda_ret)); fflush(stdout);
hipDeviceSynchronize();
}
| 72e58f482bc45afccbd125ad0c697d59e724ab60.cu | #include <stdio.h>
#include "advection.h"
#ifndef BLOCK_SIZE_3D
#define BLOCK_SIZE_3D 8
#endif
#ifndef BLOCK_SIZE_1D
#define BLOCK_SIZE_1D 256
#endif
__device__ unsigned int roundUp(float val) {
return (unsigned int)(val/LATTICE_SIZE)+1;
}
__device__ unsigned int roundDown(float val) {
return (unsigned int)(val/LATTICE_SIZE);
}
__device__ bool checkBound(unsigned int x0,unsigned int x1,unsigned int y0,unsigned int y1,unsigned int z0,unsigned int z1,unsigned int size_x,unsigned int size_y,unsigned int size_z) {
return (x0<size_x&&x1<size_x)&&
(y0<size_y&&y1<size_y)&&
(z0<size_z&&z1<size_z);
}
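//Samples the 3-component field at each backtraced position by interpolating the 8
//surrounding lattice values; positions that fall outside the lattice produce zero.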
__global__ void interpolate(float *pos, float *field, float *val, unsigned int n, unsigned int size_x, unsigned int size_y, unsigned int size_z) {
int i=blockIdx.x*blockDim.x+threadIdx.x;
if(i<n) {
unsigned int x_0=roundDown(pos[3*i]), x_1=roundUp(pos[3*i]);
unsigned int y_0=roundDown(pos[3*i+1]), y_1=roundUp(pos[3*i+1]);
unsigned int z_0=roundDown(pos[3*i+2]), z_1=roundUp(pos[3*i+2]);
if(checkBound(x_0,x_1,y_0,y_1,z_0,z_1,size_x,size_y,size_z)) {
float x_d = (pos[3*i]-x_0)/(x_1-pos[3*i]);
float y_d = (pos[3*i+1]-y_0)/(y_1-pos[3*i+1]);
float z_d = (pos[3*i+2]-z_0)/(z_1-pos[3*i+2]);
float c8[8];
float c4[4];
float c2[2];
for(int l=0;l<3;l++) {
c8[0] = field[((z_0*size_y+y_0)*size_x+x_1)*3+l];
c8[1] = field[((z_1*size_y+y_0)*size_x+x_1)*3+l];
c8[2] = field[((z_0*size_y+y_0)*size_x+x_0)*3+l];
c8[3] = field[((z_1*size_y+y_0)*size_x+x_0)*3+l];
c8[4] = field[((z_0*size_y+y_1)*size_x+x_1)*3+l];
c8[5] = field[((z_1*size_y+y_1)*size_x+x_1)*3+l];
c8[6] = field[((z_0*size_y+y_1)*size_x+x_0)*3+l];
c8[7] = field[((z_1*size_y+y_1)*size_x+x_0)*3+l];
for(int j=0;j<4;j++)
c4[j] = c8[j]*(1-x_d)+c8[j+4]*x_d;
for(int j=0;j<2;j++)
c2[j] = c4[j]*(1-y_d)+c4[j+2]*y_d;
val[3*i+l] = c2[0]*(1-z_d)+c2[1]*z_d;
}
} else {
val[3*i]=0;
val[3*i+1]=0;
val[3*i+2]=0;
}
}
}
__global__ void reverseStep(float *field, float *pos, unsigned int n, unsigned int size_x, unsigned int size_y, unsigned int size_z) {
int i=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int z = i/(size_x*size_y);
unsigned int y = (i-z*(size_x*size_y))/size_x;
unsigned int x = i-(z*size_y+y)*size_x;
if(i<n) {
pos[3*i] = x*LATTICE_SIZE - field[((z*size_y+y)*size_x+x)*3];
pos[3*i+1] = y*LATTICE_SIZE - field[((z*size_y+y)*size_x+x)*3+1];
pos[3*i+2] = z*LATTICE_SIZE - field[((z*size_y+y)*size_x+x)*3+2];
}
}
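//Semi-Lagrangian self-advection of the velocity field: backtrace every lattice point by its
//local velocity (reverseStep), resample the field at the departure points (interpolate),
//re-apply the velocity boundary conditions, and swap the result back into vec_field.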
void advection(float *&vec_field, unsigned int size_x, unsigned int size_y, unsigned int size_z) {
unsigned int n = size_x*size_y*size_z;
float *pos, *temp;
cudaMalloc((void**) &pos, n*3*sizeof(float));
cudaMalloc((void**) &temp, n*3*sizeof(float));
cudaDeviceSynchronize();
dim3 dimBlock(BLOCK_SIZE_1D,1,1);
dim3 dimGrid((n-1)/BLOCK_SIZE_1D+1,1,1);
reverseStep<<<dimGrid, dimBlock>>>(vec_field, pos, n, size_x, size_y, size_z);
cudaDeviceSynchronize();
interpolate<<<dimGrid, dimBlock>>>(pos, vec_field, temp, n, size_x, size_y, size_z);
cudaDeviceSynchronize();
applyVelocityBoundary<<<dimGrid, dimBlock>>>(temp, size_x, size_y, size_z);
float *temp_swap = vec_field;
vec_field = temp;
temp = temp_swap;
//vectorCopy<<<1000,(n*3-1)/1000+1>>>(temp, vec_field, n*3);
cudaError cuda_ret = cudaFree(pos);
if(cuda_ret != cudaSuccess)
printf("%s\n",cudaGetErrorString(cuda_ret)); fflush(stdout);
cuda_ret = cudaFree(temp);
if(cuda_ret != cudaSuccess)
printf("%s\n",cudaGetErrorString(cuda_ret)); fflush(stdout);
cudaDeviceSynchronize();
}
|
40496561f3f4e7ed3fd5f7750fc4473049b018eb.hip | // !!! This is a file automatically generated by hipify!!!
/* Norman Ponte; Joey Fernau
* annotation generation test
*/
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <getopt.h>
#include <string>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "../../lib/CycleTimer.h"
#define BBLOG(bbid) printf("%d,%d\n", blockIdx.x * blockDim.x + threadIdx.x, bbid)
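// BBLOG(bbid) emits one "<global thread id>,<basic block id>" CSV line every time the
// instrumented basic block executes; main() prints the matching "tid,bb" header.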
extern float toBW(int bytes, float sec);
__device__ int test ( int x , int y , int z ) {
BBLOG(2);
int result = 0;
if (x == 0) {
BBLOG(3);
for (int i = 0; i < 1000000; i++)
result += y - z;
} else if (x == 1) {
BBLOG(4);
for (int i = 0; i < 1000000; i++)
result += y + z;
} else if (x == 2) {
BBLOG(5);
for (int i = 0; i < 1000000; i++)
result += y * z;
} else {
BBLOG(6);
for (int i = 0; i < 1000000; i++)
result += y / z;
}
BBLOG(7);
return result;
}
__global__ void
test_kernel(int N, float* result) {
BBLOG(0);
// compute overall index from position of thread in current block,
// and given the block we are in
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
BBLOG(1);
result[index] = test(index % 4, index % 13, index % 7);
}
BBLOG(8);
}
void
mainCuda(int N, float* resultarray) {
// compute number of blocks and threads per block
const int threadsPerBlock = 32;
const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
float* device_result;
hipMalloc((void **) &device_result, N * sizeof(float));
// start timing after allocation of device memory.
double startTime = CycleTimer::currentSeconds();
//hipMemcpy(device_x, xarray, N * sizeof(float),
// hipMemcpyHostToDevice);
hipLaunchKernelGGL(( test_kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, device_result);
hipDeviceSynchronize();
hipMemcpy(resultarray, device_result, N * sizeof(float),
hipMemcpyDeviceToHost);
hipError_t errCode = hipPeekAtLastError();
if (errCode != hipSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n",
errCode, hipGetErrorString(errCode));
}
hipFree(device_result);
}
// return GB/s
float toBW(int bytes, float sec) {
return static_cast<float>(bytes) / (1024. * 1024. * 1024.) / sec;
}
void mainCuda(int N, float* result);
int main(int argc, char** argv)
{
printf("tid,bb\n");
int N = std::atoi(argv[1]);
float* resultarray = new float[N];
mainCuda(N, resultarray);
return 0;
}
| 40496561f3f4e7ed3fd5f7750fc4473049b018eb.cu | /* Norman Ponte; Joey Fernau
* annotation generation test
*/
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <getopt.h>
#include <string>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "../../lib/CycleTimer.h"
#define BBLOG(bbid) printf("%d,%d\n", blockIdx.x * blockDim.x + threadIdx.x, bbid)
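// BBLOG(bbid) emits one "<global thread id>,<basic block id>" CSV line every time the
// instrumented basic block executes; main() prints the matching "tid,bb" header.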
extern float toBW(int bytes, float sec);
__device__ int test ( int x , int y , int z ) {
BBLOG(2);
int result = 0;
if (x == 0) {
BBLOG(3);
for (int i = 0; i < 1000000; i++)
result += y - z;
} else if (x == 1) {
BBLOG(4);
for (int i = 0; i < 1000000; i++)
result += y + z;
} else if (x == 2) {
BBLOG(5);
for (int i = 0; i < 1000000; i++)
result += y * z;
} else {
BBLOG(6);
for (int i = 0; i < 1000000; i++)
result += y / z;
}
BBLOG(7);
return result;
}
__global__ void
test_kernel(int N, float* result) {
BBLOG(0);
// compute overall index from position of thread in current block,
// and given the block we are in
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
BBLOG(1);
result[index] = test(index % 4, index % 13, index % 7);
}
BBLOG(8);
}
void
mainCuda(int N, float* resultarray) {
// compute number of blocks and threads per block
const int threadsPerBlock = 32;
const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
float* device_result;
cudaMalloc((void **) &device_result, N * sizeof(float));
// start timing after allocation of device memory.
double startTime = CycleTimer::currentSeconds();
//cudaMemcpy(device_x, xarray, N * sizeof(float),
// cudaMemcpyHostToDevice);
test_kernel<<<blocks, threadsPerBlock>>>(N, device_result);
cudaThreadSynchronize();
cudaMemcpy(resultarray, device_result, N * sizeof(float),
cudaMemcpyDeviceToHost);
cudaError_t errCode = cudaPeekAtLastError();
if (errCode != cudaSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n",
errCode, cudaGetErrorString(errCode));
}
cudaFree(device_result);
}
// return GB/s
float toBW(int bytes, float sec) {
return static_cast<float>(bytes) / (1024. * 1024. * 1024.) / sec;
}
void mainCuda(int N, float* result);
int main(int argc, char** argv)
{
printf("tid,bb\n");
int N = std::atoi(argv[1]);
float* resultarray = new float[N];
mainCuda(N, resultarray);
return 0;
}
|
f03a15a763112b3f489ecc55225feb3b7da2a8e1.hip | // !!! This is a file automatically generated by hipify!!!
#include <THHUNN/THHUNN.h>
#include <THHUNN/common.h>
#include <TH/THHalf.h>
#include <THHUNN/THHHalfAutoNumerics.cuh>
#include <THH/THHApply.cuh>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
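// Functors for the soft margin criterion l(x, y) = log(1 + exp(-x*y)) and its gradient,
// in reducing and element-wise (no_reduce) variants.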
template <typename Dtype, typename Acctype>
struct softmargin_functor
{
__host__ __device__ Acctype operator()(const Dtype& x, const Dtype& y) const
{
return log(1 + exp(ScalarConvert<Dtype, Acctype>::to(-x)*y));
}
};
template <typename Dtype, typename Acctype>
struct softmargin_no_reduce_functor
{
__host__ __device__ void operator()(
const Dtype *x,
const Dtype *y,
Dtype *out) const
{
*out = ScalarConvert<Acctype, Dtype>::to(log(ScalarConvert<int, Acctype>::to(1)
+ exp(ScalarConvert<Dtype, Acctype>::to(-*x) * *y)));
}
};
template <typename Dtype, typename Acctype>
struct softmargin_updateGradInput_functor
{
const Acctype norm;
const Dtype gradOutput;
softmargin_updateGradInput_functor(Acctype norm_, Dtype gradOutput_) :
norm(norm_), gradOutput(gradOutput_) {}
__host__ __device__ Dtype operator()(const Dtype& x, const Dtype& y) const
{
Acctype temp = exp(ScalarConvert<Dtype, Acctype>::to(-x)*y);
return ScalarConvert<Acctype, Dtype>::to(-y*temp*norm/(ScalarConvert<int, Acctype>::to(1) + temp) * gradOutput);
}
};
template <typename Dtype, typename Acctype>
struct softmargin_updateGradInput_no_reduce_functor
{
__forceinline__ __host__ __device__ void operator()(
const Dtype *x,
const Dtype *y,
Dtype *gradInput) const
{
Acctype temp = exp(ScalarConvert<Dtype, Acctype>::to(-*x) * *y);
*gradInput = ScalarConvert<Acctype, Dtype>::to(-*y * temp / (ScalarConvert<int, Acctype>::to(1) + temp));
}
};
#include <THHUNN/generic/SoftMarginCriterion.hip>
#include <THH/THHGenerateFloatTypes.h>
| f03a15a763112b3f489ecc55225feb3b7da2a8e1.cu | #include <THCUNN/THCUNN.h>
#include <THCUNN/common.h>
#include <TH/THHalf.h>
#include <THCUNN/THCHalfAutoNumerics.cuh>
#include <THC/THCApply.cuh>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
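// Functors for the soft margin criterion: the forward functors evaluate the
// element-wise loss log(1 + exp(-x*y)), and the updateGradInput functors
// evaluate its derivative with respect to x (optionally scaled by a
// normalization factor and the incoming gradient).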
template <typename Dtype, typename Acctype>
struct softmargin_functor
{
__host__ __device__ Acctype operator()(const Dtype& x, const Dtype& y) const
{
return log(1 + exp(ScalarConvert<Dtype, Acctype>::to(-x)*y));
}
};
template <typename Dtype, typename Acctype>
struct softmargin_no_reduce_functor
{
__host__ __device__ void operator()(
const Dtype *x,
const Dtype *y,
Dtype *out) const
{
*out = ScalarConvert<Acctype, Dtype>::to(log(ScalarConvert<int, Acctype>::to(1)
+ exp(ScalarConvert<Dtype, Acctype>::to(-*x) * *y)));
}
};
template <typename Dtype, typename Acctype>
struct softmargin_updateGradInput_functor
{
const Acctype norm;
const Dtype gradOutput;
softmargin_updateGradInput_functor(Acctype norm_, Dtype gradOutput_) :
norm(norm_), gradOutput(gradOutput_) {}
__host__ __device__ Dtype operator()(const Dtype& x, const Dtype& y) const
{
Acctype temp = exp(ScalarConvert<Dtype, Acctype>::to(-x)*y);
return ScalarConvert<Acctype, Dtype>::to(-y*temp*norm/(ScalarConvert<int, Acctype>::to(1) + temp) * gradOutput);
}
};
template <typename Dtype, typename Acctype>
struct softmargin_updateGradInput_no_reduce_functor
{
__forceinline__ __host__ __device__ void operator()(
const Dtype *x,
const Dtype *y,
Dtype *gradInput) const
{
Acctype temp = exp(ScalarConvert<Dtype, Acctype>::to(-*x) * *y);
*gradInput = ScalarConvert<Acctype, Dtype>::to(-*y * temp / (ScalarConvert<int, Acctype>::to(1) + temp));
}
};
#include <THCUNN/generic/SoftMarginCriterion.cu>
#include <THC/THCGenerateFloatTypes.h>
|
df1b4784a60d5400ea9a37d4a1def9e8223e455c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 1000
#define THREADS_N 512
__global__ void VecAdd(int* DA, int* DB, int* DC)
{
int i = threadIdx.x + (blockIdx.x * blockDim.x); // thread id + block id * threads per block
if(i<N){ // only indices 0..N-1 are valid
DC[i] = DA[i] + DB[i];
}
}
int main()
{
hipFree(0);
int HA[N], HB[N], HC[N];
int *DA, *DB, *DC;
int i;
int size = N*sizeof(int);
hipError_t aM,bM,cM,aN,bN,cN,e_kernel; // store error codes
// allocate space in the device's global memory
aM = hipMalloc((void**)&DA, size);
printf(" hipMalloc DA: %s \n",hipGetErrorString(aM));
bM = hipMalloc((void**)&DB, size);
printf("hipMalloc DB: %s \n",hipGetErrorString(bM));
cM = hipMalloc((void**)&DC, size);
printf("hipMalloc DC: %s \n",hipGetErrorString(cM));
// initialize HA and HB
for (i=0; i<N; i++) {HA[i]=-i; HB[i] = 3*i;}
// copy HA and HB from the host to DA and DB on the device, respectively
aN = hipMemcpy(DA, HA, size, hipMemcpyHostToDevice);
printf(" hipMemcpy DA: %s \n",hipGetErrorString(aN));
bN = hipMemcpy(DB, HB, size, hipMemcpyHostToDevice);
printf(" hipMemcpy DB: %s \n",hipGetErrorString(bN));
// launch the kernel (a 1-D grid of THREADS_N-thread blocks)
dim3 dg, db; // 3-dimensional tuples for the grid and the blocks
dg.x = (N + THREADS_N - 1) / THREADS_N; /* number of 1-D blocks needed */
db.x = THREADS_N;
hipLaunchKernelGGL(( VecAdd) , dim3(dg), dim3(db), 0, 0, DA, DB, DC);
e_kernel = hipGetLastError(); // grab the last error, since the kernel launch itself does not return an error code
printf(" kernel: %s \n",hipGetErrorString(e_kernel)); // print the last error
// copy the result, which lives in the device's global memory (DC), back to the host (HC)
cN = hipMemcpy(HC, DC, size, hipMemcpyDeviceToHost);
printf(" hipMemcpy HC: %s \n",hipGetErrorString(cN));
// free the memory allocated on the device
hipFree(DA); hipFree(DB); hipFree(DC);
// once the results are on the host, check that they are correct
// this check should be removed once the program is correct (e.g., to measure execution time)
for (i = 0; i < N; i++){
printf("pos:%d, %d + %d = %d\n",i,HA[i],HB[i],HC[i]);
if (HC[i]!= (HA[i]+HB[i]))
{
printf("error en componente %d\n", i);}
}
return 0;
}
| df1b4784a60d5400ea9a37d4a1def9e8223e455c.cu | #include <stdio.h>
#define N 1000
#define THREADS_N 512
__global__ void VecAdd(int* DA, int* DB, int* DC)
{
int i = threadIdx.x + (blockIdx.x * blockDim.x); // thread id + block id * threads per block
if(i<N){ // only indices 0..N-1 are valid
DC[i] = DA[i] + DB[i];
}
}
int main()
{
cudaFree(0);
int HA[N], HB[N], HC[N];
int *DA, *DB, *DC;
int i;
int size = N*sizeof(int);
cudaError_t aM,bM,cM,aN,bN,cN,e_kernel; // store error codes
// allocate space in the device's global memory
aM = cudaMalloc((void**)&DA, size);
printf(" cudaMalloc DA: %s \n",cudaGetErrorString(aM));
bM = cudaMalloc((void**)&DB, size);
printf("cudaMalloc DB: %s \n",cudaGetErrorString(bM));
cM = cudaMalloc((void**)&DC, size);
printf("cudaMalloc DC: %s \n",cudaGetErrorString(cM));
// initialize HA and HB
for (i=0; i<N; i++) {HA[i]=-i; HB[i] = 3*i;}
// copy HA and HB from the host to DA and DB on the device, respectively
aN = cudaMemcpy(DA, HA, size, cudaMemcpyHostToDevice);
printf(" cudaMemcpy DA: %s \n",cudaGetErrorString(aN));
bN = cudaMemcpy(DB, HB, size, cudaMemcpyHostToDevice);
printf(" cudaMemcpy DB: %s \n",cudaGetErrorString(bN));
// launch the kernel (a 1-D grid of THREADS_N-thread blocks)
dim3 dg, db; // 3-dimensional tuples for the grid and the blocks
dg.x = (N + THREADS_N - 1) / THREADS_N; /* number of 1-D blocks needed */
db.x = THREADS_N;
VecAdd <<<dg, db>>>(DA, DB, DC);
e_kernel = cudaGetLastError(); // grab the last error, since the kernel launch itself does not return an error code
printf(" kernel: %s \n",cudaGetErrorString(e_kernel)); // print the last error
// copy the result, which lives in the device's global memory (DC), back to the host (HC)
cN = cudaMemcpy(HC, DC, size, cudaMemcpyDeviceToHost);
printf(" cudaMemcpy HC: %s \n",cudaGetErrorString(cN));
// free the memory allocated on the device
cudaFree(DA); cudaFree(DB); cudaFree(DC);
// once the results are on the host, check that they are correct
// this check should be removed once the program is correct (e.g., to measure execution time)
for (i = 0; i < N; i++){
printf("pos:%d, %d + %d = %d\n",i,HA[i],HB[i],HC[i]);
if (HC[i]!= (HA[i]+HB[i]))
{
printf("error en componente %d\n", i);}
}
return 0;
}
|
fe9139edb8a2956d3df8c157928367cc7dd2c480.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define TILE 16
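// Tiled transpose kernel. The shared-memory tile is declared with TILE+1
// columns: the extra padding column makes threads of a warp that read a tile
// column fall into different shared-memory banks, avoiding bank conflicts
// (hence the kernel name).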
__global__ void transposeNoBankConflicts(float *odata, float *idata,\
int in_pitch, int out_pitch) {
__shared__ float tile[TILE][TILE+1];
int xIndex = blockIdx.x * TILE + threadIdx.x;
int yIndex = blockIdx.y * TILE + threadIdx.y;
int index_in = xIndex + (yIndex)*in_pitch;
// HERE: uncomment either part 1 or part 2
// PART 1
// xIndex = blockIdx.y * TILE + threadIdx.x;
// yIndex = blockIdx.x * TILE + threadIdx.y;
//////////
// PART 2
// xIndex = blockIdx.y * TILE + threadIdx.y;
// yIndex = blockIdx.x * TILE + threadIdx.x;
//////////
int index_out = xIndex + (yIndex)*out_pitch;
tile[threadIdx.y][threadIdx.x] = idata[index_in];
__syncthreads();
// HERE: uncomment either part 1 or part 2
// NOTE: which part you uncomment here depends on your choice above.
// Which combination is preferable?
// PART 1
// odata[index_out] = tile[threadIdx.x][threadIdx.y];
//////
// PART 2
// odata[index_out] = tile[threadIdx.y][threadIdx.x];
//////
}
extern "C" void transpose (float *matrix_in, float *matrix_out, int inp, int outp) {
dim3 grid, threads;
if (inp % TILE != 0) {
fprintf (stderr, "Size problem...\n");
exit (EXIT_FAILURE);
}
if (outp % TILE != 0) {
fprintf (stderr, "Size problem...\n");
exit (EXIT_FAILURE);
}
grid.x = inp/TILE;
grid.y = outp/TILE;
threads.x = TILE;
threads.y = TILE;
hipLaunchKernelGGL(( transposeNoBankConflicts), dim3(grid),dim3(threads), 0, 0, matrix_out,matrix_in,inp,outp);
}
| fe9139edb8a2956d3df8c157928367cc7dd2c480.cu | #include <stdio.h>
#include <stdlib.h>
#define TILE 16
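// Tiled transpose kernel. The shared-memory tile is declared with TILE+1
// columns: the extra padding column makes threads of a warp that read a tile
// column fall into different shared-memory banks, avoiding bank conflicts
// (hence the kernel name).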
__global__ void transposeNoBankConflicts(float *odata, float *idata,\
int in_pitch, int out_pitch) {
__shared__ float tile[TILE][TILE+1];
int xIndex = blockIdx.x * TILE + threadIdx.x;
int yIndex = blockIdx.y * TILE + threadIdx.y;
int index_in = xIndex + (yIndex)*in_pitch;
// HERE: uncomment either part 1 or part 2
// PART 1
// xIndex = blockIdx.y * TILE + threadIdx.x;
// yIndex = blockIdx.x * TILE + threadIdx.y;
//////////
// PART 2
// xIndex = blockIdx.y * TILE + threadIdx.y;
// yIndex = blockIdx.x * TILE + threadIdx.x;
//////////
int index_out = xIndex + (yIndex)*out_pitch;
tile[threadIdx.y][threadIdx.x] = idata[index_in];
__syncthreads();
// HERE: uncomment either part 1 or part 2
// NOTE: which part you uncomment here depends on your choice above.
// Which combination is preferable?
// PART 1
// odata[index_out] = tile[threadIdx.x][threadIdx.y];
//////
// PART 2
// odata[index_out] = tile[threadIdx.y][threadIdx.x];
//////
}
extern "C" void transpose (float *matrix_in, float *matrix_out, int inp, int outp) {
dim3 grid, threads;
if (inp % TILE != 0) {
fprintf (stderr, "Size problem...\n");
exit (EXIT_FAILURE);
}
if (outp % TILE != 0) {
fprintf (stderr, "Size problem...\n");
exit (EXIT_FAILURE);
}
grid.x = inp/TILE;
grid.y = outp/TILE;
threads.x = TILE;
threads.y = TILE;
transposeNoBankConflicts<<<grid,threads>>>(matrix_out,matrix_in,inp,outp);
}
|
73eb211fcd78b3e1c2f4661c93414cde9e390346.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include "distance.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int32_t *d_mss, *d_mss_offsets, *d_ts, *d_ss, *d_tlen, *d_toffsets, *d_slen, *d_soffsets, *d_params, *d_tmp_windows, *d_tmp_windows_offsets, *d_2d_cost_matrix;
int32_t *h_matching_scores;
int num_templates, num_streams, num_params_sets, h_ts_length, h_ss_length, h_mss_length, len_h_tmp_windows;
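// WLCSS matching-score kernel: one block per stream (blockIdx.x) and one
// thread per template / parameter set (threadIdx.x). Each thread walks its
// stream sample by sample, updating a temporary warping window against its
// template with the reward/penalty/accepted-distance parameters, and stores
// the running matching score for every sample in mss[j].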
__global__ void wlcss_cuda_kernel(int32_t *d_mss, int32_t *d_mss_offsets, int32_t *d_ts, int32_t *d_ss, int32_t *d_tlen, int32_t *d_toffsets, int32_t *d_slen, int32_t *d_soffsets, int32_t *d_params, int32_t *d_tmp_windows, int32_t *d_tmp_windows_offsets, int32_t *d_2d_cost_matrix){
int32_t params_idx = threadIdx.x;
int32_t template_idx = threadIdx.x;
int32_t stream_idx = blockIdx.x;
int32_t t_len = d_tlen[template_idx];
int32_t s_len = d_slen[stream_idx];
int32_t t_offset = d_toffsets[template_idx];
int32_t s_offset = d_soffsets[stream_idx];
int32_t d_mss_offset = d_mss_offsets[stream_idx*blockDim.x+template_idx];
int32_t d_tmp_windows_offset = d_tmp_windows_offsets[stream_idx*blockDim.x+template_idx];
int32_t *tmp_window = &d_tmp_windows[d_tmp_windows_offset];
int32_t *mss = &d_mss[d_mss_offset];
int32_t *t = &d_ts[t_offset];
int32_t *s = &d_ss[s_offset];
int32_t reward = d_params[params_idx*3];
int32_t penalty = d_params[params_idx*3+1];
int32_t accepteddist = d_params[params_idx*3+2];
int32_t tmp = 0;
for(int32_t j=0;j<s_len;j++){
for(int32_t i=0;i<t_len;i++){
int32_t distance = d_2d_cost_matrix[s[j]*8 + t[i]];
if (distance <= accepteddist){
tmp = tmp_window[i]+reward;
} else{
tmp = max(tmp_window[i]-penalty*distance,
max(tmp_window[i+1]-penalty*distance,
tmp_window[t_len+1]-penalty*distance));
}
tmp_window[i] = tmp_window[t_len+1];
tmp_window[t_len+1] = tmp;
}
tmp_window[t_len] = tmp_window[t_len+1];
mss[j] = tmp_window[t_len+1];
tmp_window[t_len+1] = 0;
}
}
extern "C"{
void wlcss_cuda_init(
int32_t *h_ts, int32_t *h_tlen, int32_t *h_toffsets,
int32_t *h_ss, int32_t *h_slen, int32_t *h_soffsets,
int32_t *h_params,
int32_t *h_mss, int32_t *h_mss_offsets,
int32_t *h_tmp_windows, int32_t *h_tmp_windows_offsets,
int32_t num_ts, int32_t num_ss, int32_t num_ps,
int32_t h_ts_len, int32_t h_ss_len, int32_t h_mss_len){
num_templates = num_ts;
num_streams = num_ss;
num_params_sets = num_ps;
h_ts_length = h_ts_len;
h_ss_length = h_ss_len;
h_mss_length = h_mss_len;
h_matching_scores = h_mss;
//Allocate memory for cost matrix
gpuErrchk( hipMalloc((void **) &d_2d_cost_matrix, 64 * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_2d_cost_matrix, h_2d_cost_matrix, 64 * sizeof(int32_t), hipMemcpyHostToDevice) );
// Allocate memory for templates array
gpuErrchk( hipMalloc((void **) &d_ts, h_ts_length * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_ts, h_ts, h_ts_length * sizeof(int32_t), hipMemcpyHostToDevice) );
//Allocate memory for templates lengths
gpuErrchk( hipMalloc((void **) &d_tlen, num_templates * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_tlen, h_tlen, num_templates * sizeof(int32_t), hipMemcpyHostToDevice) );
// Allocate memory for templates offsets
gpuErrchk( hipMalloc((void **) &d_toffsets, num_templates * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_toffsets, h_toffsets, num_templates * sizeof(int32_t), hipMemcpyHostToDevice) );
// Allocate memory for streams array
gpuErrchk( hipMalloc((void **) &d_ss, h_ss_length * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_ss, h_ss, h_ss_length * sizeof(int32_t), hipMemcpyHostToDevice) );
// Allocate memory for streams lengths
gpuErrchk( hipMalloc((void **) &d_slen, num_streams * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_slen, h_slen, num_streams * sizeof(int32_t), hipMemcpyHostToDevice) );
// Allocate memory for streams offsets
gpuErrchk( hipMalloc((void **) &d_soffsets, num_streams * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_soffsets, h_soffsets, num_streams * sizeof(int32_t), hipMemcpyHostToDevice) );
// Allocate memory for matching scores
gpuErrchk( hipMalloc((void **) &d_mss, h_mss_length * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_mss, h_mss, h_mss_length * sizeof(int32_t), hipMemcpyHostToDevice) );
//Allocate memory for matching scores offsets
gpuErrchk( hipMalloc((void **) &d_mss_offsets, num_streams*num_templates*num_params_sets * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_mss_offsets, h_mss_offsets, num_streams*num_templates*num_params_sets * sizeof(int32_t), hipMemcpyHostToDevice) );
// Allocate memory for d_params
gpuErrchk( hipMalloc((void **) &d_params, num_params_sets * 3 * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_params, h_params, num_params_sets * 3 * sizeof(int32_t), hipMemcpyHostToDevice) );
// Allocate memory for tmp_windows
len_h_tmp_windows = (h_ts_len + 2 * num_templates) * num_streams;
gpuErrchk( hipMalloc((void **) &d_tmp_windows, len_h_tmp_windows * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_tmp_windows, h_tmp_windows, len_h_tmp_windows * sizeof(int32_t), hipMemcpyHostToDevice) );
int len_h_tmp_windows_offsets = num_templates * num_params_sets * num_streams;
gpuErrchk( hipMalloc((void **) &d_tmp_windows_offsets, len_h_tmp_windows_offsets * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_tmp_windows_offsets, h_tmp_windows_offsets, len_h_tmp_windows_offsets * sizeof(int32_t), hipMemcpyHostToDevice) );
}
void wlcss_cuda(){
hipLaunchKernelGGL(( wlcss_cuda_kernel), dim3(dim3(num_streams)), dim3(num_templates), 0, 0, d_mss, d_mss_offsets, d_ts, d_ss, d_tlen, d_toffsets, d_slen, d_soffsets, d_params, d_tmp_windows, d_tmp_windows_offsets, d_2d_cost_matrix);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
gpuErrchk( hipMemcpy(h_matching_scores, d_mss, h_mss_length * sizeof(int32_t), hipMemcpyDeviceToHost) );
}
void wlcss_freemem(){
hipFree(d_ts);
hipFree(d_tlen);
hipFree(d_toffsets);
hipFree(d_ss);
hipFree(d_slen);
hipFree(d_soffsets);
hipFree(d_mss);
hipFree(d_mss_offsets);
hipFree(d_params);
hipFree(d_tmp_windows);
hipFree(d_tmp_windows_offsets);
hipFree(d_2d_cost_matrix);
}
}
| 73eb211fcd78b3e1c2f4661c93414cde9e390346.cu | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include "distance.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int32_t *d_mss, *d_mss_offsets, *d_ts, *d_ss, *d_tlen, *d_toffsets, *d_slen, *d_soffsets, *d_params, *d_tmp_windows, *d_tmp_windows_offsets, *d_2d_cost_matrix;
int32_t *h_matching_scores;
int num_templates, num_streams, num_params_sets, h_ts_length, h_ss_length, h_mss_length, len_h_tmp_windows;
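// WLCSS matching-score kernel: one block per stream (blockIdx.x) and one
// thread per template / parameter set (threadIdx.x). Each thread walks its
// stream sample by sample, updating a temporary warping window against its
// template with the reward/penalty/accepted-distance parameters, and stores
// the running matching score for every sample in mss[j].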
__global__ void wlcss_cuda_kernel(int32_t *d_mss, int32_t *d_mss_offsets, int32_t *d_ts, int32_t *d_ss, int32_t *d_tlen, int32_t *d_toffsets, int32_t *d_slen, int32_t *d_soffsets, int32_t *d_params, int32_t *d_tmp_windows, int32_t *d_tmp_windows_offsets, int32_t *d_2d_cost_matrix){
int32_t params_idx = threadIdx.x;
int32_t template_idx = threadIdx.x;
int32_t stream_idx = blockIdx.x;
int32_t t_len = d_tlen[template_idx];
int32_t s_len = d_slen[stream_idx];
int32_t t_offset = d_toffsets[template_idx];
int32_t s_offset = d_soffsets[stream_idx];
int32_t d_mss_offset = d_mss_offsets[stream_idx*blockDim.x+template_idx];
int32_t d_tmp_windows_offset = d_tmp_windows_offsets[stream_idx*blockDim.x+template_idx];
int32_t *tmp_window = &d_tmp_windows[d_tmp_windows_offset];
int32_t *mss = &d_mss[d_mss_offset];
int32_t *t = &d_ts[t_offset];
int32_t *s = &d_ss[s_offset];
int32_t reward = d_params[params_idx*3];
int32_t penalty = d_params[params_idx*3+1];
int32_t accepteddist = d_params[params_idx*3+2];
int32_t tmp = 0;
for(int32_t j=0;j<s_len;j++){
for(int32_t i=0;i<t_len;i++){
int32_t distance = d_2d_cost_matrix[s[j]*8 + t[i]];
if (distance <= accepteddist){
tmp = tmp_window[i]+reward;
} else{
tmp = max(tmp_window[i]-penalty*distance,
max(tmp_window[i+1]-penalty*distance,
tmp_window[t_len+1]-penalty*distance));
}
tmp_window[i] = tmp_window[t_len+1];
tmp_window[t_len+1] = tmp;
}
tmp_window[t_len] = tmp_window[t_len+1];
mss[j] = tmp_window[t_len+1];
tmp_window[t_len+1] = 0;
}
}
extern "C"{
void wlcss_cuda_init(
int32_t *h_ts, int32_t *h_tlen, int32_t *h_toffsets,
int32_t *h_ss, int32_t *h_slen, int32_t *h_soffsets,
int32_t *h_params,
int32_t *h_mss, int32_t *h_mss_offsets,
int32_t *h_tmp_windows, int32_t *h_tmp_windows_offsets,
int32_t num_ts, int32_t num_ss, int32_t num_ps,
int32_t h_ts_len, int32_t h_ss_len, int32_t h_mss_len){
num_templates = num_ts;
num_streams = num_ss;
num_params_sets = num_ps;
h_ts_length = h_ts_len;
h_ss_length = h_ss_len;
h_mss_length = h_mss_len;
h_matching_scores = h_mss;
//Allocate memory for cost matrix
gpuErrchk( cudaMalloc((void **) &d_2d_cost_matrix, 64 * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_2d_cost_matrix, h_2d_cost_matrix, 64 * sizeof(int32_t), cudaMemcpyHostToDevice) );
// Allocate memory for templates array
gpuErrchk( cudaMalloc((void **) &d_ts, h_ts_length * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_ts, h_ts, h_ts_length * sizeof(int32_t), cudaMemcpyHostToDevice) );
//Allocate memory for templates lengths
gpuErrchk( cudaMalloc((void **) &d_tlen, num_templates * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_tlen, h_tlen, num_templates * sizeof(int32_t), cudaMemcpyHostToDevice) );
// Allocate memory for templates offsets
gpuErrchk( cudaMalloc((void **) &d_toffsets, num_templates * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_toffsets, h_toffsets, num_templates * sizeof(int32_t), cudaMemcpyHostToDevice) );
// Allocate memory for streams array
gpuErrchk( cudaMalloc((void **) &d_ss, h_ss_length * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_ss, h_ss, h_ss_length * sizeof(int32_t), cudaMemcpyHostToDevice) );
// Allocate memory for streams lengths
gpuErrchk( cudaMalloc((void **) &d_slen, num_streams * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_slen, h_slen, num_streams * sizeof(int32_t), cudaMemcpyHostToDevice) );
// Allocate memory for streams offsets
gpuErrchk( cudaMalloc((void **) &d_soffsets, num_streams * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_soffsets, h_soffsets, num_streams * sizeof(int32_t), cudaMemcpyHostToDevice) );
// Allocate memory for matching scores
gpuErrchk( cudaMalloc((void **) &d_mss, h_mss_length * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_mss, h_mss, h_mss_length * sizeof(int32_t), cudaMemcpyHostToDevice) );
//Allocate memory for matching scores offsets
gpuErrchk( cudaMalloc((void **) &d_mss_offsets, num_streams*num_templates*num_params_sets * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_mss_offsets, h_mss_offsets, num_streams*num_templates*num_params_sets * sizeof(int32_t), cudaMemcpyHostToDevice) );
// Allocate memory for d_params
gpuErrchk( cudaMalloc((void **) &d_params, num_params_sets * 3 * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_params, h_params, num_params_sets * 3 * sizeof(int32_t), cudaMemcpyHostToDevice) );
// Allocate memory for tmp_windows
len_h_tmp_windows = (h_ts_len + 2 * num_templates) * num_streams;
gpuErrchk( cudaMalloc((void **) &d_tmp_windows, len_h_tmp_windows * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_tmp_windows, h_tmp_windows, len_h_tmp_windows * sizeof(int32_t), cudaMemcpyHostToDevice) );
int len_h_tmp_windows_offsets = num_templates * num_params_sets * num_streams;
gpuErrchk( cudaMalloc((void **) &d_tmp_windows_offsets, len_h_tmp_windows_offsets * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_tmp_windows_offsets, h_tmp_windows_offsets, len_h_tmp_windows_offsets * sizeof(int32_t), cudaMemcpyHostToDevice) );
}
void wlcss_cuda(){
wlcss_cuda_kernel<<<dim3(num_streams), num_templates>>>(d_mss, d_mss_offsets, d_ts, d_ss, d_tlen, d_toffsets, d_slen, d_soffsets, d_params, d_tmp_windows, d_tmp_windows_offsets, d_2d_cost_matrix);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
gpuErrchk( cudaMemcpy(h_matching_scores, d_mss, h_mss_length * sizeof(int32_t), cudaMemcpyDeviceToHost) );
}
void wlcss_freemem(){
cudaFree(d_ts);
cudaFree(d_tlen);
cudaFree(d_toffsets);
cudaFree(d_ss);
cudaFree(d_slen);
cudaFree(d_soffsets);
cudaFree(d_mss);
cudaFree(d_mss_offsets);
cudaFree(d_params);
cudaFree(d_tmp_windows);
cudaFree(d_tmp_windows_offsets);
cudaFree(d_2d_cost_matrix);
}
}
|
7128af96cf65b201c6dcf8a1a75e5e105dad2b5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/hashcode.h>
namespace sd {
namespace ops {
namespace helpers {
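// Two-stage reduction used by hashCode_() below: splitBufferToChuncks hashes
// fixed-size chunks of the input with the rolling hash r = 31*r + v and writes
// one partial hash per chunk; internalHash then repeatedly applies the same
// scheme to the partial hashes until a single value remains.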
template <typename T>
static SD_KERNEL void splitBufferToChuncks(T* buffer, sd::LongType* tempBuffer, sd::LongType numBlocks,
sd::LongType blockSize, sd::LongType length) {
for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < numBlocks; b += gridDim.x * blockDim.x) {
auto blockBuffer = buffer + b * numBlocks;
sd::LongType r = 1LL;
for (int e = 0; e < blockSize && e + (b * numBlocks) < length; e++) {
auto v = longBytes<T>(blockBuffer[e]);
r = 31LL * r + v;
}
tempBuffer[b] = r;
}
}
template <typename T>
static SD_KERNEL void internalHash(sd::LongType* tempBuffer, sd::LongType* tempResult, sd::LongType numBlocks,
sd::LongType blockSize, sd::LongType lastLength) {
for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < numBlocks; b += gridDim.x * blockDim.x) {
auto blockBuffer = tempBuffer + b * numBlocks;
sd::LongType r = 1LL;
for (sd::LongType e = 0; e < blockSize && e + (b * numBlocks) < lastLength; e++) {
auto v = longBytes<T>(blockBuffer[e]);
r = 31LL * r + v;
}
tempResult[b] = r;
}
}
static SD_KERNEL void lastStep(sd::LongType* resultBuf, sd::LongType* tempBufferA, sd::LongType* tempResult,
sd::LongType length, sd::LongType blockSize) {
if (threadIdx.x == 0) {
if (length <= blockSize)
*resultBuf = *tempBufferA;
else
*resultBuf = *tempResult;
}
}
template <typename T>
void hashCode_(LaunchContext* context, NDArray& array, NDArray& result) {
auto blockSize = 32;
auto stream = context->getCudaStream();
array.syncToDevice();
NDArray::prepareSpecialUse({&result}, {&array});
auto length = array.lengthOf();
int numBlocks = length / blockSize + ((length % blockSize == 0) ? 0 : 1);
auto tempA = NDArrayFactory::create<sd::LongType>('c', {numBlocks}, context);
auto tempB = NDArrayFactory::create<sd::LongType>('c', {numBlocks / blockSize + 1}, context);
auto buffer = reinterpret_cast<T*>(array.specialBuffer()); // bufferAsT<T>();
auto tempBufferA = reinterpret_cast<sd::LongType*>(tempA.specialBuffer()); // bufferAsT<sd::LongType>();
auto tempBufferB = reinterpret_cast<sd::LongType*>(tempB.specialBuffer()); // bufferAsT<sd::LongType>();
// default buffer is the first one, because it might be the last one in case of small arrays (< blockSize)
auto tempBuffer = tempBufferA;
auto tempResult = tempBufferB;
// we divide array into 32 element chunks, and store intermediate results once
hipLaunchKernelGGL(( splitBufferToChuncks<T>), dim3(numBlocks), dim3(1), 1024, *stream, buffer, tempBuffer, numBlocks, blockSize, length);
// we replace pointer with intermediate one, and repeat only one chunk left
int iterationCount = 0;
while (numBlocks > 1) {
int lastLength = numBlocks;
numBlocks = lastLength / blockSize + ((lastLength % blockSize == 0) ? 0 : 1);
hipLaunchKernelGGL(( internalHash<sd::LongType>)
, dim3(numBlocks), dim3(1), 1024, *stream, tempBuffer, tempResult, numBlocks, blockSize, lastLength);
iterationCount++;
// swapping buffers
if (iterationCount % 2 == 0) {
tempBuffer = tempBufferA;
tempResult = tempBufferB;
} else {
tempBuffer = tempBufferB;
tempResult = tempBufferA;
}
}
hipLaunchKernelGGL(( lastStep), dim3(1), dim3(1), 128, *stream, reinterpret_cast<sd::LongType*>(result.specialBuffer()), tempBufferA, tempResult,
length, blockSize);
// tempA.syncToHost();
// tempB.syncToHost();
// result.assign((length <= blockSize?tempA.e(0) : tempB.e(0)));
NDArray::registerSpecialUse({&result}, {&array});
}
void hashCode(LaunchContext* context, NDArray& array, NDArray& result) {
BUILD_SINGLE_SELECTOR(array.dataType(), hashCode_, (context, array, result), SD_COMMON_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void hashCode_, (LaunchContext * context, NDArray& array, NDArray& result),
SD_COMMON_TYPES);
} // namespace helpers
} // namespace ops
} // namespace sd
| 7128af96cf65b201c6dcf8a1a75e5e105dad2b5d.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/hashcode.h>
namespace sd {
namespace ops {
namespace helpers {
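// Two-stage reduction used by hashCode_() below: splitBufferToChuncks hashes
// fixed-size chunks of the input with the rolling hash r = 31*r + v and writes
// one partial hash per chunk; internalHash then repeatedly applies the same
// scheme to the partial hashes until a single value remains.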
template <typename T>
static SD_KERNEL void splitBufferToChuncks(T* buffer, sd::LongType* tempBuffer, sd::LongType numBlocks,
sd::LongType blockSize, sd::LongType length) {
for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < numBlocks; b += gridDim.x * blockDim.x) {
auto blockBuffer = buffer + b * numBlocks;
sd::LongType r = 1LL;
for (int e = 0; e < blockSize && e + (b * numBlocks) < length; e++) {
auto v = longBytes<T>(blockBuffer[e]);
r = 31LL * r + v;
}
tempBuffer[b] = r;
}
}
template <typename T>
static SD_KERNEL void internalHash(sd::LongType* tempBuffer, sd::LongType* tempResult, sd::LongType numBlocks,
sd::LongType blockSize, sd::LongType lastLength) {
for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < numBlocks; b += gridDim.x * blockDim.x) {
auto blockBuffer = tempBuffer + b * numBlocks;
sd::LongType r = 1LL;
for (sd::LongType e = 0; e < blockSize && e + (b * numBlocks) < lastLength; e++) {
auto v = longBytes<T>(blockBuffer[e]);
r = 31LL * r + v;
}
tempResult[b] = r;
}
}
static SD_KERNEL void lastStep(sd::LongType* resultBuf, sd::LongType* tempBufferA, sd::LongType* tempResult,
sd::LongType length, sd::LongType blockSize) {
if (threadIdx.x == 0) {
if (length <= blockSize)
*resultBuf = *tempBufferA;
else
*resultBuf = *tempResult;
}
}
template <typename T>
void hashCode_(LaunchContext* context, NDArray& array, NDArray& result) {
auto blockSize = 32;
auto stream = context->getCudaStream();
array.syncToDevice();
NDArray::prepareSpecialUse({&result}, {&array});
auto length = array.lengthOf();
int numBlocks = length / blockSize + ((length % blockSize == 0) ? 0 : 1);
auto tempA = NDArrayFactory::create<sd::LongType>('c', {numBlocks}, context);
auto tempB = NDArrayFactory::create<sd::LongType>('c', {numBlocks / blockSize + 1}, context);
auto buffer = reinterpret_cast<T*>(array.specialBuffer()); // bufferAsT<T>();
auto tempBufferA = reinterpret_cast<sd::LongType*>(tempA.specialBuffer()); // bufferAsT<sd::LongType>();
auto tempBufferB = reinterpret_cast<sd::LongType*>(tempB.specialBuffer()); // bufferAsT<sd::LongType>();
// default buffer is the first one, because it might be the last one in case of small arrays (< blockSize)
auto tempBuffer = tempBufferA;
auto tempResult = tempBufferB;
// we divide array into 32 element chunks, and store intermediate results once
splitBufferToChuncks<T><<<numBlocks, 1, 1024, *stream>>>(buffer, tempBuffer, numBlocks, blockSize, length);
// we replace pointer with intermediate one, and repeat only one chunk left
int iterationCount = 0;
while (numBlocks > 1) {
int lastLength = numBlocks;
numBlocks = lastLength / blockSize + ((lastLength % blockSize == 0) ? 0 : 1);
internalHash<sd::LongType>
<<<numBlocks, 1, 1024, *stream>>>(tempBuffer, tempResult, numBlocks, blockSize, lastLength);
iterationCount++;
// swapping buffers
if (iterationCount % 2 == 0) {
tempBuffer = tempBufferA;
tempResult = tempBufferB;
} else {
tempBuffer = tempBufferB;
tempResult = tempBufferA;
}
}
lastStep<<<1, 1, 128, *stream>>>(reinterpret_cast<sd::LongType*>(result.specialBuffer()), tempBufferA, tempResult,
length, blockSize);
// tempA.syncToHost();
// tempB.syncToHost();
// result.assign((length <= blockSize?tempA.e(0) : tempB.e(0)));
NDArray::registerSpecialUse({&result}, {&array});
}
void hashCode(LaunchContext* context, NDArray& array, NDArray& result) {
BUILD_SINGLE_SELECTOR(array.dataType(), hashCode_, (context, array, result), SD_COMMON_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void hashCode_, (LaunchContext * context, NDArray& array, NDArray& result),
SD_COMMON_TYPES);
} // namespace helpers
} // namespace ops
} // namespace sd
|
e06b80c68149a18c960cdfe41f6d4ee82510cf4a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
 * This sample implements a conjugate gradient solver on GPU
* using CUBLAS and CUSPARSE
*
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h> // for gettimeofday() used by mclock() below
/* Using updated (v2) interfaces to cublas and cusparse */
#include <hip/hip_runtime.h>
#include <cusparse_v2.h>
#include <rocblas.h>
// Utilities and system includes
#include <helper_functions.h> // helper for shared functions common to CUDA SDK samples
#include <helper_cuda.h> // helper function CUDA error checking and initialization
const char *sSDKname = "conjugateGradient";
double mclock(){
struct timeval tp;
double sec,usec;
gettimeofday( &tp, NULL );
sec = double( tp.tv_sec );
usec = double( tp.tv_usec )/1E6;
return sec + usec;
}
#define dot_BS 32
#define kernel_BS 32
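// Hand-written kernels used by the CG solvers below in place of the
// corresponding cuBLAS/cuSPARSE library calls (the replaced calls are kept as
// commented-out lines next to each kernel launch).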
__global__
void saxpy(int n, float alpha, float *x, float *y) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i < n)
y[i] = alpha * x[i] + y[i];
}
__global__
void scal(int n, float alpha, float *y) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i < n)
y[i] = alpha * y[i];
}
__global__
void cpy(int n, float *src, float *dst) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i < n)
dst[i] = src[i];
}
__global__
void dot(int n, float *src, float *dst) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
int tid = threadIdx.x;
extern __shared__ float c_shared[];
    // index shared memory with the local thread id (not the global index) and
    // pad missing elements with zero so every thread in the block reaches the
    // __syncthreads() barriers
    c_shared[tid] = (i < n) ? src[i] * dst[i] : 0.0f;
    __syncthreads();
    for (unsigned int s=1; s < blockDim.x; s *= 2) {
        if (tid % (2*s) == 0)
            c_shared[tid] += c_shared[tid + s];
        __syncthreads();
    }
    if (tid == 0)
        dst[blockIdx.x] = c_shared[0];
}
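// CSR sparse matrix-vector product y = A*x, one thread per row. The m, nnz,
// alpha and beta arguments only mirror the cusparseScsrmv signature and are
// ignored here; every call site passes alpha = 1 and beta = 0.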
__global__
void csrmv(int m, int n, int nnz, float alpha, float *csrValA, int *csrRowPtrA,
int *csrColIdA, float *x, float beta, float *y)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j;
float sub = 0;
if (i < n) {
for (j = csrRowPtrA[i]; j < csrRowPtrA[i+1]; j++)
sub += csrValA[j] * x[csrColIdA[j]];
y[i] = sub;
}
}
__global__
void dot(int n, float *x, float *y, float *result)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i < n) {
y[i] = x[i] * y[i];
}
}
/* genTridiag: generate a random tridiagonal symmetric matrix */
void genTridiag(int *I, int *J, float *val, int N, int nz)
{
double RAND_MAXi = 1e6;
double val_r = 12.345 * 1e5;
I[0] = 0, J[0] = 0, J[1] = 1;
val[0] = (float)val_r/RAND_MAXi + 10.0f;
val[1] = (float)val_r/RAND_MAXi;
int start;
for (int i = 1; i < N; i++)
{
if (i > 1)
{
I[i] = I[i-1]+3;
}
else
{
I[1] = 2;
}
start = (i-1)*3 + 2;
J[start] = i - 1;
J[start+1] = i;
if (i < N-1)
{
J[start+2] = i + 1;
}
val[start] = val[start-1];
val[start+1] = (float)val_r/RAND_MAXi + 10.0f;
if (i < N-1)
{
val[start+2] = (float)val_r/RAND_MAXi;
}
}
I[N] = nz;
}
void cgs_basic(int argc, char **argv, int N, int M){
//int M = 0, N = 0,
int nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = 1e-10f;
const int max_iter = 1000;
float *x;
float *rhs;
float a, b, na, r0, r1;
int *d_col, *d_row;
float *d_val, *d_x, dot;
float *d_r, *d_p, *d_Ax;
int k;
float alpha, beta, alpham1;
// This will pick the best possible CUDA capable device
hipDeviceProp_t deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11)
{
printf("%s: requires a minimum CUDA compute 1.1 capability\n", sSDKname);
hipDeviceReset();
exit(EXIT_SUCCESS);
}
/* Generate a random tridiagonal symmetric matrix in CSR format */
//M = N = 32*64;//10; //1048576;
printf("M = %d, N = %d\n", M, N);
nz = (N-2)*3 + 4;
I = (int *)malloc(sizeof(int)*(N+1));
J = (int *)malloc(sizeof(int)*nz);
val = (float *)malloc(sizeof(float)*nz);
genTridiag(I, J, val, N, nz);
/*
for (int i = 0; i < nz; i++){
printf("%d\t", J[i]);
}
printf("\n");
for (int i = 0; i < nz; i++){
printf("%2f\t", val[i]);
}
*/
x = (float *)malloc(sizeof(float)*N);
rhs = (float *)malloc(sizeof(float)*N);
for (int i = 0; i < N; i++)
{
rhs[i] = 1.0;
x[i] = 0.0;
}
/* Get handle to the CUBLAS context */
hipblasHandle_t cublasHandle = 0;
hipblasStatus_t cublasStatus;
cublasStatus = hipblasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Get handle to the CUSPARSE context */
hipsparseHandle_t cusparseHandle = 0;
hipsparseStatus_t cusparseStatus;
cusparseStatus = hipsparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
hipsparseMatDescr_t descr = 0;
cusparseStatus = hipsparseCreateMatDescr(&descr);
checkCudaErrors(cusparseStatus);
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(hipMalloc((void **)&d_col, nz*sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_row, (N+1)*sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_val, nz*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_x, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_r, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_p, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_Ax, N*sizeof(float)));
hipMemcpy(d_col, J, nz*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_row, I, (N+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_val, val, nz*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_r, rhs, N*sizeof(float), hipMemcpyHostToDevice);
alpha = 1.0;
alpham1 = -1.0;
beta = 0.0;
r0 = 0.;
double t_start = mclock();
//hipsparseScsrmv(cusparseHandle,HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax);
hipLaunchKernelGGL(( csrmv), dim3((N+255)/256), dim3(256), 0, 0, N, N, nz, alpha, d_val, d_row, d_col, d_x, beta, d_Ax);
//hipblasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1); // PODMIEN FUNCKJE (I)
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, alpham1, d_Ax, d_r);
cublasStatus = hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // PODMIEN FUNCKJE (II)
k = 1;
while (r1 > tol*tol && k <= max_iter)
{
if (k > 1)
{
b = r1 / r0;
//cublasStatus = hipblasSscal(cublasHandle, N, &b, d_p, 1); // PODMIEN FUNCKJE (I)
hipLaunchKernelGGL(( scal), dim3((N+255)/256), dim3(256), 0, 0, N, b, d_p);
//cublasStatus = hipblasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1); // PODMIEN FUNCKJE (I)
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, alpha, d_r, d_p);
}
else
{
cublasStatus = hipblasScopy(cublasHandle, N, d_r, 1, d_p, 1); // PODMIEN FUNCKJE (I)
}
//hipsparseScsrmv(cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax); // PODMIEN FUNCKJE (III)
hipLaunchKernelGGL(( csrmv), dim3((N+255)/256), dim3(256), 0, 0, N, N, nz, alpha, d_val, d_row, d_col, d_p, beta, d_Ax);
cublasStatus = hipblasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot); // PODMIEN FUNCKJE (II)
a = r1 / dot;
//cublasStatus = hipblasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1); // PODMIEN FUNCKJE (I)
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, a, d_p, d_x);
na = -a;
//cublasStatus = hipblasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1); // PODMIEN FUNCKJE (I)
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, na, d_Ax, d_r);
r0 = r1;
cublasStatus = hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // PODMIEN FUNCKJE (II)
hipDeviceSynchronize();
printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
printf("TIME OF CGS_BASIC = %f\n", mclock() - t_start);
hipMemcpy(x, d_x, N*sizeof(float), hipMemcpyDeviceToHost);
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++)
{
rsum = 0.0;
for (int j = I[i]; j < I[i+1]; j++)
{
rsum += val[j]*x[J[j]];
}
diff = fabs(rsum - rhs[i]);
if (diff > err)
{
err = diff;
}
}
hipsparseDestroy(cusparseHandle);
hipblasDestroy(cublasHandle);
free(I);
free(J);
free(val);
free(x);
free(rhs);
hipFree(d_col);
hipFree(d_row);
hipFree(d_val);
hipFree(d_x);
hipFree(d_r);
hipFree(d_p);
hipFree(d_Ax);
hipDeviceReset();
printf("Test Summary: Error amount = %e\n", err);
//exit((k <= max_iter) ? 0 : 1);
}
void cgs_TODO(int argc, char **argv, int N, int M){
//int M = 0, N = 0,
int nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = 1e-10f;
const int max_iter = 1000;
float *x;
float *rhs;
float a, b, na, r0, r1;
int *d_col, *d_row;
float *d_val, *d_x, dot;
float *d_r, *d_p, *d_Ax;
int k;
float alpha, beta, alpham1;
// This will pick the best possible CUDA capable device
hipDeviceProp_t deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11)
{
printf("%s: requires a minimum CUDA compute 1.1 capability\n", sSDKname);
hipDeviceReset();
exit(EXIT_SUCCESS);
}
/* Generate a random tridiagonal symmetric matrix in CSR format */
//M = N = 32*64;//10; //1048576;
printf("M = %d, N = %d\n", M, N);
nz = (N-2)*3 + 4;
I = (int *)malloc(sizeof(int)*(N+1));
J = (int *)malloc(sizeof(int)*nz);
val = (float *)malloc(sizeof(float)*nz);
genTridiag(I, J, val, N, nz);
/*
for (int i = 0; i < nz; i++){
printf("%d\t", J[i]);
}
printf("\n");
for (int i = 0; i < nz; i++){
printf("%2f\t", val[i]);
}
*/
x = (float *)malloc(sizeof(float)*N);
rhs = (float *)malloc(sizeof(float)*N);
for (int i = 0; i < N; i++)
{
rhs[i] = 1.0;
x[i] = 0.0;
}
/* Get handle to the CUBLAS context */
hipblasHandle_t cublasHandle = 0;
hipblasStatus_t cublasStatus;
cublasStatus = hipblasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Get handle to the CUSPARSE context */
hipsparseHandle_t cusparseHandle = 0;
hipsparseStatus_t cusparseStatus;
cusparseStatus = hipsparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
hipsparseMatDescr_t descr = 0;
cusparseStatus = hipsparseCreateMatDescr(&descr);
checkCudaErrors(cusparseStatus);
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(hipMalloc((void **)&d_col, nz*sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_row, (N+1)*sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_val, nz*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_x, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_r, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_p, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_Ax, N*sizeof(float)));
hipMemcpy(d_col, J, nz*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_row, I, (N+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_val, val, nz*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_r, rhs, N*sizeof(float), hipMemcpyHostToDevice);
alpha = 1.0;
alpham1 = -1.0;
beta = 0.0;
r0 = 0.;
// sparse matrix vector product: d_Ax = A * d_x
//hipsparseScsrmv(cusparseHandle,HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax); // PODMIEN FUNCKJE (ZADANIE-I)
hipLaunchKernelGGL(( csrmv), dim3((N+255)/256), dim3(256), 0, 0, N, N, nz, alpha, d_val, d_row, d_col, d_x, beta, d_Ax);
//azpy: d_r = d_r + alpham1 * d_Ax
//hipblasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1); // PODMIEN FUNCKJE (ZADANIE-I)
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, alpham1, d_Ax, d_r);
//dot: r1 = d_r * d_r
cublasStatus = hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // PODMIEN FUNCKJE (ZADANIE-III)
k = 1;
while (r1 > tol*tol && k <= max_iter)
{
if (k > 1)
{
b = r1 / r0;
//scal: d_p = b * d_p
//cublasStatus = hipblasSscal(cublasHandle, N, &b, d_p, 1); // PODMIEN FUNCKJE (ZADANIE-I)
hipLaunchKernelGGL(( scal), dim3((N+255)/256), dim3(256), 0, 0, N, b, d_p);
//axpy: d_p = d_p + alpha * d_r
//cublasStatus = hipblasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1); // PODMIEN FUNCKJE (ZADANIE-I)
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, alpha, d_r, d_p);
}
else
{
//cpy: d_p = d_r
//cublasStatus = hipblasScopy(cublasHandle, N, d_r, 1, d_p, 1); // PODMIEN FUNCKJE (ZADANIE-I)
hipLaunchKernelGGL(( cpy), dim3((N+255)/256), dim3(256), 0, 0, N, d_r, d_p);
}
//sparse matrix-vector product: d_Ax = A * d_p
//hipsparseScsrmv(cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax); // PODMIEN FUNCKJE (ZADANIE-II)
hipLaunchKernelGGL(( csrmv), dim3((N+255)/256), dim3(256), 0, 0, N, N, nz, alpha, d_val, d_row, d_col, d_p, beta, d_Ax);
cublasStatus = hipblasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot); // PODMIEN FUNCKJE (ZADANIE-III)
a = r1 / dot;
//axpy: d_x = d_x + a*d_p
//cublasStatus = hipblasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1); // PODMIEN FUNCKJE (ZADANIE-I)
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, a, d_p, d_x);
na = -a;
//axpy: d_r = d_r + na * d_Ax
//cublasStatus = hipblasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1); // PODMIEN FUNCKJE (ZADANIE-I)
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, na, d_Ax, d_r);
r0 = r1;
//dot: r1 = d_r * d_r
cublasStatus = hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // PODMIEN FUNCKJE (ZADANIE-III)
hipDeviceSynchronize();
printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
hipMemcpy(x, d_x, N*sizeof(float), hipMemcpyDeviceToHost);
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++)
{
rsum = 0.0;
for (int j = I[i]; j < I[i+1]; j++)
{
rsum += val[j]*x[J[j]];
}
diff = fabs(rsum - rhs[i]);
if (diff > err)
{
err = diff;
}
}
hipsparseDestroy(cusparseHandle);
hipblasDestroy(cublasHandle);
free(I);
free(J);
free(val);
free(x);
free(rhs);
hipFree(d_col);
hipFree(d_row);
hipFree(d_val);
hipFree(d_x);
hipFree(d_r);
hipFree(d_p);
hipFree(d_Ax);
hipDeviceReset();
printf("Test Summary: Error amount = %e\n", err);
//exit((k <= max_iter) ? 0 : 1);
}
int main(int argc, char **argv)
{
//int N = 1e6;//1 << 20;
//int N = 256 * (1<<10) -10 ; //1e6;//1 << 20;
int N = 1e5;
int M = N;
cgs_basic(argc, argv, N, M);
cgs_TODO(argc, argv, N, M);
}
| e06b80c68149a18c960cdfe41f6d4ee82510cf4a.cu | /*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
 * This sample implements a conjugate gradient solver on GPU
* using CUBLAS and CUSPARSE
*
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h> // for gettimeofday() used by mclock() below
/* Using updated (v2) interfaces to cublas and cusparse */
#include <cuda_runtime.h>
#include <cusparse_v2.h>
#include <cublas_v2.h>
// Utilities and system includes
#include <helper_functions.h> // helper for shared functions common to CUDA SDK samples
#include <helper_cuda.h> // helper function CUDA error checking and initialization
const char *sSDKname = "conjugateGradient";
double mclock(){
struct timeval tp;
double sec,usec;
gettimeofday( &tp, NULL );
sec = double( tp.tv_sec );
usec = double( tp.tv_usec )/1E6;
return sec + usec;
}
#define dot_BS 32
#define kernel_BS 32
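// Hand-written kernels used by the CG solvers below in place of the
// corresponding cuBLAS/cuSPARSE library calls (the replaced calls are kept as
// commented-out lines next to each kernel launch).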
__global__
void saxpy(int n, float alpha, float *x, float *y) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i < n)
y[i] = alpha * x[i] + y[i];
}
__global__
void scal(int n, float alpha, float *y) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i < n)
y[i] = alpha * y[i];
}
__global__
void cpy(int n, float *src, float *dst) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i < n)
dst[i] = src[i];
}
__global__
void dot(int n, float *src, float *dst) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
int tid = threadIdx.x;
extern __shared__ float c_shared[];
    // index shared memory with the local thread id (not the global index) and
    // pad missing elements with zero so every thread in the block reaches the
    // __syncthreads() barriers
    c_shared[tid] = (i < n) ? src[i] * dst[i] : 0.0f;
    __syncthreads();
    for (unsigned int s=1; s < blockDim.x; s *= 2) {
        if (tid % (2*s) == 0)
            c_shared[tid] += c_shared[tid + s];
        __syncthreads();
    }
    if (tid == 0)
        dst[blockIdx.x] = c_shared[0];
}
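// CSR sparse matrix-vector product y = A*x, one thread per row. The m, nnz,
// alpha and beta arguments only mirror the cusparseScsrmv signature and are
// ignored here; every call site passes alpha = 1 and beta = 0.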
__global__
void csrmv(int m, int n, int nnz, float alpha, float *csrValA, int *csrRowPtrA,
int *csrColIdA, float *x, float beta, float *y)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j;
float sub = 0;
if (i < n) {
for (j = csrRowPtrA[i]; j < csrRowPtrA[i+1]; j++)
sub += csrValA[j] * x[csrColIdA[j]];
y[i] = sub;
}
}
__global__
void dot(int n, float *x, float *y, float *result)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i < n) {
y[i] = x[i] * y[i];
}
}
/* genTridiag: generate a random tridiagonal symmetric matrix */
void genTridiag(int *I, int *J, float *val, int N, int nz)
{
double RAND_MAXi = 1e6;
double val_r = 12.345 * 1e5;
I[0] = 0, J[0] = 0, J[1] = 1;
val[0] = (float)val_r/RAND_MAXi + 10.0f;
val[1] = (float)val_r/RAND_MAXi;
int start;
for (int i = 1; i < N; i++)
{
if (i > 1)
{
I[i] = I[i-1]+3;
}
else
{
I[1] = 2;
}
start = (i-1)*3 + 2;
J[start] = i - 1;
J[start+1] = i;
if (i < N-1)
{
J[start+2] = i + 1;
}
val[start] = val[start-1];
val[start+1] = (float)val_r/RAND_MAXi + 10.0f;
if (i < N-1)
{
val[start+2] = (float)val_r/RAND_MAXi;
}
}
I[N] = nz;
}
void cgs_basic(int argc, char **argv, int N, int M){
//int M = 0, N = 0,
int nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = 1e-10f;
const int max_iter = 1000;
float *x;
float *rhs;
float a, b, na, r0, r1;
int *d_col, *d_row;
float *d_val, *d_x, dot;
float *d_r, *d_p, *d_Ax;
int k;
float alpha, beta, alpham1;
// This will pick the best possible CUDA capable device
cudaDeviceProp deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11)
{
printf("%s: requires a minimum CUDA compute 1.1 capability\n", sSDKname);
cudaDeviceReset();
exit(EXIT_SUCCESS);
}
/* Generate a random tridiagonal symmetric matrix in CSR format */
//M = N = 32*64;//10; //1048576;
printf("M = %d, N = %d\n", M, N);
nz = (N-2)*3 + 4;
I = (int *)malloc(sizeof(int)*(N+1));
J = (int *)malloc(sizeof(int)*nz);
val = (float *)malloc(sizeof(float)*nz);
genTridiag(I, J, val, N, nz);
/*
for (int i = 0; i < nz; i++){
printf("%d\t", J[i]);
}
printf("\n");
for (int i = 0; i < nz; i++){
printf("%2f\t", val[i]);
}
*/
x = (float *)malloc(sizeof(float)*N);
rhs = (float *)malloc(sizeof(float)*N);
for (int i = 0; i < N; i++)
{
rhs[i] = 1.0;
x[i] = 0.0;
}
/* Get handle to the CUBLAS context */
cublasHandle_t cublasHandle = 0;
cublasStatus_t cublasStatus;
cublasStatus = cublasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Get handle to the CUSPARSE context */
cusparseHandle_t cusparseHandle = 0;
cusparseStatus_t cusparseStatus;
cusparseStatus = cusparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
cusparseMatDescr_t descr = 0;
cusparseStatus = cusparseCreateMatDescr(&descr);
checkCudaErrors(cusparseStatus);
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(cudaMalloc((void **)&d_col, nz*sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_row, (N+1)*sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_val, nz*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_x, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_r, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_p, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_Ax, N*sizeof(float)));
cudaMemcpy(d_col, J, nz*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_row, I, (N+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_val, val, nz*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_r, rhs, N*sizeof(float), cudaMemcpyHostToDevice);
alpha = 1.0;
alpham1 = -1.0;
beta = 0.0;
r0 = 0.;
double t_start = mclock();
//cusparseScsrmv(cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax);
csrmv<<<(N+255)/256, 256>>>(N, N, nz, alpha, d_val, d_row, d_col, d_x, beta, d_Ax);
//cublasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1); // PODMIEN FUNCKJE (I)
saxpy<<<(N+255)/256, 256>>>(N, alpham1, d_Ax, d_r);
cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // PODMIEN FUNCKJE (II)
k = 1;
while (r1 > tol*tol && k <= max_iter)
{
if (k > 1)
{
b = r1 / r0;
//cublasStatus = cublasSscal(cublasHandle, N, &b, d_p, 1); // PODMIEN FUNCKJE (I)
scal<<<(N+255)/256, 256>>>(N, b, d_p);
//cublasStatus = cublasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1); // PODMIEN FUNCKJE (I)
saxpy<<<(N+255)/256, 256>>>(N, alpha, d_r, d_p);
}
else
{
cublasStatus = cublasScopy(cublasHandle, N, d_r, 1, d_p, 1); // PODMIEN FUNCKJE (I)
}
//cusparseScsrmv(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax); // PODMIEN FUNCKJE (III)
csrmv<<<(N+255)/256, 256>>>(N, N, nz, alpha, d_val, d_row, d_col, d_p, beta, d_Ax);
cublasStatus = cublasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot); // PODMIEN FUNCKJE (II)
a = r1 / dot;
//cublasStatus = cublasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1); // PODMIEN FUNCKJE (I)
saxpy<<<(N+255)/256, 256>>>(N, a, d_p, d_x);
na = -a;
//cublasStatus = cublasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1); // PODMIEN FUNCKJE (I)
saxpy<<<(N+255)/256, 256>>>(N, na, d_Ax, d_r);
r0 = r1;
cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // PODMIEN FUNCKJE (II)
cudaThreadSynchronize();
printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
printf("TIME OF CGS_BASIC = %f\n", mclock() - t_start);
cudaMemcpy(x, d_x, N*sizeof(float), cudaMemcpyDeviceToHost);
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++)
{
rsum = 0.0;
for (int j = I[i]; j < I[i+1]; j++)
{
rsum += val[j]*x[J[j]];
}
diff = fabs(rsum - rhs[i]);
if (diff > err)
{
err = diff;
}
}
cusparseDestroy(cusparseHandle);
cublasDestroy(cublasHandle);
free(I);
free(J);
free(val);
free(x);
free(rhs);
cudaFree(d_col);
cudaFree(d_row);
cudaFree(d_val);
cudaFree(d_x);
cudaFree(d_r);
cudaFree(d_p);
cudaFree(d_Ax);
cudaDeviceReset();
printf("Test Summary: Error amount = %e\n", err);
//exit((k <= max_iter) ? 0 : 1);
}
void cgs_TODO(int argc, char **argv, int N, int M){
//int M = 0, N = 0,
int nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = 1e-10f;
const int max_iter = 1000;
float *x;
float *rhs;
float a, b, na, r0, r1;
int *d_col, *d_row;
float *d_val, *d_x, dot;
float *d_r, *d_p, *d_Ax;
int k;
float alpha, beta, alpham1;
// This will pick the best possible CUDA capable device
cudaDeviceProp deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11)
{
printf("%s: requires a minimum CUDA compute 1.1 capability\n", sSDKname);
cudaDeviceReset();
exit(EXIT_SUCCESS);
}
/* Generate a random tridiagonal symmetric matrix in CSR format */
//M = N = 32*64;//10; //1048576;
printf("M = %d, N = %d\n", M, N);
nz = (N-2)*3 + 4;
I = (int *)malloc(sizeof(int)*(N+1));
J = (int *)malloc(sizeof(int)*nz);
val = (float *)malloc(sizeof(float)*nz);
genTridiag(I, J, val, N, nz);
/*
for (int i = 0; i < nz; i++){
printf("%d\t", J[i]);
}
printf("\n");
for (int i = 0; i < nz; i++){
printf("%2f\t", val[i]);
}
*/
x = (float *)malloc(sizeof(float)*N);
rhs = (float *)malloc(sizeof(float)*N);
for (int i = 0; i < N; i++)
{
rhs[i] = 1.0;
x[i] = 0.0;
}
/* Get handle to the CUBLAS context */
cublasHandle_t cublasHandle = 0;
cublasStatus_t cublasStatus;
cublasStatus = cublasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Get handle to the CUSPARSE context */
cusparseHandle_t cusparseHandle = 0;
cusparseStatus_t cusparseStatus;
cusparseStatus = cusparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
cusparseMatDescr_t descr = 0;
cusparseStatus = cusparseCreateMatDescr(&descr);
checkCudaErrors(cusparseStatus);
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(cudaMalloc((void **)&d_col, nz*sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_row, (N+1)*sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_val, nz*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_x, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_r, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_p, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_Ax, N*sizeof(float)));
cudaMemcpy(d_col, J, nz*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_row, I, (N+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_val, val, nz*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_r, rhs, N*sizeof(float), cudaMemcpyHostToDevice);
alpha = 1.0;
alpham1 = -1.0;
beta = 0.0;
r0 = 0.;
// sparse matrix vector product: d_Ax = A * d_x
//cusparseScsrmv(cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax); // REPLACE THE FUNCTION (TASK I)
csrmv<<<(N+255)/256, 256>>>(N, N, nz, alpha, d_val, d_row, d_col, d_x, beta, d_Ax);
//axpy: d_r = d_r + alpham1 * d_Ax
//cublasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1); // REPLACE THE FUNCTION (TASK I)
saxpy<<<(N+255)/256, 256>>>(N, alpham1, d_Ax, d_r);
//dot: r1 = d_r * d_r
cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THE FUNCTION (TASK III); an illustrative dot-product kernel sketch follows this function
k = 1;
while (r1 > tol*tol && k <= max_iter)
{
if (k > 1)
{
b = r1 / r0;
//scal: d_p = b * d_p
//cublasStatus = cublasSscal(cublasHandle, N, &b, d_p, 1); // REPLACE THE FUNCTION (TASK I)
scal<<<(N+255)/256, 256>>>(N, b, d_p);
//axpy: d_p = d_p + alpha * d_r
//cublasStatus = cublasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1); // REPLACE THE FUNCTION (TASK I)
saxpy<<<(N+255)/256, 256>>>(N, alpha, d_r, d_p);
}
else
{
//cpy: d_p = d_r
//cublasStatus = cublasScopy(cublasHandle, N, d_r, 1, d_p, 1); // REPLACE THE FUNCTION (TASK I)
cpy<<<(N+255)/256, 256>>>(N, d_r, d_p);
}
//sparse matrix-vector product: d_Ax = A * d_p
//cusparseScsrmv(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax); // REPLACE THE FUNCTION (TASK II)
csrmv<<<(N+255)/256, 256>>>(N, N, nz, alpha, d_val, d_row, d_col, d_p, beta, d_Ax);
cublasStatus = cublasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot); // REPLACE THE FUNCTION (TASK III)
a = r1 / dot;
//axpy: d_x = d_x + a*d_p
//cublasStatus = cublasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1); // REPLACE THE FUNCTION (TASK I)
saxpy<<<(N+255)/256, 256>>>(N, a, d_p, d_x);
na = -a;
//axpy: d_r = d_r + na * d_Ax
//cublasStatus = cublasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1); // REPLACE THE FUNCTION (TASK I)
saxpy<<<(N+255)/256, 256>>>(N, na, d_Ax, d_r);
r0 = r1;
//dot: r1 = d_r * d_r
cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THE FUNCTION (TASK III)
cudaThreadSynchronize();
printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
cudaMemcpy(x, d_x, N*sizeof(float), cudaMemcpyDeviceToHost);
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++)
{
rsum = 0.0;
for (int j = I[i]; j < I[i+1]; j++)
{
rsum += val[j]*x[J[j]];
}
diff = fabs(rsum - rhs[i]);
if (diff > err)
{
err = diff;
}
}
cusparseDestroy(cusparseHandle);
cublasDestroy(cublasHandle);
free(I);
free(J);
free(val);
free(x);
free(rhs);
cudaFree(d_col);
cudaFree(d_row);
cudaFree(d_val);
cudaFree(d_x);
cudaFree(d_r);
cudaFree(d_p);
cudaFree(d_Ax);
cudaDeviceReset();
printf("Test Summary: Error amount = %e\n", err);
//exit((k <= max_iter) ? 0 : 1);
}
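/*
A sketch only, not part of the original assignment code: one possible kernel for
the cublasSdot calls still marked "REPLACE THE FUNCTION (TASK III)" above. The
name dot_atomic, the fixed 256-thread block and the atomicAdd accumulation are
assumptions (float atomicAdd needs compute capability 2.0 or newer); *result
must be zeroed on the device before the launch and copied back afterwards.
*/
__global__ void dot_atomic(int n, const float *a, const float *b, float *result)
{
__shared__ float cache[256]; // one partial product per thread, assumes blockDim.x == 256
int i = blockIdx.x * blockDim.x + threadIdx.x;
cache[threadIdx.x] = (i < n) ? a[i] * b[i] : 0.0f;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s >>= 1) { // tree reduction within the block
if (threadIdx.x < s) cache[threadIdx.x] += cache[threadIdx.x + s];
__syncthreads();
}
if (threadIdx.x == 0) atomicAdd(result, cache[0]); // combine the block sums
}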
int main(int argc, char **argv)
{
//int N = 1e6;//1 << 20;
//int N = 256 * (1<<10) -10 ; //1e6;//1 << 20;
int N = 1e5;
int M = N;
cgs_basic(argc, argv, N, M);
cgs_TODO(argc, argv, N, M);
}
|
9277a18ff2bdd47978c1cdea8e4341606153ac21.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdafx.h"
#include <hip/hip_runtime.h>
#include <cutil_inline.h>
//*****************************************************************************
//init matrix non-boundarys
__global__ void kernelInitMatrixInner(const int n, const float value,
float *m, float *w)
{
int id = blockIdx.x * blockDim.x +threadIdx.x;
if (id < n * n)
{
m[id] = value;
w[id] = value;
}
}
//*****************************************************************************
//init matrix boundarys
__global__ void kernelInitMatrixBoundary(const int n, const boundary b,
float *m, float *w)
{
int id = blockIdx.x * blockDim.x +threadIdx.x;
if (id < n - 1)
{
int leftId = id * n + n;
m[leftId] = b.left; w[leftId] = b.left;
int upId = id;
m[upId] = b.up; w[upId] = b.up;
int rightId = leftId - 1;
m[rightId] = b.right; w[rightId] = b.right;
int downId = n * n - n + 1 + upId;
m[downId] = b.down; w[downId] = b.down;
}
}
//*****************************************************************************
//init matrix
void initMatrix(const int n, const struct boundary b, float *d_m, float *d_w)
{
int blockSize = 256;
dim3 block(blockSize);
dim3 matrixBoundaryGrid(getQuotient(n - 1, blockSize));
dim3 matrixInnerGrid(getQuotient(n * n, blockSize));
hipLaunchKernelGGL(( kernelInitMatrixInner)
, dim3(matrixInnerGrid), dim3(block), 0, 0, n, b.averageValue, d_m, d_w);
hipLaunchKernelGGL(( kernelInitMatrixBoundary)
, dim3(matrixBoundaryGrid), dim3(block), 0, 0, n, b, d_m, d_w);
}
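/*
getQuotient() is assumed to be a ceiling-division helper supplied by the project
headers (stdafx.h): it returns how many blocks of size b are needed to cover a
items. A minimal equivalent sketch with a hypothetical name, not called by the
code in this file:
*/
static inline int ceilDivSketch(int a, int b) { return (a + b - 1) / b; }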
//*****************************************************************************
//kernel of jacobi iteration process with float in iteration mode
__global__ void kernelJacobiIteration(const int n, float *m, float *w)
{
int id = blockIdx.x * blockDim.x +threadIdx.x;
if (id < (n - 2) * (n - 2))
{
int row = id / (n - 2);
int column = id - row * (n - 2);
int location = (row + 1) * n + (column + 1);
w[location] = (m[location - 1] + m[location - n] + m[location + 1]
+ m[location + n]) / 4.0;
}
}
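/* One Jacobi sweep: each thread owns a single interior point (id maps to row+1,
column+1 of the n x n grid) and writes the average of its four neighbours from
the read buffer m into the write buffer w; the host swaps m and w between sweeps. */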
//*****************************************************************************
//kernel of getting epsilon between d_m and d_w
__global__ void kernelGetEpsilon(const int n,
float *m, float *w,
float *ep)
{
int id = blockIdx.x * blockDim.x +threadIdx.x;
if (id < (n - 2) * (n - 2))
{
int row = id / (n - 2);
int column = id - row * (n - 2);
int location = (row + 1) * n + (column + 1);
ep[id] = fabs(m[location] - w[location]);
}
}
//kernel of max of a num group
__global__ void kernelGetMax(const int count, float *ep)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < count)
if (ep[id] < ep[id + count]) ep[id] = ep[id + count];
}
//*****************************************************************************
//get epsilon
float getEpsilon(const int n, float *d_m, float *d_w)
{
float epsilon;
float *d_ep;
/*cutilSafeCall*/
(hipMalloc((void**) &d_ep, (n - 2) * (n - 2) * sizeof(float)));
// setup execution parameters
int blockSize = 256;
dim3 block(blockSize);
dim3 matrixInnerGrid(getQuotient((n - 2) * (n - 2), blockSize));
dim3 getMaxGrid(getQuotient((n - 2) * (n - 2) / 2, blockSize));
hipLaunchKernelGGL(( kernelGetEpsilon), dim3(matrixInnerGrid), dim3(block), 0, 0, n, d_m, d_w, d_ep);
//check if kernel execution generated and error
//cutilCheckMsg("Kernel execution failed");
//get max of d_ep
int count = (int)((n - 2) * (n - 2) / 2);
while(count > 1)
{
hipLaunchKernelGGL(( kernelGetMax), dim3(getMaxGrid), dim3(block), 0, 0, count, d_ep);
//check if kernel execution generated and error
//cutilCheckMsg("Kernel execution failed");
count = (count + 1) / 2;
}
float lastD_ep;
/*cutilSafeCall*/(hipMemcpy(&lastD_ep, d_ep + (n - 2) * (n - 2) - 1,
sizeof(float), hipMemcpyDeviceToHost));
/*cutilSafeCall*/
(hipMemcpy(&epsilon, d_ep, sizeof(float), hipMemcpyDeviceToHost));
/*cutilSafeCall*/(hipFree(d_ep));
if (epsilon < lastD_ep) epsilon = lastD_ep;
return epsilon;
}
//*****************************************************************************
void jacobiCUDAIterationEpsilon_1D_F(const int n, const float epsilon,
long *step, const struct boundary b,
float *d_m, float *d_w,
double *initTime, double *iterTime)
{
float *temp;
//timer
LARGE_INTEGER nStartCounter, nStopCounter;
//init data
printf("--Data initing(n=%d, epsilon=%lf).....", n, epsilon);
//timer starts
QueryPerformanceCounter(&nStartCounter);
//init
initMatrix(n, b, d_m, d_w);
//timer ends
QueryPerformanceCounter(&nStopCounter);
*initTime = getCostTime(nStartCounter, nStopCounter);
printf("Done.\n");
//iteration
printf("--Computing(n=%d, epsilon=%lf).....", n, epsilon);
//timer starts
QueryPerformanceCounter(&nStartCounter);
*step = 0;
float epsilonTemp = epsilon + 1;
// setup execution parameters
int blockSize = 256;
dim3 block(blockSize);
dim3 matrixInnerGrid(getQuotient((n - 2) * (n - 2), blockSize));
dim3 block2(blockSize);
dim3 matrixInnerGrid2(getQuotient((n - 2) * (n - 2), blockSize));
//iteration
while (epsilonTemp > epsilon)
{
(*step)++;
//execute the kernel
hipLaunchKernelGGL(( kernelJacobiIteration), dim3(matrixInnerGrid), dim3(block), 0, 0, n, d_m, d_w);
//check if kernel execution generated and error
//cutilCheckMsg("Kernel execution failed");
//epsilon
if (*step % JUMP == 0)
epsilonTemp = getEpsilon(n, d_m, d_w);
temp = d_m; d_m = d_w; d_w = temp;
}
//timer ends
QueryPerformanceCounter(&nStopCounter);
*iterTime = getCostTime(nStartCounter, nStopCounter);
printf("Done.\n");
return;
}
//********************************************************************************
void jacobiCUDAIterationStep_1D_F(const int n, float *epsilon,
const long step, const struct boundary b,
float *d_m, float *d_w,
double *initTime, double *iterTime)
{
////*****************************************************************************
// unsigned int matrixSize = n * n;
// unsigned int matrixMemSize = sizeof(float) * matrixSize;
// float *m = (float *)malloc(sizeof(float) * n * n);
////*****************************************************************************
float *temp;
//timer
LARGE_INTEGER nStartCounter, nStopCounter;
//init data
printf("--Data initing(n=%d, step=%d).....", n, step);
//timer starts
QueryPerformanceCounter(&nStartCounter);
//init
initMatrix(n, b, d_m, d_w);
////*****************************************************************************
// cutilSafeCall(hipMemcpy(m, d_m, matrixMemSize, hipMemcpyDeviceToHost));
// printf("\nm\n");
// for(int i = 0; i < 10; i++)
// {
// for(int j = 0; j < 6; j++)
// printf("%10.2lf ", m[i * n + j]);
// printf("\n");
// }
// cutilSafeCall(hipMemcpy(m, d_w, matrixMemSize, hipMemcpyDeviceToHost));
// printf("\nw\n");
// for(int i = 0; i < 10; i++)
// {
// for(int j = 0; j < 6; j++)
// printf("%10.2lf ", m[i * n + j]);
// printf("\n");
// }
// getchar();
////*****************************************************************************
//timer ends
QueryPerformanceCounter(&nStopCounter);
*initTime = getCostTime(nStartCounter, nStopCounter);
printf("Done.\n");
//iteration
printf("--Computing(n=%d, step=%d.....", n, epsilon);
//timer starts
QueryPerformanceCounter(&nStartCounter);
// setup execution parameters
int blockSize = 256;
dim3 block(blockSize);
dim3 matrixInnerGrid(getQuotient((n - 2) * (n - 2), blockSize));
for(int i = 0; i < step; i++)
{
//execute the kernel
hipLaunchKernelGGL(( kernelJacobiIteration), dim3(matrixInnerGrid), dim3(block), 0, 0, n, d_m, d_w);
////*****************************************************************************
// cutilSafeCall(hipMemcpy(m, d_m, matrixMemSize, hipMemcpyDeviceToHost));
// printf("\nm\n");
// for(int i = 0; i < 10; i++)
// {
// for(int j = 0; j < 6; j++)
// printf("%10.2lf ", m[i * n + j]);
// printf("\n");
// }
// cutilSafeCall(hipMemcpy(m, d_w, matrixMemSize, hipMemcpyDeviceToHost));
// printf("\nw\n");
// for(int i = 0; i < 10; i++)
// {
// for(int j = 0; j < 6; j++)
// printf("%10.2lf ", m[i * n + j]);
// printf("\n");
// }
// getchar();
////*****************************************************************************
temp = d_m; d_m = d_w; d_w = temp;
}
*epsilon = getEpsilon(n, d_m, d_w);
//timer ends
QueryPerformanceCounter(&nStopCounter);
*iterTime = getCostTime(nStartCounter, nStopCounter);
printf("Done.\n");
return;
}
//********************************************************************************
void jacobiCUDA_1D_F(int argc, char** argv,
int n, float epsilon,
long step, struct boundary b, char *outFile)
{
printf("Jacobi CUDA 1D -\n");
printf("--n=%d, e=%lf, step=%ld\n--LURD: %lf, %lf, %lf, %lf\n",
n, epsilon, step, b.left, b.up, b.right, b.down);
//init cuda device
if (cutCheckCmdLineFlag(argc, (const char**)argv, "device"))
cutilDeviceInit(argc, argv);
else
hipSetDevice(cutGetMaxGflopsDeviceId());
//allocate devce memory for matrices u
unsigned int matrixSize = n * n;
unsigned int matrixMemSize = sizeof(float) * matrixSize;
float *d_m;
/*cutilSafeCall*/(hipMalloc((void**) &d_m, matrixMemSize));
float *d_w;
/*cutilSafeCall*/(hipMalloc((void**) &d_w, matrixMemSize));
//timer
LARGE_INTEGER nStartCounter, nStopCounter;
double nTime1, nTime2, nTime3;
//jacobi serial 1D solution
if (epsilon != 0)
{
printf("--Epsilon mode\n");
jacobiCUDAIterationEpsilon_1D_F(n, epsilon, &step,
b, d_m, d_w, &nTime1, &nTime2);
printf("--Step = %ld\n", step);
}
else
{
printf("--Step mode\n");
jacobiCUDAIterationStep_1D_F(n, &epsilon, step,
b, d_m, d_w, &nTime1, &nTime2);
printf("--Epsilon = %lf\n", epsilon);
}
printf("--Result outputing...");
char *outDir = getOutDir(n, epsilon, b, step, outFile);
//timer starts
QueryPerformanceCounter(&nStartCounter);
float *m = (float *)malloc(sizeof(float) * n * n);
/*cutilSafeCall*/(hipMemcpy(m, d_m, matrixMemSize, hipMemcpyDeviceToHost));
//output result
outMatrix1DtoF_F(m, n, outDir);
//timer2 ends
QueryPerformanceCounter(&nStopCounter);
//get time
nTime3 = getCostTime(nStartCounter, nStopCounter);
printf("Done.\n");
printf("--(Time/s)Init=%lf, Computing=%lf, Data-saving=%lf, Total=%lf\n",
nTime1, nTime2, nTime3, nTime1 + nTime2 + nTime3);
outLog(n, epsilon, step, b, nTime1, nTime2, nTime3, outFile, outDir);
return;
} | 9277a18ff2bdd47978c1cdea8e4341606153ac21.cu | #include "stdafx.h"
#include <cuda_runtime.h>
#include <cutil_inline.h>
//*****************************************************************************
//init matrix non-boundarys
__global__ void kernelInitMatrixInner(const int n, const float value,
float *m, float *w)
{
int id = blockIdx.x * blockDim.x +threadIdx.x;
if (id < n * n)
{
m[id] = value;
w[id] = value;
}
}
//*****************************************************************************
//init matrix boundarys
__global__ void kernelInitMatrixBoundary(const int n, const boundary b,
float *m, float *w)
{
int id = blockIdx.x * blockDim.x +threadIdx.x;
if (id < n - 1)
{
int leftId = id * n + n;
m[leftId] = b.left; w[leftId] = b.left;
int upId = id;
m[upId] = b.up; w[upId] = b.up;
int rightId = leftId - 1;
m[rightId] = b.right; w[rightId] = b.right;
int downId = n * n - n + 1 + upId;
m[downId] = b.down; w[downId] = b.down;
}
}
//*****************************************************************************
//init matrix
void initMatrix(const int n, const struct boundary b, float *d_m, float *d_w)
{
int blockSize = 256;
dim3 block(blockSize);
dim3 matrixBoundaryGrid(getQuotient(n - 1, blockSize));
dim3 matrixInnerGrid(getQuotient(n * n, blockSize));
kernelInitMatrixInner
<<<matrixInnerGrid, block>>>(n, b.averageValue, d_m, d_w);
kernelInitMatrixBoundary
<<<matrixBoundaryGrid, block>>>(n, b, d_m, d_w);
}
//*****************************************************************************
//kernel of jacobi iteration process with float in iteration mode
__global__ void kernelJacobiIteration(const int n, float *m, float *w)
{
int id = blockIdx.x * blockDim.x +threadIdx.x;
if (id < (n - 2) * (n - 2))
{
int row = id / (n - 2);
int column = id - row * (n - 2);
int location = (row + 1) * n + (column + 1);
w[location] = (m[location - 1] + m[location - n] + m[location + 1]
+ m[location + n]) / 4.0;
}
}
//*****************************************************************************
//kernel of getting epsilon between d_m and d_w
__global__ void kernelGetEpsilon(const int n,
float *m, float *w,
float *ep)
{
int id = blockIdx.x * blockDim.x +threadIdx.x;
if (id < (n - 2) * (n - 2))
{
int row = id / (n - 2);
int column = id - row * (n - 2);
int location = (row + 1) * n + (column + 1);
ep[id] = fabs(m[location] - w[location]);
}
}
//kernel of max of a num group
__global__ void kernelGetMax(const int count, float *ep)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < count)
if (ep[id] < ep[id + count]) ep[id] = ep[id + count];
}
//*****************************************************************************
//get epsilon
float getEpsilon(const int n, float *d_m, float *d_w)
{
float epsilon;
float *d_ep;
/*cutilSafeCall*/
(cudaMalloc((void**) &d_ep, (n - 2) * (n - 2) * sizeof(float)));
// setup execution parameters
int blockSize = 256;
dim3 block(blockSize);
dim3 matrixInnerGrid(getQuotient((n - 2) * (n - 2), blockSize));
dim3 getMaxGrid(getQuotient((n - 2) * (n - 2) / 2, blockSize));
kernelGetEpsilon<<<matrixInnerGrid, block>>>(n, d_m, d_w, d_ep);
//check if kernel execution generated and error
//cutilCheckMsg("Kernel execution failed");
//get max of d_ep
int count = (int)((n - 2) * (n - 2) / 2);
while(count > 1)
{
kernelGetMax<<<getMaxGrid, block>>>(count, d_ep);
//check if kernel execution generated and error
//cutilCheckMsg("Kernel execution failed");
count = (count + 1) / 2;
}
float lastD_ep;
/*cutilSafeCall*/(cudaMemcpy(&lastD_ep, d_ep + (n - 2) * (n - 2) - 1,
sizeof(float), cudaMemcpyDeviceToHost));
/*cutilSafeCall*/
(cudaMemcpy(&epsilon, d_ep, sizeof(float), cudaMemcpyDeviceToHost));
/*cutilSafeCall*/(cudaFree(d_ep));
if (epsilon < lastD_ep) epsilon = lastD_ep;
return epsilon;
}
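/*
Illustrative alternative, not used above: the pairwise-max loop in getEpsilon()
launches one kernel per halving step. A single-kernel variant can reduce each
block to one maximum in shared memory and leave only gridDim.x values for a
final pass; the 256-thread block size is an assumption matching the rest of
this file, and the kernel name is hypothetical.
*/
__global__ void kernelBlockMaxSketch(const int count, const float *ep, float *blockMax)
{
__shared__ float cache[256];
int i = blockIdx.x * blockDim.x + threadIdx.x;
cache[threadIdx.x] = (i < count) ? ep[i] : 0.0f; // the epsilons are absolute values, so 0 is a safe identity
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s && cache[threadIdx.x] < cache[threadIdx.x + s])
cache[threadIdx.x] = cache[threadIdx.x + s];
__syncthreads();
}
if (threadIdx.x == 0) blockMax[blockIdx.x] = cache[0];
}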
//*****************************************************************************
void jacobiCUDAIterationEpsilon_1D_F(const int n, const float epsilon,
long *step, const struct boundary b,
float *d_m, float *d_w,
double *initTime, double *iterTime)
{
float *temp;
//timer
LARGE_INTEGER nStartCounter, nStopCounter;
//init data
printf("--Data initing(n=%d, epsilon=%lf).....", n, epsilon);
//timer starts
QueryPerformanceCounter(&nStartCounter);
//init
initMatrix(n, b, d_m, d_w);
//timer ends
QueryPerformanceCounter(&nStopCounter);
*initTime = getCostTime(nStartCounter, nStopCounter);
printf("Done.\n");
//iteration
printf("--Computing(n=%d, epsilon=%lf).....", n, epsilon);
//timer starts
QueryPerformanceCounter(&nStartCounter);
*step = 0;
float epsilonTemp = epsilon + 1;
// setup execution parameters
int blockSize = 256;
dim3 block(blockSize);
dim3 matrixInnerGrid(getQuotient((n - 2) * (n - 2), blockSize));
dim3 block2(blockSize);
dim3 matrixInnerGrid2(getQuotient((n - 2) * (n - 2), blockSize));
//iteration
while (epsilonTemp > epsilon)
{
(*step)++;
//execute the kernel
kernelJacobiIteration<<<matrixInnerGrid, block>>>(n, d_m, d_w);
//check if kernel execution generated and error
//cutilCheckMsg("Kernel execution failed");
//epsilon
if (*step % JUMP == 0)
epsilonTemp = getEpsilon(n, d_m, d_w);
temp = d_m; d_m = d_w; d_w = temp;
}
//timer ends
QueryPerformanceCounter(&nStopCounter);
*iterTime = getCostTime(nStartCounter, nStopCounter);
printf("Done.\n");
return;
}
//********************************************************************************
void jacobiCUDAIterationStep_1D_F(const int n, float *epsilon,
const long step, const struct boundary b,
float *d_m, float *d_w,
double *initTime, double *iterTime)
{
////*****************************************************************************
// unsigned int matrixSize = n * n;
// unsigned int matrixMemSize = sizeof(float) * matrixSize;
// float *m = (float *)malloc(sizeof(float) * n * n);
////*****************************************************************************
float *temp;
//timer
LARGE_INTEGER nStartCounter, nStopCounter;
//init data
printf("--Data initing(n=%d, step=%d).....", n, step);
//timer starts
QueryPerformanceCounter(&nStartCounter);
//init
initMatrix(n, b, d_m, d_w);
////*****************************************************************************
// cutilSafeCall(cudaMemcpy(m, d_m, matrixMemSize, cudaMemcpyDeviceToHost));
// printf("\nm\n");
// for(int i = 0; i < 10; i++)
// {
// for(int j = 0; j < 6; j++)
// printf("%10.2lf ", m[i * n + j]);
// printf("\n");
// }
// cutilSafeCall(cudaMemcpy(m, d_w, matrixMemSize, cudaMemcpyDeviceToHost));
// printf("\nw\n");
// for(int i = 0; i < 10; i++)
// {
// for(int j = 0; j < 6; j++)
// printf("%10.2lf ", m[i * n + j]);
// printf("\n");
// }
// getchar();
////*****************************************************************************
//timer ends
QueryPerformanceCounter(&nStopCounter);
*initTime = getCostTime(nStartCounter, nStopCounter);
printf("Done.\n");
//iteration
printf("--Computing(n=%d, step=%d.....", n, epsilon);
//timer starts
QueryPerformanceCounter(&nStartCounter);
// setup execution parameters
int blockSize = 256;
dim3 block(blockSize);
dim3 matrixInnerGrid(getQuotient((n - 2) * (n - 2), blockSize));
for(int i = 0; i < step; i++)
{
//execute the kernel
kernelJacobiIteration<<<matrixInnerGrid, block>>>(n, d_m, d_w);
////*****************************************************************************
// cutilSafeCall(cudaMemcpy(m, d_m, matrixMemSize, cudaMemcpyDeviceToHost));
// printf("\nm\n");
// for(int i = 0; i < 10; i++)
// {
// for(int j = 0; j < 6; j++)
// printf("%10.2lf ", m[i * n + j]);
// printf("\n");
// }
// cutilSafeCall(cudaMemcpy(m, d_w, matrixMemSize, cudaMemcpyDeviceToHost));
// printf("\nw\n");
// for(int i = 0; i < 10; i++)
// {
// for(int j = 0; j < 6; j++)
// printf("%10.2lf ", m[i * n + j]);
// printf("\n");
// }
// getchar();
////*****************************************************************************
temp = d_m; d_m = d_w; d_w = temp;
}
*epsilon = getEpsilon(n, d_m, d_w);
//timer ends
QueryPerformanceCounter(&nStopCounter);
*iterTime = getCostTime(nStartCounter, nStopCounter);
printf("Done.\n");
return;
}
//********************************************************************************
void jacobiCUDA_1D_F(int argc, char** argv,
int n, float epsilon,
long step, struct boundary b, char *outFile)
{
printf("Jacobi CUDA 1D -\n");
printf("--n=%d, e=%lf, step=%ld\n--LURD: %lf, %lf, %lf, %lf\n",
n, epsilon, step, b.left, b.up, b.right, b.down);
//init cuda device
if (cutCheckCmdLineFlag(argc, (const char**)argv, "device"))
cutilDeviceInit(argc, argv);
else
cudaSetDevice(cutGetMaxGflopsDeviceId());
//allocate devce memory for matrices u
unsigned int matrixSize = n * n;
unsigned int matrixMemSize = sizeof(float) * matrixSize;
float *d_m;
/*cutilSafeCall*/(cudaMalloc((void**) &d_m, matrixMemSize));
float *d_w;
/*cutilSafeCall*/(cudaMalloc((void**) &d_w, matrixMemSize));
//timer
LARGE_INTEGER nStartCounter, nStopCounter;
double nTime1, nTime2, nTime3;
//jacobi serial 1D solution
if (epsilon != 0)
{
printf("--Epsilon mode\n");
jacobiCUDAIterationEpsilon_1D_F(n, epsilon, &step,
b, d_m, d_w, &nTime1, &nTime2);
printf("--Step = %ld\n", step);
}
else
{
printf("--Step mode\n");
jacobiCUDAIterationStep_1D_F(n, &epsilon, step,
b, d_m, d_w, &nTime1, &nTime2);
printf("--Epsilon = %lf\n", epsilon);
}
printf("--Result outputing...");
char *outDir = getOutDir(n, epsilon, b, step, outFile);
//timer starts
QueryPerformanceCounter(&nStartCounter);
float *m = (float *)malloc(sizeof(float) * n * n);
/*cutilSafeCall*/(cudaMemcpy(m, d_m, matrixMemSize, cudaMemcpyDeviceToHost));
//output result
outMatrix1DtoF_F(m, n, outDir);
//timer2 ends
QueryPerformanceCounter(&nStopCounter);
//get time
nTime3 = getCostTime(nStartCounter, nStopCounter);
printf("Done.\n");
printf("--(Time/s)Init=%lf, Computing=%lf, Data-saving=%lf, Total=%lf\n",
nTime1, nTime2, nTime3, nTime1 + nTime2 + nTime3);
outLog(n, epsilon, step, b, nTime1, nTime2, nTime3, outFile, outDir);
return;
} |
a9b5f04e2c8e1b2b118a92d8f4ddae10dc0cabdb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
# Edge Base Approach
#{class} {use the bit manipulations for less memeory requirements}
@iterative
*/
#include<bits/stdc++.h>
#include<cuda.h>
#include<thrust/count.h>
#include<thrust/extrema.h>
#include<thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include<hiprand/hiprand_kernel.h>
#include "Utility.cuh"
#define MAXBLOCKS (1L<<32)
#define MOD 32
using namespace std;
__global__ void AssignColors(long int V,long int *preSum,long int *colIndex,long int *colors,long int *delta_Degree,bool *conflicts){
long int threadId = blockDim.x*blockIdx.x+threadIdx.x;
long int stride = blockDim.x*gridDim.x;
if(threadId<V&&!conflicts[threadId]){
return;
}
if(threadId<V){
for(long int i=threadId;i<V;i=i+stride){
long int *vforbidden = (long int*)malloc(sizeof(long int)*(*delta_Degree+1));
memset(vforbidden,0,sizeof(long int)*(*delta_Degree+1));
for(long int k=preSum[i];k<preSum[i+1];k++){
long int j = colIndex[k];
long int value = colors[j]%MOD;
long int shift = 1L<<value;
vforbidden[colors[j]/MOD]|= shift;
}
//Assign colors
for(long int color=1;color<=*delta_Degree+1;color++){
long int val = color%MOD;
if((vforbidden[color/MOD]&(1L<<val))== 0){
colors[i] = color;
break; // leave the colour loop so vforbidden is freed before the next vertex
}
}
free(vforbidden);
}
}
}
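/*
Bit layout used by AssignColors (descriptive note): colour c is recorded in
vforbidden[c / MOD] by setting bit (c % MOD), with MOD the 32-bit word width,
so testing whether a colour is still free is a single AND against that bit.
A small device-side sketch of the two operations, with hypothetical helper
names that the kernels in this file do not call:
*/
__device__ inline void markColourSketch(long int *forbidden, long int c) {
forbidden[c / MOD] |= (1L << (c % MOD));
}
__device__ inline bool isColourFreeSketch(const long int *forbidden, long int c) {
return (forbidden[c / MOD] & (1L << (c % MOD))) == 0;
}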
__global__ void DetectConflicts(long int V,long int *preSum,long int *colIndex,long int *colors,bool *conflicts,bool *checkConflicts){
long int threadId = blockIdx.x*blockDim.x+threadIdx.x;
if(threadId<V){
conflicts[threadId] = false;
for(long int k=preSum[threadId];k<preSum[threadId+1];k++){
long int j = colIndex[k];
if((colors[threadId]==colors[j])&&(j<threadId)){
conflicts[threadId] = true;
*checkConflicts = true;
return;
}
}
}
}
__global__ void preSumLength(int V,long int *d_preSum,long int *degree,long int *delta_Degree){
for(long int i=0;i<V;i++){
d_preSum[i+1] = d_preSum[i]+degree[i];
if(*delta_Degree<degree[i]){
*delta_Degree = degree[i];
}
}
}
__global__ void IsValidgraph_Coloring(long int V,long int *colors,long int *preSum,long int *colIndex,bool *flag){
long int threadId = blockDim.x*blockIdx.x+threadIdx.x;
if(threadId<V){
for(long int i=preSum[threadId];i<preSum[threadId+1];i++){
if(colors[threadId]==colors[colIndex[i]]||colors[threadId]==-1){
*flag = false;
}
}
}
}
long int EdgeBased_Algorithm(long int V,long int *preSum,long int *colIndex,long int *colors,long int *degree,long int n_zero_counter,long int *delta_Degree){
/*
@ step 2 Initialize the colors to 0
@ until all are colored
*/
thrust::fill(colors,colors+V,0);
long int n_threads = 256;
long int n_blocks = min((V+n_threads-1)/n_threads,(long)MAXBLOCKS);
bool *d_conflicts,*checkConflict;
hipMallocManaged(&d_conflicts,sizeof(bool)*V);
hipMallocManaged(&checkConflict,sizeof(bool));
thrust::fill(d_conflicts,d_conflicts+V,true);
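/* Speculative colouring loop: AssignColors gives every still-conflicted vertex the
smallest colour not used by its neighbours, DetectConflicts then re-checks each edge
and re-marks the lower-indexed endpoint of any same-coloured pair, and only those
vertices are recoloured in the next pass; the loop stops once a pass produces no
conflicts. */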
do{
*checkConflict = false;
hipLaunchKernelGGL(( AssignColors), dim3(n_blocks),dim3(n_threads), 0, 0, V,preSum,colIndex,colors,delta_Degree,d_conflicts);
hipLaunchKernelGGL(( DetectConflicts), dim3(n_blocks),dim3(n_threads), 0, 0, V,preSum,colIndex,colors,d_conflicts,checkConflict);
catchCudaError(hipDeviceSynchronize(),"Edge");
}while(*checkConflict);
//Assigned Colors
/*
@ last step to print the assigned colors
*/
cout<<endl;
for(long int i=0;i<V;i++){
printf("vertex --> %i Assigned Color --> %d\n",i,colors[i]);
}
cout<<endl;
//thrust::device_ptr<long int> d_ptr = thrust::device_pointer_cast(colors);
//long int minimumColor = *(thrust::max_element(d_ptr, d_ptr+V));
thrust::device_vector<long int> d_data(V);
thrust::copy(colors,colors+V,d_data.begin());
thrust::sort(d_data.begin(), d_data.end());
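// Counting distinct colours: after the sort, every position where consecutive
// elements differ starts a new colour, so summing not_equal_to over adjacent
// pairs and adding 1 yields the number of unique colours used.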
size_t num_unique = thrust::inner_product(d_data.begin(), d_data.end()-1,d_data.begin()+1,0,
thrust::plus<long int>(),thrust::not_equal_to<long int>())+1;
hipFree(d_conflicts);
hipFree(checkConflict);
//required colors needed
return (long int)num_unique;
}
void GraphColoring_GPUAllocation(const char filename[]){
//@difficult to allocate memory for large complete dataset not assume complete graph
long int V; //No. of verties
long int n_zero_counter = 0;
long int **st_Column;
long int *st_degree;
if(string(filename).find("col")!=string::npos){
ReadColFile(filename,&V,&st_Column,&st_degree,&n_zero_counter);
}else{
ReadMMFile(filename,&V,&st_Column,&st_degree,&n_zero_counter);
}
long int *degree;
catchCudaError(hipMallocManaged(°ree,sizeof(long int)*V),"Degree Allocation");
thrust::copy(st_degree,st_degree+V,degree);
long int *d_preSum;
catchCudaError(hipMallocManaged(&d_preSum,sizeof(long int)*(V+1)),"preSum Allocation");
d_preSum[0] = 0;
//store all the index of non zero element
long int *d_colIndex;
catchCudaError(hipMallocManaged(&d_colIndex,sizeof(long int)*n_zero_counter),"colIndex Allocation");
//Allocation
long int *colors;
catchCudaError(hipMallocManaged(&colors,sizeof(long int)*V),"Color Allocation");
long int *delta_Degree;
catchCudaError(hipMallocManaged(&delta_Degree,sizeof(long int)),"Delta Degree Allocation");
*delta_Degree = 0;
hipLaunchKernelGGL(( preSumLength), dim3(1),dim3(1), 0, 0, V,d_preSum,degree,delta_Degree);
catchCudaError(hipMemcpy(d_preSum,d_preSum,sizeof(long int)*(V+1),hipMemcpyDeviceToHost),"Copy to PreSum");
for(int i=0;i<V;i++){
//Remove the hipMemcpy it will take more time
thrust::copy(st_Column[i],st_Column[i]+degree[i],d_colIndex+d_preSum[i]);
}
/*
@begin CSR
*/
//Call the EdgeBase Algorithm
long int number_Of_Colors_Needed = EdgeBased_Algorithm(V,d_preSum,d_colIndex,colors,degree,n_zero_counter,delta_Degree);
cout<<"EdgeBase Algorithm coloring found solution with "<<number_Of_Colors_Needed<<" colors"<<endl;
cout<<"Valid coloring Yes\n";
catchCudaError(hipDeviceSynchronize(),"GraphColoring DeviceSync");
hipFree(d_preSum);
hipFree(d_colIndex);
hipFree(colors);
hipFree(degree);
}
/* Reading Argument with command line opetion */
int main(int argc,char *argv[])
{
if(argc<2){
cout<<"Invalid Input Parameter"<<endl;
exit(1);
}else{
/*
@Adding the clock
*/
clock_t time = clock();
GraphColoring_GPUAllocation(argv[1]);
time = clock()-time;
cout<<"Total execution time is "<<(double)time/(double)CLOCKS_PER_SEC<<endl;
}
return 0;
}
| a9b5f04e2c8e1b2b118a92d8f4ddae10dc0cabdb.cu | /*
# Edge Base Approach
#{class} {use the bit manipulations for less memeory requirements}
@iterative
*/
#include<bits/stdc++.h>
#include<cuda.h>
#include<thrust/count.h>
#include<thrust/extrema.h>
#include<thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include<curand_kernel.h>
#include "Utility.cuh"
#define MAXBLOCKS 1<<32
#define MOD 32
using namespace std;
__global__ void AssignColors(long int V,long int *preSum,long int *colIndex,long int *colors,long int *delta_Degree,bool *conflicts){
long int threadId = blockDim.x*blockIdx.x+threadIdx.x;
long int stride = blockDim.x*gridDim.x;
if(threadId<V&&!conflicts[threadId]){
return;
}
if(threadId<V){
for(long int i=threadId;i<V;i=i+stride){
long int *vforbidden = (long int*)malloc(sizeof(long int)*(*delta_Degree+1));
memset(vforbidden,0,sizeof(long int)*(*delta_Degree+1));
for(long int k=preSum[i];k<preSum[i+1];k++){
long int j = colIndex[k];
long int value = colors[j]%MOD;
long int shift = 1L<<value;
vforbidden[colors[j]/MOD]|= shift;
}
//Assign colors
for(long int color=1;color<=*delta_Degree+1;color++){
long int val = color%MOD;
if((vforbidden[color/MOD]&(1L<<val))== 0){
colors[i] = color;
break; // leave the colour loop so vforbidden is freed before the next vertex
}
}
free(vforbidden);
}
}
}
__global__ void DetectConflicts(long int V,long int *preSum,long int *colIndex,long int *colors,bool *conflicts,bool *checkConflicts){
long int threadId = blockIdx.x*blockDim.x+threadIdx.x;
if(threadId<V){
conflicts[threadId] = false;
for(long int k=preSum[threadId];k<preSum[threadId+1];k++){
long int j = colIndex[k];
if((colors[threadId]==colors[j])&&(j<threadId)){
conflicts[threadId] = true;
*checkConflicts = true;
return;
}
}
}
}
__global__ void preSumLength(int V,long int *d_preSum,long int *degree,long int *delta_Degree){
for(long int i=0;i<V;i++){
d_preSum[i+1] = d_preSum[i]+degree[i];
if(*delta_Degree<degree[i]){
*delta_Degree = degree[i];
}
}
}
__global__ void IsValidgraph_Coloring(long int V,long int *colors,long int *preSum,long int *colIndex,bool *flag){
long int threadId = blockDim.x*blockIdx.x+threadIdx.x;
if(threadId<V){
for(long int i=preSum[threadId];i<preSum[threadId+1];i++){
if(colors[threadId]==colors[colIndex[i]]||colors[threadId]==-1){
*flag = false;
}
}
}
}
long int EdgeBased_Algorithm(long int V,long int *preSum,long int *colIndex,long int *colors,long int *degree,long int n_zero_counter,long int *delta_Degree){
/*
@ step 2 Initialize the colors to 0
@ until all are colored
*/
thrust::fill(colors,colors+V,0);
long int n_threads = 256;
long int n_blocks = min((V+n_threads-1)/n_threads,(long)MAXBLOCKS);
bool *d_conflicts,*checkConflict;
cudaMallocManaged(&d_conflicts,sizeof(bool)*V);
cudaMallocManaged(&checkConflict,sizeof(bool));
thrust::fill(d_conflicts,d_conflicts+V,true);
do{
*checkConflict = false;
AssignColors<<<n_blocks,n_threads>>>(V,preSum,colIndex,colors,delta_Degree,d_conflicts);
DetectConflicts<<<n_blocks,n_threads>>>(V,preSum,colIndex,colors,d_conflicts,checkConflict);
catchCudaError(cudaDeviceSynchronize(),"Edge");
}while(*checkConflict);
//Assigned Colors
/*
@ last step to print the assigned colors
*/
cout<<endl;
for(long int i=0;i<V;i++){
printf("vertex --> %i Assigned Color --> %d\n",i,colors[i]);
}
cout<<endl;
//thrust::device_ptr<long int> d_ptr = thrust::device_pointer_cast(colors);
//long int minimumColor = *(thrust::max_element(d_ptr, d_ptr+V));
thrust::device_vector<long int> d_data(V);
thrust::copy(colors,colors+V,d_data.begin());
thrust::sort(d_data.begin(), d_data.end());
size_t num_unique = thrust::inner_product(d_data.begin(), d_data.end()-1,d_data.begin()+1,0,
thrust::plus<long int>(),thrust::not_equal_to<long int>())+1;
cudaFree(d_conflicts);
cudaFree(checkConflict);
//required colors needed
return (long int)num_unique;
}
void GraphColoring_GPUAllocation(const char filename[]){
//@difficult to allocate memory for large complete dataset not assume complete graph
long int V; //No. of verties
long int n_zero_counter = 0;
long int **st_Column;
long int *st_degree;
if(string(filename).find("col")!=string::npos){
ReadColFile(filename,&V,&st_Column,&st_degree,&n_zero_counter);
}else{
ReadMMFile(filename,&V,&st_Column,&st_degree,&n_zero_counter);
}
long int *degree;
catchCudaError(cudaMallocManaged(°ree,sizeof(long int)*V),"Degree Allocation");
thrust::copy(st_degree,st_degree+V,degree);
long int *d_preSum;
catchCudaError(cudaMallocManaged(&d_preSum,sizeof(long int)*(V+1)),"preSum Allocation");
d_preSum[0] = 0;
//store all the index of non zero element
long int *d_colIndex;
catchCudaError(cudaMallocManaged(&d_colIndex,sizeof(long int)*n_zero_counter),"colIndex Allocation");
//Allocation
long int *colors;
catchCudaError(cudaMallocManaged(&colors,sizeof(long int)*V),"Color Allocation");
long int *delta_Degree;
catchCudaError(cudaMallocManaged(&delta_Degree,sizeof(long int)),"Delta Degree Allocation");
*delta_Degree = 0;
preSumLength<<<1,1>>>(V,d_preSum,degree,delta_Degree);
catchCudaError(cudaMemcpy(d_preSum,d_preSum,sizeof(long int)*(V+1),cudaMemcpyDeviceToHost),"Copy to PreSum");
for(int i=0;i<V;i++){
//Remove the cudaMemcpy it will take more time
thrust::copy(st_Column[i],st_Column[i]+degree[i],d_colIndex+d_preSum[i]);
}
/*
@begin CSR
*/
//Call the EdgeBase Algorithm
long int number_Of_Colors_Needed = EdgeBased_Algorithm(V,d_preSum,d_colIndex,colors,degree,n_zero_counter,delta_Degree);
cout<<"EdgeBase Algorithm coloring found solution with "<<number_Of_Colors_Needed<<" colors"<<endl;
cout<<"Valid coloring Yes\n";
catchCudaError(cudaDeviceSynchronize(),"GraphColoring DeviceSync");
cudaFree(d_preSum);
cudaFree(d_colIndex);
cudaFree(colors);
cudaFree(degree);
}
/* Reading Argument with command line opetion */
int main(int argc,char *argv[])
{
if(argc<2){
cout<<"Invalid Input Parameter"<<endl;
exit(1);
}else{
/*
@Adding the clock
*/
clock_t time = clock();
GraphColoring_GPUAllocation(argv[1]);
time = clock()-time;
cout<<"Total execution time is "<<(double)time/(double)CLOCKS_PER_SEC<<endl;
}
return 0;
}
|
806fb18d076234a3b9957d05fc534c3c093819b3.hip | // !!! This is a file automatically generated by hipify!!!
#include<iostream>
#include<cmath>
#include<cstdlib>//needed for rand
#include<ctime> //needed for srand(time(0))
#include<fstream> //needed for files
#include<vector>
#include "atom.cpp"
#include "book.h"
using namespace std;
void makeBox(double box) {
ofstream fout;
fout.open("box.xyz");
fout << 12*box << endl;
fout << "box " << endl;
for (int b =0; b<box; b++) {
fout<<"box "<< b <<" "<< 0 << " "<< 0 << endl;
fout<<"box "<< 0 <<" "<< b << " "<< 0 << endl;
fout<<"box "<< 0 <<" "<< 0 << " "<< b << endl;
fout<<"box "<< box <<" "<< b << " "<< 0 << endl;
fout<<"box "<< box <<" "<< 0 << " "<< b << endl;
fout<<"box "<< b <<" "<< box << " "<< 0 << endl;
fout<<"box "<< 0 <<" "<< box << " "<< b << endl;
fout<<"box "<< b <<" "<< 0 << " "<< box << endl;
fout<<"box "<< 0 <<" "<< b << " "<< box << endl;
fout<<"box "<< box <<" "<< box << " "<< b << endl;
fout<<"box "<< b <<" "<< box << " "<< box << endl;
fout<<"box "<< box <<" "<< b << " "<< box << endl;
}
fout.close();
}
void updateCoor(vector<Atom>& atoms, int numAtoms, int i, double box, double timeStep) {
double x;
double y;
double z;
double ang = 0.0000000001;
//ofstream fout;
ofstream fout;
fout.open("10Atom.xyz", ios::app);
//print to .xyz file
//if(i==0){
fout << numAtoms << endl;
//}
fout << "iteration " << i << endl;
for (int k=0; k<numAtoms; k++) {
fout<< "Argon ";
fout<< atoms[k].getxCoor() <<" "<<atoms[k].getyCoor() << " "<< atoms[k].getzCoor()<< endl;
}
for (int k=0; k<numAtoms; k++) { //k is the atom which is being updated
//update the coordinates and make sure they are in the box
x= atoms[k].getxCoor() + atoms[k].getxVel()*timeStep/ang;
y= atoms[k].getyCoor() + atoms[k].getyVel()*timeStep/ang;
z= atoms[k].getzCoor() + atoms[k].getzVel()*timeStep/ang;
if(x >= box){
x =fmod(x, box);
}
if(x <0) {
x =fmod(x, box) + box;
}
if(y >= box){
y = fmod(y, box);
}
if(y <0) {
y =fmod(y, box) +box;
}
if(z >= box){
z =fmod(z, box);
}
if(z <0) {
z =fmod(z, box) + box;
}
atoms[k].set_Coor(x,y,z);
}
}
void updateVel(vector<Atom>& atoms, int numAtoms, double box, double timeStep, int i) {
double dist;
double dx=0.0;
double dy=0.0;
double dz=0.0;
double ePot = 0;
double eKin = 0;
double eTot1;
double eTot2;
double ang = 0.0000000001;
double E = 1.712*(.000000000000000000001);
double sig = 3.4*(.0000000001);
double Fx;
double Fy;
double Fz;
double x;
double y;
double z;
for (int k=0; k<numAtoms; k++) { //k is the atom which is being updated
for (int l = 0; l<numAtoms;l++){ //l is the atom whose presence is applying force to k
double min = 1000000000;
if(l != k){ //make sure they are not the same atom
dist = (atoms[l].getxCoor()-atoms[k].getxCoor())*(atoms[l].getxCoor()-atoms[k].getxCoor())
+(atoms[l].getyCoor()-atoms[k].getyCoor())*(atoms[l].getyCoor()-atoms[k].getyCoor())
+ (atoms[l].getzCoor()-atoms[k].getzCoor())*(atoms[l].getzCoor()-atoms[k].getzCoor());
if (dist < min) {
min = dist;
dx =atoms[l].getxCoor()-atoms[k].getxCoor();
dy =atoms[l].getyCoor()-atoms[k].getyCoor();
dz =atoms[l].getzCoor()-atoms[k].getzCoor();
}
dist = (atoms[l].getxCoor()-atoms[k].getxCoor()-box)*(atoms[l].getxCoor()-atoms[k].getxCoor()-box)
+(atoms[l].getyCoor()-atoms[k].getyCoor())*(atoms[l].getyCoor()-atoms[k].getyCoor())
+ (atoms[l].getzCoor()-atoms[k].getzCoor())*(atoms[l].getzCoor()-atoms[k].getzCoor());
if (dist < min) {
min = dist;
dx =atoms[l].getxCoor()-atoms[k].getxCoor()-box;
dy =atoms[l].getyCoor()-atoms[k].getyCoor();
dz =atoms[l].getzCoor()-atoms[k].getzCoor();
}
dist = (atoms[l].getxCoor()-atoms[k].getxCoor()+box)*(atoms[l].getxCoor()-atoms[k].getxCoor()+box)
+(atoms[l].getyCoor()-atoms[k].getyCoor())*(atoms[l].getyCoor()-atoms[k].getyCoor())
+ (atoms[l].getzCoor()-atoms[k].getzCoor())*(atoms[l].getzCoor()-atoms[k].getzCoor());
if (dist < min){
min = dist;
dx =atoms[l].getxCoor()-atoms[k].getxCoor()+box;
dy =atoms[l].getyCoor()-atoms[k].getyCoor();
dz =atoms[l].getzCoor()-atoms[k].getzCoor();
}
dist = (atoms[l].getxCoor()-atoms[k].getxCoor())*(atoms[l].getxCoor()-atoms[k].getxCoor())
+(atoms[l].getyCoor()-atoms[k].getyCoor()-box)*(atoms[l].getyCoor()-atoms[k].getyCoor()-box)
+ (atoms[l].getzCoor()-atoms[k].getzCoor())*(atoms[l].getzCoor()-atoms[k].getzCoor());
if (dist < min){
min = dist;
dx =atoms[l].getxCoor()-atoms[k].getxCoor();
dy =atoms[l].getyCoor()-atoms[k].getyCoor()-box;
dz =atoms[l].getzCoor()-atoms[k].getzCoor();
}
dist = (atoms[l].getxCoor()-atoms[k].getxCoor())*(atoms[l].getxCoor()-atoms[k].getxCoor())
+(atoms[l].getyCoor()-atoms[k].getyCoor()+box)*(atoms[l].getyCoor()-atoms[k].getyCoor()+box)
+ (atoms[l].getzCoor()-atoms[k].getzCoor())*(atoms[l].getzCoor()-atoms[k].getzCoor());
if (dist < min){
min = dist;
dx =atoms[l].getxCoor()-atoms[k].getxCoor();
dy =atoms[l].getyCoor()-atoms[k].getyCoor()+box;
dz =atoms[l].getzCoor()-atoms[k].getzCoor();
}
dist = (atoms[l].getxCoor()-atoms[k].getxCoor())*(atoms[l].getxCoor()-atoms[k].getxCoor())
+(atoms[l].getyCoor()-atoms[k].getyCoor())*(atoms[l].getyCoor()-atoms[k].getyCoor())
+ (atoms[l].getzCoor()-atoms[k].getzCoor()-box)*(atoms[l].getzCoor()-atoms[k].getzCoor()-box);
if (dist < min){
min = dist;
dx =atoms[l].getxCoor()-atoms[k].getxCoor();
dy =atoms[l].getyCoor()-atoms[k].getyCoor();
dz =atoms[l].getzCoor()-atoms[k].getzCoor()-box;
}
dist = (atoms[l].getxCoor()-atoms[k].getxCoor())*(atoms[l].getxCoor()-atoms[k].getxCoor())
+(atoms[l].getyCoor()-atoms[k].getyCoor())*(atoms[l].getyCoor()-atoms[k].getyCoor())
+ (atoms[l].getzCoor()-atoms[k].getzCoor()+box)*(atoms[l].getzCoor()-atoms[k].getzCoor()+box);
if (dist < min){
min = dist;
dx =atoms[l].getxCoor()-atoms[k].getxCoor();
dy =atoms[l].getyCoor()-atoms[k].getyCoor();
dz =atoms[l].getzCoor()-atoms[k].getzCoor()+box;
}
dx= dx*ang;
dy=dy*ang;
dz=dz*ang;
// update the velocity
double D = dx*dx+dy*dy+dz*dz;
double D_7= (dx*dx+dy*dy+dz*dz)*(dx*dx+dy*dy+dz*dz)*(dx*dx+dy*dy+dz*dz)*(dx*dx+dy*dy+dz*dz)*(dx*dx+dy*dy+dz*dz)*(dx*dx+dy*dy+dz*dz)
*(dx*dx+dy*dy+dz*dz);
double D_4 = (dx*dx+dy*dy+dz*dz)*(dx*dx+dy*dy+dz*dz)*(dx*dx+dy*dy+dz*dz)*(dx*dx+dy*dy+dz*dz);
double sig_6= sig*sig*sig*sig*sig*sig;
Fx = -1*(24*E*sig_6*dx*((2*sig_6/(D_7))-(1/(D_4))));
Fy = -1*(24*E*sig_6*dy*((2*sig_6/(D_7))-(1/(D_4))));
Fz = -1*(24*E*sig_6*dz*((2*sig_6/(D_7))-(1/(D_4))));
x= atoms[k].getxVel() + (Fx/atoms[k].getMass())*timeStep;
y= atoms[k].getyVel() + (Fy/atoms[k].getMass())*timeStep;
z= atoms[k].getzVel() + (Fz/atoms[k].getMass())*timeStep;
atoms[k].set_Vel(x,y,z);
ePot += 4*E*(pow((sig/sqrt(dist)),12)-pow((sig/sqrt(dist)),6));
}
}
eKin += .5*atoms[k].getMass()*(atoms[k].getxVel()*atoms[k].getxVel()+atoms[k].getyVel()*atoms[k].getyVel()+atoms[k].getzVel()*atoms[k].getzVel());
}
eTot1 = eKin + ePot;
eTot2 = (eKin + ePot)/1000*6.02*pow(10,23);
if (i%100 ==0) {
printf("%d %4.16g %4.16g \n",i, eTot1, eTot2);
}
}
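/*
Note on the distance search above: testing the unshifted separation plus the six
single-axis box shifts approximates the minimum image convention for a cubic
periodic box. A compact per-component alternative for an orthorhombic box,
shown only as an illustrative sketch that this file does not call:
*/
static inline double minImageSketch(double d, double box) {
return d - box * rint(d / box); // nearest periodic image of one separation component
}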
void cleanFile() {
ofstream fout;
fout.open("10Atom.xyz");
fout<< "";
}
double defBox(int numAtoms,double temperature) {
double box;
double ang = 0.0000000001;
//calculate the volume of the box using PV = nRT (P = 1)
double n = numAtoms/(6.022*pow(10, 23)); //calculate the mols of atoms
double R = .08206; //gas constant
box = n*R*temperature; //calculate the volume of the box in liters
box *= .001; //convert the volume of the box from liters to meters cubed
box = pow(box, 1/3.); //take the cube root to find the length of one side of the box
box /= ang; //convert to angstroms
return box;
}
void simulation2(){
double *dev_a, *dev_b, *dev_c;
double *xCoor, *xVel;
double ang = 0.0000000001;
ifstream atom_file;
atom_file.open("input.txt", ios::in);
int numAtoms;
atom_file >> numAtoms;
vector<Atom> atoms;
xCoor = (double*)malloc(numAtoms * sizeof(double));
xVel = (double*)malloc(numAtoms * sizeof(double));
int iterations;
atom_file>> iterations;
double timeStep;
atom_file>> timeStep;
double box;
atom_file>> box;
double mass;
atom_file>> mass;
atom_file.close();
box /= ang; //convert to angstroms
makeBox(box);
ifstream initial_file;
initial_file.open("initial.txt", ios::in);
double initialx;
double initialy;
double initialz;
double initialxVel;
double initialyVel;
double initialzVel;
for (double i=0; i<numAtoms; i++) {
initial_file >> initialx;
initial_file >> initialy;
initial_file >> initialz;
initial_file >> initialxVel;
initial_file >> initialyVel;
initial_file >> initialzVel;
atoms.push_back( Atom(mass,initialx/ang,initialy/ang,initialz/ang,initialxVel,initialyVel,initialzVel));
}
initial_file.close();
cleanFile();
//set initial positions and velocities for atoms
/*for(double i =0; i<numAtoms; i++) {
atoms.push_back(Atom(mass*1.66053892 * pow(10,-27),fmod(rand(),box),fmod(rand(),box),fmod(rand(),box),fmod(rand(),500)-250,fmod(rand(),500)-250,fmod(rand(),500)-250));
}*/
HANDLE_ERROR( hipMalloc( (void**)&dev_a, numAtoms * sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_b, numAtoms * sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_c, numAtoms * sizeof(double) ) );
for(int i = 0; i<iterations; i++) {
// copy the flattened host coordinate/velocity arrays to the device each iteration
// (filling xCoor/xVel from `atoms` is still missing in this unfinished port)
hipMemcpy(dev_a, xCoor, numAtoms * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_b, xVel, numAtoms * sizeof(double), hipMemcpyHostToDevice);
//updateCoor(atoms, numAtoms, i, box, timeStep);
//updateVel(atoms, numAtoms, box, timeStep, i);
}
}
int main() {
double numberOfAtoms;
double temp;
/*cout << "Enter the number of atoms: ";
cin >> numberOfAtoms;
cout << "Enter the temperature: ";
cin >> temp;*/
simulation2();
return 0;
}
| 806fb18d076234a3b9957d05fc534c3c093819b3.cu | #include<iostream>
#include<cmath>
#include<cstdlib>//needed for rand
#include<ctime> //needed for srand(time(0))
#include<fstream> //needed for files
#include<vector>
#include "atom.cpp"
#include "book.h"
using namespace std;
void makeBox(double box) {
ofstream fout;
fout.open("box.xyz");
fout << 12*box << endl;
fout << "box " << endl;
for (int b =0; b<box; b++) {
fout<<"box "<< b <<" "<< 0 << " "<< 0 << endl;
fout<<"box "<< 0 <<" "<< b << " "<< 0 << endl;
fout<<"box "<< 0 <<" "<< 0 << " "<< b << endl;
fout<<"box "<< box <<" "<< b << " "<< 0 << endl;
fout<<"box "<< box <<" "<< 0 << " "<< b << endl;
fout<<"box "<< b <<" "<< box << " "<< 0 << endl;
fout<<"box "<< 0 <<" "<< box << " "<< b << endl;
fout<<"box "<< b <<" "<< 0 << " "<< box << endl;
fout<<"box "<< 0 <<" "<< b << " "<< box << endl;
fout<<"box "<< box <<" "<< box << " "<< b << endl;
fout<<"box "<< b <<" "<< box << " "<< box << endl;
fout<<"box "<< box <<" "<< b << " "<< box << endl;
}
fout.close();
}
void updateCoor(vector<Atom>& atoms, int numAtoms, int i, double box, double timeStep) {
double x;
double y;
double z;
double ang = 0.0000000001;
//ofstream fout;
ofstream fout;
fout.open("10Atom.xyz", ios::app);
//print to .xyz file
//if(i==0){
fout << numAtoms << endl;
//}
fout << "iteration " << i << endl;
for (int k=0; k<numAtoms; k++) {
fout<< "Argon ";
fout<< atoms[k].getxCoor() <<" "<<atoms[k].getyCoor() << " "<< atoms[k].getzCoor()<< endl;
}
for (int k=0; k<numAtoms; k++) { //k is the atom which is being updated
//update the coordinates and make sure they are in the box
x= atoms[k].getxCoor() + atoms[k].getxVel()*timeStep/ang;
y= atoms[k].getyCoor() + atoms[k].getyVel()*timeStep/ang;
z= atoms[k].getzCoor() + atoms[k].getzVel()*timeStep/ang;
if(x >= box){
x =fmod(x, box);
}
if(x <0) {
x =fmod(x, box) + box;
}
if(y >= box){
y = fmod(y, box);
}
if(y <0) {
y =fmod(y, box) +box;
}
if(z >= box){
z =fmod(z, box);
}
if(z <0) {
z =fmod(z, box) + box;
}
atoms[k].set_Coor(x,y,z);
}
}
void updateVel(vector<Atom>& atoms, int numAtoms, double box, double timeStep, int i) {
double dist;
double dx=0.0;
double dy=0.0;
double dz=0.0;
double ePot = 0;
double eKin = 0;
double eTot1;
double eTot2;
double ang = 0.0000000001;
double E = 1.712*(.000000000000000000001);
double sig = 3.4*(.0000000001);
double Fx;
double Fy;
double Fz;
double x;
double y;
double z;
for (int k=0; k<numAtoms; k++) { //k is the atom which is being updated
for (int l = 0; l<numAtoms;l++){ //l is the atom whose presence is applying force to k
double min = 1000000000;
if(l != k){ //make sure they are not the same atom
dist = (atoms[l].getxCoor()-atoms[k].getxCoor())*(atoms[l].getxCoor()-atoms[k].getxCoor())
+(atoms[l].getyCoor()-atoms[k].getyCoor())*(atoms[l].getyCoor()-atoms[k].getyCoor())
+ (atoms[l].getzCoor()-atoms[k].getzCoor())*(atoms[l].getzCoor()-atoms[k].getzCoor());
if (dist < min) {
min = dist;
dx =atoms[l].getxCoor()-atoms[k].getxCoor();
dy =atoms[l].getyCoor()-atoms[k].getyCoor();
dz =atoms[l].getzCoor()-atoms[k].getzCoor();
}
dist = (atoms[l].getxCoor()-atoms[k].getxCoor()-box)*(atoms[l].getxCoor()-atoms[k].getxCoor()-box)
+(atoms[l].getyCoor()-atoms[k].getyCoor())*(atoms[l].getyCoor()-atoms[k].getyCoor())
+ (atoms[l].getzCoor()-atoms[k].getzCoor())*(atoms[l].getzCoor()-atoms[k].getzCoor());
if (dist < min) {
min = dist;
dx =atoms[l].getxCoor()-atoms[k].getxCoor()-box;
dy =atoms[l].getyCoor()-atoms[k].getyCoor();
dz =atoms[l].getzCoor()-atoms[k].getzCoor();
}
dist = (atoms[l].getxCoor()-atoms[k].getxCoor()+box)*(atoms[l].getxCoor()-atoms[k].getxCoor()+box)
+(atoms[l].getyCoor()-atoms[k].getyCoor())*(atoms[l].getyCoor()-atoms[k].getyCoor())
+ (atoms[l].getzCoor()-atoms[k].getzCoor())*(atoms[l].getzCoor()-atoms[k].getzCoor());
if (dist < min){
min = dist;
dx =atoms[l].getxCoor()-atoms[k].getxCoor()+box;
dy =atoms[l].getyCoor()-atoms[k].getyCoor();
dz =atoms[l].getzCoor()-atoms[k].getzCoor();
}
dist = (atoms[l].getxCoor()-atoms[k].getxCoor())*(atoms[l].getxCoor()-atoms[k].getxCoor())
+(atoms[l].getyCoor()-atoms[k].getyCoor()-box)*(atoms[l].getyCoor()-atoms[k].getyCoor()-box)
+ (atoms[l].getzCoor()-atoms[k].getzCoor())*(atoms[l].getzCoor()-atoms[k].getzCoor());
if (dist < min){
min = dist;
dx =atoms[l].getxCoor()-atoms[k].getxCoor();
dy =atoms[l].getyCoor()-atoms[k].getyCoor()-box;
dz =atoms[l].getzCoor()-atoms[k].getzCoor();
}
dist = (atoms[l].getxCoor()-atoms[k].getxCoor())*(atoms[l].getxCoor()-atoms[k].getxCoor())
+(atoms[l].getyCoor()-atoms[k].getyCoor()+box)*(atoms[l].getyCoor()-atoms[k].getyCoor()+box)
+ (atoms[l].getzCoor()-atoms[k].getzCoor())*(atoms[l].getzCoor()-atoms[k].getzCoor());
if (dist < min){
min = dist;
dx =atoms[l].getxCoor()-atoms[k].getxCoor();
dy =atoms[l].getyCoor()-atoms[k].getyCoor()+box;
dz =atoms[l].getzCoor()-atoms[k].getzCoor();
}
dist = (atoms[l].getxCoor()-atoms[k].getxCoor())*(atoms[l].getxCoor()-atoms[k].getxCoor())
+(atoms[l].getyCoor()-atoms[k].getyCoor())*(atoms[l].getyCoor()-atoms[k].getyCoor())
+ (atoms[l].getzCoor()-atoms[k].getzCoor()-box)*(atoms[l].getzCoor()-atoms[k].getzCoor()-box);
if (dist < min){
min = dist;
dx =atoms[l].getxCoor()-atoms[k].getxCoor();
dy =atoms[l].getyCoor()-atoms[k].getyCoor();
dz =atoms[l].getzCoor()-atoms[k].getzCoor()-box;
}
dist = (atoms[l].getxCoor()-atoms[k].getxCoor())*(atoms[l].getxCoor()-atoms[k].getxCoor())
+(atoms[l].getyCoor()-atoms[k].getyCoor())*(atoms[l].getyCoor()-atoms[k].getyCoor())
+ (atoms[l].getzCoor()-atoms[k].getzCoor()+box)*(atoms[l].getzCoor()-atoms[k].getzCoor()+box);
if (dist < min){
min = dist;
dx =atoms[l].getxCoor()-atoms[k].getxCoor();
dy =atoms[l].getyCoor()-atoms[k].getyCoor();
dz =atoms[l].getzCoor()-atoms[k].getzCoor()+box;
}
dx= dx*ang;
dy=dy*ang;
dz=dz*ang;
// update the velocity
double D = dx*dx+dy*dy+dz*dz;
double D_7= (dx*dx+dy*dy+dz*dz)*(dx*dx+dy*dy+dz*dz)*(dx*dx+dy*dy+dz*dz)*(dx*dx+dy*dy+dz*dz)*(dx*dx+dy*dy+dz*dz)*(dx*dx+dy*dy+dz*dz)
*(dx*dx+dy*dy+dz*dz);
double D_4 = (dx*dx+dy*dy+dz*dz)*(dx*dx+dy*dy+dz*dz)*(dx*dx+dy*dy+dz*dz)*(dx*dx+dy*dy+dz*dz);
double sig_6= sig*sig*sig*sig*sig*sig;
Fx = -1*(24*E*sig_6*dx*((2*sig_6/(D_7))-(1/(D_4))));
Fy = -1*(24*E*sig_6*dy*((2*sig_6/(D_7))-(1/(D_4))));
Fz = -1*(24*E*sig_6*dz*((2*sig_6/(D_7))-(1/(D_4))));
x= atoms[k].getxVel() + (Fx/atoms[k].getMass())*timeStep;
y= atoms[k].getyVel() + (Fy/atoms[k].getMass())*timeStep;
z= atoms[k].getzVel() + (Fz/atoms[k].getMass())*timeStep;
atoms[k].set_Vel(x,y,z);
ePot += 4*E*(pow((sig/sqrt(dist)),12)-pow((sig/sqrt(dist)),6));
}
}
eKin += .5*atoms[k].getMass()*(atoms[k].getxVel()*atoms[k].getxVel()+atoms[k].getyVel()*atoms[k].getyVel()+atoms[k].getzVel()*atoms[k].getzVel());
}
eTot1 = eKin + ePot;
eTot2 = (eKin + ePot)/1000*6.02*pow(10,23);
if (i%100 ==0) {
printf("%d %4.16g %4.16g \n",i, eTot1, eTot2);
}
}
void cleanFile() {
ofstream fout;
fout.open("10Atom.xyz");
fout<< "";
}
double defBox(int numAtoms,double temperature) {
double box;
double ang = 0.0000000001;
//calculate the volume of the box using PV = nRT (P = 1)
double n = numAtoms/(6.022*pow(10, 23)); //calculate the mols of atoms
double R = .08206; //gas constant
box = n*R*temperature; //calculate the volume of the box in liters
box *= .001; //convert the volume of the box from liters to meters cubed
box = pow(box, 1/3.); //take the cube root to find the length of one side of the box
box /= ang; //convert to angstroms
return box;
}
void simulation2(){
double *dev_a, *dev_b, *dev_c;
double *xCoor, *xVel;
double ang = 0.0000000001;
ifstream atom_file;
atom_file.open("input.txt", ios::in);
int numAtoms;
atom_file >> numAtoms;
vector<Atom> atoms;
xCoor = (double*)malloc(numAtoms * sizeof(double));
xVel = (double*)malloc(numAtoms * sizeof(double));
int iterations;
atom_file>> iterations;
double timeStep;
atom_file>> timeStep;
double box;
atom_file>> box;
double mass;
atom_file>> mass;
atom_file.close();
box /= ang; //convert to angstroms
makeBox(box);
ifstream initial_file;
initial_file.open("initial.txt", ios::in);
double initialx;
double initialy;
double initialz;
double initialxVel;
double initialyVel;
double initialzVel;
for (double i=0; i<numAtoms; i++) {
initial_file >> initialx;
initial_file >> initialy;
initial_file >> initialz;
initial_file >> initialxVel;
initial_file >> initialyVel;
initial_file >> initialzVel;
atoms.push_back( Atom(mass,initialx/ang,initialy/ang,initialz/ang,initialxVel,initialyVel,initialzVel));
}
initial_file.close();
cleanFile();
//set initial positions and velocities for atoms
/*for(double i =0; i<numAtoms; i++) {
atoms.push_back(Atom(mass*1.66053892 * pow(10,-27),fmod(rand(),box),fmod(rand(),box),fmod(rand(),box),fmod(rand(),500)-250,fmod(rand(),500)-250,fmod(rand(),500)-250));
}*/
HANDLE_ERROR( cudaMalloc( (void**)&dev_a, numAtoms * sizeof(double) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_b, numAtoms * sizeof(double) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_c, numAtoms * sizeof(double) ) );
for(int i = 0; i<iterations; i++) {
// copy the flattened host coordinate/velocity arrays to the device each iteration
// (filling xCoor/xVel from `atoms` is still missing in this unfinished port)
cudaMemcpy(dev_a, xCoor, numAtoms * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, xVel, numAtoms * sizeof(double), cudaMemcpyHostToDevice);
//updateCoor(atoms, numAtoms, i, box, timeStep);
//updateVel(atoms, numAtoms, box, timeStep, i);
}
}
int main() {
double numberOfAtoms;
double temp;
/*cout << "Enter the number of atoms: ";
cin >> numberOfAtoms;
cout << "Enter the temperature: ";
cin >> temp;*/
simulation2();
return 0;
}
|
7a781fb48f7d2c8e7554fe223dc9566db65a2b8b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__
void hitsearch_float64(const int n, const double* spectrum, const double threshold, const double drift_rate, double* maxsnr, double* maxdrift, unsigned int* tot_hits) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int count = 0;
for (int i = index; i < n; i += stride) {
if (spectrum[i] > threshold) {
count++;
if (spectrum[i] > maxsnr[i]) {
maxsnr[i] = spectrum[i];
maxdrift[i] = drift_rate;
}
}
}
atomicAdd(&tot_hits[0], count);
}
extern "C" __global__
void hitsearch_float32(const int n, const float* spectrum, const double threshold, const double drift_rate, float* maxsnr, float* maxdrift, unsigned int* tot_hits) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int count = 0;
for (int i = index; i < n; i += stride) {
if (spectrum[i] > threshold) {
count++;
if (spectrum[i] > maxsnr[i]) {
maxsnr[i] = spectrum[i];
maxdrift[i] = drift_rate;
}
}
}
atomicAdd(&tot_hits[0], count);
}
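/*
Possible host-side use, shown as a sketch in comments because these kernels ship
without a host driver here; the grid/block sizes and buffer names are assumptions.
Zero the hit counter, launch over the spectrum, then read the counter back:

unsigned int zero = 0;
hipMemcpy(d_tot_hits, &zero, sizeof(zero), hipMemcpyHostToDevice);
hipLaunchKernelGGL(hitsearch_float32, dim3(1024), dim3(256), 0, 0,
n, d_spectrum, threshold, drift_rate, d_maxsnr, d_maxdrift, d_tot_hits);
hipMemcpy(&hits, d_tot_hits, sizeof(hits), hipMemcpyDeviceToHost);
*/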
| 7a781fb48f7d2c8e7554fe223dc9566db65a2b8b.cu | extern "C" __global__
void hitsearch_float64(const int n, const double* spectrum, const double threshold, const double drift_rate, double* maxsnr, double* maxdrift, unsigned int* tot_hits) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int count = 0;
for (int i = index; i < n; i += stride) {
if (spectrum[i] > threshold) {
count++;
if (spectrum[i] > maxsnr[i]) {
maxsnr[i] = spectrum[i];
maxdrift[i] = drift_rate;
}
}
}
atomicAdd(&tot_hits[0], count);
}
extern "C" __global__
void hitsearch_float32(const int n, const float* spectrum, const double threshold, const double drift_rate, float* maxsnr, float* maxdrift, unsigned int* tot_hits) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int count = 0;
for (int i = index; i < n; i += stride) {
if (spectrum[i] > threshold) {
count++;
if (spectrum[i] > maxsnr[i]) {
maxsnr[i] = spectrum[i];
maxdrift[i] = drift_rate;
}
}
}
atomicAdd(&tot_hits[0], count);
}
|
f99b3c77510782e569bee83bb1ecd91e61b38c8e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void updateOutputWithTargetMV(const float* input, const float* weight, const float* bias, const float* mapping, const float* n_class_in_cluster, const float* class_start_indices, const float* target, const long input_stride0, const long weight_stride0, const long score_stride0, long input_size, float* score) {
__shared__ float buffer[MV_BUFFER_SIZE];
// align input and score to current sample in minibatch
input += input_stride0 * blockIdx.y;
score += score_stride0 * blockIdx.y;
// get the indices corresponding to the target
const int itarget = (int)(target[blockIdx.y] - 0.5f); // - 0.5 : 1based->0
const int cluster_target = (int)(mapping[2*itarget] - 0.5f);
const int iclass_start = (int)(class_start_indices[cluster_target] + 0.5f);
const int cluster_size = (int)(n_class_in_cluster[cluster_target] + 0.5f);
// get the bias and weight of the target cluster + correct line
const int lineIdx = blockIdx.x;
const int nLinesParallel = gridDim.x;
// do matrix vector multiply :
const int tidxx = threadIdx.x;
// loop over lines
for (int iline = lineIdx; iline < cluster_size; iline += nLinesParallel) {
const float* weight0 = weight + weight_stride0 * (iclass_start + iline);
// map
__syncthreads();
register float tmp = 0.f;
for (int i = tidxx; i < input_size; i += MV_BUFFER_SIZE)
tmp += input[i] * weight0[i];
buffer[tidxx] = tmp;
// reduce
/*
for (unsigned int stride = MV_BUFFER_SIZE >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (tidxx < stride)
buffer[tidxx] += buffer[tidxx+stride];
}
if (tidxx == 0)
score[iline] = buffer[0] + bias[iclass_start + iline];
*/
tmp = 0.f;
__syncthreads();
if (tidxx < MV_BUFFER_SIZE / MV_N_REDUCE) {
for (int i = tidxx * MV_N_REDUCE; i < (tidxx + 1) * MV_N_REDUCE; ++i)
tmp += buffer[i];
buffer[tidxx] = tmp;
}
__syncthreads();
// store result
if (tidxx == 0) {
tmp = buffer[0];
#pragma unroll
for (int i = 1; i < MV_BUFFER_SIZE / MV_N_REDUCE; ++i)
tmp += buffer[i];
score[iline] = tmp + bias[iclass_start + iline];
}
}
} | f99b3c77510782e569bee83bb1ecd91e61b38c8e.cu | #include "includes.h"
__global__ void updateOutputWithTargetMV(const float* input, const float* weight, const float* bias, const float* mapping, const float* n_class_in_cluster, const float* class_start_indices, const float* target, const long input_stride0, const long weight_stride0, const long score_stride0, long input_size, float* score) {
__shared__ float buffer[MV_BUFFER_SIZE];
// align input and score to current sample in minibatch
input += input_stride0 * blockIdx.y;
score += score_stride0 * blockIdx.y;
// get the indices corresponding to the target
const int itarget = (int)(target[blockIdx.y] - 0.5f); // - 0.5 : 1based->0
const int cluster_target = (int)(mapping[2*itarget] - 0.5f);
const int iclass_start = (int)(class_start_indices[cluster_target] + 0.5f);
const int cluster_size = (int)(n_class_in_cluster[cluster_target] + 0.5f);
// get the bias and weight of the target cluster + correct line
const int lineIdx = blockIdx.x;
const int nLinesParallel = gridDim.x;
// do matrix vector multiply :
const int tidxx = threadIdx.x;
// loop over lines
for (int iline = lineIdx; iline < cluster_size; iline += nLinesParallel) {
const float* weight0 = weight + weight_stride0 * (iclass_start + iline);
// map
__syncthreads();
register float tmp = 0.f;
for (int i = tidxx; i < input_size; i += MV_BUFFER_SIZE)
tmp += input[i] * weight0[i];
buffer[tidxx] = tmp;
// reduce
/*
for (unsigned int stride = MV_BUFFER_SIZE >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (tidxx < stride)
buffer[tidxx] += buffer[tidxx+stride];
}
if (tidxx == 0)
score[iline] = buffer[0] + bias[iclass_start + iline];
*/
tmp = 0.f;
__syncthreads();
if (tidxx < MV_BUFFER_SIZE / MV_N_REDUCE) {
for (int i = tidxx * MV_N_REDUCE; i < (tidxx + 1) * MV_N_REDUCE; ++i)
tmp += buffer[i];
buffer[tidxx] = tmp;
}
__syncthreads();
// store result
if (tidxx == 0) {
tmp = buffer[0];
#pragma unroll
for (int i = 1; i < MV_BUFFER_SIZE / MV_N_REDUCE; ++i)
tmp += buffer[i];
score[iline] = tmp + bias[iclass_start + iline];
}
}
} |
3fa52c80e89875a625b70e14d66ffca5e8013ff4.hip | // !!! This is a file automatically generated by hipify!!!
#include <cfloat>
#include <stdio.h>
#include "assert.h"
#include "hip/hip_runtime.h"
#include "utility/src/utils.cuh"
#include "utility/src/utils_cub.cuh"
DREAMPLACE_BEGIN_NAMESPACE
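// permuteGrad gathers the pin gradients into node-major order (the order defined by
// flat_node2pin_map) so that the launcher below can sum each node's pin contributions
// with a single hipcub::DeviceSegmentedReduce::Sum call per coordinate.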
template <typename T>
__global__ void permuteGrad(
const T* grad_out_x,
const T* grad_out_y,
const int* flat_node2pin_map,
const int num_pins,
T* grad_out_x_perm,
T* grad_out_y_perm
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int pin_id = flat_node2pin_map[i];
grad_out_x_perm[i] = grad_out_x[pin_id];
grad_out_y_perm[i] = grad_out_y[pin_id];
}
}
/// @brief Compute pin position from node position
template <typename T, typename K>
__global__ void computePinPos(
const T* x, const T* y,
const T* pin_offset_x,
const T* pin_offset_y,
const K* pin2node_map,
const int num_pins,
T* pin_x, T* pin_y
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int node_id = pin2node_map[i];
pin_x[i] = pin_offset_x[i] + x[node_id];
pin_y[i] = pin_offset_y[i] + y[node_id];
}
}
template <typename T>
int computePinPosCudaSegmentLauncher(
const T* x, const T* y,
const T* pin_offset_x,
const T* pin_offset_y,
const long* pin2node_map,
const int* flat_node2pin_map,
const int* flat_node2pin_start_map,
int num_pins,
T* pin_x, T* pin_y
)
{
int thread_count = 512;
hipLaunchKernelGGL(( computePinPos), dim3((num_pins+thread_count-1) / thread_count), dim3(thread_count), 0, 0, x, y, pin_offset_x, pin_offset_y, pin2node_map, num_pins, pin_x, pin_y);
return 0;
}
template <typename T>
int computePinPosGradCudaSegmentLauncher(
const T* grad_out_x, const T* grad_out_y,
const T* x, const T* y,
const T* pin_offset_x,
const T* pin_offset_y,
const long* pin2node_map,
const int* flat_node2pin_map,
const int* flat_node2pin_start_map,
int num_nodes,
int num_pins,
T* grad_x, T* grad_y,
T* grad_perm_buf ///< 2*num_pins, buffer to store the permuted gradients
)
{
int thread_count = 512;
T* grad_out_x_perm = grad_perm_buf;
T* grad_out_y_perm = grad_perm_buf + num_pins;
hipLaunchKernelGGL(( permuteGrad), dim3((num_pins+thread_count-1) / thread_count), dim3(thread_count), 0, 0, grad_out_x, grad_out_y, flat_node2pin_map, num_pins, grad_out_x_perm, grad_out_y_perm);
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
// query the required temp storage size (first call with d_temp_storage == NULL), then allocate it
hipcub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, grad_out_x_perm, grad_x,
num_nodes, flat_node2pin_start_map, flat_node2pin_start_map + 1);
hipMalloc(&d_temp_storage, temp_storage_bytes);
// for x
hipcub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, grad_out_x_perm, grad_x,
num_nodes, flat_node2pin_start_map, flat_node2pin_start_map + 1);
// for y
hipcub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, grad_out_y_perm, grad_y,
num_nodes, flat_node2pin_start_map, flat_node2pin_start_map + 1);
hipFree(d_temp_storage);
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
template int computePinPosCudaSegmentLauncher<T>(\
const T* x, const T* y, \
const T* pin_offset_x, \
const T* pin_offset_y, \
const long* pin2node_map, \
const int* flat_node2pin_map, \
const int* flat_node2pin_start_map, \
int num_pins, \
T* pin_x, T* pin_y \
);\
\
template int computePinPosGradCudaSegmentLauncher<T>(\
const T* grad_out_x, const T* grad_out_y, \
const T* x, const T* y, \
const T* pin_offset_x, \
const T* pin_offset_y, \
const long* pin2node_map, \
const int* flat_node2pin_map, \
const int* flat_node2pin_start_map, \
int num_nodes, \
int num_pins, \
T* grad_x, T* grad_y, \
T* grad_perm_buf \
);
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
| 3fa52c80e89875a625b70e14d66ffca5e8013ff4.cu | #include <cfloat>
#include <stdio.h>
#include "assert.h"
#include "cuda_runtime.h"
#include "utility/src/utils.cuh"
#include "utility/src/utils_cub.cuh"
DREAMPLACE_BEGIN_NAMESPACE
template <typename T>
__global__ void permuteGrad(
const T* grad_out_x,
const T* grad_out_y,
const int* flat_node2pin_map,
const int num_pins,
T* grad_out_x_perm,
T* grad_out_y_perm
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int pin_id = flat_node2pin_map[i];
grad_out_x_perm[i] = grad_out_x[pin_id];
grad_out_y_perm[i] = grad_out_y[pin_id];
}
}
/// @brief Compute pin position from node position
template <typename T, typename K>
__global__ void computePinPos(
const T* x, const T* y,
const T* pin_offset_x,
const T* pin_offset_y,
const K* pin2node_map,
const int num_pins,
T* pin_x, T* pin_y
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int node_id = pin2node_map[i];
pin_x[i] = pin_offset_x[i] + x[node_id];
pin_y[i] = pin_offset_y[i] + y[node_id];
}
}
template <typename T>
int computePinPosCudaSegmentLauncher(
const T* x, const T* y,
const T* pin_offset_x,
const T* pin_offset_y,
const long* pin2node_map,
const int* flat_node2pin_map,
const int* flat_node2pin_start_map,
int num_pins,
T* pin_x, T* pin_y
)
{
int thread_count = 512;
computePinPos<<<(num_pins+thread_count-1) / thread_count, thread_count>>>(x, y, pin_offset_x, pin_offset_y, pin2node_map, num_pins, pin_x, pin_y);
return 0;
}
template <typename T>
int computePinPosGradCudaSegmentLauncher(
const T* grad_out_x, const T* grad_out_y,
const T* x, const T* y,
const T* pin_offset_x,
const T* pin_offset_y,
const long* pin2node_map,
const int* flat_node2pin_map,
const int* flat_node2pin_start_map,
int num_nodes,
int num_pins,
T* grad_x, T* grad_y,
T* grad_perm_buf ///< 2*num_pins, buffer to store the permuted gradients
)
{
int thread_count = 512;
T* grad_out_x_perm = grad_perm_buf;
T* grad_out_y_perm = grad_perm_buf + num_pins;
permuteGrad<<<(num_pins+thread_count-1) / thread_count, thread_count>>>(grad_out_x, grad_out_y, flat_node2pin_map, num_pins, grad_out_x_perm, grad_out_y_perm);
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
// query the required temp storage size (first call with d_temp_storage == NULL), then allocate it
cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, grad_out_x_perm, grad_x,
num_nodes, flat_node2pin_start_map, flat_node2pin_start_map + 1);
cudaMalloc(&d_temp_storage, temp_storage_bytes);
// for x
cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, grad_out_x_perm, grad_x,
num_nodes, flat_node2pin_start_map, flat_node2pin_start_map + 1);
// for y
cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, grad_out_y_perm, grad_y,
num_nodes, flat_node2pin_start_map, flat_node2pin_start_map + 1);
cudaFree(d_temp_storage);
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
template int computePinPosCudaSegmentLauncher<T>(\
const T* x, const T* y, \
const T* pin_offset_x, \
const T* pin_offset_y, \
const long* pin2node_map, \
const int* flat_node2pin_map, \
const int* flat_node2pin_start_map, \
int num_pins, \
T* pin_x, T* pin_y \
);\
\
template int computePinPosGradCudaSegmentLauncher<T>(\
const T* grad_out_x, const T* grad_out_y, \
const T* x, const T* y, \
const T* pin_offset_x, \
const T* pin_offset_y, \
const long* pin2node_map, \
const int* flat_node2pin_map, \
const int* flat_node2pin_start_map, \
int num_nodes, \
int num_pins, \
T* grad_x, T* grad_y, \
T* grad_perm_buf \
);
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
|
cd887eb561ec9676fa08425e3e81927d3cb306a5.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
#define mm_BLOCK_SIZE 16
//#define mm_SUPER_BLOCKS_PER_SM 4
//int mm_SUPER_BLOCKS_PER_SM = 4;
#define iSizeMultiple 4 //must be multiples of 15
#define WA (4 * mm_BLOCK_SIZE) // Matrix A width
#define HA (4 * mm_BLOCK_SIZE) // Matrix A height
//#define WB (mm_SUPER_BLOCKS_PER_SM * mm_BLOCK_SIZE) // Matrix B width
#define WB (60 * mm_BLOCK_SIZE) // Matrix B width
#define HB WA // Matrix B height
#define WC WB // Matrix C width
#define HC HA // Matrix C height
#define mm_GRID_X (WC*iSizeMultiple/mm_BLOCK_SIZE)
#define mm_GRID_Y (HC*iSizeMultiple/mm_BLOCK_SIZE)
#define mm_NBLOCKS (mm_GRID_X*mm_GRID_Y)
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
texture<float,2,hipReadModeElementType> tex_A;
texture<float,2,hipReadModeElementType> tex_B;
texture<float,2,hipReadModeElementType> tex_c;
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void
computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
for (unsigned int i = 0; i < hA; ++i)
for (unsigned int j = 0; j < wB; ++j) {
double sum = 0;
for (unsigned int k = 0; k < wA; ++k) {
double a = A[i * wA + k];
double b = B[k * wB + j];
sum += a * b;
}
C[i * wB + j] = (float)sum;
}
}
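// Tiled matrix multiply C = A * B: each block computes one mm_BLOCK_SIZE x mm_BLOCK_SIZE
// tile of C, streaming the matching tiles of A and B through shared memory while reading
// the inputs via the 2D textures bound in main().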
__global__ void
mm_kernel( float* C, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * mm_BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = mm_BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = mm_BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = mm_BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
AS(ty, tx) = tex2D(tex_A,(a+wA*ty+tx)%wA,(a+wA*ty+tx)/wA);//A[a + wA * ty + tx];
BS(ty, tx) = tex2D(tex_B,(b+wB*ty+tx)%wB,(b+wB*ty+tx)/wB);//B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < mm_BLOCK_SIZE; ++k)
Csub += AS(ty, k) * BS(k, tx);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * mm_BLOCK_SIZE * by + mm_BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
//if (threadIdx.x==0&&threadIdx.y==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
// hipSetDevice(1);
srand(2013);
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
uiWA = WA * iSizeMultiple;
uiHA = HA * iSizeMultiple;
uiWB = WB * iSizeMultiple;
uiHB = HB * iSizeMultiple;
uiWC = WC * iSizeMultiple;
uiHC = HC * iSizeMultiple;
// allocate host memory for matrices A and B
unsigned int size_A = uiWA * uiHA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = uiWB * uiHB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A, *d_B, *d_C;
unsigned int size_C = uiWC * uiHC;
unsigned int mem_size_C = sizeof(float) * size_C;
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
float* h_CUBLAS = (float*) malloc(mem_size_C);
checkCudaErrors(hipMalloc((void**) &d_A, mem_size_A));
checkCudaErrors(hipMalloc((void**) &d_B, mem_size_B));
hipChannelFormatDesc channelDescA = hipCreateChannelDesc<float>();
hipChannelFormatDesc channelDescB = hipCreateChannelDesc<float>();
hipArray* A_Array, *B_Array;
hipMallocArray(&A_Array, &channelDescA, uiWA, uiHA);//,hipArraySurfaceLoadStore);
hipMallocArray(&B_Array, &channelDescB, uiWB, uiHB);//,hipArraySurfaceLoadStore);
// Copy to device memory some data located at address h_data
// in host memory
hipMemcpyToArray(A_Array, 0, 0, h_A, uiWA * uiHA * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpyToArray(B_Array, 0, 0, h_B, uiWB * uiHB * sizeof(float),
hipMemcpyHostToDevice);
// Set texture reference parameters
tex_A.addressMode[0] = hipAddressModeWrap;
tex_A.addressMode[1] = hipAddressModeWrap;
tex_A.filterMode = hipFilterModePoint;
tex_B.addressMode[0] = hipAddressModeWrap;
tex_B.addressMode[1] = hipAddressModeWrap;
tex_B.filterMode = hipFilterModePoint;
// Bind the array to the texture reference
hipBindTextureToArray(tex_A, A_Array, channelDescA);
hipBindTextureToArray(tex_B, B_Array, channelDescB);
// copy host memory to device
//checkCudaErrors(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice) );
//checkCudaErrors(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice) );
checkCudaErrors(hipMalloc((void**) &d_C, mem_size_C));
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
// setup execution parameters
dim3 mm_grid(mm_GRID_X, mm_GRID_Y);
dim3 mm_block(mm_BLOCK_SIZE, mm_BLOCK_SIZE);
// int mm_grid=mm_GRID_X*mm_GRID_Y;
hipLaunchKernelGGL(( mm_kernel), dim3(mm_grid), dim3(mm_block), 0, 0, d_C, uiWA, uiWB);
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
// copy result from device to host
checkCudaErrors(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost) );
// compute reference solution
float* reference = (float*)malloc(mem_size_C);
computeGold(reference, h_A, h_B, uiHA, uiWA, uiWB);
// check result (matrixMul)
bool resCUDA = sdkCompareL2fe(reference, h_C, size_C, 1.0e-6f);
printf("CUDA matrixMul compares %s\n\n", (true == resCUDA) ? "passed" : "FAIL");
// ofstream f1("mm_correct.txt");
// for(int i=0; i<size_C; ++i)
// f1 << reference[i] << endl;
// f1.close();
//
// ofstream f2("mm_gpu.txt");
// for(int i=0; i<size_C; ++i)
// f2 << h_C[i] << endl;
// f2.close();
// clean up memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
return 0;
}
| cd887eb561ec9676fa08425e3e81927d3cb306a5.cu |
#include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
#define mm_BLOCK_SIZE 16
//#define mm_SUPER_BLOCKS_PER_SM 4
//int mm_SUPER_BLOCKS_PER_SM = 4;
#define iSizeMultiple 4 //must be multiples of 15
#define WA (4 * mm_BLOCK_SIZE) // Matrix A width
#define HA (4 * mm_BLOCK_SIZE) // Matrix A height
//#define WB (mm_SUPER_BLOCKS_PER_SM * mm_BLOCK_SIZE) // Matrix B width
#define WB (60 * mm_BLOCK_SIZE) // Matrix B width
#define HB WA // Matrix B height
#define WC WB // Matrix C width
#define HC HA // Matrix C height
#define mm_GRID_X (WC*iSizeMultiple/mm_BLOCK_SIZE)
#define mm_GRID_Y (HC*iSizeMultiple/mm_BLOCK_SIZE)
#define mm_NBLOCKS (mm_GRID_X*mm_GRID_Y)
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
texture<float,2,cudaReadModeElementType> tex_A;
texture<float,2,cudaReadModeElementType> tex_B;
texture<float,2,cudaReadModeElementType> tex_c;
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void
computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
for (unsigned int i = 0; i < hA; ++i)
for (unsigned int j = 0; j < wB; ++j) {
double sum = 0;
for (unsigned int k = 0; k < wA; ++k) {
double a = A[i * wA + k];
double b = B[k * wB + j];
sum += a * b;
}
C[i * wB + j] = (float)sum;
}
}
__global__ void
mm_kernel( float* C, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * mm_BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = mm_BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = mm_BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = mm_BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
AS(ty, tx) = tex2D(tex_A,(a+wA*ty+tx)%wA,(a+wA*ty+tx)/wA);//A[a + wA * ty + tx];
BS(ty, tx) = tex2D(tex_B,(b+wB*ty+tx)%wB,(b+wB*ty+tx)/wB);//B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < mm_BLOCK_SIZE; ++k)
Csub += AS(ty, k) * BS(k, tx);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * mm_BLOCK_SIZE * by + mm_BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
//if (threadIdx.x==0&&threadIdx.y==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
// cudaSetDevice(1);
srand(2013);
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
uiWA = WA * iSizeMultiple;
uiHA = HA * iSizeMultiple;
uiWB = WB * iSizeMultiple;
uiHB = HB * iSizeMultiple;
uiWC = WC * iSizeMultiple;
uiHC = HC * iSizeMultiple;
// allocate host memory for matrices A and B
unsigned int size_A = uiWA * uiHA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = uiWB * uiHB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A, *d_B, *d_C;
unsigned int size_C = uiWC * uiHC;
unsigned int mem_size_C = sizeof(float) * size_C;
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
float* h_CUBLAS = (float*) malloc(mem_size_C);
checkCudaErrors(cudaMalloc((void**) &d_A, mem_size_A));
checkCudaErrors(cudaMalloc((void**) &d_B, mem_size_B));
cudaChannelFormatDesc channelDescA = cudaCreateChannelDesc<float>();
cudaChannelFormatDesc channelDescB = cudaCreateChannelDesc<float>();
cudaArray* A_Array, *B_Array;
cudaMallocArray(&A_Array, &channelDescA, uiWA, uiHA);//,cudaArraySurfaceLoadStore);
cudaMallocArray(&B_Array, &channelDescB, uiWB, uiHB);//,cudaArraySurfaceLoadStore);
// Copy to device memory some data located at address h_data
// in host memory
cudaMemcpyToArray(A_Array, 0, 0, h_A, uiWA * uiHA * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpyToArray(B_Array, 0, 0, h_B, uiWB * uiHB * sizeof(float),
cudaMemcpyHostToDevice);
// Set texture reference parameters
tex_A.addressMode[0] = cudaAddressModeWrap;
tex_A.addressMode[1] = cudaAddressModeWrap;
tex_A.filterMode = cudaFilterModePoint;
tex_B.addressMode[0] = cudaAddressModeWrap;
tex_B.addressMode[1] = cudaAddressModeWrap;
tex_B.filterMode = cudaFilterModePoint;
// Bind the array to the texture reference
cudaBindTextureToArray(tex_A, A_Array, channelDescA);
cudaBindTextureToArray(tex_B, B_Array, channelDescB);
// copy host memory to device
//checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice) );
//checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice) );
checkCudaErrors(cudaMalloc((void**) &d_C, mem_size_C));
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
float kernel_time = 0.0f;
cudaEventRecord(kernel_start, 0);
// setup execution parameters
dim3 mm_grid(mm_GRID_X, mm_GRID_Y);
dim3 mm_block(mm_BLOCK_SIZE, mm_BLOCK_SIZE);
// int mm_grid=mm_GRID_X*mm_GRID_Y;
mm_kernel<<< mm_grid, mm_block>>>(d_C, uiWA, uiWB);
cudaDeviceSynchronize();
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
// copy result from device to host
checkCudaErrors(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost) );
// compute reference solution
float* reference = (float*)malloc(mem_size_C);
computeGold(reference, h_A, h_B, uiHA, uiWA, uiWB);
// check result (matrixMul)
bool resCUDA = sdkCompareL2fe(reference, h_C, size_C, 1.0e-6f);
printf("CUDA matrixMul compares %s\n\n", (true == resCUDA) ? "passed" : "FAIL");
// ofstream f1("mm_correct.txt");
// for(int i=0; i<size_C; ++i)
// f1 << reference[i] << endl;
// f1.close();
//
// ofstream f2("mm_gpu.txt");
// for(int i=0; i<size_C; ++i)
// f2 << h_C[i] << endl;
// f2.close();
// clean up memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
return 0;
}
|
9ea9f1548dcdb821e14f73aabf4026803613ee5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <cstring> // memset
#include <cstdlib> // rand, RAND_MAX
#include <cmath> // sqrtf
#include <string>
#include <vector>
using namespace std;
float randomf(){
return (rand()+0.5)/(RAND_MAX+1.0);
}
static double get_time(){
timespec tp;
clock_gettime(CLOCK_MONOTONIC,&tp);
return tp.tv_sec+tp.tv_nsec*1e-9;
}
// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample)
__global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx) {
int batch_index = blockIdx.x;
xyz1 += n*3*batch_index;
xyz2 += m*3*batch_index;
idx += m*nsample*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius) {
if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
}
}
// input: points (b,n,c), idx (b,m,nsample)
// output: out (b,m,nsample,c)
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
int batch_index = blockIdx.x;
points += n*c*batch_index;
idx += m*nsample*batch_index;
out += m*nsample*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
out[j*nsample*c+k*c+l] = points[ii*c+l];
}
}
}
}
// input: grad_out (b,m,nsample,c), idx (b,m,nsample),
// output: grad_points (b,n,c)
__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) {
int batch_index = blockIdx.x;
idx += m*nsample*batch_index;
grad_out += m*nsample*c*batch_index;
grad_points += n*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
// Use atomic add to avoid race condition
atomicAdd(&grad_points[ii*c+l], grad_out[j*nsample*c+k*c+l]);
}
}
}
}
int main()
{
int b=32,n=512,m=128,nsample=64,c=64;
float radius=0.1;
float *xyz1, *xyz2, *points;
hipMallocManaged(&xyz1, b*n*3*sizeof(float));
hipMallocManaged(&xyz2, b*m*3*sizeof(float));
hipMallocManaged(&points, b*n*c*sizeof(float));
int *idx;
hipMallocManaged(&idx, b*m*nsample*sizeof(int));
memset(idx, 0, sizeof(int)*b*m*nsample);
float *out, *grad_out;
hipMallocManaged(&out, b*m*nsample*c*sizeof(float));
hipMallocManaged(&grad_out, b*m*nsample*c*sizeof(float));
memset(grad_out, 0, sizeof(float)*b*m*nsample*c);
float *grad_points;
hipMallocManaged(&grad_points, b*n*c*sizeof(float));
memset(grad_points, 0, sizeof(float)*b*n*c); // zero the accumulator before the atomicAdd kernel
for (int i=0;i<b*n*3;i++)
xyz1[i]=randomf();
for (int i=0;i<b*m*3;i++)
xyz2[i]=randomf();
for (int i=0;i<b*n*c;i++)
points[i]=randomf();
double t0=get_time();
hipLaunchKernelGGL(( query_ball_point_gpu), dim3(b),dim3(256), 0, 0, b,n,m,radius,nsample,xyz1,xyz2,idx);
hipDeviceSynchronize();
printf("query_ball_point gpu time %f\n",get_time()-t0);
t0=get_time();
hipLaunchKernelGGL(( group_point_gpu), dim3(b),dim3(256), 0, 0, b,n,c,m,nsample,points,idx,out);
hipDeviceSynchronize();
printf("grou_point gpu time %f\n",get_time()-t0);
t0=get_time();
hipLaunchKernelGGL(( group_point_grad_gpu), dim3(b),dim3(256), 0, 0, b,n,c,m,nsample,grad_out,idx,grad_points);
hipDeviceSynchronize();
printf("grou_point_grad gpu time %f\n",get_time()-t0);
hipFree(xyz1);
hipFree(xyz2);
hipFree(points);
hipFree(idx);
hipFree(out);
hipFree(grad_out);
hipFree(grad_points);
return 0;
}
| 9ea9f1548dcdb821e14f73aabf4026803613ee5d.cu | #include <cstdio>
#include <ctime>
#include <cstring> // memset
#include <cstdlib> // rand, RAND_MAX
#include <cmath> // sqrtf
#include <string>
#include <vector>
using namespace std;
float randomf(){
return (rand()+0.5)/(RAND_MAX+1.0);
}
static double get_time(){
timespec tp;
clock_gettime(CLOCK_MONOTONIC,&tp);
return tp.tv_sec+tp.tv_nsec*1e-9;
}
// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample)
__global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx) {
int batch_index = blockIdx.x;
xyz1 += n*3*batch_index;
xyz2 += m*3*batch_index;
idx += m*nsample*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius) {
if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
}
}
// input: points (b,n,c), idx (b,m,nsample)
// output: out (b,m,nsample,c)
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
int batch_index = blockIdx.x;
points += n*c*batch_index;
idx += m*nsample*batch_index;
out += m*nsample*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
out[j*nsample*c+k*c+l] = points[ii*c+l];
}
}
}
}
// input: grad_out (b,m,nsample,c), idx (b,m,nsample),
// output: grad_points (b,n,c)
__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) {
int batch_index = blockIdx.x;
idx += m*nsample*batch_index;
grad_out += m*nsample*c*batch_index;
grad_points += n*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
// Use atomic add to avoid race condition
atomicAdd(&grad_points[ii*c+l], grad_out[j*nsample*c+k*c+l]);
}
}
}
}
int main()
{
int b=32,n=512,m=128,nsample=64,c=64;
float radius=0.1;
float *xyz1, *xyz2, *points;
cudaMallocManaged(&xyz1, b*n*3*sizeof(float));
cudaMallocManaged(&xyz2, b*m*3*sizeof(float));
cudaMallocManaged(&points, b*n*c*sizeof(float));
int *idx;
cudaMallocManaged(&idx, b*m*nsample*sizeof(int));
memset(idx, 0, sizeof(int)*b*m*nsample);
float *out, *grad_out;
cudaMallocManaged(&out, b*m*nsample*c*sizeof(float));
cudaMallocManaged(&grad_out, b*m*nsample*c*sizeof(float));
memset(grad_out, 0, sizeof(float)*b*m*nsample*c);
float *grad_points;
cudaMallocManaged(&grad_points, b*n*c*sizeof(float));
memset(grad_points, 0, sizeof(float)*b*n*c); // zero the accumulator before the atomicAdd kernel
for (int i=0;i<b*n*3;i++)
xyz1[i]=randomf();
for (int i=0;i<b*m*3;i++)
xyz2[i]=randomf();
for (int i=0;i<b*n*c;i++)
points[i]=randomf();
double t0=get_time();
query_ball_point_gpu<<<b,256>>>(b,n,m,radius,nsample,xyz1,xyz2,idx);
cudaDeviceSynchronize();
printf("query_ball_point gpu time %f\n",get_time()-t0);
t0=get_time();
group_point_gpu<<<b,256>>>(b,n,c,m,nsample,points,idx,out);
cudaDeviceSynchronize();
printf("grou_point gpu time %f\n",get_time()-t0);
t0=get_time();
group_point_grad_gpu<<<b,256>>>(b,n,c,m,nsample,grad_out,idx,grad_points);
cudaDeviceSynchronize();
printf("grou_point_grad gpu time %f\n",get_time()-t0);
cudaFree(xyz1);
cudaFree(xyz2);
cudaFree(points);
cudaFree(idx);
cudaFree(out);
cudaFree(grad_out);
cudaFree(grad_points);
return 0;
}
|
e9951310d75b10f15c215a4c8cb526f96f6b1b6d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===================================================================================================================
// File: main.cpp
// Created on: 27-10-11
// Authors: Dirk Vos, Mark Schrauwen, Michiel van der Vlag
//
// Description: The filter file called from main.cpp. This file contains the actual CUDA code. Different functions
// of the code can be enabled/disabled by (un)commenting the macros as defined below.
//===================================================================================================================
#include <Timer.hpp>
#include <iostream>
#include <iomanip>
//-------------------------------------------------------------------------------------------------------------------
// Global definitions, macros
//-------------------------------------------------------------------------------------------------------------------
// Use commenting to disable/enable functions
#define MAX_BLOCKSIZE 32 // The size of one square block. A block will have size MAX_BLOCKSIZE * MAX_BLOCKSIZE
#define DATA_SIZE 7 // The number of variables of Memory Mapping part.
#define SHARED_MEM // If uncommented, shared memory will be used.
#define TEXTURE_MEM // If uncommented, Texture memory will be used.
using LOFAR::NSTimer;
using std::cout;
using std::cerr;
using std::endl;
using std::fixed;
using std::setprecision;
// global information
static unsigned int numMultiProcessors;
static unsigned int numThreadsPerBlock[2];
// define 2D textures
texture<unsigned char, hipTextureType2D, hipReadModeElementType> texture2DRed;
texture<unsigned char, hipTextureType2D, hipReadModeElementType> texture2DGreen;
texture<unsigned char, hipTextureType2D, hipReadModeElementType> texture2DBlue;
//-------------------------------------------------------------------------------------------------------------------
// Memory Mapping
//-------------------------------------------------------------------------------------------------------------------
/**
* unsigned int *dev_data is filled in the following way:
* index content
* 0 width
* 1 height
* 2 HISTOGRAM_SIZE
* 3 CONTRAST_THRESHOLD
* 4 pitch red image
* 5 pitch green image
* 6 pitch blue image
* 7 min (filled on device)
* 8 max (filled on device)
* 9 histogram (filled on device)
*/
//-------------------------------------------------------------------------------------------------------------------
// Device discovery
//-------------------------------------------------------------------------------------------------------------------
__host__ void getCudaDeviceInformation(void) {
// get GPU device
int device;
if (hipGetDevice(&device) != hipSuccess) {
cout << "main - cuda get device failed" << endl;
exit(1);
}
// get GPU properties
hipDeviceProp_t prop;
if (hipGetDeviceProperties (&prop, device) != hipSuccess) {
cout << "main - cuda get device properties failed" << endl;
exit(1);
}
// save to global information
numMultiProcessors = prop.multiProcessorCount;
numThreadsPerBlock[0] = MAX_BLOCKSIZE;
numThreadsPerBlock[1] = MAX_BLOCKSIZE;
}
//-------------------------------------------------------------------------------------------------------------------
// helper functions
//-------------------------------------------------------------------------------------------------------------------
__device__ unsigned char getElementRed(unsigned int x, unsigned int y) {
return tex2D(texture2DRed, x, y);
}
__device__ unsigned char getElementGreen(unsigned int x, unsigned int y) {
return tex2D(texture2DGreen, x, y);
}
__device__ unsigned char getElementBlue(unsigned int x, unsigned int y) {
return tex2D(texture2DBlue, x, y);
}
__device__ unsigned int getWidth(unsigned int *data) {
return data[0];
}
__device__ unsigned int getHeight(unsigned int *data) {
return data[1];
}
__device__ unsigned int getHistogramSize(unsigned int *data) {
return data[2];
}
__device__ unsigned int getContrastThreshold(unsigned int *data) {
return data[3];
}
__device__ unsigned int getPitchRedImage(unsigned int *data) {
return data[4];
}
__device__ unsigned int getPitchGreenImage(unsigned int *data) {
return data[5];
}
__device__ unsigned int getPitchBlueImage(unsigned int *data) {
return data[6];
}
__device__ unsigned int getMin(unsigned int *data) {
return data[7];
}
__device__ unsigned int getMax(unsigned int *data) {
return data[8];
}
__device__ void setMin(unsigned int *data, unsigned int value) {
data[7] = value;
}
__device__ void setMax(unsigned int *data, unsigned int value) {
data[8] = value;
}
__device__ unsigned int * getHistogram(unsigned int *data) {
return &data[9];
}
//-------------------------------------------------------------------------------------------------------------------
// rgb2gray
//-------------------------------------------------------------------------------------------------------------------
#ifdef TEXTURE_MEM
__global__ void rgb2grayCudaKernel(unsigned char *image, unsigned int *data) {
// load width and height
unsigned int width = getWidth(data);
unsigned int height = getHeight(data);
// load block ID's. Thread ids
int y = threadIdx.y + blockIdx.y * blockDim.y;
int x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int pitch = getPitchRedImage(data);
#ifdef SHARED_MEM
// only run threads that are in the image
if (x < width && y < height) {
// allocate shared mem for this block
__shared__ unsigned char redImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
__shared__ unsigned char greenImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
__shared__ unsigned char blueImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
// copy from global mem to shared mem
redImage[threadIdx.x][threadIdx.y] = getElementRed(x, y);
greenImage[threadIdx.x][threadIdx.y] = getElementGreen(x, y);
blueImage[threadIdx.x][threadIdx.y] = getElementBlue(x, y);
__syncthreads();
// execute grey scaling code
float grayPix = 0.0f;
float r = static_cast< float >(redImage[threadIdx.x][threadIdx.y]);
float g = static_cast< float >(greenImage[threadIdx.x][threadIdx.y]);
float b = static_cast< float >(blueImage[threadIdx.x][threadIdx.y]);
grayPix = (0.3f * r) + (0.59f * g) + (0.11f * b);
__syncthreads();
// write back to global mem
image[(y * pitch) + x] = static_cast< unsigned char >(grayPix);
}
#else
// only run threads that are in the image
if (x < width && y < height) {
// execute grey scaling code without shared mem
float grayPix = 0.0f;
float r = static_cast< float >(getElementRed(x, y));
float g = static_cast< float >(getElementGreen(x, y));
float b = static_cast< float >(getElementBlue(x, y));
grayPix = (0.3f * r) + (0.59f * g) + (0.11f * b);
// write back to global mem
image[(y * pitch) + x] = static_cast< unsigned char >(grayPix);
}
#endif
}
#else
__global__ void rgb2grayCudaKernel(unsigned char *red, unsigned char *green, unsigned char *blue, unsigned int *data) {
// load width and height
unsigned int width = getWidth(data);
unsigned int height = getHeight(data);
// load block ID's. Thread ids
int y = threadIdx.y + blockIdx.y * blockDim.y;
int x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int redpitch = getPitchRedImage(data);
unsigned int greenpitch = getPitchGreenImage(data);
unsigned int bluepitch = getPitchBlueImage(data);
#ifdef SHARED_MEM
// only run threads that are in the image
if (x < width && y < height) {
// allocate shared mem for this block
__shared__ unsigned char redImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
__shared__ unsigned char greenImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
__shared__ unsigned char blueImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
// copy from global mem to shared mem
redImage[threadIdx.x][threadIdx.y] = red[(y * redpitch) + x];
greenImage[threadIdx.x][threadIdx.y] = green[(y * greenpitch) + x];
blueImage[threadIdx.x][threadIdx.y] = blue[(y * bluepitch) + x];
__syncthreads();
// execute grey scaling code
float grayPix = 0.0f;
float r = static_cast< float >(redImage[threadIdx.x][threadIdx.y]);
float g = static_cast< float >(greenImage[threadIdx.x][threadIdx.y]);
float b = static_cast< float >(blueImage[threadIdx.x][threadIdx.y]);
grayPix = (0.3f * r) + (0.59f * g) + (0.11f * b);
__syncthreads();
// write back to global mem
red[(y * redpitch) + x] = static_cast< unsigned char >(grayPix);
}
#else
// only run threads that are in the image
if (x < width && y < height) {
// execute grey scaling code without shared mem
float grayPix = 0.0f;
float r = static_cast< float >(red[(y * redpitch) + x]);
float g = static_cast< float >(green[(y * greenpitch) + x]);
float b = static_cast< float >(blue[(y * bluepitch) + x]);
grayPix = (0.3f * r) + (0.59f * g) + (0.11f * b);
// write back to global mem
red[(y * redpitch) + x] = static_cast< unsigned char >(grayPix);
}
#endif
}
#endif
__host__ void rgb2gray(unsigned char *inputImage, unsigned char *grayImage, const int width, const int height, double *totaltime) {
NSTimer kernelTime = NSTimer("kernelTime", false, false);
kernelTime.start();
for ( int y = 0; y < height; y++ )
{
for ( int x = 0; x < width; x++ )
{
float grayPix = 0.0f;
float r = static_cast< float >(inputImage[(y * width) + x]);
float g = static_cast< float >(inputImage[(width * height) + (y * width) + x]);
float b = static_cast< float >(inputImage[(2 * width * height) + (y * width) + x]);
grayPix = (0.3f * r) + (0.59f * g) + (0.11f * b);
grayImage[(y * width) + x] = static_cast< unsigned char >(grayPix);
}
}
kernelTime.stop();
*totaltime = kernelTime.getElapsed();
}
//-------------------------------------------------------------------------------------------------------------------
// histogram1D
//-------------------------------------------------------------------------------------------------------------------
#ifdef TEXTURE_MEM
__global__ void histogram1DCudaKernel(unsigned int *data) {
// load width and height
unsigned int width = getWidth(data);
unsigned int height = getHeight(data);
// load block ID's. Thread ids
int y = threadIdx.y + blockIdx.y * blockDim.y;
int x = threadIdx.x + blockIdx.x * blockDim.x;
// get histogram pointer
unsigned int *histogram = getHistogram(data);
unsigned int index;
#ifdef SHARED_MEM
// only run threads that are in the image
if (x < width && y < height) {
// allocate shared mem for this block
__shared__ unsigned char grayImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
// copy from global mem to shared mem
grayImage[threadIdx.x][threadIdx.y] = getElementRed(x, y);
__syncthreads();
// get histogram index
index = static_cast< unsigned int >(grayImage[threadIdx.x][threadIdx.y]);
__syncthreads();
// add pixel to histogram in one threadsafe operation
atomicAdd(&histogram[index], 1);
}
#else
// only run threads that are in the image
if (x < width && y < height) {
// get histogram index
index = static_cast< unsigned int >(getElementRed(x, y));
// add pixel to histogram in one threadsafe operation
atomicAdd(&histogram[index], 1);
}
#endif
}
#else
__global__ void histogram1DCudaKernel(unsigned char* image, unsigned int *data) {
// load width and height
unsigned int width = getWidth(data);
unsigned int height = getHeight(data);
// load block ID's. Thread ids
int y = threadIdx.y + blockIdx.y * blockDim.y;
int x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int pitch = getPitchRedImage(data);
// get histogram pointer
unsigned int *histogram = getHistogram(data);
unsigned int index;
#ifdef SHARED_MEM
// only run threads that are in the image
if (x < width && y < height) {
// allocate shared mem for this block
__shared__ unsigned char grayImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
// copy from global mem to shared mem
grayImage[threadIdx.x][threadIdx.y] = image[(y * pitch) + x];
__syncthreads();
// get histogram index
index = static_cast< unsigned int >(grayImage[threadIdx.x][threadIdx.y]);
__syncthreads();
// add pixel to histogram in one threadsafe operation
atomicAdd(&histogram[index], 1);
}
#else
// only run threads that are in the image
if (x < width && y < height) {
// get histogram index
index = static_cast< unsigned int >(image[(y * pitch) + x]);
// add pixel to histogram in one threadsafe operation
atomicAdd(&histogram[index], 1);
}
#endif
}
#endif
__host__ void histogram1D(unsigned char *grayImage, const int width, const int height,
unsigned int *histogram, const unsigned int HISTOGRAM_SIZE, double *totaltime)
{
NSTimer kernelTime = NSTimer("kernelTime", false, false);
memset(reinterpret_cast< void * >(histogram), 0, HISTOGRAM_SIZE * sizeof(int));
kernelTime.start();
// Kernel
for ( int y = 0; y < height; y++ )
{
for ( int x = 0; x < width; x++ )
{
histogram[static_cast< unsigned int >(grayImage[(y * width) + x])] += 1;
}
}
// /Kernel
kernelTime.stop();
*totaltime = kernelTime.getElapsed();
}
//-------------------------------------------------------------------------------------------------------------------
// contrast1D
//-------------------------------------------------------------------------------------------------------------------
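// The two kernels below scan the histogram for the lowest and highest intensity whose count
// reaches CONTRAST_THRESHOLD and store the results in the shared data buffer; they contain
// no per-thread indexing, so they are presumably launched with a single thread each.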
__global__ void contrastMinKernel(unsigned int *data) {
// load width and height, histogramsize and contrast threshold
unsigned int histogramSize = getHistogramSize(data);
unsigned int contrastThreshold = getContrastThreshold(data);
// get histogram pointer
unsigned int *histogram = getHistogram(data);
// find minimum
unsigned int i = 0;
while ( (i < histogramSize) && (histogram[i] < contrastThreshold) ) {
i++;
}
setMin(data, i);
}
__global__ void contrastMaxKernel(unsigned int *data) {
// load width and height, histogramsize and contrast threshold
unsigned int histogramSize = getHistogramSize(data);
unsigned int contrastThreshold = getContrastThreshold(data);
unsigned int min = getMin(data);
// get histogram pointer
unsigned int *histogram = getHistogram(data);
// find maximum
unsigned int i = histogramSize - 1;
while ( (i > min) && (histogram[i] < contrastThreshold) ) {
i--;
}
setMax(data, i);
}
#ifdef TEXTURE_MEM
__global__ void contrast1DKernel(unsigned char *image, unsigned int *data) {
// load width and height
unsigned int width = getWidth(data);
unsigned int height = getHeight(data);
// load block ID's. Thread ids
int y = threadIdx.y + blockIdx.y * blockDim.y;
int x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int pitch = getPitchRedImage(data);
// calculate difference
float diff = getMax(data) - getMin(data);
// get pixel
unsigned int min = getMin(data);
unsigned int max = getMax(data);
#ifdef SHARED_MEM
// only run threads that are in the image
if (x < width && y < height) {
// allocate shared mem for this block
__shared__ unsigned char grayImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
// copy from global mem to shared mem
grayImage[threadIdx.x][threadIdx.y] = getElementRed(x, y);
__syncthreads();
// apply contrast enhancement
unsigned char pixel = grayImage[threadIdx.x][threadIdx.y];
if ( pixel < min ) {
pixel = 0;
}
else if ( pixel > max ) {
pixel = 255;
}
else {
pixel = static_cast< unsigned char >(255.0f * (pixel - min) / diff);
}
__syncthreads();
// write back pixel
image[(y * pitch) + x] = pixel;
}
#else
// only run threads that are in the image
if (x < width && y < height) {
// apply contrast enhancement
unsigned char pixel = getElementRed(x, y);
if ( pixel < min ) {
pixel = 0;
}
else if ( pixel > max ) {
pixel = 255;
}
else {
pixel = static_cast< unsigned char >(255.0f * (pixel - min) / diff);
}
// write back pixel
image[(y * pitch) + x] = pixel;
}
#endif
}
#else
__global__ void contrast1DKernel(unsigned char *image, unsigned int *data) {
// load width and height
unsigned int width = getWidth(data);
unsigned int height = getHeight(data);
// load block ID's. Thread ids
int y = threadIdx.y + blockIdx.y * blockDim.y;
int x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int pitch = getPitchRedImage(data);
// calculate difference
float diff = getMax(data) - getMin(data);
// get pixel
unsigned int min = getMin(data);
unsigned int max = getMax(data);
#ifdef SHARED_MEM
// only run threads that are in the image
if (x < width && y < height) {
// allocate shared mem for this block
__shared__ unsigned char grayImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
// copy from global mem to shared mem
grayImage[threadIdx.x][threadIdx.y] = image[(y * pitch) + x];
__syncthreads();
// apply contrast enhancement
unsigned char pixel = grayImage[threadIdx.x][threadIdx.y];
if ( pixel < min ) {
pixel = 0;
}
else if ( pixel > max ) {
pixel = 255;
}
else {
pixel = static_cast< unsigned char >(255.0f * (pixel - min) / diff);
}
__syncthreads();
// write back pixel
image[(y * pitch) + x] = pixel;
}
#else
// only run threads that are in the image
if (x < width && y < height) {
// apply contrast enhancement
unsigned char pixel = image[(y * pitch) + x];
if ( pixel < min ) {
pixel = 0;
}
else if ( pixel > max ) {
pixel = 255;
}
else {
pixel = static_cast< unsigned char >(255.0f * (pixel - min) / diff);
}
// write back pixel
image[(y * pitch) + x] = pixel;
}
#endif
}
#endif
__host__ void contrast1D(unsigned char *grayImage, const int width, const int height,
unsigned int *histogram, const unsigned int HISTOGRAM_SIZE,
const unsigned int CONTRAST_THRESHOLD, double *totaltime)
{
unsigned int i = 0;
NSTimer kernelTime = NSTimer("kernelTime", false, false);
while ( (i < HISTOGRAM_SIZE) && (histogram[i] < CONTRAST_THRESHOLD) )
{
i++;
}
unsigned int min = i;
i = HISTOGRAM_SIZE - 1;
while ( (i > min) && (histogram[i] < CONTRAST_THRESHOLD) )
{
i--;
}
unsigned int max = i;
float diff = max - min;
kernelTime.start();
// Kernel
for ( int y = 0; y < height; y++ )
{
for (int x = 0; x < width; x++ )
{
unsigned char pixel = grayImage[(y * width) + x];
if ( pixel < min )
{
pixel = 0;
}
else if ( pixel > max )
{
pixel = 255;
}
else
{
pixel = static_cast< unsigned char >(255.0f * (pixel - min) / diff);
}
grayImage[(y * width) + x] = pixel;
}
}
// /Kernel
kernelTime.stop();
*totaltime = kernelTime.getElapsed();
}
//-------------------------------------------------------------------------------------------------------------------
// triangularSmooth
//-------------------------------------------------------------------------------------------------------------------
#ifdef TEXTURE_MEM
__global__ void triangularSmoothKernel(unsigned char *image, unsigned int *data) {
// load width and height
unsigned int width = getWidth(data);
unsigned int height = getHeight(data);
// load block ID's. Thread ids
int y = threadIdx.y + blockIdx.y * blockDim.y;
int x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int pitch = getPitchGreenImage(data);
// only run threads that are in the image
if (x < width && y < height) {
// declare variables
unsigned int filterItem = 0;
float filterSum = 0.0f;
float smoothPix = 0.0f;
unsigned char value;
int fy, fx;
const float filter[] = {1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
1.0f, 2.0f, 2.0f, 2.0f, 1.0f,
1.0f, 2.0f, 3.0f, 2.0f, 1.0f,
1.0f, 2.0f, 2.0f, 2.0f, 1.0f,
1.0f, 1.0f, 1.0f, 1.0f, 1.0f
};
// do the smoothing
for ( fy = y - 2; fy < y + 3; fy++ )
{
for ( fx = x - 2; fx < x + 3; fx++ )
{
if ( ((fy < 0) || (fy >= height)) || ((fx < 0) || (fx >= width)) )
{
filterItem++;
continue;
}
smoothPix += getElementRed(fx, fy) * filter[filterItem];
filterSum += filter[filterItem];
filterItem++;
}
}
smoothPix /= filterSum;
value = static_cast< unsigned char >(smoothPix);
// write back to global mem
image[(y * pitch) + x] = value;
}
}
#else
__global__ void triangularSmoothKernel(unsigned char *red, unsigned char *green, unsigned int *data) {
// load width and height
unsigned int width = getWidth(data);
unsigned int height = getHeight(data);
// load block ID's. Thread ids
int y = threadIdx.y + blockIdx.y * blockDim.y;
int x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int redpitch = getPitchRedImage(data);
unsigned int greenpitch = getPitchGreenImage(data);
// only run threads that are in the image
if (x < width && y < height) {
// declare variables
unsigned int filterItem = 0;
float filterSum = 0.0f;
float smoothPix = 0.0f;
unsigned char value;
int fy, fx;
const float filter[] = {1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
1.0f, 2.0f, 2.0f, 2.0f, 1.0f,
1.0f, 2.0f, 3.0f, 2.0f, 1.0f,
1.0f, 2.0f, 2.0f, 2.0f, 1.0f,
1.0f, 1.0f, 1.0f, 1.0f, 1.0f
};
// do the smoothing
for ( fy = y - 2; fy < y + 3; fy++ )
{
for ( fx = x - 2; fx < x + 3; fx++ )
{
if ( ((fy < 0) || (fy >= height)) || ((fx < 0) || (fx >= width)) )
{
filterItem++;
continue;
}
smoothPix += red[(fy * redpitch) + fx] * filter[filterItem];
filterSum += filter[filterItem];
filterItem++;
}
}
smoothPix /= filterSum;
value = static_cast< unsigned char >(smoothPix);
// write back to global mem
green[(y * greenpitch) + x] = value;
}
}
#endif
__host__ void triangularSmooth(unsigned char *grayImage, unsigned char *smoothImage, const int width, const int height, double *totaltime)
{
NSTimer kernelTime = NSTimer("kernelTime", false, false);
kernelTime.start();
// Kernel
for ( int y = 0; y < height; y++ )
{
for ( int x = 0; x < width; x++ )
{
unsigned int filterItem = 0;
float filterSum = 0.0f;
float smoothPix = 0.0f;
const float filter[] = {1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
1.0f, 2.0f, 2.0f, 2.0f, 1.0f,
1.0f, 2.0f, 3.0f, 2.0f, 1.0f,
1.0f, 2.0f, 2.0f, 2.0f, 1.0f,
1.0f, 1.0f, 1.0f, 1.0f, 1.0f
};
for ( int fy = y - 2; fy < y + 3; fy++ )
{
for ( int fx = x - 2; fx < x + 3; fx++ )
{
if ( ((fy < 0) || (fy >= height)) || ((fx < 0) || (fx >= width)) )
{
filterItem++;
continue;
}
smoothPix += grayImage[(fy * width) + fx] * filter[filterItem];
filterSum += filter[filterItem];
filterItem++;
}
}
smoothPix /= filterSum;
smoothImage[(y * width) + x] = static_cast< unsigned char >(smoothPix);
}
}
// /Kernel
kernelTime.stop();
*totaltime = kernelTime.getElapsed();
}
__host__ void imageProcess(unsigned char *inputImage, unsigned char *smoothImage, const int width, const int height,
const unsigned int HISTOGRAM_SIZE, const unsigned int CONTRAST_THRESHOLD, double *totaltime) {
//-------------------------------------------------------------------------------------------------------------------
// calculation occupancy GPU
//-------------------------------------------------------------------------------------------------------------------
// variables
unsigned int numBlocks[2];
unsigned int numThreads[2];
unsigned int widthCounter, heightCounter;
// calculate number of blocks in both directions
widthCounter = 1;
while (width > numMultiProcessors * widthCounter * numThreadsPerBlock[0]) {
widthCounter++;
}
numBlocks[0] = numMultiProcessors * widthCounter;
heightCounter = 1;
while (height > numMultiProcessors * heightCounter * numThreadsPerBlock[1]) {
heightCounter++;
}
numBlocks[1] = numMultiProcessors * heightCounter;
dim3 blockGrid(numBlocks[0], numBlocks[1]);
// calculate number of threads per block in both directions
numThreads[0] = width / numBlocks[0];
if (width % numBlocks[0] > 0)
numThreads[0]++;
numThreads[1] = height / numBlocks[1];
if (height % numBlocks[1] > 0)
numThreads[1]++;
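// the while loops above guarantee width <= numBlocks[0] * MAX_BLOCKSIZE and height <= numBlocks[1] * MAX_BLOCKSIZE,
// so numThreads[0] and numThreads[1] never exceed MAX_BLOCKSIZE and the MAX_BLOCKSIZE x MAX_BLOCKSIZE
// shared-memory tiles used in the kernels always fit one block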
dim3 threadGrid(numThreads[0], numThreads[1]);
//-------------------------------------------------------------------------------------------------------------------
// Write Memory to GPU device
//-------------------------------------------------------------------------------------------------------------------
// device variables
unsigned char *dev_redimage, *dev_greenimage, *dev_blueimage;
unsigned int *dev_data;
// host variables
NSTimer kernelTime = NSTimer("kernelTime", false, false);
// initialize pitch values (will be filled in by the malloc function later)
size_t redimage_pitch = 0;
size_t greenimage_pitch = 0;
size_t blueimage_pitch = 0;
// allocate 2D memory on GPU
if (hipMallocPitch<unsigned char>(&dev_redimage, &redimage_pitch, width, height) != hipSuccess) {
cout << "imageProcess - cuda pitch malloc red failed" << endl;
exit(1);
}
if (hipMallocPitch<unsigned char>(&dev_greenimage, &greenimage_pitch, width, height) != hipSuccess) {
cout << "imageProcess - cuda pitch malloc green failed" << endl;
exit(1);
}
if (hipMallocPitch<unsigned char>(&dev_blueimage, &blueimage_pitch, width, height) != hipSuccess) {
cout << "imageProcess - cuda pitch malloc blue failed" << endl;
exit(1);
}
if (hipMalloc(&dev_data, HISTOGRAM_SIZE * sizeof(int) + 9 * sizeof(int)) != hipSuccess) {
cout << "imageProcess - cuda malloc data failed" << endl;
exit(1);
}
unsigned int data[DATA_SIZE] = {width, height, HISTOGRAM_SIZE, CONTRAST_THRESHOLD, redimage_pitch, greenimage_pitch, blueimage_pitch};
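// device-side header layout: [0]=width, [1]=height, [2]=histogram size, [3]=contrast threshold,
// [4..6]=row pitches of the three channel buffers; [7]=min and [8]=max are filled in on the device,
// and the histogram itself starts at index 9 (hence the 9 extra ints allocated above)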
// start timing
kernelTime.start();
// copy data from host to device.
if (hipMemcpy2D(dev_redimage, redimage_pitch, &inputImage[0], width, width, height, hipMemcpyHostToDevice) != hipSuccess) {
cout << "imageProcess - cuda 2D mem cpy red failed" << endl;
exit(1);
}
if (hipMemcpy2D(dev_greenimage, greenimage_pitch, &inputImage[width * height], width, width, height, hipMemcpyHostToDevice) != hipSuccess) {
cout << "imageProcess - cuda 2D mem cpy green failed" << endl;
exit(1);
}
if (hipMemcpy2D(dev_blueimage, blueimage_pitch, &inputImage[2 * width * height], width, width, height, hipMemcpyHostToDevice) != hipSuccess) {
cout << "imageProcess - cuda 2D mem cpy blue failed" << endl;
exit(1);
}
if (hipMemcpy(dev_data, data, DATA_SIZE * sizeof(int), hipMemcpyHostToDevice) != hipSuccess) {
cout << "imageProcess - cuda 2D mem cpy data failed" << endl;
exit(1);
}
if (hipMemset(&dev_data[9], 0, HISTOGRAM_SIZE * sizeof(int)) != hipSuccess) {
cout << "imageProcess - cuda mem set histogram failed" << endl;
exit(1);
}
// stop timing
kernelTime.stop();
totaltime[0] = kernelTime.getElapsed();
#ifdef TEXTURE_MEM
//Bind the image to the texture. Now the kernel will read the input image through the texture cache.
if (hipBindTexture2D(NULL, texture2DRed, dev_redimage, width, height, redimage_pitch) != hipSuccess) {
cout << "imageProcess - cuda bind 2D texture red failed" << endl;
exit(1);
}
if (hipBindTexture2D(NULL, texture2DGreen, dev_greenimage, width, height, greenimage_pitch) != hipSuccess) {
cout << "imageProcess - cuda bind 2D texture green failed" << endl;
exit(1);
}
if (hipBindTexture2D(NULL, texture2DBlue, dev_blueimage, width, height, blueimage_pitch) != hipSuccess) {
cout << "imageProcess - bind 2D texture blue failed" << endl;
exit(1);
}
// set border access to zero
texture2DRed.addressMode[0] = texture2DRed.addressMode[1] = hipAddressModeBorder;
texture2DGreen.addressMode[0] = texture2DGreen.addressMode[1] = hipAddressModeBorder;
texture2DBlue.addressMode[0] = texture2DBlue.addressMode[1] = hipAddressModeBorder;
#endif
//-------------------------------------------------------------------------------------------------------------------
// RGB to gray scale conversion
//-------------------------------------------------------------------------------------------------------------------
// start timing
kernelTime.reset();
kernelTime.start();
// create grid for kernel functions, execute the kernel on the GPU
#ifdef TEXTURE_MEM
hipLaunchKernelGGL(( rgb2grayCudaKernel) , dim3(blockGrid), dim3(threadGrid), 0, 0, dev_redimage, dev_data);
#else
hipLaunchKernelGGL(( rgb2grayCudaKernel) , dim3(blockGrid), dim3(threadGrid), 0, 0, dev_redimage, dev_greenimage, dev_blueimage, dev_data);
#endif
if (hipGetLastError() != hipSuccess) {
cout << "imageProcess - cuda start rgb2gray kernel on device failed" << endl;
exit(1);
}
// stop timing
kernelTime.stop();
totaltime[1] = kernelTime.getElapsed();
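// note: kernel launches are asynchronous, so without a hipDeviceSynchronize() before kernelTime.stop()
// this (and the kernel timings below) mostly measures launch overhead rather than kernel execution time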
//-------------------------------------------------------------------------------------------------------------------
// Creating histogram
//-------------------------------------------------------------------------------------------------------------------
// start timing
kernelTime.reset();
kernelTime.start();
// create grid for kernel functions, execute the kernel on the GPU
#ifdef TEXTURE_MEM
hipLaunchKernelGGL(( histogram1DCudaKernel) , dim3(blockGrid), dim3(threadGrid), 0, 0, dev_data);
#else
hipLaunchKernelGGL(( histogram1DCudaKernel) , dim3(blockGrid), dim3(threadGrid), 0, 0, dev_redimage, dev_data);
#endif
if (hipGetLastError() != hipSuccess) {
cout << "imageProcess - cuda start histogram1D kernel on device failed" << endl;
exit(1);
}
// stop timing
kernelTime.stop();
totaltime[2] = kernelTime.getElapsed();
//-------------------------------------------------------------------------------------------------------------------
// Computing contrast
//-------------------------------------------------------------------------------------------------------------------
// start timing
kernelTime.reset();
kernelTime.start();
// create grid for kernel functions, execute the kernel on the GPU
hipLaunchKernelGGL(( contrastMinKernel) , dim3(1), dim3(1), 0, 0, dev_data);
if (hipGetLastError() != hipSuccess) {
cout << "imageProcess - cuda start contrastMin kernel on device failed" << endl;
exit(1);
}
// create grid for kernel functions, execute the kernel on the GPU
hipLaunchKernelGGL(( contrastMaxKernel) , dim3(1), dim3(1), 0, 0, dev_data);
if (hipGetLastError() != hipSuccess) {
cout << "imageProcess - cuda start contrastMax kernel on device failed" << endl;
exit(1);
}
// create grid for kernel functions, execute the kernel on the GPU
#ifdef TEXTURE_MEM
hipLaunchKernelGGL(( contrast1DKernel) , dim3(blockGrid), dim3(threadGrid), 0, 0, dev_redimage, dev_data);
#else
hipLaunchKernelGGL(( contrast1DKernel) , dim3(blockGrid), dim3(threadGrid), 0, 0, dev_redimage, dev_data);
#endif
if (hipGetLastError() != hipSuccess) {
cout << "imageProcess - cuda start contrast1D kernel on device failed" << endl;
exit(1);
}
// stop timing
kernelTime.stop();
totaltime[3] = kernelTime.getElapsed();
//-------------------------------------------------------------------------------------------------------------------
// Computing Smoothing
//-------------------------------------------------------------------------------------------------------------------
// start timing
kernelTime.reset();
kernelTime.start();
// create grid for kernel functions, execute the kernel on the GPU
#ifdef TEXTURE_MEM
hipLaunchKernelGGL(( triangularSmoothKernel) , dim3(blockGrid), dim3(threadGrid), 0, 0, dev_greenimage, dev_data);
#else
hipLaunchKernelGGL(( triangularSmoothKernel) , dim3(blockGrid), dim3(threadGrid), 0, 0, dev_redimage, dev_greenimage, dev_data);
#endif
if (hipGetLastError() != hipSuccess) {
cout << "triangularSmoothCuda - cuda start kernels on device failed" << endl;
exit(1);
}
// stop timing
kernelTime.stop();
totaltime[4] = kernelTime.getElapsed();
// start timing
kernelTime.reset();
kernelTime.start();
// read back result from GPU
if (hipMemcpy2D(smoothImage, width, dev_greenimage, greenimage_pitch, width, height, hipMemcpyDeviceToHost) != hipSuccess) {
cout << "triangularSmoothCuda - cuda mem copy smooth image to host failed" << endl;
exit(1);
}
// stop timing
kernelTime.stop();
totaltime[5] = kernelTime.getElapsed();
#ifdef TEXTURE_MEM
//Release the texture
hipUnbindTexture(texture2DRed);
hipUnbindTexture(texture2DGreen);
hipUnbindTexture(texture2DBlue);
#endif
// free memory on GPU
hipFree(dev_redimage);
hipFree(dev_greenimage);
hipFree(dev_blueimage);
hipFree(dev_data);
} | e9951310d75b10f15c215a4c8cb526f96f6b1b6d.cu | //===================================================================================================================
// File: CUDA filter implementation (called from main.cpp)
// Created on: 27-10-11
// Authors: Dirk Vos, Mark Schrauwen, Michiel van der Vlag
//
// Description: The filter file called from main.cpp. This file contains the actual CUDA code. Different functions
// of the code can be enabled\disabled by (un)commenting the macros as defined below.
//===================================================================================================================
#include <Timer.hpp>
#include <iostream>
#include <iomanip>
//-------------------------------------------------------------------------------------------------------------------
// Global definitions, macros
//-------------------------------------------------------------------------------------------------------------------
// Use commenting to disable/enable functions
#define MAX_BLOCKSIZE 32 // The size of one square block. A block will have size MAX_BLOCKSIZE * MAX_BLOCKSIZE
#define DATA_SIZE 7 // Number of header variables filled in by the host; min, max and the histogram are filled in on the device.
#define SHARED_MEM // If uncommented, shared memory will be used.
#define TEXTURE_MEM // If uncommented, Texture memory will be used.
using LOFAR::NSTimer;
using std::cout;
using std::cerr;
using std::endl;
using std::fixed;
using std::setprecision;
// global information
static unsigned int numMultiProcessors;
static unsigned int numThreadsPerBlock[2];
// define 2D textures
texture<unsigned char, cudaTextureType2D, cudaReadModeElementType> texture2DRed;
texture<unsigned char, cudaTextureType2D, cudaReadModeElementType> texture2DGreen;
texture<unsigned char, cudaTextureType2D, cudaReadModeElementType> texture2DBlue;
//-------------------------------------------------------------------------------------------------------------------
// Memory Mapping
//-------------------------------------------------------------------------------------------------------------------
/**
* unsigned int *dev_data is filled in the following way:
* index content
* 0 width
* 1 height
* 2 HISTOGRAM_SIZE
* 3 CONTRAST_THRESHOLD
* 4 pitch red image
* 5 pitch green image
* 6 pitch blue image
* 7 min (filled on device)
* 8 max (filled on device)
* 9 histogram (filled on device)
*/
//-------------------------------------------------------------------------------------------------------------------
// Device discovery
//-------------------------------------------------------------------------------------------------------------------
__host__ void getCudaDeviceInformation(void) {
// get GPU device
int device;
if (cudaGetDevice(&device) != cudaSuccess) {
cout << "main - cuda get device failed" << endl;
exit(1);
}
// get GPU properties
cudaDeviceProp prop;
if (cudaGetDeviceProperties (&prop, device) != cudaSuccess) {
cout << "main - cuda get device properties failed" << endl;
exit(1);
}
// save to global information
numMultiProcessors = prop.multiProcessorCount;
numThreadsPerBlock[0] = MAX_BLOCKSIZE;
numThreadsPerBlock[1] = MAX_BLOCKSIZE;
}
//-------------------------------------------------------------------------------------------------------------------
// helper functions
//-------------------------------------------------------------------------------------------------------------------
__device__ unsigned char getElementRed(unsigned int x, unsigned int y) {
return tex2D(texture2DRed, x, y);
}
__device__ unsigned char getElementGreen(unsigned int x, unsigned int y) {
return tex2D(texture2DGreen, x, y);
}
__device__ unsigned char getElementBlue(unsigned int x, unsigned int y) {
return tex2D(texture2DBlue, x, y);
}
__device__ unsigned int getWidth(unsigned int *data) {
return data[0];
}
__device__ unsigned int getHeight(unsigned int *data) {
return data[1];
}
__device__ unsigned int getHistogramSize(unsigned int *data) {
return data[2];
}
__device__ unsigned int getContrastThreshold(unsigned int *data) {
return data[3];
}
__device__ unsigned int getPitchRedImage(unsigned int *data) {
return data[4];
}
__device__ unsigned int getPitchGreenImage(unsigned int *data) {
return data[5];
}
__device__ unsigned int getPitchBlueImage(unsigned int *data) {
return data[6];
}
__device__ unsigned int getMin(unsigned int *data) {
return data[7];
}
__device__ unsigned int getMax(unsigned int *data) {
return data[8];
}
__device__ void setMin(unsigned int *data, unsigned int value) {
data[7] = value;
}
__device__ void setMax(unsigned int *data, unsigned int value) {
data[8] = value;
}
__device__ unsigned int * getHistogram(unsigned int *data) {
return &data[9];
}
//-------------------------------------------------------------------------------------------------------------------
// rgb2gray
//-------------------------------------------------------------------------------------------------------------------
#ifdef TEXTURE_MEM
__global__ void rgb2grayCudaKernel(unsigned char *image, unsigned int *data) {
// load width and height
unsigned int width = getWidth(data);
unsigned int height = getHeight(data);
// load block ID's. Thread ids
int y = threadIdx.y + blockIdx.y * blockDim.y;
int x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int pitch = getPitchRedImage(data);
#ifdef SHARED_MEM
// only run threads that are in the image
if (x < width && y < height) {
// allocate shared mem for this block
__shared__ unsigned char redImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
__shared__ unsigned char greenImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
__shared__ unsigned char blueImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
// copy from global mem to shared mem
redImage[threadIdx.x][threadIdx.y] = getElementRed(x, y);
greenImage[threadIdx.x][threadIdx.y] = getElementGreen(x, y);
blueImage[threadIdx.x][threadIdx.y] = getElementBlue(x, y);
__syncthreads();
// execute grey scaling code
float grayPix = 0.0f;
float r = static_cast< float >(redImage[threadIdx.x][threadIdx.y]);
float g = static_cast< float >(greenImage[threadIdx.x][threadIdx.y]);
float b = static_cast< float >(blueImage[threadIdx.x][threadIdx.y]);
grayPix = (0.3f * r) + (0.59f * g) + (0.11f * b);
__syncthreads();
// write back to global mem
image[(y * pitch) + x] = static_cast< unsigned char >(grayPix);
}
#else
// only run threads that are in the image
if (x < width && y < height) {
// execute grey scaling code without shared mem
float grayPix = 0.0f;
float r = static_cast< float >(getElementRed(x, y));
float g = static_cast< float >(getElementGreen(x, y));
float b = static_cast< float >(getElementBlue(x, y));
grayPix = (0.3f * r) + (0.59f * g) + (0.11f * b);
// write back to global mem
image[(y * pitch) + x] = static_cast< unsigned char >(grayPix);
}
#endif
}
#else
__global__ void rgb2grayCudaKernel(unsigned char *red, unsigned char *green, unsigned char *blue, unsigned int *data) {
// load width and height
unsigned int width = getWidth(data);
unsigned int height = getHeight(data);
// load block ID's. Thread ids
int y = threadIdx.y + blockIdx.y * blockDim.y;
int x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int redpitch = getPitchRedImage(data);
unsigned int greenpitch = getPitchGreenImage(data);
unsigned int bluepitch = getPitchBlueImage(data);
#ifdef SHARED_MEM
// only run threads that are in the image
if (x < width && y < height) {
// allocate shared mem for this block
__shared__ unsigned char redImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
__shared__ unsigned char greenImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
__shared__ unsigned char blueImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
// copy from global mem to shared mem
redImage[threadIdx.x][threadIdx.y] = red[(y * redpitch) + x];
greenImage[threadIdx.x][threadIdx.y] = green[(y * greenpitch) + x];
blueImage[threadIdx.x][threadIdx.y] = blue[(y * bluepitch) + x];
__syncthreads();
// execute grey scaling code
float grayPix = 0.0f;
float r = static_cast< float >(redImage[threadIdx.x][threadIdx.y]);
float g = static_cast< float >(greenImage[threadIdx.x][threadIdx.y]);
float b = static_cast< float >(blueImage[threadIdx.x][threadIdx.y]);
grayPix = (0.3f * r) + (0.59f * g) + (0.11f * b);
__syncthreads();
// write back to global mem
red[(y * redpitch) + x] = static_cast< unsigned char >(grayPix);
}
#else
// only run threads that are in the image
if (x < width && y < height) {
// execute grey scaling code without shared mem
float grayPix = 0.0f;
float r = static_cast< float >(red[(y * redpitch) + x]);
float g = static_cast< float >(green[(y * greenpitch) + x]);
float b = static_cast< float >(blue[(y * bluepitch) + x]);
grayPix = (0.3f * r) + (0.59f * g) + (0.11f * b);
// write back to global mem
red[(y * redpitch) + x] = static_cast< unsigned char >(grayPix);
}
#endif
}
#endif
__host__ void rgb2gray(unsigned char *inputImage, unsigned char *grayImage, const int width, const int height, double *totaltime) {
NSTimer kernelTime = NSTimer("kernelTime", false, false);
kernelTime.start();
for ( int y = 0; y < height; y++ )
{
for ( int x = 0; x < width; x++ )
{
float grayPix = 0.0f;
float r = static_cast< float >(inputImage[(y * width) + x]);
float g = static_cast< float >(inputImage[(width * height) + (y * width) + x]);
float b = static_cast< float >(inputImage[(2 * width * height) + (y * width) + x]);
grayPix = (0.3f * r) + (0.59f * g) + (0.11f * b);
grayImage[(y * width) + x] = static_cast< unsigned char >(grayPix);
}
}
kernelTime.stop();
*totaltime = kernelTime.getElapsed();
}
//-------------------------------------------------------------------------------------------------------------------
// histogram1D
//-------------------------------------------------------------------------------------------------------------------
#ifdef TEXTURE_MEM
__global__ void histogram1DCudaKernel(unsigned int *data) {
// load width and height
unsigned int width = getWidth(data);
unsigned int height = getHeight(data);
// load block ID's. Thread ids
int y = threadIdx.y + blockIdx.y * blockDim.y;
int x = threadIdx.x + blockIdx.x * blockDim.x;
// get histogram pointer
unsigned int *histogram = getHistogram(data);
unsigned int index;
#ifdef SHARED_MEM
// only run threads that are in the image
if (x < width && y < height) {
// allocate shared mem for this block
__shared__ unsigned char grayImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
// copy from global mem to shared mem
grayImage[threadIdx.x][threadIdx.y] = getElementRed(x, y);
__syncthreads();
// get histogram index
index = static_cast< unsigned int >(grayImage[threadIdx.x][threadIdx.y]);
__syncthreads();
// add pixel to histogram in one threadsafe operation
atomicAdd(&histogram[index], 1);
}
#else
// only run threads that are in the image
if (x < width && y < height) {
// get histogram index
index = static_cast< unsigned int >(getElementRed(x, y));
// add pixel to histogram in one threadsafe operation
atomicAdd(&histogram[index], 1);
}
#endif
}
#else
__global__ void histogram1DCudaKernel(unsigned char* image, unsigned int *data) {
// load width and height
unsigned int width = getWidth(data);
unsigned int height = getHeight(data);
// load block ID's. Thread ids
int y = threadIdx.y + blockIdx.y * blockDim.y;
int x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int pitch = getPitchRedImage(data);
// get histogram pointer
unsigned int *histogram = getHistogram(data);
unsigned int index;
#ifdef SHARED_MEM
// only run threads that are in the image
if (x < width && y < height) {
// allocate shared mem for this block
__shared__ unsigned char grayImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
// copy from global mem to shared mem
grayImage[threadIdx.x][threadIdx.y] = image[(y * pitch) + x];
__syncthreads();
// get histogram index
index = static_cast< unsigned int >(grayImage[threadIdx.x][threadIdx.y]);
__syncthreads();
// add pixel to histogram in one threadsafe operation
atomicAdd(&histogram[index], 1);
}
#else
// only run threads that are in the image
if (x < width && y < height) {
// get histogram index
index = static_cast< unsigned int >(image[(y * pitch) + x]);
// add pixel to histogram in one threadsafe operation
atomicAdd(&histogram[index], 1);
}
#endif
}
#endif
__host__ void histogram1D(unsigned char *grayImage, const int width, const int height,
unsigned int *histogram, const unsigned int HISTOGRAM_SIZE, double *totaltime)
{
NSTimer kernelTime = NSTimer("kernelTime", false, false);
memset(reinterpret_cast< void * >(histogram), 0, HISTOGRAM_SIZE * sizeof(int));
kernelTime.start();
// Kernel
for ( int y = 0; y < height; y++ )
{
for ( int x = 0; x < width; x++ )
{
histogram[static_cast< unsigned int >(grayImage[(y * width) + x])] += 1;
}
}
// /Kernel
kernelTime.stop();
*totaltime = kernelTime.getElapsed();
}
//-------------------------------------------------------------------------------------------------------------------
// contrast1D
//-------------------------------------------------------------------------------------------------------------------
__global__ void contrastMinKernel(unsigned int *data) {
// load width and height, histogramsize and contrast threshold
unsigned int histogramSize = getHistogramSize(data);
unsigned int contrastThreshold = getContrastThreshold(data);
// get histogram pointer
unsigned int *histogram = getHistogram(data);
// find minimum
unsigned int i = 0;
while ( (i < histogramSize) && (histogram[i] < contrastThreshold) ) {
i++;
}
setMin(data, i);
}
__global__ void contrastMaxKernel(unsigned int *data) {
// load width and height, histogramsize and contrast threshold
unsigned int histogramSize = getHistogramSize(data);
unsigned int contrastThreshold = getContrastThreshold(data);
unsigned int min = getMin(data);
// get histogram pointer
unsigned int *histogram = getHistogram(data);
// find maximum
unsigned int i = histogramSize - 1;
while ( (i > min) && (histogram[i] < contrastThreshold) ) {
i--;
}
setMax(data, i);
}
#ifdef TEXTURE_MEM
__global__ void contrast1DKernel(unsigned char *image, unsigned int *data) {
// load width and height
unsigned int width = getWidth(data);
unsigned int height = getHeight(data);
// load block ID's. Thread ids
int y = threadIdx.y + blockIdx.y * blockDim.y;
int x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int pitch = getPitchRedImage(data);
// calculate difference
float diff = getMax(data) - getMin(data);
// get pixel
unsigned int min = getMin(data);
unsigned int max = getMax(data);
#ifdef SHARED_MEM
// only run threads that are in the image
if (x < width && y < height) {
// allocate shared mem for this block
__shared__ unsigned char grayImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
// copy from global mem to shared mem
grayImage[threadIdx.x][threadIdx.y] = getElementRed(x, y);
__syncthreads();
// apply contrast enhancement
unsigned char pixel = grayImage[threadIdx.x][threadIdx.y];
if ( pixel < min ) {
pixel = 0;
}
else if ( pixel > max ) {
pixel = 255;
}
else {
pixel = static_cast< unsigned char >(255.0f * (pixel - min) / diff);
}
__syncthreads();
// write back pixel
image[(y * pitch) + x] = pixel;
}
#else
// only run threads that are in the image
if (x < width && y < height) {
// apply contrast enhancement
unsigned char pixel = getElementRed(x, y);
if ( pixel < min ) {
pixel = 0;
}
else if ( pixel > max ) {
pixel = 255;
}
else {
pixel = static_cast< unsigned char >(255.0f * (pixel - min) / diff);
}
// write back pixel
image[(y * pitch) + x] = pixel;
}
#endif
}
#else
__global__ void contrast1DKernel(unsigned char *image, unsigned int *data) {
// load width and height
unsigned int width = getWidth(data);
unsigned int height = getHeight(data);
// load block ID's. Thread ids
int y = threadIdx.y + blockIdx.y * blockDim.y;
int x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int pitch = getPitchRedImage(data);
// calculate difference
float diff = getMax(data) - getMin(data);
// get pixel
unsigned int min = getMin(data);
unsigned int max = getMax(data);
#ifdef SHARED_MEM
// only run threads that are in the image
if (x < width && y < height) {
// allocate shared mem for this block
__shared__ unsigned char grayImage[MAX_BLOCKSIZE][MAX_BLOCKSIZE];
// copy from global mem to shared mem
grayImage[threadIdx.x][threadIdx.y] = image[(y * pitch) + x];
__syncthreads();
// apply contrast enhancement
unsigned char pixel = grayImage[threadIdx.x][threadIdx.y];
if ( pixel < min ) {
pixel = 0;
}
else if ( pixel > max ) {
pixel = 255;
}
else {
pixel = static_cast< unsigned char >(255.0f * (pixel - min) / diff);
}
__syncthreads();
// write back pixel
image[(y * pitch) + x] = pixel;
}
#else
// only run threads that are in the image
if (x < width && y < height) {
// apply contrast enhancement
unsigned char pixel = image[(y * pitch) + x];
if ( pixel < min ) {
pixel = 0;
}
else if ( pixel > max ) {
pixel = 255;
}
else {
pixel = static_cast< unsigned char >(255.0f * (pixel - min) / diff);
}
// write back pixel
image[(y * pitch) + x] = pixel;
}
#endif
}
#endif
__host__ void contrast1D(unsigned char *grayImage, const int width, const int height,
unsigned int *histogram, const unsigned int HISTOGRAM_SIZE,
const unsigned int CONTRAST_THRESHOLD, double *totaltime)
{
unsigned int i = 0;
NSTimer kernelTime = NSTimer("kernelTime", false, false);
while ( (i < HISTOGRAM_SIZE) && (histogram[i] < CONTRAST_THRESHOLD) )
{
i++;
}
unsigned int min = i;
i = HISTOGRAM_SIZE - 1;
while ( (i > min) && (histogram[i] < CONTRAST_THRESHOLD) )
{
i--;
}
unsigned int max = i;
float diff = max - min;
kernelTime.start();
// Kernel
for ( int y = 0; y < height; y++ )
{
for (int x = 0; x < width; x++ )
{
unsigned char pixel = grayImage[(y * width) + x];
if ( pixel < min )
{
pixel = 0;
}
else if ( pixel > max )
{
pixel = 255;
}
else
{
pixel = static_cast< unsigned char >(255.0f * (pixel - min) / diff);
}
grayImage[(y * width) + x] = pixel;
}
}
// /Kernel
kernelTime.stop();
*totaltime = kernelTime.getElapsed();
}
//-------------------------------------------------------------------------------------------------------------------
// triangularSmooth
//-------------------------------------------------------------------------------------------------------------------
#ifdef TEXTURE_MEM
__global__ void triangularSmoothKernel(unsigned char *image, unsigned int *data) {
// load width and height
unsigned int width = getWidth(data);
unsigned int height = getHeight(data);
// load block ID's. Thread ids
int y = threadIdx.y + blockIdx.y * blockDim.y;
int x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int pitch = getPitchGreenImage(data);
// only run threads that are in the image
if (x < width && y < height) {
// declare variables
unsigned int filterItem = 0;
float filterSum = 0.0f;
float smoothPix = 0.0f;
unsigned char value;
int fy, fx;
const float filter[] = {1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
1.0f, 2.0f, 2.0f, 2.0f, 1.0f,
1.0f, 2.0f, 3.0f, 2.0f, 1.0f,
1.0f, 2.0f, 2.0f, 2.0f, 1.0f,
1.0f, 1.0f, 1.0f, 1.0f, 1.0f
};
// do the smoothing
for ( fy = y - 2; fy < y + 3; fy++ )
{
for ( fx = x - 2; fx < x + 3; fx++ )
{
if ( ((fy < 0) || (fy >= height)) || ((fx < 0) || (fx >= width)) )
{
filterItem++;
continue;
}
smoothPix += getElementRed(fx, fy) * filter[filterItem];
filterSum += filter[filterItem];
filterItem++;
}
}
smoothPix /= filterSum;
value = static_cast< unsigned char >(smoothPix);
// write back to global mem
image[(y * pitch) + x] = value;
}
}
#else
__global__ void triangularSmoothKernel(unsigned char *red, unsigned char *green, unsigned int *data) {
// load width and height
unsigned int width = getWidth(data);
unsigned int height = getHeight(data);
// load block ID's. Thread ids
int y = threadIdx.y + blockIdx.y * blockDim.y;
int x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int redpitch = getPitchRedImage(data);
unsigned int greenpitch = getPitchGreenImage(data);
// only run threads that are in the image
if (x < width && y < height) {
// declare variables
unsigned int filterItem = 0;
float filterSum = 0.0f;
float smoothPix = 0.0f;
unsigned char value;
int fy, fx;
const float filter[] = {1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
1.0f, 2.0f, 2.0f, 2.0f, 1.0f,
1.0f, 2.0f, 3.0f, 2.0f, 1.0f,
1.0f, 2.0f, 2.0f, 2.0f, 1.0f,
1.0f, 1.0f, 1.0f, 1.0f, 1.0f
};
// do the smoothing
for ( fy = y - 2; fy < y + 3; fy++ )
{
for ( fx = x - 2; fx < x + 3; fx++ )
{
if ( ((fy < 0) || (fy >= height)) || ((fx < 0) || (fx >= width)) )
{
filterItem++;
continue;
}
smoothPix += red[(fy * redpitch) + fx] * filter[filterItem];
filterSum += filter[filterItem];
filterItem++;
}
}
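// normalize by the sum of the filter weights that were actually applied, so pixels near the
// image border are averaged only over their in-bounds neighbours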
smoothPix /= filterSum;
value = static_cast< unsigned char >(smoothPix);
// write back to global mem
green[(y * greenpitch) + x] = value;
}
}
#endif
__host__ void triangularSmooth(unsigned char *grayImage, unsigned char *smoothImage, const int width, const int height, double *totaltime)
{
NSTimer kernelTime = NSTimer("kernelTime", false, false);
kernelTime.start();
// Kernel
for ( int y = 0; y < height; y++ )
{
for ( int x = 0; x < width; x++ )
{
unsigned int filterItem = 0;
float filterSum = 0.0f;
float smoothPix = 0.0f;
const float filter[] = {1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
1.0f, 2.0f, 2.0f, 2.0f, 1.0f,
1.0f, 2.0f, 3.0f, 2.0f, 1.0f,
1.0f, 2.0f, 2.0f, 2.0f, 1.0f,
1.0f, 1.0f, 1.0f, 1.0f, 1.0f
};
for ( int fy = y - 2; fy < y + 3; fy++ )
{
for ( int fx = x - 2; fx < x + 3; fx++ )
{
if ( ((fy < 0) || (fy >= height)) || ((fx < 0) || (fx >= width)) )
{
filterItem++;
continue;
}
smoothPix += grayImage[(fy * width) + fx] * filter[filterItem];
filterSum += filter[filterItem];
filterItem++;
}
}
smoothPix /= filterSum;
smoothImage[(y * width) + x] = static_cast< unsigned char >(smoothPix);
}
}
// /Kernel
kernelTime.stop();
*totaltime = kernelTime.getElapsed();
}
__host__ void imageProcess(unsigned char *inputImage, unsigned char *smoothImage, const int width, const int height,
const unsigned int HISTOGRAM_SIZE, const unsigned int CONTRAST_THRESHOLD, double *totaltime) {
//-------------------------------------------------------------------------------------------------------------------
// calculation occupancy GPU
//-------------------------------------------------------------------------------------------------------------------
// variables
unsigned int numBlocks[2];
unsigned int numThreads[2];
unsigned int widthCounter, heightCounter;
// calculate number of blocks in both directions
widthCounter = 1;
while (width > numMultiProcessors * widthCounter * numThreadsPerBlock[0]) {
widthCounter++;
}
numBlocks[0] = numMultiProcessors * widthCounter;
heightCounter = 1;
while (height > numMultiProcessors * heightCounter * numThreadsPerBlock[1]) {
heightCounter++;
}
numBlocks[1] = numMultiProcessors * heightCounter;
dim3 blockGrid(numBlocks[0], numBlocks[1]);
// calculate number of threads per block in both directions
numThreads[0] = width / numBlocks[0];
if (width % numBlocks[0] > 0)
numThreads[0]++;
numThreads[1] = height / numBlocks[1];
if (height % numBlocks[1] > 0)
numThreads[1]++;
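// the while loops above guarantee width <= numBlocks[0] * MAX_BLOCKSIZE and height <= numBlocks[1] * MAX_BLOCKSIZE,
// so numThreads[0] and numThreads[1] never exceed MAX_BLOCKSIZE and the MAX_BLOCKSIZE x MAX_BLOCKSIZE
// shared-memory tiles used in the kernels always fit one block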
dim3 threadGrid(numThreads[0], numThreads[1]);
//-------------------------------------------------------------------------------------------------------------------
// Write Memory to GPU device
//-------------------------------------------------------------------------------------------------------------------
// device variables
unsigned char *dev_redimage, *dev_greenimage, *dev_blueimage;
unsigned int *dev_data;
// host variables
NSTimer kernelTime = NSTimer("kernelTime", false, false);
// initialize pitch values (will be filled in by the malloc function later)
size_t redimage_pitch = 0;
size_t greenimage_pitch = 0;
size_t blueimage_pitch = 0;
// allocate 2D memory on GPU
if (cudaMallocPitch<unsigned char>(&dev_redimage, &redimage_pitch, width, height) != cudaSuccess) {
cout << "imageProcess - cuda pitch malloc red failed" << endl;
exit(1);
}
if (cudaMallocPitch<unsigned char>(&dev_greenimage, &greenimage_pitch, width, height) != cudaSuccess) {
cout << "imageProcess - cuda pitch malloc green failed" << endl;
exit(1);
}
if (cudaMallocPitch<unsigned char>(&dev_blueimage, &blueimage_pitch, width, height) != cudaSuccess) {
cout << "imageProcess - cuda pitch malloc blue failed" << endl;
exit(1);
}
if (cudaMalloc(&dev_data, HISTOGRAM_SIZE * sizeof(int) + 9 * sizeof(int)) != cudaSuccess) {
cout << "imageProcess - cuda malloc data failed" << endl;
exit(1);
}
unsigned int data[DATA_SIZE] = {width, height, HISTOGRAM_SIZE, CONTRAST_THRESHOLD, redimage_pitch, greenimage_pitch, blueimage_pitch};
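// device-side header layout: [0]=width, [1]=height, [2]=histogram size, [3]=contrast threshold,
// [4..6]=row pitches of the three channel buffers; [7]=min and [8]=max are filled in on the device,
// and the histogram itself starts at index 9 (hence the 9 extra ints allocated above)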
// start timing
kernelTime.start();
// copy data from host to device.
if (cudaMemcpy2D(dev_redimage, redimage_pitch, &inputImage[0], width, width, height, cudaMemcpyHostToDevice) != cudaSuccess) {
cout << "imageProcess - cuda 2D mem cpy red failed" << endl;
exit(1);
}
if (cudaMemcpy2D(dev_greenimage, greenimage_pitch, &inputImage[width * height], width, width, height, cudaMemcpyHostToDevice) != cudaSuccess) {
cout << "imageProcess - cuda 2D mem cpy green failed" << endl;
exit(1);
}
if (cudaMemcpy2D(dev_blueimage, blueimage_pitch, &inputImage[2 * width * height], width, width, height, cudaMemcpyHostToDevice) != cudaSuccess) {
cout << "imageProcess - cuda 2D mem cpy blue failed" << endl;
exit(1);
}
if (cudaMemcpy(dev_data, data, DATA_SIZE * sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) {
cout << "imageProcess - cuda 2D mem cpy data failed" << endl;
exit(1);
}
if (cudaMemset(&dev_data[9], 0, HISTOGRAM_SIZE * sizeof(int)) != cudaSuccess) {
cout << "imageProcess - cuda mem set histogram failed" << endl;
exit(1);
}
// stop timing
kernelTime.stop();
totaltime[0] = kernelTime.getElapsed();
#ifdef TEXTURE_MEM
//Bind the image to the texture. Now the kernel will read the input image through the texture cache.
if (cudaBindTexture2D(NULL, texture2DRed, dev_redimage, width, height, redimage_pitch) != cudaSuccess) {
cout << "imageProcess - cuda bind 2D texture red failed" << endl;
exit(1);
}
if (cudaBindTexture2D(NULL, texture2DGreen, dev_greenimage, width, height, greenimage_pitch) != cudaSuccess) {
cout << "imageProcess - cuda bind 2D texture green failed" << endl;
exit(1);
}
if (cudaBindTexture2D(NULL, texture2DBlue, dev_blueimage, width, height, blueimage_pitch) != cudaSuccess) {
cout << "imageProcess - bind 2D texture blue failed" << endl;
exit(1);
}
// set border access to zero
texture2DRed.addressMode[0] = texture2DRed.addressMode[1] = cudaAddressModeBorder;
texture2DGreen.addressMode[0] = texture2DGreen.addressMode[1] = cudaAddressModeBorder;
texture2DBlue.addressMode[0] = texture2DBlue.addressMode[1] = cudaAddressModeBorder;
#endif
//-------------------------------------------------------------------------------------------------------------------
// RGB to gray scale conversion
//-------------------------------------------------------------------------------------------------------------------
// start timing
kernelTime.reset();
kernelTime.start();
// create grid for kernel functions, execute the kernel on the GPU
#ifdef TEXTURE_MEM
rgb2grayCudaKernel <<< blockGrid, threadGrid>>>(dev_redimage, dev_data);
#else
rgb2grayCudaKernel <<< blockGrid, threadGrid>>>(dev_redimage, dev_greenimage, dev_blueimage, dev_data);
#endif
if (cudaGetLastError() != cudaSuccess) {
cout << "imageProcess - cuda start rgb2gray kernel on device failed" << endl;
exit(1);
}
// stop timing
kernelTime.stop();
totaltime[1] = kernelTime.getElapsed();
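// note: kernel launches are asynchronous, so without a cudaDeviceSynchronize() before kernelTime.stop()
// this (and the kernel timings below) mostly measures launch overhead rather than kernel execution time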
//-------------------------------------------------------------------------------------------------------------------
// Creating histogram
//-------------------------------------------------------------------------------------------------------------------
// start timing
kernelTime.reset();
kernelTime.start();
// create grid for kernel functions, execute the kernel on the GPU
#ifdef TEXTURE_MEM
histogram1DCudaKernel <<< blockGrid, threadGrid>>>(dev_data);
#else
histogram1DCudaKernel <<< blockGrid, threadGrid>>>(dev_redimage, dev_data);
#endif
if (cudaGetLastError() != cudaSuccess) {
cout << "imageProcess - cuda start histogram1D kernel on device failed" << endl;
exit(1);
}
// stop timing
kernelTime.stop();
totaltime[2] = kernelTime.getElapsed();
//-------------------------------------------------------------------------------------------------------------------
// Computing contrast
//-------------------------------------------------------------------------------------------------------------------
// start timing
kernelTime.reset();
kernelTime.start();
// create grid for kernel functions, execute the kernel on the GPU
contrastMinKernel <<< 1, 1>>>(dev_data);
if (cudaGetLastError() != cudaSuccess) {
cout << "imageProcess - cuda start contrastMin kernel on device failed" << endl;
exit(1);
}
// create grid for kernel functions, execute the kernel on the GPU
contrastMaxKernel <<< 1, 1>>>(dev_data);
if (cudaGetLastError() != cudaSuccess) {
cout << "imageProcess - cuda start contrastMax kernel on device failed" << endl;
exit(1);
}
// create grid for kernel functions, execute the kernel on the GPU
#ifdef TEXTURE_MEM
contrast1DKernel <<< blockGrid, threadGrid>>>(dev_redimage, dev_data);
#else
contrast1DKernel <<< blockGrid, threadGrid>>>(dev_redimage, dev_data);
#endif
if (cudaGetLastError() != cudaSuccess) {
cout << "imageProcess - cuda start contrast1D kernel on device failed" << endl;
exit(1);
}
// stop timing
kernelTime.stop();
totaltime[3] = kernelTime.getElapsed();
//-------------------------------------------------------------------------------------------------------------------
// Computing Smoothing
//-------------------------------------------------------------------------------------------------------------------
// start timing
kernelTime.reset();
kernelTime.start();
// create grid for kernel functions, execute the kernel on the GPU
#ifdef TEXTURE_MEM
triangularSmoothKernel <<< blockGrid, threadGrid>>>(dev_greenimage, dev_data);
#else
triangularSmoothKernel <<< blockGrid, threadGrid>>>(dev_redimage, dev_greenimage, dev_data);
#endif
if (cudaGetLastError() != cudaSuccess) {
cout << "triangularSmoothCuda - cuda start kernels on device failed" << endl;
exit(1);
}
// stop timing
kernelTime.stop();
totaltime[4] = kernelTime.getElapsed();
// start timing
kernelTime.reset();
kernelTime.start();
// read back result from GPU
if (cudaMemcpy2D(smoothImage, width, dev_greenimage, greenimage_pitch, width, height, cudaMemcpyDeviceToHost) != cudaSuccess) {
cout << "triangularSmoothCuda - cuda mem copy smooth image to host failed" << endl;
exit(1);
}
// stop timing
kernelTime.stop();
totaltime[5] = kernelTime.getElapsed();
#ifdef TEXTURE_MEM
//Release the texture
cudaUnbindTexture(texture2DRed);
cudaUnbindTexture(texture2DGreen);
cudaUnbindTexture(texture2DBlue);
#endif
// free memory on GPU
cudaFree(dev_redimage);
cudaFree(dev_greenimage);
cudaFree(dev_blueimage);
cudaFree(dev_data);
} |
b92afcb3221ef1851ae96f9a833bd886a60c2b92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x64_k96_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k96_sm80` is for sm80-sm90, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| b92afcb3221ef1851ae96f9a833bd886a60c2b92.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x64_k96_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k96_sm80` is for sm80-sm90, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
ad39ad2b5e887114a810a9c89cf073441d90879b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2022 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for
* license information.
*/
#ifdef USE_CUDA_EXP
#include <LightGBM/cuda/cuda_algorithms.hpp>
#include "cuda_regression_metric.hpp"
namespace LightGBM {
template <typename CUDA_METRIC, bool USE_WEIGHTS>
__global__ void EvalKernel(const data_size_t num_data, const label_t* labels, const label_t* weights,
const double* scores, double* reduce_block_buffer) {
__shared__ double shared_mem_buffer[32];
const data_size_t index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x);
double point_metric = 0.0;
if (index < num_data) {
point_metric = CUDA_METRIC::MetricOnPointCUDA(labels[index], scores[index]);
}
const double block_sum_point_metric = ShuffleReduceSum<double>(point_metric, shared_mem_buffer, NUM_DATA_PER_EVAL_THREAD);
reduce_block_buffer[blockIdx.x] = block_sum_point_metric;
if (USE_WEIGHTS) {
double weight = 0.0;
if (index < num_data) {
weight = static_cast<double>(weights[index]);
}
// every thread of the block takes part in the reduction (as for point_metric above); the per-block
// weight sums are stored after the gridDim.x metric sums, which is where LaunchEvalKernel reads them back
const double block_sum_weight = ShuffleReduceSum<double>(weight, shared_mem_buffer, NUM_DATA_PER_EVAL_THREAD);
reduce_block_buffer[blockIdx.x + gridDim.x] = block_sum_weight;
}
}
template <typename HOST_METRIC, typename CUDA_METRIC>
double CUDARegressionMetricInterface<HOST_METRIC, CUDA_METRIC>::LaunchEvalKernel(const double* score) const {
const int num_blocks = (this->num_data_ + NUM_DATA_PER_EVAL_THREAD - 1) / NUM_DATA_PER_EVAL_THREAD;
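// one block of NUM_DATA_PER_EVAL_THREAD threads per chunk of NUM_DATA_PER_EVAL_THREAD data points
// (one data point per thread); each block writes one partial sum, reduced again across blocks below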
if (this->cuda_weights_ != nullptr) {
hipLaunchKernelGGL(( EvalKernel<CUDA_METRIC, true>), dim3(num_blocks), dim3(NUM_DATA_PER_EVAL_THREAD), 0, 0,
this->num_data_, this->cuda_labels_, this->cuda_weights_, score, reduce_block_buffer_.RawData());
} else {
hipLaunchKernelGGL(( EvalKernel<CUDA_METRIC, false>), dim3(num_blocks), dim3(NUM_DATA_PER_EVAL_THREAD), 0, 0,
this->num_data_, this->cuda_labels_, this->cuda_weights_, score, reduce_block_buffer_.RawData());
}
ShuffleReduceSumGlobal<double, double>(reduce_block_buffer_.RawData(), num_blocks, reduce_block_buffer_inner_.RawData());
double sum_loss = 0.0;
CopyFromCUDADeviceToHost<double>(&sum_loss, reduce_block_buffer_inner_.RawData(), 1, __FILE__, __LINE__);
double sum_weight = static_cast<double>(this->num_data_);
if (this->cuda_weights_ != nullptr) {
ShuffleReduceSumGlobal<double, double>(reduce_block_buffer_.RawData() + num_blocks, num_blocks, reduce_block_buffer_inner_.RawData());
CopyFromCUDADeviceToHost<double>(&sum_weight, reduce_block_buffer_inner_.RawData(), 1, __FILE__, __LINE__);
}
return this->AverageLoss(sum_loss, sum_weight);
}
template double CUDARegressionMetricInterface<RMSEMetric, CUDARMSEMetric>::LaunchEvalKernel(const double* score) const;
} // namespace LightGBM
#endif // USE_CUDA_EXP
| ad39ad2b5e887114a810a9c89cf073441d90879b.cu | /*!
* Copyright (c) 2022 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for
* license information.
*/
#ifdef USE_CUDA_EXP
#include <LightGBM/cuda/cuda_algorithms.hpp>
#include "cuda_regression_metric.hpp"
namespace LightGBM {
template <typename CUDA_METRIC, bool USE_WEIGHTS>
__global__ void EvalKernel(const data_size_t num_data, const label_t* labels, const label_t* weights,
const double* scores, double* reduce_block_buffer) {
__shared__ double shared_mem_buffer[32];
const data_size_t index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x);
double point_metric = 0.0;
if (index < num_data) {
point_metric = CUDA_METRIC::MetricOnPointCUDA(labels[index], scores[index]);
}
const double block_sum_point_metric = ShuffleReduceSum<double>(point_metric, shared_mem_buffer, NUM_DATA_PER_EVAL_THREAD);
reduce_block_buffer[blockIdx.x] = block_sum_point_metric;
if (USE_WEIGHTS) {
double weight = 0.0;
if (index < num_data) {
weight = static_cast<double>(weights[index]);
}
// every thread of the block takes part in the reduction (as for point_metric above); the per-block
// weight sums are stored after the gridDim.x metric sums, which is where LaunchEvalKernel reads them back
const double block_sum_weight = ShuffleReduceSum<double>(weight, shared_mem_buffer, NUM_DATA_PER_EVAL_THREAD);
reduce_block_buffer[blockIdx.x + gridDim.x] = block_sum_weight;
}
}
template <typename HOST_METRIC, typename CUDA_METRIC>
double CUDARegressionMetricInterface<HOST_METRIC, CUDA_METRIC>::LaunchEvalKernel(const double* score) const {
const int num_blocks = (this->num_data_ + NUM_DATA_PER_EVAL_THREAD - 1) / NUM_DATA_PER_EVAL_THREAD;
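// one block of NUM_DATA_PER_EVAL_THREAD threads per chunk of NUM_DATA_PER_EVAL_THREAD data points
// (one data point per thread); each block writes one partial sum, reduced again across blocks below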
if (this->cuda_weights_ != nullptr) {
EvalKernel<CUDA_METRIC, true><<<num_blocks, NUM_DATA_PER_EVAL_THREAD>>>(
this->num_data_, this->cuda_labels_, this->cuda_weights_, score, reduce_block_buffer_.RawData());
} else {
EvalKernel<CUDA_METRIC, false><<<num_blocks, NUM_DATA_PER_EVAL_THREAD>>>(
this->num_data_, this->cuda_labels_, this->cuda_weights_, score, reduce_block_buffer_.RawData());
}
ShuffleReduceSumGlobal<double, double>(reduce_block_buffer_.RawData(), num_blocks, reduce_block_buffer_inner_.RawData());
double sum_loss = 0.0;
CopyFromCUDADeviceToHost<double>(&sum_loss, reduce_block_buffer_inner_.RawData(), 1, __FILE__, __LINE__);
double sum_weight = static_cast<double>(this->num_data_);
if (this->cuda_weights_ != nullptr) {
ShuffleReduceSumGlobal<double, double>(reduce_block_buffer_.RawData() + num_blocks, num_blocks, reduce_block_buffer_inner_.RawData());
CopyFromCUDADeviceToHost<double>(&sum_weight, reduce_block_buffer_inner_.RawData(), 1, __FILE__, __LINE__);
}
return this->AverageLoss(sum_loss, sum_weight);
}
template double CUDARegressionMetricInterface<RMSEMetric, CUDARMSEMetric>::LaunchEvalKernel(const double* score) const;
} // namespace LightGBM
#endif // USE_CUDA_EXP
|
854d505e23d46afbad0c64e5ea01210e0422d5be.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define CHECK(cmnd) { \
hipError_t ierr = cmnd; \
if (ierr != hipSuccess) { \
printf("Error: %s:%d: ", __FILE__, __LINE__, hipGetErrorString(ierr)); \
exit(ierr); \
} \
}
void initData(float * arr, const int n) {
time_t t;
srand((unsigned int) time(&t));
for (int k=0; k<n; k++) arr[k] = (float)(rand() & 0xFF) / 10.0;
}
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp, NULL);
return (double)tp.tv_sec + (double)tp.tv_usec*1.0e-6;
}
void Add_on_host(const float * a, const float * b, float * c, const int n) {
for (int k=0; k<n; k++) c[k] = a[k] + b[k];
}
__global__ void Add_on_device(const float * a, const float * b, float * c, const int n) {
size_t k = threadIdx.x + blockIdx.x * blockDim.x;
if (k < n) c[k] = a[k] + b[k];
}
void check_result(const float *a, const float *b, const int n){
const double epsilon = 1.0e-8;
double diff = 0.0;
bool match = 1;
for (int k=0; k<n; k++){
diff = fabs(a[k] - b[k]); // use fabs for floating-point differences
if (diff > epsilon){
match = 0;
printf("Error: check_result: diff=%16.12f at k=%d\n", diff, k);
break;
}
}
if (match) printf("Success: all elements match better than epsilon=%16.12f\n", epsilon);
}
int main(int argc, char ** argv) {
printf("Info: Starting %s ... \n", argv[0]);
// problem sizes and kernel configs
const int n_elem = 1 << 24;
const size_t n_byte = n_elem * sizeof(float);
const int tpb_x = 128;
dim3 tpb(tpb_x, 1, 1);
dim3 nblocks((n_elem + tpb_x - 1) / tpb_x, 1, 1);
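// grid is rounded up so that nblocks.x * tpb_x >= n_elem; the bounds check (k < n) in the kernel
// keeps the surplus threads of the last block from writing out of range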
// timing
double t0, dt_host, dt_gpu, dt_h2d, dt_kern, dt_d2h;
// addition on host
t0 = cpuSecond();
float *h_a, *h_b, *h_ref; //, *d_ref;
h_a = (float *)malloc(n_byte);
h_b = (float *)malloc(n_byte);
h_ref = (float *)malloc(n_byte); // reference result from host
// d_ref = (float *)malloc(n_byte); // reference result from device
initData(h_a, n_elem);
initData(h_b, n_elem);
// note: zeroing h_a and h_b at this point would discard the random input generated by initData above,
// so only the host result buffer is cleared here
memset(h_ref, 0, n_byte);
Add_on_host(h_a, h_b, h_ref, n_elem);
dt_host = cpuSecond() - t0;
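// note: dt_host also includes the host allocations and initData above (t0 was taken before them),
// so the host/GPU ratio printed at the end compares more than just the compute loops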
// device addition
const int dev = 0;
hipDeviceProp_t dev_prop;
CHECK(hipSetDevice(dev));
CHECK(hipGetDeviceProperties(&dev_prop, dev)); // fill dev_prop before printing its name below
printf("Info: device #%d is: %s\n", dev, dev_prop.name);
t0 = cpuSecond();
float *d_a, *d_b, *d_c;
CHECK(hipMalloc((float **)&d_a, n_byte));
CHECK(hipMalloc((float **)&d_b, n_byte));
CHECK(hipMalloc((float **)&d_c, n_byte));
CHECK(hipMemcpy(d_a, h_a, n_byte, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_b, h_b, n_byte, hipMemcpyHostToDevice));
dt_h2d = cpuSecond() - t0;
// Kernel launch
t0 = cpuSecond();
hipLaunchKernelGGL(( Add_on_device), dim3(nblocks), dim3(tpb), 0, 0, d_a, d_b, d_c, n_elem);
CHECK(hipDeviceSynchronize());
dt_kern = cpuSecond() - t0;
float * h_res;
h_res = (float *)malloc(n_byte);
t0 = cpuSecond();
CHECK(hipMemcpy(h_res, d_c, n_byte, hipMemcpyDeviceToHost));
dt_d2h = cpuSecond() - t0;
check_result(h_ref, h_res, n_elem);
// {
// const double epsilon = 1.0e-8;
// double diff = 0.0;
// bool match = 1;
// for (int k=0; k<n_elem; k++){
// diff = abs(h_ref[k] - d_ref[k]);
// if (diff > epsilon){
// match = 0;
// printf("Error: check_result: diff=%16.12f at k=%d\n", diff, k);
// break;
// }
// }
// if (match) printf("Success: all elements match better than epsilon=%16.12f\n", epsilon);
// }
dt_gpu = dt_h2d + dt_kern + dt_d2h;
printf("\n%s\n", "Timing results ...");
printf("dt_host: %12.8f (sec)\n", dt_host);
printf("dt_h2d: %12.8f (sec)\n", dt_h2d);
printf("dt_kern: %12.8f (sec)\n", dt_kern);
printf("dt_d2h: %12.8f (sec)\n", dt_d2h);
printf("dt_gpu: %12.8f (sec)\n", dt_gpu);
printf("dt_host / dt_gpu = %6.2f \n", dt_host / dt_gpu);
printf("\n");
// Free up the memory on host and device
free(h_a); free(h_b); free(h_ref); free(h_res);
CHECK(hipFree(d_a)); CHECK(hipFree(d_b)); CHECK(hipFree(d_c));
return 0;
}
| 854d505e23d46afbad0c64e5ea01210e0422d5be.cu | #include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <math.h>
#include <cuda_runtime.h>
#define CHECK(cmnd) { \
cudaError_t ierr = cmnd; \
if (ierr != cudaSuccess) { \
printf("Error: %s:%d: ", __FILE__, __LINE__, cudaGetErrorString(ierr)); \
exit(ierr); \
} \
}
void initData(float * arr, const int n) {
time_t t;
srand((unsigned int) time(&t));
for (int k=0; k<n; k++) arr[k] = (float)(rand() & 0xFF) / 10.0;
}
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp, NULL);
return (double)tp.tv_sec + (double)tp.tv_usec*1.0e-6;
}
void Add_on_host(const float * a, const float * b, float * c, const int n) {
for (int k=0; k<n; k++) c[k] = a[k] + b[k];
}
__global__ void Add_on_device(const float * a, const float * b, float * c, const int n) {
size_t k = threadIdx.x + blockIdx.x * blockDim.x;
if (k < n) c[k] = a[k] + b[k];
}
void check_result(const float *a, const float *b, const int n){
const double epsilon = 1.0e-8;
double diff = 0.0;
bool match = 1;
for (int k=0; k<n; k++){
diff = fabs(a[k] - b[k]); // use fabs for floating-point differences
if (diff > epsilon){
match = 0;
printf("Error: check_result: diff=%16.12f at k=%d\n", diff, k);
break;
}
}
if (match) printf("Success: all elements match better than epsilon=%16.12f\n", epsilon);
}
int main(int argc, char ** argv) {
printf("Info: Starting %s ... \n", argv[0]);
// problem sizes and kernel configs
const int n_elem = 1 << 24;
const size_t n_byte = n_elem * sizeof(float);
const int tpb_x = 128;
dim3 tpb(tpb_x, 1, 1);
dim3 nblocks((n_elem + tpb_x - 1) / tpb_x, 1, 1);
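// grid is rounded up so that nblocks.x * tpb_x >= n_elem; the bounds check (k < n) in the kernel
// keeps the surplus threads of the last block from writing out of range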
// timing
double t0, dt_host, dt_gpu, dt_h2d, dt_kern, dt_d2h;
// addition on host
t0 = cpuSecond();
float *h_a, *h_b, *h_ref; //, *d_ref;
h_a = (float *)malloc(n_byte);
h_b = (float *)malloc(n_byte);
h_ref = (float *)malloc(n_byte); // reference result from host
// d_ref = (float *)malloc(n_byte); // reference result from device
initData(h_a, n_elem);
initData(h_b, n_elem);
// note: zeroing h_a and h_b at this point would discard the random input generated by initData above,
// so only the host result buffer is cleared here
memset(h_ref, 0, n_byte);
Add_on_host(h_a, h_b, h_ref, n_elem);
dt_host = cpuSecond() - t0;
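// note: dt_host also includes the host allocations and initData above (t0 was taken before them),
// so the host/GPU ratio printed at the end compares more than just the compute loops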
// device addition
const int dev = 0;
cudaDeviceProp dev_prop;
CHECK(cudaSetDevice(dev));
CHECK(cudaGetDeviceProperties(&dev_prop, dev)); // fill dev_prop before printing its name below
printf("Info: device #%d is: %s\n", dev, dev_prop.name);
t0 = cpuSecond();
float *d_a, *d_b, *d_c;
CHECK(cudaMalloc((float **)&d_a, n_byte));
CHECK(cudaMalloc((float **)&d_b, n_byte));
CHECK(cudaMalloc((float **)&d_c, n_byte));
CHECK(cudaMemcpy(d_a, h_a, n_byte, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_b, h_b, n_byte, cudaMemcpyHostToDevice));
dt_h2d = cpuSecond() - t0;
// Kernel launch
t0 = cpuSecond();
Add_on_device<<<nblocks, tpb>>>(d_a, d_b, d_c, n_elem);
CHECK(cudaDeviceSynchronize());
dt_kern = cpuSecond() - t0;
float * h_res;
h_res = (float *)malloc(n_byte);
t0 = cpuSecond();
CHECK(cudaMemcpy(h_res, d_c, n_byte, cudaMemcpyDeviceToHost));
dt_d2h = cpuSecond() - t0;
check_result(h_ref, h_res, n_elem);
// {
// const double epsilon = 1.0e-8;
// double diff = 0.0;
// bool match = 1;
// for (int k=0; k<n_elem; k++){
// diff = abs(h_ref[k] - d_ref[k]);
// if (diff > epsilon){
// match = 0;
// printf("Error: check_result: diff=%16.12f at k=%d\n", diff, k);
// break;
// }
// }
// if (match) printf("Success: all elements match better than epsilon=%16.12f\n", epsilon);
// }
dt_gpu = dt_h2d + dt_kern + dt_d2h;
printf("\n%s\n", "Timing results ...");
printf("dt_host: %12.8f (sec)\n", dt_host);
printf("dt_h2d: %12.8f (sec)\n", dt_h2d);
printf("dt_kern: %12.8f (sec)\n", dt_kern);
printf("dt_d2h: %12.8f (sec)\n", dt_d2h);
printf("dt_gpu: %12.8f (sec)\n", dt_gpu);
printf("dt_host / dt_gpu = %6.2f \n", dt_host / dt_gpu);
printf("\n");
// Free up the memory on host and device
free(h_a); free(h_b); free(h_ref); free(h_res);
CHECK(cudaFree(d_a)); CHECK(cudaFree(d_b)); CHECK(cudaFree(d_c));
return 0;
}
|
9c62422c302ef0068f7173c3d0f2a7b0f3ab6b50.hip | // !!! This is a file automatically generated by hipify!!!
#include "mex.h"
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include "hemi\hemi.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
char err_str[1000];
sprintf(err_str,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
mexErrMsgTxt(err_str);
}
}
#define HEMI_GRID_STRIDE_LOOP(iter, num) for (int iter = hemiGetElementOffset(); \
iter<num;\
iter+=hemiGetElementStride())
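// grid-stride loop: each thread starts at its global element offset and advances by the overall
// element stride, so the whole [0, num) range is covered regardless of the launch configuration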
HEMI_KERNEL(d_im2col)(float* d_image, int img_c_size, int img_r_size,
int thread_num, int total_length,
int ksize_c, int ksize_r, int channels,
int stride_c, int stride_r,
int padding_c, int padding_r, float* d_output)
{
int out_c_size, out_r_size, out_row, out_col, ch;
int col_length = ksize_c*ksize_r*channels;
int ksize = ksize_c*ksize_r;
int img_channel_size = img_c_size*img_r_size;
out_c_size = (img_c_size-ksize_c+2*padding_c)/stride_c+1;
out_r_size = (img_r_size-ksize_r+2*padding_r)/stride_r+1;
HEMI_GRID_STRIDE_LOOP(idx, thread_num){
//transform the image data to columns
int index = idx;
out_row = index%out_c_size;
index/=out_c_size;
out_col = index%out_r_size;
ch = index/out_r_size;
int col_base = col_length*(idx%total_length)+ksize*ch;
int img_base = img_channel_size*ch;
int ori_c_zero= out_col*stride_c-padding_c;
int ori_r_zero= out_row*stride_r-padding_r;
for (int k_c=0; k_c<ksize_r; k_c++)
for (int k_r=0; k_r<ksize_c; k_r++){
int ori_c = ori_c_zero+k_c;
int ori_r = ori_r_zero+k_r;
d_output[col_base+ k_c*ksize_c+k_r]=(ori_c>=0&&ori_c<img_c_size&&ori_r>=0&&ori_r<img_r_size)?
d_image[img_base+ori_c*img_c_size+ori_r]
//ch*ksize+ k_c*ksize_c+k_r
:0;
//d_output[idx] = col_base;
}
}
}
#define THREAD_PER_BLOCK 256
#define IMG_OUT plhs[0]
#define IMG_IN prhs[0]
#define KSIZE_IN prhs[1]
#define STRIDE_IN prhs[2]
#define PADDING prhs[3]
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
//All code and internal function calls go in here!
if(nrhs<2)
mexErrMsgTxt("Not enough inputs");
double* stride_in;
size_t stride_c, stride_r;
if(nrhs>=3){
stride_in = (double*) mxGetData(STRIDE_IN);
stride_c = (size_t)stride_in[0];
stride_r = (size_t)stride_in[1];
}
else{
stride_c = 1;
stride_r = 1;
}
int padding=0;
if(nrhs==4)
padding = mxGetScalar(PADDING);
float *img_in;
float *img_out;
img_in = (float*)mxGetData(IMG_IN);
double* filter_size;
filter_size = (double*) mxGetData(KSIZE_IN);
size_t k_height, k_width;
k_height = (size_t)filter_size[0];
k_width = (size_t)filter_size[1];
size_t img_height =mxGetDimensions(IMG_IN)[0];
size_t img_width = mxGetDimensions(IMG_IN)[1];
size_t img_channel = mxGetDimensions(IMG_IN)[2];
//mexPrintf("height %d width %d channel %d\n", img_height, img_width, img_channel);
//mexPrintf("filter height %d filter width %d \n", k_height, k_width);
size_t total_size = ((img_height+2*padding-k_height)/stride_c+1)*((img_width+2*padding-k_width)/stride_r+1);
size_t col_size = k_height*k_width*img_channel;
int thread_num = total_size*img_channel;
//mexPrintf("Total %d columns \n", total_size);
IMG_OUT = mxCreateNumericMatrix(col_size, total_size, mxSINGLE_CLASS, mxREAL);
img_out = (float*)mxGetData(IMG_OUT);
/* Start CUDA PROCESSING*/
float *d_img;
float *d_col;
size_t n_pixels = img_height*img_width*img_channel;
gpuErrchk(hipMalloc(&d_img, n_pixels*sizeof(float)));
gpuErrchk(hipMalloc(&d_col, col_size*total_size*sizeof(float)));
/* Copy data to GPU mem */
//copy data
gpuErrchk(hipMemcpy(d_img, img_in, n_pixels*sizeof(float), hipMemcpyHostToDevice));
int numSMs;
hipDeviceGetAttribute(&numSMs, hipDeviceAttributeMultiprocessorCount, 0);
/* launch kernel */
HEMI_KERNEL_LAUNCH(d_im2col, 32*numSMs, THREAD_PER_BLOCK, 0, 0,
d_img, img_height, img_width,
thread_num, total_size,
k_height, k_width, img_channel,
stride_c,stride_r,
padding, padding,
d_col);
gpuErrchk(hipDeviceSynchronize());
/*copy result back to cpu mem */
gpuErrchk(hipMemcpy(img_out, d_col, col_size*total_size*sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipFree(d_img));
gpuErrchk(hipFree(d_col));
return;
}
| 9c62422c302ef0068f7173c3d0f2a7b0f3ab6b50.cu | #include "mex.h"
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#include "hemi\hemi.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
char err_str[1000];
sprintf(err_str,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
mexErrMsgTxt(err_str);
}
}
#define HEMI_GRID_STRIDE_LOOP(iter, num) for (int iter = hemiGetElementOffset(); \
iter<num;\
iter+=hemiGetElementStride())
HEMI_KERNEL(d_im2col)(float* d_image, int img_c_size, int img_r_size,
int thread_num, int total_length,
int ksize_c, int ksize_r, int channels,
int stride_c, int stride_r,
int padding_c, int padding_r, float* d_output)
{
int out_c_size, out_r_size, out_row, out_col, ch;
int col_length = ksize_c*ksize_r*channels;
int ksize = ksize_c*ksize_r;
int img_channel_size = img_c_size*img_r_size;
out_c_size = (img_c_size-ksize_c+2*padding_c)/stride_c+1;
out_r_size = (img_r_size-ksize_r+2*padding_r)/stride_r+1;
HEMI_GRID_STRIDE_LOOP(idx, thread_num){
//transform the image data to columns
int index = idx;
out_row = index%out_c_size;
index/=out_c_size;
out_col = index%out_r_size;
ch = index/out_r_size;
int col_base = col_length*(idx%total_length)+ksize*ch;
int img_base = img_channel_size*ch;
int ori_c_zero= out_col*stride_c-padding_c;
int ori_r_zero= out_row*stride_r-padding_r;
for (int k_c=0; k_c<ksize_r; k_c++)
for (int k_r=0; k_r<ksize_c; k_r++){
int ori_c = ori_c_zero+k_c;
int ori_r = ori_r_zero+k_r;
d_output[col_base+ k_c*ksize_c+k_r]=(ori_c>=0&&ori_c<img_c_size&&ori_r>=0&&ori_r<img_r_size)?
d_image[img_base+ori_c*img_c_size+ori_r]
//ch*ksize+ k_c*ksize_c+k_r
:0;
//d_output[idx] = col_base;
}
}
}
#define THREAD_PER_BLOCK 256
#define IMG_OUT plhs[0]
#define IMG_IN prhs[0]
#define KSIZE_IN prhs[1]
#define STRIDE_IN prhs[2]
#define PADDING prhs[3]
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
//All code and internal function calls go in here!
if(nrhs<2)
mexErrMsgTxt("Not enough inputs");
double* stride_in;
size_t stride_c, stride_r;
if(nrhs>=3){
stride_in = (double*) mxGetData(STRIDE_IN);
stride_c = (size_t)stride_in[0];
stride_r = (size_t)stride_in[1];
}
else{
stride_c = 1;
stride_r = 1;
}
int padding=0;
if(nrhs==4)
padding = mxGetScalar(PADDING);
float *img_in;
float *img_out;
img_in = (float*)mxGetData(IMG_IN);
double* filter_size;
filter_size = (double*) mxGetData(KSIZE_IN);
size_t k_height, k_width;
k_height = (size_t)filter_size[0];
k_width = (size_t)filter_size[1];
size_t img_height =mxGetDimensions(IMG_IN)[0];
size_t img_width = mxGetDimensions(IMG_IN)[1];
size_t img_channel = mxGetDimensions(IMG_IN)[2];
//mexPrintf("height %d width %d channel %d\n", img_height, img_width, img_channel);
//mexPrintf("filter height %d filter width %d \n", k_height, k_width);
size_t total_size = ((img_height+2*padding-k_height)/stride_c+1)*((img_width+2*padding-k_width)/stride_r+1);
size_t col_size = k_height*k_width*img_channel;
int thread_num = total_size*img_channel;
//mexPrintf("Total %d columns \n", total_size);
IMG_OUT = mxCreateNumericMatrix(col_size, total_size, mxSINGLE_CLASS, mxREAL);
img_out = (float*)mxGetData(IMG_OUT);
/* Start CUDA PROCESSING*/
float *d_img;
float *d_col;
size_t n_pixels = img_height*img_width*img_channel;
gpuErrchk(cudaMalloc(&d_img, n_pixels*sizeof(float)));
gpuErrchk(cudaMalloc(&d_col, col_size*total_size*sizeof(float)));
/* Copy data to GPU mem */
//copy data
gpuErrchk(cudaMemcpy(d_img, img_in, n_pixels*sizeof(float), cudaMemcpyHostToDevice));
int numSMs;
cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, 0);
/* launch kernel */
HEMI_KERNEL_LAUNCH(d_im2col, 32*numSMs, THREAD_PER_BLOCK, 0, 0,
d_img, img_height, img_width,
thread_num, total_size,
k_height, k_width, img_channel,
stride_c,stride_r,
padding, padding,
d_col);
gpuErrchk(cudaDeviceSynchronize());
/*copy result back to cpu mem */
gpuErrchk(cudaMemcpy(img_out, d_col, col_size*total_size*sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(d_img));
gpuErrchk(cudaFree(d_col));
return;
}
|
4df14a57b8dbccf51e55b05f3df6f539973f90f9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file writer_impl.cu
* @brief cuDF-IO CSV writer class implementation
*/
#include "writer_impl.hpp"
#include <cudf/copying.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/strings/convert/convert_booleans.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/strings/convert/convert_floats.hpp>
#include <cudf/strings/convert/convert_integers.hpp>
#include <cudf/strings/combine.hpp>
#include <cudf/strings/replace.hpp>
#include <strings/utilities.cuh>
#include <algorithm>
#include <cstring>
#include <iterator>
#include <sstream>
#include <type_traits>
#include <utility>
#include <thrust/count.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_buffer.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/detail/modify_strings.cuh>
namespace cudf {
namespace io {
namespace detail {
namespace csv {
namespace { // anonym.
// helpers:
using namespace cudf::strings;
// predicate to determine if a given string_view contains special characters:
//{"\"", "\n", <delimiter>}
//
struct predicate_special_chars {
explicit predicate_special_chars(string_scalar const& delimiter, hipStream_t stream = 0)
: delimiter_(delimiter.value(stream))
{
}
__device__ bool operator()(string_view const& str_view) const
{
// if (any_of{"\"", "\n", <delimiter>} )
//
constexpr char const* quote_str = "\"";
constexpr char const* newline_str = "\n";
constexpr size_type len1byte{1};
if ((str_view.find(quote_str, len1byte) >= 0) || (str_view.find(newline_str, len1byte) >= 0) ||
(str_view.find(delimiter_) >= 0)) {
return true;
} else {
return false;
}
}
private:
string_view delimiter_;
};
struct probe_special_chars {
probe_special_chars(column_device_view const d_column, predicate_special_chars const& predicate)
: d_column_(d_column), predicate_(predicate)
{
}
__device__ int32_t operator()(size_type idx) const
{
if (d_column_.is_null(idx)) {
return 0; // null string, so no-op
}
string_view d_str = d_column_.template element<string_view>(idx);
if (predicate_(d_str)) {
constexpr char const quote_char = '\"';
// count number of quotes "\""
size_type num_quotes =
thrust::count_if(thrust::seq, d_str.begin(), d_str.end(), [quote_char](char_utf8 chr) {
return chr == quote_char;
});
return d_str.size_bytes() + num_quotes + 2;
} else {
return d_str.size_bytes();
}
}
private:
column_device_view const d_column_;
predicate_special_chars predicate_;
};
struct modify_special_chars {
modify_special_chars(column_device_view const d_column,
int32_t const* d_offsets,
char* d_chars,
predicate_special_chars const& predicate)
: d_column_(d_column), d_offsets_(d_offsets), d_chars_(d_chars), predicate_(predicate)
{
}
__device__ int32_t operator()(size_type idx)
{
using namespace cudf::strings::detail;
if (d_column_.is_null(idx)) {
return 0; // null string, so no-op
}
string_view d_str = d_column_.template element<string_view>(idx);
size_type str_size_bytes = d_str.size_bytes();
char* d_buffer = get_output_ptr(idx);
// assert( d_buffer != nullptr );
if (predicate_(d_str)) {
constexpr char const quote_char = '\"';
constexpr char const* quote_str = "\"";
constexpr char const* str_2quotes = "\"\"";
size_type len1quote{1};
size_type len2quotes{2};
// modify d_str by duplicating all 2bl quotes
// and surrounding whole string by 2bl quotes:
//
// pre-condition: `d_str` is _not_ modified by `d_buffer` manipulation
// because it's a copy of `idx` entry in `d_column_`
//(since `d_column` is const)
//
d_buffer = copy_and_increment(d_buffer, quote_str, len1quote); // add the quote prefix
for (auto itr = d_str.begin(); itr != d_str.end(); ++itr) {
char_utf8 the_chr = *itr;
if (the_chr == quote_char) {
d_buffer = copy_and_increment(d_buffer, str_2quotes, len2quotes); // double the quote;
} else {
d_buffer += from_char_utf8(the_chr, d_buffer);
}
}
d_buffer = copy_and_increment(d_buffer, quote_str, len1quote); // add the quote suffix;
} else {
// copy the source string unmodified:
//(pass-through)
//
memcpy(get_output_ptr(idx), d_str.data(), str_size_bytes);
}
return 0;
}
__device__ char* get_output_ptr(size_type idx)
{
return d_chars_ && d_offsets_ ? d_chars_ + d_offsets_[idx] : nullptr;
}
private:
column_device_view const d_column_;
int32_t const* d_offsets_;
char* d_chars_;
predicate_special_chars predicate_;
};
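// Illustrative example of the escaping rule implemented by probe_special_chars and
// modify_special_chars above (assumed delimiter ','): the row value ab"c,d contains both
// a quote and the delimiter, so probe_special_chars reserves size_bytes(6) + num_quotes(1)
// + 2 = 9 bytes, and modify_special_chars writes "ab""c,d" into that slot.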
struct column_to_strings_fn {
// compile-time predicate that defines unsupported column types;
// based on the conditions used for instantiations of individual
// converters in strings/convert/convert_*.hpp;
//(this should have been a `variable template`,
// instead of a static function, but nvcc (10.0)
// fails to compile var-templs);
//
template <typename column_type>
constexpr static bool is_not_handled(void)
{
// Note: the case (not std::is_same<column_type, bool>::value)
// is already covered by is_integral)
//
return not((std::is_same<column_type, cudf::string_view>::value) ||
(std::is_integral<column_type>::value) ||
(std::is_floating_point<column_type>::value) || (cudf::is_timestamp<column_type>()));
}
explicit column_to_strings_fn(writer_options const& options,
rmm::mr::device_memory_resource* mr = nullptr,
hipStream_t stream = nullptr)
: options_(options), mr_(mr), stream_(stream)
{
}
// Note: `null` replacement with `na_rep` deferred to `concatenate()`
// instead of column-wise; might be faster
//
// Note: Cannot pass `stream` to detail::<fname> version of <fname> calls below, because they are
// not exposed in header (see, for example, detail::concatenate(tbl_view, separator, na_rep, mr,
// stream) is declared and defined in combine.cu); Possible solution: declare `extern`, or just
// declare a prototype inside `namespace cudf::strings::detail`;
// bools:
//
template <typename column_type>
std::enable_if_t<std::is_same<column_type, bool>::value, std::unique_ptr<column>> operator()(
column_view const& column) const
{
auto conv_col_ptr =
cudf::strings::from_booleans(column, options_.true_value(), options_.false_value(), mr_);
return conv_col_ptr;
}
// strings:
//
template <typename column_type>
std::enable_if_t<std::is_same<column_type, cudf::string_view>::value, std::unique_ptr<column>>
operator()(column_view const& column_v) const
{
using namespace cudf::strings::detail;
// handle special characters: {delimiter, '\n', "} in row:
//
// algorithm outline:
//
// target = "\"";
// repl = ""\"\";
//
// str_column_ref = {};
// for each str_row: column_v {
// if ((not null str_row) &&
// (str_row.find("\n") || str_row.find("\"") || str_row.find(delimiter) ))
// str_column_modified = modify(str_row);
// where modify() = duplicate the double quotes, if any; add 2bl quotes prefix/suffix;
//}
//
std::string delimiter{options_.inter_column_delimiter()};
predicate_special_chars pred{delimiter, stream_};
return modify_strings<probe_special_chars, modify_special_chars>(column_v, mr_, stream_, pred);
}
// ints:
//
template <typename column_type>
std::enable_if_t<std::is_integral<column_type>::value && !std::is_same<column_type, bool>::value,
std::unique_ptr<column>>
operator()(column_view const& column) const
{
auto conv_col_ptr = cudf::strings::from_integers(column, mr_);
return conv_col_ptr;
}
// floats:
//
template <typename column_type>
std::enable_if_t<std::is_floating_point<column_type>::value, std::unique_ptr<column>> operator()(
column_view const& column) const
{
auto conv_col_ptr = cudf::strings::from_floats(column, mr_);
return conv_col_ptr;
}
// timestamps:
//
template <typename column_type>
std::enable_if_t<cudf::is_timestamp<column_type>(), std::unique_ptr<column>> operator()(
column_view const& column) const
{
std::string format{"%Y-%m-%dT%H:%M:%SZ"}; // same as default for `from_timestamp`
// handle the cases where delimiter / line-terminator can be
// "-" or ":", in which case they are to be dropped from the format:
//
std::string delimiter{options_.inter_column_delimiter()};
std::string newline{options_.line_terminator()};
constexpr char const* dash{"-"};
constexpr char const* colon{":"};
if (delimiter == dash || newline == dash) {
format.erase(std::remove(format.begin(), format.end(), dash[0]), format.end());
}
if (delimiter == colon || newline == colon) {
format.erase(std::remove(format.begin(), format.end(), colon[0]), format.end());
}
auto conv_col_ptr = cudf::strings::from_timestamps(column, format, mr_);
return conv_col_ptr;
}
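// Illustrative example of the format adjustment above (assumed writer options): if the
// inter-column delimiter is ':', the default format "%Y-%m-%dT%H:%M:%SZ" is reduced to
// "%Y-%m-%dT%H%M%SZ" so that timestamp output cannot be mistaken for a column separator.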
// unsupported type of column:
//
template <typename column_type>
std::enable_if_t<is_not_handled<column_type>(), std::unique_ptr<column>> operator()(
column_view const& column) const
{
CUDF_FAIL("Unsupported column type.");
}
private:
writer_options const& options_;
rmm::mr::device_memory_resource* mr_;
hipStream_t stream_;
};
} // unnamed namespace
// Forward to implementation
writer::writer(std::unique_ptr<data_sink> sink,
writer_options const& options,
rmm::mr::device_memory_resource* mr)
: _impl(std::make_unique<impl>(std::move(sink), options, mr))
{
}
// Destructor within this translation unit
writer::~writer() = default;
writer::impl::impl(std::unique_ptr<data_sink> sink,
writer_options const& options,
rmm::mr::device_memory_resource* mr)
: out_sink_(std::move(sink)), mr_(mr), options_(options)
{
}
// write the header: column names:
//
void writer::impl::write_chunked_begin(table_view const& table,
const table_metadata* metadata,
hipStream_t stream)
{
if ((metadata != nullptr) && (options_.include_header())) {
CUDF_EXPECTS(metadata->column_names.size() == static_cast<size_t>(table.num_columns()),
"Mismatch between number of column headers and table columns.");
std::string delimiter_str{options_.inter_column_delimiter()};
// avoid delimiter after last element:
//
std::stringstream ss;
std::copy(metadata->column_names.begin(),
metadata->column_names.end() - 1,
std::ostream_iterator<std::string>(ss, delimiter_str.c_str()));
ss << metadata->column_names.back() << options_.line_terminator();
out_sink_->host_write(ss.str().data(), ss.str().size());
}
}
void writer::impl::write_chunked(strings_column_view const& str_column_view,
const table_metadata* metadata,
hipStream_t stream)
{
// algorithm outline:
//
// for_each(strings_column.begin(), strings_column.end(),
// [sink = out_sink_](auto str_row) mutable {
// auto host_buffer = str_row.host_buffer();
// sink->host_write(host_buffer_.data(), host_buffer_.size());
// });//or...sink->device_write(device_buffer,...);
//
// added line_terminator functionality
//
CUDF_EXPECTS(str_column_view.size() > 0, "Unexpected empty strings column.");
cudf::string_scalar newline{options_.line_terminator()};
auto p_str_col_w_nl = cudf::strings::join_strings(str_column_view, newline);
strings_column_view strings_column{std::move(p_str_col_w_nl->view())};
auto total_num_bytes = strings_column.chars_size();
char const* ptr_all_bytes = strings_column.chars().data<char>();
if (out_sink_->supports_device_write()) {
// host algorithm call, but the underlying call
// is a device_write taking a device buffer;
//
out_sink_->device_write(ptr_all_bytes, total_num_bytes, stream);
out_sink_->device_write(newline.data(),
newline.size(),
stream); // needs newline at the end, to separate from next chunk
} else {
// no device write possible;
//
// copy the bytes to host, too:
//
thrust::host_vector<char> h_bytes(total_num_bytes);
CUDA_TRY(hipMemcpyAsync(h_bytes.data(),
ptr_all_bytes,
total_num_bytes * sizeof(char),
hipMemcpyDeviceToHost,
stream));
CUDA_TRY(hipStreamSynchronize(stream));
// host algorithm call, where the underlying call
// is also host_write taking a host buffer;
//
char const* ptr_h_bytes = h_bytes.data();
out_sink_->host_write(ptr_h_bytes, total_num_bytes);
out_sink_->host_write(
options_.line_terminator().data(),
options_.line_terminator().size()); // needs newline at the end, to separate from next chunk
}
}
void writer::impl::write(table_view const& table,
const table_metadata* metadata,
hipStream_t stream)
{
CUDF_EXPECTS(table.num_columns() > 0, "Empty table.");
// write header: column names separated by delimiter:
// (even for tables with no rows)
//
write_chunked_begin(table, metadata, stream);
if (table.num_rows() > 0) {
// no need to check same-size columns constraint; auto-enforced by table_view
auto n_rows_per_chunk = options_.rows_per_chunk();
//
// This outputs the CSV in row chunks to save memory.
// Maybe we can use the total_rows*count calculation and a memory threshold
// instead of an arbitrary chunk count.
// The entire CSV chunk must fit in CPU memory before writing it out.
//
if (n_rows_per_chunk % 8) // must be divisible by 8
n_rows_per_chunk += 8 - (n_rows_per_chunk % 8);
CUDF_EXPECTS(n_rows_per_chunk >= 8, "write_csv: invalid chunk_rows; must be at least 8");
auto exec = rmm::exec_policy(stream);
auto num_rows = table.num_rows();
std::vector<table_view> vector_views;
if (num_rows <= n_rows_per_chunk) {
vector_views.push_back(table);
} else {
std::vector<size_type> splits;
auto n_chunks = num_rows / n_rows_per_chunk;
splits.resize(n_chunks);
rmm::device_vector<size_type> d_splits(n_chunks, n_rows_per_chunk);
thrust::inclusive_scan(exec->on(stream), d_splits.begin(), d_splits.end(), d_splits.begin());
CUDA_TRY(hipMemcpyAsync(splits.data(),
d_splits.data().get(),
n_chunks * sizeof(size_type),
hipMemcpyDeviceToHost,
stream));
CUDA_TRY(hipStreamSynchronize(stream));
// split table_view into chunks:
//
vector_views = cudf::split(table, splits);
}
// convert each chunk to CSV:
//
column_to_strings_fn converter{options_, mr_};
for (auto&& sub_view : vector_views) {
std::vector<std::unique_ptr<column>> str_column_vec;
// populate vector of string-converted columns:
//
std::transform(sub_view.begin(),
sub_view.end(),
std::back_inserter(str_column_vec),
[converter](auto const& current_col) {
return cudf::type_dispatcher(current_col.type(), converter, current_col);
});
// create string table view from str_column_vec:
//
auto str_table_ptr = std::make_unique<cudf::table>(std::move(str_column_vec));
table_view str_table_view{std::move(*str_table_ptr)};
// concatenate columns in each row into one big string column
//(using null representation and delimiter):
//
std::string delimiter_str{options_.inter_column_delimiter()};
auto str_concat_col =
cudf::strings::concatenate(str_table_view, delimiter_str, options_.na_rep(), mr_);
strings_column_view strings_converted{std::move(*str_concat_col)};
write_chunked(strings_converted, metadata, stream);
}
}
// finalize (no-op, for now, but offers a hook for future extensions):
//
write_chunked_end(table, metadata, stream);
}
void writer::write_all(table_view const& table, const table_metadata* metadata, hipStream_t stream)
{
_impl->write(table, metadata, stream);
}
} // namespace csv
} // namespace detail
} // namespace io
} // namespace cudf
| 4df14a57b8dbccf51e55b05f3df6f539973f90f9.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file writer_impl.cu
* @brief cuDF-IO CSV writer class implementation
*/
#include "writer_impl.hpp"
#include <cudf/copying.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/strings/convert/convert_booleans.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/strings/convert/convert_floats.hpp>
#include <cudf/strings/convert/convert_integers.hpp>
#include <cudf/strings/combine.hpp>
#include <cudf/strings/replace.hpp>
#include <strings/utilities.cuh>
#include <algorithm>
#include <cstring>
#include <iterator>
#include <sstream>
#include <type_traits>
#include <utility>
#include <thrust/count.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_buffer.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/detail/modify_strings.cuh>
namespace cudf {
namespace io {
namespace detail {
namespace csv {
namespace { // anonym.
// helpers:
using namespace cudf::strings;
// predicate to determine if a given string_view contains special characters:
//{"\"", "\n", <delimiter>}
//
struct predicate_special_chars {
explicit predicate_special_chars(string_scalar const& delimiter, cudaStream_t stream = 0)
: delimiter_(delimiter.value(stream))
{
}
__device__ bool operator()(string_view const& str_view) const
{
// if (any_of{"\"", "\n", <delimiter>} )
//
constexpr char const* quote_str = "\"";
constexpr char const* newline_str = "\n";
constexpr size_type len1byte{1};
if ((str_view.find(quote_str, len1byte) >= 0) || (str_view.find(newline_str, len1byte) >= 0) ||
(str_view.find(delimiter_) >= 0)) {
return true;
} else {
return false;
}
}
private:
string_view delimiter_;
};
struct probe_special_chars {
probe_special_chars(column_device_view const d_column, predicate_special_chars const& predicate)
: d_column_(d_column), predicate_(predicate)
{
}
__device__ int32_t operator()(size_type idx) const
{
if (d_column_.is_null(idx)) {
return 0; // null string, so no-op
}
string_view d_str = d_column_.template element<string_view>(idx);
if (predicate_(d_str)) {
constexpr char const quote_char = '\"';
// count number of quotes "\""
size_type num_quotes =
thrust::count_if(thrust::seq, d_str.begin(), d_str.end(), [quote_char](char_utf8 chr) {
return chr == quote_char;
});
return d_str.size_bytes() + num_quotes + 2;
} else {
return d_str.size_bytes();
}
}
private:
column_device_view const d_column_;
predicate_special_chars predicate_;
};
struct modify_special_chars {
modify_special_chars(column_device_view const d_column,
int32_t const* d_offsets,
char* d_chars,
predicate_special_chars const& predicate)
: d_column_(d_column), d_offsets_(d_offsets), d_chars_(d_chars), predicate_(predicate)
{
}
__device__ int32_t operator()(size_type idx)
{
using namespace cudf::strings::detail;
if (d_column_.is_null(idx)) {
return 0; // null string, so no-op
}
string_view d_str = d_column_.template element<string_view>(idx);
size_type str_size_bytes = d_str.size_bytes();
char* d_buffer = get_output_ptr(idx);
// assert( d_buffer != nullptr );
if (predicate_(d_str)) {
constexpr char const quote_char = '\"';
constexpr char const* quote_str = "\"";
constexpr char const* str_2quotes = "\"\"";
size_type len1quote{1};
size_type len2quotes{2};
// modify d_str by duplicating all 2bl quotes
// and surrounding whole string by 2bl quotes:
//
// pre-condition: `d_str` is _not_ modified by `d_buffer` manipulation
// because it's a copy of `idx` entry in `d_column_`
//(since `d_column` is const)
//
d_buffer = copy_and_increment(d_buffer, quote_str, len1quote); // add the quote prefix
for (auto itr = d_str.begin(); itr != d_str.end(); ++itr) {
char_utf8 the_chr = *itr;
if (the_chr == quote_char) {
d_buffer = copy_and_increment(d_buffer, str_2quotes, len2quotes); // double the quote;
} else {
d_buffer += from_char_utf8(the_chr, d_buffer);
}
}
d_buffer = copy_and_increment(d_buffer, quote_str, len1quote); // add the quote suffix;
} else {
// copy the source string unmodified:
//(pass-through)
//
memcpy(get_output_ptr(idx), d_str.data(), str_size_bytes);
}
return 0;
}
__device__ char* get_output_ptr(size_type idx)
{
return d_chars_ && d_offsets_ ? d_chars_ + d_offsets_[idx] : nullptr;
}
private:
column_device_view const d_column_;
int32_t const* d_offsets_;
char* d_chars_;
predicate_special_chars predicate_;
};
struct column_to_strings_fn {
// compile-time predicate that defines unsupported column types;
// based on the conditions used for instantiations of individual
// converters in strings/convert/convert_*.hpp;
//(this should have been a `variable template`,
// instead of a static function, but nvcc (10.0)
// fails to compile var-templs);
//
template <typename column_type>
constexpr static bool is_not_handled(void)
{
// Note: the case (not std::is_same<column_type, bool>::value)
// is already covered by is_integral)
//
return not((std::is_same<column_type, cudf::string_view>::value) ||
(std::is_integral<column_type>::value) ||
(std::is_floating_point<column_type>::value) || (cudf::is_timestamp<column_type>()));
}
explicit column_to_strings_fn(writer_options const& options,
rmm::mr::device_memory_resource* mr = nullptr,
cudaStream_t stream = nullptr)
: options_(options), mr_(mr), stream_(stream)
{
}
// Note: `null` replacement with `na_rep` deferred to `concatenate()`
// instead of column-wise; might be faster
//
// Note: Cannot pass `stream` to detail::<fname> version of <fname> calls below, because they are
// not exposed in header (see, for example, detail::concatenate(tbl_view, separator, na_rep, mr,
// stream) is declared and defined in combine.cu); Possible solution: declare `extern`, or just
// declare a prototype inside `namespace cudf::strings::detail`;
// bools:
//
template <typename column_type>
std::enable_if_t<std::is_same<column_type, bool>::value, std::unique_ptr<column>> operator()(
column_view const& column) const
{
auto conv_col_ptr =
cudf::strings::from_booleans(column, options_.true_value(), options_.false_value(), mr_);
return conv_col_ptr;
}
// strings:
//
template <typename column_type>
std::enable_if_t<std::is_same<column_type, cudf::string_view>::value, std::unique_ptr<column>>
operator()(column_view const& column_v) const
{
using namespace cudf::strings::detail;
// handle special characters: {delimiter, '\n', "} in row:
//
// algorithm outline:
//
// target = "\"";
// repl = ""\"\";
//
// str_column_ref = {};
// for each str_row: column_v {
// if ((not null str_row) &&
// (str_row.find("\n") || str_row.find("\"") || str_row.find(delimiter) ))
// str_column_modified = modify(str_row);
// where modify() = duplicate the double quotes, if any; add 2bl quotes prefix/suffix;
//}
//
std::string delimiter{options_.inter_column_delimiter()};
predicate_special_chars pred{delimiter, stream_};
return modify_strings<probe_special_chars, modify_special_chars>(column_v, mr_, stream_, pred);
}
// ints:
//
template <typename column_type>
std::enable_if_t<std::is_integral<column_type>::value && !std::is_same<column_type, bool>::value,
std::unique_ptr<column>>
operator()(column_view const& column) const
{
auto conv_col_ptr = cudf::strings::from_integers(column, mr_);
return conv_col_ptr;
}
// floats:
//
template <typename column_type>
std::enable_if_t<std::is_floating_point<column_type>::value, std::unique_ptr<column>> operator()(
column_view const& column) const
{
auto conv_col_ptr = cudf::strings::from_floats(column, mr_);
return conv_col_ptr;
}
// timestamps:
//
template <typename column_type>
std::enable_if_t<cudf::is_timestamp<column_type>(), std::unique_ptr<column>> operator()(
column_view const& column) const
{
std::string format{"%Y-%m-%dT%H:%M:%SZ"}; // same as default for `from_timestamp`
// handle the cases where delimiter / line-terminator can be
// "-" or ":", in which case they are to be dropped from the format:
//
std::string delimiter{options_.inter_column_delimiter()};
std::string newline{options_.line_terminator()};
constexpr char const* dash{"-"};
constexpr char const* colon{":"};
if (delimiter == dash || newline == dash) {
format.erase(std::remove(format.begin(), format.end(), dash[0]), format.end());
}
if (delimiter == colon || newline == colon) {
format.erase(std::remove(format.begin(), format.end(), colon[0]), format.end());
}
auto conv_col_ptr = cudf::strings::from_timestamps(column, format, mr_);
return conv_col_ptr;
}
// unsupported type of column:
//
template <typename column_type>
std::enable_if_t<is_not_handled<column_type>(), std::unique_ptr<column>> operator()(
column_view const& column) const
{
CUDF_FAIL("Unsupported column type.");
}
private:
writer_options const& options_;
rmm::mr::device_memory_resource* mr_;
cudaStream_t stream_;
};
} // unnamed namespace
// Forward to implementation
writer::writer(std::unique_ptr<data_sink> sink,
writer_options const& options,
rmm::mr::device_memory_resource* mr)
: _impl(std::make_unique<impl>(std::move(sink), options, mr))
{
}
// Destructor within this translation unit
writer::~writer() = default;
writer::impl::impl(std::unique_ptr<data_sink> sink,
writer_options const& options,
rmm::mr::device_memory_resource* mr)
: out_sink_(std::move(sink)), mr_(mr), options_(options)
{
}
// write the header: column names:
//
void writer::impl::write_chunked_begin(table_view const& table,
const table_metadata* metadata,
cudaStream_t stream)
{
if ((metadata != nullptr) && (options_.include_header())) {
CUDF_EXPECTS(metadata->column_names.size() == static_cast<size_t>(table.num_columns()),
"Mismatch between number of column headers and table columns.");
std::string delimiter_str{options_.inter_column_delimiter()};
// avoid delimiter after last element:
//
std::stringstream ss;
std::copy(metadata->column_names.begin(),
metadata->column_names.end() - 1,
std::ostream_iterator<std::string>(ss, delimiter_str.c_str()));
ss << metadata->column_names.back() << options_.line_terminator();
out_sink_->host_write(ss.str().data(), ss.str().size());
}
}
void writer::impl::write_chunked(strings_column_view const& str_column_view,
const table_metadata* metadata,
cudaStream_t stream)
{
// algorithm outline:
//
// for_each(strings_column.begin(), strings_column.end(),
// [sink = out_sink_](auto str_row) mutable {
// auto host_buffer = str_row.host_buffer();
// sink->host_write(host_buffer_.data(), host_buffer_.size());
// });//or...sink->device_write(device_buffer,...);
//
// added line_terminator functionality
//
CUDF_EXPECTS(str_column_view.size() > 0, "Unexpected empty strings column.");
cudf::string_scalar newline{options_.line_terminator()};
auto p_str_col_w_nl = cudf::strings::join_strings(str_column_view, newline);
strings_column_view strings_column{std::move(p_str_col_w_nl->view())};
auto total_num_bytes = strings_column.chars_size();
char const* ptr_all_bytes = strings_column.chars().data<char>();
if (out_sink_->supports_device_write()) {
// host algorithm call, but the underlying call
// is a device_write taking a device buffer;
//
out_sink_->device_write(ptr_all_bytes, total_num_bytes, stream);
out_sink_->device_write(newline.data(),
newline.size(),
stream); // needs newline at the end, to separate from next chunk
} else {
// no device write possible;
//
// copy the bytes to host, too:
//
thrust::host_vector<char> h_bytes(total_num_bytes);
CUDA_TRY(cudaMemcpyAsync(h_bytes.data(),
ptr_all_bytes,
total_num_bytes * sizeof(char),
cudaMemcpyDeviceToHost,
stream));
CUDA_TRY(cudaStreamSynchronize(stream));
// host algorithm call, where the underlying call
// is also host_write taking a host buffer;
//
char const* ptr_h_bytes = h_bytes.data();
out_sink_->host_write(ptr_h_bytes, total_num_bytes);
out_sink_->host_write(
options_.line_terminator().data(),
options_.line_terminator().size()); // needs newline at the end, to separate from next chunk
}
}
void writer::impl::write(table_view const& table,
const table_metadata* metadata,
cudaStream_t stream)
{
CUDF_EXPECTS(table.num_columns() > 0, "Empty table.");
// write header: column names separated by delimiter:
// (even for tables with no rows)
//
write_chunked_begin(table, metadata, stream);
if (table.num_rows() > 0) {
// no need to check same-size columns constraint; auto-enforced by table_view
auto n_rows_per_chunk = options_.rows_per_chunk();
//
// This outputs the CSV in row chunks to save memory.
// Maybe we can use the total_rows*count calculation and a memory threshold
// instead of an arbitrary chunk count.
// The entire CSV chunk must fit in CPU memory before writing it out.
//
if (n_rows_per_chunk % 8) // must be divisible by 8
n_rows_per_chunk += 8 - (n_rows_per_chunk % 8);
CUDF_EXPECTS(n_rows_per_chunk >= 8, "write_csv: invalid chunk_rows; must be at least 8");
auto exec = rmm::exec_policy(stream);
auto num_rows = table.num_rows();
std::vector<table_view> vector_views;
if (num_rows <= n_rows_per_chunk) {
vector_views.push_back(table);
} else {
std::vector<size_type> splits;
auto n_chunks = num_rows / n_rows_per_chunk;
splits.resize(n_chunks);
rmm::device_vector<size_type> d_splits(n_chunks, n_rows_per_chunk);
thrust::inclusive_scan(exec->on(stream), d_splits.begin(), d_splits.end(), d_splits.begin());
CUDA_TRY(cudaMemcpyAsync(splits.data(),
d_splits.data().get(),
n_chunks * sizeof(size_type),
cudaMemcpyDeviceToHost,
stream));
CUDA_TRY(cudaStreamSynchronize(stream));
// split table_view into chunks:
//
vector_views = cudf::split(table, splits);
}
// convert each chunk to CSV:
//
column_to_strings_fn converter{options_, mr_};
for (auto&& sub_view : vector_views) {
std::vector<std::unique_ptr<column>> str_column_vec;
// populate vector of string-converted columns:
//
std::transform(sub_view.begin(),
sub_view.end(),
std::back_inserter(str_column_vec),
[converter](auto const& current_col) {
return cudf::type_dispatcher(current_col.type(), converter, current_col);
});
// create string table view from str_column_vec:
//
auto str_table_ptr = std::make_unique<cudf::table>(std::move(str_column_vec));
table_view str_table_view{std::move(*str_table_ptr)};
// concatenate columns in each row into one big string column
//(using null representation and delimiter):
//
std::string delimiter_str{options_.inter_column_delimiter()};
auto str_concat_col =
cudf::strings::concatenate(str_table_view, delimiter_str, options_.na_rep(), mr_);
strings_column_view strings_converted{std::move(*str_concat_col)};
write_chunked(strings_converted, metadata, stream);
}
}
// finalize (no-op, for now, but offers a hook for future extensions):
//
write_chunked_end(table, metadata, stream);
}
void writer::write_all(table_view const& table, const table_metadata* metadata, cudaStream_t stream)
{
_impl->write(table, metadata, stream);
}
} // namespace csv
} // namespace detail
} // namespace io
} // namespace cudf
|
91db3af3cbea5650367e21059529e382bec0d6ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "wtoolkit_cuda.h"
#include <algorithm>
#include <iostream>
#include <iomanip>
#include "wmacros.h"
#include "bboxes.cu.h"
using namespace std;
constexpr auto kBlockSize = 64;
#ifdef GOOGLE_CUDA
/*
 * For each anchor box, find the ground truth box (gbboxes) with the largest overlap
 *
 * gbboxes:[gb_size,4] (ymin,xmin,ymax,xmax) ground truth boxes
 * anchor_bboxes:[ab_size,4] (ymin,xmin,ymax,xmax) anchor boxes to be matched
 * Outputs:
 * scores:[ab_size] the best IoU score for each anchor
 * indexs:[ab_size] the index of the matched gbboxes entry
*/
__global__ void matcher_get_scores_and_indexs(const float* gbboxes,const float* anchor_bboxes,float* scores,int* indexs,size_t gb_size,size_t ab_size)
{
const auto a_index = blockIdx.x;
const auto g_offset = threadIdx.x;
auto max_i = -1;
auto max_s = MIN_SCORE_FOR_POS_BOX;
float abbox[4];
float gbbox[4];
__shared__ int max_index[kBlockSize];
__shared__ float max_scores[kBlockSize];
/*
 * gbboxes is divided into groups of kBlockSize; the code below finds, within one group, the largest-overlap ground truth box (max_i, max_s) for the given anchor box (a_index)
*/
for(auto i=0; i<4; ++i)
abbox[i] = (anchor_bboxes+(a_index<<2))[i];
for(auto i=g_offset; i<gb_size; i += blockDim.x) {
for(auto j=0; j<4; ++j)
gbbox[j] = (gbboxes+(i<<2))[j];
const auto cs = cuda_bboxes_jaccard(abbox,gbbox);
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
max_index[g_offset] = max_i;
max_scores[g_offset] = max_s;
__syncthreads();
if(g_offset != 0) return;
/*
 * Thread 0 then finds the largest one across all groups
*/
max_i = -1;
max_s = 1e-8;
for(auto i=0; i<blockDim.x; ++i) {
const auto cs = max_scores[i];
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
if(max_i>=0) {
indexs[a_index] = max_index[max_i];
scores[a_index] = max_s;
}
}
__global__ void matcher_get_scores_and_indexsv2(const float* gbboxes,const float* anchor_bboxes,float* scores,int* indexs,size_t gb_size,size_t ab_size)
{
const auto a_index = blockIdx.x;
const auto g_offset = threadIdx.x;
auto max_i = -1;
auto max_s = MIN_SCORE_FOR_POS_BOX;
float abbox[4];
float gbbox[4];
__shared__ int max_index[kBlockSize];
__shared__ float max_scores[kBlockSize];
/*
 * gbboxes is divided into groups of kBlockSize; the code below finds, within one group, the largest-overlap ground truth box (max_i, max_s) for the given anchor box (a_index)
*/
for(auto i=0; i<4; ++i)
abbox[i] = (anchor_bboxes+(a_index<<2))[i];
for(auto i=g_offset; i<gb_size; i += blockDim.x) {
for(auto j=0; j<4; ++j)
gbbox[j] = (gbboxes+(i<<2))[j];
if(!cuda_is_in_gtbox(abbox,gbbox))
continue;
const auto cs = cuda_bboxes_jaccard(abbox,gbbox);
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
max_index[g_offset] = max_i;
max_scores[g_offset] = max_s;
__syncthreads();
if(g_offset != 0) return;
/*
 * Thread 0 then finds the largest one across all groups
*/
max_i = -1;
max_s = 1e-8;
for(auto i=0; i<blockDim.x; ++i) {
const auto cs = max_scores[i];
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
if(max_i>=0) {
indexs[a_index] = max_index[max_i];
scores[a_index] = max_s;
}
}
/*
 * For each ground truth box, find the anchor box with the largest overlap
 * gbboxes:[gb_size,4]
 * anchor_bboxes: [ab_size,4]
 * Outputs:
* is_max_score:[ab_size]
* scores0:[gb_size]
* indexs0:[gb_size]
*/
__global__ void matcher_find_max_score_index(const float* gbboxes,const float* anchor_bboxes,bool* is_max_score,size_t ab_size)
{
const auto g_index = blockIdx.x;
const auto a_offset = threadIdx.x;
auto max_i = -1;
auto max_s = MIN_SCORE_FOR_POS_BOX;
float gbbox[4];
float abbox[4];
__shared__ int max_index[kBlockSize];
__shared__ float max_scores[kBlockSize];
/*
 * anchor bboxes are divided into groups of kBlockSize; this part finds, within one group, the largest-overlap anchor box for the given ground truth box (g_index)
*/
for(auto i=0; i<4; ++i)
gbbox[i] = (gbboxes+(g_index<<2))[i];
for(auto i=a_offset; i<ab_size; i += blockDim.x) {
for(auto j=0; j<4; ++j)
abbox[j] = (anchor_bboxes+(i<<2))[j];
const auto cs = cuda_bboxes_jaccard(gbbox,abbox);
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
max_index[a_offset] = max_i;
max_scores[a_offset] = max_s;
__syncthreads();
if(a_offset != 0) return;
/*
 * Thread 0 finds the single index of the anchor box with the maximum overlap
*/
max_i = -1;
max_s = MIN_SCORE_FOR_POS_BOX;
for(auto i=0; i<blockDim.x; ++i) {
const auto cs = max_scores[i];
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
if(max_i>=0)
is_max_score[max_index[max_i]] = true;
}
__global__ void matcher_get_labels(int* indexs,float* scores,const bool* is_max_score,const int* glabels,int* out_labels,float neg_threshold,float pos_threshold)
{
auto a_index = blockIdx.x;
const auto &index = indexs[a_index];
const auto score = scores[a_index];
if((score>=pos_threshold) || (score<neg_threshold) || is_max_score[a_index]) {
if((score>=pos_threshold) || is_max_score[a_index]) {
out_labels[a_index] = glabels[index];
} else {
out_labels[a_index] = 0;
indexs[a_index] = -1;
scores[a_index] = 0;
}
} else {
indexs[a_index] = -1;
scores[a_index] = 0.0f;
out_labels[a_index] = -1;
}
}
__host__ void matcher_by_gpu(const float* gbboxes,const float* anchor_bboxes,const int* glabels,
float* out_scores,int* out_labels,int* out_index,
size_t gb_size,size_t ab_size,float neg_threshold,float pos_threshold,bool max_overlap_as_pos=true,bool force_in_gtbox=false)
{
cuda_unique_ptr<int> g_out_index;
if(nullptr == out_index) {
g_out_index = make_cuda_unique<int>(ab_size);
out_index = g_out_index.get();
}
CHECK_OK(hipMemset(out_scores,0,sizeof(float)*ab_size));
CHECK_OK(hipMemset(out_index,0xff,sizeof(int)*ab_size));
CHECK_OK(hipMemset(out_labels,0,sizeof(int)*ab_size));
dim3 grid(ab_size);
dim3 grid1(gb_size);
cuda_unique_ptr<bool> d_is_max_score = make_cuda_unique<bool>((unsigned char)(0x00),ab_size);
if(force_in_gtbox)
hipLaunchKernelGGL(( matcher_get_scores_and_indexsv2), dim3(grid),dim3(std::min<size_t>(kBlockSize,gb_size)), 0, 0, gbboxes,anchor_bboxes,out_scores,out_index,gb_size,ab_size);
else
hipLaunchKernelGGL(( matcher_get_scores_and_indexs), dim3(grid),dim3(std::min<size_t>(kBlockSize,gb_size)), 0, 0, gbboxes,anchor_bboxes,out_scores,out_index,gb_size,ab_size);
auto res = hipPeekAtLastError();
if(res != hipError_t::hipSuccess) {
CHECK_CUDA_ERRORS(res);
cout<<"CUDAERROR INFO:"<<ab_size<<","<<kBlockSize<<","<<gb_size<<endl;
}
hipDeviceSynchronize();
if(max_overlap_as_pos) {
hipLaunchKernelGGL(( matcher_find_max_score_index), dim3(grid1),dim3(std::min<size_t>(kBlockSize,ab_size)), 0, 0, gbboxes,anchor_bboxes,d_is_max_score.get(),ab_size);
CHECK_CUDA_ERRORS(hipPeekAtLastError());
hipDeviceSynchronize();
}
hipDeviceSynchronize();
CHECK_CUDA_ERRORS(hipPeekAtLastError());
hipLaunchKernelGGL(( matcher_get_labels), dim3(grid),dim3(1), 0, 0, out_index,out_scores,d_is_max_score.get(),glabels,out_labels,neg_threshold,pos_threshold);
CHECK_CUDA_ERRORS(hipPeekAtLastError());
hipDeviceSynchronize();
}
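/*
 * Minimal usage sketch for matcher_by_gpu (hypothetical device pointers and sizes, not
 * taken from the surrounding project): with gbboxes, anchors and glabels already resident
 * on the device,
 *
 * matcher_by_gpu(d_gbboxes, d_anchors, d_glabels,
 * d_out_scores, d_out_labels, d_out_index,
 * gb_size, ab_size,
 * 0.3f, 0.7f); // neg_threshold, pos_threshold
 *
 * fills d_out_labels with glabels[i] for positive anchors, 0 for negatives and -1 for
 * anchors whose best IoU lies between the two thresholds (ignored during training).
 */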
#endif
| 91db3af3cbea5650367e21059529e382bec0d6ea.cu | #include <vector>
#include "wtoolkit_cuda.h"
#include <algorithm>
#include <iostream>
#include <iomanip>
#include "wmacros.h"
#include "bboxes.cu.h"
using namespace std;
constexpr auto kBlockSize = 64;
#ifdef GOOGLE_CUDA
/*
* 找到与每一个anchor boxes相对就的最大的gbboxes
*
* gbboxes:[gb_size,4] (ymin,xmin,ymax,xmax)表示ground truth box
* anchor_bboxes:[ab_size,4] (ymin,xmin,ymax,xmax)表示待匹配的box
* 输出:
* scores:[ab_size]相应的iou得分
* indexs:[ab_size]与之相对应的gbboxes索引
*/
__global__ void matcher_get_scores_and_indexs(const float* gbboxes,const float* anchor_bboxes,float* scores,int* indexs,size_t gb_size,size_t ab_size)
{
const auto a_index = blockIdx.x;
const auto g_offset = threadIdx.x;
auto max_i = -1;
auto max_s = MIN_SCORE_FOR_POS_BOX;
float abbox[4];
float gbbox[4];
__shared__ int max_index[kBlockSize];
__shared__ float max_scores[kBlockSize];
/*
 * gbboxes is divided into groups of kBlockSize; the code below finds, within one group, the largest-overlap ground truth box (max_i, max_s) for the given anchor box (a_index)
*/
for(auto i=0; i<4; ++i)
abbox[i] = (anchor_bboxes+(a_index<<2))[i];
for(auto i=g_offset; i<gb_size; i += blockDim.x) {
for(auto j=0; j<4; ++j)
gbbox[j] = (gbboxes+(i<<2))[j];
const auto cs = cuda_bboxes_jaccard(abbox,gbbox);
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
max_index[g_offset] = max_i;
max_scores[g_offset] = max_s;
__syncthreads();
if(g_offset != 0) return;
/*
 * Thread 0 then finds the largest one across all groups
*/
max_i = -1;
max_s = 1e-8;
for(auto i=0; i<blockDim.x; ++i) {
const auto cs = max_scores[i];
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
if(max_i>=0) {
indexs[a_index] = max_index[max_i];
scores[a_index] = max_s;
}
}
__global__ void matcher_get_scores_and_indexsv2(const float* gbboxes,const float* anchor_bboxes,float* scores,int* indexs,size_t gb_size,size_t ab_size)
{
const auto a_index = blockIdx.x;
const auto g_offset = threadIdx.x;
auto max_i = -1;
auto max_s = MIN_SCORE_FOR_POS_BOX;
float abbox[4];
float gbbox[4];
__shared__ int max_index[kBlockSize];
__shared__ float max_scores[kBlockSize];
/*
 * gbboxes is divided into groups of kBlockSize; the code below finds, within one group, the largest-overlap ground truth box (max_i, max_s) for the given anchor box (a_index)
*/
for(auto i=0; i<4; ++i)
abbox[i] = (anchor_bboxes+(a_index<<2))[i];
for(auto i=g_offset; i<gb_size; i += blockDim.x) {
for(auto j=0; j<4; ++j)
gbbox[j] = (gbboxes+(i<<2))[j];
if(!cuda_is_in_gtbox(abbox,gbbox))
continue;
const auto cs = cuda_bboxes_jaccard(abbox,gbbox);
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
max_index[g_offset] = max_i;
max_scores[g_offset] = max_s;
__syncthreads();
if(g_offset != 0) return;
/*
 * Thread 0 then finds the largest one across all groups
*/
max_i = -1;
max_s = 1e-8;
for(auto i=0; i<blockDim.x; ++i) {
const auto cs = max_scores[i];
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
if(max_i>=0) {
indexs[a_index] = max_index[max_i];
scores[a_index] = max_s;
}
}
/*
 * For each ground truth box, find the anchor box with the largest overlap
 * gbboxes:[gb_size,4]
 * anchor_bboxes: [ab_size,4]
 * Outputs:
* is_max_score:[ab_size]
* scores0:[gb_size]
* indexs0:[gb_size]
*/
__global__ void matcher_find_max_score_index(const float* gbboxes,const float* anchor_bboxes,bool* is_max_score,size_t ab_size)
{
const auto g_index = blockIdx.x;
const auto a_offset = threadIdx.x;
auto max_i = -1;
auto max_s = MIN_SCORE_FOR_POS_BOX;
float gbbox[4];
float abbox[4];
__shared__ int max_index[kBlockSize];
__shared__ float max_scores[kBlockSize];
/*
 * anchor bboxes are divided into groups of kBlockSize; this part finds, within one group, the largest-overlap anchor box for the given ground truth box (g_index)
*/
for(auto i=0; i<4; ++i)
gbbox[i] = (gbboxes+(g_index<<2))[i];
for(auto i=a_offset; i<ab_size; i += blockDim.x) {
for(auto j=0; j<4; ++j)
abbox[j] = (anchor_bboxes+(i<<2))[j];
const auto cs = cuda_bboxes_jaccard(gbbox,abbox);
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
max_index[a_offset] = max_i;
max_scores[a_offset] = max_s;
__syncthreads();
if(a_offset != 0) return;
/*
 * Thread 0 finds the single index of the anchor box with the maximum overlap
*/
max_i = -1;
max_s = MIN_SCORE_FOR_POS_BOX;
for(auto i=0; i<blockDim.x; ++i) {
const auto cs = max_scores[i];
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
if(max_i>=0)
is_max_score[max_index[max_i]] = true;
}
__global__ void matcher_get_labels(int* indexs,float* scores,const bool* is_max_score,const int* glabels,int* out_labels,float neg_threshold,float pos_threshold)
{
auto a_index = blockIdx.x;
const auto &index = indexs[a_index];
const auto score = scores[a_index];
if((score>=pos_threshold) || (score<neg_threshold) || is_max_score[a_index]) {
if((score>=pos_threshold) || is_max_score[a_index]) {
out_labels[a_index] = glabels[index];
} else {
out_labels[a_index] = 0;
indexs[a_index] = -1;
scores[a_index] = 0;
}
} else {
indexs[a_index] = -1;
scores[a_index] = 0.0f;
out_labels[a_index] = -1;
}
}
__host__ void matcher_by_gpu(const float* gbboxes,const float* anchor_bboxes,const int* glabels,
float* out_scores,int* out_labels,int* out_index,
size_t gb_size,size_t ab_size,float neg_threshold,float pos_threshold,bool max_overlap_as_pos=true,bool force_in_gtbox=false)
{
cuda_unique_ptr<int> g_out_index;
if(nullptr == out_index) {
g_out_index = make_cuda_unique<int>(ab_size);
out_index = g_out_index.get();
}
CHECK_OK(cudaMemset(out_scores,0,sizeof(float)*ab_size));
CHECK_OK(cudaMemset(out_index,0xff,sizeof(int)*ab_size));
CHECK_OK(cudaMemset(out_labels,0,sizeof(int)*ab_size));
dim3 grid(ab_size);
dim3 grid1(gb_size);
cuda_unique_ptr<bool> d_is_max_score = make_cuda_unique<bool>((unsigned char)(0x00),ab_size);
if(force_in_gtbox)
matcher_get_scores_and_indexsv2<<<grid,std::min<size_t>(kBlockSize,gb_size)>>>(gbboxes,anchor_bboxes,out_scores,out_index,gb_size,ab_size);
else
matcher_get_scores_and_indexs<<<grid,std::min<size_t>(kBlockSize,gb_size)>>>(gbboxes,anchor_bboxes,out_scores,out_index,gb_size,ab_size);
auto res = cudaPeekAtLastError();
if(res != cudaError::cudaSuccess) {
CHECK_CUDA_ERRORS(res);
cout<<"CUDAERROR INFO:"<<ab_size<<","<<kBlockSize<<","<<gb_size<<endl;
}
cudaDeviceSynchronize();
if(max_overlap_as_pos) {
matcher_find_max_score_index<<<grid1,std::min<size_t>(kBlockSize,ab_size)>>>(gbboxes,anchor_bboxes,d_is_max_score.get(),ab_size);
CHECK_CUDA_ERRORS(cudaPeekAtLastError());
cudaDeviceSynchronize();
}
cudaDeviceSynchronize();
CHECK_CUDA_ERRORS(cudaPeekAtLastError());
matcher_get_labels<<<grid,1>>>(out_index,out_scores,d_is_max_score.get(),glabels,out_labels,neg_threshold,pos_threshold);
CHECK_CUDA_ERRORS(cudaPeekAtLastError());
cudaDeviceSynchronize();
}
#endif
|
7d8d83bdfa986b658ff794a5f60dffcc7bf368c0.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "simple_radar_pipeline.h"
int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv)
{
index_t numChannels = 16;
index_t numPulses = 128;
index_t numSamples = 9000;
index_t waveformLength = 1000;
// cuda stream to place work in
hipStream_t stream;
hipStreamCreate(&stream);
// create some events for timing
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
auto radar = RadarPipeline(numPulses, numSamples, waveformLength, numChannels, stream);
(*(radar.GetInputView()) = random<float>(radar.GetInputView().Shape(), NORMAL)).run();
(*(radar.GetWaveformView()) = random<float>(radar.GetWaveformView().Shape(), NORMAL)).run();
radar.PulseCompression();
auto rv = radar.GetInputView().Slice<1>({0, 0, 0}, {matxSliceDim, matxSliceDim, 16});
rv.print();
hipStreamDestroy(stream);
return 0;
}
| 7d8d83bdfa986b658ff794a5f60dffcc7bf368c0.cu | ////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "simple_radar_pipeline.h"
int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv)
{
index_t numChannels = 16;
index_t numPulses = 128;
index_t numSamples = 9000;
index_t waveformLength = 1000;
// cuda stream to place work in
cudaStream_t stream;
cudaStreamCreate(&stream);
// create some events for timing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
auto radar = RadarPipeline(numPulses, numSamples, waveformLength, numChannels, stream);
(*(radar.GetInputView()) = random<float>(radar.GetInputView().Shape(), NORMAL)).run();
(*(radar.GetWaveformView()) = random<float>(radar.GetWaveformView().Shape(), NORMAL)).run();
radar.PulseCompression();
auto rv = radar.GetInputView().Slice<1>({0, 0, 0}, {matxSliceDim, matxSliceDim, 16});
rv.print();
cudaStreamDestroy(stream);
return 0;
}
|
9f685920f490ece36b7e03c62cb2e9edfc806473.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__
void laplace(float * U1, float * U2) {
int i = blockIdx.x;
int j = threadIdx.x;
int side = blockDim.x + 2;
U2[(i + 1) * side + j + 1] // i, j
= U1[i * side + j + 1] // i-1, j
+ U1[(i + 1) * side + j] // i, j-1
+ U1[(i + 2) * side + j + 1] // i+1, j
+ U1[(i + 1) * side + j + 2]; // i, j+1
U2[(i + 1) * side + j + 1] *= .25;
}
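// Index map for the kernel above: block i handles interior row i+1 and thread j handles
// interior column j+1 of a side x side grid (side = blockDim.x + 2), so the boundary rows
// and columns are never written and act as fixed Dirichlet values. Each launch performs
// one Jacobi sweep, averaging the four neighbours from U1 into U2; main() below ping-pongs
// the two buffers so that two launches advance the solution by two time steps.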
int main() {
int T = 10000;
int side = 128;
int area = side * side;
float * U1, * U2, * devU1, * devU2;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
//---------------------------
U1 = (float *)malloc(area * sizeof(float));
U2 = (float *)malloc(area * sizeof(float));
hipMalloc(&devU1, area * sizeof(float));
hipMalloc(&devU2, area * sizeof(float));
for (int i=0; i<side; ++i)
U1[i] = 1.;
for (int i=1; i<side; ++i) {
for (int j=0; j<side; ++j)
U1[i * side + j] = 0.;
}
memcpy(U2, U1, area * sizeof(float));
hipMemcpy(devU1, U1, area * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpy(devU2, U1, area * sizeof(float),
hipMemcpyHostToDevice);
for (int t=0; t<T;) {
hipLaunchKernelGGL(( laplace), dim3(side-2), dim3(side-2), 0, 0, devU1, devU2);
hipLaunchKernelGGL(( laplace), dim3(side-2), dim3(side-2), 0, 0, devU2, devU1);
t += 2;
}
hipMemcpy(U1, devU1, area * sizeof(float),
hipMemcpyDeviceToHost);
//----------------------------
hipEventRecord(stop);
float elapsed_time(0);
hipEventElapsedTime(&elapsed_time, start, stop);
printf("elapsed time: %f ms\n", elapsed_time);
FILE * cfout = fopen("output.bin", "wb");
fwrite(U1, sizeof(float), area, cfout);
fclose(cfout);
hipFree(devU1);
hipFree(devU2);
free(U1);
free(U2);
}
| 9f685920f490ece36b7e03c62cb2e9edfc806473.cu | #include <stdio.h>
__global__
void laplace(float * U1, float * U2) {
int i = blockIdx.x;
int j = threadIdx.x;
int side = blockDim.x + 2;
U2[(i + 1) * side + j + 1] // i, j
= U1[i * side + j + 1] // i-1, j
+ U1[(i + 1) * side + j] // i, j-1
+ U1[(i + 2) * side + j + 1] // i+1, j
+ U1[(i + 1) * side + j + 2]; // i, j+1
U2[(i + 1) * side + j + 1] *= .25;
}
int main() {
int T = 10000;
int side = 128;
int area = side * side;
float * U1, * U2, * devU1, * devU2;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
//---------------------------
U1 = (float *)malloc(area * sizeof(float));
U2 = (float *)malloc(area * sizeof(float));
cudaMalloc(&devU1, area * sizeof(float));
cudaMalloc(&devU2, area * sizeof(float));
for (int i=0; i<side; ++i)
U1[i] = 1.;
for (int i=1; i<side; ++i) {
for (int j=0; j<side; ++j)
U1[i * side + j] = 0.;
}
memcpy(U2, U1, area * sizeof(float));
cudaMemcpy(devU1, U1, area * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(devU2, U1, area * sizeof(float),
cudaMemcpyHostToDevice);
for (int t=0; t<T;) {
laplace<<<side-2, side-2>>>(devU1, devU2);
laplace<<<side-2, side-2>>>(devU2, devU1);
t += 2;
}
cudaMemcpy(U1, devU1, area * sizeof(float),
cudaMemcpyDeviceToHost);
//----------------------------
    cudaEventRecord(stop);
    cudaEventSynchronize(stop); // make sure the stop event has completed before reading the timer
float elapsed_time(0);
cudaEventElapsedTime(&elapsed_time, start, stop);
printf("elapsed time: %f ms\n", elapsed_time);
FILE * cfout = fopen("output.bin", "wb");
fwrite(U1, sizeof(float), area, cfout);
fclose(cfout);
cudaFree(devU1);
cudaFree(devU2);
free(U1);
free(U2);
}
|
4e2a296310d46ecb44ea3a07c3f31d3e4f8cf3cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* SpectRE - A Spectral Code for Reheating
* Copyright (C) 2009-2010 Hal Finkel, Nathaniel Roth and Richard Easther
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "pow.hpp"
#include "integrator.hpp"
#include "reduction_helper.hpp"
#include <cufftw.h>
using namespace std;
__global__ void integrator_kernel(fftw_complex *phi, fftw_complex *chi,
double *total_gradient_phi, double *total_gradient_chi,
int n, double dp)
{
int x = blockIdx.x;
int y = blockIdx.y;
int z = threadIdx.x;
int px = x <= n/2 ? x : x - n;
int py = y <= n/2 ? y : y - n;
int pz = z;
int idx = z + (n/2+1)*(y + n*x);
double mom2 = pow2(dp)*(pow2(px) + pow2(py) + pow2(pz));
mom2 *= (z == 0 || z == n/2) ? 1 : 2;
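    // The factor of 2 accounts for the Hermitian symmetry of the real-to-complex
    // FFT: only n/2+1 z-frequencies are stored, and every bin except z == 0 and
    // z == n/2 represents a conjugate pair of modes, so its |.|^2 contribution
    // must be counted twice.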
total_gradient_phi[idx] += mom2*(pow2(phi[idx][0]) + pow2(phi[idx][1]));
total_gradient_chi[idx] += mom2*(pow2(chi[idx][0]) + pow2(chi[idx][1]));
}
template <typename R>
void integrator<R>::avg_gradients(field_size &fs,
field<R> &phi, field<R> &chi,
R &avg_gradient_phi, R &avg_gradient_chi)
{
phi.switch_state(momentum);
chi.switch_state(momentum);
auto total_gradient_phi_arr = double_array_gpu(fs.n, fs.n, fs.n/2+1);
auto total_gradient_chi_arr = double_array_gpu(fs.n, fs.n, fs.n/2+1);
dim3 num_blocks(fs.n, fs.n);
dim3 num_threads(fs.n/2+1, 1);
hipLaunchKernelGGL(( integrator_kernel), dim3(num_blocks), dim3(num_threads), 0, 0, phi.mdata.ptr, chi.mdata.ptr,
total_gradient_phi_arr.ptr(),
total_gradient_chi_arr.ptr(),
fs.n, MP_DP);
R total_gradient_phi = total_gradient_phi_arr.sum();
R total_gradient_chi = total_gradient_chi_arr.sum();
// Divide by total_gridpoints again to get *average* squared gradient and *average* potential energy.
avg_gradient_phi = total_gradient_phi/pow<2, R>(fs.total_gridpoints);
avg_gradient_chi = total_gradient_chi/pow<2, R>(fs.total_gridpoints);
}
// Explicit instantiations
template class integrator<double>;
| 4e2a296310d46ecb44ea3a07c3f31d3e4f8cf3cb.cu | /*
* SpectRE - A Spectral Code for Reheating
* Copyright (C) 2009-2010 Hal Finkel, Nathaniel Roth and Richard Easther
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "pow.hpp"
#include "integrator.hpp"
#include "reduction_helper.hpp"
#include <cufftw.h>
using namespace std;
__global__ void integrator_kernel(fftw_complex *phi, fftw_complex *chi,
double *total_gradient_phi, double *total_gradient_chi,
int n, double dp)
{
int x = blockIdx.x;
int y = blockIdx.y;
int z = threadIdx.x;
int px = x <= n/2 ? x : x - n;
int py = y <= n/2 ? y : y - n;
int pz = z;
int idx = z + (n/2+1)*(y + n*x);
double mom2 = pow2(dp)*(pow2(px) + pow2(py) + pow2(pz));
mom2 *= (z == 0 || z == n/2) ? 1 : 2;
total_gradient_phi[idx] += mom2*(pow2(phi[idx][0]) + pow2(phi[idx][1]));
total_gradient_chi[idx] += mom2*(pow2(chi[idx][0]) + pow2(chi[idx][1]));
}
template <typename R>
void integrator<R>::avg_gradients(field_size &fs,
field<R> &phi, field<R> &chi,
R &avg_gradient_phi, R &avg_gradient_chi)
{
phi.switch_state(momentum);
chi.switch_state(momentum);
auto total_gradient_phi_arr = double_array_gpu(fs.n, fs.n, fs.n/2+1);
auto total_gradient_chi_arr = double_array_gpu(fs.n, fs.n, fs.n/2+1);
dim3 num_blocks(fs.n, fs.n);
dim3 num_threads(fs.n/2+1, 1);
integrator_kernel<<<num_blocks, num_threads>>>(phi.mdata.ptr, chi.mdata.ptr,
total_gradient_phi_arr.ptr(),
total_gradient_chi_arr.ptr(),
fs.n, MP_DP);
R total_gradient_phi = total_gradient_phi_arr.sum();
R total_gradient_chi = total_gradient_chi_arr.sum();
// Divide by total_gridpoints again to get *average* squared gradient and *average* potential energy.
avg_gradient_phi = total_gradient_phi/pow<2, R>(fs.total_gridpoints);
avg_gradient_chi = total_gradient_chi/pow<2, R>(fs.total_gridpoints);
}
// Explicit instantiations
template class integrator<double>;
|
1641593ac0a082aaef445ce3a328c81e225ba598.hip | // !!! This is a file automatically generated by hipify!!!
// $Smake: nvcc -Xptxas -v -O2 -o %F %f wtime.c
//
// Demonstrates use of device shared memory in matrix-matrix multiplication.
//
// Jonathan Senning <[email protected]>
// Department of Mathematics and Computer Science
// Gordon College, 255 Grapevine Road, Wenham MA 01984-1899
// Spring 2016, 2018.
#include <cstdio>
#include <hip/hip_runtime.h>
#include "wtime.h"
#define IDX(i,j,n) ((i)*(n)+j)
#if !defined(BS)
const int BlockDim = 16;
#else
const int BlockDim = BS; // needs to be 32 or less
#endif
const int MaxSizeToDisplay = 25;
typedef float FLOAT;
//typedef double FLOAT;
//----------------------------------------------------------------------------
// Matrix-matrix kernel (global memory)
__global__ void matmulGlobal( FLOAT* c, FLOAT* a, FLOAT* b, int n )
{
// element of matrix c to compute
const int col = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
if ( col >= n || row >= n ) return;
FLOAT sum = (FLOAT) 0.0;
for ( int k = 0; k < n; k++ )
{
sum += a[IDX(row,k,n)] * b[IDX(k,col,n)];
}
c[IDX(row,col,n)] = sum;
}
//----------------------------------------------------------------------------
// Matrix-matrix kernel (shared memory)
__global__ void matmulShared( FLOAT* c, FLOAT* a, FLOAT* b, int n )
{
__shared__ FLOAT a_s[BlockDim][BlockDim];
__shared__ FLOAT b_s[BlockDim][BlockDim];
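    // Each iteration of the m-loop below stages one BlockDim x BlockDim tile of A
    // and one of B in shared memory, so every value fetched from global memory is
    // reused BlockDim times by the threads of the block.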
// element of matrix c to compute
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if ( col >= n || row >= n ) return;
// loop over blocks from block row of matrix a and
// block column of matrix b.
FLOAT sum = (FLOAT) 0.0;
const int numBlocks = ( n + BlockDim - 1 ) / BlockDim;
for ( int m = 0; m < numBlocks; m++ )
{
// copy block from matrix to shared memory
int c = m * BlockDim + threadIdx.x;
int r = m * BlockDim + threadIdx.y;
c = c < n ? c : n - 1; // need to stay inbounds
r = r < n ? r : n - 1;
a_s[threadIdx.y][threadIdx.x] = a[IDX(row,c,n)];
b_s[threadIdx.y][threadIdx.x] = b[IDX(r,col,n)];
__syncthreads();
// length of this part of row-column product is BlockDim
// except for last block when it may be smaller
const int len = ( m == numBlocks - 1 ? n - m * BlockDim : BlockDim );
// compute this part of row-column product
for ( int k = 0; k < len; k++ )
{
sum += a_s[threadIdx.y][k] * b_s[k][threadIdx.x];
}
__syncthreads();
}
// all done; store computed element in matrix c
c[IDX(row,col,n)] = sum;
}
//-----------------------------------------------------------------------------
// Check CUDA function return error code
void cudaChkErr( hipError_t code )
{
if ( code != hipSuccess )
{
fprintf( stderr, "CUDA ERROR: %s\n", hipGetErrorString( code ) );
exit( EXIT_FAILURE );
}
}
//----------------------------------------------------------------------------
// Fill matrix with reasonable values
void initializeMatrix( FLOAT* a, int m, int n, FLOAT sf = 1.0 )
{
for ( int i = 0; i < m; i++ )
{
for ( int j = 0; j < n; j++ )
{
a[IDX(i,j,n)] = sf * (-1.0 * i + j);
}
}
}
//----------------------------------------------------------------------------
// Display matrix contents
void dumpMatrix( FLOAT* a, int m, int n )
{
for ( int i = 0; i < m; i++ )
{
printf( "[" );
for ( int j = 0; j < n; j++ )
{
printf( " %8.2f", a[IDX(i,j,n)] );
}
printf( "]\n" );
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
int main( int argc, char* argv[] )
{
double t0, t1; // timing variables
// Read matrix dimension from command line
int n = 4;
if ( argc > 1 ) n = atoi( argv[1] );
if ( n <= 0 ) n = 4; // safety check
printf( "matrix-matrix product with %dx%d matrices.\n", n, n );
printf( "BlockDim = %d\n", BlockDim );
// Declare and allocate memory for matrices
FLOAT* a = new FLOAT [n * n];
FLOAT* b = new FLOAT [n * n];
FLOAT* c = new FLOAT [n * n]; // C = A * B
// Initialize and display matrices (if small enough)
initializeMatrix( a, n, n, 1.0 );
initializeMatrix( b, n, n, 2.0 );
if ( n <= MaxSizeToDisplay )
{
printf( "A =\n" );
dumpMatrix( a, n, n );
printf( "\nB =\n" );
dumpMatrix( b, n, n );
}
// Declare and allocate memory for matrices on device
size_t matrixSize = n * n * sizeof( FLOAT );
FLOAT* a_d; // device memory for first factor
FLOAT* b_d; // device memory for second factor
FLOAT* c_d; // device memory for product
cudaChkErr( hipMalloc( &a_d, matrixSize ) );
cudaChkErr( hipMalloc( &b_d, matrixSize ) );
cudaChkErr( hipMalloc( &c_d, matrixSize ) );
// Initialize matrices on device
t0 = wtime();
cudaChkErr( hipMemcpy( a_d, a, matrixSize, hipMemcpyHostToDevice ) );
cudaChkErr( hipMemcpy( b_d, b, matrixSize, hipMemcpyHostToDevice ) );
t1 = wtime();
double data_transfer_time = t1 - t0;
// Prepare for kernel launches: use 2D grid
dim3 blockDim( BlockDim, BlockDim );
dim3 gridDim( ( n + blockDim.x - 1 ) / blockDim.x,
( n + blockDim.y - 1 ) / blockDim.y );
// Compute product using global-memory-only kernel
t0 = wtime();
hipLaunchKernelGGL(( matmulGlobal), dim3(gridDim), dim3(blockDim), 0, 0, c_d, a_d, b_d, n );
cudaChkErr( hipDeviceSynchronize() ); // wait for kernel to finish
cudaChkErr( hipGetLastError() ); // check for any errors in kernel
t1 = wtime();
double global_kernel_time = t1 - t0;
// Copy result from device to host
t0 = wtime();
cudaChkErr( hipMemcpy( c, c_d, matrixSize, hipMemcpyDeviceToHost ) );
t1 = wtime();
data_transfer_time += ( t1 - t0 );
if ( n <= MaxSizeToDisplay )
{
printf( "\n(Global Memory Only) A*B =\n" );
dumpMatrix( c, n, n );
}
// Compute product using shared-memory kernel
t0 = wtime();
hipLaunchKernelGGL(( matmulShared), dim3(gridDim), dim3(blockDim), 0, 0, c_d, a_d, b_d, n );
cudaChkErr( hipDeviceSynchronize() ); // wait for kernel to finish
cudaChkErr( hipGetLastError() ); // check for any errors in kernel
t1 = wtime();
double shared_kernel_time = t1 - t0;
// Copy result from device to host
cudaChkErr( hipMemcpy( c, c_d, matrixSize, hipMemcpyDeviceToHost ) );
if ( n <= MaxSizeToDisplay )
{
printf( "\n(with shared memory) A*B =\n" );
dumpMatrix( c, n, n );
}
// Report times and speedup
printf( "Data transfer time = %f sec\n", data_transfer_time );
printf( "Global kernel time = %f sec\n", global_kernel_time );
printf( "Shared kernel time = %f sec\n", shared_kernel_time );
printf( "Speedup = %6.2f\n", global_kernel_time / shared_kernel_time );
// All done; "let my people go!"
cudaChkErr( hipFree( a_d ) );
cudaChkErr( hipFree( b_d ) );
cudaChkErr( hipFree( c_d ) );
delete [] a;
delete [] b;
delete [] c;
return 0;
}
| 1641593ac0a082aaef445ce3a328c81e225ba598.cu | // $Smake: nvcc -Xptxas -v -O2 -o %F %f wtime.c
//
// Demonstrates use of device shared memory in matrix-matrix multiplication.
//
// Jonathan Senning <[email protected]>
// Department of Mathematics and Computer Science
// Gordon College, 255 Grapevine Road, Wenham MA 01984-1899
// Spring 2016, 2018.
#include <cstdio>
#include <cuda.h>
#include "wtime.h"
#define IDX(i,j,n) ((i)*(n)+j)
#if !defined(BS)
const int BlockDim = 16;
#else
const int BlockDim = BS; // needs to be 32 or less
#endif
const int MaxSizeToDisplay = 25;
typedef float FLOAT;
//typedef double FLOAT;
//----------------------------------------------------------------------------
// Matrix-matrix kernel (global memory)
__global__ void matmulGlobal( FLOAT* c, FLOAT* a, FLOAT* b, int n )
{
// element of matrix c to compute
const int col = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
if ( col >= n || row >= n ) return;
FLOAT sum = (FLOAT) 0.0;
for ( int k = 0; k < n; k++ )
{
sum += a[IDX(row,k,n)] * b[IDX(k,col,n)];
}
c[IDX(row,col,n)] = sum;
}
//----------------------------------------------------------------------------
// Matrix-matrix kernel (shared memory)
__global__ void matmulShared( FLOAT* c, FLOAT* a, FLOAT* b, int n )
{
__shared__ FLOAT a_s[BlockDim][BlockDim];
__shared__ FLOAT b_s[BlockDim][BlockDim];
// element of matrix c to compute
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if ( col >= n || row >= n ) return;
// loop over blocks from block row of matrix a and
// block column of matrix b.
FLOAT sum = (FLOAT) 0.0;
const int numBlocks = ( n + BlockDim - 1 ) / BlockDim;
for ( int m = 0; m < numBlocks; m++ )
{
// copy block from matrix to shared memory
int c = m * BlockDim + threadIdx.x;
int r = m * BlockDim + threadIdx.y;
c = c < n ? c : n - 1; // need to stay inbounds
r = r < n ? r : n - 1;
a_s[threadIdx.y][threadIdx.x] = a[IDX(row,c,n)];
b_s[threadIdx.y][threadIdx.x] = b[IDX(r,col,n)];
__syncthreads();
// length of this part of row-column product is BlockDim
// except for last block when it may be smaller
const int len = ( m == numBlocks - 1 ? n - m * BlockDim : BlockDim );
// compute this part of row-column product
for ( int k = 0; k < len; k++ )
{
sum += a_s[threadIdx.y][k] * b_s[k][threadIdx.x];
}
__syncthreads();
}
// all done; store computed element in matrix c
c[IDX(row,col,n)] = sum;
}
//-----------------------------------------------------------------------------
// Check CUDA function return error code
void cudaChkErr( cudaError_t code )
{
if ( code != cudaSuccess )
{
fprintf( stderr, "CUDA ERROR: %s\n", cudaGetErrorString( code ) );
exit( EXIT_FAILURE );
}
}
//----------------------------------------------------------------------------
// Fill matrix with reasonable values
void initializeMatrix( FLOAT* a, int m, int n, FLOAT sf = 1.0 )
{
for ( int i = 0; i < m; i++ )
{
for ( int j = 0; j < n; j++ )
{
a[IDX(i,j,n)] = sf * (-1.0 * i + j);
}
}
}
//----------------------------------------------------------------------------
// Display matrix contents
void dumpMatrix( FLOAT* a, int m, int n )
{
for ( int i = 0; i < m; i++ )
{
printf( "[" );
for ( int j = 0; j < n; j++ )
{
printf( " %8.2f", a[IDX(i,j,n)] );
}
printf( "]\n" );
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
int main( int argc, char* argv[] )
{
double t0, t1; // timing variables
// Read matrix dimension from command line
int n = 4;
if ( argc > 1 ) n = atoi( argv[1] );
if ( n <= 0 ) n = 4; // safety check
printf( "matrix-matrix product with %dx%d matrices.\n", n, n );
printf( "BlockDim = %d\n", BlockDim );
// Declare and allocate memory for matrices
FLOAT* a = new FLOAT [n * n];
FLOAT* b = new FLOAT [n * n];
FLOAT* c = new FLOAT [n * n]; // C = A * B
// Initialize and display matrices (if small enough)
initializeMatrix( a, n, n, 1.0 );
initializeMatrix( b, n, n, 2.0 );
if ( n <= MaxSizeToDisplay )
{
printf( "A =\n" );
dumpMatrix( a, n, n );
printf( "\nB =\n" );
dumpMatrix( b, n, n );
}
// Declare and allocate memory for matrices on device
size_t matrixSize = n * n * sizeof( FLOAT );
FLOAT* a_d; // device memory for first factor
FLOAT* b_d; // device memory for second factor
FLOAT* c_d; // device memory for product
cudaChkErr( cudaMalloc( &a_d, matrixSize ) );
cudaChkErr( cudaMalloc( &b_d, matrixSize ) );
cudaChkErr( cudaMalloc( &c_d, matrixSize ) );
// Initialize matrices on device
t0 = wtime();
cudaChkErr( cudaMemcpy( a_d, a, matrixSize, cudaMemcpyHostToDevice ) );
cudaChkErr( cudaMemcpy( b_d, b, matrixSize, cudaMemcpyHostToDevice ) );
t1 = wtime();
double data_transfer_time = t1 - t0;
// Prepare for kernel launches: use 2D grid
dim3 blockDim( BlockDim, BlockDim );
dim3 gridDim( ( n + blockDim.x - 1 ) / blockDim.x,
( n + blockDim.y - 1 ) / blockDim.y );
// Compute product using global-memory-only kernel
t0 = wtime();
matmulGlobal<<<gridDim, blockDim>>>( c_d, a_d, b_d, n );
cudaChkErr( cudaDeviceSynchronize() ); // wait for kernel to finish
cudaChkErr( cudaGetLastError() ); // check for any errors in kernel
t1 = wtime();
double global_kernel_time = t1 - t0;
// Copy result from device to host
t0 = wtime();
cudaChkErr( cudaMemcpy( c, c_d, matrixSize, cudaMemcpyDeviceToHost ) );
t1 = wtime();
data_transfer_time += ( t1 - t0 );
if ( n <= MaxSizeToDisplay )
{
printf( "\n(Global Memory Only) A*B =\n" );
dumpMatrix( c, n, n );
}
// Compute product using shared-memory kernel
t0 = wtime();
matmulShared<<<gridDim, blockDim>>>( c_d, a_d, b_d, n );
cudaChkErr( cudaDeviceSynchronize() ); // wait for kernel to finish
cudaChkErr( cudaGetLastError() ); // check for any errors in kernel
t1 = wtime();
double shared_kernel_time = t1 - t0;
// Copy result from device to host
cudaChkErr( cudaMemcpy( c, c_d, matrixSize, cudaMemcpyDeviceToHost ) );
if ( n <= MaxSizeToDisplay )
{
printf( "\n(with shared memory) A*B =\n" );
dumpMatrix( c, n, n );
}
// Report times and speedup
printf( "Data transfer time = %f sec\n", data_transfer_time );
printf( "Global kernel time = %f sec\n", global_kernel_time );
printf( "Shared kernel time = %f sec\n", shared_kernel_time );
printf( "Speedup = %6.2f\n", global_kernel_time / shared_kernel_time );
// All done; "let my people go!"
cudaChkErr( cudaFree( a_d ) );
cudaChkErr( cudaFree( b_d ) );
cudaChkErr( cudaFree( c_d ) );
delete [] a;
delete [] b;
delete [] c;
return 0;
}
|
0df34472e32d1527ff1fefdfe96236d4f049f31b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <cfloat>
#include "caffe/layers/roi_pooling_layer.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 4;
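    // Each ROI occupies 4 consecutive values (x1, y1, x2, y2) in input-image
    // pixels; spatial_scale maps those coordinates onto the pooled feature map.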
int roi_start_w = round(bottom_rois[0] * spatial_scale);
int roi_start_h = round(bottom_rois[1] * spatial_scale);
int roi_end_w = round(bottom_rois[2] * spatial_scale);
int roi_end_h = round(bottom_rois[3] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
const Dtype* offset_bottom_rois = bottom_rois + n * 4;
int roi_start_w = round(offset_bottom_rois[0] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[1] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[3] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
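    // [phstart, phend) x [pwstart, pwend) is the inverse of the forward binning:
    // every pooled bin whose window could contain (h, w). The argmax check below
    // then keeps only the bins that actually pooled this element.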
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe
| 0df34472e32d1527ff1fefdfe96236d4f049f31b.cu | // ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <cfloat>
#include "caffe/layers/roi_pooling_layer.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 4;
int roi_start_w = round(bottom_rois[0] * spatial_scale);
int roi_start_h = round(bottom_rois[1] * spatial_scale);
int roi_end_w = round(bottom_rois[2] * spatial_scale);
int roi_end_h = round(bottom_rois[3] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
const Dtype* offset_bottom_rois = bottom_rois + n * 4;
int roi_start_w = round(offset_bottom_rois[0] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[1] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[3] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe
|
fb143eccc6f2ed01c90b2cf8c2a4717095bd8ef6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <assert.h>
#include <type_traits>
#include "instanceNormFwd.h"
#include "instanceNormCommon.h"
namespace instance_norm_impl
{
static inline int div_up(int m, int n) {
return (m + n - 1) / n;
}
using kernel_params_32 = Instance_norm_kernel_params<uint16_t, uint16_t, uint16_t, 512, 8, 32>;
using kernel_params_64 = Instance_norm_kernel_params<uint16_t, uint16_t, uint16_t, 512, 16, 64>;
using kernel_params_32_int8 = Instance_norm_kernel_params<int8_t, int8_t, int8_t, 512, 8, 32>;
using kernel_params_32_int8_sm_700 = Instance_norm_kernel_params<int8_t, int8_t, int8_t, 512, 8, 32, 700>;
using kernel_params_32_int8_sm_720 = Instance_norm_kernel_params<int8_t, int8_t, int8_t, 512, 8, 32, 720>;
using kernel_params_32_int8_sm_750 = Instance_norm_kernel_params<int8_t, int8_t, int8_t, 512, 8, 32, 750>;
using kernel_params_32_int8_sm_800 = Instance_norm_kernel_params<int8_t, int8_t, int8_t, 512, 8, 32, 800>;
using kernel_params_32_int8_sm_860 = Instance_norm_kernel_params<int8_t, int8_t, int8_t, 512, 8, 32, 860>;
// debug :
//using kernel_params_32_int8 = Instance_norm_kernel_params<int8_t, int8_t, 256, 8, 32>;
using kernel_params_32_fp16_int8 = Instance_norm_kernel_params<uint16_t, int8_t, float, 512, 8, 32>;
//using kernel_params_32_int8 = Instance_norm_kernel_params<int8_t, int8_t, 512, 4, 16>;
template<
typename Storage,
typename Input_Data_Type,
typename Output_Data_Type,
int THREADS_PER_CTA,
int THREADS_PER_PIXEL,
int PIXELS_PER_THREAD_IN_REGISTERS,
int PIXELS_PER_THREAD_IN_SMEM,
int ELEMENTS_PER_LDG,
int USE_ONLINE_APPROACH,
int OUTER_LOOPS_,
int DESIRED_OCCUPANCY
>
__global__ __launch_bounds__(THREADS_PER_CTA, DESIRED_OCCUPANCY)
void instance_norm_fwd(InstanceNormFwdParams params) {
// Single pass numerically stable algorithm, see:
// https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
//
// n = 0, mean = 0.0, M2 = 0.0
//
// for x in data:
// n += 1
// delta = x - mean
// mean += delta/n
// delta2 = x - mean
// M2 += delta*delta2
//
// if n < 2:
// return float('nan')
// else:
// return M2 / (n - 1)
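    //
    // Per-thread partials (count, mean, M2) are later combined with the standard
    // parallel merge (Chan et al.): for two partitions A and B,
    //   mean_AB = (n_A*mean_A + n_B*mean_B) / (n_A + n_B)
    //   M2_AB   = M2_A + M2_B + (mean_A - mean_B)^2 * n_A*n_B / (n_A + n_B)
    // The "Adjust the variance" step below applies the same identity one partial
    // at a time against the CTA-wide mean.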
const bool IS_INPUT_INT8 = std::is_same<Input_Data_Type, int8_t>::value;
const bool IS_OUTPUT_INT8 = std::is_same<Output_Data_Type, int8_t>::value;
// The number of pixels loaded in a single LDG.
const int PIXELS_PER_LDG = THREADS_PER_CTA / THREADS_PER_PIXEL;
// The number of pixels computed per CTA stored in registers.
const int PIXELS_PER_CTA_IN_REGISTERS = PIXELS_PER_THREAD_IN_REGISTERS * PIXELS_PER_LDG;
// The number of pixels computed per CTA stored in SMEM.
const int PIXELS_PER_CTA_IN_SMEM = PIXELS_PER_THREAD_IN_SMEM*PIXELS_PER_LDG;
// The number of C elements per CTA.
const int C_ELEMENTS_PER_CTA = THREADS_PER_PIXEL*ELEMENTS_PER_LDG;
// Shared memory to do CTA-wide parallel sums.
__shared__ float smem[ELEMENTS_PER_LDG*THREADS_PER_CTA];
// The position in the NHW dimension where the CTA starts.
int cta_nhw_regs = blockIdx.x * PIXELS_PER_CTA_IN_REGISTERS;
// The position in the NHW dimension where the CTA starts for the portion in SMEM.
int cta_nhw_smem = blockIdx.x * PIXELS_PER_CTA_IN_SMEM;
// Compute the NHW coordinate of the thread in the CTA.
const int thread_in_cta_nhw = threadIdx.x / THREADS_PER_PIXEL;
for (int nc_blk_index = blockIdx.y; nc_blk_index < params.c_blks * params.n; nc_blk_index += gridDim.y) {
int n_blk_index = nc_blk_index / params.c_blks;
int c_blk_index = nc_blk_index % params.c_blks;
// The position in the C dimension where the CTA starts.
const int cta_c = c_blk_index * C_ELEMENTS_PER_CTA;
// Compute the C coordinate of the thread in the CTA.
const int thread_in_cta_c = threadIdx.x % THREADS_PER_PIXEL;
// Compute the C coordinate of the thread.
const int thread_c = cta_c + thread_in_cta_c*ELEMENTS_PER_LDG;
// Is the thread working on a valid C dimension?
const int is_valid_c = thread_c < params.c;
// The adapter for the storage.
typedef PackedStorage<Storage, ELEMENTS_PER_LDG> PackedStorage_;
// The data type for packed storage in SMEM.
typedef typename PackedStorage_::Type PackedStorageType;
// The number of elements in the packed storage.
const int PACKED_ELEMENTS_PER_LDG = PackedStorage_::PACKED_ELEMENTS_PER_LDG;
// Registers to keep the data live for the persistent approach.
PackedStorageType x_storage[PIXELS_PER_THREAD_IN_REGISTERS][PACKED_ELEMENTS_PER_LDG];
// Shared memory buffer to store the extra pixels.
extern __shared__ char smem_storage_[];
PackedStorageType * smem_storage = reinterpret_cast<PackedStorageType *>(smem_storage_);
float int8_in_scale = params.in_scale;
float int8_out_scale = params.out_scale;
// Register to store the number of elements read so far.
float count = 0.f, mean[ELEMENTS_PER_LDG], m2[ELEMENTS_PER_LDG];
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
mean[i] = 0.f;
m2 [i] = 0.f;
}
// The number of elements loaded by this CTA.
int cta_count = 0;
int global_batch_offset = n_blk_index * params.nhw * params.c;
// int8 relevant
    // int8 output implies we have NC/32DHW32 input for both fp16 and int8
int global_thread_c_input = ( IS_INPUT_INT8 || IS_OUTPUT_INT8 )? thread_in_cta_c*ELEMENTS_PER_LDG
+ (cta_c % 32) // handle C_ELEMENTS_PER_CTA == 16 case
+ (cta_c / 32) * 32 * params.nhw : thread_c;
int stride_c_input = ( IS_INPUT_INT8 || IS_OUTPUT_INT8 )? 32 : params.c;
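    // In the int8 paths the tensor uses a vectorized channel layout (channels
    // packed in groups of 32, NC/32DHW32-style): within a group consecutive pixels
    // are 32 elements apart, and whole channel groups are 32*params.nhw apart,
    // which is what the index/stride expressions above encode.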
int global_thread_c_output = ( IS_OUTPUT_INT8 )? thread_in_cta_c*ELEMENTS_PER_LDG
+ (cta_c % 32) // handle C_ELEMENTS_PER_CTA == 16 case
+ (cta_c / 32) * 32 * params.nhw : thread_c;
int stride_c_output = ( IS_OUTPUT_INT8 )? 32 : params.c;
// The base pointer to load from.
const Input_Data_Type *gmem_src = &reinterpret_cast<Input_Data_Type *>(params.gmem_src)[global_thread_c_input + global_batch_offset];
// Load the batch of elements. Compute the mean/var across those elements.
const int pixels_per_iteration = PIXELS_PER_CTA_IN_REGISTERS*gridDim.x;
// outer loops
int OUTER_LOOPS = OUTER_LOOPS_ == 1? 1 : params.outer_loops;
#pragma unroll 1
for( int loop_i = 0; loop_i < OUTER_LOOPS; ++loop_i ) {
// The nhw position.
int nhw_regs = cta_nhw_regs + loop_i*pixels_per_iteration;
cta_count += max(min(nhw_regs + PIXELS_PER_CTA_IN_REGISTERS, params.nhw) - max(nhw_regs, 0), 0);
// Load the data and compute the local mean/sum and the variance.
if( USE_ONLINE_APPROACH ) {
// Read the elements from memory.
float is_valid[PIXELS_PER_THREAD_IN_REGISTERS];
#pragma unroll
for( int i = 0; i < PIXELS_PER_THREAD_IN_REGISTERS; ++i ) {
const int idx = nhw_regs + thread_in_cta_nhw + i*PIXELS_PER_LDG;
zero(x_storage[i]);
is_valid[i] = 0.f;
if( idx < params.nhw && is_valid_c ) {
ldg_stream(x_storage[i], &gmem_src[idx*stride_c_input]);
is_valid[i] = 1.f;
}
}
// Do the math.
#pragma unroll
for( int i = 0; i < PIXELS_PER_THREAD_IN_REGISTERS; ++i ) {
// Convert to float.
float x_math[ELEMENTS_PER_LDG];
to_float<PACKED_ELEMENTS_PER_LDG, IS_INPUT_INT8>(x_math, x_storage[i], int8_in_scale);
// Update the count.
count += is_valid[i];
// Invert the count.
float inv_count = is_valid[i] ? 1.f / count : 0.f;
// Update the mean and m2 using deltas.
#pragma unroll
for( int j = 0; j < ELEMENTS_PER_LDG; ++j ) {
float delta0 = x_math[j] - mean[j];
mean[j] += delta0 * inv_count;
float delta1 = x_math[j] - mean[j];
m2[j] += delta0 * delta1 * is_valid[i];
}
}
} else {
// Read the elements from memory.
#pragma unroll
for( int i = 0; i < PIXELS_PER_THREAD_IN_REGISTERS; ++i ) {
const int idx = nhw_regs + thread_in_cta_nhw + i*PIXELS_PER_LDG;
zero(x_storage[i]);
if( idx < params.nhw && is_valid_c ) {
ldg_stream(x_storage[i], &gmem_src[idx * stride_c_input]);
count += 1.f;
}
}
// Sum the elements in registers.
#pragma unroll
for( int i = 0; i < PIXELS_PER_THREAD_IN_REGISTERS; ++i ) {
// Convert to float.
float x_math[ELEMENTS_PER_LDG];
to_float<PACKED_ELEMENTS_PER_LDG, IS_INPUT_INT8>(x_math, x_storage[i], int8_in_scale);
// Update the mean and m2 using deltas.
#pragma unroll
for( int j = 0; j < ELEMENTS_PER_LDG; ++j ) {
mean[j] += x_math[j];
}
}
// Compute the mean.
float inv_count = 1.f / count;
#pragma unroll
for( int j = 0; j < ELEMENTS_PER_LDG; ++j ) {
mean[j] *= inv_count;
}
// Compute the variance.
#pragma unroll
for( int i = 0; i < PIXELS_PER_THREAD_IN_REGISTERS; ++i ) {
// Convert to float.
float x_math[ELEMENTS_PER_LDG];
to_float<PACKED_ELEMENTS_PER_LDG, IS_INPUT_INT8>(x_math, x_storage[i], int8_in_scale);
// Is it a valid pixel?
float is_valid = i < (int) count ? 1.f : 0.f;
// Update the mean and m2 using deltas.
#pragma unroll
for( int j = 0; j < ELEMENTS_PER_LDG; ++j ) {
m2[j] += (x_math[j] - mean[j]) * (x_math[j] - mean[j]) * is_valid;
}
}
}
}
// The elements to load and store in SMEM.
int smem_nhw = OUTER_LOOPS*pixels_per_iteration + cta_nhw_smem;
// Load elements from SMEM, update the CTA count.
int pixels_in_smem = min(smem_nhw + PIXELS_PER_CTA_IN_SMEM, params.nhw) - max(smem_nhw, 0);
if( pixels_in_smem > 0 ) {
cta_count += pixels_in_smem;
for( int i = 0; i < PIXELS_PER_THREAD_IN_SMEM; ++i ) {
const int idx = smem_nhw + thread_in_cta_nhw + i*PIXELS_PER_LDG;
float is_pixel_valid = (idx < params.nhw && is_valid_c) ? 1.f : 0.f;
PackedStorageType x_storage_local[PACKED_ELEMENTS_PER_LDG];
ldg_stream(x_storage_local, &gmem_src[(is_pixel_valid ? idx : 0) * stride_c_input]);
// The offset to store in SMEM.
const int offset = i*THREADS_PER_CTA*PACKED_ELEMENTS_PER_LDG;
// Store in SMEM.
write_to_smem(&smem_storage[offset], threadIdx.x, x_storage_local);
// Update the count.
count += is_pixel_valid;
// Invert the count.
float inv_count = is_pixel_valid ? 1.f / count : 0.f;
float x_math[ELEMENTS_PER_LDG];
to_float<PACKED_ELEMENTS_PER_LDG, IS_INPUT_INT8>(x_math, x_storage_local, int8_in_scale);
// Update the mean and m2 using deltas.
#pragma unroll
for( int j = 0; j < ELEMENTS_PER_LDG; ++j ) {
float delta0 = x_math[j] - mean[j];
mean[j] += delta0 * inv_count;
float delta1 = x_math[j] - mean[j];
m2[j] += delta0 * delta1 * is_pixel_valid;
}
}
}
    // We scale the mean by the number of elements; this improves numerical stability.
float m1[ELEMENTS_PER_LDG];
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
m1[i] = mean[i] * count;
}
    // Run the parallel sum across the CTA to get the local sum.
ParallelSums<THREADS_PER_PIXEL, ELEMENTS_PER_LDG>::dispatch<THREADS_PER_CTA>(
smem, m1, thread_in_cta_nhw);
__syncthreads();
// The values in shared memory correspond to the CTA-wide sums.
read_from_smem(m1, smem, thread_in_cta_c);
__syncthreads();
// Adjust the variance.
float inv_cta_count = 1.f / (float) cta_count;
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
float mean_diff = m1[i]*inv_cta_count - mean[i];
m2[i] = m2[i] + mean_diff * mean_diff * count;
}
    // Run the parallel sum across the CTA to get the local adjusted variance.
ParallelSums<THREADS_PER_PIXEL, ELEMENTS_PER_LDG>::dispatch<THREADS_PER_CTA>(
smem, m2, thread_in_cta_nhw);
    // The workspace in global memory is distributed across the different CTAs.
int gmem_sums_offset = nc_blk_index*gridDim.x*C_ELEMENTS_PER_CTA*2;
// Write the data for the CTA to global memory.
GMEM_SUMS_TYPE *gmem_sums = ¶ms.gmem_sums[gmem_sums_offset];
if( threadIdx.x < THREADS_PER_PIXEL ) {
const int idx = blockIdx.x*THREADS_PER_PIXEL + threadIdx.x;
write_to_gmem(&gmem_sums[ 0], idx, m1);
write_to_gmem(&gmem_sums[C_ELEMENTS_PER_CTA*gridDim.x], idx, m2);
}
// The memory location to store the number of pixels per CTA.
int *gmem_counts = ¶ms.gmem_counts[nc_blk_index*gridDim.x];
if( threadIdx.x == 0 ) {
//gmem_counts[0] = cta_count;
gmem_counts[blockIdx.x] = cta_count;
}
// Read the bias and scale.
float bias[ELEMENTS_PER_LDG];
float scale[ELEMENTS_PER_LDG];
read_from_gmem(bias, ¶ms.gmem_bias[cta_c], thread_in_cta_c);
read_from_gmem(scale, ¶ms.gmem_scale[cta_c], thread_in_cta_c);
// The counters to count how many CTAs have retired at this point. One per chunk of C.
int *gmem_retired_ctas = ¶ms.gmem_retired_ctas[nc_blk_index];
// Make sure the threads are done and reconverged.
__syncthreads();
// Register the CTA.
int expected_count = gridDim.x;
if( threadIdx.x == 0 ) {
// Issue the membar.
__threadfence();
// Notify that the CTA is done.
int val_to_add = 1;
if (blockIdx.x == 0) {
val_to_add = -(expected_count - 1);
}
atomicAdd(gmem_retired_ctas, val_to_add);
}
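    // Together with the spin loop below this forms a software grid-wide barrier:
    // every CTA adds 1 except CTA 0, which adds -(gridDim.x - 1), so the counter
    // reaches exactly 0 only once all gridDim.x CTAs have arrived. This requires
    // all CTAs of the launch to be resident on the device at the same time, which
    // the launch logic guarantees by clamping the grid size.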
// Are all CTAs done?
if (threadIdx.x == 0) {
int retired_ctas = -1;
do {
__threadfence();
asm volatile("ld.global.cg.b32 %0, [%1];" : "=r"(retired_ctas) : "l"(gmem_retired_ctas));
} while (retired_ctas != 0);
}
__threadfence();
__syncthreads();
// Reset the mean to compute the global mean.
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
m1[i] = 0.f;
}
// Build the global mean.
#pragma unroll 1
for( int idx = threadIdx.x; idx < THREADS_PER_PIXEL*gridDim.x; idx += THREADS_PER_CTA ) {
float tmp[ELEMENTS_PER_LDG];
read_from_gmem(tmp, gmem_sums, idx);
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
m1[i] += tmp[i];
}
}
    // Run the parallel sum across the CTA to get the local sum.
ParallelSums<THREADS_PER_PIXEL, ELEMENTS_PER_LDG>::dispatch<THREADS_PER_CTA>(
smem, m1, thread_in_cta_nhw);
__syncthreads();
// The values in shared memory correspond to the CTA-wide sums.
read_from_smem(m1, smem, thread_in_cta_c);
__syncthreads();
// Normalize the mean.
float inv_count = 1.f / (float) params.nhw;
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
m1[i] = m1[i] * inv_count;
}
// Reset the variance.
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
m2[i] = 0.f;
}
// Build the global variance.
#pragma unroll 1
for( int idx = threadIdx.x; idx < THREADS_PER_PIXEL*gridDim.x; idx += THREADS_PER_CTA ) {
// Read the means computed by different CTAs (again). Reuse tmp if we have 1 iteration.
float tmp_mean[ELEMENTS_PER_LDG], tmp_var[ELEMENTS_PER_LDG];
read_from_gmem(tmp_mean, &gmem_sums[ 0], idx);
read_from_gmem(tmp_var, &gmem_sums[C_ELEMENTS_PER_CTA*gridDim.x], idx);
// Read the number of pixels visited by a given CTA.
cta_count = __ldg(&gmem_counts[idx / THREADS_PER_PIXEL]);
// Compute the diff to update the variance.
float mean_diff[ELEMENTS_PER_LDG], inv_cta_count = 1.f / (float) cta_count;
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
mean_diff[i] = m1[i] - tmp_mean[i]*inv_cta_count;
}
// Update the variance.
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
m2[i] += tmp_var[i] + mean_diff[i]*mean_diff[i]*(float) cta_count;
}
}
    // Run the parallel sum across the CTA to get the local sum.
ParallelSums<THREADS_PER_PIXEL, ELEMENTS_PER_LDG>::dispatch<THREADS_PER_CTA>(
smem, m2, thread_in_cta_nhw);
__syncthreads();
read_from_smem(m2, smem, thread_in_cta_c);
__syncthreads();
// Finalize the stddev.
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
m2[i] *= inv_count;
}
// store the saved mean/var
float svarinv[ELEMENTS_PER_LDG];
bool is_valid_for_saving = is_valid_c && blockIdx.x == 0 && thread_in_cta_nhw == 0;
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
svarinv[i] = rsqrtf(m2[i] + params.var_eps);
}
#if !DISABLE_MEAN_VAR_OUTPUT
int global_stats_offset = n_blk_index * params.c;
if( is_valid_for_saving ) {
write_to_gmem(params.gmem_saved_mean + global_stats_offset, \
thread_c/ELEMENTS_PER_LDG, m1);
write_to_gmem(params.gmem_saved_var + global_stats_offset, \
thread_c/ELEMENTS_PER_LDG, svarinv);
}
// store the running mean/var
float rmean[ELEMENTS_PER_LDG];
float rvar[ELEMENTS_PER_LDG];
zero(rmean);
zero(rvar);
if( params.exp_avg_factor != 1.f && is_valid_for_saving ) {
read_from_gmem(rmean, params.gmem_running_mean + global_stats_offset, \
thread_c/ELEMENTS_PER_LDG);
read_from_gmem(rvar, params.gmem_running_var + global_stats_offset, \
thread_c/ELEMENTS_PER_LDG);
}
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
rmean[i] = (1.f - params.exp_avg_factor) * rmean[i] + \
params.exp_avg_factor * m1[i];
rvar[i] = (1.f - params.exp_avg_factor) * rvar[i] + \
params.exp_avg_factor * m2[i];
}
if( is_valid_for_saving ) {
write_to_gmem(params.gmem_running_mean + global_stats_offset, thread_c/ELEMENTS_PER_LDG, rmean);
write_to_gmem(params.gmem_running_var + global_stats_offset, thread_c/ELEMENTS_PER_LDG, rvar);
}
#endif
// Update the scale with the stddev and eps.
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
scale[i] *= svarinv[i];
}
// The base pointer to write to.
Output_Data_Type *const gmem_dst = &reinterpret_cast<Output_Data_Type *>(params.gmem_dst)[global_thread_c_output + global_batch_offset];
// Store the elements in registers.
#pragma unroll 1
for( int loop_i = OUTER_LOOPS-1; loop_i >= 0; --loop_i ) {
// The value for nhw.
int out_nhw = cta_nhw_regs + loop_i*pixels_per_iteration;
// Normalize the elements and write to memory.
#pragma unroll
for( int i = 0; i < PIXELS_PER_THREAD_IN_REGISTERS; ++i ) {
// Convert to float.
float x_math[ELEMENTS_PER_LDG];
to_float<PACKED_ELEMENTS_PER_LDG, IS_INPUT_INT8>(x_math, x_storage[i], int8_in_scale);
// Normalize and apply activation function
normalize(x_math, bias, scale, m1);
if( params.use_relu ) {
relu_activation(x_math, params.relu_alpha);
}
// Write back.
const int idx = out_nhw + thread_in_cta_nhw + i*PIXELS_PER_LDG;
if( (unsigned) idx < params.nhw && is_valid_c ) {
stg_stream(&gmem_dst[idx*stride_c_output], x_math, int8_out_scale);
}
}
// The next value of nhw.
out_nhw -= pixels_per_iteration;
// Read the next elements from memory.
#pragma unroll
for( int i = 0; i < PIXELS_PER_THREAD_IN_REGISTERS; ++i ) {
const int idx = out_nhw + thread_in_cta_nhw + i*PIXELS_PER_LDG;
if( (unsigned) idx < params.nhw && is_valid_c ) {
ldg_stream(x_storage[i], &gmem_src[idx*stride_c_output]);
}
}
}
// Normalize the elements from SMEM and write them out.
if( pixels_in_smem > 0 ) {
for( int i = 0; i < PIXELS_PER_THREAD_IN_SMEM; ++i ) {
// Read from SMEM.
const int offset = i*THREADS_PER_CTA*PACKED_ELEMENTS_PER_LDG;
float x_math[ELEMENTS_PER_LDG];
PackedStorageType x_storage_local[PACKED_ELEMENTS_PER_LDG];
read_from_smem(x_storage_local, &smem_storage[offset], threadIdx.x);
to_float<PACKED_ELEMENTS_PER_LDG, IS_INPUT_INT8>(x_math, x_storage_local, int8_in_scale);
// Normalize and apply activation function
normalize(x_math, bias, scale, m1);
if( params.use_relu ) {
relu_activation(x_math, params.relu_alpha);
}
// Write back.
const int idx = smem_nhw + thread_in_cta_nhw + i*PIXELS_PER_LDG;
if( (unsigned) idx < params.nhw && is_valid_c ) {
stg_stream(&gmem_dst[idx*stride_c_output], x_math, int8_out_scale);
}
}
}
__syncthreads();
} // blockIdx.y loop
}
template <typename Kernel_params>
dim3 estimate_in_grid_dim(const InstanceNormFwdParams& params)
{
dim3 grid_dim;
grid_dim.x = div_up(params.nhw, Kernel_params::MIN_PIXELS_PER_CTA); // PIXELS_PER_CTA
grid_dim.y = div_up(params.c * params.n, Kernel_params::C_ELEMENTS_PER_CTA);
grid_dim.z = 1; //params.n;
return grid_dim;
}
template <typename Kernel_params>
void instance_norm_buffer_sizes(const InstanceNormFwdParams& params,
size_t &size_sums, size_t &size_counts, size_t &size_retired_ctas)
{
dim3 grid_dim = estimate_in_grid_dim<Kernel_params>(params);
size_sums = grid_dim.z*grid_dim.y*grid_dim.x*Kernel_params::THREADS_PER_PIXEL*Kernel_params::ELEMENTS_PER_LDG*2*sizeof(GMEM_SUMS_TYPE);
size_counts = grid_dim.z*grid_dim.y*grid_dim.x*sizeof(int);
size_retired_ctas = grid_dim.z*grid_dim.y*sizeof(int);
size_sums = div_up(size_sums, 256) * 256;
size_counts = div_up(size_counts, 256) * 256;
size_retired_ctas = div_up(size_retired_ctas, 256) * 256;
}
template <typename Kernel_params>
int instance_norm_fwd_launch(const InstanceNormFwdContext& context, InstanceNormFwdParams& params, hipStream_t stream)
{
size_t smem_size = Kernel_params::PIXELS_PER_THREAD_IN_SMEM *
Kernel_params::THREADS_PER_CTA *
Kernel_params::ELEMENTS_PER_LDG * sizeof(typename Kernel_params::StorageType);
dim3 grid_dim = estimate_in_grid_dim<Kernel_params>(params);
params.c_blks = div_up(params.c, Kernel_params::C_ELEMENTS_PER_CTA);
size_t size_retired_ctas = grid_dim.z*grid_dim.y*sizeof(int);
#define KERNEL_RUN(OUTER_LOOPS, DESIRED_OCCUPANCY) \
{ \
CHECK_CUDA(hipMemsetAsync(params.gmem_retired_ctas, 0, size_retired_ctas, stream)); \
if( smem_size > 0 ) \
CHECK_CUDA(hipFuncSetAttribute( \
instance_norm_fwd< \
typename Kernel_params::StorageType, \
typename Kernel_params::Input_Data_Type, \
typename Kernel_params::Output_Data_Type, \
Kernel_params::THREADS_PER_CTA, \
Kernel_params::THREADS_PER_PIXEL, \
Kernel_params::PIXELS_PER_THREAD_IN_REGISTERS, \
Kernel_params::PIXELS_PER_THREAD_IN_SMEM, \
Kernel_params::ELEMENTS_PER_LDG, \
Kernel_params::USE_ONLINE_APPROACH, \
OUTER_LOOPS, \
DESIRED_OCCUPANCY>, \
hipFuncAttributeMaxDynamicSharedMemorySize, \
smem_size)); \
hipLaunchKernelGGL(( instance_norm_fwd< \
typename Kernel_params::StorageType, \
typename Kernel_params::Input_Data_Type, \
typename Kernel_params::Output_Data_Type, \
Kernel_params::THREADS_PER_CTA, \
Kernel_params::THREADS_PER_PIXEL, \
Kernel_params::PIXELS_PER_THREAD_IN_REGISTERS, \
Kernel_params::PIXELS_PER_THREAD_IN_SMEM, \
Kernel_params::ELEMENTS_PER_LDG, \
Kernel_params::USE_ONLINE_APPROACH, \
OUTER_LOOPS, \
DESIRED_OCCUPANCY>), dim3(grid_dim),dim3(Kernel_params::THREADS_PER_CTA), smem_size, stream, params); }
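    // KERNEL_RUN zeroes the retired-CTA counters so every grid-wide barrier starts
    // from zero, opts the kernel in to the requested dynamic shared-memory size,
    // and launches the specialization selected by OUTER_LOOPS and DESIRED_OCCUPANCY.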
size_t total_smem_bytes = smem_size + Kernel_params::ELEMENTS_PER_LDG * Kernel_params::THREADS_PER_CTA * sizeof(float);
int smem_driven_fwd_occupancy = min(int(context.sm_shared_size) / (int)total_smem_bytes, (int)2);
int max_grid = context.sm_count * smem_driven_fwd_occupancy;
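    // The kernel's grid-wide barrier means every CTA of a launch must be resident
    // simultaneously, so max_grid is the number of co-resident CTAs the per-SM
    // shared-memory budget allows (capped at 2 per SM); grid_dim.x is clamped to
    // it below and any remaining pixels are handled by outer loops.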
if ((context.sm_version >= 700 ) && (context.sm_version < 800))
{
max_grid = max_grid - 4;
}
if (max_grid / int(grid_dim.x) > 1) {
grid_dim.y = max_grid / int(grid_dim.x);
grid_dim.y = int(grid_dim.y) > params.c_blks * params.n ? params.c_blks * params.n : int(grid_dim.y);
} else {
grid_dim.y = 1;
}
int loop = 1;
if( grid_dim.x <= max_grid ) {
if (smem_driven_fwd_occupancy >= 2) {
KERNEL_RUN(1, 2);
} else {
KERNEL_RUN(1, 1);
}
} else {
grid_dim.x = max_grid;
int nhw_in_regs = params.nhw - Kernel_params::PIXELS_PER_THREAD_IN_SMEM*Kernel_params::PIXELS_PER_LDG*grid_dim.x;
int pixels_per_iteration = Kernel_params::PIXELS_PER_THREAD_IN_REGISTERS*Kernel_params::PIXELS_PER_LDG*grid_dim.x;
nhw_in_regs = (nhw_in_regs <= 0)? pixels_per_iteration : nhw_in_regs;
if (nhw_in_regs < 0)
{
nhw_in_regs = pixels_per_iteration;
// make PIXELS_PER_THREAD_IN_SMEM <= PIXELS_PER_THREAD_IN_REGISTERS if the assert fails
assert(pixels_per_iteration >= params.nhw);
}
loop = div_up(nhw_in_regs, pixels_per_iteration);
params.outer_loops = loop;
assert(loop >= 1);
if( loop == 1 ) {
if (smem_driven_fwd_occupancy >= 2) {
KERNEL_RUN(1, 2);
} else {
KERNEL_RUN(1, 1);
}
} else {
if (smem_driven_fwd_occupancy >= 2) {
KERNEL_RUN(0, 2);
} else {
KERNEL_RUN(0, 1);
}
}
}
return loop;
}
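// Channel threshold for the fp16-in/fp16-out dispatch paths below: tensors with
// C <= 32 take the 32-channels-per-CTA kernel parameters, larger C takes the
// 64-channel variant.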
static int c_cond_g = 32;
void instance_norm_buffer_sizes_dispatch(const InstanceNormFwdContext& context, const InstanceNormFwdParams& params,
size_t &size_sums, size_t &size_counts, size_t &size_retired_ctas,
int input_data_type, int output_data_type)
{
if (input_data_type == 2 && output_data_type == 2) {
switch (context.sm_version)
{
case 700: return instance_norm_buffer_sizes<kernel_params_32_int8_sm_700>(params, size_sums, size_counts, size_retired_ctas); break;
case 720: return instance_norm_buffer_sizes<kernel_params_32_int8_sm_720>(params, size_sums, size_counts, size_retired_ctas); break;
case 750: return instance_norm_buffer_sizes<kernel_params_32_int8_sm_750>(params, size_sums, size_counts, size_retired_ctas); break;
case 800: return instance_norm_buffer_sizes<kernel_params_32_int8_sm_800>(params, size_sums, size_counts, size_retired_ctas); break;
case 860: return instance_norm_buffer_sizes<kernel_params_32_int8_sm_860>(params, size_sums, size_counts, size_retired_ctas); break;
default: return instance_norm_buffer_sizes<kernel_params_32_int8>(params, size_sums, size_counts, size_retired_ctas); break;
}
return instance_norm_buffer_sizes<kernel_params_32_int8>(params, size_sums, size_counts, size_retired_ctas);
} else if (input_data_type == 1 && output_data_type == 2) {
return instance_norm_buffer_sizes<kernel_params_32_fp16_int8>(params, size_sums, size_counts, size_retired_ctas);
} else if (input_data_type == 1 && output_data_type == 1) {
if (params.c <= c_cond_g) {
return instance_norm_buffer_sizes<kernel_params_32>(params, size_sums, size_counts, size_retired_ctas);
}
else {
return instance_norm_buffer_sizes<kernel_params_64>(params, size_sums, size_counts, size_retired_ctas);
}
} else {
fprintf(stderr, "Unsupported format combination by the instance norm kernel\n");
assert(0);
}
}
int instance_norm_fwd_dispatch(const InstanceNormFwdContext& context, InstanceNormFwdParams& params, hipStream_t stream,
int input_data_type, int output_data_type)
{
assert(context.sm_version >= 600);
if (input_data_type == 2 && output_data_type == 2) {
switch (context.sm_version)
{
case 700: return instance_norm_fwd_launch<kernel_params_32_int8_sm_700>(context, params, stream); break;
case 720: return instance_norm_fwd_launch<kernel_params_32_int8_sm_720>(context, params, stream); break;
case 750: return instance_norm_fwd_launch<kernel_params_32_int8_sm_750>(context, params, stream); break;
case 800: return instance_norm_fwd_launch<kernel_params_32_int8_sm_800>(context, params, stream); break;
case 860: return instance_norm_fwd_launch<kernel_params_32_int8_sm_860>(context, params, stream); break;
default: return instance_norm_fwd_launch<kernel_params_32_int8>(context, params, stream); break;
}
} else if (input_data_type == 1 && output_data_type == 2) {
return instance_norm_fwd_launch<kernel_params_32_fp16_int8>(context, params, stream);
} else if (input_data_type == 1 && output_data_type == 1) {
if (params.c <= c_cond_g) {
return instance_norm_fwd_launch<kernel_params_32>(context, params, stream);
}
else {
return instance_norm_fwd_launch<kernel_params_64>(context, params, stream);
}
} else {
fprintf(stderr, "Unsupported format combination by the instance norm kernel\n");
assert(0);
}
return 0;
}
} // namespace instance_norm_impl | fb143eccc6f2ed01c90b2cf8c2a4717095bd8ef6.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <assert.h>
#include <type_traits>
#include "instanceNormFwd.h"
#include "instanceNormCommon.h"
namespace instance_norm_impl
{
static inline int div_up(int m, int n) {
return (m + n - 1) / n;
}
using kernel_params_32 = Instance_norm_kernel_params<uint16_t, uint16_t, uint16_t, 512, 8, 32>;
using kernel_params_64 = Instance_norm_kernel_params<uint16_t, uint16_t, uint16_t, 512, 16, 64>;
using kernel_params_32_int8 = Instance_norm_kernel_params<int8_t, int8_t, int8_t, 512, 8, 32>;
using kernel_params_32_int8_sm_700 = Instance_norm_kernel_params<int8_t, int8_t, int8_t, 512, 8, 32, 700>;
using kernel_params_32_int8_sm_720 = Instance_norm_kernel_params<int8_t, int8_t, int8_t, 512, 8, 32, 720>;
using kernel_params_32_int8_sm_750 = Instance_norm_kernel_params<int8_t, int8_t, int8_t, 512, 8, 32, 750>;
using kernel_params_32_int8_sm_800 = Instance_norm_kernel_params<int8_t, int8_t, int8_t, 512, 8, 32, 800>;
using kernel_params_32_int8_sm_860 = Instance_norm_kernel_params<int8_t, int8_t, int8_t, 512, 8, 32, 860>;
// debug :
//using kernel_params_32_int8 = Instance_norm_kernel_params<int8_t, int8_t, 256, 8, 32>;
using kernel_params_32_fp16_int8 = Instance_norm_kernel_params<uint16_t, int8_t, float, 512, 8, 32>;
//using kernel_params_32_int8 = Instance_norm_kernel_params<int8_t, int8_t, 512, 4, 16>;
template<
typename Storage,
typename Input_Data_Type,
typename Output_Data_Type,
int THREADS_PER_CTA,
int THREADS_PER_PIXEL,
int PIXELS_PER_THREAD_IN_REGISTERS,
int PIXELS_PER_THREAD_IN_SMEM,
int ELEMENTS_PER_LDG,
int USE_ONLINE_APPROACH,
int OUTER_LOOPS_,
int DESIRED_OCCUPANCY
>
__global__ __launch_bounds__(THREADS_PER_CTA, DESIRED_OCCUPANCY)
void instance_norm_fwd(InstanceNormFwdParams params) {
// Single pass numerically stable algorithm, see:
// https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
//
// n = 0, mean = 0.0, M2 = 0.0
//
// for x in data:
// n += 1
// delta = x - mean
// mean += delta/n
// delta2 = x - mean
// M2 += delta*delta2
//
// if n < 2:
// return float('nan')
// else:
// return M2 / (n - 1)
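    //
    // Worked example (illustrative only), streaming the values 2, 4, 6:
    //   x=2: n=1, mean=2, M2=0
    //   x=4: n=2, mean=3, M2=2
    //   x=6: n=3, mean=4, M2=8   -> sample variance M2/(n-1) = 4
    // Each thread below keeps such a (count, mean, m2) triple per channel lane;
    // the partial results are then merged across the CTA and across CTAs.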
const bool IS_INPUT_INT8 = std::is_same<Input_Data_Type, int8_t>::value;
const bool IS_OUTPUT_INT8 = std::is_same<Output_Data_Type, int8_t>::value;
// The number of pixels loaded in a single LDG.
const int PIXELS_PER_LDG = THREADS_PER_CTA / THREADS_PER_PIXEL;
// The number of pixels computed per CTA stored in registers.
const int PIXELS_PER_CTA_IN_REGISTERS = PIXELS_PER_THREAD_IN_REGISTERS * PIXELS_PER_LDG;
// The number of pixels computed per CTA stored in SMEM.
const int PIXELS_PER_CTA_IN_SMEM = PIXELS_PER_THREAD_IN_SMEM*PIXELS_PER_LDG;
// The number of C elements per CTA.
const int C_ELEMENTS_PER_CTA = THREADS_PER_PIXEL*ELEMENTS_PER_LDG;
// Shared memory to do CTA-wide parallel sums.
__shared__ float smem[ELEMENTS_PER_LDG*THREADS_PER_CTA];
// The position in the NHW dimension where the CTA starts.
int cta_nhw_regs = blockIdx.x * PIXELS_PER_CTA_IN_REGISTERS;
// The position in the NHW dimension where the CTA starts for the portion in SMEM.
int cta_nhw_smem = blockIdx.x * PIXELS_PER_CTA_IN_SMEM;
// Compute the NHW coordinate of the thread in the CTA.
const int thread_in_cta_nhw = threadIdx.x / THREADS_PER_PIXEL;
for (int nc_blk_index = blockIdx.y; nc_blk_index < params.c_blks * params.n; nc_blk_index += gridDim.y) {
int n_blk_index = nc_blk_index / params.c_blks;
int c_blk_index = nc_blk_index % params.c_blks;
// The position in the C dimension where the CTA starts.
const int cta_c = c_blk_index * C_ELEMENTS_PER_CTA;
// Compute the C coordinate of the thread in the CTA.
const int thread_in_cta_c = threadIdx.x % THREADS_PER_PIXEL;
// Compute the C coordinate of the thread.
const int thread_c = cta_c + thread_in_cta_c*ELEMENTS_PER_LDG;
// Is the thread working on a valid C dimension?
const int is_valid_c = thread_c < params.c;
// The adapter for the storage.
typedef PackedStorage<Storage, ELEMENTS_PER_LDG> PackedStorage_;
// The data type for packed storage in SMEM.
typedef typename PackedStorage_::Type PackedStorageType;
// The number of elements in the packed storage.
const int PACKED_ELEMENTS_PER_LDG = PackedStorage_::PACKED_ELEMENTS_PER_LDG;
// Registers to keep the data live for the persistent approach.
PackedStorageType x_storage[PIXELS_PER_THREAD_IN_REGISTERS][PACKED_ELEMENTS_PER_LDG];
// Shared memory buffer to store the extra pixels.
extern __shared__ char smem_storage_[];
PackedStorageType * smem_storage = reinterpret_cast<PackedStorageType *>(smem_storage_);
float int8_in_scale = params.in_scale;
float int8_out_scale = params.out_scale;
// Register to store the number of elements read so far.
float count = 0.f, mean[ELEMENTS_PER_LDG], m2[ELEMENTS_PER_LDG];
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
mean[i] = 0.f;
m2 [i] = 0.f;
}
// The number of elements loaded by this CTA.
int cta_count = 0;
int global_batch_offset = n_blk_index * params.nhw * params.c;
// int8 relevant
        // int8 output implies we have NC/32DHW32 input for both fp16 and int8
int global_thread_c_input = ( IS_INPUT_INT8 || IS_OUTPUT_INT8 )? thread_in_cta_c*ELEMENTS_PER_LDG
+ (cta_c % 32) // handle C_ELEMENTS_PER_CTA == 16 case
+ (cta_c / 32) * 32 * params.nhw : thread_c;
int stride_c_input = ( IS_INPUT_INT8 || IS_OUTPUT_INT8 )? 32 : params.c;
int global_thread_c_output = ( IS_OUTPUT_INT8 )? thread_in_cta_c*ELEMENTS_PER_LDG
+ (cta_c % 32) // handle C_ELEMENTS_PER_CTA == 16 case
+ (cta_c / 32) * 32 * params.nhw : thread_c;
int stride_c_output = ( IS_OUTPUT_INT8 )? 32 : params.c;
// The base pointer to load from.
const Input_Data_Type *gmem_src = &reinterpret_cast<Input_Data_Type *>(params.gmem_src)[global_thread_c_input + global_batch_offset];
// Load the batch of elements. Compute the mean/var across those elements.
const int pixels_per_iteration = PIXELS_PER_CTA_IN_REGISTERS*gridDim.x;
// outer loops
int OUTER_LOOPS = OUTER_LOOPS_ == 1? 1 : params.outer_loops;
#pragma unroll 1
for( int loop_i = 0; loop_i < OUTER_LOOPS; ++loop_i ) {
// The nhw position.
int nhw_regs = cta_nhw_regs + loop_i*pixels_per_iteration;
cta_count += max(min(nhw_regs + PIXELS_PER_CTA_IN_REGISTERS, params.nhw) - max(nhw_regs, 0), 0);
// Load the data and compute the local mean/sum and the variance.
if( USE_ONLINE_APPROACH ) {
// Read the elements from memory.
float is_valid[PIXELS_PER_THREAD_IN_REGISTERS];
#pragma unroll
for( int i = 0; i < PIXELS_PER_THREAD_IN_REGISTERS; ++i ) {
const int idx = nhw_regs + thread_in_cta_nhw + i*PIXELS_PER_LDG;
zero(x_storage[i]);
is_valid[i] = 0.f;
if( idx < params.nhw && is_valid_c ) {
ldg_stream(x_storage[i], &gmem_src[idx*stride_c_input]);
is_valid[i] = 1.f;
}
}
// Do the math.
#pragma unroll
for( int i = 0; i < PIXELS_PER_THREAD_IN_REGISTERS; ++i ) {
// Convert to float.
float x_math[ELEMENTS_PER_LDG];
to_float<PACKED_ELEMENTS_PER_LDG, IS_INPUT_INT8>(x_math, x_storage[i], int8_in_scale);
// Update the count.
count += is_valid[i];
// Invert the count.
float inv_count = is_valid[i] ? 1.f / count : 0.f;
// Update the mean and m2 using deltas.
#pragma unroll
for( int j = 0; j < ELEMENTS_PER_LDG; ++j ) {
float delta0 = x_math[j] - mean[j];
mean[j] += delta0 * inv_count;
float delta1 = x_math[j] - mean[j];
m2[j] += delta0 * delta1 * is_valid[i];
}
}
} else {
// Read the elements from memory.
#pragma unroll
for( int i = 0; i < PIXELS_PER_THREAD_IN_REGISTERS; ++i ) {
const int idx = nhw_regs + thread_in_cta_nhw + i*PIXELS_PER_LDG;
zero(x_storage[i]);
if( idx < params.nhw && is_valid_c ) {
ldg_stream(x_storage[i], &gmem_src[idx * stride_c_input]);
count += 1.f;
}
}
// Sum the elements in registers.
#pragma unroll
for( int i = 0; i < PIXELS_PER_THREAD_IN_REGISTERS; ++i ) {
// Convert to float.
float x_math[ELEMENTS_PER_LDG];
to_float<PACKED_ELEMENTS_PER_LDG, IS_INPUT_INT8>(x_math, x_storage[i], int8_in_scale);
// Update the mean and m2 using deltas.
#pragma unroll
for( int j = 0; j < ELEMENTS_PER_LDG; ++j ) {
mean[j] += x_math[j];
}
}
// Compute the mean.
float inv_count = 1.f / count;
#pragma unroll
for( int j = 0; j < ELEMENTS_PER_LDG; ++j ) {
mean[j] *= inv_count;
}
// Compute the variance.
#pragma unroll
for( int i = 0; i < PIXELS_PER_THREAD_IN_REGISTERS; ++i ) {
// Convert to float.
float x_math[ELEMENTS_PER_LDG];
to_float<PACKED_ELEMENTS_PER_LDG, IS_INPUT_INT8>(x_math, x_storage[i], int8_in_scale);
// Is it a valid pixel?
float is_valid = i < (int) count ? 1.f : 0.f;
// Update the mean and m2 using deltas.
#pragma unroll
for( int j = 0; j < ELEMENTS_PER_LDG; ++j ) {
m2[j] += (x_math[j] - mean[j]) * (x_math[j] - mean[j]) * is_valid;
}
}
}
}
// The elements to load and store in SMEM.
int smem_nhw = OUTER_LOOPS*pixels_per_iteration + cta_nhw_smem;
// Load elements from SMEM, update the CTA count.
int pixels_in_smem = min(smem_nhw + PIXELS_PER_CTA_IN_SMEM, params.nhw) - max(smem_nhw, 0);
if( pixels_in_smem > 0 ) {
cta_count += pixels_in_smem;
for( int i = 0; i < PIXELS_PER_THREAD_IN_SMEM; ++i ) {
const int idx = smem_nhw + thread_in_cta_nhw + i*PIXELS_PER_LDG;
float is_pixel_valid = (idx < params.nhw && is_valid_c) ? 1.f : 0.f;
PackedStorageType x_storage_local[PACKED_ELEMENTS_PER_LDG];
ldg_stream(x_storage_local, &gmem_src[(is_pixel_valid ? idx : 0) * stride_c_input]);
// The offset to store in SMEM.
const int offset = i*THREADS_PER_CTA*PACKED_ELEMENTS_PER_LDG;
// Store in SMEM.
write_to_smem(&smem_storage[offset], threadIdx.x, x_storage_local);
// Update the count.
count += is_pixel_valid;
// Invert the count.
float inv_count = is_pixel_valid ? 1.f / count : 0.f;
float x_math[ELEMENTS_PER_LDG];
to_float<PACKED_ELEMENTS_PER_LDG, IS_INPUT_INT8>(x_math, x_storage_local, int8_in_scale);
// Update the mean and m2 using deltas.
#pragma unroll
for( int j = 0; j < ELEMENTS_PER_LDG; ++j ) {
float delta0 = x_math[j] - mean[j];
mean[j] += delta0 * inv_count;
float delta1 = x_math[j] - mean[j];
m2[j] += delta0 * delta1 * is_pixel_valid;
}
}
}
// We scale the mean by the number of elements. It brings more stability.
float m1[ELEMENTS_PER_LDG];
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
m1[i] = mean[i] * count;
}
        // Run the parallel sum across the CTA to get the local sum.
ParallelSums<THREADS_PER_PIXEL, ELEMENTS_PER_LDG>::dispatch<THREADS_PER_CTA>(
smem, m1, thread_in_cta_nhw);
__syncthreads();
// The values in shared memory correspond to the CTA-wide sums.
read_from_smem(m1, smem, thread_in_cta_c);
__syncthreads();
// Adjust the variance.
float inv_cta_count = 1.f / (float) cta_count;
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
float mean_diff = m1[i]*inv_cta_count - mean[i];
m2[i] = m2[i] + mean_diff * mean_diff * count;
}
        // Run the parallel sum across the CTA to get the local adjusted variance.
ParallelSums<THREADS_PER_PIXEL, ELEMENTS_PER_LDG>::dispatch<THREADS_PER_CTA>(
smem, m2, thread_in_cta_nhw);
// The workspace in global memory is distributed across the different CTA.
int gmem_sums_offset = nc_blk_index*gridDim.x*C_ELEMENTS_PER_CTA*2;
// Write the data for the CTA to global memory.
GMEM_SUMS_TYPE *gmem_sums = ¶ms.gmem_sums[gmem_sums_offset];
if( threadIdx.x < THREADS_PER_PIXEL ) {
const int idx = blockIdx.x*THREADS_PER_PIXEL + threadIdx.x;
write_to_gmem(&gmem_sums[ 0], idx, m1);
write_to_gmem(&gmem_sums[C_ELEMENTS_PER_CTA*gridDim.x], idx, m2);
}
// The memory location to store the number of pixels per CTA.
int *gmem_counts = ¶ms.gmem_counts[nc_blk_index*gridDim.x];
if( threadIdx.x == 0 ) {
//gmem_counts[0] = cta_count;
gmem_counts[blockIdx.x] = cta_count;
}
// Read the bias and scale.
float bias[ELEMENTS_PER_LDG];
float scale[ELEMENTS_PER_LDG];
read_from_gmem(bias, ¶ms.gmem_bias[cta_c], thread_in_cta_c);
read_from_gmem(scale, ¶ms.gmem_scale[cta_c], thread_in_cta_c);
// The counters to count how many CTAs have retired at this point. One per chunk of C.
int *gmem_retired_ctas = ¶ms.gmem_retired_ctas[nc_blk_index];
// Make sure the threads are done and reconverged.
__syncthreads();
// Register the CTA.
int expected_count = gridDim.x;
if( threadIdx.x == 0 ) {
// Issue the membar.
__threadfence();
// Notify that the CTA is done.
int val_to_add = 1;
if (blockIdx.x == 0) {
val_to_add = -(expected_count - 1);
}
atomicAdd(gmem_retired_ctas, val_to_add);
}
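        // Together with the polling loop below this forms a software grid-wide
        // barrier per (n, c) chunk: every CTA except blockIdx.x == 0 adds +1 and
        // CTA 0 adds -(gridDim.x - 1). A CTA only starts polling after adding its
        // own contribution, so it observes zero only once all gridDim.x CTAs have
        // deposited their partial sums.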
// Are all CTAs done?
if (threadIdx.x == 0) {
int retired_ctas = -1;
do {
__threadfence();
asm volatile("ld.global.cg.b32 %0, [%1];" : "=r"(retired_ctas) : "l"(gmem_retired_ctas));
} while (retired_ctas != 0);
}
__threadfence();
__syncthreads();
// Reset the mean to compute the global mean.
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
m1[i] = 0.f;
}
// Build the global mean.
#pragma unroll 1
for( int idx = threadIdx.x; idx < THREADS_PER_PIXEL*gridDim.x; idx += THREADS_PER_CTA ) {
float tmp[ELEMENTS_PER_LDG];
read_from_gmem(tmp, gmem_sums, idx);
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
m1[i] += tmp[i];
}
}
        // Run the parallel sum across the CTA to get the local sum.
ParallelSums<THREADS_PER_PIXEL, ELEMENTS_PER_LDG>::dispatch<THREADS_PER_CTA>(
smem, m1, thread_in_cta_nhw);
__syncthreads();
// The values in shared memory correspond to the CTA-wide sums.
read_from_smem(m1, smem, thread_in_cta_c);
__syncthreads();
// Normalize the mean.
float inv_count = 1.f / (float) params.nhw;
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
m1[i] = m1[i] * inv_count;
}
// Reset the variance.
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
m2[i] = 0.f;
}
// Build the global variance.
#pragma unroll 1
for( int idx = threadIdx.x; idx < THREADS_PER_PIXEL*gridDim.x; idx += THREADS_PER_CTA ) {
// Read the means computed by different CTAs (again). Reuse tmp if we have 1 iteration.
float tmp_mean[ELEMENTS_PER_LDG], tmp_var[ELEMENTS_PER_LDG];
read_from_gmem(tmp_mean, &gmem_sums[ 0], idx);
read_from_gmem(tmp_var, &gmem_sums[C_ELEMENTS_PER_CTA*gridDim.x], idx);
// Read the number of pixels visited by a given CTA.
cta_count = __ldg(&gmem_counts[idx / THREADS_PER_PIXEL]);
// Compute the diff to update the variance.
float mean_diff[ELEMENTS_PER_LDG], inv_cta_count = 1.f / (float) cta_count;
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
mean_diff[i] = m1[i] - tmp_mean[i]*inv_cta_count;
}
// Update the variance.
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
m2[i] += tmp_var[i] + mean_diff[i]*mean_diff[i]*(float) cta_count;
}
}
        // Run the parallel sum across the CTA to get the local sum.
ParallelSums<THREADS_PER_PIXEL, ELEMENTS_PER_LDG>::dispatch<THREADS_PER_CTA>(
smem, m2, thread_in_cta_nhw);
__syncthreads();
read_from_smem(m2, smem, thread_in_cta_c);
__syncthreads();
// Finalize the stddev.
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
m2[i] *= inv_count;
}
// store the saved mean/var
float svarinv[ELEMENTS_PER_LDG];
bool is_valid_for_saving = is_valid_c && blockIdx.x == 0 && thread_in_cta_nhw == 0;
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
svarinv[i] = rsqrtf(m2[i] + params.var_eps);
}
#if !DISABLE_MEAN_VAR_OUTPUT
int global_stats_offset = n_blk_index * params.c;
if( is_valid_for_saving ) {
write_to_gmem(params.gmem_saved_mean + global_stats_offset, \
thread_c/ELEMENTS_PER_LDG, m1);
write_to_gmem(params.gmem_saved_var + global_stats_offset, \
thread_c/ELEMENTS_PER_LDG, svarinv);
}
// store the running mean/var
float rmean[ELEMENTS_PER_LDG];
float rvar[ELEMENTS_PER_LDG];
zero(rmean);
zero(rvar);
if( params.exp_avg_factor != 1.f && is_valid_for_saving ) {
read_from_gmem(rmean, params.gmem_running_mean + global_stats_offset, \
thread_c/ELEMENTS_PER_LDG);
read_from_gmem(rvar, params.gmem_running_var + global_stats_offset, \
thread_c/ELEMENTS_PER_LDG);
}
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
rmean[i] = (1.f - params.exp_avg_factor) * rmean[i] + \
params.exp_avg_factor * m1[i];
rvar[i] = (1.f - params.exp_avg_factor) * rvar[i] + \
params.exp_avg_factor * m2[i];
}
if( is_valid_for_saving ) {
write_to_gmem(params.gmem_running_mean + global_stats_offset, thread_c/ELEMENTS_PER_LDG, rmean);
write_to_gmem(params.gmem_running_var + global_stats_offset, thread_c/ELEMENTS_PER_LDG, rvar);
}
#endif
// Update the scale with the stddev and eps.
#pragma unroll
for( int i = 0; i < ELEMENTS_PER_LDG; ++i ) {
scale[i] *= svarinv[i];
}
// The base pointer to write to.
Output_Data_Type *const gmem_dst = &reinterpret_cast<Output_Data_Type *>(params.gmem_dst)[global_thread_c_output + global_batch_offset];
// Store the elements in registers.
#pragma unroll 1
for( int loop_i = OUTER_LOOPS-1; loop_i >= 0; --loop_i ) {
// The value for nhw.
int out_nhw = cta_nhw_regs + loop_i*pixels_per_iteration;
// Normalize the elements and write to memory.
#pragma unroll
for( int i = 0; i < PIXELS_PER_THREAD_IN_REGISTERS; ++i ) {
// Convert to float.
float x_math[ELEMENTS_PER_LDG];
to_float<PACKED_ELEMENTS_PER_LDG, IS_INPUT_INT8>(x_math, x_storage[i], int8_in_scale);
// Normalize and apply activation function
normalize(x_math, bias, scale, m1);
if( params.use_relu ) {
relu_activation(x_math, params.relu_alpha);
}
// Write back.
const int idx = out_nhw + thread_in_cta_nhw + i*PIXELS_PER_LDG;
if( (unsigned) idx < params.nhw && is_valid_c ) {
stg_stream(&gmem_dst[idx*stride_c_output], x_math, int8_out_scale);
}
}
// The next value of nhw.
out_nhw -= pixels_per_iteration;
// Read the next elements from memory.
#pragma unroll
for( int i = 0; i < PIXELS_PER_THREAD_IN_REGISTERS; ++i ) {
const int idx = out_nhw + thread_in_cta_nhw + i*PIXELS_PER_LDG;
if( (unsigned) idx < params.nhw && is_valid_c ) {
ldg_stream(x_storage[i], &gmem_src[idx*stride_c_output]);
}
}
}
// Normalize the elements from SMEM and write them out.
if( pixels_in_smem > 0 ) {
for( int i = 0; i < PIXELS_PER_THREAD_IN_SMEM; ++i ) {
// Read from SMEM.
const int offset = i*THREADS_PER_CTA*PACKED_ELEMENTS_PER_LDG;
float x_math[ELEMENTS_PER_LDG];
PackedStorageType x_storage_local[PACKED_ELEMENTS_PER_LDG];
read_from_smem(x_storage_local, &smem_storage[offset], threadIdx.x);
to_float<PACKED_ELEMENTS_PER_LDG, IS_INPUT_INT8>(x_math, x_storage_local, int8_in_scale);
// Normalize and apply activation function
normalize(x_math, bias, scale, m1);
if( params.use_relu ) {
relu_activation(x_math, params.relu_alpha);
}
// Write back.
const int idx = smem_nhw + thread_in_cta_nhw + i*PIXELS_PER_LDG;
if( (unsigned) idx < params.nhw && is_valid_c ) {
stg_stream(&gmem_dst[idx*stride_c_output], x_math, int8_out_scale);
}
}
}
__syncthreads();
} // blockIdx.y loop
}
template <typename Kernel_params>
dim3 estimate_in_grid_dim(const InstanceNormFwdParams& params)
{
dim3 grid_dim;
grid_dim.x = div_up(params.nhw, Kernel_params::MIN_PIXELS_PER_CTA); // PIXELS_PER_CTA
grid_dim.y = div_up(params.c * params.n, Kernel_params::C_ELEMENTS_PER_CTA);
grid_dim.z = 1; //params.n;
return grid_dim;
}
template <typename Kernel_params>
void instance_norm_buffer_sizes(const InstanceNormFwdParams& params,
size_t &size_sums, size_t &size_counts, size_t &size_retired_ctas)
{
dim3 grid_dim = estimate_in_grid_dim<Kernel_params>(params);
size_sums = grid_dim.z*grid_dim.y*grid_dim.x*Kernel_params::THREADS_PER_PIXEL*Kernel_params::ELEMENTS_PER_LDG*2*sizeof(GMEM_SUMS_TYPE);
size_counts = grid_dim.z*grid_dim.y*grid_dim.x*sizeof(int);
size_retired_ctas = grid_dim.z*grid_dim.y*sizeof(int);
size_sums = div_up(size_sums, 256) * 256;
size_counts = div_up(size_counts, 256) * 256;
size_retired_ctas = div_up(size_retired_ctas, 256) * 256;
}
template <typename Kernel_params>
int instance_norm_fwd_launch(const InstanceNormFwdContext& context, InstanceNormFwdParams& params, cudaStream_t stream)
{
size_t smem_size = Kernel_params::PIXELS_PER_THREAD_IN_SMEM *
Kernel_params::THREADS_PER_CTA *
Kernel_params::ELEMENTS_PER_LDG * sizeof(typename Kernel_params::StorageType);
dim3 grid_dim = estimate_in_grid_dim<Kernel_params>(params);
params.c_blks = div_up(params.c, Kernel_params::C_ELEMENTS_PER_CTA);
size_t size_retired_ctas = grid_dim.z*grid_dim.y*sizeof(int);
#define KERNEL_RUN(OUTER_LOOPS, DESIRED_OCCUPANCY) \
{ \
CHECK_CUDA(cudaMemsetAsync(params.gmem_retired_ctas, 0, size_retired_ctas, stream)); \
if( smem_size > 0 ) \
CHECK_CUDA(cudaFuncSetAttribute( \
instance_norm_fwd< \
typename Kernel_params::StorageType, \
typename Kernel_params::Input_Data_Type, \
typename Kernel_params::Output_Data_Type, \
Kernel_params::THREADS_PER_CTA, \
Kernel_params::THREADS_PER_PIXEL, \
Kernel_params::PIXELS_PER_THREAD_IN_REGISTERS, \
Kernel_params::PIXELS_PER_THREAD_IN_SMEM, \
Kernel_params::ELEMENTS_PER_LDG, \
Kernel_params::USE_ONLINE_APPROACH, \
OUTER_LOOPS, \
DESIRED_OCCUPANCY>, \
cudaFuncAttributeMaxDynamicSharedMemorySize, \
smem_size)); \
instance_norm_fwd< \
typename Kernel_params::StorageType, \
typename Kernel_params::Input_Data_Type, \
typename Kernel_params::Output_Data_Type, \
Kernel_params::THREADS_PER_CTA, \
Kernel_params::THREADS_PER_PIXEL, \
Kernel_params::PIXELS_PER_THREAD_IN_REGISTERS, \
Kernel_params::PIXELS_PER_THREAD_IN_SMEM, \
Kernel_params::ELEMENTS_PER_LDG, \
Kernel_params::USE_ONLINE_APPROACH, \
OUTER_LOOPS, \
DESIRED_OCCUPANCY><<<grid_dim,Kernel_params::THREADS_PER_CTA, smem_size, stream>>>(params); }
size_t total_smem_bytes = smem_size + Kernel_params::ELEMENTS_PER_LDG * Kernel_params::THREADS_PER_CTA * sizeof(float);
int smem_driven_fwd_occupancy = min(int(context.sm_shared_size) / (int)total_smem_bytes, (int)2);
int max_grid = context.sm_count * smem_driven_fwd_occupancy;
if ((context.sm_version >= 700 ) && (context.sm_version < 800))
{
max_grid = max_grid - 4;
}
if (max_grid / int(grid_dim.x) > 1) {
grid_dim.y = max_grid / int(grid_dim.x);
grid_dim.y = int(grid_dim.y) > params.c_blks * params.n ? params.c_blks * params.n : int(grid_dim.y);
} else {
grid_dim.y = 1;
}
int loop = 1;
if( grid_dim.x <= max_grid ) {
if (smem_driven_fwd_occupancy >= 2) {
KERNEL_RUN(1, 2);
} else {
KERNEL_RUN(1, 1);
}
} else {
grid_dim.x = max_grid;
int nhw_in_regs = params.nhw - Kernel_params::PIXELS_PER_THREAD_IN_SMEM*Kernel_params::PIXELS_PER_LDG*grid_dim.x;
int pixels_per_iteration = Kernel_params::PIXELS_PER_THREAD_IN_REGISTERS*Kernel_params::PIXELS_PER_LDG*grid_dim.x;
nhw_in_regs = (nhw_in_regs <= 0)? pixels_per_iteration : nhw_in_regs;
if (nhw_in_regs < 0)
{
nhw_in_regs = pixels_per_iteration;
// make PIXELS_PER_THREAD_IN_SMEM <= PIXELS_PER_THREAD_IN_REGISTERS if the assert fails
assert(pixels_per_iteration >= params.nhw);
}
loop = div_up(nhw_in_regs, pixels_per_iteration);
params.outer_loops = loop;
assert(loop >= 1);
if( loop == 1 ) {
if (smem_driven_fwd_occupancy >= 2) {
KERNEL_RUN(1, 2);
} else {
KERNEL_RUN(1, 1);
}
} else {
if (smem_driven_fwd_occupancy >= 2) {
KERNEL_RUN(0, 2);
} else {
KERNEL_RUN(0, 1);
}
}
}
return loop;
}
static int c_cond_g = 32;
void instance_norm_buffer_sizes_dispatch(const InstanceNormFwdContext& context, const InstanceNormFwdParams& params,
size_t &size_sums, size_t &size_counts, size_t &size_retired_ctas,
int input_data_type, int output_data_type)
{
if (input_data_type == 2 && output_data_type == 2) {
switch (context.sm_version)
{
case 700: return instance_norm_buffer_sizes<kernel_params_32_int8_sm_700>(params, size_sums, size_counts, size_retired_ctas); break;
case 720: return instance_norm_buffer_sizes<kernel_params_32_int8_sm_720>(params, size_sums, size_counts, size_retired_ctas); break;
case 750: return instance_norm_buffer_sizes<kernel_params_32_int8_sm_750>(params, size_sums, size_counts, size_retired_ctas); break;
case 800: return instance_norm_buffer_sizes<kernel_params_32_int8_sm_800>(params, size_sums, size_counts, size_retired_ctas); break;
case 860: return instance_norm_buffer_sizes<kernel_params_32_int8_sm_860>(params, size_sums, size_counts, size_retired_ctas); break;
default: return instance_norm_buffer_sizes<kernel_params_32_int8>(params, size_sums, size_counts, size_retired_ctas); break;
}
return instance_norm_buffer_sizes<kernel_params_32_int8>(params, size_sums, size_counts, size_retired_ctas);
} else if (input_data_type == 1 && output_data_type == 2) {
return instance_norm_buffer_sizes<kernel_params_32_fp16_int8>(params, size_sums, size_counts, size_retired_ctas);
} else if (input_data_type == 1 && output_data_type == 1) {
if (params.c <= c_cond_g) {
return instance_norm_buffer_sizes<kernel_params_32>(params, size_sums, size_counts, size_retired_ctas);
}
else {
return instance_norm_buffer_sizes<kernel_params_64>(params, size_sums, size_counts, size_retired_ctas);
}
} else {
fprintf(stderr, "Unsupported format combination by the instance norm kernel\n");
assert(0);
}
}
int instance_norm_fwd_dispatch(const InstanceNormFwdContext& context, InstanceNormFwdParams& params, cudaStream_t stream,
int input_data_type, int output_data_type)
{
assert(context.sm_version >= 600);
if (input_data_type == 2 && output_data_type == 2) {
switch (context.sm_version)
{
case 700: return instance_norm_fwd_launch<kernel_params_32_int8_sm_700>(context, params, stream); break;
case 720: return instance_norm_fwd_launch<kernel_params_32_int8_sm_720>(context, params, stream); break;
case 750: return instance_norm_fwd_launch<kernel_params_32_int8_sm_750>(context, params, stream); break;
case 800: return instance_norm_fwd_launch<kernel_params_32_int8_sm_800>(context, params, stream); break;
case 860: return instance_norm_fwd_launch<kernel_params_32_int8_sm_860>(context, params, stream); break;
default: return instance_norm_fwd_launch<kernel_params_32_int8>(context, params, stream); break;
}
} else if (input_data_type == 1 && output_data_type == 2) {
return instance_norm_fwd_launch<kernel_params_32_fp16_int8>(context, params, stream);
} else if (input_data_type == 1 && output_data_type == 1) {
if (params.c <= c_cond_g) {
return instance_norm_fwd_launch<kernel_params_32>(context, params, stream);
}
else {
return instance_norm_fwd_launch<kernel_params_64>(context, params, stream);
}
} else {
fprintf(stderr, "Unsupported format combination by the instance norm kernel\n");
assert(0);
}
return 0;
}
} // namespace instance_norm_impl |
a0c663e995e2009c377f7246d1d361e4475d65ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/sparse_to_dense_op.h"
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
template <typename TInd, typename TData>
__global__ void SparseToDenseKernel(
size_t N, int64_t block_nitems, const TInd* indices, const TData* vals, TData* dst) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = indices[i / block_nitems];
int dst_idx = block_nitems * idx + i % block_nitems;
atomicAdd(&dst[dst_idx], vals[i]);
}
}
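// One thread handles one scattered element: N = len(indices) * block_nitems, where
// block_nitems is the number of values per sparse row. atomicAdd keeps the scatter
// correct when the same index appears more than once -- duplicates accumulate into
// the same dense row.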
template <>
bool SparseToDenseOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t>>::call(
this, Input(INDICES));
}
template <>
template <typename TInd>
bool SparseToDenseOp<CUDAContext>::DoRunWithType() {
return DispatchHelper<
TensorTypes2<
float,
int32_t>,
TInd>::call(this, Input(VALUES));
}
template <>
template <typename TInd, typename TData>
bool SparseToDenseOp<CUDAContext>::DoRunWithType2() {
auto& sparse_indices = Input(INDICES);
CAFFE_ENFORCE_EQ(sparse_indices.dim(), 1);
auto& sparse_values = Input(VALUES);
CAFFE_ENFORCE_GE(sparse_values.dim(), 1);
CAFFE_ENFORCE_EQ(sparse_indices.numel(), sparse_values.dim(0));
const TInd* sparse_indices_vec = sparse_indices.template data<TInd>();
const int32_t sparse_indices_len = sparse_indices.dim32(0);
const int output_first_dim =
GetOutputFirstDim(sparse_indices_vec, sparse_indices_len);
auto shape = sparse_values.sizes().vec();
shape[0] = output_first_dim;
auto* output = Output(0, shape, at::dtype<TData>());
TData* output_data = output->template mutable_data<TData>();
math::Set<TData>(output->numel(), TData(0), output_data, &context_);
const auto block_nitems = sparse_values.size_from_dim(1);
const TData* sparse_values_vec = sparse_values.template data<TData>();
size_t N = block_nitems * sparse_indices_len;
CAFFE_ENFORCE_EQ(output->numel(), output_first_dim * block_nitems);
hipLaunchKernelGGL(( SparseToDenseKernel<TInd, TData>),
dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
N,
block_nitems,
sparse_indices_vec,
sparse_values_vec,
output_data
);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(SparseToDense, SparseToDenseOp<CUDAContext>);
} // namespace caffe2
| a0c663e995e2009c377f7246d1d361e4475d65ab.cu | #include "caffe2/operators/sparse_to_dense_op.h"
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
template <typename TInd, typename TData>
__global__ void SparseToDenseKernel(
size_t N, int64_t block_nitems, const TInd* indices, const TData* vals, TData* dst) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = indices[i / block_nitems];
int dst_idx = block_nitems * idx + i % block_nitems;
atomicAdd(&dst[dst_idx], vals[i]);
}
}
template <>
bool SparseToDenseOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t>>::call(
this, Input(INDICES));
}
template <>
template <typename TInd>
bool SparseToDenseOp<CUDAContext>::DoRunWithType() {
return DispatchHelper<
TensorTypes2<
float,
int32_t>,
TInd>::call(this, Input(VALUES));
}
template <>
template <typename TInd, typename TData>
bool SparseToDenseOp<CUDAContext>::DoRunWithType2() {
auto& sparse_indices = Input(INDICES);
CAFFE_ENFORCE_EQ(sparse_indices.dim(), 1);
auto& sparse_values = Input(VALUES);
CAFFE_ENFORCE_GE(sparse_values.dim(), 1);
CAFFE_ENFORCE_EQ(sparse_indices.numel(), sparse_values.dim(0));
const TInd* sparse_indices_vec = sparse_indices.template data<TInd>();
const int32_t sparse_indices_len = sparse_indices.dim32(0);
const int output_first_dim =
GetOutputFirstDim(sparse_indices_vec, sparse_indices_len);
auto shape = sparse_values.sizes().vec();
shape[0] = output_first_dim;
auto* output = Output(0, shape, at::dtype<TData>());
TData* output_data = output->template mutable_data<TData>();
math::Set<TData>(output->numel(), TData(0), output_data, &context_);
const auto block_nitems = sparse_values.size_from_dim(1);
const TData* sparse_values_vec = sparse_values.template data<TData>();
size_t N = block_nitems * sparse_indices_len;
CAFFE_ENFORCE_EQ(output->numel(), output_first_dim * block_nitems);
SparseToDenseKernel<TInd, TData><<<
CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
N,
block_nitems,
sparse_indices_vec,
sparse_values_vec,
output_data
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(SparseToDense, SparseToDenseOp<CUDAContext>);
} // namespace caffe2
|
b6f090dd9d5d63faef03f25d9fba20762f38f24d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
*
* Code and text by Sean Baxter, NVIDIA Research
* See http://nvlabs.github.io/moderngpu for repository and documentation.
*
******************************************************************************/
#include "util/mgpucontext.h"
namespace mgpu {
////////////////////////////////////////////////////////////////////////////////
// CudaTimer
void CudaTimer::Start() {
hipEventRecord(start);
hipDeviceSynchronize();
}
double CudaTimer::Split() {
hipEventRecord(end);
hipDeviceSynchronize();
float t;
hipEventElapsedTime(&t, start, end);
start.Swap(end);
return (t / 1000.0);
}
double CudaTimer::Throughput(int count, int numIterations) {
double elapsed = Split();
return (double)numIterations * count / elapsed;
}
////////////////////////////////////////////////////////////////////////////////
// CudaDevice
__global__ void KernelVersionShim() { }
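// Empty kernel used purely as a probe: calling hipFuncGetAttributes() on it (see
// GetByOrdinal below) reports the PTX version this module was compiled with, so
// the library can detect devices the binary does not support.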
struct DeviceGroup {
int numCudaDevices;
CudaDevice** cudaDevices;
DeviceGroup() {
numCudaDevices = -1;
cudaDevices = 0;
}
int GetDeviceCount() {
if(-1 == numCudaDevices) {
hipError_t error = hipGetDeviceCount(&numCudaDevices);
if(hipSuccess != error || numCudaDevices <= 0) {
fprintf(stderr, "ERROR ENUMERATING CUDA DEVICES.\nExiting.\n");
exit(0);
}
cudaDevices = new CudaDevice*[numCudaDevices];
memset(cudaDevices, 0, sizeof(CudaDevice*) * numCudaDevices);
}
return numCudaDevices;
}
CudaDevice* GetByOrdinal(int ordinal) {
if(ordinal >= GetDeviceCount()) return 0;
if(!cudaDevices[ordinal]) {
// Retrieve the device properties.
CudaDevice* device = cudaDevices[ordinal] = new CudaDevice;
device->_ordinal = ordinal;
hipError_t error = hipGetDeviceProperties(&device->_prop,
ordinal);
if(hipSuccess != error) {
fprintf(stderr, "FAILURE TO CREATE CUDA DEVICE %d\n", ordinal);
exit(0);
}
// Get the compiler version for this device.
hipSetDevice(ordinal);
hipFuncAttributes attr;
error = hipFuncGetAttributes(&attr, KernelVersionShim);
if(hipSuccess == error)
device->_ptxVersion = 10 * attr.ptxVersion;
else {
printf("NOT COMPILED WITH COMPATIBLE PTX VERSION FOR DEVICE"
" %d\n", ordinal);
// The module wasn't compiled with support for this device.
device->_ptxVersion = 0;
}
}
return cudaDevices[ordinal];
}
~DeviceGroup() {
if(cudaDevices) {
for(int i = 0; i < numCudaDevices; ++i)
delete cudaDevices[i];
delete [] cudaDevices;
}
hipDeviceReset();
}
};
std::auto_ptr<DeviceGroup> deviceGroup;
int CudaDevice::DeviceCount() {
if(!deviceGroup.get())
deviceGroup.reset(new DeviceGroup);
return deviceGroup->GetDeviceCount();
}
CudaDevice& CudaDevice::ByOrdinal(int ordinal) {
if(ordinal < 0 || ordinal >= DeviceCount()) {
fprintf(stderr, "CODE REQUESTED INVALID CUDA DEVICE %d\n", ordinal);
exit(0);
}
return *deviceGroup->GetByOrdinal(ordinal);
}
CudaDevice& CudaDevice::Selected() {
int ordinal;
hipError_t error = hipGetDevice(&ordinal);
if(hipSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING CUDA DEVICE ORDINAL\n");
exit(0);
}
return ByOrdinal(ordinal);
}
void CudaDevice::SetActive() {
hipError_t error = hipSetDevice(_ordinal);
if(hipSuccess != error) {
fprintf(stderr, "ERROR SETTING CUDA DEVICE TO ORDINAL %d\n", _ordinal);
exit(0);
}
}
std::string CudaDevice::DeviceString() const {
size_t freeMem, totalMem;
hipError_t error = hipMemGetInfo(&freeMem, &totalMem);
if(hipSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING MEM INFO FOR CUDA DEVICE %d\n",
_ordinal);
exit(0);
}
double memBandwidth = (_prop.memoryClockRate * 1000.0) *
(_prop.memoryBusWidth / 8 * 2) / 1.0e9;
std::string s = stringprintf(
"%s : %8.3lf Mhz (Ordinal %d)\n"
"%d SMs enabled. Compute Capability sm_%d%d\n"
"FreeMem: %6dMB TotalMem: %6dMB %2d-bit pointers.\n"
"Mem Clock: %8.3lf Mhz x %d bits (%5.1lf GB/s)\n"
"ECC %s\n\n",
_prop.name, _prop.clockRate / 1000.0, _ordinal,
_prop.multiProcessorCount, _prop.major, _prop.minor,
(int)(freeMem / (1<< 20)), (int)(totalMem / (1<< 20)), 8 * sizeof(int*),
_prop.memoryClockRate / 1000.0, _prop.memoryBusWidth, memBandwidth,
_prop.ECCEnabled ? "Enabled" : "Disabled");
return s;
}
////////////////////////////////////////////////////////////////////////////////
// CudaContext
struct ContextGroup {
CudaContext** standardContexts;
int numDevices;
ContextGroup() {
numDevices = CudaDevice::DeviceCount();
standardContexts = new CudaContext*[numDevices];
memset(standardContexts, 0, sizeof(CudaContext*) * numDevices);
}
CudaContext* GetByOrdinal(int ordinal) {
if(!standardContexts[ordinal]) {
CudaDevice& device = CudaDevice::ByOrdinal(ordinal);
standardContexts[ordinal] = new CudaContext(device, false, true);
}
return standardContexts[ordinal];
}
~ContextGroup() {
if(standardContexts) {
for(int i = 0; i < numDevices; ++i)
delete standardContexts[i];
delete [] standardContexts;
}
}
};
std::auto_ptr<ContextGroup> contextGroup;
CudaContext::CudaContext(CudaDevice& device, bool newStream, bool standard) :
_event(hipEventDisableTiming /*| hipEventBlockingSync */),
_stream(0), _noRefCount(standard), _pageLocked(0) {
// Create an allocator.
if(standard)
_alloc.reset(new CudaAllocSimple(device));
else
_alloc = CreateDefaultAlloc(device);
if(newStream) hipStreamCreate(&_stream);
_ownStream = newStream;
// Allocate 4KB of page-locked memory.
hipError_t error = hipHostMalloc((void**)&_pageLocked, 4096);
// Allocate an auxiliary stream.
error = hipStreamCreate(&_auxStream);
}
CudaContext::~CudaContext() {
if(_pageLocked)
hipHostFree(_pageLocked);
if(_ownStream && _stream)
hipStreamDestroy(_stream);
if(_auxStream)
hipStreamDestroy(_auxStream);
}
AllocPtr CudaContext::CreateDefaultAlloc(CudaDevice& device) {
intrusive_ptr<CudaAllocBuckets> alloc(new CudaAllocBuckets(device));
size_t freeMem, totalMem;
hipError_t error = hipMemGetInfo(&freeMem, &totalMem);
if(hipSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING MEM INFO FOR CUDA DEVICE %d\n",
device.Ordinal());
exit(0);
}
// Maintain a buffer of 128MB with max objects of 64MB.
alloc->SetCapacity(128<< 20, 64<< 20);
return AllocPtr(alloc.get());
}
CudaContext& CudaContext::StandardContext(int ordinal) {
bool setActive = -1 != ordinal;
if(-1 == ordinal) {
hipError_t error = hipGetDevice(&ordinal);
if(hipSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING CUDA DEVICE ORDINAL\n");
exit(0);
}
}
int numDevices = CudaDevice::DeviceCount();
if(ordinal < 0 || ordinal >= numDevices) {
fprintf(stderr, "CODE REQUESTED INVALID CUDA DEVICE %d\n", ordinal);
exit(0);
}
if(!contextGroup.get())
contextGroup.reset(new ContextGroup);
CudaContext& context = //*contextGroup->standardContexts[ordinal];
*contextGroup->GetByOrdinal(ordinal);
if(!context.PTXVersion()) {
fprintf(stderr, "This CUDA executable was not compiled with support"
" for device %d (sm_%2d)\n", ordinal, context.ArchVersion() / 10);
exit(0);
}
if(setActive) context.SetActive();
return context;
}
ContextPtr CreateCudaDevice(int ordinal) {
CudaDevice& device = CudaDevice::ByOrdinal(ordinal);
ContextPtr context(new CudaContext(device, false, false));
return context;
}
ContextPtr CreateCudaDevice(int argc, char** argv, bool printInfo) {
int ordinal = 0;
if(argc >= 2 && !sscanf(argv[1], "%d", &ordinal)) {
fprintf(stderr, "INVALID COMMAND LINE ARGUMENT - NOT A CUDA ORDINAL\n");
exit(0);
}
ContextPtr context = CreateCudaDevice(ordinal);
if(!context->PTXVersion()) {
fprintf(stderr, "This CUDA executable was not compiled with support"
" for device %d (sm_%2d)\n", ordinal, context->ArchVersion() / 10);
exit(0);
}
context->SetActive();
if(printInfo)
printf("%s\n", context->Device().DeviceString().c_str());
return context;
}
ContextPtr CreateCudaDeviceStream(int ordinal) {
ContextPtr context(new CudaContext(
CudaDevice::ByOrdinal(ordinal), true, false));
return context;
}
ContextPtr CreateCudaDeviceStream(int argc, char** argv, bool printInfo) {
int ordinal = 0;
if(argc >= 2 && !sscanf(argv[1], "%d", &ordinal)) {
fprintf(stderr, "INVALID COMMAND LINE ARGUMENT - NOT A CUDA ORDINAL\n");
exit(0);
}
ContextPtr context = CreateCudaDeviceStream(ordinal);
if(!context->PTXVersion()) {
fprintf(stderr, "This CUDA executable was not compiled with support"
" for device %d (sm_%2d)\n", ordinal, context->ArchVersion() / 10);
exit(0);
}
context->SetActive();
if(printInfo)
printf("%s\n", context->Device().DeviceString().c_str());
return context;
}
ContextPtr CreateCudaDeviceAttachStream(int ordinal, hipStream_t stream) {
ContextPtr context(new CudaContext(
CudaDevice::ByOrdinal(ordinal), false, false));
context->_stream = stream;
return context;
}
ContextPtr CreateCudaDeviceAttachStream(hipStream_t stream) {
int ordinal;
hipGetDevice(&ordinal);
return CreateCudaDeviceAttachStream(ordinal, stream);
}
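// Usage sketch (hypothetical host code, not part of this file):
//   ContextPtr context = CreateCudaDevice(argc, argv, /*printInfo=*/true);
//   // ... pass *context to moderngpu routines, which allocate temporaries
//   // through its caching allocator.
// CreateCudaDeviceAttachStream() instead wraps a stream the caller already owns.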
////////////////////////////////////////////////////////////////////////////////
// CudaAllocSimple
hipError_t CudaAllocSimple::Malloc(size_t size, void** p) {
hipError_t error = hipSuccess;
*p = 0;
if(size) error = hipMalloc(p, size);
if(hipSuccess != error) {
printf("CUDA MALLOC ERROR %d\n", error);
exit(0);
}
return error;
}
bool CudaAllocSimple::Free(void* p) {
hipError_t error = hipSuccess;
if(p) error = hipFree(p);
return hipSuccess == error;
}
////////////////////////////////////////////////////////////////////////////////
// CudaAllocBuckets
CudaAllocBuckets::CudaAllocBuckets(CudaDevice& device) : CudaAlloc(device) {
_maxObjectSize = _capacity = _allocated = _committed = 0;
_counter = 0;
}
CudaAllocBuckets::~CudaAllocBuckets() {
SetCapacity(0, 0);
assert(!_allocated);
}
bool CudaAllocBuckets::SanityCheck() const {
// Iterate through all allocated objects and verify sizes.
size_t allocatedCount = 0, committedCount = 0;
for(AddressMap::const_iterator i = _addressMap.begin();
i != _addressMap.end(); ++i) {
int bucket = i->second->bucket;
size_t size = (bucket < NumBuckets) ? BucketSizes[bucket] : 0;
allocatedCount += size;
if(i->second->priority == _priorityMap.end())
committedCount += size;
}
return allocatedCount == _allocated && committedCount == _committed;
}
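// Allocation strategy implemented below: requests up to _maxObjectSize are rounded
// up to one of the exponentially spaced BucketSizes (larger requests bypass the
// cache). Free() does not hand cached objects back to the driver; it parks the
// node in a priority map (roughly least-recently-freed order) so a later Malloc()
// of the same bucket can splice it back out without calling hipMalloc. Compact()
// evicts nodes from that map whenever the cached memory would exceed the capacity
// set by SetCapacity().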
hipError_t CudaAllocBuckets::Malloc(size_t size, void** p) {
// Locate the bucket index and adjust the size of the allocation to the
// bucket size.
size_t allocSize = size;
size_t commitSize = 0;
int bucket = LocateBucket(size);
if(bucket < NumBuckets)
allocSize = commitSize = BucketSizes[bucket];
// Peel off an already-allocated node and reuse it.
MemList& list = _memLists[bucket];
if(list.size() && list.front().priority != _priorityMap.end()) {
MemList::iterator memIt = list.begin();
_priorityMap.erase(memIt->priority);
memIt->priority = _priorityMap.end();
list.splice(list.end(), list, memIt);
_committed += commitSize;
*p = memIt->address->first;
return hipSuccess;
}
// Shrink if this allocation would put us over the limit.
Compact(commitSize);
hipError_t error = hipSuccess;
*p = 0;
if(size) error = hipMalloc(p, allocSize);
while((hipErrorMemoryAllocation == error) && (_committed < _allocated)) {
SetCapacity(_capacity - _capacity / 10, _maxObjectSize);
		// Retry with the bucket-rounded size, writing through the caller's pointer.
		error = hipMalloc(p, allocSize);
}
if(hipSuccess != error) return error;
MemList::iterator memIt =
_memLists[bucket].insert(_memLists[bucket].end(), MemNode());
memIt->bucket = bucket;
memIt->address = _addressMap.insert(std::make_pair(*p, memIt)).first;
memIt->priority = _priorityMap.end();
_allocated += commitSize;
_committed += commitSize;
assert(SanityCheck());
return hipSuccess;
}
bool CudaAllocBuckets::Free(void* p) {
AddressMap::iterator it = _addressMap.find(p);
if(it == _addressMap.end()) {
	// If the pointer was not found in the address map, hipFree it anyway
// but return false.
if(p) hipFree(p);
return false;
}
// Because we're freeing a page, it had better not be in the priority queue.
MemList::iterator memIt = it->second;
assert(memIt->priority == _priorityMap.end());
// Always free allocations larger than the largest bucket
it->second->priority = _priorityMap.insert(
std::make_pair(_counter++ - memIt->bucket, memIt));
// Freed nodes are moved to the front, committed nodes are moved to the
// end.
int bucket = memIt->bucket;
size_t commitSize = (bucket < NumBuckets) ? BucketSizes[bucket] : 0;
MemList& list = _memLists[bucket];
list.splice(list.begin(), list, memIt);
_committed -= commitSize;
// Delete data that's not cached.
if(NumBuckets == bucket)
FreeNode(memIt);
Compact(0);
return true;
}
void CudaAllocBuckets::Clear() {
Compact(_allocated);
}
void CudaAllocBuckets::FreeNode(CudaAllocBuckets::MemList::iterator memIt) {
if(memIt->address->first) hipFree(memIt->address->first);
int bucket = memIt->bucket;
size_t commitSize = (bucket < NumBuckets) ? BucketSizes[bucket] : 0;
_addressMap.erase(memIt->address);
if(memIt->priority != _priorityMap.end())
_priorityMap.erase(memIt->priority);
else
_committed -= commitSize;
_allocated -= commitSize;
_memLists[bucket].erase(memIt);
assert(SanityCheck());
}
void CudaAllocBuckets::Compact(size_t extra) {
while(_allocated + extra > _capacity && _allocated > _committed) {
// Walk the priority queue from beginning to end removing nodes.
MemList::iterator memIt = _priorityMap.begin()->second;
FreeNode(memIt);
}
}
// Exponentially spaced buckets.
const size_t CudaAllocBuckets::BucketSizes[CudaAllocBuckets::NumBuckets] = {
256, 512, 1024, 2048, 4096, 8192,
12288, 16384, 24576, 32768, 49152, 65536,
98304, 131072, 174848, 218624, 262144, 349696,
436992, 524288, 655360, 786432, 917504, 1048576,
1310720, 1572864, 1835008, 2097152, 2516736, 2936064,
3355648, 3774976, 4194304, 4893440, 5592576, 6291456,
6990592, 7689728, 8388608, 9786880, 11184896, 12582912,
13981184, 15379200, 16777216, 18874368, 20971520, 23068672,
25165824, 27262976, 29360128, 31457280, 33554432, 36910080,
40265472, 43620864, 46976256, 50331648, 53687296, 57042688,
60398080, 63753472, 67108864, 72701440, 78293760, 83886080,
89478656, 95070976, 100663296, 106255872, 111848192, 117440512,
123033088, 128625408, 134217728, 143804928, 153391872, 162978816,
172565760, 182152704, 191739648, 201326592, 210913792, 220500736
};
int CudaAllocBuckets::LocateBucket(size_t size) const {
if(size > _maxObjectSize || size > BucketSizes[NumBuckets - 1])
return NumBuckets;
return (int)(std::lower_bound(BucketSizes, BucketSizes + NumBuckets, size) -
BucketSizes);
}
} // namespace mgpu
| b6f090dd9d5d63faef03f25d9fba20762f38f24d.cu | /******************************************************************************
* Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
*
* Code and text by Sean Baxter, NVIDIA Research
* See http://nvlabs.github.io/moderngpu for repository and documentation.
*
******************************************************************************/
#include "util/mgpucontext.h"
namespace mgpu {
////////////////////////////////////////////////////////////////////////////////
// CudaTimer
void CudaTimer::Start() {
cudaEventRecord(start);
cudaDeviceSynchronize();
}
double CudaTimer::Split() {
cudaEventRecord(end);
cudaDeviceSynchronize();
float t;
cudaEventElapsedTime(&t, start, end);
start.Swap(end);
return (t / 1000.0);
}
double CudaTimer::Throughput(int count, int numIterations) {
double elapsed = Split();
return (double)numIterations * count / elapsed;
}
////////////////////////////////////////////////////////////////////////////////
// CudaDevice
__global__ void KernelVersionShim() { }
struct DeviceGroup {
int numCudaDevices;
CudaDevice** cudaDevices;
DeviceGroup() {
numCudaDevices = -1;
cudaDevices = 0;
}
int GetDeviceCount() {
if(-1 == numCudaDevices) {
cudaError_t error = cudaGetDeviceCount(&numCudaDevices);
if(cudaSuccess != error || numCudaDevices <= 0) {
fprintf(stderr, "ERROR ENUMERATING CUDA DEVICES.\nExiting.\n");
exit(0);
}
cudaDevices = new CudaDevice*[numCudaDevices];
memset(cudaDevices, 0, sizeof(CudaDevice*) * numCudaDevices);
}
return numCudaDevices;
}
CudaDevice* GetByOrdinal(int ordinal) {
if(ordinal >= GetDeviceCount()) return 0;
if(!cudaDevices[ordinal]) {
// Retrieve the device properties.
CudaDevice* device = cudaDevices[ordinal] = new CudaDevice;
device->_ordinal = ordinal;
cudaError_t error = cudaGetDeviceProperties(&device->_prop,
ordinal);
if(cudaSuccess != error) {
fprintf(stderr, "FAILURE TO CREATE CUDA DEVICE %d\n", ordinal);
exit(0);
}
// Get the compiler version for this device.
cudaSetDevice(ordinal);
cudaFuncAttributes attr;
error = cudaFuncGetAttributes(&attr, KernelVersionShim);
if(cudaSuccess == error)
device->_ptxVersion = 10 * attr.ptxVersion;
else {
printf("NOT COMPILED WITH COMPATIBLE PTX VERSION FOR DEVICE"
" %d\n", ordinal);
// The module wasn't compiled with support for this device.
device->_ptxVersion = 0;
}
}
return cudaDevices[ordinal];
}
~DeviceGroup() {
if(cudaDevices) {
for(int i = 0; i < numCudaDevices; ++i)
delete cudaDevices[i];
delete [] cudaDevices;
}
cudaDeviceReset();
}
};
std::auto_ptr<DeviceGroup> deviceGroup;
int CudaDevice::DeviceCount() {
if(!deviceGroup.get())
deviceGroup.reset(new DeviceGroup);
return deviceGroup->GetDeviceCount();
}
CudaDevice& CudaDevice::ByOrdinal(int ordinal) {
if(ordinal < 0 || ordinal >= DeviceCount()) {
fprintf(stderr, "CODE REQUESTED INVALID CUDA DEVICE %d\n", ordinal);
exit(0);
}
return *deviceGroup->GetByOrdinal(ordinal);
}
CudaDevice& CudaDevice::Selected() {
int ordinal;
cudaError_t error = cudaGetDevice(&ordinal);
if(cudaSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING CUDA DEVICE ORDINAL\n");
exit(0);
}
return ByOrdinal(ordinal);
}
void CudaDevice::SetActive() {
cudaError_t error = cudaSetDevice(_ordinal);
if(cudaSuccess != error) {
fprintf(stderr, "ERROR SETTING CUDA DEVICE TO ORDINAL %d\n", _ordinal);
exit(0);
}
}
std::string CudaDevice::DeviceString() const {
size_t freeMem, totalMem;
cudaError_t error = cudaMemGetInfo(&freeMem, &totalMem);
if(cudaSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING MEM INFO FOR CUDA DEVICE %d\n",
_ordinal);
exit(0);
}
double memBandwidth = (_prop.memoryClockRate * 1000.0) *
(_prop.memoryBusWidth / 8 * 2) / 1.0e9;
std::string s = stringprintf(
"%s : %8.3lf Mhz (Ordinal %d)\n"
"%d SMs enabled. Compute Capability sm_%d%d\n"
"FreeMem: %6dMB TotalMem: %6dMB %2d-bit pointers.\n"
"Mem Clock: %8.3lf Mhz x %d bits (%5.1lf GB/s)\n"
"ECC %s\n\n",
_prop.name, _prop.clockRate / 1000.0, _ordinal,
_prop.multiProcessorCount, _prop.major, _prop.minor,
(int)(freeMem / (1<< 20)), (int)(totalMem / (1<< 20)), 8 * sizeof(int*),
_prop.memoryClockRate / 1000.0, _prop.memoryBusWidth, memBandwidth,
_prop.ECCEnabled ? "Enabled" : "Disabled");
return s;
}
////////////////////////////////////////////////////////////////////////////////
// CudaContext
struct ContextGroup {
CudaContext** standardContexts;
int numDevices;
ContextGroup() {
numDevices = CudaDevice::DeviceCount();
standardContexts = new CudaContext*[numDevices];
memset(standardContexts, 0, sizeof(CudaContext*) * numDevices);
}
CudaContext* GetByOrdinal(int ordinal) {
if(!standardContexts[ordinal]) {
CudaDevice& device = CudaDevice::ByOrdinal(ordinal);
standardContexts[ordinal] = new CudaContext(device, false, true);
}
return standardContexts[ordinal];
}
~ContextGroup() {
if(standardContexts) {
for(int i = 0; i < numDevices; ++i)
delete standardContexts[i];
delete [] standardContexts;
}
}
};
std::auto_ptr<ContextGroup> contextGroup;
CudaContext::CudaContext(CudaDevice& device, bool newStream, bool standard) :
_event(cudaEventDisableTiming /*| cudaEventBlockingSync */),
_stream(0), _noRefCount(standard), _pageLocked(0) {
// Create an allocator.
if(standard)
_alloc.reset(new CudaAllocSimple(device));
else
_alloc = CreateDefaultAlloc(device);
if(newStream) cudaStreamCreate(&_stream);
_ownStream = newStream;
// Allocate 4KB of page-locked memory.
cudaError_t error = cudaMallocHost((void**)&_pageLocked, 4096);
// Allocate an auxiliary stream.
error = cudaStreamCreate(&_auxStream);
}
CudaContext::~CudaContext() {
if(_pageLocked)
cudaFreeHost(_pageLocked);
if(_ownStream && _stream)
cudaStreamDestroy(_stream);
if(_auxStream)
cudaStreamDestroy(_auxStream);
}
AllocPtr CudaContext::CreateDefaultAlloc(CudaDevice& device) {
intrusive_ptr<CudaAllocBuckets> alloc(new CudaAllocBuckets(device));
size_t freeMem, totalMem;
cudaError_t error = cudaMemGetInfo(&freeMem, &totalMem);
if(cudaSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING MEM INFO FOR CUDA DEVICE %d\n",
device.Ordinal());
exit(0);
}
// Maintain a buffer of 128MB with max objects of 64MB.
alloc->SetCapacity(128<< 20, 64<< 20);
return AllocPtr(alloc.get());
}
CudaContext& CudaContext::StandardContext(int ordinal) {
bool setActive = -1 != ordinal;
if(-1 == ordinal) {
cudaError_t error = cudaGetDevice(&ordinal);
if(cudaSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING CUDA DEVICE ORDINAL\n");
exit(0);
}
}
int numDevices = CudaDevice::DeviceCount();
if(ordinal < 0 || ordinal >= numDevices) {
fprintf(stderr, "CODE REQUESTED INVALID CUDA DEVICE %d\n", ordinal);
exit(0);
}
if(!contextGroup.get())
contextGroup.reset(new ContextGroup);
CudaContext& context = //*contextGroup->standardContexts[ordinal];
*contextGroup->GetByOrdinal(ordinal);
if(!context.PTXVersion()) {
fprintf(stderr, "This CUDA executable was not compiled with support"
" for device %d (sm_%2d)\n", ordinal, context.ArchVersion() / 10);
exit(0);
}
if(setActive) context.SetActive();
return context;
}
ContextPtr CreateCudaDevice(int ordinal) {
CudaDevice& device = CudaDevice::ByOrdinal(ordinal);
ContextPtr context(new CudaContext(device, false, false));
return context;
}
ContextPtr CreateCudaDevice(int argc, char** argv, bool printInfo) {
int ordinal = 0;
if(argc >= 2 && !sscanf(argv[1], "%d", &ordinal)) {
fprintf(stderr, "INVALID COMMAND LINE ARGUMENT - NOT A CUDA ORDINAL\n");
exit(0);
}
ContextPtr context = CreateCudaDevice(ordinal);
if(!context->PTXVersion()) {
fprintf(stderr, "This CUDA executable was not compiled with support"
" for device %d (sm_%2d)\n", ordinal, context->ArchVersion() / 10);
exit(0);
}
context->SetActive();
if(printInfo)
printf("%s\n", context->Device().DeviceString().c_str());
return context;
}
ContextPtr CreateCudaDeviceStream(int ordinal) {
ContextPtr context(new CudaContext(
CudaDevice::ByOrdinal(ordinal), true, false));
return context;
}
ContextPtr CreateCudaDeviceStream(int argc, char** argv, bool printInfo) {
int ordinal = 0;
if(argc >= 2 && !sscanf(argv[1], "%d", &ordinal)) {
fprintf(stderr, "INVALID COMMAND LINE ARGUMENT - NOT A CUDA ORDINAL\n");
exit(0);
}
ContextPtr context = CreateCudaDeviceStream(ordinal);
if(!context->PTXVersion()) {
fprintf(stderr, "This CUDA executable was not compiled with support"
" for device %d (sm_%2d)\n", ordinal, context->ArchVersion() / 10);
exit(0);
}
context->SetActive();
if(printInfo)
printf("%s\n", context->Device().DeviceString().c_str());
return context;
}
ContextPtr CreateCudaDeviceAttachStream(int ordinal, cudaStream_t stream) {
ContextPtr context(new CudaContext(
CudaDevice::ByOrdinal(ordinal), false, false));
context->_stream = stream;
return context;
}
ContextPtr CreateCudaDeviceAttachStream(cudaStream_t stream) {
int ordinal;
cudaGetDevice(&ordinal);
return CreateCudaDeviceAttachStream(ordinal, stream);
}
////////////////////////////////////////////////////////////////////////////////
// CudaAllocSimple
cudaError_t CudaAllocSimple::Malloc(size_t size, void** p) {
cudaError_t error = cudaSuccess;
*p = 0;
if(size) error = cudaMalloc(p, size);
if(cudaSuccess != error) {
printf("CUDA MALLOC ERROR %d\n", error);
exit(0);
}
return error;
}
bool CudaAllocSimple::Free(void* p) {
cudaError_t error = cudaSuccess;
if(p) error = cudaFree(p);
return cudaSuccess == error;
}
////////////////////////////////////////////////////////////////////////////////
// CudaAllocBuckets
CudaAllocBuckets::CudaAllocBuckets(CudaDevice& device) : CudaAlloc(device) {
_maxObjectSize = _capacity = _allocated = _committed = 0;
_counter = 0;
}
CudaAllocBuckets::~CudaAllocBuckets() {
SetCapacity(0, 0);
assert(!_allocated);
}
bool CudaAllocBuckets::SanityCheck() const {
// Iterate through all allocated objects and verify sizes.
size_t allocatedCount = 0, committedCount = 0;
for(AddressMap::const_iterator i = _addressMap.begin();
i != _addressMap.end(); ++i) {
int bucket = i->second->bucket;
size_t size = (bucket < NumBuckets) ? BucketSizes[bucket] : 0;
allocatedCount += size;
if(i->second->priority == _priorityMap.end())
committedCount += size;
}
return allocatedCount == _allocated && committedCount == _committed;
}
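// Caching allocator: requests are rounded up to the nearest bucket size and
// freed blocks are kept in per-bucket lists keyed by a priority counter, so a
// later Malloc of the same bucket can reuse a cached block without cudaMalloc.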
cudaError_t CudaAllocBuckets::Malloc(size_t size, void** p) {
// Locate the bucket index and adjust the size of the allocation to the
// bucket size.
size_t allocSize = size;
size_t commitSize = 0;
int bucket = LocateBucket(size);
if(bucket < NumBuckets)
allocSize = commitSize = BucketSizes[bucket];
// Peel off an already-allocated node and reuse it.
MemList& list = _memLists[bucket];
if(list.size() && list.front().priority != _priorityMap.end()) {
MemList::iterator memIt = list.begin();
_priorityMap.erase(memIt->priority);
memIt->priority = _priorityMap.end();
list.splice(list.end(), list, memIt);
_committed += commitSize;
*p = memIt->address->first;
return cudaSuccess;
}
// Shrink if this allocation would put us over the limit.
Compact(commitSize);
cudaError_t error = cudaSuccess;
*p = 0;
if(size) error = cudaMalloc(p, allocSize);
while((cudaErrorMemoryAllocation == error) && (_committed < _allocated)) {
SetCapacity(_capacity - _capacity / 10, _maxObjectSize);
error = cudaMalloc(&p, size);
}
if(cudaSuccess != error) return error;
MemList::iterator memIt =
_memLists[bucket].insert(_memLists[bucket].end(), MemNode());
memIt->bucket = bucket;
memIt->address = _addressMap.insert(std::make_pair(*p, memIt)).first;
memIt->priority = _priorityMap.end();
_allocated += commitSize;
_committed += commitSize;
assert(SanityCheck());
return cudaSuccess;
}
bool CudaAllocBuckets::Free(void* p) {
AddressMap::iterator it = _addressMap.find(p);
if(it == _addressMap.end()) {
// If the pointer was not found in the address map, cudaFree it anyways
// but return false.
if(p) cudaFree(p);
return false;
}
// Because we're freeing a page, it had better not be in the priority queue.
MemList::iterator memIt = it->second;
assert(memIt->priority == _priorityMap.end());
// Always free allocations larger than the largest bucket
it->second->priority = _priorityMap.insert(
std::make_pair(_counter++ - memIt->bucket, memIt));
// Freed nodes are moved to the front, committed nodes are moved to the
// end.
int bucket = memIt->bucket;
size_t commitSize = (bucket < NumBuckets) ? BucketSizes[bucket] : 0;
MemList& list = _memLists[bucket];
list.splice(list.begin(), list, memIt);
_committed -= commitSize;
// Delete data that's not cached.
if(NumBuckets == bucket)
FreeNode(memIt);
Compact(0);
return true;
}
void CudaAllocBuckets::Clear() {
Compact(_allocated);
}
void CudaAllocBuckets::FreeNode(CudaAllocBuckets::MemList::iterator memIt) {
if(memIt->address->first) cudaFree(memIt->address->first);
int bucket = memIt->bucket;
size_t commitSize = (bucket < NumBuckets) ? BucketSizes[bucket] : 0;
_addressMap.erase(memIt->address);
if(memIt->priority != _priorityMap.end())
_priorityMap.erase(memIt->priority);
else
_committed -= commitSize;
_allocated -= commitSize;
_memLists[bucket].erase(memIt);
assert(SanityCheck());
}
void CudaAllocBuckets::Compact(size_t extra) {
while(_allocated + extra > _capacity && _allocated > _committed) {
// Walk the priority queue from beginning to end removing nodes.
MemList::iterator memIt = _priorityMap.begin()->second;
FreeNode(memIt);
}
}
// Exponentially spaced buckets.
const size_t CudaAllocBuckets::BucketSizes[CudaAllocBuckets::NumBuckets] = {
256, 512, 1024, 2048, 4096, 8192,
12288, 16384, 24576, 32768, 49152, 65536,
98304, 131072, 174848, 218624, 262144, 349696,
436992, 524288, 655360, 786432, 917504, 1048576,
1310720, 1572864, 1835008, 2097152, 2516736, 2936064,
3355648, 3774976, 4194304, 4893440, 5592576, 6291456,
6990592, 7689728, 8388608, 9786880, 11184896, 12582912,
13981184, 15379200, 16777216, 18874368, 20971520, 23068672,
25165824, 27262976, 29360128, 31457280, 33554432, 36910080,
40265472, 43620864, 46976256, 50331648, 53687296, 57042688,
60398080, 63753472, 67108864, 72701440, 78293760, 83886080,
89478656, 95070976, 100663296, 106255872, 111848192, 117440512,
123033088, 128625408, 134217728, 143804928, 153391872, 162978816,
172565760, 182152704, 191739648, 201326592, 210913792, 220500736
};
int CudaAllocBuckets::LocateBucket(size_t size) const {
if(size > _maxObjectSize || size > BucketSizes[NumBuckets - 1])
return NumBuckets;
return (int)(std::lower_bound(BucketSizes, BucketSizes + NumBuckets, size) -
BucketSizes);
}
} // namespace mgpu
|
d135d7d8e596539ebfb244c53ee6f500b0604c14.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "rocblas.h"
#include "../include/ParSecureML.h"
void MallocD(float *&gpu_a, int size){
hipError_t cudaStat;
cudaStat = hipMalloc((void**)&gpu_a, sizeof(*gpu_a)*size);
if(cudaStat != hipSuccess){
cout << "Malloc failed:" << hipGetErrorString(cudaStat)<< endl;
exit(0);
}
}
void CopyHtoD(float *gpu_a, float *a, int size){
hipError_t cudaStat;
cudaStat = hipMemcpy(gpu_a, a, sizeof(*a)*size, hipMemcpyHostToDevice);
if(cudaStat != hipSuccess){
cout << "Error code:" << cudaStat << endl;
cout << "CopyHtoD failed." << endl;
exit(0);
}
}
void CopyDtoH(float *&a, float *&gpu_a, int size){
hipError_t cudaStat;
cudaStat = hipMemcpy(a, gpu_a, sizeof(*a)*size, hipMemcpyDeviceToHost);
if(cudaStat != hipSuccess){
cout << "Error code:" << cudaStat << endl;
cout << "CopyDtoH failed." << endl;
exit(0);
}
}
void Support::GPU_Mul(){
hipblasStatus_t stat;
hipblasHandle_t handle;
stat = hipblasCreate(&handle);
if(stat != HIPBLAS_STATUS_SUCCESS){
cout << "CUBLAS create failed." << endl;
exit(0);
}
float alpha = 1;
float b = 0;
stat = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, row1, col2, col1, &alpha, GPU_U, row1, GPU_V, row2, &b, GPU_Z, row1);
if(stat != HIPBLAS_STATUS_SUCCESS){
cout << "Cublas sgemm failed." << endl;
exit(0);
}
}
void ReleaseGPU(float *A){
hipFree(A);
}
__global__ void cudaTripletSum(float *sum, float *fac1, float *fac2, float *fac3, int size){
int bid = blockIdx.x;
int tid = threadIdx.x;
int cur = bid*blockDim.x+tid;
if(cur >= size) return;
float tmp = fac1[cur] + fac2[cur] + fac3[cur];
sum[cur] = tmp;
}
__global__ void cudaSum(float *A, float *B, float *sum, int size){
int bid = blockIdx.x;
int tid = threadIdx.x;
int cur = bid*blockDim.x+tid;
if(cur >= size) return;
float tmp = A[cur]+B[cur];
sum[cur] = tmp;
}
__global__ void cudaMinus(float *A, float *B, float *min, int size){
int bid = blockIdx.x;
int tid = threadIdx.x;
int cur = bid*blockDim.x+tid;
if(cur >= size) return;
float tmp = A[cur]-B[cur];
min[cur] = tmp;
}
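// Triplet multiplication on the GPU: fac1 = A*F (flag==0) or (A-E)*F (flag==1),
// fac2 = E*B, then C = fac1 + fac2 + Z via cudaTripletSum.  The while-loops
// spin on flag1/flag2/flag3, which are presumably raised by a concurrent host
// thread once the corresponding device buffers have been filled.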
void Triplet::cudaTripletMul(int flag){
hipblasStatus_t stat;
hipblasHandle_t handle;
hipError_t cudaStat;
stat = hipblasCreate(&handle);
if(stat != HIPBLAS_STATUS_SUCCESS){
cout << "CUBLAS create failed." << endl;
exit(0);
}
float alpha1 = 1;
float alpha2 = 1;
float b = 0;
while(flag1 == 0){
continue;
}
if(flag == 0){
while(flag2 == 0){
continue;
}
stat = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, row1, col2, col1, &alpha1, GPU_A, row1, GPU_F, row2, &b, fac1, row1);
if(stat != HIPBLAS_STATUS_SUCCESS){
cout << "Cublas sgemm failed." << endl;
exit(0);
}
}
else if(flag == 1){
hipLaunchKernelGGL(( cudaMinus), dim3(row1*col1/1024+1), dim3(1024), 0, 0, GPU_A, GPU_E, GPU_D, row1*col1);
while(flag2 == 0){
continue;
}
stat = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, row1, col2, col1, &alpha1, GPU_D, row1, GPU_F, row2, &b, fac1, row1);
if(stat != HIPBLAS_STATUS_SUCCESS){
cout << "Cublas sgemm failed." << endl;
exit(0);
}
}
while(flag3 == 0){
continue;
}
stat = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, row1, col2, col1, &alpha2, GPU_E, row1, GPU_B, row2, &b, fac2, row1);
if(stat != HIPBLAS_STATUS_SUCCESS){
cout << "Cublas sgemm failed." << endl;
exit(0);
}
hipblasDestroy(handle);
hipLaunchKernelGGL(( cudaTripletSum), dim3(row1*col2/1024+1024), dim3(1024), 0, 0, GPU_C, fac1, fac2, GPU_Z, row1*col2);
cudaStat = hipGetLastError();
if(cudaStat != hipSuccess){
cout << "Kernel launch failed." << endl;
exit(0);
}
}
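// cudaConv: convolution analogue of cudaTripletMul - each thread produces one
// output pixel of one of `num` maps, accumulating the masked input (GPU_A,
// GPU_E) against the masked filter (GPU_B, GPU_F) plus the triplet term GPU_Z
// over the row2 x col2 window.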
__global__ void cudaConv(int flag, float *GPU_A, float *GPU_B, float *GPU_C, float *GPU_E, float *GPU_F, float *GPU_Z, int row1, int col1, int row2, int col2, int o_row, int o_col, int num){
int bid = blockIdx.x;
int tid = threadIdx.x;
int cur = bid*blockDim.x+tid;
if(cur >= num*o_row*o_col) return;
int num_cur = cur/(o_row*o_col);
int row_cur = cur%(o_row*o_col)/o_col;
int col_cur = cur%(o_row*o_col)%o_col;
float tem = 0;
for(int i = 0; i < row2; i++){
for(int j = 0; j < col2; j++){
tem += flag*GPU_E[num_cur*o_row*o_col*row2*col2+row_cur*o_col*row2*col2+col_cur*row2*col2+i*col2*j]*GPU_F[i*col2+j] + GPU_A[num_cur*row1*col1+(row_cur+i)*row1+col_cur+j] * GPU_F[i*col2+j] + GPU_E[num_cur*o_row*o_col*row2*col2+row_cur*o_col*row2*col2+col_cur*row2*col2+i*col2*j] * GPU_B[i*col2+j] + GPU_Z[i*col2+j];
}
}
GPU_C[num_cur*o_row*o_col+row_cur*o_col+col_cur] = tem;
}
void ConvTriplet::GPU_OP(int flag){
hipLaunchKernelGGL(( cudaConv), dim3(o_row*o_col*num/1024+1024), dim3(1024), 0, 0, flag, GPU_A, GPU_B, GPU_C, GPU_E, GPU_F, GPU_Z, row1, col1, row2, col2, o_row, o_row, num);
} | d135d7d8e596539ebfb244c53ee6f500b0604c14.cu | #include <cuda_runtime.h>
#include "cublas_v2.h"
#include "../include/ParSecureML.h"
void MallocD(float *&gpu_a, int size){
cudaError_t cudaStat;
cudaStat = cudaMalloc((void**)&gpu_a, sizeof(*gpu_a)*size);
if(cudaStat != cudaSuccess){
cout << "Malloc failed:" << cudaGetErrorString(cudaStat)<< endl;
exit(0);
}
}
void CopyHtoD(float *gpu_a, float *a, int size){
cudaError_t cudaStat;
cudaStat = cudaMemcpy(gpu_a, a, sizeof(*a)*size, cudaMemcpyHostToDevice);
if(cudaStat != cudaSuccess){
cout << "Error code:" << cudaStat << endl;
cout << "CopyHtoD failed." << endl;
exit(0);
}
}
void CopyDtoH(float *&a, float *&gpu_a, int size){
cudaError_t cudaStat;
cudaStat = cudaMemcpy(a, gpu_a, sizeof(*a)*size, cudaMemcpyDeviceToHost);
if(cudaStat != cudaSuccess){
cout << "Error code:" << cudaStat << endl;
cout << "CopyDtoH failed." << endl;
exit(0);
}
}
void Support::GPU_Mul(){
cublasStatus_t stat;
cublasHandle_t handle;
stat = cublasCreate(&handle);
if(stat != CUBLAS_STATUS_SUCCESS){
cout << "CUBLAS create failed." << endl;
exit(0);
}
float alpha = 1;
float b = 0;
stat = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, row1, col2, col1, &alpha, GPU_U, row1, GPU_V, row2, &b, GPU_Z, row1);
if(stat != CUBLAS_STATUS_SUCCESS){
cout << "Cublas sgemm failed." << endl;
exit(0);
}
}
void ReleaseGPU(float *A){
cudaFree(A);
}
__global__ void cudaTripletSum(float *sum, float *fac1, float *fac2, float *fac3, int size){
int bid = blockIdx.x;
int tid = threadIdx.x;
int cur = bid*blockDim.x+tid;
if(cur >= size) return;
float tmp = fac1[cur] + fac2[cur] + fac3[cur];
sum[cur] = tmp;
}
__global__ void cudaSum(float *A, float *B, float *sum, int size){
int bid = blockIdx.x;
int tid = threadIdx.x;
int cur = bid*blockDim.x+tid;
if(cur >= size) return;
float tmp = A[cur]+B[cur];
sum[cur] = tmp;
}
__global__ void cudaMinus(float *A, float *B, float *min, int size){
int bid = blockIdx.x;
int tid = threadIdx.x;
int cur = bid*blockDim.x+tid;
if(cur >= size) return;
float tmp = A[cur]-B[cur];
min[cur] = tmp;
}
void Triplet::cudaTripletMul(int flag){
cublasStatus_t stat;
cublasHandle_t handle;
cudaError_t cudaStat;
stat = cublasCreate(&handle);
if(stat != CUBLAS_STATUS_SUCCESS){
cout << "CUBLAS create failed." << endl;
exit(0);
}
float alpha1 = 1;
float alpha2 = 1;
float b = 0;
while(flag1 == 0){
continue;
}
if(flag == 0){
while(flag2 == 0){
continue;
}
stat = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, row1, col2, col1, &alpha1, GPU_A, row1, GPU_F, row2, &b, fac1, row1);
if(stat != CUBLAS_STATUS_SUCCESS){
cout << "Cublas sgemm failed." << endl;
exit(0);
}
}
else if(flag == 1){
cudaMinus<<<row1*col1/1024+1, 1024>>>(GPU_A, GPU_E, GPU_D, row1*col1);
while(flag2 == 0){
continue;
}
stat = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, row1, col2, col1, &alpha1, GPU_D, row1, GPU_F, row2, &b, fac1, row1);
if(stat != CUBLAS_STATUS_SUCCESS){
cout << "Cublas sgemm failed." << endl;
exit(0);
}
}
while(flag3 == 0){
continue;
}
stat = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, row1, col2, col1, &alpha2, GPU_E, row1, GPU_B, row2, &b, fac2, row1);
if(stat != CUBLAS_STATUS_SUCCESS){
cout << "Cublas sgemm failed." << endl;
exit(0);
}
cublasDestroy(handle);
cudaTripletSum<<<row1*col2/1024+1024, 1024>>>(GPU_C, fac1, fac2, GPU_Z, row1*col2);
cudaStat = cudaGetLastError();
if(cudaStat != cudaSuccess){
cout << "Kernel launch failed." << endl;
exit(0);
}
}
__global__ void cudaConv(int flag, float *GPU_A, float *GPU_B, float *GPU_C, float *GPU_E, float *GPU_F, float *GPU_Z, int row1, int col1, int row2, int col2, int o_row, int o_col, int num){
int bid = blockIdx.x;
int tid = threadIdx.x;
int cur = bid*blockDim.x+tid;
if(cur >= num*o_row*o_col) return;
int num_cur = cur/(o_row*o_col);
int row_cur = cur%(o_row*o_col)/o_col;
int col_cur = cur%(o_row*o_col)%o_col;
float tem = 0;
for(int i = 0; i < row2; i++){
for(int j = 0; j < col2; j++){
tem += flag*GPU_E[num_cur*o_row*o_col*row2*col2+row_cur*o_col*row2*col2+col_cur*row2*col2+i*col2*j]*GPU_F[i*col2+j] + GPU_A[num_cur*row1*col1+(row_cur+i)*row1+col_cur+j] * GPU_F[i*col2+j] + GPU_E[num_cur*o_row*o_col*row2*col2+row_cur*o_col*row2*col2+col_cur*row2*col2+i*col2*j] * GPU_B[i*col2+j] + GPU_Z[i*col2+j];
}
}
GPU_C[num_cur*o_row*o_col+row_cur*o_col+col_cur] = tem;
}
void ConvTriplet::GPU_OP(int flag){
cudaConv<<<o_row*o_col*num/1024+1024, 1024>>>(flag, GPU_A, GPU_B, GPU_C, GPU_E, GPU_F, GPU_Z, row1, col1, row2, col2, o_row, o_row, num);
} |
0008bedb89c12531542d0546fd22dd4d54395884.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "StiffnessMatrixGPU.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
StiffnessMatrixGPU::StiffnessMatrixGPU(double* mat, Geometry &geo, unsigned int n)
: StiffnessMatrixFirstOrder(mat, geo, n)
{
int device = -1;
hipGetDevice(&device);
// copy from the material matrix
//hipMallocManaged(&D_d, 6*sizeof(double));
//hipMemcpy(D_d, material->materialMatrix, 6*sizeof(double), hipMemcpyHostToDevice);
hipDeviceSynchronize();
Log::Logger().Info("StiffnessMatrixGPU created by CPU");
};
StiffnessMatrixGPU::~StiffnessMatrixGPU()
{
Log::Logger().Info("StiffnessMatrixGPU deleted by CPU");
//hipFree(D_d);
}
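// Thin grid-stride wrappers: each thread walks i = tid, tid+stride, ... and
// forwards the element index to the StiffnessMatrixGPU object's device methods.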
__global__ void constantCreatorKernel(int n, double* c, double* x, double* y, unsigned int* mesh, StiffnessMatrixGPU *s)
{
//printf("in the function\n blockDim.x = %d, gridDim.x = %d, blockIdx.x = %d\n", blockDim.x,gridDim.x, blockIdx.x);
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
{
//printf("i is %d stride is %d threadID = %d\n",i,stride,threadIdx.x);
s->constantCreator(i, c, x, y, mesh);
}
};
__global__ void StiffnessMatrixKernel(unsigned int n, unsigned int nip, double* in, unsigned int* ip, double* iw, double* c, double* D, unsigned int* mesh, double* k, unsigned int* i_index, unsigned int *j_index, unsigned int* dofFree, const double* thickness, StiffnessMatrixGPU *obj)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
{
obj->stiffnessMatrixCalculation(i, nip, in, ip, iw, c, D, mesh, k, i_index, j_index, dofFree, thickness);
}
}
Sparse& StiffnessMatrixGPU::GetStiffnessMatrix()
{
blockSize = 32;
//numberOfElements=33;
int numBlocks = (numberOfElements + blockSize-1)/blockSize;
hipLaunchKernelGGL(( constantCreatorKernel), dim3(numBlocks), dim3(blockSize), 0, 0, numberOfElements, c, geometry->get_x(), geometry->get_y(), geometry->get_mesh(), this);
hipDeviceSynchronize();
numBlocks = (simulationSize + blockSize-1)/blockSize;
Timer timer("Time spend in GPU: ");
hipLaunchKernelGGL(( StiffnessMatrixKernel), dim3(numBlocks), dim3(blockSize), 0, 0, numberOfElements, nipSquared, integrationNode, integrationPos, integrationWeight, c, material, geometry->get_mesh(), stiffMat->value, stiffMat->i, stiffMat->j , geometry->get_Dof().get_free(), geometry->get_thickness(),this);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipDeviceSynchronize();
return *stiffMat;
}
| 0008bedb89c12531542d0546fd22dd4d54395884.cu | #include "StiffnessMatrixGPU.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
StiffnessMatrixGPU::StiffnessMatrixGPU(double* mat, Geometry &geo, unsigned int n)
: StiffnessMatrixFirstOrder(mat, geo, n)
{
int device = -1;
cudaGetDevice(&device);
// copy from the material matrix
//cudaMallocManaged(&D_d, 6*sizeof(double));
//cudaMemcpy(D_d, material->materialMatrix, 6*sizeof(double), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
Log::Logger().Info("StiffnessMatrixGPU created by CPU");
};
StiffnessMatrixGPU::~StiffnessMatrixGPU()
{
Log::Logger().Info("StiffnessMatrixGPU deleted by CPU");
//cudaFree(D_d);
}
__global__ void constantCreatorKernel(int n, double* c, double* x, double* y, unsigned int* mesh, StiffnessMatrixGPU *s)
{
//printf("in the function\n blockDim.x = %d, gridDim.x = %d, blockIdx.x = %d\n", blockDim.x,gridDim.x, blockIdx.x);
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
{
//printf("i is %d stride is %d threadID = %d\n",i,stride,threadIdx.x);
s->constantCreator(i, c, x, y, mesh);
}
};
__global__ void StiffnessMatrixKernel(unsigned int n, unsigned int nip, double* in, unsigned int* ip, double* iw, double* c, double* D, unsigned int* mesh, double* k, unsigned int* i_index, unsigned int *j_index, unsigned int* dofFree, const double* thickness, StiffnessMatrixGPU *obj)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
{
obj->stiffnessMatrixCalculation(i, nip, in, ip, iw, c, D, mesh, k, i_index, j_index, dofFree, thickness);
}
}
Sparse& StiffnessMatrixGPU::GetStiffnessMatrix()
{
blockSize = 32;
//numberOfElements=33;
int numBlocks = (numberOfElements + blockSize-1)/blockSize;
constantCreatorKernel<<<numBlocks, blockSize>>>(numberOfElements, c, geometry->get_x(), geometry->get_y(), geometry->get_mesh(), this);
cudaDeviceSynchronize();
numBlocks = (simulationSize + blockSize-1)/blockSize;
Timer timer("Time spend in GPU: ");
StiffnessMatrixKernel<<<numBlocks, blockSize>>>(numberOfElements, nipSquared, integrationNode, integrationPos, integrationWeight, c, material, geometry->get_mesh(), stiffMat->value, stiffMat->i, stiffMat->j , geometry->get_Dof().get_free(), geometry->get_thickness(),this);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
cudaDeviceSynchronize();
return *stiffMat;
}
|
38f1c700c9baca71de00ed49fabfccfdaf063062.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include <stdio.h>
#define uint uint32_t
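// Lattice tiling: a block runs WST*WSU*WSV = 512 threads, each owning one
// 3x3x3 cell of sites, so a block holds a 24x24x24 tile of the lattice
// (BLOCK_SIZE = 13824 sites, one char each) in shared memory.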
#define WST 8
#define WSU 8
#define WSV 8
#define WS (WST*WSU*WSV)
#define CELL_LENGTH 3
#define CELL_SIZE (CELL_LENGTH*CELL_LENGTH*CELL_LENGTH)
#define BLOCK_SIZE (WS*CELL_SIZE)
#define WLT (WST*CELL_LENGTH)
#define WLU (WSU*CELL_LENGTH)
#define WLV (WSV*CELL_LENGTH)
#define WS_MASK (WS-1)
#define TID_MASK (WST-1)
#define UID_MASK (WSU-1)
#define VID_MASK (WSV-1)
#include <stdint.h>
#define uint uint32_t
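// xorshift128-style PRNG: rotates the 4-word state and returns a new 32-bit value.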
__device__ uint Rng4(uint4& state){
uint t=state.w;
t^= t << 11;
t^= t >> 8;
state.w=state.z; state.z=state.y; state.y=state.x;
t ^= state.x;
t ^= state.x>>19;
state.x=t;
return t;
}
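// TUVToIndex / IndexToTUV map lattice coordinates (t,u,v) to the shared-memory
// layout: bits 0-8 select the 3x3x3 cell (t/3 + WST*(u/3) + WST*WSU*(v/3)),
// bits 9+ select the site inside that cell (t%3 + 3*(u%3) + 9*(v%3)).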
__device__ int TUVToIndex(int t, int u, int v){
int index=0;
index += (t%3)<<9;
index += ((u%3)*3)<<9;
index += ((v%3)*9)<<9;
index += (t/3)+(u/3)*WST+(v/3)*WST*WSU;
return index;
}
__device__ void IndexToTUV(int index, int& t, int& u, int& v){
t=(index>>9)%3;
u=((index>>9)/3)%3;
v=((index>>9)/9);
t+= (index&0x7)*3;
u+= ((index>>3)&0x7)*3;
v+= ((index>>6)&0x7)*3;
}
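// Steps a site index by a 4-bit lattice unit vector (bits 0-2 add +t/+u/+v,
// bit 3 subtracts the body diagonal); OOB becomes nonzero when the step
// leaves this block's 24x24x24 tile.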
__device__ int AddUnitToIndex(int unit, int index, int& OOB){
int dt = ((unit>>0)&0x1);
int du = ((unit>>1)&0x1);
int dv = ((unit>>2)&0x1);
int dw = (unit>>3)&0x1;
int t,u,v;
IndexToTUV(index, t,u,v);
t+= dt-dw;
u+= du-dw;
v+= dv-dw;
OOB = ((t+WLT)/WLT-1);
OOB |= ((u+WLU)/WLU-1);
OOB |= ((v+WLV)/WLV-1);
int newIndex = TUVToIndex(t,u,v);
return newIndex;
}
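// Arithmetic on the 4-bit bond codes: the "sum" of two codes is their OR,
// except that a pair completing 0xf collapses to their AND.  The Validate
// variant also rejects overlapping codes and the disallowed results 0x3 / 0xc.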
__device__ uint GPUValidateAddUnitVectors(int a, int b, int& c){
int valid;
if((a|b) != 0xf && (a&b))
return 0;
c = (((a|b)==0xf)?(a&b):(a|b));
valid = (c==0x3||c==0xc)?0:1;
return valid;
}
__device__ uint GPUAddUnitVectors(uint a, uint b){
return (((a|b)==0xf)?(a&b):(a|b));
}
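// TransForw: attempts to split the bond at `index` into two bonds through a
// neighbouring empty site.  Requires a stored-length flag (bit 0x40) on this
// site or on the site its bond points to; the candidate bond pair is drawn
// from the `trans` lookup table using the per-thread RNG.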
__device__ void TransForw(char* lattice, int index, uint* trans, uint4& rngState){
int OOB;
int latSiteComplete = lattice[index];
if(!latSiteComplete) return;
int next = latSiteComplete&0xf;
int label = latSiteComplete&0x30;
int sl = latSiteComplete&0x40;
int newIndex = AddUnitToIndex(next, index, OOB);
if(OOB) return;
int newSiteComp = lattice[newIndex];
int newSl=newSiteComp&0x40;
if(sl+newSl==0) return;
uint rand = Rng4(rngState);
int newBond1 = (trans[next/4]>>(4*(2*(next%4)+(rand&0x1))))&0xf;
int newBond2 = GPUAddUnitVectors((~newBond1)&0xf, next);
int temp = newBond1;
newBond1 = (rand&0x2)?newBond1:newBond2;
newBond2 = (rand&0x2)?newBond2:temp;
int destIndex = AddUnitToIndex(newBond1,index, OOB);
if(OOB) return;
int destSiteComp = lattice[destIndex];
if(destSiteComp) return;
int moveFirst;
if(sl+newSl==0x80){
moveFirst = (rand&0x4)>>2;
}
else if(sl)
moveFirst = 1;
else
moveFirst = 0;
// int t,u,v, tn, un, vn,td,ud,vd;
// IndexToTUV(index, t,u,v);
// IndexToTUV(newIndex,tn,un,vn);
// IndexToTUV(destIndex,td,ud,vd);
// printf("index=%i (%i,%i,%i) [%x], newIndex=%i (%i %i %i) [%x], destIndex=%i (%i %i %i) [%x]\n", index, t,u,v, latSiteComplete, newIndex, tn,un,vn, newSiteComp, destIndex, td,ud,vd, destSiteComp);
destSiteComp = newBond2;
if(moveFirst){
latSiteComplete = newBond1|((label>>1)&0x10);
destSiteComp |= label&0x10;
}
else{
latSiteComplete = newBond1|label|sl;
destSiteComp |= (newSiteComp&0x20)>>1;
newSiteComp = newSiteComp&0x1f;
}
// printf("index=%i (%i,%i,%i) [%x], newIndex=%i (%i %i %i) [%x], destIndex=%i (%i %i %i) [%x]: %x=>%x+%x, %x\n", index, t,u,v, latSiteComplete, newIndex, tn,un,vn, newSiteComp, destIndex, td,ud,vd, destSiteComp, next, newBond1, newBond2, (trans[next/4]>>(4*(2*(next%4))))&0xff);
lattice[index] = latSiteComplete;
lattice[destIndex] = destSiteComp;
if(!moveFirst)
lattice[newIndex] = newSiteComp;
}
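// TransBack: the reverse move - merges the bond at `index` with the bond of
// the site it points to into one bond (when their codes combine to a valid
// direction) and hands the freed stored-length flag (0x40) to one of the ends.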
__device__ void TransBack(char* lattice, int index, uint* trans, uint4& rngState){
int OOB;
int latSiteComplete = lattice[index];
int next = latSiteComplete&0xf;
int label = latSiteComplete&0x30;
int sl = latSiteComplete&0x40;
if(!latSiteComplete) return;
int srcIndex = AddUnitToIndex(next, index, OOB);
if(OOB) return;
int srcSiteComp = lattice[srcIndex];
int srcNext = srcSiteComp&0xf;
int srcLabel= srcSiteComp&0x30;
int srcSl = srcSiteComp&0x40;
int newNext;
if(srcSl) return;
if(!GPUValidateAddUnitVectors(next, srcNext, newNext)) return;
int newIndex = AddUnitToIndex(newNext, index, OOB);
if(OOB) return;
int newSiteComp = lattice[newIndex];
int newSiteSl = newSiteComp&0x40;
if(sl+newSiteSl == 0x80) return;
uint rand = Rng4(rngState);
int moveFirst;
if(sl+newSiteSl == 0x0){
moveFirst = rand&0x1;
}
else if(sl == 0x40)
moveFirst = 0;
else
moveFirst = 1;
if(moveFirst){
latSiteComplete = newNext|(label<<1)|srcLabel|0x40;
}
else{
latSiteComplete = newNext|label|sl;
newSiteComp = (newSiteComp&0x3f)|(srcLabel<<1)|0x40;
}
// printf("%x + %x -> %x\n", next, srcNext, newNext);
lattice[srcIndex]=0;
lattice[index] = latSiteComplete;
lattice[newIndex] = newSiteComp;
}
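// DiffuseSL: moves a stored-length unit (bit 0x40) one step along the chain,
// between a site and the site its bond points to, carrying a label bit along.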
__device__ void DiffuseSL(char* lattice, int index){
int OOB;
int latSiteComplete = lattice[index];
int next = latSiteComplete&0xf;
int label = latSiteComplete&0x30;
int sl = latSiteComplete&0x40;
if(!latSiteComplete) return;
int newIndex = AddUnitToIndex(next, index, OOB);
if(OOB) return;
int newSiteComp = lattice[newIndex];
int newSiteLabel = newSiteComp&0x30;
int newSiteSl = newSiteComp&0x40;
if(newSiteSl + sl != 0x40) return;
if(sl){
newSiteComp = newSiteComp | ((label&0x10)<<1) | 0x40;
latSiteComplete = next|((label>>1)&0x10);
}
else{
latSiteComplete = next|(label<<1)|((newSiteLabel>>1)&0x10)|0x40;
newSiteComp = newSiteComp&0x1f;
}
// if(!sl){
// int t,u,v, tn, un, vn;
// IndexToTUV(index, t,u,v);
// IndexToTUV(newIndex,tn,un,vn);
// printf("sl=%i, next=%x, index=%i (%i,%i,%i), newIndex=%i (%i %i %i)\n", sl, next, index, t,u,v, newIndex, tn,un,vn);
lattice[index] = latSiteComplete;
lattice[newIndex] = newSiteComp;
// }
}
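// polmove: each block copies its (dtuv-shifted) 24x24x24 lattice tile from
// srcLattice into shared memory, runs nStep Monte-Carlo sweeps in which every
// thread applies TransForw / DiffuseSL / TransBack to randomly chosen sites of
// its own cell (with __syncthreads between sub-steps), then scatters the tile
// to dstLattice, split across this block and its neighbours according to
// dtuv_next, and finally stores its RNG state back to `seeds`.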
__global__ void polmove(int nStep, uint4* seeds, char* srcLattice, char* dstLattice, uint* gTrans, int dtuv, int dtuv_next, uint NWT, uint NWU, uint NWV){
__shared__ char lattice[BLOCK_SIZE];
uint trans[4];
int lid = threadIdx.x;
int wid = blockIdx.x;
int gid = wid * blockDim.x + lid;
int widt = wid%NWT;
int widu = (wid/NWT)%NWU;
int widv = wid/(NWU*NWT);
uint4 rngl;
uint4 rngp;
uint site;
int dt = dtuv%WLT;
int du = (dtuv/WLT)%(WLU);
int dv = dtuv/(WLT*WLU);
int p=0;
int dtBlock=WLT-dt;
int duBlock=WLU-du;
int dvBlock=WLV-dv;
int pSwitchNext=dtBlock*duBlock*dvBlock;
int memOffSet=0;
// printf("pSwitchNext=%i\n", pSwitchNext);
int src;
for(src=lid*4; src<BLOCK_SIZE; src += 4*WS){
for(int i=0; i<4 && i+src<BLOCK_SIZE; i++){
while(i+src>=pSwitchNext){
memOffSet = pSwitchNext;
p++;
dtBlock = (p&0x1)?dt:(WLT-dt);
duBlock = (p&0x2)?du:(WLU-du);
dvBlock = (p&0x4)?dv:(WLV-dv);
pSwitchNext += dtBlock*duBlock*dvBlock;
}
int offSet = src+i-memOffSet;
int t = ((p&0x1)?(WLT-dt):0) + (offSet%dtBlock);
int u = ((p&0x2)?(WLU-du):0) + ((offSet/dtBlock)%duBlock);
int v = ((p&0x4)?(WLV-dv):0) + (offSet/(dtBlock*duBlock));
int index = TUVToIndex(t,u,v);
lattice[index]=srcLattice[src+i+wid*BLOCK_SIZE];
}
}
for(int i=0; i<4; i++) trans[i] = gTrans[i];
int indexStart = ((lid&0x1f)<<2)|((lid&0x60)>>5)|(lid&0x180);
rngp = seeds[gid*2];
rngl = seeds[gid*2+1];
__syncthreads();
for(int i=0; i<nStep; i++){
// site = indexStart | ((Rng4(rngl)%27)<<9);
// DiffuseSL(lattice, site); __syncthreads();
uint randLoc;
do {
randLoc = Rng4(rngl);
}while(randLoc>=4294574721); /// 8081*27^4: rejecting larger values keeps the four modulo-27 draws below unbiased.
// site = indexStart | ((Rng4(rngl)%27)<<9);
// TransForw(lattice, site, trans, rngp); __syncthreads();
//
// site = indexStart | ((Rng4(rngl)%27)<<9);
// DiffuseSL(lattice, site); __syncthreads();
//
// site = indexStart | ((Rng4(rngl)%27)<<9);
// TransForw(lattice, site, trans, rngp); __syncthreads();
//
// site = indexStart | ((Rng4(rngl)%27)<<9);
// TransBack(lattice, site, trans, rngp); __syncthreads();
site = indexStart | ((randLoc%27)<<9);
TransForw(lattice, site, trans, rngp); __syncthreads();
randLoc /= 27;
site = indexStart | ((randLoc%27)<<9);
DiffuseSL(lattice, site); __syncthreads();
randLoc /= 27;
site = indexStart | ((randLoc%27)<<9);
TransForw(lattice, site, trans, rngp); __syncthreads();
randLoc /= 27;
site = indexStart | ((randLoc%27)<<9);
TransBack(lattice, site, trans, rngp); __syncthreads();
}
dt = dtuv_next%WLT;
du = (dtuv_next/WLT)%(WLU);
dv = dtuv_next/(WLT*WLU);
memOffSet=0;
// printf("????\n");
for(int p=0; p<8; p++){
int dtBlock = (p&0x1)?dt:(WLT-dt);
int duBlock = (p&0x2)?du:(WLU-du);
int dvBlock = (p&0x4)?dv:(WLV-dv);
int dstWid = (widt+NWT-(((p>>0)&0x1)))%NWT;
dstWid += ((widu+NWU-(((p>>1)&0x1)))%NWU)*NWT;
dstWid += ((widv+NWV-(((p>>2)&0x1)))%NWV)*NWT*NWU;
// if(lid==0)
// printf("p=%i, wid=(%i,%i,%i), dstWid=(%i,%i,%i)=%i\n", p,widt,widu,widv,(widt+NWT-(((p>>0)&0x1)))%NWT, ((widu+NWU-(((p>>1)&0x1)))%NWU), ((widv+NWV-(((p>>2)&0x1)))%NWV), dstWid);
// if(lid==0 && wid==0)
// printf("block=(%i,%i,%i), p=%i\n", dtBlock, duBlock, dvBlock, p);
for(int i=lid; i<dtBlock*duBlock*dvBlock; i+=WS){
int t = i%dtBlock + ((p&0x1)?0:dt);
int u = (i/dtBlock)%duBlock + ((p&0x2)?0:du);
int v = i/(dtBlock*duBlock) + ((p&0x4)?0:dv);
int dst = dstWid*BLOCK_SIZE+memOffSet+i;
int index = TUVToIndex(t, u, v);
// if(lid%55==0)
// printf("dstWid=%i,%i (p=%i), memOffSet=%i, i=%i, (%i,%i,%i)\n", dstWid, dst, p, memOffSet, i, t,u,v);
dstLattice[dst] = lattice[index];
}
memOffSet += dtBlock*duBlock*dvBlock;
}
seeds[gid*2]=rngp;
seeds[gid*2+1]=rngl;
__syncthreads();
}
| 38f1c700c9baca71de00ed49fabfccfdaf063062.cu | #include <stdint.h>
#include <stdio.h>
#define uint uint32_t
#define WST 8
#define WSU 8
#define WSV 8
#define WS (WST*WSU*WSV)
#define CELL_LENGTH 3
#define CELL_SIZE (CELL_LENGTH*CELL_LENGTH*CELL_LENGTH)
#define BLOCK_SIZE (WS*CELL_SIZE)
#define WLT (WST*CELL_LENGTH)
#define WLU (WSU*CELL_LENGTH)
#define WLV (WSV*CELL_LENGTH)
#define WS_MASK (WS-1)
#define TID_MASK (WST-1)
#define UID_MASK (WSU-1)
#define VID_MASK (WSV-1)
#include <stdint.h>
#define uint uint32_t
__device__ uint Rng4(uint4& state){
uint t=state.w;
t^= t << 11;
t^= t >> 8;
state.w=state.z; state.z=state.y; state.y=state.x;
t ^= state.x;
t ^= state.x>>19;
state.x=t;
return t;
}
__device__ int TUVToIndex(int t, int u, int v){
int index=0;
index += (t%3)<<9;
index += ((u%3)*3)<<9;
index += ((v%3)*9)<<9;
index += (t/3)+(u/3)*WST+(v/3)*WST*WSU;
return index;
}
__device__ void IndexToTUV(int index, int& t, int& u, int& v){
t=(index>>9)%3;
u=((index>>9)/3)%3;
v=((index>>9)/9);
t+= (index&0x7)*3;
u+= ((index>>3)&0x7)*3;
v+= ((index>>6)&0x7)*3;
}
__device__ int AddUnitToIndex(int unit, int index, int& OOB){
int dt = ((unit>>0)&0x1);
int du = ((unit>>1)&0x1);
int dv = ((unit>>2)&0x1);
int dw = (unit>>3)&0x1;
int t,u,v;
IndexToTUV(index, t,u,v);
t+= dt-dw;
u+= du-dw;
v+= dv-dw;
OOB = ((t+WLT)/WLT-1);
OOB |= ((u+WLU)/WLU-1);
OOB |= ((v+WLV)/WLV-1);
int newIndex = TUVToIndex(t,u,v);
return newIndex;
}
__device__ uint GPUValidateAddUnitVectors(int a, int b, int& c){
int valid;
if((a|b) != 0xf && (a&b))
return 0;
c = (((a|b)==0xf)?(a&b):(a|b));
valid = (c==0x3||c==0xc)?0:1;
return valid;
}
__device__ uint GPUAddUnitVectors(uint a, uint b){
return (((a|b)==0xf)?(a&b):(a|b));
}
__device__ void TransForw(char* lattice, int index, uint* trans, uint4& rngState){
int OOB;
int latSiteComplete = lattice[index];
if(!latSiteComplete) return;
int next = latSiteComplete&0xf;
int label = latSiteComplete&0x30;
int sl = latSiteComplete&0x40;
int newIndex = AddUnitToIndex(next, index, OOB);
if(OOB) return;
int newSiteComp = lattice[newIndex];
int newSl=newSiteComp&0x40;
if(sl+newSl==0) return;
uint rand = Rng4(rngState);
int newBond1 = (trans[next/4]>>(4*(2*(next%4)+(rand&0x1))))&0xf;
int newBond2 = GPUAddUnitVectors((~newBond1)&0xf, next);
int temp = newBond1;
newBond1 = (rand&0x2)?newBond1:newBond2;
newBond2 = (rand&0x2)?newBond2:temp;
int destIndex = AddUnitToIndex(newBond1,index, OOB);
if(OOB) return;
int destSiteComp = lattice[destIndex];
if(destSiteComp) return;
int moveFirst;
if(sl+newSl==0x80){
moveFirst = (rand&0x4)>>2;
}
else if(sl)
moveFirst = 1;
else
moveFirst = 0;
// int t,u,v, tn, un, vn,td,ud,vd;
// IndexToTUV(index, t,u,v);
// IndexToTUV(newIndex,tn,un,vn);
// IndexToTUV(destIndex,td,ud,vd);
// printf("index=%i (%i,%i,%i) [%x], newIndex=%i (%i %i %i) [%x], destIndex=%i (%i %i %i) [%x]\n", index, t,u,v, latSiteComplete, newIndex, tn,un,vn, newSiteComp, destIndex, td,ud,vd, destSiteComp);
destSiteComp = newBond2;
if(moveFirst){
latSiteComplete = newBond1|((label>>1)&0x10);
destSiteComp |= label&0x10;
}
else{
latSiteComplete = newBond1|label|sl;
destSiteComp |= (newSiteComp&0x20)>>1;
newSiteComp = newSiteComp&0x1f;
}
// printf("index=%i (%i,%i,%i) [%x], newIndex=%i (%i %i %i) [%x], destIndex=%i (%i %i %i) [%x]: %x=>%x+%x, %x\n", index, t,u,v, latSiteComplete, newIndex, tn,un,vn, newSiteComp, destIndex, td,ud,vd, destSiteComp, next, newBond1, newBond2, (trans[next/4]>>(4*(2*(next%4))))&0xff);
lattice[index] = latSiteComplete;
lattice[destIndex] = destSiteComp;
if(!moveFirst)
lattice[newIndex] = newSiteComp;
}
__device__ void TransBack(char* lattice, int index, uint* trans, uint4& rngState){
int OOB;
int latSiteComplete = lattice[index];
int next = latSiteComplete&0xf;
int label = latSiteComplete&0x30;
int sl = latSiteComplete&0x40;
if(!latSiteComplete) return;
int srcIndex = AddUnitToIndex(next, index, OOB);
if(OOB) return;
int srcSiteComp = lattice[srcIndex];
int srcNext = srcSiteComp&0xf;
int srcLabel= srcSiteComp&0x30;
int srcSl = srcSiteComp&0x40;
int newNext;
if(srcSl) return;
if(!GPUValidateAddUnitVectors(next, srcNext, newNext)) return;
int newIndex = AddUnitToIndex(newNext, index, OOB);
if(OOB) return;
int newSiteComp = lattice[newIndex];
int newSiteSl = newSiteComp&0x40;
if(sl+newSiteSl == 0x80) return;
uint rand = Rng4(rngState);
int moveFirst;
if(sl+newSiteSl == 0x0){
moveFirst = rand&0x1;
}
else if(sl == 0x40)
moveFirst = 0;
else
moveFirst = 1;
if(moveFirst){
latSiteComplete = newNext|(label<<1)|srcLabel|0x40;
}
else{
latSiteComplete = newNext|label|sl;
newSiteComp = (newSiteComp&0x3f)|(srcLabel<<1)|0x40;
}
// printf("%x + %x -> %x\n", next, srcNext, newNext);
lattice[srcIndex]=0;
lattice[index] = latSiteComplete;
lattice[newIndex] = newSiteComp;
}
__device__ void DiffuseSL(char* lattice, int index){
int OOB;
int latSiteComplete = lattice[index];
int next = latSiteComplete&0xf;
int label = latSiteComplete&0x30;
int sl = latSiteComplete&0x40;
if(!latSiteComplete) return;
int newIndex = AddUnitToIndex(next, index, OOB);
if(OOB) return;
int newSiteComp = lattice[newIndex];
int newSiteLabel = newSiteComp&0x30;
int newSiteSl = newSiteComp&0x40;
if(newSiteSl + sl != 0x40) return;
if(sl){
newSiteComp = newSiteComp | ((label&0x10)<<1) | 0x40;
latSiteComplete = next|((label>>1)&0x10);
}
else{
latSiteComplete = next|(label<<1)|((newSiteLabel>>1)&0x10)|0x40;
newSiteComp = newSiteComp&0x1f;
}
// if(!sl){
// int t,u,v, tn, un, vn;
// IndexToTUV(index, t,u,v);
// IndexToTUV(newIndex,tn,un,vn);
// printf("sl=%i, next=%x, index=%i (%i,%i,%i), newIndex=%i (%i %i %i)\n", sl, next, index, t,u,v, newIndex, tn,un,vn);
lattice[index] = latSiteComplete;
lattice[newIndex] = newSiteComp;
// }
}
__global__ void polmove(int nStep, uint4* seeds, char* srcLattice, char* dstLattice, uint* gTrans, int dtuv, int dtuv_next, uint NWT, uint NWU, uint NWV){
__shared__ char lattice[BLOCK_SIZE];
uint trans[4];
int lid = threadIdx.x;
int wid = blockIdx.x;
int gid = wid * blockDim.x + lid;
int widt = wid%NWT;
int widu = (wid/NWT)%NWU;
int widv = wid/(NWU*NWT);
uint4 rngl;
uint4 rngp;
uint site;
int dt = dtuv%WLT;
int du = (dtuv/WLT)%(WLU);
int dv = dtuv/(WLT*WLU);
int p=0;
int dtBlock=WLT-dt;
int duBlock=WLU-du;
int dvBlock=WLV-dv;
int pSwitchNext=dtBlock*duBlock*dvBlock;
int memOffSet=0;
// printf("pSwitchNext=%i\n", pSwitchNext);
int src;
for(src=lid*4; src<BLOCK_SIZE; src += 4*WS){
for(int i=0; i<4 && i+src<BLOCK_SIZE; i++){
while(i+src>=pSwitchNext){
memOffSet = pSwitchNext;
p++;
dtBlock = (p&0x1)?dt:(WLT-dt);
duBlock = (p&0x2)?du:(WLU-du);
dvBlock = (p&0x4)?dv:(WLV-dv);
pSwitchNext += dtBlock*duBlock*dvBlock;
}
int offSet = src+i-memOffSet;
int t = ((p&0x1)?(WLT-dt):0) + (offSet%dtBlock);
int u = ((p&0x2)?(WLU-du):0) + ((offSet/dtBlock)%duBlock);
int v = ((p&0x4)?(WLV-dv):0) + (offSet/(dtBlock*duBlock));
int index = TUVToIndex(t,u,v);
lattice[index]=srcLattice[src+i+wid*BLOCK_SIZE];
}
}
for(int i=0; i<4; i++) trans[i] = gTrans[i];
int indexStart = ((lid&0x1f)<<2)|((lid&0x60)>>5)|(lid&0x180);
rngp = seeds[gid*2];
rngl = seeds[gid*2+1];
__syncthreads();
for(int i=0; i<nStep; i++){
// site = indexStart | ((Rng4(rngl)%27)<<9);
// DiffuseSL(lattice, site); __syncthreads();
uint randLoc;
do {
randLoc = Rng4(rngl);
}while(randLoc>=4294574721); /// 8081*27^4: rejecting larger values keeps the four modulo-27 draws below unbiased.
// site = indexStart | ((Rng4(rngl)%27)<<9);
// TransForw(lattice, site, trans, rngp); __syncthreads();
//
// site = indexStart | ((Rng4(rngl)%27)<<9);
// DiffuseSL(lattice, site); __syncthreads();
//
// site = indexStart | ((Rng4(rngl)%27)<<9);
// TransForw(lattice, site, trans, rngp); __syncthreads();
//
// site = indexStart | ((Rng4(rngl)%27)<<9);
// TransBack(lattice, site, trans, rngp); __syncthreads();
site = indexStart | ((randLoc%27)<<9);
TransForw(lattice, site, trans, rngp); __syncthreads();
randLoc /= 27;
site = indexStart | ((randLoc%27)<<9);
DiffuseSL(lattice, site); __syncthreads();
randLoc /= 27;
site = indexStart | ((randLoc%27)<<9);
TransForw(lattice, site, trans, rngp); __syncthreads();
randLoc /= 27;
site = indexStart | ((randLoc%27)<<9);
TransBack(lattice, site, trans, rngp); __syncthreads();
}
dt = dtuv_next%WLT;
du = (dtuv_next/WLT)%(WLU);
dv = dtuv_next/(WLT*WLU);
memOffSet=0;
// printf("????\n");
for(int p=0; p<8; p++){
int dtBlock = (p&0x1)?dt:(WLT-dt);
int duBlock = (p&0x2)?du:(WLU-du);
int dvBlock = (p&0x4)?dv:(WLV-dv);
int dstWid = (widt+NWT-(((p>>0)&0x1)))%NWT;
dstWid += ((widu+NWU-(((p>>1)&0x1)))%NWU)*NWT;
dstWid += ((widv+NWV-(((p>>2)&0x1)))%NWV)*NWT*NWU;
// if(lid==0)
// printf("p=%i, wid=(%i,%i,%i), dstWid=(%i,%i,%i)=%i\n", p,widt,widu,widv,(widt+NWT-(((p>>0)&0x1)))%NWT, ((widu+NWU-(((p>>1)&0x1)))%NWU), ((widv+NWV-(((p>>2)&0x1)))%NWV), dstWid);
// if(lid==0 && wid==0)
// printf("block=(%i,%i,%i), p=%i\n", dtBlock, duBlock, dvBlock, p);
for(int i=lid; i<dtBlock*duBlock*dvBlock; i+=WS){
int t = i%dtBlock + ((p&0x1)?0:dt);
int u = (i/dtBlock)%duBlock + ((p&0x2)?0:du);
int v = i/(dtBlock*duBlock) + ((p&0x4)?0:dv);
int dst = dstWid*BLOCK_SIZE+memOffSet+i;
int index = TUVToIndex(t, u, v);
// if(lid%55==0)
// printf("dstWid=%i,%i (p=%i), memOffSet=%i, i=%i, (%i,%i,%i)\n", dstWid, dst, p, memOffSet, i, t,u,v);
dstLattice[dst] = lattice[index];
}
memOffSet += dtBlock*duBlock*dvBlock;
}
seeds[gid*2]=rngp;
seeds[gid*2+1]=rngl;
__syncthreads();
}
|
5276e5ced189eae0f76a378fda1d0e6d8b2cff34.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S3_10.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
//the array cells to solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6500849625341,0.00126772131045507,0.781700888482196,0.781376988692993,0.000172885285710852,0.485941322704491,0.00292324069927923,0.999998374780913,1.90738126423793e-08,1.87055315682450e-05,0.999771441127599,1.00714652032967,0.999996065405881,4.39569670678322e-05,0.349416217289088,10.2352513381449,139.498519949149};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={14.5163347146812,0.000250345530072526,0.000146653936785431,0.000402287403168208,0.264792221623256,0.156805136116351,0.192566798261608,4.94346962733006,0.0156132259343099,1.87269950357503,1095.06789437116,0.000319948172992241,0.212567611293609,0.0189488293272868,0.00423610167704355,4.08044134129332e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
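// Gate variables are advanced with the Rush-Larsen exponential update
// x_inf - (x_inf - x)*exp(-dt/tau); the membrane potential below uses a
// plain forward-Euler step.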
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| 5276e5ced189eae0f76a378fda1d0e6d8b2cff34.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S3_10.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
    //the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6500849625341,0.00126772131045507,0.781700888482196,0.781376988692993,0.000172885285710852,0.485941322704491,0.00292324069927923,0.999998374780913,1.90738126423793e-08,1.87055315682450e-05,0.999771441127599,1.00714652032967,0.999996065405881,4.39569670678322e-05,0.349416217289088,10.2352513381449,139.498519949149};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={14.5163347146812,0.000250345530072526,0.000146653936785431,0.000402287403168208,0.264792221623256,0.156805136116351,0.192566798261608,4.94346962733006,0.0156132259343099,1.87269950357503,1095.06789437116,0.000319948172992241,0.212567611293609,0.0189488293272868,0.00423610167704355,4.08044134129332e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
71c6d84f55f389303625dcc5206097fe6fceb92b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
//#if (GPUSHMEM < 200)
#define BLOCK_SIZE 512
//#else
// #define BLOCK_SIZE 768
//#endif
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, float* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
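/* Hedged usage sketch (illustrative only, not part of MAGMA): a minimal kernel showing
   how sum_reduce<n> is typically driven when launched with BLOCK_SIZE threads per block.
   Each thread accumulates a strided partial sum into shared memory, sum_reduce leaves the
   block total in sdata[0], and thread 0 writes it out.  The kernel name and the
   block_sums output layout are assumptions made for this example. */
__global__ void example_block_sum_kernel( const float* x, int n, float* block_sums )
{
    __shared__ float sdata[ BLOCK_SIZE ];
    const int i = threadIdx.x;
    float s = 0.f;
    for( int j = blockIdx.x*BLOCK_SIZE + i; j < n; j += gridDim.x*BLOCK_SIZE )
        s += x[j];
    sdata[i] = s;
    sum_reduce< BLOCK_SIZE >( i, sdata );   /* block total now in sdata[0] */
    if ( i == 0 )
        block_sums[blockIdx.x] = sdata[0];
}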
static
__device__ void zsum_reduce( int n, int i, float* x )
{
__syncthreads();
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
//==============================================================================
__global__
void magma_slarfx_kernel( int m, float *v, float *tau,
float *c, int ldc, float *xnorm,
float *T, int it )
{
if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
const int i = threadIdx.x;
//float *dc = c + (blockIdx.x-it-1) * ldc;
float *dc = c + (blockIdx.x) * ldc;
__shared__ float sum[ BLOCK_SIZE ];
float lsum;
/* w := v' * C */
lsum = MAGMA_S_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE ){
if (j==0){
lsum += MAGMA_S_MUL( MAGMA_S_ONE, dc[j] );
v[j] = MAGMA_S_ONE;
}
else
lsum += MAGMA_S_MUL( MAGMA_S_CNJG( v[j] ), dc[j] );
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
/* C := C - v * w */
__syncthreads();
float z__1 = - MAGMA_S_CNJG(*tau) * sum[0];
if (blockIdx.x>it){
for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZE )
dc[j] += z__1 * v[j];
__syncthreads();
/* Adjust the rest of the column norms */
if (i==0){
float temp = MAGMA_S_ABS( dc[0] ) / xnorm[blockIdx.x-it-1];
temp = (temp + 1.) * (1. - temp);
xnorm[blockIdx.x-it-1] = xnorm[blockIdx.x-it-1] * sqrt(temp);
}
}
else
{
if (blockIdx.x==it)
*(T+it) = *tau;
else
*(T+blockIdx.x) = MAGMA_S_CNJG(z__1);
}
}
}
//==============================================================================
__global__
void magma_strmv_kernel(const float *T, int ldt, float *t)
{
const int i = threadIdx.x;
T += i;
__shared__ float tlocal[ BLOCK_SIZE ];
float res = MAGMA_S_MAKE(0., 0.);
tlocal[i] = t[i];
__syncthreads();
#pragma unroll
for(int j=0; j<blockDim.x; j++)
res += T[j*ldt]*tlocal[j];
t[i] = res;
}
__global__
void magma_strmv_kernel2(const float *T, int ldt, float *t,
float *y, float *tau)
{
const int i = threadIdx.x;
T += blockIdx.x;
__shared__ float sum[ 128 ];
sum[i] = T[i*ldt]*t[i];
zsum_reduce(blockDim.x, i, sum);
__syncthreads();
if (i==0){
y[blockIdx.x] = sum[0];
if (blockIdx.x==0)
y[gridDim.x] = tau[0];
}
}
//==============================================================================
__global__
void magma_strmv_tkernel(float *T, int ldt, float *t, float *y)
{
const int i = threadIdx.x;
T += blockIdx.x*ldt;
__shared__ float sum[ 128 ];
sum[i] = MAGMA_S_CNJG(T[i])*t[i];
zsum_reduce(blockDim.x, i, sum);
__syncthreads();
if (i==0)
y[blockIdx.x] = sum[0];
}
//==============================================================================
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v'
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
    To apply H' (the conjugate transpose of H), supply conjg(tau)
    instead of tau.
    The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms
    are adjusted to hold the norms of v(2:m,2:n). This differs from
    LAPACK's slarf routine.
*/
extern "C" void
magma_slarfx_gpu(magma_int_t m, magma_int_t n, float *v, float *tau,
float *c, magma_int_t ldc, float *xnorm,
float *T, magma_int_t i, float *work )
{
magma_int_t N = n + i + 1;
if (i==0)
hipLaunchKernelGGL(( magma_slarfx_kernel), dim3(N), dim3(BLOCK_SIZE), 0, magma_stream , m, v, tau, c, ldc, xnorm, T+i*N, i);
else
hipLaunchKernelGGL(( magma_slarfx_kernel), dim3(N), dim3(BLOCK_SIZE), 0, magma_stream , m, v, tau, c, ldc, xnorm, work, i);
if (i > 0){
//magma_strmv_kernel<<< 1, i, 0, magma_stream >>>( T, N, T+i*N);
hipLaunchKernelGGL(( magma_strmv_kernel2), dim3(i), dim3(i), 0, magma_stream , T, N, work, T+i*N, tau);
}
}
//==============================================================================
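/* Hedged reference sketch (illustrative only, not part of MAGMA): a plain CPU version of
   the update performed by magma_slarfx_kernel above, C := (I - tau*v*v') * C, with v[0]
   treated as 1 exactly as in the kernel.  The function name is an assumption made for this
   example; it only spells out the math: w := v'*C(:,j), then C(:,j) := C(:,j) - tau*w*v.
   The column-norm downdate in the kernel corresponds to
   xnorm_new = xnorm_old * sqrt(1 - (|c_1|/xnorm_old)^2). */
static void slarfx_cpu_reference_sketch( int m, int n, const float *v, float tau,
                                         float *c, int ldc )
{
    for( int j = 0; j < n; ++j ) {
        float w = 0.f;
        for( int i = 0; i < m; ++i )
            w += ( i == 0 ? 1.f : v[i] ) * c[i + j*ldc];
        for( int i = 0; i < m; ++i )
            c[i + j*ldc] -= tau * ( i == 0 ? 1.f : v[i] ) * w;
    }
}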
| 71c6d84f55f389303625dcc5206097fe6fceb92b.cu | /*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
//#if (GPUSHMEM < 200)
#define BLOCK_SIZE 512
//#else
// #define BLOCK_SIZE 768
//#endif
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, float* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
static
__device__ void zsum_reduce( int n, int i, float* x )
{
__syncthreads();
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
//==============================================================================
__global__
void magma_slarfx_kernel( int m, float *v, float *tau,
float *c, int ldc, float *xnorm,
float *T, int it )
{
if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
const int i = threadIdx.x;
//float *dc = c + (blockIdx.x-it-1) * ldc;
float *dc = c + (blockIdx.x) * ldc;
__shared__ float sum[ BLOCK_SIZE ];
float lsum;
/* w := v' * C */
lsum = MAGMA_S_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE ){
if (j==0){
lsum += MAGMA_S_MUL( MAGMA_S_ONE, dc[j] );
v[j] = MAGMA_S_ONE;
}
else
lsum += MAGMA_S_MUL( MAGMA_S_CNJG( v[j] ), dc[j] );
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
/* C := C - v * w */
__syncthreads();
float z__1 = - MAGMA_S_CNJG(*tau) * sum[0];
if (blockIdx.x>it){
for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZE )
dc[j] += z__1 * v[j];
__syncthreads();
/* Adjust the rest of the column norms */
if (i==0){
float temp = MAGMA_S_ABS( dc[0] ) / xnorm[blockIdx.x-it-1];
temp = (temp + 1.) * (1. - temp);
xnorm[blockIdx.x-it-1] = xnorm[blockIdx.x-it-1] * sqrt(temp);
}
}
else
{
if (blockIdx.x==it)
*(T+it) = *tau;
else
*(T+blockIdx.x) = MAGMA_S_CNJG(z__1);
}
}
}
//==============================================================================
__global__
void magma_strmv_kernel(const float *T, int ldt, float *t)
{
const int i = threadIdx.x;
T += i;
__shared__ float tlocal[ BLOCK_SIZE ];
float res = MAGMA_S_MAKE(0., 0.);
tlocal[i] = t[i];
__syncthreads();
#pragma unroll
for(int j=0; j<blockDim.x; j++)
res += T[j*ldt]*tlocal[j];
t[i] = res;
}
__global__
void magma_strmv_kernel2(const float *T, int ldt, float *t,
float *y, float *tau)
{
const int i = threadIdx.x;
T += blockIdx.x;
__shared__ float sum[ 128 ];
sum[i] = T[i*ldt]*t[i];
zsum_reduce(blockDim.x, i, sum);
__syncthreads();
if (i==0){
y[blockIdx.x] = sum[0];
if (blockIdx.x==0)
y[gridDim.x] = tau[0];
}
}
//==============================================================================
__global__
void magma_strmv_tkernel(float *T, int ldt, float *t, float *y)
{
const int i = threadIdx.x;
T += blockIdx.x*ldt;
__shared__ float sum[ 128 ];
sum[i] = MAGMA_S_CNJG(T[i])*t[i];
zsum_reduce(blockDim.x, i, sum);
__syncthreads();
if (i==0)
y[blockIdx.x] = sum[0];
}
//==============================================================================
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v'
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
    To apply H' (the conjugate transpose of H), supply conjg(tau)
    instead of tau.
    The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms
    are adjusted to hold the norms of v(2:m,2:n). This differs from
    LAPACK's slarf routine.
*/
extern "C" void
magma_slarfx_gpu(magma_int_t m, magma_int_t n, float *v, float *tau,
float *c, magma_int_t ldc, float *xnorm,
float *T, magma_int_t i, float *work )
{
magma_int_t N = n + i + 1;
if (i==0)
magma_slarfx_kernel<<< N, BLOCK_SIZE, 0, magma_stream >>>( m, v, tau, c, ldc, xnorm, T+i*N, i);
else
magma_slarfx_kernel<<< N, BLOCK_SIZE, 0, magma_stream >>>( m, v, tau, c, ldc, xnorm, work, i);
if (i > 0){
//magma_strmv_kernel<<< 1, i, 0, magma_stream >>>( T, N, T+i*N);
magma_strmv_kernel2<<< i, i, 0, magma_stream >>>( T, N, work, T+i*N, tau);
}
}
//==============================================================================
|
10c79f14588f1c92af03a2b81e64e583efd4aeed.hip | // !!! This is a file automatically generated by hipify!!!
/* PAPI Multiple GPU example. This example is taken from the NVIDIA
* documentation (Copyright 1993-2013 NVIDIA Corporation) and has been
* adapted to show the use of CUPTI and PAPI in collecting event
* counters for multiple GPU contexts. PAPI Team (2015)
*/
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This application demonstrates how to use the CUDA API to use multiple GPUs,
* with an emphasis on simple illustration of the techniques (not on performance).
*
* Note that in order to detect multiple GPUs in your system you have to disable
* SLI in the nvidia control panel. Otherwise only one GPU is visible to the
 * application. On the other hand, you can still extend your desktop to screens
* attached to both GPUs.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <cupti.h>
#include <timer.h>
#include "papi.h"
#include "papi_test.h"
#if not defined PAPI
#undef PAPI
#endif
#if not defined CUPTI_ONLY
#undef CUPTI_ONLY
#endif
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
#include "simpleMultiGPU.h"
// //////////////////////////////////////////////////////////////////////////////
// Data configuration
// //////////////////////////////////////////////////////////////////////////////
const int MAX_GPU_COUNT = 32;
const int DATA_N = 48576 * 32;
#ifdef PAPI
const int MAX_NUM_EVENTS = 32;
#endif
#define CHECK_CU_ERROR(err, cufunc) \
if (err != hipSuccess) { printf ("Error %d for CUDA Driver API function '%s'\n", err, cufunc); return -1; }
#define CHECK_CUDA_ERROR(err) \
if (err != hipSuccess) { printf ("%s:%i Error %d for CUDA [%s]\n", __FILE__, __LINE__, err, hipGetErrorString(err) ); return -1; }
#define CHECK_CUPTI_ERROR(err, cuptifunc) \
if (err != CUPTI_SUCCESS) { const char *errStr; cuptiGetResultString(err, &errStr); \
printf ("%s:%i Error %d [%s] for CUPTI API function '%s'\n", __FILE__, __LINE__, err, errStr, cuptifunc); return -1; }
// //////////////////////////////////////////////////////////////////////////////
// Simple reduction kernel.
// Refer to the 'reduction' CUDA SDK sample describing
// reduction optimization strategies
// //////////////////////////////////////////////////////////////////////////////
__global__ static void reduceKernel( float *d_Result, float *d_Input, int N )
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int threadN = gridDim.x * blockDim.x;
float sum = 0;
for( int pos = tid; pos < N; pos += threadN )
sum += d_Input[pos];
d_Result[tid] = sum;
}
// //////////////////////////////////////////////////////////////////////////////
// Program main
// //////////////////////////////////////////////////////////////////////////////
int main( int argc, char **argv )
{
// Solver config
TGPUplan plan[MAX_GPU_COUNT];
// GPU reduction results
float h_SumGPU[MAX_GPU_COUNT];
float sumGPU;
double sumCPU, diff;
int i, j, gpuBase, GPU_N;
const int BLOCK_N = 32;
const int THREAD_N = 256;
const int ACCUM_N = BLOCK_N * THREAD_N;
hipCtx_t ctx[MAX_GPU_COUNT];
printf( "Starting simpleMultiGPU\n" );
// Report on the available CUDA devices
int computeCapabilityMajor = 0, computeCapabilityMinor = 0;
int runtimeVersion = 0, driverVersion = 0;
char deviceName[64];
hipDevice_t device[MAX_GPU_COUNT];
CHECK_CUDA_ERROR( hipGetDeviceCount( &GPU_N ) );
if( GPU_N > MAX_GPU_COUNT ) GPU_N = MAX_GPU_COUNT;
printf( "CUDA-capable device count: %i\n", GPU_N );
for ( i=0; i<GPU_N; i++ ) {
CHECK_CU_ERROR( hipDeviceGet( &device[i], i ), "hipDeviceGet" );
CHECK_CU_ERROR( hipDeviceGetName( deviceName, 64, device[i] ), "hipDeviceGetName" );
CHECK_CU_ERROR( hipDeviceComputeCapability( &computeCapabilityMajor, &computeCapabilityMinor, device[i] ), "hipDeviceComputeCapability" );
hipRuntimeGetVersion( &runtimeVersion );
hipDriverGetVersion( &driverVersion );
printf( "CUDA Device %d: %s : computeCapability %d.%d runtimeVersion %d.%d driverVersion %d.%d\n", i, deviceName, computeCapabilityMajor, computeCapabilityMinor, runtimeVersion/1000, (runtimeVersion%100)/10, driverVersion/1000, (driverVersion%100)/10 );
if ( computeCapabilityMajor < 2 ) {
printf( "CUDA Device %d compute capability is too low... will not add any more GPUs\n", i );
GPU_N = i;
break;
}
}
uint32_t cupti_linked_version;
cuptiGetVersion( &cupti_linked_version );
printf("CUPTI version: Compiled against version %d; Linked against version %d\n", CUPTI_API_VERSION, cupti_linked_version );
// create one context per device
for (i = 0; i < GPU_N; i++) {
CHECK_CUDA_ERROR( hipSetDevice( i ) );
CHECK_CU_ERROR( hipCtxCreate( &(ctx[i]), 0, device[i] ), "hipCtxCreate" );
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
printf( "Generating input data...\n" );
// Subdividing input data across GPUs
// Get data sizes for each GPU
for( i = 0; i < GPU_N; i++ )
plan[i].dataN = DATA_N / GPU_N;
// Take into account "odd" data sizes
for( i = 0; i < DATA_N % GPU_N; i++ )
plan[i].dataN++;
// Assign data ranges to GPUs
gpuBase = 0;
for( i = 0; i < GPU_N; i++ ) {
plan[i].h_Sum = h_SumGPU + i; // point within h_SumGPU array
gpuBase += plan[i].dataN;
}
// Create streams for issuing GPU command asynchronously and allocate memory (GPU and System page-locked)
for( i = 0; i < GPU_N; i++ ) {
CHECK_CUDA_ERROR( hipSetDevice( i ) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CHECK_CUDA_ERROR( hipStreamCreate( &plan[i].stream ) );
CHECK_CUDA_ERROR( hipMalloc( ( void ** ) &plan[i].d_Data, plan[i].dataN * sizeof( float ) ) );
CHECK_CUDA_ERROR( hipMalloc( ( void ** ) &plan[i].d_Sum, ACCUM_N * sizeof( float ) ) );
CHECK_CUDA_ERROR( hipHostMalloc( ( void ** ) &plan[i].h_Sum_from_device, ACCUM_N * sizeof( float ) ) );
CHECK_CUDA_ERROR( hipHostMalloc( ( void ** ) &plan[i].h_Data, plan[i].dataN * sizeof( float ) ) );
for( j = 0; j < plan[i].dataN; j++ ) {
plan[i].h_Data[j] = ( float ) rand() / ( float ) RAND_MAX;
}
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
#ifdef CUPTI_ONLY
// char const *cuptiEventName = "elapsed_cycles_sm"; // "elapsed_cycles_sm" "inst_executed"; "inst_issued0";
// char const *cuptiEventName = "inst_executed"; // "elapsed_cycles_sm" "inst_executed"; "inst_issued0";
char const *cuptiEventName = "inst_per_warp"; // "elapsed_cycles_sm" "inst_executed"; "inst_issued0";
printf("Setup CUPTI counters internally for event '%s' (CUPTI_ONLY)\n", cuptiEventName);
CUpti_EventGroup eg[MAX_GPU_COUNT];
CUpti_EventID *myevent = (CUpti_EventID*) calloc(GPU_N, sizeof(CUpti_EventID)); // Make space for event ids.
for ( i=0; i<GPU_N; i++ ) {
CHECK_CUDA_ERROR( hipSetDevice( i ) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CHECK_CUPTI_ERROR(cuptiSetEventCollectionMode(ctx[i], CUPTI_EVENT_COLLECTION_MODE_KERNEL), "cuptiSetEventCollectionMode" );
CHECK_CUPTI_ERROR( cuptiEventGroupCreate( ctx[i], &eg[i], 0 ), "cuptiEventGroupCreate" );
cuptiEventGetIdFromName ( device[i], cuptiEventName, &myevent[i] );
printf("GPU %i %s=%u.\n", i, cuptiEventName, myevent[i]);
CHECK_CUPTI_ERROR( cuptiEventGroupAddEvent( eg[i], myevent[i] ), "cuptiEventGroupAddEvent" );
CHECK_CUPTI_ERROR( cuptiEventGroupEnable( eg[i] ), "cuptiEventGroupEnable" );
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
#endif
#ifdef PAPI
printf("Setup PAPI counters internally (PAPI)\n");
int EventSet = PAPI_NULL;
int NUM_EVENTS = MAX_GPU_COUNT*MAX_NUM_EVENTS;
long long values[NUM_EVENTS];
int eventCount;
int retval, ee;
/* PAPI Initialization */
retval = PAPI_library_init( PAPI_VER_CURRENT );
if( retval != PAPI_VER_CURRENT ) fprintf( stderr, "PAPI_library_init failed\n" );
printf( "PAPI version: %d.%d.%d\n", PAPI_VERSION_MAJOR( PAPI_VERSION ), PAPI_VERSION_MINOR( PAPI_VERSION ), PAPI_VERSION_REVISION( PAPI_VERSION ) );
retval = PAPI_create_eventset( &EventSet );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_create_eventset failed\n" );
// In this example measure events from each GPU
int numEventEndings = 3;
char const *EventEndings[] = {
"cuda:::metric:inst_per_warp",
"cuda:::event:inst_executed",
"cuda:::event:elapsed_cycles_sm"
};
    // Add events at a GPU-specific level, e.g. cuda:::event:elapsed_cycles_sm:device=2 (built with the :device=N qualifier below)
char *EventName[NUM_EVENTS];
char tmpEventName[50];
eventCount = 0;
for( i = 0; i < GPU_N; i++ ) {
CHECK_CUDA_ERROR( hipSetDevice( i ) ); // Set device
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CHECK_CUPTI_ERROR(cuptiSetEventCollectionMode(ctx[i], CUPTI_EVENT_COLLECTION_MODE_KERNEL), "cuptiSetEventCollectionMode" );
for ( ee=0; ee<numEventEndings; ee++ ) {
snprintf( tmpEventName, 50, "%s:device=%d\0", EventEndings[ee], i );
// printf( "Trying to add event %s to GPU %d in PAPI...", tmpEventName , i ); fflush(NULL);
retval = PAPI_add_named_event( EventSet, tmpEventName );
if (retval==PAPI_OK) {
printf( "Add event success: '%s' GPU %i\n", tmpEventName, i );
EventName[eventCount] = (char *)calloc( 50, sizeof(char) );
snprintf( EventName[eventCount], 50, "%s", tmpEventName );
eventCount++;
} else {
printf( "Add event failure: '%s' GPU %i error=%s\n", tmpEventName, i, PAPI_strerror(retval));
}
}
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
// Start PAPI event measurement
retval = PAPI_start( EventSet );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_start failed\n" );
#endif
// Start timing and compute on GPU(s)
printf( "Computing with %d GPUs...\n", GPU_N );
StartTimer();
// Copy data to GPU, launch the kernel and copy data back. All asynchronously
for (i = 0; i < GPU_N; i++) {
// Set device
CHECK_CUDA_ERROR( hipSetDevice( i ));
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
// Copy input data from CPU
CHECK_CUDA_ERROR( hipMemcpyAsync( plan[i].d_Data, plan[i].h_Data, plan[i].dataN * sizeof( float ), hipMemcpyHostToDevice, plan[i].stream ) );
// Perform GPU computations
hipLaunchKernelGGL(( reduceKernel) , dim3(BLOCK_N), dim3(THREAD_N), 0, plan[i].stream , plan[i].d_Sum, plan[i].d_Data, plan[i].dataN );
if ( hipGetLastError() != hipSuccess ) { printf( "reduceKernel() execution failed (GPU %d).\n", i ); exit(EXIT_FAILURE); }
// Read back GPU results
CHECK_CUDA_ERROR( hipMemcpyAsync( plan[i].h_Sum_from_device, plan[i].d_Sum, ACCUM_N * sizeof( float ), hipMemcpyDeviceToHost, plan[i].stream ) );
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
// Process GPU results
printf( "Process GPU results on %d GPUs...\n", GPU_N );
for( i = 0; i < GPU_N; i++ ) {
float sum;
// Set device
CHECK_CUDA_ERROR( hipSetDevice( i ) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
// Wait for all operations to finish
hipStreamSynchronize( plan[i].stream );
// Finalize GPU reduction for current subvector
sum = 0;
for( j = 0; j < ACCUM_N; j++ ) {
sum += plan[i].h_Sum_from_device[j];
}
*( plan[i].h_Sum ) = ( float ) sum;
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
double gpuTime = GetTimer();
#ifdef CUPTI_ONLY
size_t size = 1024;
size_t sizeBytes = size*sizeof(uint64_t);
uint64_t buffer[size];
uint64_t tmp[size]; for (int jj=0; jj<1024; jj++) tmp[jj]=0;
for ( i=0; i<GPU_N; i++ ) {
CHECK_CUDA_ERROR( hipSetDevice( i ) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CHECK_CU_ERROR( hipCtxSynchronize( ), "hipCtxSynchronize" );
CHECK_CUPTI_ERROR( cuptiEventGroupReadEvent ( eg[i], CUPTI_EVENT_READ_FLAG_NONE, myevent[i], &sizeBytes, &tmp[0] ), "cuptiEventGroupReadEvent" );
buffer[i] = tmp[0];
printf( "CUPTI %s device %d counterValue %u (on one domain, may need to be multiplied by num of domains)\n", cuptiEventName, i, buffer[i] );
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
#endif
#ifdef PAPI
for ( i=0; i<GPU_N; i++ ) {
CHECK_CUDA_ERROR( hipSetDevice( i ) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CHECK_CU_ERROR( hipCtxSynchronize( ), "hipCtxSynchronize" );
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
// retval = PAPI_read( EventSet, values );
// if( retval != PAPI_OK ) fprintf( stderr, "PAPI_read failed\n" );
// for( i = 0; i < eventCount; i++ )
// printf( "PAPI counterValue %12lld \t\t --> %s \n", values[i], EventName[i] );
// retval = PAPI_read( EventSet, values );
// if( retval != PAPI_OK ) fprintf( stderr, "PAPI_read failed\n" );
// for( i = 0; i < eventCount; i++ )
// printf( "PAPI counterValue %12lld \t\t --> %s \n", values[i], EventName[i] );
retval = PAPI_stop( EventSet, values );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_stop failed\n" );
for( i = 0; i < eventCount; i++ )
printf( "PAPI counterValue %12lld \t\t --> %s \n", values[i], EventName[i] );
retval = PAPI_cleanup_eventset( EventSet );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_cleanup_eventset failed\n" );
retval = PAPI_destroy_eventset( &EventSet );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_destroy_eventset failed\n" );
PAPI_shutdown();
#endif
for( i = 0; i < GPU_N; i++ ) {
CHECK_CUDA_ERROR( hipHostFree( plan[i].h_Sum_from_device ) );
CHECK_CUDA_ERROR( hipFree( plan[i].d_Sum ) );
CHECK_CUDA_ERROR( hipFree( plan[i].d_Data ) );
// Shut down this GPU
CHECK_CUDA_ERROR( hipStreamDestroy( plan[i].stream ) );
}
sumGPU = 0;
for( i = 0; i < GPU_N; i++ ) {
sumGPU += h_SumGPU[i];
}
printf( " GPU Processing time: %f (ms)\n", gpuTime );
// Compute on Host CPU
printf( "Computing the same result with Host CPU...\n" );
StartTimer();
sumCPU = 0;
for( i = 0; i < GPU_N; i++ ) {
for( j = 0; j < plan[i].dataN; j++ ) {
sumCPU += plan[i].h_Data[j];
}
}
double cpuTime = GetTimer();
if (gpuTime > 0) {
printf( " CPU Processing time: %f (ms) (speedup %.2fX)\n", cpuTime, (cpuTime/gpuTime) );
} else {
printf( " CPU Processing time: %f (ms)\n", cpuTime);
}
// Compare GPU and CPU results
printf( "Comparing GPU and Host CPU results...\n" );
diff = fabs( sumCPU - sumGPU ) / fabs( sumCPU );
printf( " GPU sum: %f\n CPU sum: %f\n", sumGPU, sumCPU );
printf( " Relative difference: %E \n", diff );
// Cleanup and shutdown
for( i = 0; i < GPU_N; i++ ) {
CHECK_CUDA_ERROR( hipSetDevice( i ) );
CHECK_CUDA_ERROR( hipHostFree( plan[i].h_Data ) );
hipDeviceReset();
}
#ifdef CUPTI_ONLY
free(myevent);
#endif
exit( ( diff < 1e-5 ) ? EXIT_SUCCESS : EXIT_FAILURE );
}
| 10c79f14588f1c92af03a2b81e64e583efd4aeed.cu | /* PAPI Multiple GPU example. This example is taken from the NVIDIA
* documentation (Copyright 1993-2013 NVIDIA Corporation) and has been
* adapted to show the use of CUPTI and PAPI in collecting event
* counters for multiple GPU contexts. PAPI Team (2015)
*/
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This application demonstrates how to use the CUDA API to use multiple GPUs,
* with an emphasis on simple illustration of the techniques (not on performance).
*
* Note that in order to detect multiple GPUs in your system you have to disable
* SLI in the nvidia control panel. Otherwise only one GPU is visible to the
 * application. On the other hand, you can still extend your desktop to screens
* attached to both GPUs.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cupti.h>
#include <timer.h>
#include "papi.h"
#include "papi_test.h"
#if not defined PAPI
#undef PAPI
#endif
#if not defined CUPTI_ONLY
#undef CUPTI_ONLY
#endif
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
#include "simpleMultiGPU.h"
// //////////////////////////////////////////////////////////////////////////////
// Data configuration
// //////////////////////////////////////////////////////////////////////////////
const int MAX_GPU_COUNT = 32;
const int DATA_N = 48576 * 32;
#ifdef PAPI
const int MAX_NUM_EVENTS = 32;
#endif
#define CHECK_CU_ERROR(err, cufunc) \
if (err != CUDA_SUCCESS) { printf ("Error %d for CUDA Driver API function '%s'\n", err, cufunc); return -1; }
#define CHECK_CUDA_ERROR(err) \
if (err != cudaSuccess) { printf ("%s:%i Error %d for CUDA [%s]\n", __FILE__, __LINE__, err, cudaGetErrorString(err) ); return -1; }
#define CHECK_CUPTI_ERROR(err, cuptifunc) \
if (err != CUPTI_SUCCESS) { const char *errStr; cuptiGetResultString(err, &errStr); \
printf ("%s:%i Error %d [%s] for CUPTI API function '%s'\n", __FILE__, __LINE__, err, errStr, cuptifunc); return -1; }
// //////////////////////////////////////////////////////////////////////////////
// Simple reduction kernel.
// Refer to the 'reduction' CUDA SDK sample describing
// reduction optimization strategies
// //////////////////////////////////////////////////////////////////////////////
__global__ static void reduceKernel( float *d_Result, float *d_Input, int N )
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int threadN = gridDim.x * blockDim.x;
float sum = 0;
for( int pos = tid; pos < N; pos += threadN )
sum += d_Input[pos];
d_Result[tid] = sum;
}
// //////////////////////////////////////////////////////////////////////////////
// Program main
// //////////////////////////////////////////////////////////////////////////////
int main( int argc, char **argv )
{
// Solver config
TGPUplan plan[MAX_GPU_COUNT];
// GPU reduction results
float h_SumGPU[MAX_GPU_COUNT];
float sumGPU;
double sumCPU, diff;
int i, j, gpuBase, GPU_N;
const int BLOCK_N = 32;
const int THREAD_N = 256;
const int ACCUM_N = BLOCK_N * THREAD_N;
CUcontext ctx[MAX_GPU_COUNT];
printf( "Starting simpleMultiGPU\n" );
// Report on the available CUDA devices
int computeCapabilityMajor = 0, computeCapabilityMinor = 0;
int runtimeVersion = 0, driverVersion = 0;
char deviceName[64];
CUdevice device[MAX_GPU_COUNT];
CHECK_CUDA_ERROR( cudaGetDeviceCount( &GPU_N ) );
if( GPU_N > MAX_GPU_COUNT ) GPU_N = MAX_GPU_COUNT;
printf( "CUDA-capable device count: %i\n", GPU_N );
for ( i=0; i<GPU_N; i++ ) {
CHECK_CU_ERROR( cuDeviceGet( &device[i], i ), "cuDeviceGet" );
CHECK_CU_ERROR( cuDeviceGetName( deviceName, 64, device[i] ), "cuDeviceGetName" );
CHECK_CU_ERROR( cuDeviceComputeCapability( &computeCapabilityMajor, &computeCapabilityMinor, device[i] ), "cuDeviceComputeCapability" );
cudaRuntimeGetVersion( &runtimeVersion );
cudaDriverGetVersion( &driverVersion );
printf( "CUDA Device %d: %s : computeCapability %d.%d runtimeVersion %d.%d driverVersion %d.%d\n", i, deviceName, computeCapabilityMajor, computeCapabilityMinor, runtimeVersion/1000, (runtimeVersion%100)/10, driverVersion/1000, (driverVersion%100)/10 );
if ( computeCapabilityMajor < 2 ) {
printf( "CUDA Device %d compute capability is too low... will not add any more GPUs\n", i );
GPU_N = i;
break;
}
}
uint32_t cupti_linked_version;
cuptiGetVersion( &cupti_linked_version );
printf("CUPTI version: Compiled against version %d; Linked against version %d\n", CUPTI_API_VERSION, cupti_linked_version );
// create one context per device
for (i = 0; i < GPU_N; i++) {
CHECK_CUDA_ERROR( cudaSetDevice( i ) );
CHECK_CU_ERROR( cuCtxCreate( &(ctx[i]), 0, device[i] ), "cuCtxCreate" );
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
printf( "Generating input data...\n" );
// Subdividing input data across GPUs
// Get data sizes for each GPU
for( i = 0; i < GPU_N; i++ )
plan[i].dataN = DATA_N / GPU_N;
// Take into account "odd" data sizes
for( i = 0; i < DATA_N % GPU_N; i++ )
plan[i].dataN++;
// Assign data ranges to GPUs
gpuBase = 0;
for( i = 0; i < GPU_N; i++ ) {
plan[i].h_Sum = h_SumGPU + i; // point within h_SumGPU array
gpuBase += plan[i].dataN;
}
// Create streams for issuing GPU command asynchronously and allocate memory (GPU and System page-locked)
for( i = 0; i < GPU_N; i++ ) {
CHECK_CUDA_ERROR( cudaSetDevice( i ) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CHECK_CUDA_ERROR( cudaStreamCreate( &plan[i].stream ) );
CHECK_CUDA_ERROR( cudaMalloc( ( void ** ) &plan[i].d_Data, plan[i].dataN * sizeof( float ) ) );
CHECK_CUDA_ERROR( cudaMalloc( ( void ** ) &plan[i].d_Sum, ACCUM_N * sizeof( float ) ) );
CHECK_CUDA_ERROR( cudaMallocHost( ( void ** ) &plan[i].h_Sum_from_device, ACCUM_N * sizeof( float ) ) );
CHECK_CUDA_ERROR( cudaMallocHost( ( void ** ) &plan[i].h_Data, plan[i].dataN * sizeof( float ) ) );
for( j = 0; j < plan[i].dataN; j++ ) {
plan[i].h_Data[j] = ( float ) rand() / ( float ) RAND_MAX;
}
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
#ifdef CUPTI_ONLY
// char const *cuptiEventName = "elapsed_cycles_sm"; // "elapsed_cycles_sm" "inst_executed"; "inst_issued0";
// char const *cuptiEventName = "inst_executed"; // "elapsed_cycles_sm" "inst_executed"; "inst_issued0";
char const *cuptiEventName = "inst_per_warp"; // "elapsed_cycles_sm" "inst_executed"; "inst_issued0";
printf("Setup CUPTI counters internally for event '%s' (CUPTI_ONLY)\n", cuptiEventName);
CUpti_EventGroup eg[MAX_GPU_COUNT];
CUpti_EventID *myevent = (CUpti_EventID*) calloc(GPU_N, sizeof(CUpti_EventID)); // Make space for event ids.
for ( i=0; i<GPU_N; i++ ) {
CHECK_CUDA_ERROR( cudaSetDevice( i ) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CHECK_CUPTI_ERROR(cuptiSetEventCollectionMode(ctx[i], CUPTI_EVENT_COLLECTION_MODE_KERNEL), "cuptiSetEventCollectionMode" );
CHECK_CUPTI_ERROR( cuptiEventGroupCreate( ctx[i], &eg[i], 0 ), "cuptiEventGroupCreate" );
cuptiEventGetIdFromName ( device[i], cuptiEventName, &myevent[i] );
printf("GPU %i %s=%u.\n", i, cuptiEventName, myevent[i]);
CHECK_CUPTI_ERROR( cuptiEventGroupAddEvent( eg[i], myevent[i] ), "cuptiEventGroupAddEvent" );
CHECK_CUPTI_ERROR( cuptiEventGroupEnable( eg[i] ), "cuptiEventGroupEnable" );
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
#endif
#ifdef PAPI
printf("Setup PAPI counters internally (PAPI)\n");
int EventSet = PAPI_NULL;
int NUM_EVENTS = MAX_GPU_COUNT*MAX_NUM_EVENTS;
long long values[NUM_EVENTS];
int eventCount;
int retval, ee;
/* PAPI Initialization */
retval = PAPI_library_init( PAPI_VER_CURRENT );
if( retval != PAPI_VER_CURRENT ) fprintf( stderr, "PAPI_library_init failed\n" );
printf( "PAPI version: %d.%d.%d\n", PAPI_VERSION_MAJOR( PAPI_VERSION ), PAPI_VERSION_MINOR( PAPI_VERSION ), PAPI_VERSION_REVISION( PAPI_VERSION ) );
retval = PAPI_create_eventset( &EventSet );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_create_eventset failed\n" );
// In this example measure events from each GPU
int numEventEndings = 3;
char const *EventEndings[] = {
"cuda:::metric:inst_per_warp",
"cuda:::event:inst_executed",
"cuda:::event:elapsed_cycles_sm"
};
    // Add events at a GPU-specific level, e.g. cuda:::event:elapsed_cycles_sm:device=2 (built with the :device=N qualifier below)
char *EventName[NUM_EVENTS];
char tmpEventName[50];
eventCount = 0;
for( i = 0; i < GPU_N; i++ ) {
CHECK_CUDA_ERROR( cudaSetDevice( i ) ); // Set device
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CHECK_CUPTI_ERROR(cuptiSetEventCollectionMode(ctx[i], CUPTI_EVENT_COLLECTION_MODE_KERNEL), "cuptiSetEventCollectionMode" );
for ( ee=0; ee<numEventEndings; ee++ ) {
snprintf( tmpEventName, 50, "%s:device=%d\0", EventEndings[ee], i );
// printf( "Trying to add event %s to GPU %d in PAPI...", tmpEventName , i ); fflush(NULL);
retval = PAPI_add_named_event( EventSet, tmpEventName );
if (retval==PAPI_OK) {
printf( "Add event success: '%s' GPU %i\n", tmpEventName, i );
EventName[eventCount] = (char *)calloc( 50, sizeof(char) );
snprintf( EventName[eventCount], 50, "%s", tmpEventName );
eventCount++;
} else {
printf( "Add event failure: '%s' GPU %i error=%s\n", tmpEventName, i, PAPI_strerror(retval));
}
}
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
// Start PAPI event measurement
retval = PAPI_start( EventSet );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_start failed\n" );
#endif
// Start timing and compute on GPU(s)
printf( "Computing with %d GPUs...\n", GPU_N );
StartTimer();
// Copy data to GPU, launch the kernel and copy data back. All asynchronously
for (i = 0; i < GPU_N; i++) {
// Set device
CHECK_CUDA_ERROR( cudaSetDevice( i ));
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
// Copy input data from CPU
CHECK_CUDA_ERROR( cudaMemcpyAsync( plan[i].d_Data, plan[i].h_Data, plan[i].dataN * sizeof( float ), cudaMemcpyHostToDevice, plan[i].stream ) );
// Perform GPU computations
reduceKernel <<< BLOCK_N, THREAD_N, 0, plan[i].stream >>> ( plan[i].d_Sum, plan[i].d_Data, plan[i].dataN );
if ( cudaGetLastError() != cudaSuccess ) { printf( "reduceKernel() execution failed (GPU %d).\n", i ); exit(EXIT_FAILURE); }
// Read back GPU results
CHECK_CUDA_ERROR( cudaMemcpyAsync( plan[i].h_Sum_from_device, plan[i].d_Sum, ACCUM_N * sizeof( float ), cudaMemcpyDeviceToHost, plan[i].stream ) );
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
// Process GPU results
printf( "Process GPU results on %d GPUs...\n", GPU_N );
for( i = 0; i < GPU_N; i++ ) {
float sum;
// Set device
CHECK_CUDA_ERROR( cudaSetDevice( i ) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
// Wait for all operations to finish
cudaStreamSynchronize( plan[i].stream );
// Finalize GPU reduction for current subvector
sum = 0;
for( j = 0; j < ACCUM_N; j++ ) {
sum += plan[i].h_Sum_from_device[j];
}
*( plan[i].h_Sum ) = ( float ) sum;
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
double gpuTime = GetTimer();
#ifdef CUPTI_ONLY
size_t size = 1024;
size_t sizeBytes = size*sizeof(uint64_t);
uint64_t buffer[size];
uint64_t tmp[size]; for (int jj=0; jj<1024; jj++) tmp[jj]=0;
for ( i=0; i<GPU_N; i++ ) {
CHECK_CUDA_ERROR( cudaSetDevice( i ) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CHECK_CU_ERROR( cuCtxSynchronize( ), "cuCtxSynchronize" );
CHECK_CUPTI_ERROR( cuptiEventGroupReadEvent ( eg[i], CUPTI_EVENT_READ_FLAG_NONE, myevent[i], &sizeBytes, &tmp[0] ), "cuptiEventGroupReadEvent" );
buffer[i] = tmp[0];
printf( "CUPTI %s device %d counterValue %u (on one domain, may need to be multiplied by num of domains)\n", cuptiEventName, i, buffer[i] );
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
#endif
#ifdef PAPI
for ( i=0; i<GPU_N; i++ ) {
CHECK_CUDA_ERROR( cudaSetDevice( i ) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CHECK_CU_ERROR( cuCtxSynchronize( ), "cuCtxSynchronize" );
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
// retval = PAPI_read( EventSet, values );
// if( retval != PAPI_OK ) fprintf( stderr, "PAPI_read failed\n" );
// for( i = 0; i < eventCount; i++ )
// printf( "PAPI counterValue %12lld \t\t --> %s \n", values[i], EventName[i] );
// retval = PAPI_read( EventSet, values );
// if( retval != PAPI_OK ) fprintf( stderr, "PAPI_read failed\n" );
// for( i = 0; i < eventCount; i++ )
// printf( "PAPI counterValue %12lld \t\t --> %s \n", values[i], EventName[i] );
retval = PAPI_stop( EventSet, values );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_stop failed\n" );
for( i = 0; i < eventCount; i++ )
printf( "PAPI counterValue %12lld \t\t --> %s \n", values[i], EventName[i] );
retval = PAPI_cleanup_eventset( EventSet );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_cleanup_eventset failed\n" );
retval = PAPI_destroy_eventset( &EventSet );
if( retval != PAPI_OK ) fprintf( stderr, "PAPI_destroy_eventset failed\n" );
PAPI_shutdown();
#endif
for( i = 0; i < GPU_N; i++ ) {
CHECK_CUDA_ERROR( cudaFreeHost( plan[i].h_Sum_from_device ) );
CHECK_CUDA_ERROR( cudaFree( plan[i].d_Sum ) );
CHECK_CUDA_ERROR( cudaFree( plan[i].d_Data ) );
// Shut down this GPU
CHECK_CUDA_ERROR( cudaStreamDestroy( plan[i].stream ) );
}
sumGPU = 0;
for( i = 0; i < GPU_N; i++ ) {
sumGPU += h_SumGPU[i];
}
printf( " GPU Processing time: %f (ms)\n", gpuTime );
// Compute on Host CPU
printf( "Computing the same result with Host CPU...\n" );
StartTimer();
sumCPU = 0;
for( i = 0; i < GPU_N; i++ ) {
for( j = 0; j < plan[i].dataN; j++ ) {
sumCPU += plan[i].h_Data[j];
}
}
double cpuTime = GetTimer();
if (gpuTime > 0) {
printf( " CPU Processing time: %f (ms) (speedup %.2fX)\n", cpuTime, (cpuTime/gpuTime) );
} else {
printf( " CPU Processing time: %f (ms)\n", cpuTime);
}
// Compare GPU and CPU results
printf( "Comparing GPU and Host CPU results...\n" );
diff = fabs( sumCPU - sumGPU ) / fabs( sumCPU );
printf( " GPU sum: %f\n CPU sum: %f\n", sumGPU, sumCPU );
printf( " Relative difference: %E \n", diff );
// Cleanup and shutdown
for( i = 0; i < GPU_N; i++ ) {
CHECK_CUDA_ERROR( cudaSetDevice( i ) );
CHECK_CUDA_ERROR( cudaFreeHost( plan[i].h_Data ) );
cudaDeviceReset();
}
#ifdef CUPTI_ONLY
free(myevent);
#endif
exit( ( diff < 1e-5 ) ? EXIT_SUCCESS : EXIT_FAILURE );
}
|
675ff6efb46d08131e528f1ec78d7edda71ae705.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cassert>
#include <algorithm> // for std::min
__device__
void cube(double* xi) {
*xi = (*xi) * (*xi) * (*xi);
}
__global__
void cube_kernel(double* x, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
cube(&x[tid]);
}
}
int main(int argc, char* argv[]) {
double* x = NULL;
int size = 100;
hipError_t cuda_status = hipSuccess;
cuda_status = hipMallocManaged(&x, sizeof(double) * size);
assert(cuda_status == hipSuccess);
for (int i = 0; i < size; i++) {
x[i] = 2.0;
}
    int numThreads = std::min(32, size);
int numBlocks = static_cast<int>(ceil(((double) size)/((double) numThreads)));
hipLaunchKernelGGL(( cube_kernel), dim3(numBlocks), dim3(numThreads), 0, 0, x, size);
cuda_status = hipDeviceSynchronize();
assert(cuda_status == hipSuccess);
double xsum = 0.0;
for (int i = 0; i < size; i++) {
xsum += x[i];
}
std::cout << "sum of elementwise cubed x is: " << xsum << std::endl;
if (xsum == 800.0) std::cout << "SUCCESS!" << std::endl;
else std::cout << "ERROR!" << std::endl;
return 0;
}
| 675ff6efb46d08131e528f1ec78d7edda71ae705.cu | #include <iostream>
#include <cassert>
__device__
void cube(double* xi) {
*xi = (*xi) * (*xi) * (*xi);
}
__global__
void cube_kernel(double* x, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
cube(&x[tid]);
}
}
int main(int argc, char* argv[]) {
double* x = NULL;
int size = 100;
cudaError_t cuda_status = cudaSuccess;
cuda_status = cudaMallocManaged(&x, sizeof(double) * size);
assert(cuda_status == cudaSuccess);
for (int i = 0; i < size; i++) {
x[i] = 2.0;
}
int numThreads = std::min(32, size);
int numBlocks = static_cast<int>(ceil(((double) size)/((double) numThreads)));
cube_kernel<<<numBlocks, numThreads>>>(x, size);
cuda_status = cudaDeviceSynchronize();
assert(cuda_status == cudaSuccess);
double xsum = 0.0;
for (int i = 0; i < size; i++) {
xsum += x[i];
}
std::cout << "sum of elementwise cubed x is: " << xsum << std::endl;
if (xsum == 800.0) std::cout << "SUCCESS!" << std::endl;
else std::cout << "ERROR!" << std::endl;
return 0;
}
|
c25c4431555de72512f9f180a5eb828127e836ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Filename: main.cu **************************************************************************** /
 *
 * INPUT:
 * -Particulas.in:
 * cantParticles
 * type x y z Vx Vy Vz q ; where
 * dt ; (x,y,z) = position relative to some (0,0,0)
 * temp0 ; (Vx,Vy,Vz) = initial velocities
 * tautp ; dt = time step (delta_tiempo)
 * tempi ; q = charge
 * ; temp0 = target temperature
 * ; tempi = initial temperature (not used yet)
 * ; tautp = velocity-correction factor
 *
 *
 *
 * -TablaCoeficientesLennard
 * type sigma epsilon mass min max ; where min and max indicate over which range
 * ; of values the samples have to be densified
 * ; (NOT IMPLEMENTED YET)
 *
 * ALGORITHM:
 * 1-Load the coefficients
 * 2-Build the Lennard-Jones table with cant_samples_r samples
 * For each particle type:
 * Compute, from the coefficients, the potential at cant_samples_r values of r
 * 3-Load the particles
 * Sort them and build the indices
 * For each MD iteration:
 * 4-Compute the distances:
 * Every particle against all the others
 * Build the distance matrix
 * 5-Compute the derivatives with respect to r for every pair of particles
 * 6-Compute the force on each particle:
 * Every particle against all the others: 3D matrix
 * Obtain the resultant force on each particle: 3D vector
 * 7-Compute the new positions: 3D vector
 *
 ***************************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <math.h>
#include <vector>
#include <algorithm>
#include <cmath>
#include <string>
#include <iomanip>
#include <sys/time.h>
/** **************************************************************** **/
/** ************* DEFAULT GLOBAL VARIABLES VALUES ****************** **/
#define BLOCK_SIZE_X 32
#define BLOCK_SIZE_Y 16
#define BLOCK_SIZE (BLOCK_SIZE_X*BLOCK_SIZE_Y)
#define TEXTURE_MEM_SIZE 262000
#define DIF_FINITAS_DELTA 4
/** Physical constants **/
#define CANT_TYPES 37
#define MAx 45
#define MIn 0.001
#define DIST (MAx - MIn)
#define DELTA_TIEMPO 0.0001
#define TEMP 100
#define TAO 0.1
#define BOX_MAX 999 // maximum distance from 0 along each coordinate
 // This defines a cube of volume (2*BOX_MAX)^3
/** Filenames **/
char* lennardTableFileName = "Input_Mache/TablaCoeficientesLennard";
char* particlesFileName = "Input_Mache/particles.in";
char* debugOutputFilename = "Output_Mache/debug.out";
char* outputFilename = "Output_Mache/results.out";
char* crdFilename = "Output_Mache/mdcrd";
char* timeFilename = "Output_Mache/times.out";
using namespace std;
// streamsize ss = cout.precision();
/** **************************************************************** **/
/** ******************** GLOBAL VARIABLES ************************** **/
texture <float, hipTextureType2D,hipReadModeElementType> texRef;
double delta_tiempo = DELTA_TIEMPO;
double temp0 = TEMP;
double tempi;
double tautp = TAO;
double Boltzmann_cte = 0.0019872041;
double box_max_x = BOX_MAX;
double box_max_y = BOX_MAX;
double box_max_z = BOX_MAX;
bool box = true;
double cut = 12;
int cant_steps = 1;
int cant_types = CANT_TYPES;
bool CPU=false;
bool derivative = false;
bool analytic = false;
bool results = false;
bool amberResults = false;
bool coordinates = false;
bool periodicity = false;
/** **************************************************************** **/
/** ************************* DEVICE ******************************* **/
/**
 * Takes one epsilon/sigma pair (e,s) plus the arrays with all the other values (*EPS, *SIG)
 * and stores in LJ_POT the potential of (e,s) against every value in EPS and SIG.
 */
__global__
void lennard_Kernel(float* LJ_POT, double* EPS, double* SIG,
double e, double s, double var, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x >= width || y >= height) {return;}
/* Variables */
double sig12 = (double) (s + SIG[y])/2;
double eps12 = (double) sqrt(e * EPS[y]);
double r = (double) MIn+x*var;
/* Resultado */
LJ_POT[y*width +x] = (float) 4.0*eps12*( pow((sig12/r),12) - pow((sig12/r),6));
}
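/* The kernel above combines the per-type parameters using the Lorentz-Berthelot mixing rules
 * (sigma12 = (sigma_i + sigma_j)/2, eps12 = sqrt(eps_i * eps_j)) and tabulates the Lennard-Jones
 * potential V(r) = 4*eps12*((sigma12/r)^12 - (sigma12/r)^6) at cant_samples_r equally spaced
 * samples r = MIn + x*var. */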
/** **************************************************************** **/
/**
 * Takes one epsilon/sigma pair (e,s) plus the arrays with all the other values (*EPS, *SIG)
 * and stores in dLJ_POT the derivative of the potential of (e,s) against every value in EPS and SIG.
 */
__global__
void derivatives_lennard_Kernel(float* dLJ_POT, double* EPS, double* SIG,
double e, double s, double var, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x >= width || y >= height) {return;}
/* Variables */
double sig12 = (double) (s + SIG[y])/2;
double eps12 = (double) sqrt(e * EPS[y]);
double r = (double) MIn+x*var;
/* Resultado */
dLJ_POT[y*width +x] = (float) 24.0*eps12*( pow(sig12,6)/ pow(r,7) - 2 * pow(sig12,12)/ pow(r,13));
}
/** **************************************************************** **/
__global__
void close_distances_kernel(double* X, double* Y, double* Z, double* R,
double* position_x, double* position_y, double* position_z,
double box_x, double box_y, double box_z, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i >= width || j >= height) {return;}
unsigned int pos = j*width+i;
double _X = position_x[i] - position_x[j];
double _Y = position_y[i] - position_y[j];
double _Z = position_z[i] - position_z[j];
_X = _X - box_x * round((double) _X/box_x);
_Y = _Y - box_y * round((double) _Y/box_y);
_Z = _Z - box_z * round((double) _Z/box_z);
X[pos] = _X;
Y[pos] = _Y;
Z[pos] = _Z;
R[pos] = (double) sqrt( _X*_X + _Y*_Y + _Z*_Z );
}
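/* The subtraction of box * round(delta/box) above is the minimum-image convention: each pairwise
 * displacement is wrapped into [-box/2, box/2] along every axis, so the distance used is the one
 * to the nearest periodic image of the other particle. */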
/** **************************************************************** **/
__global__
void distances_kernel(double* R, double* X, double* Y, double* Z,
double* x1, double* y1, double* z1, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x >= width || y >= height) {return;}
double x_ = x1[x] - x1[y];
double y_ = y1[x] - y1[y];
double z_ = z1[x] - z1[y];
X[y*width+x] = x_;
Y[y*width+x] = y_;
Z[y*width+x] = z_;
R[y*width+x] = (double) sqrt( x_*x_ + y_*y_ + z_*z_ );
}
/** **************************************************************** **/
__global__
void derivative_E_r(double* dEr, double* r, double cut, int* item_to_type,
int cant_samples_r, int cant_types, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/
/* Dentro del bloque correspondiente */
if(x >= width || y >= height) {return;}
if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;}
 /* Potential value for the distance r,
 * for the corresponding particle type */
/** type of particles **/
float t_o_p_1 = (float) item_to_type[y] * cant_types; //this one decides which subMatrix to use
float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1; //this one decides which row on these
 /** Convert r to an index into the Lennard-Jones table **/
/** r = (MAX-MIN) * X / N + MIN **/
/** x = (r-MIN) * N / (MAX-MIN) **/
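 /* Worked example (assuming the default constants of this file): with MIn = 0.001, MAx = 45 and
 * cant_samples_r = 262000/4 = 65500 samples, var is roughly 6.87e-4, and a distance r = 12 maps to
 * index_x ~ (12 - 0.001) * 65500 / 44.999 + 0.5 ~ 17466. */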
float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
/*
double rposta=r[y*width+x];
if(rposta> MAx)
rposta=MAx;
else
if(rposta<MIn)
rposta=MIn;
float index_x = (float)((double) (rposta - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
*/
double E_r_up = (double) tex2D( texRef, index_x + DIF_FINITAS_DELTA, t_o_p_2 );
double E_r_dwn = (double) tex2D( texRef, index_x - DIF_FINITAS_DELTA, t_o_p_2 );
double r_dif = DIST * 2 * (DIF_FINITAS_DELTA) / cant_samples_r;
dEr[y*width+x] = (E_r_up - E_r_dwn) / (r_dif);
}
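/* dEr above is a central finite difference on the tabulated potential: the texture is read
 * DIF_FINITAS_DELTA samples above and below index_x, and r_dif converts that offset of
 * 2*DIF_FINITAS_DELTA samples back into distance units (DIST / cant_samples_r per sample), so
 * dEr ~ (E(r + d) - E(r - d)) / (2*d) with d = DIF_FINITAS_DELTA * DIST / cant_samples_r. */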
/** **************************************************************** **/
/*********************************************
 * This function computes dEr on the host from the tabulated data of the in-memory matrix (dLJPot)
 * *******************************************/
void direct_derivative_E_r_MEMORY(float* dLJPot, double* dEr, double* r, int* item_to_type,
int cant_samples_r, int cant_types, int width, int height, int x, int y)
{
/* Elemento de la matriz a calcular */
//unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/
//unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/
/* Dentro del bloque correspondiente */
//if(x >= width || y >= height) {return;}
if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;}
 /* Potential value for the distance r,
 * for the corresponding particle type */
/** type of particles **/
//float t_o_p_1 = (float) item_to_type[y] * cant_types; //this one decides which subMatrix to use
//float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1; //this one decides which row on these
 int t_o_p_1 = item_to_type[y] * cant_types; //this one decides which subMatrix to use
 int t_o_p_2 = item_to_type[x] + t_o_p_1; //this one decides which row on these
 int posInicial = t_o_p_2 * cant_samples_r; //start of that row
 /** Convert r to an index into the Lennard-Jones table **/
 /** r = (MAX-MIN) * X / N + MIN **/
 /** x = (r-MIN) * N / (MAX-MIN) **/
 //float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
 /* Clamp the distance to the tabulated range and read the nearest sample */
 double rij = r[y*width+x];
 if(rij > MAx)
 dEr[y*width+x] = dLJPot[posInicial + cant_samples_r - 1];
 else
 if(rij < MIn)
 dEr[y*width+x] = dLJPot[posInicial];
 else
 dEr[y*width+x] = dLJPot[posInicial + (int) round((rij - MIn) * (cant_samples_r / DIST))];
//dEr[y*width+x] = (double) tex2D( texRef, index_x, t_o_p_2 );
}
//******************************************************
__global__
void direct_derivative_E_r(double* dEr, double* r, double cut, int* item_to_type,
int cant_samples_r, int cant_types, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/
/* Dentro del bloque correspondiente */
if(x >= width || y >= height) {return;}
if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;}
 /* Potential value for the distance r,
 * for the corresponding particle type */
/** type of particles **/
float t_o_p_1 = (float) item_to_type[y] * cant_types; //this one decides which subMatrix to use
float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1; //this one decides which row on these
 /** Convert r to an index into the Lennard-Jones table **/
/** r = (MAX-MIN) * X / N + MIN **/
/** x = (r-MIN) * N / (MAX-MIN) **/
float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
/* double rposta=r[y*width+x];
if(rposta> MAx)
rposta=MAx;
else
if(rposta<MIn)
rposta=MIn;
float index_x = (float)((double) (rposta - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
*/
dEr[y*width+x] = (double) tex2D( texRef, index_x, t_o_p_2 );
}
/** **************************************************************** **/
__global__
void E_r(double* Er, double* r, double cut, int* item_to_type,
int cant_samples_r, int cant_types, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/
/* Dentro del bloque correspondiente */
if(x >= width || y >= height) {return;}
if(x == y || r[y*width+x] >= cut) {Er[y*width+x] = 0; return;}
 /* Potential value for the distance r,
 * for the corresponding particle type */
/** type of particles **/
float t_o_p_1 = (float) item_to_type[y]; //this one decides which subMatrix to use
float t_o_p_2 = (float) item_to_type[x]; //this one decides which row on these
float row = t_o_p_2 + 0.5 + (t_o_p_1* cant_types);
 /** Convert r to an index into the Lennard-Jones table **/
/** r = (MAX-MIN) * X / N + MIN **/
/** x = (r-MIN) * N / (MAX-MIN) **/
float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
/*
double rposta=r[y*width+x];
if(rposta> MAx)
rposta=MAx;
else
if(rposta<MIn)
rposta=MIn;
float index_x = (float)((double) (rposta - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
*/
Er[y*width+x] = (double) tex2D( texRef, index_x, row );
}
/* ***************************************************************** **/
/** +ANALYTIC */
/** **************************************************************** **/
__global__
void derivative_E_r_analytic(double* dEr, double* r, double cut, int* item_to_type, int cant_samples_r,
double* EPS, double* SIG, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/
/* Dentro del bloque correspondiente */
if(x >= width || y >= height) {return;}
if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;}
 /* Potential value for the distance r,
 * for the corresponding particle type */
/** type of particle 2 **/
int type_i = item_to_type[x];
int type_j = item_to_type[y];
double sig12 = (double) (SIG[type_i] + SIG[type_j])/2;
double eps12 = (double) sqrt(EPS[type_i] * EPS[type_j]);
dEr[y*width+x] = (double) 24.0*eps12*( pow(sig12,6)/ pow(r[y*width+x],7) - 2 * pow(sig12,12)/ pow(r[y*width+x],13));
}
__global__
void E_r_analytic(double* Er, double* r, double cut, int* item_to_type, int cant_samples_r,
double* EPS, double* SIG, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/
/* Dentro del bloque correspondiente */
if(x >= width || y >= height) {return;}
if(x == y || r[y*width+x] >= cut) {Er[y*width+x] = 0; return;}
 /* Potential value for the distance r,
 * for the corresponding particle type */
/** type of particle 2 **/
int type_i = item_to_type[x];
int type_j = item_to_type[y];
double sig12 = (double) (SIG[type_i] + SIG[type_j])/2;
double eps12 = (double) sqrt(EPS[type_i] * EPS[type_j]);
Er[y*width+x] = (double) 4.0*eps12*( pow((sig12/r[y*width+x]),12) - pow((sig12/r[y*width+x]),6));
}
/** **************************************************************** **/
/** -ANALYTIC */
/* ***************************************************************** **/
/** **************************************************************** **/
/* Fx = dE(r) / dr * (x1-x2) / r */
__global__
void Parcial_Forces_Kernel(double* force, double* dEr, double* dif, double* r, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x >= width || y >= height) {return;}
if(x == y) {force[y*width+x] = 0; return;}
//force[y*width+x] = dEr[y*width+x] * dif[y*width+x] ;
force[y*width+x] = dEr[y*width+x] * dif[y*width+x] / r[y*width+x];
}
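/* Sign convention: for row i, dif holds (coord_j - coord_i) (see distances_kernel), so
 * dEr * dif / r equals -dV/dr * (coord_i - coord_j) / r, i.e. the Cartesian component of the
 * force acting on particle i due to particle j (attractive when dV/dr > 0). */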
/** **************************************************************** **/
__global__
void Resultant_Forces_Kernel(double* result, double* forces, int cant)
{
/* Elemento del vector a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if(x >= cant) {return;}
int i = 0;
double tmp = 0;
int row = x*cant;
for(; i < cant; i++){
tmp += forces[row + i];
}
result[x] = tmp;
}
/** **************************************************************** **/
/* V(t + Dt/2) = V(t - Dt/2) + [ F(t) * Dt ] / m */
__global__
void Resultant_Velocities_Kernel(double* velocity, double* old_velocity, double* force, double* m,
int* item_to_type, double delta_tiempo, int cant_particles)
{
/* Elemento de la matriz a calcular */
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= cant_particles) {return;}
double Vt = old_velocity[i];
int type = item_to_type[i];
double dtx = delta_tiempo*20.454999999999;
//double dtx=delta_tiempo;
/* Result */
velocity[i] = Vt + ( (force[i]*dtx) / m[type] );
}
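/* This is the leapfrog half-step update v(t + dt/2) = v(t - dt/2) + F(t)*dt/m. The factor 20.455
 * converts dt from picoseconds into the internal time unit that makes kcal/mol, angstrom and amu
 * consistent (1 internal unit = 1/20.455 ps, the same convention AMBER uses). */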
/** **************************************************************** **/
/* P(t + Dt) = P(t) + V(t + Dt/2) * Dt */
__global__
void Resultant_Positions_Kernel(double* positions, double* velocity, double delta_tiempo, int cant)
{
/* Elemento del vector a calcular */
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= cant) {return;}
double dtx = delta_tiempo*20.454999999999;
//double dtx=delta_tiempo;
positions[i] = positions[i] + (velocity[i] * dtx);
}
/** **************************************************************** **/
/* -BOX_MAX 0 BOX_MAX */
/* |-----------------|-----------------| */
__global__
void Adjustin_Positions_Kernel(double* position, double box_max, int cant)
{
/* Elemento del vector a calcular */
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= cant) {return;}
double pos = position[i] - box_max;
if(pos > 0){
position[i] = -box_max + fmod(pos, (double) (2*box_max));
}
if(pos < -2*box_max){
position[i] = box_max + fmod(pos, (double) (2*box_max));
}
}
/** **************************************************************** **/
/* Ek = |v|^2 * m / 2 */
/* Ek_x = (v_x)^2 * m / 2 */
__global__
void Kinetic_Energy_Kernel(double* kE, double* vold, double* v, double* m, int* item_to_type, int cant)
{
/* Elemento del vector a calcular */
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i>= cant) {return;}
double vi = vold[i] + v[i];
// double vi=v[i];
int type = item_to_type[i];
// kE[i] = vi * vi * m[type] / 2;
kE[i] = vi * vi * m[type] / 8;
}
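/* The divisor 8 comes from averaging the two half-step velocities: with vp = (vold + v)/2,
 * Ek = m*vp^2/2 = m*(vold + v)^2/8 per coordinate. */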
/** **************************************************************** **/
__global__
void Total_Kinetic_Energy_Kernel(double* kE, double* Ke_x, double* Ke_y, double* Ke_z, int cant)
{
/* Elemento del vector a calcular */
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i>= cant) {return;}
kE[i] = Ke_x[i] + Ke_y[i] + Ke_z[i];
}
/** **************************************************************** **/
__global__
void Corrected_Velocities_Kernel(double* vold, double* v, double lambda, int cant){
/* Elemento del vector a calcular */
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i>= cant) {return;}
vold[i] = v[i];
//vold[i] = v[i] * lambda;
}
/** **************************************************************** **/
/** *************************** HOST ******************************* **/
int main( int argc, char* argv[] )
{
//hipSetDevice(1);
//PROCESO LOS PARAMETROS DE ENTRADA
 for(int i = 1; i < argc; i++){
 if(strcmp(argv[i], "-t") == 0 && i+1 < argc){
 /* outputTimeFilename */
 timeFilename = argv[i+1];
 }
if(strcmp(argv[i], "-a") == 0){
/* ANALYTIC mode */
analytic = true;
}
if(strcmp(argv[i], "-d") == 0){
/* DERIVATIVE mode */
derivative = true;
}
if(strcmp(argv[i], "-r") == 0){
/* RESULTS or TIMER mode */
results = true;
amberResults = true;
}
if(strcmp(argv[i], "-ar") == 0){
/* RESULTS */
amberResults = true;
}
if(strcmp(argv[i], "-c") == 0){
/* PRINT mdcrd file */
coordinates = true;
}
if(strcmp(argv[i], "-p") == 0){
/* Periodicity */
periodicity = true;
}
if(strcmp(argv[i], "-cpu") == 0){
/* Periodicity */
CPU = true;
}
}
//IMPRIMO QUE CARAJO ESTOY EJECUTANDO
if (derivative)
cout << "Derivative" << endl;
if (analytic)
cout << "Analytic" << endl;
if(results){
cout << "DEBUG mode ON" << endl;
}
if(amberResults){
cout << "AMBER results ON" << endl;
}
//CONFIGURAR OUTPUT
fstream out;
fstream crd;
//if(results or amberResults){
/* Output file */
out.open(outputFilename,fstream::out);
streamsize ss = out.precision();
out << setprecision(20);
//}
if(coordinates){
/* CRD output file */
crd.open(crdFilename,fstream::out);
crd << setprecision(3);
crd.setf( std::ios::fixed, std:: ios::floatfield );
crd << " POS(x) POS(y) POS(z)" << endl;
}
struct timeval tv1, tv2;
fstream taim;
if(!results){ //timer mode ON
/* Time output file */
taim.open(timeFilename, fstream::app | fstream::out);
taim << setprecision(20);
}
/* Levantamos Coeficientes de Lennard */
ifstream table (lennardTableFileName);
table >> cant_types;
/**Variables y memoria*/
size_t cant_types_size = cant_types * sizeof(double);
vector<string> h_type;
h_type.resize(cant_types);
double* h_sigma = (double*) ( malloc(cant_types_size));
double* h_epsilon = (double*) ( malloc(cant_types_size));
double* h_mass = (double*) ( malloc(cant_types_size));
/**Levantamos datos*/
for(int j = 0; j<cant_types ; j++){
table >> h_type[j];
table >> h_sigma[j];
table >> h_epsilon[j];
table >> h_mass[j];
}
table.close();
// *****************
//VARIABLES PARA GUARDAR ENERGIA TOTAL
double diferencia, etotalX , etotinicial;
//******************************
/*******************************/
/*Armamos matrices de lennard */
/******************************/
/**Variables y memoria**/
 int cant_samples_r = TEXTURE_MEM_SIZE/(sizeof(float)); // number of original sample values (maximum allowed by texture memory)
double var = DIST / ((double) cant_samples_r); // variation of r
size_t cant_samples_r_size = cant_samples_r * sizeof(float);
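 /* With the defaults above this gives 262000/4 = 65500 samples per table row and
 * var = (45 - 0.001)/65500, roughly 6.87e-4 distance units between consecutive samples. */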
float* h_dLJPot;
float* h_LJPot;
if(derivative)
h_dLJPot = (float*) malloc(cant_samples_r_size*cant_types*cant_types); // #samples * #particles * #particles (*float)
else
h_LJPot = (float*) malloc(cant_samples_r_size*cant_types*cant_types); // #samples * #particles * #particles (*float)
int width = cant_samples_r;
int height = cant_types;
dim3 dimBlock(BLOCK_SIZE_X,BLOCK_SIZE_Y);
dim3 dimGrid( (int) ceil((double)width / (double)dimBlock.x), (int) ceil((double)height / (double)dimBlock.y) );
double* d_EPS; //ARRAY PARA TODOS LOS VALORES DE EPSILON
double* d_SIG; //ARRAY PARA TODOS LOS VALORES DE SIGMA
float* d_LJPot;
float* d_dLJPot;
hipMalloc(&d_EPS, cant_types_size);
hipMalloc(&d_SIG, cant_types_size);
hipMemcpy(d_EPS, h_epsilon, cant_types_size, hipMemcpyHostToDevice);
hipMemcpy(d_SIG, h_sigma, cant_types_size, hipMemcpyHostToDevice);
if(derivative)
hipMalloc(&d_dLJPot, cant_samples_r_size * cant_types);
else
hipMalloc(&d_LJPot, cant_samples_r_size * cant_types);
/** Rellenamos datos con CUDA **/
//CANTIDAD TOTAL DE THREADS: EN X=cant_samples_r EN Y=cant_types
if(derivative) { //LLENO LA TEXTURA CON LAS DERIVADAS PARA CADA PAR DE TIPOS
for(int a = 0; a<cant_types; a++){
hipLaunchKernelGGL(( derivatives_lennard_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dLJPot, d_EPS, d_SIG, h_epsilon[a], h_sigma[a], var, width, height);
hipMemcpy( (float*) &(h_dLJPot[(a*cant_samples_r*cant_types)]), d_dLJPot, cant_types * cant_samples_r_size, hipMemcpyDeviceToHost);
}
} else {
//LLENO LA TEXTURA CON LAS DERIVADAS DEL POTENCIAL PARA CADA PAR DE TIPOS
for(int a = 0; a<cant_types; a++){
hipLaunchKernelGGL(( lennard_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_LJPot, d_EPS, d_SIG, h_epsilon[a], h_sigma[a], var, width, height);
hipMemcpy( (float*) &(h_LJPot[(a*cant_samples_r*cant_types)]), d_LJPot, cant_types * cant_samples_r_size, hipMemcpyDeviceToHost);
}
}
 /** Free the device staging buffer (d_EPS and d_SIG are still needed later by the analytic kernels) **/
 if(derivative)
 hipFree(d_dLJPot);
 else
 hipFree(d_LJPot);
/** DEBUG **/
if(results){
if(derivative)
out << " derivative LENNARD " << endl;
else
out << " LENNARD " << endl;
for(int a = 0; a<cant_types; a++){
out << " Type = " << h_type[a] << endl << " ";
for(int i = 0; i<cant_types; i++){
for(int j = 0; j<cant_samples_r; j+= cant_samples_r/8){
if(derivative)
out << h_dLJPot[(a*cant_types*cant_samples_r)+(i*cant_samples_r)+j] << ", ";
else
out << h_LJPot[(a*cant_types*cant_samples_r)+(i*cant_samples_r)+j] << ", ";
}
out << endl << " ";
}
out << "***********************************************************************************" << endl;
}
}
 /* Load the particles */
fstream particles;
particles.open(particlesFileName);
/** Variables y memoria **/
uint cant_particles;
double* h_position_x;
double* h_position_y;
double* h_position_z;
double* h_velocity_x;
double* h_velocity_y;
double* h_velocity_z;
double* h_velocity_old_x;
double* h_velocity_old_y;
double* h_velocity_old_z;
double* h_chargue;
double h_box_x;
double h_box_y;
double h_box_z;
double h_box_alpha;
double h_box_beta;
double h_box_gamma;
vector<string> h_particle_type;
particles >> cant_particles; //PRIMER LINEA DE particles.in ES EL NUMERO DE PARTICULAS QUE HAY
size_t cant_particles_size = cant_particles * sizeof(double);
h_position_x = (double*)malloc(cant_particles_size);
h_position_y = (double*)malloc(cant_particles_size);
h_position_z = (double*)malloc(cant_particles_size);
h_velocity_x = (double*)malloc(cant_particles_size);
h_velocity_y = (double*)malloc(cant_particles_size);
h_velocity_z = (double*)malloc(cant_particles_size);
h_velocity_old_x = (double*)malloc(cant_particles_size);
h_velocity_old_y = (double*)malloc(cant_particles_size);
h_velocity_old_z = (double*)malloc(cant_particles_size);
h_chargue = (double*)malloc(cant_particles_size);
h_particle_type.resize(cant_particles);
/** Guardamos datos en memoria : coordenadas, velocidades, tipos, cargas **/
for(uint i = 0; i < cant_particles ; i++) {
particles >> h_particle_type[i];
particles >> h_position_x[i];
particles >> h_position_y[i];
particles >> h_position_z[i];
particles >> h_velocity_old_x[i];
particles >> h_velocity_old_y[i];
particles >> h_velocity_old_z[i];
particles >> h_chargue[i];
}
 /** Periodicity **/
 //TODO: for now we use a cube,
 //with the origin placed at its center
 //Values are read in x, y, z order
particles >> box;
if(box){
cout << " Levantamos caja" << endl;
particles >> h_box_x;
particles >> h_box_y;
particles >> h_box_z;
particles >> h_box_alpha;
particles >> h_box_beta;
particles >> h_box_gamma;
if( h_box_alpha != 90 or h_box_beta != 90 or h_box_gamma != 90){
cout << " Se forzaron los angulos para que sea un CUBO: " << endl;
}
box_max_x = h_box_x/2;
box_max_y = h_box_y/2;
box_max_z = h_box_z/2;
}
/** Parametros **/
particles >> cant_steps;
particles >> delta_tiempo;
particles >> temp0;
particles >> tempi;
particles >> tautp;
particles >> cut;
particles.close();
// if(results){
// /** DEBUG **/
// out << " INITIAL VALUES" << endl;
// for(int i = 0; i<cant_particles; i++){
// out << " Type: " << h_particle_type[i] << " | Pos: (" << h_position_x[i] << " , " << h_position_y[i] << " , " << h_position_z[i] << ")";
// out << " | Vel: (" << h_velocity_old_x[i] << " , " << h_velocity_old_y[i] << " , " << h_velocity_old_z[i] << ")" << endl;
// }
// out << endl;
//
// /** DEBUG **/
// }
// if(results){
// /** DEBUG **/
// out << " CANT of TYPES" << endl;
// for(int i = 0; i < h_type.size(); i++){
// out << " " << h_type[i] << " " << cant_of_typ[i] << endl;
// }
// out << endl;
/** DEBUG **/
// }
 /* Build the item structure so that, inside CUDA, we know
 * which type the current particle belongs to */
/** h_particle_type = H H H H H K K K K K O O O O O O O O O ... **/
/** h_item_particle = 1 1 1 1 1 3 3 3 3 3 9 9 9 9 9 9 9 9 9 ... **/
//ARMO UN ARRAY CON EL TIPO DE CADA PARTICULA
int * h_item_particle = (int*)malloc(cant_particles * sizeof(int));
int * d_item_particle;
hipMalloc(&d_item_particle, cant_particles * sizeof(int));
 /** Record each particle's type as an int that will be used as the index into h_type **/
for(int i = 0; i< cant_particles; i++){
for(int j = 0; j< h_type.size(); j++){
if(h_type[j] == h_particle_type[i]){
h_item_particle[i] = j;
break;
}
}
}
hipMemcpy(d_item_particle, h_item_particle, cant_particles * sizeof(int), hipMemcpyHostToDevice);
// if(results){
// /** DEBUG **/
// out << " ITEM to TYPE" << endl;
// for(int i = 0; i < cant_particles; i++){
// out << " Particle[" << i << "] | Type: " << h_type[h_item_particle[i]] << " (index :" << h_item_particle[i] << ") " << endl;
// }
// out << endl;
// /** DEBUG **/
// }
/* ************************************************ */
/* MANEJO DE MEMORIA EN EL DISPOSITIVO GPU */
/* ************************************************ */
/** Variables **/
size_t s_size = cant_particles_size * cant_particles;
/** Positions **/
double* d_position_x;
double* d_position_y;
double* d_position_z;
hipMalloc(&d_position_x, cant_particles_size);
hipMalloc(&d_position_y, cant_particles_size);
hipMalloc(&d_position_z, cant_particles_size);
hipMemcpy(d_position_x, h_position_x, cant_particles_size, hipMemcpyHostToDevice);
hipMemcpy(d_position_y, h_position_y, cant_particles_size, hipMemcpyHostToDevice);
hipMemcpy(d_position_z, h_position_z, cant_particles_size, hipMemcpyHostToDevice);
/** Positions **/
double* d_pos_close_x;
double* d_pos_close_y;
double* d_pos_close_z;
hipMalloc(&d_pos_close_x, cant_particles_size);
hipMalloc(&d_pos_close_y, cant_particles_size);
hipMalloc(&d_pos_close_z, cant_particles_size);
/** Particle's mass **/
double* d_mass;
hipMalloc(&d_mass, cant_types_size);
hipMemcpy(d_mass, h_mass, cant_types_size, hipMemcpyHostToDevice);
/** Velocities **/
double* d_velocity_x;
double* d_velocity_y;
double* d_velocity_z;
double* d_velocity_old_x;
double* d_velocity_old_y;
double* d_velocity_old_z;
hipMalloc(&d_velocity_x, cant_particles_size);
hipMalloc(&d_velocity_y, cant_particles_size);
hipMalloc(&d_velocity_z, cant_particles_size);
hipMalloc(&d_velocity_old_x, cant_particles_size);
hipMalloc(&d_velocity_old_y, cant_particles_size);
hipMalloc(&d_velocity_old_z, cant_particles_size);
hipMemcpy(d_velocity_old_x, h_velocity_old_x, cant_particles_size, hipMemcpyHostToDevice);
hipMemcpy(d_velocity_old_y, h_velocity_old_y, cant_particles_size, hipMemcpyHostToDevice);
hipMemcpy(d_velocity_old_z, h_velocity_old_z, cant_particles_size, hipMemcpyHostToDevice);
/** Distances **/
double* d_distance_x;
double* d_distance_y;
double* d_distance_z;
double* d_distance_r;
hipMalloc(&d_distance_x, s_size);
hipMalloc(&d_distance_y, s_size);
hipMalloc(&d_distance_z, s_size);
hipMalloc(&d_distance_r, s_size);
/** Derivatives **/
double* d_dEr;
hipMalloc(&d_dEr, s_size);
/** VDWAALS **/
double* d_Er;
hipMalloc(&d_Er, s_size);
/** Forces **/
double* d_Force_x;
double* d_Force_y;
double* d_Force_z;
hipMalloc(&d_Force_x, s_size);
hipMalloc(&d_Force_y, s_size);
hipMalloc(&d_Force_z, s_size);
double* d_Force_x_resultant;
double* d_Force_y_resultant;
double* d_Force_z_resultant;
hipMalloc(&d_Force_x_resultant, cant_particles_size);
hipMalloc(&d_Force_y_resultant, cant_particles_size);
hipMalloc(&d_Force_z_resultant, cant_particles_size);
/** Kinetic Energy **/
double* d_kinetic_energy;
double* d_kinetic_energy_x;
double* d_kinetic_energy_y;
double* d_kinetic_energy_z;
hipMalloc(&d_kinetic_energy, cant_particles_size);
hipMalloc(&d_kinetic_energy_x, cant_particles_size);
hipMalloc(&d_kinetic_energy_y, cant_particles_size);
hipMalloc(&d_kinetic_energy_z, cant_particles_size);
/* ************************************************ */
/* MANEJO DE MEMORIA EN EL HOST */
/* ************************************************ */
/** Distances **/
double (*h_distance_x)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_distance_y)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_distance_z)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_distance_r)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
/** Forces **/
double (*h_Force_x)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_Force_y)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_Force_z)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double* h_Force_x_resultant = (double*)malloc(cant_particles_size);
double* h_Force_y_resultant = (double*)malloc(cant_particles_size);
double* h_Force_z_resultant = (double*)malloc(cant_particles_size);
/** Kinetic Energy **/
double* h_kinetic_energy = (double*)malloc(cant_particles_size);
double* h_kinetic_energy_x = (double*)malloc(cant_particles_size);
double* h_kinetic_energy_y = (double*)malloc(cant_particles_size);
double* h_kinetic_energy_z = (double*)malloc(cant_particles_size);
/* ************************************************ */
/* Calculamos ENERGIA CINETICA deseada */
/* ************************************************ */
/* Ek = Kb * T (3N - Nc) / 2 */
double Nc = 5;
double factor_conv_T_Ek = 2 / (Boltzmann_cte * (3 *cant_particles - Nc) );
if(amberResults){
double kinetic_Energy = Boltzmann_cte * temp0 * (3*cant_particles - Nc) / 2;
/** DEBUG **/
out << " THEORETICAL VALUES:" << endl << endl;
out << " * Kb = " << Boltzmann_cte << endl << endl;
out << " * Temperature = " << temp0 << endl << endl;
out << " * Kinetic Energy = " << kinetic_Energy << endl << endl;
out << " * Factor_conv_T_Ek = " << factor_conv_T_Ek << endl << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Seteamos la memoria de TEXTURA */
/* ************************************************ */
hipArray* cuLennard_i;
// if(!analytic){
/** Usamos texturas **/
hipChannelFormatDesc channelDesc = hipCreateChannelDesc( 32, 0, 0, 0, hipChannelFormatKindFloat );
hipMallocArray(&cuLennard_i, &channelDesc, cant_samples_r, cant_types*cant_types); //width x height
texRef.addressMode[0] = hipAddressModeClamp;
//texRef.addressMode[0] = hipAddressModeBorder;
 texRef.filterMode = hipFilterModeLinear; //hipFilterModePoint; // interpolation type
if(derivative) {
hipMemcpyToArray(cuLennard_i, 0, 0, h_dLJPot, cant_types * cant_types * cant_samples_r_size, hipMemcpyHostToDevice);
} else {
hipMemcpyToArray(cuLennard_i, 0, 0, h_LJPot, cant_types * cant_types * cant_samples_r_size, hipMemcpyHostToDevice);
}
/** Bindeamos la textura **/
hipBindTextureToArray(texRef, cuLennard_i, channelDesc);
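 /* With hipFilterModeLinear and unnormalized coordinates the hardware interpolates linearly
 * between neighbouring table entries, and texel centres sit at integer + 0.5; that is why the
 * kernels add 0.5 when converting r (and the type-pair row) to texture coordinates. */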
// }
if(amberResults){
out << endl << " ESTARTIN DE PROGRAM" << endl;
out << " Amaunt of itereishons = " << cant_steps << endl << endl;
}
// for(int i=0 ; i<1000000 ; i++){
// for(int j=0 ; j<1000 ; j++){
//}
// }
/** Esperamos a que termine de bindear la textura **/
hipDeviceSynchronize();
if(!results){ //timer mode ON
/** Arrancamos medicion del tiempo **/
gettimeofday(&tv1, NULL);
}
for(int step = 0; step < cant_steps; step++){
/* ********************************************************************************************************** */
/* ****************************************** INICIO Iteracion DM ******************************************* */
/* ********************************************************************************************************** */
if(amberResults){
out << "/* ************************************************************************************************ */" << endl;
out << "/* ************************************* INICIO Iteracion " << step << " ************************************ */" << endl;
out << "/* ************************************************************************************************ */" << endl;
}
dimBlock.x = BLOCK_SIZE_X;
dimBlock.y = BLOCK_SIZE_Y;
/* ************************************************ */
 /* Compute the matrix of distances between particles */
/* ************************************************ */
/**Variables y memoria*/
width = cant_particles;
height = cant_particles;
dimGrid.x = ceil((double)width / (double)dimBlock.x);
dimGrid.y = ceil((double)height / (double)dimBlock.y);
if(!periodicity){
hipLaunchKernelGGL(( distances_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_distance_r, d_distance_x, d_distance_y, d_distance_z,
d_position_x, d_position_y, d_position_z, width, height);
} else {
/**Rellenamos datos**/
hipLaunchKernelGGL(( close_distances_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_distance_x, d_distance_y, d_distance_z, d_distance_r,
d_position_x, d_position_y, d_position_z,
h_box_x, h_box_y, h_box_z, width, height);
}
 //Copy the distances back to the host: they are needed for the CPU computation of dEr
if (CPU)
hipMemcpy(h_distance_r, d_distance_r, s_size, hipMemcpyDeviceToHost);
//if(results){
/** DEBUG **/
/*hipMemcpy(h_distance_r, d_distance_r, s_size, hipMemcpyDeviceToHost);
hipMemcpy(h_distance_x, d_distance_x, s_size, hipMemcpyDeviceToHost);
hipMemcpy(h_distance_y, d_distance_y, s_size, hipMemcpyDeviceToHost);
hipMemcpy(h_distance_z, d_distance_z, s_size, hipMemcpyDeviceToHost);
if (step %10000 == 0){
out << " DISTANCES - R" << endl << " ";
for(int i = 0; i<cant_particles; i++){
out << " " << i << " | ";
for(int j = 0; j<cant_particles; j++){
out << h_distance_r[i][j] << "\t";
}
out << endl << " ";
}
out << endl;
}*/
/*
out << " DISTANCES - X" << endl << " ";
for(int i = 0; i<cant_particles; i++){
out << " " << i << " | ";
for(int j = 0; j<cant_particles; j++){
out << h_distance_x[i][j] << "\t";
}
out << endl << " ";
}
out << endl;
out << " DISTANCES - Y" << endl << " ";
for(int i = 0; i<cant_particles; i++){
out << " " << i << " | ";
for(int j = 0; j<cant_particles; j++){
out << h_distance_y[i][j] << "\t";
}
out << endl << " ";
}
out << endl;
out << " DISTANCES - Z" << endl << " ";
for(int i = 0; i<cant_particles; i++){
out << " " << i << " | ";
for(int j = 0; j<cant_particles; j++){
out << h_distance_z[i][j] << "\t";
}
out << endl << " ";
}
out << endl;
*/
/* double (*matriz)[cant_particles] = (double (*)[cant_particles]) h_distance_r;
for(int i = 0; i<cant_particles; i+= cant_particles){
out << " " << i << " | ";
for(int j = 0; j<cant_particles; j+= cant_particles){
out << matriz[i][j] << "\t";
}
out << endl << " ";
}
out << endl;
*/
/** DEBUG **/
//}
 /* Host buffer for dEr; it is only allocated (and later filled) when the CPU path is enabled */
 double (*h_dEr)[cant_particles] = NULL;
 if(CPU)
 h_dEr = (double (*)[cant_particles]) ( malloc(s_size));
/* ************************************************ */
/* Calculamos Derivadas */
/* ************************************************ */
/** Variables y memoria **/
width = cant_particles;
height = cant_particles;
dimGrid.x = ceil((double)width / (double)dimBlock.x);
dimGrid.y = ceil((double)height / (double)dimBlock.y);
// derivative_E_r_analytic<<<dimGrid, dimBlock>>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
if(analytic){
hipLaunchKernelGGL(( derivative_E_r_analytic), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
 if(CPU) //Analytic version on the CPU (host helper assumed to be provided elsewhere)
 derivative_E_r_analytic_MEMORY(h_dEr, h_distance_r, cut, h_item_particle, cant_samples_r, h_epsilon, h_sigma, width, height);
// if(amberResults){
// /** Calculo la energia E(r) para debug **/
hipLaunchKernelGGL(( E_r_analytic), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Er, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
} else {
// /** Calculo de la derivada dE(r)/dr usando diferencias finitas **/
if(derivative){
//derivative_E_r_analytic<<<dimGrid, dimBlock>>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
if (CPU){
int x,y;
for (x=0;x<cant_particles;x++)
for(y=0;y<cant_particles;y++)
direct_derivative_E_r_MEMORY(h_dLJPot,h_dEr, h_distance_r,cut,h_item_particle, cant_samples_r,cant_types,width,height, x, y );
//mando los resultados a gpu
hipMemcpy( d_dEr,h_dEr, s_size, hipMemcpyHostToDevice);
}
else
hipLaunchKernelGGL(( direct_derivative_E_r), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height);
hipLaunchKernelGGL(( E_r_analytic), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Er, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
} else {
// derivative_E_r_analytic<<<dimGrid, dimBlock>>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
hipLaunchKernelGGL(( derivative_E_r), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height);
// if(amberResults){
// /** Calculo la energia E(r) para debug **/
hipLaunchKernelGGL(( E_r), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Er, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height);
// }
}
//
}
// if(amberResults){
//if(!derivative){
/** DEBUG **/
//out << " Lennard-Jones" << endl << " ";
double vdwaals = 0;
double (*h_Er)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
hipMemcpy(h_Er, d_Er, s_size, hipMemcpyDeviceToHost);
for(int i = 0; i<cant_particles; i++){
// out << " " << i << " | ";
for(int j = 0; j<cant_particles; j++){
// out << h_Er[i][j] << "\t";
if(i<=j)
vdwaals += h_Er[i][j];
}
// out << endl << " ";
}
// out << endl;
if(step == 0)
etotinicial= vdwaals;
if(step % 10000 == 0){
etotalX=vdwaals;
// out << " STEP = " << step << endl;
// out << " VDWAALS = " << vdwaals << endl << endl;
}
free(h_Er);
/** DEBUG **/
// }
//}
if(results){
/** DEBUG **/
out << " DERIVATIVES" << endl << " ";
double (*h_dEr)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
hipMemcpy(h_dEr, d_dEr, s_size, hipMemcpyDeviceToHost);
for(int i = 0; i<cant_particles; i+= cant_particles/8){
out << " " << i << " | ";
for(int j = 0; j<cant_particles; j+= cant_particles/8){
out << h_dEr[i][j] << "\t";
}
out << endl << " ";
}
out << endl;
free(h_dEr);
/** DEBUG **/
}
if(results){
/** DEBUG **/
hipMemcpy(h_velocity_old_x, d_velocity_old_x, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_velocity_old_y, d_velocity_old_y, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_velocity_old_z, d_velocity_old_z, cant_particles_size, hipMemcpyDeviceToHost);
out << " OLD VELOCITIES" << endl;
for(int i = 0; i<cant_particles; i++){
out << i+1 << ": (" << h_velocity_old_x[i] << " , " << h_velocity_old_y[i] << " , " << h_velocity_old_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos FUERZAS resultantes */
/* ************************************************ */
/* Fx = dE(r) / dr * (x1-x2) / r *
* Fy = dE(r) / dr * (y1-y2) / r *
* Fz = dE(r) / dr * (z1-z2) / r */
/* Calculo de vectores parciales */
/**Variables y memoria*/
width = cant_particles;
height = cant_particles;
dimGrid.x = ceil((double)width / (double)dimBlock.x);
dimGrid.y = ceil((double)height / (double)dimBlock.y);
/** Calculo del vector F **/
hipLaunchKernelGGL(( Parcial_Forces_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Force_x, d_dEr, d_distance_x, d_distance_r, width, height);
hipLaunchKernelGGL(( Parcial_Forces_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Force_y, d_dEr, d_distance_y, d_distance_r, width, height);
hipLaunchKernelGGL(( Parcial_Forces_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Force_z, d_dEr, d_distance_z, d_distance_r, width, height);
//if(results){
/** DEBUG **/
/*double fuerzaTot=0;
hipMemcpy(h_Force_x, d_Force_x, s_size, hipMemcpyDeviceToHost);
hipMemcpy(h_Force_y, d_Force_y, s_size, hipMemcpyDeviceToHost);
hipMemcpy(h_Force_z, d_Force_z, s_size, hipMemcpyDeviceToHost);
out << " FORCES" << endl << " ";
for(int i = 0; i<cant_particles; i++){
for(int j = 0; j<cant_particles; j++){
if(i<=j)
fuerzaTot+=h_Force_x[i][j] + h_Force_y[i][j] + h_Force_z[i][j];
out << h_Force_x[i][j] << "\n" << h_Force_y[i][j] << "\n" << h_Force_z[i][j] << "\n";
// out << "(" << h_Force_x[i][j] << " , " << h_Force_y[i][j] << " , " << h_Force_z[i][j] << ")\t";
}
out << endl << " ";
}
out << endl;
*/
/** DEBUG **/
//}
// out << "LA SUMA TOTAL DE FUERZAS ES: " << fuerzaTot << endl;
/* Calculo del vector F */
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
hipLaunchKernelGGL(( Resultant_Forces_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Force_x_resultant, d_Force_x, cant_particles);
hipLaunchKernelGGL(( Resultant_Forces_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Force_y_resultant, d_Force_y, cant_particles);
hipLaunchKernelGGL(( Resultant_Forces_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Force_z_resultant, d_Force_z, cant_particles);
// if(results){
/** DEBUG **/
hipMemcpy(h_Force_x_resultant, d_Force_x_resultant, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_Force_y_resultant, d_Force_y_resultant, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_Force_z_resultant, d_Force_z_resultant, cant_particles_size, hipMemcpyDeviceToHost);
//out << " RESULTANT FORCES" << endl;
for(int i = 0; i<cant_particles; i++){
out << h_Force_x_resultant[i] <<"\n" <<h_Force_y_resultant[i] << "\n" << h_Force_z_resultant[i] << endl;
//out << i+1 << ": (" << h_Force_x_resultant[i] << " , " << h_Force_y_resultant[i] << " , " << h_Force_z_resultant[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
// }
/* ************************************************ */
/* Calculamos VELOCIDADES Resultantes */
/* ************************************************ */
/* V(t + Dt/2) = V(t - Dt/2) + [ F(t) * Dt ] / m */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
//out << "dtx= " << delta_tiempo*20.455 << endl;
/** Piso las velocidades acumuladas al tiempo t con las nuevas de t+Dt */
hipLaunchKernelGGL(( Resultant_Velocities_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_velocity_x, d_velocity_old_x, d_Force_x_resultant, d_mass, d_item_particle, delta_tiempo, cant_particles);
hipLaunchKernelGGL(( Resultant_Velocities_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_velocity_y, d_velocity_old_y, d_Force_y_resultant, d_mass, d_item_particle, delta_tiempo, cant_particles);
hipLaunchKernelGGL(( Resultant_Velocities_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_velocity_z, d_velocity_old_z, d_Force_z_resultant, d_mass, d_item_particle, delta_tiempo, cant_particles);
if(results){
/** DEBUG **/
hipMemcpy(h_velocity_x, d_velocity_x, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_velocity_y, d_velocity_y, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_velocity_z, d_velocity_z, cant_particles_size, hipMemcpyDeviceToHost);
out << " RESULTANT VELOCITIES" << endl;
for(int i = 0; i<cant_particles; i++){
out << i+1 << ": (" << h_velocity_x[i] << " , " << h_velocity_y[i] << " , " << h_velocity_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos POSICIONES Resultantes */
/* ************************************************ */
/* P(t + Dt) = P(t) + V(t + Dt/2) * Dt */
/* (TODO: ajustar condiciones de perioricidad */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
hipLaunchKernelGGL(( Resultant_Positions_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_position_x, d_velocity_x, delta_tiempo, cant_particles);
hipLaunchKernelGGL(( Resultant_Positions_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_position_y, d_velocity_y, delta_tiempo, cant_particles);
hipLaunchKernelGGL(( Resultant_Positions_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_position_z, d_velocity_z, delta_tiempo, cant_particles);
if(results){
/** DEBUG **/
hipMemcpy(h_position_x, d_position_x, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_position_y, d_position_y, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_position_z, d_position_z, cant_particles_size, hipMemcpyDeviceToHost);
out << " RESULTANT POSITIONS" << endl;
for(int i = 0; i<cant_particles; i++){
out << i+1 << ": (" << h_particle_type[i] << " (" << h_position_x[i] << " , " << h_position_y[i] << " , " << h_position_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
if(periodicity){
/* ************************************************ */
/* Calculamos POSICIONES con PERIORICIDAD */
/* ************************************************ */
/* P(t + Dt) = P(t) + V(t + Dt/2) * Dt */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
hipLaunchKernelGGL(( Adjustin_Positions_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_position_x, box_max_x, cant_particles);
hipLaunchKernelGGL(( Adjustin_Positions_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_position_y, box_max_y, cant_particles);
hipLaunchKernelGGL(( Adjustin_Positions_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_position_z, box_max_z, cant_particles);
}
if(coordinates){
/** DEBUG **/
hipMemcpy(h_position_x, d_position_x, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_position_y, d_position_y, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_position_z, d_position_z, cant_particles_size, hipMemcpyDeviceToHost);
if(results){
out << " RESULTANT POSITIONS in the CUBE" << endl;
for(int i = 0; i<cant_particles; i++){
out << i+1 << ": (" << h_particle_type[i] << " (" << h_position_x[i] << " , " << h_position_y[i] << " , " << h_position_z[i] << ")" << endl;
}
out << endl;
}
for(int i = 0; i<cant_particles; i+=2){
crd << " " << h_position_x[i] << " " << h_position_y[i] << " " << h_position_z[i];
if(i+1 < cant_particles){
crd << " " << h_position_x[i+1] << " " << h_position_y[i+1] << " " << h_position_z[i+1] << endl;
} else
crd << endl;
}
/** DEBUG **/
}
/* ************************************************ */
 /* Compute the kinetic energy Ek of each particle */
/* ************************************************ */
/* Ek = |vp|^2 * m / 2 con vp = (vold+v)/2 */
/* Ek_x = (v_x)^2 * m / 2 */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
 /** Compute the kinetic energy for the three coordinates of each particle **/
 /** This can be done directly, without computing the vector magnitude, by algebraic properties **/
hipLaunchKernelGGL(( Kinetic_Energy_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_kinetic_energy_x, d_velocity_old_x, d_velocity_x, d_mass, d_item_particle, cant_particles);
hipLaunchKernelGGL(( Kinetic_Energy_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_kinetic_energy_y, d_velocity_old_y, d_velocity_y, d_mass, d_item_particle, cant_particles);
hipLaunchKernelGGL(( Kinetic_Energy_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_kinetic_energy_z, d_velocity_old_z, d_velocity_z, d_mass, d_item_particle, cant_particles);
if(results){
/** DEBUG **/
hipMemcpy(h_kinetic_energy_x, d_kinetic_energy_x, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_kinetic_energy_y, d_kinetic_energy_y, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_kinetic_energy_z, d_kinetic_energy_z, cant_particles_size, hipMemcpyDeviceToHost);
out << " KINETIC ENERGY" << endl;
for(int i = 0; i<cant_particles; i++){
out << " " << i << " | ";
out << i+1 << ": (" << h_kinetic_energy_x[i] << " , " << h_kinetic_energy_y[i] << " , " << h_kinetic_energy_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos Ek Resultante */
/* ************************************************ */
/* Ek_TOT = sum (Ek_i) */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
 /** Compute the total kinetic energy of each particle **/
hipLaunchKernelGGL(( Total_Kinetic_Energy_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_kinetic_energy, d_kinetic_energy_x, d_kinetic_energy_y, d_kinetic_energy_z, cant_particles);
/* */
 /** Compute the total kinetic energy of the system **/
hipMemcpy(h_kinetic_energy, d_kinetic_energy, cant_particles_size, hipMemcpyDeviceToHost);
double Ek_TOT = 0;
for(int i = 0; i<cant_particles; i++){
Ek_TOT += h_kinetic_energy[i];
}
if(results){
/** DEBUG **/
out << " KINETIC ENERGY" << endl;
for(int i = 0; i<cant_particles; i++){
out << " " << i << " | ";
out << " " << h_kinetic_energy[i] << endl;
}
out << endl;
/** DEBUG **/
}
//if(amberResults){
if(step==0)
etotinicial=etotinicial + Ek_TOT;
if (step %10000 == 0){
etotalX=etotalX + Ek_TOT;
diferencia= etotalX - etotinicial;
//out << " Total Kinetic Energy(t) = " << Ek_TOT << endl << endl;
//out << " Diferencia energia total= " << diferencia << endl;
}
// }
/* ************************************************ */
/* Calculamos Temperatura Resultante */
/* ************************************************ */
/* T(t) = 2*Ek_TOT / (Kb*(3N-Nc)) */
double Temp_TOT = Ek_TOT * factor_conv_T_Ek;
//if(amberResults){
/** DEBUG **/
if(step % 10000 == 0)
out << " Temp(t) = " << Temp_TOT << endl << endl;
/** DEBUG **/
// }
/* *********************************************** */
/* Calculamos Factor de Correccion */
/* *********************************************** */
 /* lambda = sqrt( 1 + dt / tautp * (T0/T(t) - 1) ) */
double lambda = sqrt( 1 + delta_tiempo / tautp * (temp0/Temp_TOT -1) );
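 /* This is the Berendsen weak-coupling rescaling factor. Note that, as written,
 * Corrected_Velocities_Kernel only copies v into vold (the line applying lambda is commented
 * out), so the computed lambda is reported but not actually applied to the velocities. */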
if(amberResults){
/** DEBUG **/
out << " lambda(t) = " << lambda << endl << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos Velocidades Corregidas */
/* ************************************************ */
/* vi = lambda * vi */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
/** Piso las velocidades acumuladas al tiempo t+Dt con las nuevas de t+Dt corregidas */
hipLaunchKernelGGL(( Corrected_Velocities_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_velocity_old_x, d_velocity_x, lambda, cant_particles);
hipLaunchKernelGGL(( Corrected_Velocities_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_velocity_old_y, d_velocity_y, lambda, cant_particles);
hipLaunchKernelGGL(( Corrected_Velocities_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_velocity_old_z, d_velocity_z, lambda, cant_particles);
if(results){
/** DEBUG **/
hipMemcpy(h_velocity_x, d_velocity_old_x, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_velocity_y, d_velocity_old_y, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_velocity_z, d_velocity_old_z, cant_particles_size, hipMemcpyDeviceToHost);
out << " CORRECTED RESULTANT VELOCITIES" << endl;
for(int i = 0; i<cant_particles; i++){
out << i << ": (" << h_velocity_x[i] << " , " << h_velocity_y[i] << " , " << h_velocity_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
dimBlock.x = BLOCK_SIZE_X;
dimBlock.y = BLOCK_SIZE_Y;
/* ********************************************************************************************************** */
/* ******************************************* FIN Iteracion DM ********************************************* */
/* ********************************************************************************************************** */
}
if(!results){ //timer mode ON
gettimeofday(&tv2, NULL);
taim << cut << " " << (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec) << endl;
}
// if(!analytic){
/** Unbindeamos Textura y liberamos memoria **/
hipUnbindTexture(texRef);
hipFreeArray(cuLennard_i);
// }
if(results or amberResults){
out.close();
}
if(coordinates){
crd.close();
}
/* ************************************************ */
/* Liberamos memoria en Dispositivo */
/* ************************************************ */
 /* hipFree expects the device pointer itself, not the address of the host variable holding it */
 hipFree(d_item_particle);
 /** Positions **/
 hipFree(d_position_x);
 hipFree(d_position_y);
 hipFree(d_position_z);
 /** Distances **/
 hipFree(d_distance_x);
 hipFree(d_distance_y);
 hipFree(d_distance_z);
 hipFree(d_distance_r);
 /** Particle's mass **/
 hipFree(d_mass);
 /** Velocities **/
 hipFree(d_velocity_x);
 hipFree(d_velocity_y);
 hipFree(d_velocity_z);
 hipFree(d_velocity_old_x);
 hipFree(d_velocity_old_y);
 hipFree(d_velocity_old_z);
 /** Derivatives **/
 hipFree(d_dEr);
 hipFree(d_Er);
 /** Forces **/
 hipFree(d_Force_x);
 hipFree(d_Force_y);
 hipFree(d_Force_z);
hipFree(d_Force_x_resultant);
hipFree(d_Force_y_resultant);
hipFree(d_Force_z_resultant);
/** Kinetic Energy **/
hipFree(d_kinetic_energy);
hipFree(d_kinetic_energy_x);
hipFree(d_kinetic_energy_y);
hipFree(d_kinetic_energy_z);
/* ************************************************ */
/* Liberamos memoria en Host */
/* ************************************************ */
free(h_sigma);
free(h_epsilon);
free(h_mass);
/** Matriz de Lennard Jones **/
if(derivative)
free(h_dLJPot);
else
free(h_LJPot);
free(h_item_particle);
/** Positions **/
free(h_position_x);
free(h_position_y);
free(h_position_z);
/** Distances **/
free(h_distance_x);
free(h_distance_y);
free(h_distance_z);
free(h_distance_r);
/** Velocities **/
free(h_velocity_x);
free(h_velocity_y);
free(h_velocity_z);
/** Chargue **/
free(h_chargue);
/** Forces **/
free(h_Force_x);
free(h_Force_y);
free(h_Force_z);
free(h_Force_x_resultant);
free(h_Force_y_resultant);
free(h_Force_z_resultant);
/** Kinetic Energy **/
free(h_kinetic_energy);
free(h_kinetic_energy_x);
free(h_kinetic_energy_y);
free(h_kinetic_energy_z);
return 0;
}
| c25c4431555de72512f9f180a5eb828127e836ab.cu | /* Filename: main.cu **************************************************************************** /
*
* INPUT:
* -Particulas.in:
* cantParticles
* type x y z Vx Vy Vz q ; where
* dt ; (x,y,z) = posición respecto de algún (0,0,0)
* temp0 ; (Vx,Vy,Vz) = Velocidades iniciales
* tautp ; dt = delta_tiempo
* tempi ; q = carga
* ; temp0 = temperatura target
* ; tempi = temperatura inicial (No se usa aún)
* ; tautp = factor de corrección de velocidades
*
*
*
* -TablaCoeficientesLennard
* type sigma epsilon mass min max ; donde min y max indican de qué valor
* ; a qué valor hay que densificar las muestras
* ; (NO ESTA IMPLEMENTADO AUN)
*
* ALGORITMO:
* 1-Levantar Coeficientes
* 2-Armar matriz de lennard para cant_samples_r muestras
* Para cada tipo de partícula:
* Calcular en funcion de los coeficientes el potencial para cant_samples_r valores r
* 3-Levantar partículas
* Ordenar y armar índices
* Para cada iteración de MD:
* 4-Calcular distancias:
* Cada partícula contra todas las otras
* Armar matriz de distancias
* 5-Calcular las derivadas respecto de r para cada par de partículas
* 6-Calcular fuerza para cada particula:
* Cada partícula contra todas las otras: matriz 3D
* Obtener fuerza resultante para cada partícula: vector 3D
* 7-Calcular nuevas posiciones: vector 3D
*
***************************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <math.h>
#include <vector>
#include <algorithm>
#include <cmath>
#include <string>
#include <iomanip>
#include <sys/time.h>
/** **************************************************************** **/
/** ************* DEFAULT GLOBAL VARIABLES VALUES ****************** **/
#define BLOCK_SIZE_X 32
#define BLOCK_SIZE_Y 16
#define BLOCK_SIZE (BLOCK_SIZE_X*BLOCK_SIZE_Y)
#define TEXTURE_MEM_SIZE 262000
#define DIF_FINITAS_DELTA 4
/** Variables físicas **/
#define CANT_TYPES 37
#define MAx 45
#define MIn 0.001
#define DIST (MAx - MIn)
#define DELTA_TIEMPO 0.0001
#define TEMP 100
#define TAO 0.1
#define BOX_MAX 999 // distancia máxima del 0 para cada coordenada
// Determinamos un cubo de volumen = (2*BOX_MAX) ^3
/** Filenames **/
char* lennardTableFileName = "Input_Mache/TablaCoeficientesLennard";
char* particlesFileName = "Input_Mache/particles.in";
char* debugOutputFilename = "Output_Mache/debug.out";
char* outputFilename = "Output_Mache/results.out";
char* crdFilename = "Output_Mache/mdcrd";
char* timeFilename = "Output_Mache/times.out";
using namespace std;
// streamsize ss = cout.precision();
/** **************************************************************** **/
/** ******************** GLOBAL VARIABLES ************************** **/
texture <float, cudaTextureType2D,cudaReadModeElementType> texRef;
double delta_tiempo = DELTA_TIEMPO;
double temp0 = TEMP;
double tempi;
double tautp = TAO;
double Boltzmann_cte = 0.0019872041;
double box_max_x = BOX_MAX;
double box_max_y = BOX_MAX;
double box_max_z = BOX_MAX;
bool box = true;
double cut = 12;
int cant_steps = 1;
int cant_types = CANT_TYPES;
bool CPU=false;
bool derivative = false;
bool analytic = false;
bool results = false;
bool amberResults = false;
bool coordinates = false;
bool periodicity = false;
/** **************************************************************** **/
/** ************************* DEVICE ******************************* **/
/**
* RECIBE UN VALOR DE EPSILON Y SIGMA (e,s) Y EL ARREGLO CON TODOS LOS DEMAS VALORES (* EPS,* SIG)
* GUARDA EL POTENCIAL(EN LJ_POT) DE e,s VS TODOS LOS VALORES DE EPS Y SIG
*/
__global__
void lennard_Kernel(float* LJ_POT, double* EPS, double* SIG,
double e, double s, double var, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x >= width || y >= height) {return;}
/* Variables */
double sig12 = (double) (s + SIG[y])/2;
double eps12 = (double) sqrt(e * EPS[y]);
double r = (double) MIn+x*var;
/* Resultado */
LJ_POT[y*width +x] = (float) 4.0*eps12*( pow((sig12/r),12) - pow((sig12/r),6));
}
/** **************************************************************** **/
/**
* RECIBE UN VALOR DE EPSILON Y SIGMA (e,s) Y EL ARREGLO CON TODOS LOS DEMAS VALORES (* EPS,* SIG)
* GUARDA LA DERIVADA DEL POTENCIAL(EN dLJ_POT) DE e,s VS TODOS LOS VALORES DE EPS Y SIG
*/
__global__
void derivatives_lennard_Kernel(float* dLJ_POT, double* EPS, double* SIG,
double e, double s, double var, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x >= width || y >= height) {return;}
/* Variables */
double sig12 = (double) (s + SIG[y])/2;
double eps12 = (double) sqrt(e * EPS[y]);
double r = (double) MIn+x*var;
/* Resultado */
dLJ_POT[y*width +x] = (float) 24.0*eps12*( pow(sig12,6)/ pow(r,7) - 2 * pow(sig12,12)/ pow(r,13));
}
/** **************************************************************** **/
__global__
void close_distances_kernel(double* X, double* Y, double* Z, double* R,
double* position_x, double* position_y, double* position_z,
double box_x, double box_y, double box_z, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i >= width || j >= height) {return;}
unsigned int pos = j*width+i;
double _X = position_x[i] - position_x[j];
double _Y = position_y[i] - position_y[j];
double _Z = position_z[i] - position_z[j];
_X = _X - box_x * round((double) _X/box_x);
_Y = _Y - box_y * round((double) _Y/box_y);
_Z = _Z - box_z * round((double) _Z/box_z);
X[pos] = _X;
Y[pos] = _Y;
Z[pos] = _Z;
R[pos] = (double) sqrt( _X*_X + _Y*_Y + _Z*_Z );
}
/** **************************************************************** **/
__global__
void distances_kernel(double* R, double* X, double* Y, double* Z,
double* x1, double* y1, double* z1, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x >= width || y >= height) {return;}
double x_ = x1[x] - x1[y];
double y_ = y1[x] - y1[y];
double z_ = z1[x] - z1[y];
X[y*width+x] = x_;
Y[y*width+x] = y_;
Z[y*width+x] = z_;
R[y*width+x] = (double) sqrt( x_*x_ + y_*y_ + z_*z_ );
}
/** **************************************************************** **/
__global__
void derivative_E_r(double* dEr, double* r, double cut, int* item_to_type,
int cant_samples_r, int cant_types, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/
/* Dentro del bloque correspondiente */
if(x >= width || y >= height) {return;}
if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;}
/* valor del Potencial para la distancia r,
* para el tipo de partícula correspondiente */
/** type of particles **/
float t_o_p_1 = (float) item_to_type[y] * cant_types; //this one decides which subMatrix to use
float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1; //this one decides which row on these
/** Convierto r a subíndice de matriz de lennard-jones **/
/** r = (MAX-MIN) * X / N + MIN **/
/** x = (r-MIN) * N / (MAX-MIN) **/
float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
/*
double rposta=r[y*width+x];
if(rposta> MAx)
rposta=MAx;
else
if(rposta<MIn)
rposta=MIn;
float index_x = (float)((double) (rposta - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
*/
double E_r_up = (double) tex2D( texRef, index_x + DIF_FINITAS_DELTA, t_o_p_2 );
double E_r_dwn = (double) tex2D( texRef, index_x - DIF_FINITAS_DELTA, t_o_p_2 );
double r_dif = DIST * 2 * (DIF_FINITAS_DELTA) / cant_samples_r;
dEr[y*width+x] = (E_r_up - E_r_dwn) / (r_dif);
}
/** **************************************************************** **/
/*********************************************
* ESTA FUNCION RECIBE CALCULA dER A PARTIR DE LOS DATOS TABULADOS DE LA MATRIZ EN MEMORIA (dHLJPot)
* *****************************************/
void direct_derivative_E_r_MEMORY(float* dLJPot, double* dEr, double* r, int* item_to_type,
int cant_samples_r, int cant_types, int width, int height, int x, int y)
{
/* Elemento de la matriz a calcular */
//unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/
//unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/
/* Dentro del bloque correspondiente */
//if(x >= width || y >= height) {return;}
if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;}
/* valor del Potencial para la distancia r,
* para el tipo de partícula correspondiente */
/** type of particles **/
//float t_o_p_1 = (float) item_to_type[y] * cant_types; //this one decides which subMatrix to use
//float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1; //this one decides which row on these
float t_o_p_1 = (float) item_to_type[y] * cant_types; //this one decides which subMatrix to use
float t_o_p_2 = (float) item_to_type[x] + t_o_p_1; //this one decides which row on these
float posInicial=t_o_p_2 * cant_samples_r; //comienzo de la fila??
/** Convierto r a subíndice de matriz de lennard-jones **/
/** r = (MAX-MIN) * X / N + MIN **/
/** x = (r-MIN) * N / (MAX-MIN) **/
//float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
int index=0;
double r=r[y*width+x];
if(r> MAx)
dEr[y*width+x]=dLJPot[posInicial+cant_samples_r -1];
else
if(r<MIn)
dEr[y*width+x]=dLJPot[posInicial];
else
dEr[y*width+x]=dLJPot[posInicial+round((r-min)*(cant_samples_r/DIST))];
//dEr[y*width+x] = (double) tex2D( texRef, index_x, t_o_p_2 );
}
//******************************************************
__global__
void direct_derivative_E_r(double* dEr, double* r, double cut, int* item_to_type,
int cant_samples_r, int cant_types, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/
/* Dentro del bloque correspondiente */
if(x >= width || y >= height) {return;}
if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;}
/* valor del Potencial para la distancia r,
* para el tipo de partícula correspondiente */
/** type of particles **/
float t_o_p_1 = (float) item_to_type[y] * cant_types; //this one decides which subMatrix to use
float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1; //this one decides which row on these
/** Convierto r a subíndice de matriz de lennard-jones **/
/** r = (MAX-MIN) * X / N + MIN **/
/** x = (r-MIN) * N / (MAX-MIN) **/
float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
/* double rposta=r[y*width+x];
if(rposta> MAx)
rposta=MAx;
else
if(rposta<MIn)
rposta=MIn;
float index_x = (float)((double) (rposta - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
*/
dEr[y*width+x] = (double) tex2D( texRef, index_x, t_o_p_2 );
}
/** **************************************************************** **/
__global__
void E_r(double* Er, double* r, double cut, int* item_to_type,
int cant_samples_r, int cant_types, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/
/* Dentro del bloque correspondiente */
if(x >= width || y >= height) {return;}
if(x == y || r[y*width+x] >= cut) {Er[y*width+x] = 0; return;}
/* valor del Potencial para la distancia r,
* para el tipo de partícula correspondiente */
/** type of particles **/
float t_o_p_1 = (float) item_to_type[y]; //this one decides which subMatrix to use
float t_o_p_2 = (float) item_to_type[x]; //this one decides which row on these
float row = t_o_p_2 + 0.5 + (t_o_p_1* cant_types);
/** Convierto r a subíndice de matriz de lennard-jones **/
/** r = (MAX-MIN) * X / N + MIN **/
/** x = (r-MIN) * N / (MAX-MIN) **/
float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
/*
double rposta=r[y*width+x];
if(rposta> MAx)
rposta=MAx;
else
if(rposta<MIn)
rposta=MIn;
float index_x = (float)((double) (rposta - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
*/
Er[y*width+x] = (double) tex2D( texRef, index_x, row );
}
/* ***************************************************************** **/
/** +ANALYTIC */
/** **************************************************************** **/
__global__
void derivative_E_r_analytic(double* dEr, double* r, double cut, int* item_to_type, int cant_samples_r,
double* EPS, double* SIG, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/
/* Dentro del bloque correspondiente */
if(x >= width || y >= height) {return;}
if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;}
/* valor del Potencial para la distancia r,
* para el tipo de partícula correspondiente */
/** type of particle 2 **/
int type_i = item_to_type[x];
int type_j = item_to_type[y];
double sig12 = (double) (SIG[type_i] + SIG[type_j])/2;
double eps12 = (double) sqrt(EPS[type_i] * EPS[type_j]);
dEr[y*width+x] = (double) 24.0*eps12*( pow(sig12,6)/ pow(r[y*width+x],7) - 2 * pow(sig12,12)/ pow(r[y*width+x],13));
}
__global__
void E_r_analytic(double* Er, double* r, double cut, int* item_to_type, int cant_samples_r,
double* EPS, double* SIG, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/
/* Dentro del bloque correspondiente */
if(x >= width || y >= height) {return;}
if(x == y || r[y*width+x] >= cut) {Er[y*width+x] = 0; return;}
/* valor del Potencial para la distancia r,
* para el tipo de partícula correspondiente */
/** type of particle 2 **/
int type_i = item_to_type[x];
int type_j = item_to_type[y];
double sig12 = (double) (SIG[type_i] + SIG[type_j])/2;
double eps12 = (double) sqrt(EPS[type_i] * EPS[type_j]);
Er[y*width+x] = (double) 4.0*eps12*( pow((sig12/r[y*width+x]),12) - pow((sig12/r[y*width+x]),6));
}
/** **************************************************************** **/
/** -ANALYTIC */
/* ***************************************************************** **/
/** **************************************************************** **/
/* Fx = dE(r) / dr * (x1-x2) / r */
__global__
void Parcial_Forces_Kernel(double* force, double* dEr, double* dif, double* r, int width, int height)
{
/* Elemento de la matriz a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x >= width || y >= height) {return;}
if(x == y) {force[y*width+x] = 0; return;}
//force[y*width+x] = dEr[y*width+x] * dif[y*width+x] ;
force[y*width+x] = dEr[y*width+x] * dif[y*width+x] / r[y*width+x];
}
/** **************************************************************** **/
__global__
void Resultant_Forces_Kernel(double* result, double* forces, int cant)
{
/* Elemento del vector a calcular */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if(x >= cant) {return;}
int i = 0;
double tmp = 0;
int row = x*cant;
for(; i < cant; i++){
tmp += forces[row + i];
}
result[x] = tmp;
}
/** **************************************************************** **/
/* V(t + Dt/2) = V(t - Dt/2) + [ F(t) * Dt ] / m */
__global__
void Resultant_Velocities_Kernel(double* velocity, double* old_velocity, double* force, double* m,
int* item_to_type, double delta_tiempo, int cant_particles)
{
/* Elemento de la matriz a calcular */
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= cant_particles) {return;}
double Vt = old_velocity[i];
int type = item_to_type[i];
double dtx = delta_tiempo*20.454999999999;
//double dtx=delta_tiempo;
/* Result */
velocity[i] = Vt + ( (force[i]*dtx) / m[type] );
}
/** **************************************************************** **/
/* P(t + Dt) = P(t) + V(t + Dt/2) * Dt */
__global__
void Resultant_Positions_Kernel(double* positions, double* velocity, double delta_tiempo, int cant)
{
/* Elemento del vector a calcular */
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= cant) {return;}
double dtx = delta_tiempo*20.454999999999;
//double dtx=delta_tiempo;
positions[i] = positions[i] + (velocity[i] * dtx);
}
/** **************************************************************** **/
/* -BOX_MAX 0 BOX_MAX */
/* |-----------------|-----------------| */
__global__
void Adjustin_Positions_Kernel(double* position, double box_max, int cant)
{
/* Elemento del vector a calcular */
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= cant) {return;}
double pos = position[i] - box_max;
if(pos > 0){
position[i] = -box_max + fmod(pos, (double) (2*box_max));
}
if(pos < -2*box_max){
position[i] = box_max + fmod(pos, (double) (2*box_max));
}
}
/** **************************************************************** **/
/* Ek = |v|^2 * m / 2 */
/* Ek_x = (v_x)^2 * m / 2 */
__global__
void Kinetic_Energy_Kernel(double* kE, double* vold, double* v, double* m, int* item_to_type, int cant)
{
/* Elemento del vector a calcular */
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i>= cant) {return;}
double vi = vold[i] + v[i];
// double vi=v[i];
int type = item_to_type[i];
// kE[i] = vi * vi * m[type] / 2;
kE[i] = vi * vi * m[type] / 8;
}
/** **************************************************************** **/
__global__
void Total_Kinetic_Energy_Kernel(double* kE, double* Ke_x, double* Ke_y, double* Ke_z, int cant)
{
/* Elemento del vector a calcular */
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i>= cant) {return;}
kE[i] = Ke_x[i] + Ke_y[i] + Ke_z[i];
}
/** **************************************************************** **/
__global__
void Corrected_Velocities_Kernel(double* vold, double* v, double lambda, int cant){
/* Elemento del vector a calcular */
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i>= cant) {return;}
vold[i] = v[i];
//vold[i] = v[i] * lambda;
}
/** **************************************************************** **/
/** *************************** HOST ******************************* **/
int main( int argc, char* argv[] )
{
//cudaSetDevice(1);
//PROCESO LOS PARAMETROS DE ENTRADA
for(uint i = 0; i < argc; i++){
if(strcmp(argv[i], "-t") == 0){
/* outputTimeFilename */
timeFilename = argv[i+1];
}
if(strcmp(argv[i], "-a") == 0){
/* ANALYTIC mode */
analytic = true;
}
if(strcmp(argv[i], "-d") == 0){
/* DERIVATIVE mode */
derivative = true;
}
if(strcmp(argv[i], "-r") == 0){
/* RESULTS or TIMER mode */
results = true;
amberResults = true;
}
if(strcmp(argv[i], "-ar") == 0){
/* RESULTS */
amberResults = true;
}
if(strcmp(argv[i], "-c") == 0){
/* PRINT mdcrd file */
coordinates = true;
}
if(strcmp(argv[i], "-p") == 0){
/* Periodicity */
periodicity = true;
}
if(strcmp(argv[i], "-cpu") == 0){
/* Periodicity */
CPU = true;
}
}
//IMPRIMO QUE CARAJO ESTOY EJECUTANDO
if (derivative)
cout << "Derivative" << endl;
if (analytic)
cout << "Analytic" << endl;
if(results){
cout << "DEBUG mode ON" << endl;
}
if(amberResults){
cout << "AMBER results ON" << endl;
}
//CONFIGURAR OUTPUT
fstream out;
fstream crd;
//if(results or amberResults){
/* Output file */
out.open(outputFilename,fstream::out);
streamsize ss = out.precision();
out << setprecision(20);
//}
if(coordinates){
/* CRD output file */
crd.open(crdFilename,fstream::out);
crd << setprecision(3);
crd.setf( std::ios::fixed, std:: ios::floatfield );
crd << " POS(x) POS(y) POS(z)" << endl;
}
struct timeval tv1, tv2;
fstream taim;
if(!results){ //timer mode ON
/* Time output file */
taim.open(timeFilename, fstream::app | fstream::out);
taim << setprecision(20);
}
/* Levantamos Coeficientes de Lennard */
ifstream table (lennardTableFileName);
table >> cant_types;
/**Variables y memoria*/
size_t cant_types_size = cant_types * sizeof(double);
vector<string> h_type;
h_type.resize(cant_types);
double* h_sigma = (double*) ( malloc(cant_types_size));
double* h_epsilon = (double*) ( malloc(cant_types_size));
double* h_mass = (double*) ( malloc(cant_types_size));
/**Levantamos datos*/
for(int j = 0; j<cant_types ; j++){
table >> h_type[j];
table >> h_sigma[j];
table >> h_epsilon[j];
table >> h_mass[j];
}
table.close();
// *****************
//VARIABLES PARA GUARDAR ENERGIA TOTAL
double diferencia, etotalX , etotinicial;
//******************************
/*******************************/
/*Armamos matrices de lennard */
/******************************/
/**Variables y memoria**/
int cant_samples_r = TEXTURE_MEM_SIZE/(sizeof(float)); // cant of original sample values (máximo permitido por mem de textura)
double var = DIST / ((double) cant_samples_r); // variation of r
size_t cant_samples_r_size = cant_samples_r * sizeof(float);
float* h_dLJPot;
float* h_LJPot;
if(derivative)
h_dLJPot = (float*) malloc(cant_samples_r_size*cant_types*cant_types); // #samples * #particles * #particles (*float)
else
h_LJPot = (float*) malloc(cant_samples_r_size*cant_types*cant_types); // #samples * #particles * #particles (*float)
int width = cant_samples_r;
int height = cant_types;
dim3 dimBlock(BLOCK_SIZE_X,BLOCK_SIZE_Y);
dim3 dimGrid( (int) ceil((double)width / (double)dimBlock.x), (int) ceil((double)height / (double)dimBlock.y) );
double* d_EPS; //ARRAY PARA TODOS LOS VALORES DE EPSILON
double* d_SIG; //ARRAY PARA TODOS LOS VALORES DE SIGMA
float* d_LJPot;
float* d_dLJPot;
cudaMalloc(&d_EPS, cant_types_size);
cudaMalloc(&d_SIG, cant_types_size);
cudaMemcpy(d_EPS, h_epsilon, cant_types_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_SIG, h_sigma, cant_types_size, cudaMemcpyHostToDevice);
if(derivative)
cudaMalloc(&d_dLJPot, cant_samples_r_size * cant_types);
else
cudaMalloc(&d_LJPot, cant_samples_r_size * cant_types);
/** Rellenamos datos con CUDA **/
//CANTIDAD TOTAL DE THREADS: EN X=cant_samples_r EN Y=cant_types
if(derivative) { //LLENO LA TEXTURA CON LAS DERIVADAS PARA CADA PAR DE TIPOS
for(int a = 0; a<cant_types; a++){
derivatives_lennard_Kernel<<<dimGrid, dimBlock>>>(d_dLJPot, d_EPS, d_SIG, h_epsilon[a], h_sigma[a], var, width, height);
cudaMemcpy( (float*) &(h_dLJPot[(a*cant_samples_r*cant_types)]), d_dLJPot, cant_types * cant_samples_r_size, cudaMemcpyDeviceToHost);
}
} else {
//LLENO LA TEXTURA CON LAS DERIVADAS DEL POTENCIAL PARA CADA PAR DE TIPOS
for(int a = 0; a<cant_types; a++){
lennard_Kernel<<<dimGrid, dimBlock>>>(d_LJPot, d_EPS, d_SIG, h_epsilon[a], h_sigma[a], var, width, height);
cudaMemcpy( (float*) &(h_LJPot[(a*cant_samples_r*cant_types)]), d_LJPot, cant_types * cant_samples_r_size, cudaMemcpyDeviceToHost);
}
}
/** Liberamos memoria de CUDA **/
cudaFree(&d_EPS);
cudaFree(&d_SIG);
cudaFree(&d_LJPot);
/** DEBUG **/
if(results){
if(derivative)
out << " derivative LENNARD " << endl;
else
out << " LENNARD " << endl;
for(int a = 0; a<cant_types; a++){
out << " Type = " << h_type[a] << endl << " ";
for(int i = 0; i<cant_types; i++){
for(int j = 0; j<cant_samples_r; j+= cant_samples_r/8){
if(derivative)
out << h_dLJPot[(a*cant_types*cant_samples_r)+(i*cant_samples_r)+j] << ", ";
else
out << h_LJPot[(a*cant_types*cant_samples_r)+(i*cant_samples_r)+j] << ", ";
}
out << endl << " ";
}
out << "***********************************************************************************" << endl;
}
}
/*Levantamos partículas*/
fstream particles;
particles.open(particlesFileName);
/** Variables y memoria **/
uint cant_particles;
double* h_position_x;
double* h_position_y;
double* h_position_z;
double* h_velocity_x;
double* h_velocity_y;
double* h_velocity_z;
double* h_velocity_old_x;
double* h_velocity_old_y;
double* h_velocity_old_z;
double* h_chargue;
double h_box_x;
double h_box_y;
double h_box_z;
double h_box_alpha;
double h_box_beta;
double h_box_gamma;
vector<string> h_particle_type;
particles >> cant_particles; //PRIMER LINEA DE particles.in ES EL NUMERO DE PARTICULAS QUE HAY
size_t cant_particles_size = cant_particles * sizeof(double);
h_position_x = (double*)malloc(cant_particles_size);
h_position_y = (double*)malloc(cant_particles_size);
h_position_z = (double*)malloc(cant_particles_size);
h_velocity_x = (double*)malloc(cant_particles_size);
h_velocity_y = (double*)malloc(cant_particles_size);
h_velocity_z = (double*)malloc(cant_particles_size);
h_velocity_old_x = (double*)malloc(cant_particles_size);
h_velocity_old_y = (double*)malloc(cant_particles_size);
h_velocity_old_z = (double*)malloc(cant_particles_size);
h_chargue = (double*)malloc(cant_particles_size);
h_particle_type.resize(cant_particles);
/** Guardamos datos en memoria : coordenadas, velocidades, tipos, cargas **/
for(uint i = 0; i < cant_particles ; i++) {
particles >> h_particle_type[i];
particles >> h_position_x[i];
particles >> h_position_y[i];
particles >> h_position_z[i];
particles >> h_velocity_old_x[i];
particles >> h_velocity_old_y[i];
particles >> h_velocity_old_z[i];
particles >> h_chargue[i];
}
/** Perioricidad **/
//TODO: por ahora usamos cubo,
//situamos el cero en el centro del mismo
//Recibimos en orden x, y, z
particles >> box;
if(box){
cout << " Levantamos caja" << endl;
particles >> h_box_x;
particles >> h_box_y;
particles >> h_box_z;
particles >> h_box_alpha;
particles >> h_box_beta;
particles >> h_box_gamma;
if( h_box_alpha != 90 or h_box_beta != 90 or h_box_gamma != 90){
cout << " Se forzaron los angulos para que sea un CUBO: " << endl;
}
box_max_x = h_box_x/2;
box_max_y = h_box_y/2;
box_max_z = h_box_z/2;
}
/** Parametros **/
particles >> cant_steps;
particles >> delta_tiempo;
particles >> temp0;
particles >> tempi;
particles >> tautp;
particles >> cut;
particles.close();
// if(results){
// /** DEBUG **/
// out << " INITIAL VALUES" << endl;
// for(int i = 0; i<cant_particles; i++){
// out << " Type: " << h_particle_type[i] << " | Pos: (" << h_position_x[i] << " , " << h_position_y[i] << " , " << h_position_z[i] << ")";
// out << " | Vel: (" << h_velocity_old_x[i] << " , " << h_velocity_old_y[i] << " , " << h_velocity_old_z[i] << ")" << endl;
// }
// out << endl;
//
// /** DEBUG **/
// }
// if(results){
// /** DEBUG **/
// out << " CANT of TYPES" << endl;
// for(int i = 0; i < h_type.size(); i++){
// out << " " << h_type[i] << " " << cant_of_typ[i] << endl;
// }
// out << endl;
/** DEBUG **/
// }
/* Armamos estructura de items para saber de qué tipo
/* es la partícula en la que estamos en CUDA */
/** h_particle_type = H H H H H K K K K K O O O O O O O O O ... **/
/** h_item_particle = 1 1 1 1 1 3 3 3 3 3 9 9 9 9 9 9 9 9 9 ... **/
//ARMO UN ARRAY CON EL TIPO DE CADA PARTICULA
int * h_item_particle = (int*)malloc(cant_particles * sizeof(int));
int * d_item_particle;
cudaMalloc(&d_item_particle, cant_particles * sizeof(int));
/** Convertimos anotamos type de la partícula como un int que sería el index dentro de h_type **/
for(int i = 0; i< cant_particles; i++){
for(int j = 0; j< h_type.size(); j++){
if(h_type[j] == h_particle_type[i]){
h_item_particle[i] = j;
break;
}
}
}
cudaMemcpy(d_item_particle, h_item_particle, cant_particles * sizeof(int), cudaMemcpyHostToDevice);
// if(results){
// /** DEBUG **/
// out << " ITEM to TYPE" << endl;
// for(int i = 0; i < cant_particles; i++){
// out << " Particle[" << i << "] | Type: " << h_type[h_item_particle[i]] << " (index :" << h_item_particle[i] << ") " << endl;
// }
// out << endl;
// /** DEBUG **/
// }
/* ************************************************ */
/* MANEJO DE MEMORIA EN EL DISPOSITIVO GPU */
/* ************************************************ */
/** Variables **/
size_t s_size = cant_particles_size * cant_particles;
/** Positions **/
double* d_position_x;
double* d_position_y;
double* d_position_z;
cudaMalloc(&d_position_x, cant_particles_size);
cudaMalloc(&d_position_y, cant_particles_size);
cudaMalloc(&d_position_z, cant_particles_size);
cudaMemcpy(d_position_x, h_position_x, cant_particles_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_position_y, h_position_y, cant_particles_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_position_z, h_position_z, cant_particles_size, cudaMemcpyHostToDevice);
/** Positions **/
double* d_pos_close_x;
double* d_pos_close_y;
double* d_pos_close_z;
cudaMalloc(&d_pos_close_x, cant_particles_size);
cudaMalloc(&d_pos_close_y, cant_particles_size);
cudaMalloc(&d_pos_close_z, cant_particles_size);
/** Particle's mass **/
double* d_mass;
cudaMalloc(&d_mass, cant_types_size);
cudaMemcpy(d_mass, h_mass, cant_types_size, cudaMemcpyHostToDevice);
/** Velocities **/
double* d_velocity_x;
double* d_velocity_y;
double* d_velocity_z;
double* d_velocity_old_x;
double* d_velocity_old_y;
double* d_velocity_old_z;
cudaMalloc(&d_velocity_x, cant_particles_size);
cudaMalloc(&d_velocity_y, cant_particles_size);
cudaMalloc(&d_velocity_z, cant_particles_size);
cudaMalloc(&d_velocity_old_x, cant_particles_size);
cudaMalloc(&d_velocity_old_y, cant_particles_size);
cudaMalloc(&d_velocity_old_z, cant_particles_size);
cudaMemcpy(d_velocity_old_x, h_velocity_old_x, cant_particles_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_velocity_old_y, h_velocity_old_y, cant_particles_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_velocity_old_z, h_velocity_old_z, cant_particles_size, cudaMemcpyHostToDevice);
/** Distances **/
double* d_distance_x;
double* d_distance_y;
double* d_distance_z;
double* d_distance_r;
cudaMalloc(&d_distance_x, s_size);
cudaMalloc(&d_distance_y, s_size);
cudaMalloc(&d_distance_z, s_size);
cudaMalloc(&d_distance_r, s_size);
/** Derivatives **/
double* d_dEr;
cudaMalloc(&d_dEr, s_size);
/** VDWAALS **/
double* d_Er;
cudaMalloc(&d_Er, s_size);
/** Forces **/
double* d_Force_x;
double* d_Force_y;
double* d_Force_z;
cudaMalloc(&d_Force_x, s_size);
cudaMalloc(&d_Force_y, s_size);
cudaMalloc(&d_Force_z, s_size);
double* d_Force_x_resultant;
double* d_Force_y_resultant;
double* d_Force_z_resultant;
cudaMalloc(&d_Force_x_resultant, cant_particles_size);
cudaMalloc(&d_Force_y_resultant, cant_particles_size);
cudaMalloc(&d_Force_z_resultant, cant_particles_size);
/** Kinetic Energy **/
double* d_kinetic_energy;
double* d_kinetic_energy_x;
double* d_kinetic_energy_y;
double* d_kinetic_energy_z;
cudaMalloc(&d_kinetic_energy, cant_particles_size);
cudaMalloc(&d_kinetic_energy_x, cant_particles_size);
cudaMalloc(&d_kinetic_energy_y, cant_particles_size);
cudaMalloc(&d_kinetic_energy_z, cant_particles_size);
/* ************************************************ */
/* MANEJO DE MEMORIA EN EL HOST */
/* ************************************************ */
/** Distances **/
double (*h_distance_x)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_distance_y)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_distance_z)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_distance_r)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
/** Forces **/
double (*h_Force_x)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_Force_y)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_Force_z)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double* h_Force_x_resultant = (double*)malloc(cant_particles_size);
double* h_Force_y_resultant = (double*)malloc(cant_particles_size);
double* h_Force_z_resultant = (double*)malloc(cant_particles_size);
/** Kinetic Energy **/
double* h_kinetic_energy = (double*)malloc(cant_particles_size);
double* h_kinetic_energy_x = (double*)malloc(cant_particles_size);
double* h_kinetic_energy_y = (double*)malloc(cant_particles_size);
double* h_kinetic_energy_z = (double*)malloc(cant_particles_size);
/* ************************************************ */
/* Calculamos ENERGIA CINETICA deseada */
/* ************************************************ */
/* Ek = Kb * T (3N - Nc) / 2 */
double Nc = 5;
double factor_conv_T_Ek = 2 / (Boltzmann_cte * (3 *cant_particles - Nc) );
if(amberResults){
double kinetic_Energy = Boltzmann_cte * temp0 * (3*cant_particles - Nc) / 2;
/** DEBUG **/
out << " THEORETICAL VALUES:" << endl << endl;
out << " * Kb = " << Boltzmann_cte << endl << endl;
out << " * Temperature = " << temp0 << endl << endl;
out << " * Kinetic Energy = " << kinetic_Energy << endl << endl;
out << " * Factor_conv_T_Ek = " << factor_conv_T_Ek << endl << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Seteamos la memoria de TEXTURA */
/* ************************************************ */
cudaArray* cuLennard_i;
// if(!analytic){
/** Usamos texturas **/
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc( 32, 0, 0, 0, cudaChannelFormatKindFloat );
cudaMallocArray(&cuLennard_i, &channelDesc, cant_samples_r, cant_types*cant_types); //width x height
texRef.addressMode[0] = cudaAddressModeClamp;
//texRef.addressMode[0] = cudaAddressModeBorder;
texRef.filterMode = cudaFilterModeLinear; //cudaFilterModePoint; // //Tipo de interpolación
if(derivative) {
cudaMemcpyToArray(cuLennard_i, 0, 0, h_dLJPot, cant_types * cant_types * cant_samples_r_size, cudaMemcpyHostToDevice);
} else {
cudaMemcpyToArray(cuLennard_i, 0, 0, h_LJPot, cant_types * cant_types * cant_samples_r_size, cudaMemcpyHostToDevice);
}
/** Bindeamos la textura **/
cudaBindTextureToArray(texRef, cuLennard_i, channelDesc);
// }
if(amberResults){
out << endl << " ESTARTIN DE PROGRAM" << endl;
out << " Amaunt of itereishons = " << cant_steps << endl << endl;
}
// for(int i=0 ; i<1000000 ; i++){
// for(int j=0 ; j<1000 ; j++){
//}
// }
/** Esperamos a que termine de bindear la textura **/
cudaDeviceSynchronize();
if(!results){ //timer mode ON
/** Arrancamos medicion del tiempo **/
gettimeofday(&tv1, NULL);
}
for(int step = 0; step < cant_steps; step++){
/* ********************************************************************************************************** */
/* ****************************************** INICIO Iteracion DM ******************************************* */
/* ********************************************************************************************************** */
if(amberResults){
out << "/* ************************************************************************************************ */" << endl;
out << "/* ************************************* INICIO Iteracion " << step << " ************************************ */" << endl;
out << "/* ************************************************************************************************ */" << endl;
}
dimBlock.x = BLOCK_SIZE_X;
dimBlock.y = BLOCK_SIZE_Y;
/* ************************************************ */
/* Calculamos Matriz de Distancias entre partículas */
/* ************************************************ */
/**Variables y memoria*/
width = cant_particles;
height = cant_particles;
dimGrid.x = ceil((double)width / (double)dimBlock.x);
dimGrid.y = ceil((double)height / (double)dimBlock.y);
if(!periodicity){
distances_kernel<<<dimGrid, dimBlock>>>(d_distance_r, d_distance_x, d_distance_y, d_distance_z,
d_position_x, d_position_y, d_position_z, width, height);
} else {
/**Rellenamos datos**/
close_distances_kernel<<<dimGrid, dimBlock>>>(d_distance_x, d_distance_y, d_distance_z, d_distance_r,
d_position_x, d_position_y, d_position_z,
h_box_x, h_box_y, h_box_z, width, height);
}
//TRAIGO AL HOST LAS DISTANCIAS PORQUE LAS VOY A NECESITAR PARA HACER EL CALCULO DE dEr EN CPU
if (CPU)
cudaMemcpy(h_distance_r, d_distance_r, s_size, cudaMemcpyDeviceToHost);
//if(results){
/** DEBUG **/
/*cudaMemcpy(h_distance_r, d_distance_r, s_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_distance_x, d_distance_x, s_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_distance_y, d_distance_y, s_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_distance_z, d_distance_z, s_size, cudaMemcpyDeviceToHost);
if (step %10000 == 0){
out << " DISTANCES - R" << endl << " ";
for(int i = 0; i<cant_particles; i++){
out << " " << i << " | ";
for(int j = 0; j<cant_particles; j++){
out << h_distance_r[i][j] << "\t";
}
out << endl << " ";
}
out << endl;
}*/
/*
out << " DISTANCES - X" << endl << " ";
for(int i = 0; i<cant_particles; i++){
out << " " << i << " | ";
for(int j = 0; j<cant_particles; j++){
out << h_distance_x[i][j] << "\t";
}
out << endl << " ";
}
out << endl;
out << " DISTANCES - Y" << endl << " ";
for(int i = 0; i<cant_particles; i++){
out << " " << i << " | ";
for(int j = 0; j<cant_particles; j++){
out << h_distance_y[i][j] << "\t";
}
out << endl << " ";
}
out << endl;
out << " DISTANCES - Z" << endl << " ";
for(int i = 0; i<cant_particles; i++){
out << " " << i << " | ";
for(int j = 0; j<cant_particles; j++){
out << h_distance_z[i][j] << "\t";
}
out << endl << " ";
}
out << endl;
*/
/* double (*matriz)[cant_particles] = (double (*)[cant_particles]) h_distance_r;
for(int i = 0; i<cant_particles; i+= cant_particles){
out << " " << i << " | ";
for(int j = 0; j<cant_particles; j+= cant_particles){
out << matriz[i][j] << "\t";
}
out << endl << " ";
}
out << endl;
*/
/** DEBUG **/
//}
if(CPU)
double (*h_dEr)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
/* ************************************************ */
/* Calculamos Derivadas */
/* ************************************************ */
/** Variables y memoria **/
width = cant_particles;
height = cant_particles;
dimGrid.x = ceil((double)width / (double)dimBlock.x);
dimGrid.y = ceil((double)height / (double)dimBlock.y);
// derivative_E_r_analytic<<<dimGrid, dimBlock>>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
if(analytic){
derivative_E_r_analytic<<<dimGrid, dimBlock>>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
if(CPU) //VERSION ANALITICA SOBRE CPU
derivative_E_r_analytic_MEMORY(h_dEr, h_distance_r, cut, h_item_particle, cant_samples_r, h_EPS, h_SIG, width, height);
// if(amberResults){
// /** Calculo la energia E(r) para debug **/
E_r_analytic<<<dimGrid, dimBlock>>>(d_Er, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
} else {
// /** Calculo de la derivada dE(r)/dr usando diferencias finitas **/
if(derivative){
//derivative_E_r_analytic<<<dimGrid, dimBlock>>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
if (CPU){
int x,y;
for (x=0;x<cant_particles;x++)
for(y=0;y<cant_particles;y++)
direct_derivative_E_r_MEMORY(h_dLJPot,h_dEr, h_distance_r,cut,h_item_particle, cant_samples_r,cant_types,width,height, x, y );
//mando los resultados a gpu
cudaMemcpy( d_dEr,h_dEr, s_size, cudaMemcpyHostToDevice);
}
else
direct_derivative_E_r<<<dimGrid, dimBlock>>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height);
E_r_analytic<<<dimGrid, dimBlock>>>(d_Er, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
} else {
// derivative_E_r_analytic<<<dimGrid, dimBlock>>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
derivative_E_r<<<dimGrid, dimBlock>>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height);
// if(amberResults){
// /** Calculo la energia E(r) para debug **/
E_r<<<dimGrid, dimBlock>>>(d_Er, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height);
// }
}
//
}
// if(amberResults){
//if(!derivative){
/** DEBUG **/
//out << " Lennard-Jones" << endl << " ";
double vdwaals = 0;
double (*h_Er)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
cudaMemcpy(h_Er, d_Er, s_size, cudaMemcpyDeviceToHost);
for(int i = 0; i<cant_particles; i++){
// out << " " << i << " | ";
for(int j = 0; j<cant_particles; j++){
// out << h_Er[i][j] << "\t";
if(i<=j)
vdwaals += h_Er[i][j];
}
// out << endl << " ";
}
// out << endl;
if(step == 0)
etotinicial= vdwaals;
if(step % 10000 == 0){
etotalX=vdwaals;
// out << " STEP = " << step << endl;
// out << " VDWAALS = " << vdwaals << endl << endl;
}
free(h_Er);
/** DEBUG **/
// }
//}
if(results){
/** DEBUG **/
out << " DERIVATIVES" << endl << " ";
double (*h_dEr)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
cudaMemcpy(h_dEr, d_dEr, s_size, cudaMemcpyDeviceToHost);
for(int i = 0; i<cant_particles; i+= cant_particles/8){
out << " " << i << " | ";
for(int j = 0; j<cant_particles; j+= cant_particles/8){
out << h_dEr[i][j] << "\t";
}
out << endl << " ";
}
out << endl;
free(h_dEr);
/** DEBUG **/
}
if(results){
/** DEBUG **/
cudaMemcpy(h_velocity_old_x, d_velocity_old_x, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_velocity_old_y, d_velocity_old_y, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_velocity_old_z, d_velocity_old_z, cant_particles_size, cudaMemcpyDeviceToHost);
out << " OLD VELOCITIES" << endl;
for(int i = 0; i<cant_particles; i++){
out << i+1 << ": (" << h_velocity_old_x[i] << " , " << h_velocity_old_y[i] << " , " << h_velocity_old_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos FUERZAS resultantes */
/* ************************************************ */
/* Fx = dE(r) / dr * (x1-x2) / r *
* Fy = dE(r) / dr * (y1-y2) / r *
* Fz = dE(r) / dr * (z1-z2) / r */
/* Calculo de vectores parciales */
/**Variables y memoria*/
width = cant_particles;
height = cant_particles;
dimGrid.x = ceil((double)width / (double)dimBlock.x);
dimGrid.y = ceil((double)height / (double)dimBlock.y);
/** Calculo del vector F **/
Parcial_Forces_Kernel<<<dimGrid, dimBlock>>>(d_Force_x, d_dEr, d_distance_x, d_distance_r, width, height);
Parcial_Forces_Kernel<<<dimGrid, dimBlock>>>(d_Force_y, d_dEr, d_distance_y, d_distance_r, width, height);
Parcial_Forces_Kernel<<<dimGrid, dimBlock>>>(d_Force_z, d_dEr, d_distance_z, d_distance_r, width, height);
//if(results){
/** DEBUG **/
/*double fuerzaTot=0;
cudaMemcpy(h_Force_x, d_Force_x, s_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_Force_y, d_Force_y, s_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_Force_z, d_Force_z, s_size, cudaMemcpyDeviceToHost);
out << " FORCES" << endl << " ";
for(int i = 0; i<cant_particles; i++){
for(int j = 0; j<cant_particles; j++){
if(i<=j)
fuerzaTot+=h_Force_x[i][j] + h_Force_y[i][j] + h_Force_z[i][j];
out << h_Force_x[i][j] << "\n" << h_Force_y[i][j] << "\n" << h_Force_z[i][j] << "\n";
// out << "(" << h_Force_x[i][j] << " , " << h_Force_y[i][j] << " , " << h_Force_z[i][j] << ")\t";
}
out << endl << " ";
}
out << endl;
*/
/** DEBUG **/
//}
// out << "LA SUMA TOTAL DE FUERZAS ES: " << fuerzaTot << endl;
/* Calculo del vector F */
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
Resultant_Forces_Kernel<<<dimGrid, dimBlock>>>(d_Force_x_resultant, d_Force_x, cant_particles);
Resultant_Forces_Kernel<<<dimGrid, dimBlock>>>(d_Force_y_resultant, d_Force_y, cant_particles);
Resultant_Forces_Kernel<<<dimGrid, dimBlock>>>(d_Force_z_resultant, d_Force_z, cant_particles);
// if(results){
/** DEBUG **/
cudaMemcpy(h_Force_x_resultant, d_Force_x_resultant, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_Force_y_resultant, d_Force_y_resultant, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_Force_z_resultant, d_Force_z_resultant, cant_particles_size, cudaMemcpyDeviceToHost);
//out << " RESULTANT FORCES" << endl;
for(int i = 0; i<cant_particles; i++){
out << h_Force_x_resultant[i] <<"\n" <<h_Force_y_resultant[i] << "\n" << h_Force_z_resultant[i] << endl;
//out << i+1 << ": (" << h_Force_x_resultant[i] << " , " << h_Force_y_resultant[i] << " , " << h_Force_z_resultant[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
// }
/* ************************************************ */
/* Calculamos VELOCIDADES Resultantes */
/* ************************************************ */
/* V(t + Dt/2) = V(t - Dt/2) + [ F(t) * Dt ] / m */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
//out << "dtx= " << delta_tiempo*20.455 << endl;
/** Piso las velocidades acumuladas al tiempo t con las nuevas de t+Dt */
Resultant_Velocities_Kernel<<<dimGrid, dimBlock>>>(d_velocity_x, d_velocity_old_x, d_Force_x_resultant, d_mass, d_item_particle, delta_tiempo, cant_particles);
Resultant_Velocities_Kernel<<<dimGrid, dimBlock>>>(d_velocity_y, d_velocity_old_y, d_Force_y_resultant, d_mass, d_item_particle, delta_tiempo, cant_particles);
Resultant_Velocities_Kernel<<<dimGrid, dimBlock>>>(d_velocity_z, d_velocity_old_z, d_Force_z_resultant, d_mass, d_item_particle, delta_tiempo, cant_particles);
if(results){
/** DEBUG **/
cudaMemcpy(h_velocity_x, d_velocity_x, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_velocity_y, d_velocity_y, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_velocity_z, d_velocity_z, cant_particles_size, cudaMemcpyDeviceToHost);
out << " RESULTANT VELOCITIES" << endl;
for(int i = 0; i<cant_particles; i++){
out << i+1 << ": (" << h_velocity_x[i] << " , " << h_velocity_y[i] << " , " << h_velocity_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos POSICIONES Resultantes */
/* ************************************************ */
/* P(t + Dt) = P(t) + V(t + Dt/2) * Dt */
/* (TODO: ajustar condiciones de perioricidad */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
Resultant_Positions_Kernel<<<dimGrid, dimBlock>>>(d_position_x, d_velocity_x, delta_tiempo, cant_particles);
Resultant_Positions_Kernel<<<dimGrid, dimBlock>>>(d_position_y, d_velocity_y, delta_tiempo, cant_particles);
Resultant_Positions_Kernel<<<dimGrid, dimBlock>>>(d_position_z, d_velocity_z, delta_tiempo, cant_particles);
if(results){
/** DEBUG **/
cudaMemcpy(h_position_x, d_position_x, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_position_y, d_position_y, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_position_z, d_position_z, cant_particles_size, cudaMemcpyDeviceToHost);
out << " RESULTANT POSITIONS" << endl;
for(int i = 0; i<cant_particles; i++){
out << i+1 << ": (" << h_particle_type[i] << " (" << h_position_x[i] << " , " << h_position_y[i] << " , " << h_position_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
if(periodicity){
/* ************************************************ */
/* Calculamos POSICIONES con PERIORICIDAD */
/* ************************************************ */
/* P(t + Dt) = P(t) + V(t + Dt/2) * Dt */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
Adjustin_Positions_Kernel<<<dimGrid, dimBlock>>>(d_position_x, box_max_x, cant_particles);
Adjustin_Positions_Kernel<<<dimGrid, dimBlock>>>(d_position_y, box_max_y, cant_particles);
Adjustin_Positions_Kernel<<<dimGrid, dimBlock>>>(d_position_z, box_max_z, cant_particles);
}
if(coordinates){
/** DEBUG **/
cudaMemcpy(h_position_x, d_position_x, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_position_y, d_position_y, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_position_z, d_position_z, cant_particles_size, cudaMemcpyDeviceToHost);
if(results){
out << " RESULTANT POSITIONS in the CUBE" << endl;
for(int i = 0; i<cant_particles; i++){
out << i+1 << ": (" << h_particle_type[i] << " (" << h_position_x[i] << " , " << h_position_y[i] << " , " << h_position_z[i] << ")" << endl;
}
out << endl;
}
for(int i = 0; i<cant_particles; i+=2){
crd << " " << h_position_x[i] << " " << h_position_y[i] << " " << h_position_z[i];
if(i+1 < cant_particles){
crd << " " << h_position_x[i+1] << " " << h_position_y[i+1] << " " << h_position_z[i+1] << endl;
} else
crd << endl;
}
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos Ek de cada partícula */
/* ************************************************ */
/* Ek = |vp|^2 * m / 2 con vp = (vold+v)/2 */
/* Ek_x = (v_x)^2 * m / 2 */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
/** Calculamos la energía cinética para las tres coordenadas de cada partícula **/
/** Puede hacerse directamente así, sin calcular módulo por propiedades algebraicas **/
Kinetic_Energy_Kernel<<<dimGrid, dimBlock>>>(d_kinetic_energy_x, d_velocity_old_x, d_velocity_x, d_mass, d_item_particle, cant_particles);
Kinetic_Energy_Kernel<<<dimGrid, dimBlock>>>(d_kinetic_energy_y, d_velocity_old_y, d_velocity_y, d_mass, d_item_particle, cant_particles);
Kinetic_Energy_Kernel<<<dimGrid, dimBlock>>>(d_kinetic_energy_z, d_velocity_old_z, d_velocity_z, d_mass, d_item_particle, cant_particles);
if(results){
/** DEBUG **/
cudaMemcpy(h_kinetic_energy_x, d_kinetic_energy_x, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_kinetic_energy_y, d_kinetic_energy_y, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_kinetic_energy_z, d_kinetic_energy_z, cant_particles_size, cudaMemcpyDeviceToHost);
out << " KINETIC ENERGY" << endl;
for(int i = 0; i<cant_particles; i++){
out << " " << i << " | ";
out << i+1 << ": (" << h_kinetic_energy_x[i] << " , " << h_kinetic_energy_y[i] << " , " << h_kinetic_energy_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos Ek Resultante */
/* ************************************************ */
/* Ek_TOT = sum (Ek_i) */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
/** Calculamos la Energía cinética total de cada partícula **/
Total_Kinetic_Energy_Kernel<<<dimGrid, dimBlock>>>(d_kinetic_energy, d_kinetic_energy_x, d_kinetic_energy_y, d_kinetic_energy_z, cant_particles);
/* */
/** Calculamos la Energía cinética total del sistema **/
cudaMemcpy(h_kinetic_energy, d_kinetic_energy, cant_particles_size, cudaMemcpyDeviceToHost);
double Ek_TOT = 0;
for(int i = 0; i<cant_particles; i++){
Ek_TOT += h_kinetic_energy[i];
}
if(results){
/** DEBUG **/
out << " KINETIC ENERGY" << endl;
for(int i = 0; i<cant_particles; i++){
out << " " << i << " | ";
out << " " << h_kinetic_energy[i] << endl;
}
out << endl;
/** DEBUG **/
}
//if(amberResults){
if(step==0)
etotinicial=etotinicial + Ek_TOT;
if (step %10000 == 0){
etotalX=etotalX + Ek_TOT;
diferencia= etotalX - etotinicial;
//out << " Total Kinetic Energy(t) = " << Ek_TOT << endl << endl;
//out << " Diferencia energia total= " << diferencia << endl;
}
// }
/* ************************************************ */
/* Calculamos Temperatura Resultante */
/* ************************************************ */
/* T(t) = 2*Ek_TOT / (Kb*(3N-Nc)) */
double Temp_TOT = Ek_TOT * factor_conv_T_Ek;
//if(amberResults){
/** DEBUG **/
if(step % 10000 == 0)
out << " Temp(t) = " << Temp_TOT << endl << endl;
/** DEBUG **/
// }
/* *********************************************** */
/* Calculamos Factor de Correccion */
/* *********************************************** */
/* lambda = sqrt( 1 + 2 * dt / tautp * (T/T(t) -1) ) */
double lambda = sqrt( 1 + delta_tiempo / tautp * (temp0/Temp_TOT -1) );
if(amberResults){
/** DEBUG **/
out << " lambda(t) = " << lambda << endl << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos Velocidades Corregidas */
/* ************************************************ */
/* vi = lambda * vi */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
/** Piso las velocidades acumuladas al tiempo t+Dt con las nuevas de t+Dt corregidas */
Corrected_Velocities_Kernel<<<dimGrid, dimBlock>>>(d_velocity_old_x, d_velocity_x, lambda, cant_particles);
Corrected_Velocities_Kernel<<<dimGrid, dimBlock>>>(d_velocity_old_y, d_velocity_y, lambda, cant_particles);
Corrected_Velocities_Kernel<<<dimGrid, dimBlock>>>(d_velocity_old_z, d_velocity_z, lambda, cant_particles);
if(results){
/** DEBUG **/
cudaMemcpy(h_velocity_x, d_velocity_old_x, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_velocity_y, d_velocity_old_y, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_velocity_z, d_velocity_old_z, cant_particles_size, cudaMemcpyDeviceToHost);
out << " CORRECTED RESULTANT VELOCITIES" << endl;
for(int i = 0; i<cant_particles; i++){
out << i << ": (" << h_velocity_x[i] << " , " << h_velocity_y[i] << " , " << h_velocity_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
dimBlock.x = BLOCK_SIZE_X;
dimBlock.y = BLOCK_SIZE_Y;
/* ********************************************************************************************************** */
/* ******************************************* FIN Iteracion DM ********************************************* */
/* ********************************************************************************************************** */
}
if(!results){ //timer mode ON
gettimeofday(&tv2, NULL);
taim << cut << " " << (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec) << endl;
}
// if(!analytic){
/** Unbindeamos Textura y liberamos memoria **/
cudaUnbindTexture(texRef);
cudaFreeArray(cuLennard_i);
// }
if(results or amberResults){
out.close();
}
if(coordinates){
crd.close();
}
/* ************************************************ */
/* Liberamos memoria en Dispositivo */
/* ************************************************ */
cudaFree(&d_item_particle);
/** Positions **/
cudaFree(&d_position_x);
cudaFree(&d_position_y);
cudaFree(&d_position_z);
/** Distances **/
cudaFree(&d_distance_x);
cudaFree(&d_distance_y);
cudaFree(&d_distance_z);
cudaFree(&d_distance_r);
/** Particle's mass **/
cudaFree(d_mass);
/** Velocities **/
cudaFree(d_velocity_x);
cudaFree(d_velocity_y);
cudaFree(d_velocity_z);
/** Derivatives **/
cudaFree(&d_dEr);
cudaFree(&d_Er);
/** Forces **/
cudaFree(&d_Force_x);
cudaFree(&d_Force_y);
cudaFree(&d_Force_z);
cudaFree(d_Force_x_resultant);
cudaFree(d_Force_y_resultant);
cudaFree(d_Force_z_resultant);
/** Kinetic Energy **/
cudaFree(d_kinetic_energy);
cudaFree(d_kinetic_energy_x);
cudaFree(d_kinetic_energy_y);
cudaFree(d_kinetic_energy_z);
/* ************************************************ */
/* Liberamos memoria en Host */
/* ************************************************ */
free(h_sigma);
free(h_epsilon);
free(h_mass);
/** Matriz de Lennard Jones **/
if(derivative)
free(h_dLJPot);
else
free(h_LJPot);
free(h_item_particle);
/** Positions **/
free(h_position_x);
free(h_position_y);
free(h_position_z);
/** Distances **/
free(h_distance_x);
free(h_distance_y);
free(h_distance_z);
free(h_distance_r);
/** Velocities **/
free(h_velocity_x);
free(h_velocity_y);
free(h_velocity_z);
/** Chargue **/
free(h_chargue);
/** Forces **/
free(h_Force_x);
free(h_Force_y);
free(h_Force_z);
free(h_Force_x_resultant);
free(h_Force_y_resultant);
free(h_Force_z_resultant);
/** Kinetic Energy **/
free(h_kinetic_energy);
free(h_kinetic_energy_x);
free(h_kinetic_energy_y);
free(h_kinetic_energy_z);
return 0;
}
|
d0d0d0c88ead568dc0d7c8200ad35e786bc47de8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cstdio>
__global__ void computeForcesKernel(int N, const double3 *p, double3 *f) {
for(int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < N;
idx += gridDim.x * blockDim.x){
f[idx] = double3{0.0, 0.0, 0.0};
for (int i = 0; i < N; ++i) {
if(i != idx){
double dx = p[i].x - p[idx].x;
double dy = p[i].y - p[idx].y;
double dz = p[i].z - p[idx].z;
double r = sqrt(dx * dx + dy * dy + dz * dz);
double inv_r = 1.0 / r;
f[idx].x += dx * inv_r * inv_r * inv_r;
f[idx].y += dy * inv_r * inv_r * inv_r;
f[idx].z += dz * inv_r * inv_r * inv_r;
}
}
}
}
void computeForces(int N, const double3 *p, double3 *f) {
constexpr int numThreads = 1024;
int numBlocks = (N + numThreads - 1) / numThreads;
hipLaunchKernelGGL(( computeForcesKernel), dim3(numBlocks), dim3(numThreads), 0, 0, N, p, f);
}
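/*
 * Hedged usage sketch: one way a caller might drive computeForces. The buffer
 * names, the particle count, and the host-side setup are illustrative
 * assumptions, not taken from this file.
 *
 *   int N = 4096;                                  // assumed particle count
 *   double3 *d_p, *d_f;
 *   hipMalloc(&d_p, N * sizeof(double3));          // device positions
 *   hipMalloc(&d_f, N * sizeof(double3));          // device forces
 *   // ... fill d_p from host data with hipMemcpy ...
 *   computeForces(N, d_p, d_f);                    // grid-stride O(N^2) force kernel
 *   hipDeviceSynchronize();                        // wait before copying forces back
 */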
| d0d0d0c88ead568dc0d7c8200ad35e786bc47de8.cu | #include <cuda_runtime.h>
#include <cstdio>
__global__ void computeForcesKernel(int N, const double3 *p, double3 *f) {
for(int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < N;
idx += gridDim.x * blockDim.x){
f[idx] = double3{0.0, 0.0, 0.0};
for (int i = 0; i < N; ++i) {
if(i != idx){
double dx = p[i].x - p[idx].x;
double dy = p[i].y - p[idx].y;
double dz = p[i].z - p[idx].z;
double r = sqrt(dx * dx + dy * dy + dz * dz);
double inv_r = 1.0 / r;
f[idx].x += dx * inv_r * inv_r * inv_r;
f[idx].y += dy * inv_r * inv_r * inv_r;
f[idx].z += dz * inv_r * inv_r * inv_r;
}
}
}
}
void computeForces(int N, const double3 *p, double3 *f) {
constexpr int numThreads = 1024;
int numBlocks = (N + numThreads - 1) / numThreads;
computeForcesKernel<<<numBlocks, numThreads>>>(N, p, f);
}
|
2b07077e50b97e634610ea8714d18c99677d5b56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Author: Jacob Perricone
* A
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <rocblas.h>
#include "fstream"
#include "headers.h"
#include <errno.h>
/* Define constants max threads per block in each dimension*/
#define MAX_THREADS_PER_BLOCK_X (1024)
#define MAX_THREADS_PER_BLOCK_Y (1024)
#define MAX_THREADS_PER_BLOCK_Z (64)
#define MAX_BLOCKS (65535)
#define GLOBAL_MEM_SIZE (4232052736)
#define CONSTANT_MEM_SIZE (65536)
#define SHARED_MEM_SIZE (49152)
#define THREADS_PER_BLOCK_Y 1024/4
#define THREADS_PER_BLOCK_X NUM_DEFECTS
#define TILE_WIDTH 16
hipblasHandle_t handle;
void runEM_CPU(float *TFDF, int const num_complaints,
int const vocab_size, float *company_vec,
int const num_companies, float *issue_vec,
int const num_issues, float *product_vec,
int const num_products, int const num_defects,
float *defect_priors_cpu, float *defect_posteriors_cpu,
float *company_posteriors_cpu, float *issue_posteriors_cpu,
float *product_posteriors_cpu, float *word_posteriors_cpu,
const float tol, const int maxIter, const char * log_file_name,
const char *results_file_name){
double delta_likelihood = INFINITY;
double old_likelihood = -INFINITY;
double new_likelihood = 0;
float *x_i_posteriors_cpu, *max_elements, *z, *TFDF_SUM, *d_posterior_sum;
double tmp;
double max;
double denominator;
double numerator;
double epsilon = 1.e-6;
int iter = 0;
    float time_e, time_m, total_estep_time = 0.0f, total_mstep_time = 0.0f;
clock_t begin, end;
x_i_posteriors_cpu = (float *) malloc(sizeof(float) * num_complaints * num_defects);
z = (float *) malloc(sizeof(float) * num_complaints * num_defects);
max_elements = (float *) malloc(sizeof(float) * num_complaints);
TFDF_SUM = (float *) malloc(sizeof(float)*num_complaints);
d_posterior_sum = (float *) malloc(sizeof(float)*num_defects);
    for (int j =0; j < num_complaints; j++){
        tmp = 0;   /* reset per complaint so TFDF_SUM[j] holds only this complaint's word-weight sum */
        for (int i =0; i < vocab_size; i++){
            tmp += TFDF[INDX(j, i, num_complaints)];
        }
        TFDF_SUM[j] = tmp;
    }
FILE *s ;
s = fopen(log_file_name, "w");
fprintf(s, "-----------CPU: Beggining Expectation Maximization Routine on %d complaints and %d words---------\n",
num_complaints, vocab_size);
while (delta_likelihood > (float) .00001 || iter < 10) {
iter++;
memset(x_i_posteriors_cpu, 0, sizeof(float) * num_complaints * num_defects);
memset(z, 0, sizeof(float) * num_complaints * num_defects);
denominator = 0.0;
numerator = 0.0;
new_likelihood = 0.0;
begin = clock();
for (int i = 0; i < num_complaints; i++) {
for (int j = 0; j < num_defects; j++) {
x_i_posteriors_cpu[INDX(j, i, num_defects)] += logf(
issue_posteriors_cpu[INDX(j, (int)issue_vec[INDX(i, 0, num_complaints)], num_defects)]);
x_i_posteriors_cpu[INDX(j, i, num_defects)] += logf(
product_posteriors_cpu[INDX(j, (int) product_vec[INDX(i, 0, num_complaints)], num_defects)]);
x_i_posteriors_cpu[INDX(j, i, num_defects)] += logf(
company_posteriors_cpu[INDX(j, (int) company_vec[INDX(i, 0, num_complaints)], num_defects)]);
tmp = 0;
for (int k = 0; k < vocab_size; k++) {
                tmp += TFDF[INDX(i, k, num_complaints)] * logf(word_posteriors_cpu[INDX(j, k, num_defects)]);
}
x_i_posteriors_cpu[INDX(j, i, num_defects)] += tmp;
z[INDX(j, i, num_defects)] =
logf(defect_priors_cpu[INDX(j, 0, num_defects)]) + x_i_posteriors_cpu[INDX(j, i, num_defects)];
}
max = -INFINITY;
for (int k = 0; k < num_defects; k++) {
if (z[INDX(k, i, num_defects)] > max)
max = z[INDX(k, i, num_defects)];
}
max_elements[i] = (float) max;
new_likelihood += max_elements[i];
tmp = 0;
for (int k = 0; k < num_defects; k++) {
numerator = 0;
denominator = 0;
if (z[INDX(k, i, num_defects)] - max_elements[i] < -11) {
defect_posteriors_cpu[INDX(k, i, num_defects)] = 0.0;
} else {
numerator = expf(z[INDX(k, i, num_defects)] - max_elements[i]);
tmp += numerator;
denominator += numerator;
for (int l = 0; l < num_defects && l != k; l++){
if (z[INDX(l, i, num_defects)] - max_elements[i] > -11) {
denominator += expf(z[INDX(l, i, num_defects)] - max_elements[i]);
}
}
defect_posteriors_cpu[INDX(k, i, num_defects)] = numerator / denominator;
}
}
new_likelihood += logf(tmp);
}
end = clock();
time_e =(float)(end - begin) / CLOCKS_PER_SEC;
total_estep_time += time_e;
fprintf(s, "---------Total time For E_STEP on CPU is %f sec ---------\n", time_e);
delta_likelihood = fabsf(old_likelihood - new_likelihood);
fprintf(s,"(OLD LIKELIHOOD = %f, UPDATED LIKELIHOOD = %f , Change in Likelihood =%f)\n",
old_likelihood,new_likelihood, delta_likelihood);
printf("Change in Likelihood is %f:\n", (new_likelihood-old_likelihood));
old_likelihood= new_likelihood;
/* M STEP*/
memset(d_posterior_sum, 0, sizeof(float)*num_defects);
fprintf(s, "--------------DOING M-STEP WITH CPU---------------------- \n");
begin = clock();
for (int j=0; j < num_defects; j++){
for(int i=0; i < vocab_size; i++){
numerator = 0;
denominator = 0;
for (int k= 0; k < num_complaints; k++){
numerator += defect_posteriors_cpu[INDX(j, k, num_defects)]*TFDF[INDX(k, i, num_complaints)];
denominator += defect_posteriors_cpu[INDX(j, k, num_defects)]*TFDF_SUM[k];
}
word_posteriors_cpu[INDX(j,i, num_defects)] = (1 + numerator )/(vocab_size + denominator);
}
for(int i=0; i < num_companies; i++){
numerator = 0;
denominator = 0;
for (int k= 0; k < num_complaints; k++){
if ((int)company_vec[k] == i) {
numerator += defect_posteriors_cpu[INDX(j, k, num_defects)];
}
denominator += defect_posteriors_cpu[INDX(j, k, num_defects)];
}
company_posteriors_cpu[INDX(j,i, num_defects)] = (1 + numerator)/ (num_companies + denominator);
}
for (int k = 0; k < num_complaints; k++){
d_posterior_sum[j] += defect_posteriors_cpu[INDX(j, k, num_defects)];
}
for(int i=0; i < num_products; i++){
numerator = 0;
denominator = 0;
for (int k= 0; k < num_complaints; k++){
                if ((int)product_vec[k] == i) {
numerator += defect_posteriors_cpu[INDX(j, k, num_defects)];
}
denominator += defect_posteriors_cpu[INDX(j, k, num_defects)];
}
product_posteriors_cpu[INDX(j,i, num_defects)] = (1 + numerator)/ (num_products + denominator);
}
for(int i=0; i < num_issues; i++){
numerator = 0;
denominator = 0;
for (int k= 0; k < num_complaints; k++){
if ((int)issue_vec[k] == i) {
numerator += defect_posteriors_cpu[INDX(j, k, num_defects)];
}
denominator += defect_posteriors_cpu[INDX(j, k, num_defects)];
}
            issue_posteriors_cpu[INDX(j,i, num_defects)] = (1 + numerator)/ (num_issues + denominator);
}
if (d_posterior_sum[j] < epsilon){
printf("YOO %f \n", d_posterior_sum[j]);
defect_priors_cpu[j] = epsilon;
}else{
defect_priors_cpu[j] = d_posterior_sum[j]/num_complaints;
}
}
end = clock();
time_m =(float)(end - begin) / CLOCKS_PER_SEC;
total_mstep_time += time_m;
fprintf(s, "---------Total time For M_STEP on CPU is %f sec ---------\n", time_e);
}
fprintf(s, "Total time till convergece is %f sec | %d iterations \n", total_mstep_time + total_estep_time, iter);
fprintf(s, "Average Time of eStep is %f sec: \n", total_estep_time/iter);
fprintf(s, "Average Time of MStep is %f sec: \n", total_mstep_time/iter);
fprintf(s, "Finally Likelihood %f\n", old_likelihood);
fprintf(s, "Change in likelihood %f\n", delta_likelihood);
fclose(s);
FILE *f = fopen(results_file_name, "w");
if (f == NULL)
{
printf("Error opening file!\n");
exit(1);
}
for (int i=0; i < num_companies; i++){
for (int j=0; j < num_defects; j++){
fprintf(f,"COMPANY, %d, DEFECT, %d, POSTERIOR, %f \n", i, j,company_posteriors_cpu[INDX(j, i, num_defects)] );
}
}
for (int i=0; i < num_issues; i++){
for (int j=0; j < num_defects; j++){
fprintf(f,"ISSUE, %d, DEFECT,%d, POSTERIOR, %f \n", i, j,issue_posteriors_cpu[INDX(j, i, num_defects)] );
}
}
for (int i=0; i < vocab_size; i++){
for (int j=0; j < num_defects; j++){
fprintf(f,"WORD, %d, DEFECT %d, POSTERIOR, %f \n", i, j,word_posteriors_cpu[INDX(j, i, num_defects)] );
}
}
for (int i=0; i < num_complaints; i++){
for (int j=0; j < num_defects; j++){
fprintf(f,"Complaint, %d, DEFECT, %d, POSTERIOR: %f \n", i, j,defect_posteriors_cpu[INDX(j, i, num_defects)] );
}
}
for (int j=0; j < num_defects; j++){
fprintf(f,"DEFECT, %d, , Prior : %f \n", j,defect_priors_cpu[j] );
}
fclose(f);
free(x_i_posteriors_cpu);
free(z);
free(max_elements);
free(TFDF_SUM);
free(d_posterior_sum);
}
__global__ void eStep2(int const num_complaints, int const vocab_size,
int const num_defects, float *d_defect_priors,
float *d_defect_posteriors, float *d_company_posteriors,
float *d_issue_posteriors, float *d_product_posteriors,
float *d_word_posteriors, float *d_TFDF,
float *d_company_vec, float *d_issue_vec,
float *d_product_vec, float *d_likelihood
) {
int d_row = threadIdx.x;
int d_offset = blockIdx.x * blockDim.y;
int d_index = threadIdx.y;
int d_sample = d_offset + d_index;
__shared__ double x_posterior[NUM_DEFECTS][THREADS_PER_BLOCK_Y];
__shared__ double z[NUM_DEFECTS][THREADS_PER_BLOCK_Y];
__shared__ double max_elem[THREADS_PER_BLOCK_Y];
__shared__ float block_likelihood[THREADS_PER_BLOCK_Y];
__shared__ float c_post[NUM_DEFECTS][NUM_COMPANIES_SMALL];
__shared__ float i_post[NUM_DEFECTS][NUM_ISSUES_SMALL];
__shared__ float p_post[NUM_DEFECTS][NUM_PRODUCTS_SMALL];
__shared__ int p_vec[THREADS_PER_BLOCK_Y];
__shared__ int c_vec[THREADS_PER_BLOCK_Y];
__shared__ int i_vec[THREADS_PER_BLOCK_Y];
if (d_sample < num_complaints) {
        /* Copy the entity lookups into shared memory */
/* Keep track of p(x_i | d_j) */
c_vec[d_index] = (int) d_company_vec[INDX(d_sample, 1, 1)];
p_vec[d_index] = (int) d_product_vec[INDX(d_sample, 1, 1)];
i_vec[d_index] = (int) d_issue_vec[INDX(d_sample, 1, 1)];
__syncthreads();
if (d_index < NUM_PRODUCTS_SMALL) {
c_post[d_row][d_index] = d_company_posteriors[INDX(d_row, d_index, num_defects)];
i_post[d_row][d_index] = d_issue_posteriors[INDX(d_row, d_index, num_defects)];
p_post[d_row][d_index] = d_product_posteriors[INDX(d_row, d_index, num_defects)];
}else if (d_index < NUM_COMPANIES_SMALL ) {
i_post[d_row][d_index] = d_issue_posteriors[INDX(d_row, d_index, num_defects)];
c_post[d_row][d_index] = d_company_posteriors[INDX(d_row, d_index, num_defects)];
}else if (d_index < NUM_ISSUES_SMALL){
i_post[d_row][d_index] = d_issue_posteriors[INDX(d_row, d_index, num_defects)];
}
__syncthreads();
x_posterior[d_row][d_index] = logf(c_post[d_row][c_vec[d_index]])
+ logf(i_post[d_row][i_vec[d_index]]) + logf(p_post[d_row][p_vec[d_index]]);
float sum = 0;
for (int i = 0; i < vocab_size; i++) {
sum += d_TFDF[INDX(d_sample, i, num_complaints)] * logf(d_word_posteriors[INDX(d_row, i, num_defects)]);
}
x_posterior[d_row][d_index] += sum;
/* Apply smoothing operations */
z[d_row][d_index] = logf(d_defect_priors[d_row]) + x_posterior[d_row][d_index];
__syncthreads();
double max = -INFINITY;
for (int i = 0; i < num_defects; i++) {
if (z[i][d_index] > max) {
max = z[i][d_index];
}
}
block_likelihood[d_index] = 0;
__syncthreads();
max_elem[d_index] = max;
//printf( "DEFECT %d %d %f %f %f %f \n", d_row, d_sample, z[0][d_index], z[1][d_index], z[2][d_index],max_elem[d_index] );
double denom = 0.0;
if (z[d_row][d_index] - max_elem[d_index] > -11) {
block_likelihood[d_index] += expf(z[d_row][d_index] - max_elem[d_index]);
for (int i = 0; i < num_defects; i++) {
if (z[i][d_index] - max_elem[d_index] > -11) {
denom += expf(z[i][d_index] - max_elem[d_index]);
}
}
d_defect_posteriors[INDX(d_row, d_sample, num_defects)] = expf(z[d_row][d_index] - max_elem[d_index]) / denom;
} else {
d_defect_posteriors[INDX(d_row, d_sample, num_defects)] = 0.0;
}
__syncthreads();
if (threadIdx.y==0 && threadIdx.x == 0) {
for (int i = 0; i < THREADS_PER_BLOCK_Y && (d_offset + i) < num_complaints; i++) {
d_likelihood[blockIdx.x] += max_elem[i] + logf(block_likelihood[i]);
}
}
}
}
/* Do it without Shared Mem For now */
__global__ void eStep(int const num_complaints,
int const vocab_size,
int const num_defects,
float *d_defect_priors,
float *d_defect_posteriors,
float *d_company_posteriors,
float *d_issue_posteriors,
float *d_product_posteriors,
float *d_word_posteriors,
float *d_TFDF,
float *d_company_vec,
float *d_issue_vec,
float *d_product_vec,
float *d_likelihood
) {
int d_row = threadIdx.x;
int d_offset = blockIdx.x * blockDim.y;
int d_index = threadIdx.y;
int d_sample = d_offset + d_index;
__shared__ double x_posterior[NUM_DEFECTS][THREADS_PER_BLOCK_Y];
__shared__ double z[NUM_DEFECTS][THREADS_PER_BLOCK_Y];
__shared__ double max_elem[THREADS_PER_BLOCK_Y];
__shared__ float block_likelihood[THREADS_PER_BLOCK_Y];
if (d_sample < NUM_COMPLAINTS_SMALL) {
/* COPY THIS TO SHARED_MEM */
/* Keep track of p(x_i | d_j) */
x_posterior[d_row][d_index] =
logf(d_company_posteriors[INDX(d_row, (int) d_company_vec[INDX(d_sample, 1, 1)], num_defects)]);
x_posterior[d_row][d_index] += logf(
d_issue_posteriors[INDX(d_row, (int) d_issue_vec[INDX(d_sample, 1, 1)], num_defects)]);
x_posterior[d_row][d_index] += logf(
d_product_posteriors[INDX(d_row, (int) d_product_vec[INDX(d_sample, 1, 1)], num_defects)]);
double sum = 0;
for (int i = 0; i < vocab_size; i++) {
sum += d_TFDF[INDX(d_sample, i, num_complaints)] * logf(d_word_posteriors[INDX(d_row, i, num_defects)]);
}
x_posterior[d_row][d_index] += sum;
/* Apply smoothing operations */
z[d_row][d_index] = logf(d_defect_priors[d_row]) + x_posterior[d_row][d_index];
__syncthreads();
double max = -INFINITY;
for (int i = 0; i < num_defects; i++) {
if (z[i][d_index] > max) {
max = z[i][d_index];
}
}
block_likelihood[d_index] = 0;
__syncthreads();
max_elem[d_index] = max;
double denom = 0.0;
if (z[d_row][d_index] - max_elem[d_index] > -11) {
block_likelihood[d_index] += expf(z[d_row][d_index] - max_elem[d_index]);
for (int i = 0; i < num_defects; i++) {
if (z[i][d_index] - max_elem[d_index] > -11) {
denom += expf(z[i][d_index] - max_elem[d_index]);
}
}
d_defect_posteriors[INDX(d_row, d_sample, num_defects)] = expf(z[d_row][d_index] - max_elem[d_index]) / denom;
} else {
d_defect_posteriors[INDX(d_row, d_sample, num_defects)] = 0.0;
}
__syncthreads();
if (threadIdx.y==0 && threadIdx.x == 0) {
for (int i = 0; i < THREADS_PER_BLOCK_Y && (d_offset + i) < num_complaints; i++) {
d_likelihood[blockIdx.x] += max_elem[i] + logf(block_likelihood[i]);
}
}
}
}
/* Find the sum of the columns of a matrix. One block is launched per row of the input
 * (the row index is blockIdx.x) and blockDim.y threads cooperate on that row, which is not
 * always feasible for very tall matrices. A hedged launch sketch follows the kernel. */
__global__ void reduce_columns(const int num_rows, const int num_columns, const float * in, float * out){
extern __shared__ float sArray[];
int globalIndex = blockIdx.y * blockDim.y + threadIdx.y;
/* zero out the smem array */
sArray[threadIdx.y] = 0.0;
/* Stride over the array
*/
float tmp = 0;
for( int i = globalIndex; i < num_columns; i += blockDim.y)
{
tmp += in[INDX(blockIdx.x,i,num_rows)];
} /* end for */
sArray[threadIdx.y] = tmp;
__syncthreads();
/* do the final reduction in SMEM */
for( int i = blockDim.y/2; i > 0; i = i / 2 )
{
if( threadIdx.y < i )
{
sArray[threadIdx.y] += sArray[threadIdx.y + i];
// sArray[threadIdx.x] += sArray[threadIdx.x + i];
} /* end if */
__syncthreads();
} /* end for */
/* thread0 of each threadblock writes the result to global memory */
if( threadIdx.y == 0 ){
// printf("BLOCK X has value, %f", (float) sArray[0]);
out[blockIdx.x] = sArray[0];
}
return;
}
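/*
 * Hedged launch sketch: reduce_columns assigns one block per row (the row index
 * is blockIdx.x) and lets blockDim.y threads stride over the columns, so it
 * needs blockDim.y floats of dynamic shared memory. This mirrors the launches
 * later in this file; d_in and d_row_sums are illustrative names.
 *
 *   dim3 blocks(num_rows, 1, 1);
 *   dim3 threads(1, 256, 1);
 *   hipLaunchKernelGGL(reduce_columns, blocks, threads,
 *                      sizeof(float) * threads.y, 0,
 *                      num_rows, num_columns, d_in, d_row_sums);
 */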
/* Tiled Matrix Multiplication on GPU
 *
 * Params:
 * a_rows = number of rows of d_A
 * a_columns = number of columns of d_A
 * b_rows = number of rows in d_B
 * b_columns = number of columns in d_B
 * d_C = matrix to store the result in
 * c_rows = number of rows of d_C
 * (a hedged launch sketch follows the kernel definition)
 * */
__global__ void mat_mul(const int a_rows, const int a_columns,
const int b_rows, const int b_columns,
float * d_A, float * d_B, float * d_C,
const int c_rows, const int c_columns) {
/* setup some constants for later use */
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * TILE_WIDTH;
const int ibx = blockIdx.x * TILE_WIDTH;
const int row = iby + ty;
const int col = ibx + tx;
/* shared memory arrays for A and B */
__shared__ double as[TILE_WIDTH][TILE_WIDTH];
__shared__ double bs[TILE_WIDTH][TILE_WIDTH];
/* space for C to be held in registers */
float value = 0;
int tmp_col;
int tmp_row;
for (int i = 0; i < ceil(a_columns/(float)TILE_WIDTH); i++) {
tmp_col = i * TILE_WIDTH + tx;
if (tmp_col < a_columns && row < a_rows) {
as[ty][tx] = d_A[tmp_col * a_rows + row];
} else {
as[ty][tx] = 0.0;
}
tmp_row = i * TILE_WIDTH + ty;
if (tmp_row < b_rows && col < b_columns) {
bs[ty][tx] = d_B[col * b_rows + tmp_row];
} else {
bs[ty][tx] = 0.0;
}
__syncthreads();
for (int j = 0; j < TILE_WIDTH; j++) {
value += as[threadIdx.y][j] * bs[j][threadIdx.x];
}
__syncthreads();
}
if (row < c_rows && col < c_columns) {
int row_map = blockIdx.y * blockDim.y + threadIdx.y;
int col_map = blockIdx.x * blockDim.x + threadIdx.x;
d_C[INDX(row_map, col_map, c_rows)] = value;
}
}
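/*
 * Hedged launch sketch: mat_mul tiles the output d_C (c_rows x c_columns) into
 * TILE_WIDTH x TILE_WIDTH blocks, so the grid is sized from the output
 * dimensions. Variable names are illustrative.
 *
 *   dim3 threads(TILE_WIDTH, TILE_WIDTH);
 *   dim3 blocks((c_columns + TILE_WIDTH - 1) / TILE_WIDTH,
 *               (c_rows    + TILE_WIDTH - 1) / TILE_WIDTH);
 *   hipLaunchKernelGGL(mat_mul, blocks, threads, 0, 0,
 *                      a_rows, a_columns, b_rows, b_columns,
 *                      d_A, d_B, d_C, c_rows, c_columns);
 */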
/* Update the entity posteriors by doing the matrix multiplication and division in one step.
 * Same params as mat_mul, plus the additional float * divisor, which should have one entry per row of d_C.
 * The constants apply the operation
 * (numerator_constant + x_{i,j}) / (denominator_constant + divisor_i)
 * (a worked example of this smoothing follows the kernel definition)
 */
__global__ void update_entities(const int a_rows, const int a_columns,
const int b_rows, const int b_columns,
float * d_A, float * d_B,
float * d_C, const int c_rows,
const int c_columns, float * divisor,
float numerator_constant, float denominator_constant)
{
/* setup some constants for later use */
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * TILE_WIDTH;
const int ibx = blockIdx.x * TILE_WIDTH;
const int row = iby + ty;
const int col = ibx + tx;
/* shared memory arrays for A and B */
__shared__ double as[TILE_WIDTH][TILE_WIDTH];
__shared__ double bs[TILE_WIDTH][TILE_WIDTH];
/* space for C to be held in registers */
float value = 0;
int tmp_col;
int tmp_row;
for (int i = 0; i < ceil(a_columns/(float)TILE_WIDTH); i++) {
tmp_col = i * TILE_WIDTH + tx;
if (tmp_col < a_columns && row < a_rows) {
as[ty][tx] = d_A[tmp_col * a_rows + row];
} else {
as[ty][tx] = 0.0;
}
tmp_row = i * TILE_WIDTH + ty;
if (tmp_row < b_rows && col < b_columns) {
bs[ty][tx] = d_B[col * b_rows + tmp_row];
} else {
bs[ty][tx] = 0.0;
}
__syncthreads();
for (int j = 0; j < TILE_WIDTH; j++) {
value += as[threadIdx.y][j] * bs[j][threadIdx.x];
}
__syncthreads();
}
if (row < c_rows && col < c_columns) {
int row_map = blockIdx.y * blockDim.y + threadIdx.y;
int col_map = blockIdx.x * blockDim.x + threadIdx.x;
d_C[INDX(row_map, col_map, c_rows)] = (numerator_constant + value) / (divisor[row_map] + denominator_constant);
}
/* c
*/
}
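/*
 * Worked example of the smoothing above, derived from the CPU reference
 * runEM_CPU and the word-posterior call in execute_MStep: with
 * numerator_constant = 1 and denominator_constant = vocab_size each entry is
 * the Laplace-smoothed weighted count
 *
 *   P(w | d_j) = (1 + sum_i P(d_j | x_i) * TFDF[i][w])
 *                / (vocab_size + sum_i P(d_j | x_i) * TFDF_SUM[i])
 *
 * where divisor[j] holds the per-defect denominator sum computed beforehand.
 */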
/* Sums across columns of a matrix using cublas matrix operations */
void cublas_column_reduce( float * d_A, const int num_rows, const int num_columns, float * d_y){
const float alpha = 1.0;
float beta = 0.0;
/* Make a vector of ones for multiplication */
float *h_x, *d_x;
h_x = (float *) malloc(sizeof(float) * num_columns);
for (int i = 0; i < num_columns; i++)
h_x[INDX(i,0,num_columns)] = (float) 1.0;
/* Copy to device memory */
checkCUDA(hipMalloc((void **) &d_x, sizeof(float) * num_columns));
checkCUDA(hipMemcpy(d_x, h_x, sizeof(float) * num_columns, hipMemcpyHostToDevice));
/* Start Timers */
checkCUBLAS(hipblasCreate(&handle));
checkCUBLAS(hipblasSgemv(handle, HIPBLAS_OP_N,
num_rows, num_columns,
&alpha,
d_A, num_rows,
d_x, 1.0,
&beta,
d_y, 1.0));
/* End Timers */
/*Free the ones vector*/
checkCUDA(hipFree(d_x));
free(h_x);
/* Host copy isn't needed */
}
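/*
 * Hedged usage sketch: despite its name, this helper produces the per-row sums
 * of a (num_rows x num_columns) matrix as y = A * ones. It appears below
 * (commented out) as a sanity check against the reduce_columns kernel; the
 * buffer name is an illustrative assumption.
 *
 *   float *d_row_sums;
 *   hipMalloc(&d_row_sums, sizeof(float) * num_rows);
 *   cublas_column_reduce(d_A, num_rows, num_columns, d_row_sums);
 */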
/* Fills randomly generated data between 0-1 into the dynamic float array data, of size: (rows, cols)
*
* Params:
* float * data: array to be filled
* int rows: number of rows in array
* int cols: number of columns to be filled
* int ld: number of rows per column
*/
void RandomInit(float *data, int rows, int cols, int ld) {
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
data[INDX(i, j, ld)] = (static_cast <float> (rand()) / static_cast <float> (RAND_MAX)) + 1.e-2;
}
}
}
/* See header file for description*/
void runEM(float *TFDF, int const num_complaints,
int const vocab_size, float *company_vec,
int const num_companies, float *issue_vec,
int const num_issues, float *product_vec,
int const num_products, int const num_defects,
float *defect_priors, float *defect_posteriors,
float *company_posteriors, float *issue_posteriors,
float *product_posteriors, float *word_posteriors,
const float tol, const int maxIter, const char * log_file_name,
const char *results_file_name
) {
/* checkCUBLAS( hipblasCreate( &cublasHandle ) );*/
printf("------------------------------------ \n");
printf("COPYING HOST DATA TO DEVICE MEMORY\n");
int thread_size;
float elapsedTime;
float old_likelihood, delta_likelihood;
float *d_defect_priors, *d_defect_posteriors,
*d_company_posteriors, *d_product_posteriors,
*d_issue_posteriors,*d_word_posteriors;
/* ALLOCATE AND COPY FOR PRIORS AND POSTERIORS */
checkCUDA(hipMalloc((void **) &d_defect_priors, sizeof(float) * num_defects));
checkCUDA(hipMemcpy(d_defect_priors, defect_priors, sizeof(float) * num_defects,
hipMemcpyHostToDevice));
checkCUDA(hipMalloc((void **) &d_defect_posteriors, sizeof(float) * num_defects * num_complaints));
checkCUDA(hipMemcpy(d_defect_posteriors, defect_posteriors, sizeof(float) * num_defects * num_complaints,
hipMemcpyHostToDevice));
checkCUDA(hipMalloc((void **) &d_company_posteriors, sizeof(float) * num_defects * num_companies));
checkCUDA(hipMemcpy(d_company_posteriors, company_posteriors, sizeof(float) * num_defects * num_companies,
hipMemcpyHostToDevice));
checkCUDA(hipMalloc((void **) &d_product_posteriors, sizeof(float) * num_defects * num_products));
checkCUDA(hipMemcpy(d_product_posteriors, product_posteriors, sizeof(float) * num_defects * num_products,
hipMemcpyHostToDevice));
checkCUDA(hipMalloc((void **) &d_issue_posteriors, sizeof(float) * num_defects * num_issues));
checkCUDA(hipMemcpy(d_issue_posteriors, issue_posteriors, sizeof(float) * num_defects * num_issues,
hipMemcpyHostToDevice));
checkCUDA(hipMalloc((void **) &d_word_posteriors, sizeof(float) * num_defects * vocab_size));
checkCUDA(hipMemcpy(d_word_posteriors, word_posteriors, sizeof(float) * num_defects * vocab_size,
hipMemcpyHostToDevice));
/* ALLOCATE AND COPY FOR EMPIRICAL DATA (i.e the entity data of all complaints ) */
float *d_TFDF, *d_company_vec,*d_product_vec,*d_issue_vec;
checkCUDA(hipMalloc((void **) &d_TFDF, sizeof(float) * num_complaints * vocab_size));
checkCUDA(hipMemcpy(d_TFDF, TFDF, sizeof(float) * num_complaints * vocab_size, hipMemcpyHostToDevice));
checkCUDA(hipMalloc((void **) &d_company_vec, sizeof(float) * num_complaints));
checkCUDA(hipMemcpy((void **) d_company_vec, company_vec, sizeof(float) * num_complaints, hipMemcpyHostToDevice));
checkCUDA(hipMalloc((void **) &d_product_vec, sizeof(float) * num_complaints));
checkCUDA(hipMemcpy(d_product_vec, product_vec, sizeof(float) * num_complaints, hipMemcpyHostToDevice));
checkCUDA(hipMalloc((void **) &d_issue_vec, sizeof(float) * num_complaints));
checkCUDA(hipMemcpy((void **) d_issue_vec, issue_vec, sizeof(float) * num_complaints, hipMemcpyHostToDevice));
/* CREATE THE EXPANDED MATRICES */
/* Calculate Host Values that will not change */
float* h_expanded_company, *h_expanded_product, *h_expanded_issue, *h_ones_matrix, *h_ones_vector;
checkCUDA(hipHostMalloc((void**)&h_expanded_company, sizeof(float) * num_companies*num_complaints));
memset(h_expanded_company, 0, sizeof(float)*num_companies*num_complaints);
checkCUDA(hipHostMalloc((void**)&h_expanded_product, sizeof(float) * num_products*num_complaints));
memset(h_expanded_product, 0, sizeof(float)*num_products*num_complaints);
checkCUDA(hipHostMalloc((void**)&h_expanded_issue, sizeof(float)*num_issues*num_complaints));
memset(h_expanded_issue, 0, sizeof(float)*num_issues*num_complaints);
checkCUDA(hipHostMalloc((void**)&h_ones_matrix, sizeof(float) * num_defects * vocab_size));
memset(h_ones_matrix, 0, sizeof(float)*num_defects*vocab_size);
checkCUDA(hipHostMalloc((void**)&h_ones_vector, sizeof(float) * num_defects ));
memset(h_ones_vector, 0, sizeof(float)*num_defects);
for (int i = 0; i < num_defects; i++) {
for (int j = 0; j < vocab_size; j++) {
h_ones_matrix[INDX(i, j, num_defects)] = (float) 1.0;
}
h_ones_vector[INDX(i, 0, num_defects)] = (float) 1.0;
}
for (int i = 0; i < num_complaints; i++){
h_expanded_company[INDX(i, (int) company_vec[INDX(i,1,1)], num_complaints)] = 1;
h_expanded_issue[INDX(i, (int) issue_vec[INDX(i,1,1)], num_complaints)] = 1;
h_expanded_product[INDX(i, (int) product_vec[INDX(i,1,1)],num_complaints)] = 1;
}
/* Allocate and copy to device */
float * d_expanded_company, * d_expanded_product, * d_expanded_issue;
checkCUDA(hipMalloc(&d_expanded_company, sizeof(float)*num_complaints*num_companies));
checkCUDA(hipMemcpy(d_expanded_company, h_expanded_company,
sizeof(float)*num_complaints*num_companies, hipMemcpyHostToDevice));
checkCUDA(hipMalloc(&d_expanded_product, sizeof(float)*num_complaints*num_products));
checkCUDA(hipMemcpy(d_expanded_product, h_expanded_product,
sizeof(float)*num_complaints*num_products, hipMemcpyHostToDevice));
checkCUDA(hipMalloc(&d_expanded_issue, sizeof(float)*num_complaints*num_issues));
checkCUDA(hipMemcpy(d_expanded_issue, h_expanded_issue,
sizeof(float)*num_complaints*num_issues, hipMemcpyHostToDevice));
/* FInd the sum across all words for each complaint */
float *d_TFDF_SUM;
checkCUDA(hipMalloc((void **) &d_TFDF_SUM, sizeof(float) * num_complaints));
/* create thread elements */
dim3 blocks(num_complaints, 1, 1);
thread_size = 256;
dim3 threads(1, thread_size,1);
hipEvent_t start, stop;
hipEvent_t start_total, stop_total;
hipError_t err;
FILE *s ;
s = fopen(log_file_name, "w");
checkCUDA(hipEventCreate(&start));
checkCUDA(hipEventCreate(&stop));
checkCUDA(hipEventRecord(start, 0));
hipLaunchKernelGGL(( reduce_columns), dim3(blocks), dim3(threads), sizeof(float)*threads.y, 0, num_complaints, vocab_size, d_TFDF, d_TFDF_SUM);
err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
checkCUDA(hipEventRecord(stop, 0));
checkCUDA(hipEventSynchronize(stop));
checkCUDA(hipEventElapsedTime(&elapsedTime, start, stop));
/* print GPU CUBLAS timing information */
fprintf(s, "Total time GPU KERNAL for TFDF SUM is %f sec\n", elapsedTime / 1000.0f);
checkCUDA(hipEventDestroy(start));
checkCUDA(hipEventDestroy(stop));
/* Sanity check to make sure the sum reduce worked */
// /* Find the sum_{words} for each complaint of TFDF using CUBLAS */
// float * d_cublas_sum;
// checkCUDA(hipMalloc(&d_cublas_sum, sizeof(float)*num_complaints));
// cublas_column_reduce(d_TFDF, num_complaints, vocab_size, d_cublas_sum);
old_likelihood = -INFINITY;
delta_likelihood = 10000000;
float *new_likelihood;
float *d_likelihood;
fprintf(stdout, "-----------Beggining Expectation Maximization Routine on %d complaints and %d words---------\n",
num_complaints, vocab_size);
checkCUDA(hipEventCreate(&start_total));
checkCUDA(hipEventCreate(&stop_total));
checkCUDA(hipEventRecord(start_total, 0));
int iter = 0;
double total_estep_time = 0.0;
double total_mstep_time = 0.0;
while (delta_likelihood > (float) .00001 || iter < 10) {
iter = iter + 1;
threads.x = num_defects;
threads.y = THREADS_PER_BLOCK_Y;
blocks.x = ceil(num_complaints / threads.y) + 1;
blocks.y = 1;
new_likelihood = (float *)malloc(sizeof(float)*blocks.x);
checkCUDA(hipMalloc((void **) &d_likelihood, sizeof(float) * blocks.x));
checkCUDA(hipMemset(d_likelihood, 0, sizeof(float) * blocks.x));
checkCUDA(hipEventCreate(&start));
checkCUDA(hipEventCreate(&stop));
checkCUDA(hipEventRecord(start, 0));
hipLaunchKernelGGL(( eStep) , dim3(blocks), dim3(threads) , 0, 0, num_complaints, vocab_size,
num_defects, d_defect_priors,
d_defect_posteriors, d_company_posteriors,
d_issue_posteriors, d_product_posteriors,
d_word_posteriors, d_TFDF,
d_company_vec, d_issue_vec,
d_product_vec, d_likelihood);
err = hipGetLastError();
if (err != hipSuccess) {
printf("YOO Error: %s\n", hipGetErrorString(err));
exit(0);
}
checkCUDA(hipEventRecord(stop));
hipEventSynchronize(stop);
checkCUDA(hipEventElapsedTime(&elapsedTime, start, stop));
total_estep_time += elapsedTime/ 1000.0f;
fprintf(s, "---------Total time For E_STEP on GPU is %f sec ---------\n", elapsedTime / 1000.0f);
checkCUDA(hipEventDestroy(start));
checkCUDA(hipEventDestroy(stop));
checkCUDA(hipMemcpy(new_likelihood, d_likelihood, sizeof(float)*blocks.x,hipMemcpyDeviceToHost));
float total_likelihood = 0.0;
for (int i = 0; i < blocks.x; i++) {
total_likelihood += new_likelihood[i];
}
delta_likelihood = float(fabsf(old_likelihood -total_likelihood ));
fprintf(s,"(OLD LIKELIHOOD = %f, UPDATED LIKELIHOOD = %f , Change in Likelihood =%f)\n",
old_likelihood,total_likelihood, (total_likelihood-old_likelihood));
// printf("Change in Likelihood is %f:\n", delta_likelihood);
old_likelihood = total_likelihood;
//
// fprintf(s, "--------------DOING M-STEP WITH CUBLAS---------------------- \n");
//
//
checkCUDA(hipEventCreate(&start));
checkCUDA(hipEventCreate(&stop));
checkCUDA(hipEventRecord(start, 0));
M_STEP_CUBLAS(num_complaints, vocab_size,
num_defects, d_defect_priors,
d_defect_posteriors, d_company_posteriors,
d_issue_posteriors, d_product_posteriors,
d_word_posteriors, d_TFDF,
d_expanded_company, d_expanded_issue,
d_expanded_product, d_TFDF_SUM,
num_companies, num_products,
num_issues
);
checkCUDA(hipEventRecord(stop, 0));
checkCUDA(hipEventSynchronize(stop));
float elapsedTime;
checkCUDA(hipEventElapsedTime(&elapsedTime, start, stop));
total_mstep_time += elapsedTime/ 1000.0f;
fprintf(s, "Total time GPU M Step %f sec\n", elapsedTime / 1000.0f);
checkCUDA(hipEventDestroy(start));
checkCUDA(hipEventDestroy(stop));
//
// printf("--------------DOING M-STEP KERNEL---------------------- \n");
//
//
// checkCUDA(hipEventCreate(&start));
// checkCUDA(hipEventCreate(&stop));
// checkCUDA(hipEventRecord(start, 0));
// execute_MStep(num_complaints, vocab_size,
// num_defects, d_defect_priors,
// d_defect_posteriors, d_company_posteriors,
// d_issue_posteriors, d_product_posteriors,
// d_word_posteriors, d_TFDF,
// d_expanded_company, d_expanded_issue,
// d_expanded_product,
// d_TFDF_SUM, num_companies,
// num_products, num_issues,0
// );
//
// checkCUDA(hipEventRecord(stop));
// hipEventSynchronize(stop);
// checkCUDA(hipEventElapsedTime(&elapsedTime, start, stop));
// total_mstep_time += elapsedTime/ 1000.0f;
//
// checkCUDA(hipEventDestroy(start));
// checkCUDA(hipEventDestroy(stop));
// fprintf(s, "--------------Total time For M-STEP on GPU is %f sec\n--------------", elapsedTime / 1000.0f);
        checkCUDA(hipFree(d_likelihood));
        free(new_likelihood);   /* free the per-iteration host likelihood buffer to avoid leaking each pass */
}
checkCUDA(hipEventRecord(stop_total, 0));
checkCUDA(hipEventSynchronize(stop_total));
checkCUDA(hipEventElapsedTime(&elapsedTime, start_total, stop_total));
fprintf(s, "Total time till convergece is %f sec | %d iterations \n", total_estep_time + total_mstep_time, iter);
fprintf(s, "Average Time of eStep is %f sec: \n", total_estep_time/iter);
fprintf(s, "Average Time of MStep is %f sec: \n", total_mstep_time/iter);
fprintf(s, "Finally Likelihood %f\n", old_likelihood);
fprintf(s, "Change in likelihood %f\n", delta_likelihood);
checkCUDA(hipMemcpy(defect_posteriors, d_defect_posteriors, sizeof(float)*num_defects*num_complaints, hipMemcpyDeviceToHost));
checkCUDA(hipMemcpy(defect_priors, d_defect_priors, sizeof(float)*num_defects, hipMemcpyDeviceToHost));
checkCUDA(hipMemcpy(word_posteriors, d_word_posteriors, sizeof(float)*num_defects*vocab_size, hipMemcpyDeviceToHost));
checkCUDA(hipMemcpy(product_posteriors, d_product_posteriors, sizeof(float)*num_defects*num_products, hipMemcpyDeviceToHost));
checkCUDA(hipMemcpy(issue_posteriors, d_issue_posteriors, sizeof(float)*num_defects*num_issues, hipMemcpyDeviceToHost));
checkCUDA(hipMemcpy(company_posteriors, d_company_posteriors, sizeof(float)*num_defects*num_products, hipMemcpyDeviceToHost));
fclose(s);
FILE *f = fopen(results_file_name, "w");
fprintf(stdout, "---------DONE---------\n");
if (f == NULL)
{
printf("Error opening file!\n");
exit(1);
}
for (int i=0; i < num_companies; i++){
for (int j=0; j < num_defects; j++){
fprintf(f,"COMPANY, %d, DEFECT, %d, POSTERIOR, %f \n", i, j,company_posteriors[INDX(j, i, num_defects)] );
}
}
for (int i=0; i < num_issues; i++){
for (int j=0; j < num_defects; j++){
fprintf(f,"ISSUE, %d, DEFECT,%d, POSTERIOR, %f \n", i, j,issue_posteriors[INDX(j, i, num_defects)] );
}
}
for (int i=0; i < vocab_size; i++){
for (int j=0; j < num_defects; j++){
fprintf(f,"WORD, %d, DEFECT %d, POSTERIOR, %f \n", i, j,word_posteriors[INDX(j, i, num_defects)] );
}
}
for (int i=0; i < num_complaints; i++){
for (int j=0; j < num_defects; j++){
fprintf(f,"Complaint, %d, DEFECT, %d, POSTERIOR: %f \n", i, j,defect_posteriors[INDX(j, i, num_defects)] );
}
}
for (int j=0; j < num_defects; j++){
fprintf(f,"DEFECT, %d, , Prior : %f \n", j,defect_priors[j] );
}
fclose(f);
checkCUDA(hipFree(d_defect_posteriors));
checkCUDA(hipFree(d_defect_priors));
checkCUDA(hipFree(d_word_posteriors));
checkCUDA(hipFree(d_product_posteriors));
checkCUDA(hipFree(d_issue_posteriors));
checkCUDA(hipFree(d_TFDF));
checkCUDA(hipFree(d_TFDF_SUM));
checkCUDA(hipFree(d_expanded_company));
checkCUDA(hipFree(d_expanded_issue));
checkCUDA(hipFree(d_expanded_product));
checkCUDA(hipFree(d_issue_vec));
checkCUDA(hipFree(d_company_vec));
checkCUDA(hipFree(d_product_vec));
}
__global__ void elementwise_division(int const num_rows, int const num_columns,
float *d_denominator, float *d_numerator,
float *output, float numerator_lambda,
float denominator_lamda)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_columns; i += blockDim.x * gridDim.x) {
output[INDX(threadIdx.y, i, num_rows)] =
(float) ((numerator_lambda + d_numerator[INDX(threadIdx.y, i, num_rows)]) /
(denominator_lamda + d_denominator[INDX(threadIdx.y,0, num_rows)]));
}
}
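/*
 * Hedged launch sketch: the kernel reads the defect row from threadIdx.y and
 * strides over columns with blockIdx.x * blockDim.x + threadIdx.x, so a
 * configuration consistent with that indexing would be the following
 * (illustrative names; the calls later in this file pass their own shapes):
 *
 *   dim3 block(256, num_defects, 1);
 *   dim3 grid((num_columns + block.x - 1) / block.x, 1, 1);
 *   hipLaunchKernelGGL(elementwise_division, grid, block, 0, 0,
 *                      num_defects, num_columns,
 *                      d_denominator, d_numerator, d_out,
 *                      1.0f, (float) num_columns);
 */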
void execute_MStep(int const num_complaints, int const vocab_size,
int const num_defects, float *d_defect_priors,
float *d_defect_posteriors, float *d_company_posteriors,
float *d_issue_posteriors, float *d_product_posteriors,
float *d_word_posteriors, float *d_TFDF,
float *d_expanded_company_vec, float *d_expanded_issue_vec,
float *d_expanded_product_vec,
float *d_TFDF_SUM, int const num_companies,
int const num_products, int const num_issues,
int check_kernal)
{
hipError_t err;
double temp;
float epsilon = 1e-8;
float *d_denominator, *d_defect_posterior_sum;
float *h_denominator, * h_defect_posterior_sum,*h_defect_prior;
dim3 threads, blocks;
/* printf("----------UPDATING THE WORD POSTERIORS INSIDE MATRIX MULTIPLICATION---------------\n"); */
h_denominator = (float *) malloc(sizeof(float) * num_defects);
checkCUDA(hipMalloc(&d_denominator, sizeof(float) * num_defects));
threads.x = TILE_WIDTH;
threads.y = TILE_WIDTH;
blocks.x = ceil(1 + threads.x - 1) / threads.x;
blocks.y = ceil((num_defects + threads.y - 1) / threads.y);
hipLaunchKernelGGL(( mat_mul) , dim3(blocks), dim3(threads) , 0, 0, num_defects, num_complaints,
num_complaints, 1,
d_defect_posteriors, d_TFDF_SUM,
d_denominator, num_defects, 1);
if (check_kernal) {
temp = 0.0;
float *h_denominator_cublas;
float *d_denominator_cublas;
h_denominator_cublas = (float *) malloc(sizeof(float) * num_defects);
checkCUDA(hipMalloc(&d_denominator_cublas, sizeof(float) * num_defects))
cublas_mat_mul(num_defects, num_complaints, num_complaints, 1, d_defect_posteriors, d_TFDF_SUM,
d_denominator_cublas, num_defects,
1, 1.0, 0);
checkCUDA(hipMemcpy(h_denominator, d_denominator, sizeof(float) * num_defects, hipMemcpyDeviceToHost));
checkCUDA(hipMemcpy(h_denominator_cublas, d_denominator_cublas, sizeof(float) * num_defects,
hipMemcpyDeviceToHost));
for (int i = 0; i < num_defects; i++) {
temp += (h_denominator[INDX(i, 0, num_defects)] - h_denominator_cublas[INDX(i, 0, num_defects)])
* (h_denominator[INDX(i, 0, num_defects)] - h_denominator_cublas[INDX(i, 0, num_defects)]);
}
printf("error is %f\n", temp);
if (temp > 10) printf("FAIL\n");
else printf("PASSED ACCURACY TEST FOR DENOMINATOR \n");
free(h_denominator_cublas);
checkCUDA(hipFree(d_denominator_cublas));
}
blocks.x = ceil((vocab_size + threads.x - 1) / threads.x);
blocks.y = ceil((num_defects + threads.y - 1) / threads.y);
hipLaunchKernelGGL(( update_entities) , dim3(blocks), dim3(threads) , 0, 0, num_defects, num_complaints,
num_complaints, vocab_size,
d_defect_posteriors, d_TFDF,
d_word_posteriors, num_defects,
vocab_size, d_denominator,
1, vocab_size);
err = hipGetLastError();
if (err != hipSuccess) {
printf("YOO Error: %s\n", hipGetErrorString(err));
exit(0);
}
/* printf("----------------------SUMMING POSTERIOR FOR DEFECT-----------------------\n");*/
blocks.x = num_defects;
blocks.y =1;
threads.x = 1;
threads.y = 256;
checkCUDA(hipMalloc(&d_defect_posterior_sum, sizeof(float)*num_defects));
hipLaunchKernelGGL(( reduce_columns), dim3(blocks), dim3(threads), sizeof(float)*threads.y, 0, num_defects, num_complaints,
d_defect_posteriors, d_defect_posterior_sum);
err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
/* printf("-------------------------UPDATING ENTITIES---------------------\n"); */
free(h_denominator);
checkCUDA(hipFree(d_denominator));
/*Companies */
threads.x = TILE_WIDTH;
threads.y = TILE_WIDTH;
blocks.x = ceil(num_companies + threads.x - 1) / threads.x;
blocks.y = ceil((num_defects + threads.y - 1) / threads.y);
hipLaunchKernelGGL(( update_entities) , dim3(blocks), dim3(threads) , 0, 0, num_defects, num_complaints,
num_complaints, num_companies,
d_defect_posteriors, d_expanded_company_vec,
d_company_posteriors, num_defects,
num_companies, d_defect_posterior_sum,
1, num_companies);
/*Issues */
blocks.x = ceil(num_issues + threads.x - 1) / threads.x;
hipLaunchKernelGGL(( update_entities) , dim3(blocks), dim3(threads) , 0, 0, num_defects, num_complaints,
num_complaints, num_issues,
d_defect_posteriors, d_expanded_issue_vec,
                                             d_issue_posteriors, num_defects,
num_issues, d_defect_posterior_sum,
1, num_issues);
/*Products */
blocks.x = ceil(num_products + threads.x - 1) / threads.x;
hipLaunchKernelGGL(( update_entities) , dim3(blocks), dim3(threads) , 0, 0, num_defects, num_complaints,
num_complaints, num_products,
d_defect_posteriors, d_expanded_product_vec,
                                             d_product_posteriors, num_defects,
num_products, d_defect_posterior_sum,
1, num_products);
/* printf("----------UPDATING Priors--------------\n");*/
h_defect_posterior_sum = (float *) malloc(sizeof(float)*num_defects);
h_defect_prior = (float *) malloc(sizeof(float)*num_defects);
checkCUDA(hipMemcpy(h_defect_posterior_sum, d_defect_posterior_sum,
sizeof(float)*num_defects,hipMemcpyDeviceToHost ));
for(int i=0; i < num_defects; i ++){
if (h_defect_posterior_sum[i] < epsilon){
printf("prior %d is too small: %f \n", i,h_defect_posterior_sum[i] );
h_defect_prior[i] = epsilon;
}else{
h_defect_prior[i] = (float) h_defect_posterior_sum[i]/num_complaints;
}
}
checkCUDA(hipMemcpy(d_defect_priors, h_defect_prior, sizeof(float)*num_defects,hipMemcpyHostToDevice));
/* printf("------------------------------------ \n");*/
checkCUDA(hipFree(d_defect_posterior_sum));
free(h_defect_posterior_sum);
free(h_defect_prior);
}
void cublas_mat_mul(const int a_rows, const int a_columns,
const int b_rows, const int b_columns,
float * d_A, float * d_B,
float * d_C, const int c_rows,
const int c_columns, float alpha,
float beta)
{
checkCUBLAS(hipblasSgemm(
handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
a_rows, b_columns, b_rows,
&alpha, d_A,
a_rows, d_B, b_rows,
&beta, d_C, a_rows
));
}
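/*
 * Hedged usage note: hipblasSgemm above works on column-major data, so this
 * wrapper computes C = alpha * A * B + beta * C with leading dimensions a_rows
 * and b_rows. A representative call from M_STEP_CUBLAS below is
 *
 *   cublas_mat_mul(num_defects, num_complaints,     // A: num_defects x num_complaints
 *                  num_complaints, vocab_size,      // B: num_complaints x vocab_size
 *                  d_defect_posteriors, d_TFDF,
 *                  d_numerator, num_defects, vocab_size,
 *                  1.0, 0.0);                       // C = A * B
 */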
void M_STEP_CUBLAS(int const num_complaints, int const vocab_size,
int const num_defects, float *d_defect_priors,
float *d_defect_posteriors, float *d_company_posteriors,
float *d_issue_posteriors, float *d_product_posteriors,
float *d_word_posteriors, float *d_TFDF,
float *d_expanded_company_vec, float *d_expanded_issue_vec,
float *d_expanded_product_vec, float *d_TFDF_SUM, int const num_companies,
int const num_products, int const num_issues
) {
hipError_t err;
checkCUBLAS(hipblasCreate(&handle));
float epsilon = 1e-6;
float * h_defect_posterior_sum,*h_defect_prior, *d_numerator,
*d_denominator;
/* printf("----------UPDATING THE WORD POSTERIORS ---------------\n"); */
/* Numerator */
checkCUDA(hipMalloc(&d_numerator, sizeof(float) * num_defects * vocab_size));
cublas_mat_mul(num_defects, num_complaints,
num_complaints, vocab_size,
d_defect_posteriors, d_TFDF,
d_numerator, num_defects,
vocab_size, 1.0, 0.0);
/* Denominator */
checkCUDA(hipMalloc(&d_denominator, sizeof(float) * num_defects))
cublas_mat_mul(num_defects, num_complaints, num_complaints, 1, d_defect_posteriors, d_TFDF_SUM,
d_denominator, num_defects,
1, 1.0, 0);
    /* Elementwise division */
int threads_per_block = THREADS_PER_BLOCK_Y;
dim3 threads( threads_per_block,num_defects, 1);
dim3 blocks((num_complaints / threads_per_block) + 1, 1, 1);
hipLaunchKernelGGL(( elementwise_division), dim3(threads), dim3(blocks) , 0, 0, num_defects, vocab_size,
d_denominator, d_numerator,
d_word_posteriors, (float) 1.0,
(float) vocab_size);
err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
checkCUDA(hipFree(d_numerator));
checkCUDA(hipFree(d_denominator));
/* printf("----------SUMMING POSTERIOR FOR DEFECT--------------\n");*/
float * h_x;
float *d_x;
h_x = (float *) malloc(sizeof(float) * num_complaints);
for (int i = 0; i < num_complaints; i++) {
h_x[INDX(i, 0, num_complaints)] = (float) 1.0;
}
checkCUDA(hipMalloc(&d_x, sizeof(float) * num_complaints));
checkCUDA(hipMemcpy(d_x, h_x, sizeof(float) * num_complaints, hipMemcpyHostToDevice));
float *d_defect_posterior_sum;
checkCUDA(hipMalloc(&d_defect_posterior_sum, sizeof(float) * num_defects));
cublas_mat_mul(num_defects, num_complaints,
num_complaints, 1,
d_defect_posteriors, d_x,
d_defect_posterior_sum, num_defects,
1, 1.0, 0);
/* -----------------Updating Entities---------------------------- */
checkCUDA(hipMalloc(&d_numerator, sizeof(float)*num_companies*num_defects));
/* Companies */
cublas_mat_mul(num_defects, num_complaints,
num_complaints, num_companies,
d_defect_posteriors, d_expanded_company_vec,
d_numerator, num_defects,
1, 1.0, 0);
hipLaunchKernelGGL(( elementwise_division) , dim3(threads), dim3(blocks) , 0, 0, num_defects, num_companies,
d_defect_posterior_sum, d_numerator,
d_company_posteriors, (float) 1.0,
(float) num_companies);
err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
/* Products */
checkCUDA(hipFree(d_numerator));
checkCUDA(hipMalloc(&d_numerator, sizeof(float)*num_products*num_defects));
cublas_mat_mul(num_defects, num_complaints,
num_complaints, num_products,
d_defect_posteriors, d_expanded_product_vec,
d_numerator, num_defects,
1, 1.0, 0);
hipLaunchKernelGGL(( elementwise_division) , dim3(threads), dim3(blocks) , 0, 0, num_defects, num_products,
d_defect_posterior_sum, d_numerator,
d_product_posteriors, (float) 1.0,
(float) num_products);
err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
checkCUDA(hipFree(d_numerator));
/* Issues */
checkCUDA(hipMalloc(&d_numerator, sizeof(float)*num_issues*num_defects));
cublas_mat_mul(num_defects, num_complaints,
num_complaints, num_issues,
                   d_defect_posteriors, d_expanded_issue_vec,
d_numerator, num_defects,
1, 1.0, 0);
hipLaunchKernelGGL(( elementwise_division) , dim3(threads), dim3(blocks) , 0, 0, num_defects, num_issues,
d_defect_posterior_sum, d_numerator,
d_issue_posteriors, (float) 1.0,
(float) num_issues);
err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
/* -----------------Updating Priors---------------------------- */
h_defect_posterior_sum = (float *) malloc(sizeof(float)*num_defects);
h_defect_prior = (float *) malloc(sizeof(float)*num_defects);
checkCUDA(hipMemcpy(h_defect_posterior_sum, d_defect_posterior_sum,
sizeof(float)*num_defects,hipMemcpyDeviceToHost ));
for(int i=0; i < num_defects; i ++){
if (h_defect_posterior_sum[i] < epsilon){
printf("prior %d is too small: %f \n", i,h_defect_posterior_sum[i] );
h_defect_prior[i] = epsilon;
}else{
h_defect_prior[i] = (float) h_defect_posterior_sum[i] / num_complaints;
}
}
checkCUDA(hipMemcpy(d_defect_priors, h_defect_prior, sizeof(float)*num_defects,hipMemcpyHostToDevice));
checkCUDA(hipFree(d_numerator));
checkCUDA(hipFree(d_defect_posterior_sum));
free(h_defect_posterior_sum);
free(h_defect_prior);
}
void readMatrixFromFile(char *fileName,
float *matrix,
int const rows,
int const cols,
int const ld) {
FILE *ifp;
ifp = fopen(fileName, "r");
if (ifp == NULL) {
fprintf(stderr, "Error opening file %s\n", fileName);
exit(911);
} /* end if */
for (int row = 0; row < rows; row++) {
for (int col = 0; col < cols; col++) {
if (!fscanf(ifp, " %f",
&matrix[INDX(row, col, ld)])) {
printf("%d\n", INDX(row, col, ld));
printf("error in element %d and %d\n", row, col);
fprintf(stderr, "error reading training matrix file \n");
perror("scanf:");
exit(911);
}
/* end if */
} /* end for row */
} /* end for col */
fclose(ifp);
return;
}
| 2b07077e50b97e634610ea8714d18c99677d5b56.cu | /*
* Author: Jacob Perricone
* A
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <cublas_v2.h>
#include "fstream"
#include "headers.h"
#include <errno.h>
/* Define constants max threads per block in each dimension*/
#define MAX_THREADS_PER_BLOCK_X (1024)
#define MAX_THREADS_PER_BLOCK_Y (1024)
#define MAX_THREADS_PER_BLOCK_Z (64)
#define MAX_BLOCKS (65535)
#define MAX_BLOCKS (65535)
#define GLOBAL_MEM_SIZE (4232052736)
#define CONSTANT_MEM_SIZE (65536)
#define SHARED_MEM_SIZE (49152)
#define THREADS_PER_BLOCK_Y 1024/4
#define THREADS_PER_BLOCK_X NUM_DEFECTS
#define TILE_WIDTH 16
cublasHandle_t handle;
void runEM_CPU(float *TFDF, int const num_complaints,
int const vocab_size, float *company_vec,
int const num_companies, float *issue_vec,
int const num_issues, float *product_vec,
int const num_products, int const num_defects,
float *defect_priors_cpu, float *defect_posteriors_cpu,
float *company_posteriors_cpu, float *issue_posteriors_cpu,
float *product_posteriors_cpu, float *word_posteriors_cpu,
const float tol, const int maxIter, const char * log_file_name,
const char *results_file_name){
double delta_likelihood = INFINITY;
double old_likelihood = -INFINITY;
double new_likelihood = 0;
float *x_i_posteriors_cpu, *max_elements, *z, *TFDF_SUM, *d_posterior_sum;
double tmp;
double max;
double denominator;
double numerator;
double epsilon = 1.e-6;
int iter = 0;
    float time_e, time_m, total_estep_time = 0.0f, total_mstep_time = 0.0f;
clock_t begin, end;
x_i_posteriors_cpu = (float *) malloc(sizeof(float) * num_complaints * num_defects);
z = (float *) malloc(sizeof(float) * num_complaints * num_defects);
max_elements = (float *) malloc(sizeof(float) * num_complaints);
TFDF_SUM = (float *) malloc(sizeof(float)*num_complaints);
d_posterior_sum = (float *) malloc(sizeof(float)*num_defects);
    for (int j =0; j < num_complaints; j++){
        tmp = 0;   /* reset per complaint so TFDF_SUM[j] holds only this complaint's word-weight sum */
        for (int i =0; i < vocab_size; i++){
            tmp += TFDF[INDX(j, i, num_complaints)];
        }
        TFDF_SUM[j] = tmp;
    }
FILE *s ;
s = fopen(log_file_name, "w");
fprintf(s, "-----------CPU: Beggining Expectation Maximization Routine on %d complaints and %d words---------\n",
num_complaints, vocab_size);
while (delta_likelihood > (float) .00001 || iter < 10) {
iter++;
memset(x_i_posteriors_cpu, 0, sizeof(float) * num_complaints * num_defects);
memset(z, 0, sizeof(float) * num_complaints * num_defects);
denominator = 0.0;
numerator = 0.0;
new_likelihood = 0.0;
begin = clock();
for (int i = 0; i < num_complaints; i++) {
for (int j = 0; j < num_defects; j++) {
x_i_posteriors_cpu[INDX(j, i, num_defects)] += logf(
issue_posteriors_cpu[INDX(j, (int)issue_vec[INDX(i, 0, num_complaints)], num_defects)]);
x_i_posteriors_cpu[INDX(j, i, num_defects)] += logf(
product_posteriors_cpu[INDX(j, (int) product_vec[INDX(i, 0, num_complaints)], num_defects)]);
x_i_posteriors_cpu[INDX(j, i, num_defects)] += logf(
company_posteriors_cpu[INDX(j, (int) company_vec[INDX(i, 0, num_complaints)], num_defects)]);
tmp = 0;
for (int k = 0; k < vocab_size; k++) {
                tmp += TFDF[INDX(i, k, num_complaints)] * logf(word_posteriors_cpu[INDX(j, k, num_defects)]);
}
x_i_posteriors_cpu[INDX(j, i, num_defects)] += tmp;
z[INDX(j, i, num_defects)] =
logf(defect_priors_cpu[INDX(j, 0, num_defects)]) + x_i_posteriors_cpu[INDX(j, i, num_defects)];
}
max = -INFINITY;
for (int k = 0; k < num_defects; k++) {
if (z[INDX(k, i, num_defects)] > max)
max = z[INDX(k, i, num_defects)];
}
max_elements[i] = (float) max;
new_likelihood += max_elements[i];
tmp = 0;
for (int k = 0; k < num_defects; k++) {
numerator = 0;
denominator = 0;
if (z[INDX(k, i, num_defects)] - max_elements[i] < -11) {
defect_posteriors_cpu[INDX(k, i, num_defects)] = 0.0;
} else {
numerator = expf(z[INDX(k, i, num_defects)] - max_elements[i]);
tmp += numerator;
denominator += numerator;
for (int l = 0; l < num_defects && l != k; l++){
if (z[INDX(l, i, num_defects)] - max_elements[i] > -11) {
denominator += expf(z[INDX(l, i, num_defects)] - max_elements[i]);
}
}
defect_posteriors_cpu[INDX(k, i, num_defects)] = numerator / denominator;
}
}
new_likelihood += logf(tmp);
}
end = clock();
time_e =(float)(end - begin) / CLOCKS_PER_SEC;
total_estep_time += time_e;
fprintf(s, "---------Total time For E_STEP on CPU is %f sec ---------\n", time_e);
delta_likelihood = fabsf(old_likelihood - new_likelihood);
fprintf(s,"(OLD LIKELIHOOD = %f, UPDATED LIKELIHOOD = %f , Change in Likelihood =%f)\n",
old_likelihood,new_likelihood, delta_likelihood);
printf("Change in Likelihood is %f:\n", (new_likelihood-old_likelihood));
old_likelihood= new_likelihood;
/* M STEP*/
memset(d_posterior_sum, 0, sizeof(float)*num_defects);
fprintf(s, "--------------DOING M-STEP WITH CPU---------------------- \n");
begin = clock();
for (int j=0; j < num_defects; j++){
for(int i=0; i < vocab_size; i++){
numerator = 0;
denominator = 0;
for (int k= 0; k < num_complaints; k++){
numerator += defect_posteriors_cpu[INDX(j, k, num_defects)]*TFDF[INDX(k, i, num_complaints)];
denominator += defect_posteriors_cpu[INDX(j, k, num_defects)]*TFDF_SUM[k];
}
word_posteriors_cpu[INDX(j,i, num_defects)] = (1 + numerator )/(vocab_size + denominator);
}
for(int i=0; i < num_companies; i++){
numerator = 0;
denominator = 0;
for (int k= 0; k < num_complaints; k++){
if ((int)company_vec[k] == i) {
numerator += defect_posteriors_cpu[INDX(j, k, num_defects)];
}
denominator += defect_posteriors_cpu[INDX(j, k, num_defects)];
}
company_posteriors_cpu[INDX(j,i, num_defects)] = (1 + numerator)/ (num_companies + denominator);
}
for (int k = 0; k < num_complaints; k++){
d_posterior_sum[j] += defect_posteriors_cpu[INDX(j, k, num_defects)];
}
for(int i=0; i < num_products; i++){
numerator = 0;
denominator = 0;
for (int k= 0; k < num_complaints; k++){
                if ((int)product_vec[k] == i) {
numerator += defect_posteriors_cpu[INDX(j, k, num_defects)];
}
denominator += defect_posteriors_cpu[INDX(j, k, num_defects)];
}
product_posteriors_cpu[INDX(j,i, num_defects)] = (1 + numerator)/ (num_products + denominator);
}
for(int i=0; i < num_issues; i++){
numerator = 0;
denominator = 0;
for (int k= 0; k < num_complaints; k++){
if ((int)issue_vec[k] == i) {
numerator += defect_posteriors_cpu[INDX(j, k, num_defects)];
}
denominator += defect_posteriors_cpu[INDX(j, k, num_defects)];
}
            issue_posteriors_cpu[INDX(j,i, num_defects)] = (1 + numerator)/ (num_issues + denominator);
}
if (d_posterior_sum[j] < epsilon){
printf("YOO %f \n", d_posterior_sum[j]);
defect_priors_cpu[j] = epsilon;
}else{
defect_priors_cpu[j] = d_posterior_sum[j]/num_complaints;
}
}
end = clock();
time_m =(float)(end - begin) / CLOCKS_PER_SEC;
total_mstep_time += time_m;
fprintf(s, "---------Total time For M_STEP on CPU is %f sec ---------\n", time_e);
}
fprintf(s, "Total time till convergece is %f sec | %d iterations \n", total_mstep_time + total_estep_time, iter);
fprintf(s, "Average Time of eStep is %f sec: \n", total_estep_time/iter);
fprintf(s, "Average Time of MStep is %f sec: \n", total_mstep_time/iter);
fprintf(s, "Finally Likelihood %f\n", old_likelihood);
fprintf(s, "Change in likelihood %f\n", delta_likelihood);
fclose(s);
FILE *f = fopen(results_file_name, "w");
if (f == NULL)
{
printf("Error opening file!\n");
exit(1);
}
for (int i=0; i < num_companies; i++){
for (int j=0; j < num_defects; j++){
fprintf(f,"COMPANY, %d, DEFECT, %d, POSTERIOR, %f \n", i, j,company_posteriors_cpu[INDX(j, i, num_defects)] );
}
}
for (int i=0; i < num_issues; i++){
for (int j=0; j < num_defects; j++){
fprintf(f,"ISSUE, %d, DEFECT,%d, POSTERIOR, %f \n", i, j,issue_posteriors_cpu[INDX(j, i, num_defects)] );
}
}
for (int i=0; i < vocab_size; i++){
for (int j=0; j < num_defects; j++){
fprintf(f,"WORD, %d, DEFECT %d, POSTERIOR, %f \n", i, j,word_posteriors_cpu[INDX(j, i, num_defects)] );
}
}
for (int i=0; i < num_complaints; i++){
for (int j=0; j < num_defects; j++){
fprintf(f,"Complaint, %d, DEFECT, %d, POSTERIOR: %f \n", i, j,defect_posteriors_cpu[INDX(j, i, num_defects)] );
}
}
for (int j=0; j < num_defects; j++){
fprintf(f,"DEFECT, %d, , Prior : %f \n", j,defect_priors_cpu[j] );
}
fclose(f);
free(x_i_posteriors_cpu);
free(z);
free(max_elements);
free(TFDF_SUM);
free(d_posterior_sum);
}
__global__ void eStep2(int const num_complaints, int const vocab_size,
int const num_defects, float *d_defect_priors,
float *d_defect_posteriors, float *d_company_posteriors,
float *d_issue_posteriors, float *d_product_posteriors,
float *d_word_posteriors, float *d_TFDF,
float *d_company_vec, float *d_issue_vec,
float *d_product_vec, float *d_likelihood
) {
int d_row = threadIdx.x;
int d_offset = blockIdx.x * blockDim.y;
int d_index = threadIdx.y;
int d_sample = d_offset + d_index;
__shared__ double x_posterior[NUM_DEFECTS][THREADS_PER_BLOCK_Y];
__shared__ double z[NUM_DEFECTS][THREADS_PER_BLOCK_Y];
__shared__ double max_elem[THREADS_PER_BLOCK_Y];
__shared__ float block_likelihood[THREADS_PER_BLOCK_Y];
__shared__ float c_post[NUM_DEFECTS][NUM_COMPANIES_SMALL];
__shared__ float i_post[NUM_DEFECTS][NUM_ISSUES_SMALL];
__shared__ float p_post[NUM_DEFECTS][NUM_PRODUCTS_SMALL];
__shared__ int p_vec[THREADS_PER_BLOCK_Y];
__shared__ int c_vec[THREADS_PER_BLOCK_Y];
__shared__ int i_vec[THREADS_PER_BLOCK_Y];
if (d_sample < num_complaints) {
        /* Copy the entity lookups into shared memory */
/* Keep track of p(x_i | d_j) */
c_vec[d_index] = (int) d_company_vec[INDX(d_sample, 1, 1)];
p_vec[d_index] = (int) d_product_vec[INDX(d_sample, 1, 1)];
i_vec[d_index] = (int) d_issue_vec[INDX(d_sample, 1, 1)];
__syncthreads();
if (d_index < NUM_PRODUCTS_SMALL) {
c_post[d_row][d_index] = d_company_posteriors[INDX(d_row, d_index, num_defects)];
i_post[d_row][d_index] = d_issue_posteriors[INDX(d_row, d_index, num_defects)];
p_post[d_row][d_index] = d_product_posteriors[INDX(d_row, d_index, num_defects)];
}else if (d_index < NUM_COMPANIES_SMALL ) {
i_post[d_row][d_index] = d_issue_posteriors[INDX(d_row, d_index, num_defects)];
c_post[d_row][d_index] = d_company_posteriors[INDX(d_row, d_index, num_defects)];
}else if (d_index < NUM_ISSUES_SMALL){
i_post[d_row][d_index] = d_issue_posteriors[INDX(d_row, d_index, num_defects)];
}
__syncthreads();
x_posterior[d_row][d_index] = logf(c_post[d_row][c_vec[d_index]])
+ logf(i_post[d_row][i_vec[d_index]]) + logf(p_post[d_row][p_vec[d_index]]);
float sum = 0;
for (int i = 0; i < vocab_size; i++) {
sum += d_TFDF[INDX(d_sample, i, num_complaints)] * logf(d_word_posteriors[INDX(d_row, i, num_defects)]);
}
x_posterior[d_row][d_index] += sum;
/* Apply smoothing operations */
z[d_row][d_index] = logf(d_defect_priors[d_row]) + x_posterior[d_row][d_index];
__syncthreads();
double max = -INFINITY;
for (int i = 0; i < num_defects; i++) {
if (z[i][d_index] > max) {
max = z[i][d_index];
}
}
block_likelihood[d_index] = 0;
__syncthreads();
max_elem[d_index] = max;
//printf( "DEFECT %d %d %f %f %f %f \n", d_row, d_sample, z[0][d_index], z[1][d_index], z[2][d_index],max_elem[d_index] );
double denom = 0.0;
if (z[d_row][d_index] - max_elem[d_index] > -11) {
block_likelihood[d_index] += expf(z[d_row][d_index] - max_elem[d_index]);
for (int i = 0; i < num_defects; i++) {
if (z[i][d_index] - max_elem[d_index] > -11) {
denom += expf(z[i][d_index] - max_elem[d_index]);
}
}
d_defect_posteriors[INDX(d_row, d_sample, num_defects)] = expf(z[d_row][d_index] - max_elem[d_index]) / denom;
} else {
d_defect_posteriors[INDX(d_row, d_sample, num_defects)] = 0.0;
}
__syncthreads();
if (threadIdx.y==0 && threadIdx.x == 0) {
for (int i = 0; i < THREADS_PER_BLOCK_Y && (d_offset + i) < num_complaints; i++) {
d_likelihood[blockIdx.x] += max_elem[i] + logf(block_likelihood[i]);
}
}
}
}
/* Do it without Shared Mem For now */
__global__ void eStep(int const num_complaints,
int const vocab_size,
int const num_defects,
float *d_defect_priors,
float *d_defect_posteriors,
float *d_company_posteriors,
float *d_issue_posteriors,
float *d_product_posteriors,
float *d_word_posteriors,
float *d_TFDF,
float *d_company_vec,
float *d_issue_vec,
float *d_product_vec,
float *d_likelihood
) {
int d_row = threadIdx.x;
int d_offset = blockIdx.x * blockDim.y;
int d_index = threadIdx.y;
int d_sample = d_offset + d_index;
__shared__ double x_posterior[NUM_DEFECTS][THREADS_PER_BLOCK_Y];
__shared__ double z[NUM_DEFECTS][THREADS_PER_BLOCK_Y];
__shared__ double max_elem[THREADS_PER_BLOCK_Y];
__shared__ float block_likelihood[THREADS_PER_BLOCK_Y];
    if (d_sample < num_complaints) {
/* COPY THIS TO SHARED_MEM */
/* Keep track of p(x_i | d_j) */
x_posterior[d_row][d_index] =
logf(d_company_posteriors[INDX(d_row, (int) d_company_vec[INDX(d_sample, 1, 1)], num_defects)]);
x_posterior[d_row][d_index] += logf(
d_issue_posteriors[INDX(d_row, (int) d_issue_vec[INDX(d_sample, 1, 1)], num_defects)]);
x_posterior[d_row][d_index] += logf(
d_product_posteriors[INDX(d_row, (int) d_product_vec[INDX(d_sample, 1, 1)], num_defects)]);
double sum = 0;
for (int i = 0; i < vocab_size; i++) {
sum += d_TFDF[INDX(d_sample, i, num_complaints)] * logf(d_word_posteriors[INDX(d_row, i, num_defects)]);
}
x_posterior[d_row][d_index] += sum;
/* Apply smoothing operations */
z[d_row][d_index] = logf(d_defect_priors[d_row]) + x_posterior[d_row][d_index];
__syncthreads();
double max = -INFINITY;
for (int i = 0; i < num_defects; i++) {
if (z[i][d_index] > max) {
max = z[i][d_index];
}
}
block_likelihood[d_index] = 0;
__syncthreads();
max_elem[d_index] = max;
double denom = 0.0;
        if (z[d_row][d_index] - max_elem[d_index] > -11) {
            /* num_defects threads share block_likelihood[d_index]; accumulate atomically to avoid a shared-memory race */
            atomicAdd(&block_likelihood[d_index], expf(z[d_row][d_index] - max_elem[d_index]));
for (int i = 0; i < num_defects; i++) {
if (z[i][d_index] - max_elem[d_index] > -11) {
denom += expf(z[i][d_index] - max_elem[d_index]);
}
}
d_defect_posteriors[INDX(d_row, d_sample, num_defects)] = expf(z[d_row][d_index] - max_elem[d_index]) / denom;
} else {
d_defect_posteriors[INDX(d_row, d_sample, num_defects)] = 0.0;
}
__syncthreads();
if (threadIdx.y==0 && threadIdx.x == 0) {
for (int i = 0; i < THREADS_PER_BLOCK_Y && (d_offset + i) < num_complaints; i++) {
d_likelihood[blockIdx.x] += max_elem[i] + logf(block_likelihood[i]);
}
}
}
}
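/* Note on the posterior computation in the eStep kernels above (a summary added for clarity, not
 * original code): with z_j = log p(d_j) + log p(x_i | d_j), the posterior is computed via the
 * log-sum-exp trick,
 *     p(d_j | x_i) = exp(z_j - max_k z_k) / sum_k exp(z_k - max_k z_k),
 * where terms with z_j - max_k z_k < -11 (exp(-11) ~ 1.7e-5) are dropped as a cheap underflow
 * guard, and the per-complaint log-likelihood is accumulated as max_k z_k + log(sum of kept terms).
 */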
/* Find the sum across the columns of each row of a matrix. One thread block is launched per row,
 * so ensure that gridDim.x == num_rows; each block's blockDim.y threads stride across the columns. */
__global__ void reduce_columns(const int num_rows, const int num_columns, const float * in, float * out){
extern __shared__ float sArray[];
int globalIndex = blockIdx.y * blockDim.y + threadIdx.y;
/* zero out the smem array */
sArray[threadIdx.y] = 0.0;
/* Stride over the array
*/
float tmp = 0;
for( int i = globalIndex; i < num_columns; i += blockDim.y)
{
tmp += in[INDX(blockIdx.x,i,num_rows)];
} /* end for */
sArray[threadIdx.y] = tmp;
__syncthreads();
/* do the final reduction in SMEM */
for( int i = blockDim.y/2; i > 0; i = i / 2 )
{
if( threadIdx.y < i )
{
sArray[threadIdx.y] += sArray[threadIdx.y + i];
// sArray[threadIdx.x] += sArray[threadIdx.x + i];
} /* end if */
__syncthreads();
} /* end for */
/* thread0 of each threadblock writes the result to global memory */
if( threadIdx.y == 0 ){
// printf("BLOCK X has value, %f", (float) sArray[0]);
out[blockIdx.x] = sArray[0];
}
return;
}
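/* Hypothetical usage sketch (not part of the original code): the kernel above expects one thread
 * block per row (gridDim.x == num_rows) and a 1-by-blockDim.y block whose threads stride across
 * the columns, with blockDim.y floats of dynamic shared memory. A minimal host-side wrapper,
 * with the illustrative name launch_reduce_columns_sketch, could look like this. */
static void launch_reduce_columns_sketch(const int num_rows, const int num_columns,
                                         const float *d_in, float *d_out)
{
    dim3 blocks(num_rows, 1, 1);      /* one block per row of the input matrix */
    dim3 threads(1, 256, 1);          /* 256 threads stride across the columns of that row */
    reduce_columns<<<blocks, threads, sizeof(float) * threads.y>>>(num_rows, num_columns, d_in, d_out);
}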
/* Tiled Matrix Multiplication on GPU: d_C = d_A * d_B, with column-major storage via INDX.
 *
 * Params:
 * a_rows    = number of rows of d_A
 * a_columns = number of columns of d_A
 * b_rows    = number of rows of d_B
 * b_columns = number of columns of d_B
 * d_C       = matrix to store the result in
 * c_rows    = number of rows of d_C
 * c_columns = number of columns of d_C
 * */
__global__ void mat_mul(const int a_rows, const int a_columns,
const int b_rows, const int b_columns,
float * d_A, float * d_B, float * d_C,
const int c_rows, const int c_columns) {
/* setup some constants for later use */
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * TILE_WIDTH;
const int ibx = blockIdx.x * TILE_WIDTH;
const int row = iby + ty;
const int col = ibx + tx;
/* shared memory arrays for A and B */
__shared__ double as[TILE_WIDTH][TILE_WIDTH];
__shared__ double bs[TILE_WIDTH][TILE_WIDTH];
/* space for C to be held in registers */
float value = 0;
int tmp_col;
int tmp_row;
for (int i = 0; i < ceil(a_columns/(float)TILE_WIDTH); i++) {
tmp_col = i * TILE_WIDTH + tx;
if (tmp_col < a_columns && row < a_rows) {
as[ty][tx] = d_A[tmp_col * a_rows + row];
} else {
as[ty][tx] = 0.0;
}
tmp_row = i * TILE_WIDTH + ty;
if (tmp_row < b_rows && col < b_columns) {
bs[ty][tx] = d_B[col * b_rows + tmp_row];
} else {
bs[ty][tx] = 0.0;
}
__syncthreads();
for (int j = 0; j < TILE_WIDTH; j++) {
value += as[threadIdx.y][j] * bs[j][threadIdx.x];
}
__syncthreads();
}
if (row < c_rows && col < c_columns) {
int row_map = blockIdx.y * blockDim.y + threadIdx.y;
int col_map = blockIdx.x * blockDim.x + threadIdx.x;
d_C[INDX(row_map, col_map, c_rows)] = value;
}
}
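/* Hypothetical usage sketch (not part of the original code): one way to size the launch for the
 * tiled kernel above, mirroring the TILE_WIDTH-based configuration used later in execute_MStep.
 * blockIdx.x tiles the columns of d_C and blockIdx.y tiles its rows; the helper name is an
 * illustration only. */
static void launch_mat_mul_sketch(const int a_rows, const int a_columns,
                                  const int b_rows, const int b_columns,
                                  float *d_A, float *d_B, float *d_C)
{
    dim3 threads(TILE_WIDTH, TILE_WIDTH, 1);
    dim3 blocks((b_columns + TILE_WIDTH - 1) / TILE_WIDTH,   /* tiles across the columns of C */
                (a_rows    + TILE_WIDTH - 1) / TILE_WIDTH,   /* tiles across the rows of C */
                1);
    mat_mul<<<blocks, threads>>>(a_rows, a_columns, b_rows, b_columns,
                                 d_A, d_B, d_C, a_rows, b_columns);
}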
/* Update entity posteriors by doing the matrix multiplication and the division in one step.
 * Same params as mat_mul, with the additional float *divisor, which must have one entry per row of d_C.
 * The constants apply the operation
 * (numerator_constant + x_{i,j}) / (denominator_constant + divisor_i)
 */
__global__ void update_entities(const int a_rows, const int a_columns,
const int b_rows, const int b_columns,
float * d_A, float * d_B,
float * d_C, const int c_rows,
const int c_columns, float * divisor,
float numerator_constant, float denominator_constant)
{
/* setup some constants for later use */
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * TILE_WIDTH;
const int ibx = blockIdx.x * TILE_WIDTH;
const int row = iby + ty;
const int col = ibx + tx;
/* shared memory arrays for A and B */
__shared__ double as[TILE_WIDTH][TILE_WIDTH];
__shared__ double bs[TILE_WIDTH][TILE_WIDTH];
/* space for C to be held in registers */
float value = 0;
int tmp_col;
int tmp_row;
for (int i = 0; i < ceil(a_columns/(float)TILE_WIDTH); i++) {
tmp_col = i * TILE_WIDTH + tx;
if (tmp_col < a_columns && row < a_rows) {
as[ty][tx] = d_A[tmp_col * a_rows + row];
} else {
as[ty][tx] = 0.0;
}
tmp_row = i * TILE_WIDTH + ty;
if (tmp_row < b_rows && col < b_columns) {
bs[ty][tx] = d_B[col * b_rows + tmp_row];
} else {
bs[ty][tx] = 0.0;
}
__syncthreads();
for (int j = 0; j < TILE_WIDTH; j++) {
value += as[threadIdx.y][j] * bs[j][threadIdx.x];
}
__syncthreads();
}
if (row < c_rows && col < c_columns) {
int row_map = blockIdx.y * blockDim.y + threadIdx.y;
int col_map = blockIdx.x * blockDim.x + threadIdx.x;
d_C[INDX(row_map, col_map, c_rows)] = (numerator_constant + value) / (divisor[row_map] + denominator_constant);
}
}
/* Sums across columns of a matrix using cublas matrix operations */
void cublas_column_reduce( float * d_A, const int num_rows, const int num_columns, float * d_y){
const float alpha = 1.0;
float beta = 0.0;
/* Make a vector of ones for multiplication */
float *h_x, *d_x;
h_x = (float *) malloc(sizeof(float) * num_columns);
for (int i = 0; i < num_columns; i++)
h_x[INDX(i,0,num_columns)] = (float) 1.0;
/* Copy to device memory */
checkCUDA(cudaMalloc((void **) &d_x, sizeof(float) * num_columns));
checkCUDA(cudaMemcpy(d_x, h_x, sizeof(float) * num_columns, cudaMemcpyHostToDevice));
/* Start Timers */
checkCUBLAS(cublasCreate(&handle));
checkCUBLAS(cublasSgemv(handle, CUBLAS_OP_N,
num_rows, num_columns,
&alpha,
d_A, num_rows,
                d_x, 1,
&beta,
                d_y, 1));
/* End Timers */
/*Free the ones vector*/
checkCUDA(cudaFree(d_x));
free(h_x);
/* Host copy isn't needed */
}
/* Fills the dynamic float array data, of size (rows, cols), with uniformly distributed random
 * values in (0, 1] shifted by 1e-2 so that no entry is exactly zero.
 *
 * Params:
 * float * data: array to be filled
 * int rows: number of rows in array
 * int cols: number of columns to be filled
 * int ld: number of rows per column (leading dimension)
 */
void RandomInit(float *data, int rows, int cols, int ld) {
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
data[INDX(i, j, ld)] = (static_cast <float> (rand()) / static_cast <float> (RAND_MAX)) + 1.e-2;
}
}
}
/* See header file for description*/
void runEM(float *TFDF, int const num_complaints,
int const vocab_size, float *company_vec,
int const num_companies, float *issue_vec,
int const num_issues, float *product_vec,
int const num_products, int const num_defects,
float *defect_priors, float *defect_posteriors,
float *company_posteriors, float *issue_posteriors,
float *product_posteriors, float *word_posteriors,
const float tol, const int maxIter, const char * log_file_name,
const char *results_file_name
) {
/* checkCUBLAS( cublasCreate( &cublasHandle ) );*/
printf("------------------------------------ \n");
printf("COPYING HOST DATA TO DEVICE MEMORY\n");
int thread_size;
float elapsedTime;
float old_likelihood, delta_likelihood;
float *d_defect_priors, *d_defect_posteriors,
*d_company_posteriors, *d_product_posteriors,
*d_issue_posteriors,*d_word_posteriors;
/* ALLOCATE AND COPY FOR PRIORS AND POSTERIORS */
checkCUDA(cudaMalloc((void **) &d_defect_priors, sizeof(float) * num_defects));
checkCUDA(cudaMemcpy(d_defect_priors, defect_priors, sizeof(float) * num_defects,
cudaMemcpyHostToDevice));
checkCUDA(cudaMalloc((void **) &d_defect_posteriors, sizeof(float) * num_defects * num_complaints));
checkCUDA(cudaMemcpy(d_defect_posteriors, defect_posteriors, sizeof(float) * num_defects * num_complaints,
cudaMemcpyHostToDevice));
checkCUDA(cudaMalloc((void **) &d_company_posteriors, sizeof(float) * num_defects * num_companies));
checkCUDA(cudaMemcpy(d_company_posteriors, company_posteriors, sizeof(float) * num_defects * num_companies,
cudaMemcpyHostToDevice));
checkCUDA(cudaMalloc((void **) &d_product_posteriors, sizeof(float) * num_defects * num_products));
checkCUDA(cudaMemcpy(d_product_posteriors, product_posteriors, sizeof(float) * num_defects * num_products,
cudaMemcpyHostToDevice));
checkCUDA(cudaMalloc((void **) &d_issue_posteriors, sizeof(float) * num_defects * num_issues));
checkCUDA(cudaMemcpy(d_issue_posteriors, issue_posteriors, sizeof(float) * num_defects * num_issues,
cudaMemcpyHostToDevice));
checkCUDA(cudaMalloc((void **) &d_word_posteriors, sizeof(float) * num_defects * vocab_size));
checkCUDA(cudaMemcpy(d_word_posteriors, word_posteriors, sizeof(float) * num_defects * vocab_size,
cudaMemcpyHostToDevice));
/* ALLOCATE AND COPY FOR EMPIRICAL DATA (i.e the entity data of all complaints ) */
float *d_TFDF, *d_company_vec,*d_product_vec,*d_issue_vec;
checkCUDA(cudaMalloc((void **) &d_TFDF, sizeof(float) * num_complaints * vocab_size));
checkCUDA(cudaMemcpy(d_TFDF, TFDF, sizeof(float) * num_complaints * vocab_size, cudaMemcpyHostToDevice));
checkCUDA(cudaMalloc((void **) &d_company_vec, sizeof(float) * num_complaints));
checkCUDA(cudaMemcpy((void **) d_company_vec, company_vec, sizeof(float) * num_complaints, cudaMemcpyHostToDevice));
checkCUDA(cudaMalloc((void **) &d_product_vec, sizeof(float) * num_complaints));
checkCUDA(cudaMemcpy(d_product_vec, product_vec, sizeof(float) * num_complaints, cudaMemcpyHostToDevice));
checkCUDA(cudaMalloc((void **) &d_issue_vec, sizeof(float) * num_complaints));
checkCUDA(cudaMemcpy((void **) d_issue_vec, issue_vec, sizeof(float) * num_complaints, cudaMemcpyHostToDevice));
/* CREATE THE EXPANDED MATRICES */
/* Calculate Host Values that will not change */
float* h_expanded_company, *h_expanded_product, *h_expanded_issue, *h_ones_matrix, *h_ones_vector;
checkCUDA(cudaMallocHost((void**)&h_expanded_company, sizeof(float) * num_companies*num_complaints));
memset(h_expanded_company, 0, sizeof(float)*num_companies*num_complaints);
checkCUDA(cudaMallocHost((void**)&h_expanded_product, sizeof(float) * num_products*num_complaints));
memset(h_expanded_product, 0, sizeof(float)*num_products*num_complaints);
checkCUDA(cudaMallocHost((void**)&h_expanded_issue, sizeof(float)*num_issues*num_complaints));
memset(h_expanded_issue, 0, sizeof(float)*num_issues*num_complaints);
checkCUDA(cudaMallocHost((void**)&h_ones_matrix, sizeof(float) * num_defects * vocab_size));
memset(h_ones_matrix, 0, sizeof(float)*num_defects*vocab_size);
checkCUDA(cudaMallocHost((void**)&h_ones_vector, sizeof(float) * num_defects ));
memset(h_ones_vector, 0, sizeof(float)*num_defects);
for (int i = 0; i < num_defects; i++) {
for (int j = 0; j < vocab_size; j++) {
h_ones_matrix[INDX(i, j, num_defects)] = (float) 1.0;
}
h_ones_vector[INDX(i, 0, num_defects)] = (float) 1.0;
}
for (int i = 0; i < num_complaints; i++){
h_expanded_company[INDX(i, (int) company_vec[INDX(i,1,1)], num_complaints)] = 1;
h_expanded_issue[INDX(i, (int) issue_vec[INDX(i,1,1)], num_complaints)] = 1;
h_expanded_product[INDX(i, (int) product_vec[INDX(i,1,1)],num_complaints)] = 1;
}
/* Allocate and copy to device */
float * d_expanded_company, * d_expanded_product, * d_expanded_issue;
checkCUDA(cudaMalloc(&d_expanded_company, sizeof(float)*num_complaints*num_companies));
checkCUDA(cudaMemcpy(d_expanded_company, h_expanded_company,
sizeof(float)*num_complaints*num_companies, cudaMemcpyHostToDevice));
checkCUDA(cudaMalloc(&d_expanded_product, sizeof(float)*num_complaints*num_products));
checkCUDA(cudaMemcpy(d_expanded_product, h_expanded_product,
sizeof(float)*num_complaints*num_products, cudaMemcpyHostToDevice));
checkCUDA(cudaMalloc(&d_expanded_issue, sizeof(float)*num_complaints*num_issues));
checkCUDA(cudaMemcpy(d_expanded_issue, h_expanded_issue,
sizeof(float)*num_complaints*num_issues, cudaMemcpyHostToDevice));
/* FInd the sum across all words for each complaint */
float *d_TFDF_SUM;
checkCUDA(cudaMalloc((void **) &d_TFDF_SUM, sizeof(float) * num_complaints));
/* create thread elements */
dim3 blocks(num_complaints, 1, 1);
thread_size = 256;
dim3 threads(1, thread_size,1);
cudaEvent_t start, stop;
cudaEvent_t start_total, stop_total;
cudaError_t err;
FILE *s ;
s = fopen(log_file_name, "w");
checkCUDA(cudaEventCreate(&start));
checkCUDA(cudaEventCreate(&stop));
checkCUDA(cudaEventRecord(start, 0));
reduce_columns<<<blocks, threads, sizeof(float)*threads.y>>>(num_complaints, vocab_size, d_TFDF, d_TFDF_SUM);
err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
checkCUDA(cudaEventRecord(stop, 0));
checkCUDA(cudaEventSynchronize(stop));
checkCUDA(cudaEventElapsedTime(&elapsedTime, start, stop));
/* print GPU CUBLAS timing information */
fprintf(s, "Total time GPU KERNAL for TFDF SUM is %f sec\n", elapsedTime / 1000.0f);
checkCUDA(cudaEventDestroy(start));
checkCUDA(cudaEventDestroy(stop));
/* Sanity check to make sure the sum reduce worked */
// /* Find the sum_{words} for each complaint of TFDF using CUBLAS */
// float * d_cublas_sum;
// checkCUDA(cudaMalloc(&d_cublas_sum, sizeof(float)*num_complaints));
// cublas_column_reduce(d_TFDF, num_complaints, vocab_size, d_cublas_sum);
old_likelihood = -INFINITY;
delta_likelihood = 10000000;
float *new_likelihood;
float *d_likelihood;
fprintf(stdout, "-----------Beggining Expectation Maximization Routine on %d complaints and %d words---------\n",
num_complaints, vocab_size);
checkCUDA(cudaEventCreate(&start_total));
checkCUDA(cudaEventCreate(&stop_total));
checkCUDA(cudaEventRecord(start_total, 0));
int iter = 0;
double total_estep_time = 0.0;
double total_mstep_time = 0.0;
    while ((delta_likelihood > tol || iter < 10) && iter < maxIter) {
iter = iter + 1;
threads.x = num_defects;
threads.y = THREADS_PER_BLOCK_Y;
blocks.x = ceil(num_complaints / threads.y) + 1;
blocks.y = 1;
new_likelihood = (float *)malloc(sizeof(float)*blocks.x);
checkCUDA(cudaMalloc((void **) &d_likelihood, sizeof(float) * blocks.x));
checkCUDA(cudaMemset(d_likelihood, 0, sizeof(float) * blocks.x));
checkCUDA(cudaEventCreate(&start));
checkCUDA(cudaEventCreate(&stop));
checkCUDA(cudaEventRecord(start, 0));
eStep <<< blocks, threads >>> (num_complaints, vocab_size,
num_defects, d_defect_priors,
d_defect_posteriors, d_company_posteriors,
d_issue_posteriors, d_product_posteriors,
d_word_posteriors, d_TFDF,
d_company_vec, d_issue_vec,
d_product_vec, d_likelihood);
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("YOO Error: %s\n", cudaGetErrorString(err));
exit(0);
}
checkCUDA(cudaEventRecord(stop));
cudaEventSynchronize(stop);
checkCUDA(cudaEventElapsedTime(&elapsedTime, start, stop));
total_estep_time += elapsedTime/ 1000.0f;
fprintf(s, "---------Total time For E_STEP on GPU is %f sec ---------\n", elapsedTime / 1000.0f);
checkCUDA(cudaEventDestroy(start));
checkCUDA(cudaEventDestroy(stop));
checkCUDA(cudaMemcpy(new_likelihood, d_likelihood, sizeof(float)*blocks.x,cudaMemcpyDeviceToHost));
float total_likelihood = 0.0;
for (int i = 0; i < blocks.x; i++) {
total_likelihood += new_likelihood[i];
}
delta_likelihood = float(fabsf(old_likelihood -total_likelihood ));
fprintf(s,"(OLD LIKELIHOOD = %f, UPDATED LIKELIHOOD = %f , Change in Likelihood =%f)\n",
old_likelihood,total_likelihood, (total_likelihood-old_likelihood));
// printf("Change in Likelihood is %f:\n", delta_likelihood);
old_likelihood = total_likelihood;
//
// fprintf(s, "--------------DOING M-STEP WITH CUBLAS---------------------- \n");
//
//
checkCUDA(cudaEventCreate(&start));
checkCUDA(cudaEventCreate(&stop));
checkCUDA(cudaEventRecord(start, 0));
M_STEP_CUBLAS(num_complaints, vocab_size,
num_defects, d_defect_priors,
d_defect_posteriors, d_company_posteriors,
d_issue_posteriors, d_product_posteriors,
d_word_posteriors, d_TFDF,
d_expanded_company, d_expanded_issue,
d_expanded_product, d_TFDF_SUM,
num_companies, num_products,
num_issues
);
checkCUDA(cudaEventRecord(stop, 0));
checkCUDA(cudaEventSynchronize(stop));
float elapsedTime;
checkCUDA(cudaEventElapsedTime(&elapsedTime, start, stop));
total_mstep_time += elapsedTime/ 1000.0f;
fprintf(s, "Total time GPU M Step %f sec\n", elapsedTime / 1000.0f);
checkCUDA(cudaEventDestroy(start));
checkCUDA(cudaEventDestroy(stop));
//
// printf("--------------DOING M-STEP KERNEL---------------------- \n");
//
//
// checkCUDA(cudaEventCreate(&start));
// checkCUDA(cudaEventCreate(&stop));
// checkCUDA(cudaEventRecord(start, 0));
// execute_MStep(num_complaints, vocab_size,
// num_defects, d_defect_priors,
// d_defect_posteriors, d_company_posteriors,
// d_issue_posteriors, d_product_posteriors,
// d_word_posteriors, d_TFDF,
// d_expanded_company, d_expanded_issue,
// d_expanded_product,
// d_TFDF_SUM, num_companies,
// num_products, num_issues,0
// );
//
// checkCUDA(cudaEventRecord(stop));
// cudaEventSynchronize(stop);
// checkCUDA(cudaEventElapsedTime(&elapsedTime, start, stop));
// total_mstep_time += elapsedTime/ 1000.0f;
//
// checkCUDA(cudaEventDestroy(start));
// checkCUDA(cudaEventDestroy(stop));
// fprintf(s, "--------------Total time For M-STEP on GPU is %f sec\n--------------", elapsedTime / 1000.0f);
        checkCUDA(cudaFree(d_likelihood));
        free(new_likelihood);   /* allocated anew each iteration, so release it here to avoid a leak */
}
checkCUDA(cudaEventRecord(stop_total, 0));
checkCUDA(cudaEventSynchronize(stop_total));
checkCUDA(cudaEventElapsedTime(&elapsedTime, start_total, stop_total));
fprintf(s, "Total time till convergece is %f sec | %d iterations \n", total_estep_time + total_mstep_time, iter);
fprintf(s, "Average Time of eStep is %f sec: \n", total_estep_time/iter);
fprintf(s, "Average Time of MStep is %f sec: \n", total_mstep_time/iter);
fprintf(s, "Finally Likelihood %f\n", old_likelihood);
fprintf(s, "Change in likelihood %f\n", delta_likelihood);
checkCUDA(cudaMemcpy(defect_posteriors, d_defect_posteriors, sizeof(float)*num_defects*num_complaints, cudaMemcpyDeviceToHost));
checkCUDA(cudaMemcpy(defect_priors, d_defect_priors, sizeof(float)*num_defects, cudaMemcpyDeviceToHost));
checkCUDA(cudaMemcpy(word_posteriors, d_word_posteriors, sizeof(float)*num_defects*vocab_size, cudaMemcpyDeviceToHost));
checkCUDA(cudaMemcpy(product_posteriors, d_product_posteriors, sizeof(float)*num_defects*num_products, cudaMemcpyDeviceToHost));
checkCUDA(cudaMemcpy(issue_posteriors, d_issue_posteriors, sizeof(float)*num_defects*num_issues, cudaMemcpyDeviceToHost));
    checkCUDA(cudaMemcpy(company_posteriors, d_company_posteriors, sizeof(float)*num_defects*num_companies, cudaMemcpyDeviceToHost));
fclose(s);
FILE *f = fopen(results_file_name, "w");
fprintf(stdout, "---------DONE---------\n");
if (f == NULL)
{
printf("Error opening file!\n");
exit(1);
}
for (int i=0; i < num_companies; i++){
for (int j=0; j < num_defects; j++){
fprintf(f,"COMPANY, %d, DEFECT, %d, POSTERIOR, %f \n", i, j,company_posteriors[INDX(j, i, num_defects)] );
}
}
for (int i=0; i < num_issues; i++){
for (int j=0; j < num_defects; j++){
fprintf(f,"ISSUE, %d, DEFECT,%d, POSTERIOR, %f \n", i, j,issue_posteriors[INDX(j, i, num_defects)] );
}
}
for (int i=0; i < vocab_size; i++){
for (int j=0; j < num_defects; j++){
fprintf(f,"WORD, %d, DEFECT %d, POSTERIOR, %f \n", i, j,word_posteriors[INDX(j, i, num_defects)] );
}
}
for (int i=0; i < num_complaints; i++){
for (int j=0; j < num_defects; j++){
fprintf(f,"Complaint, %d, DEFECT, %d, POSTERIOR: %f \n", i, j,defect_posteriors[INDX(j, i, num_defects)] );
}
}
for (int j=0; j < num_defects; j++){
fprintf(f,"DEFECT, %d, , Prior : %f \n", j,defect_priors[j] );
}
fclose(f);
checkCUDA(cudaFree(d_defect_posteriors));
checkCUDA(cudaFree(d_defect_priors));
checkCUDA(cudaFree(d_word_posteriors));
checkCUDA(cudaFree(d_product_posteriors));
checkCUDA(cudaFree(d_issue_posteriors));
checkCUDA(cudaFree(d_TFDF));
checkCUDA(cudaFree(d_TFDF_SUM));
checkCUDA(cudaFree(d_expanded_company));
checkCUDA(cudaFree(d_expanded_issue));
checkCUDA(cudaFree(d_expanded_product));
checkCUDA(cudaFree(d_issue_vec));
checkCUDA(cudaFree(d_company_vec));
checkCUDA(cudaFree(d_product_vec));
}
__global__ void elementwise_division(int const num_rows, int const num_columns,
float *d_denominator, float *d_numerator,
float *output, float numerator_lambda,
float denominator_lamda)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_columns; i += blockDim.x * gridDim.x) {
output[INDX(threadIdx.y, i, num_rows)] =
(float) ((numerator_lambda + d_numerator[INDX(threadIdx.y, i, num_rows)]) /
(denominator_lamda + d_denominator[INDX(threadIdx.y,0, num_rows)]));
}
}
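/* Launch contract for the kernel above (noted for clarity, not in the original): threadIdx.y
 * selects the row, so the block must be shaped (threads_per_block, num_rows) and the 1-D grid
 * only needs enough x-blocks for the grid-stride loop over the columns, i.e. launches should use
 * <<<blocks, threads>>> with threads = dim3(THREADS_PER_BLOCK_Y, num_defects, 1). */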
void execute_MStep(int const num_complaints, int const vocab_size,
int const num_defects, float *d_defect_priors,
float *d_defect_posteriors, float *d_company_posteriors,
float *d_issue_posteriors, float *d_product_posteriors,
float *d_word_posteriors, float *d_TFDF,
float *d_expanded_company_vec, float *d_expanded_issue_vec,
float *d_expanded_product_vec,
float *d_TFDF_SUM, int const num_companies,
int const num_products, int const num_issues,
int check_kernal)
{
cudaError_t err;
double temp;
float epsilon = 1e-8;
float *d_denominator, *d_defect_posterior_sum;
float *h_denominator, * h_defect_posterior_sum,*h_defect_prior;
dim3 threads, blocks;
/* printf("----------UPDATING THE WORD POSTERIORS INSIDE MATRIX MULTIPLICATION---------------\n"); */
h_denominator = (float *) malloc(sizeof(float) * num_defects);
checkCUDA(cudaMalloc(&d_denominator, sizeof(float) * num_defects));
threads.x = TILE_WIDTH;
threads.y = TILE_WIDTH;
blocks.x = ceil(1 + threads.x - 1) / threads.x;
blocks.y = ceil((num_defects + threads.y - 1) / threads.y);
mat_mul <<< blocks, threads >>> (num_defects, num_complaints,
num_complaints, 1,
d_defect_posteriors, d_TFDF_SUM,
d_denominator, num_defects, 1);
if (check_kernal) {
temp = 0.0;
float *h_denominator_cublas;
float *d_denominator_cublas;
h_denominator_cublas = (float *) malloc(sizeof(float) * num_defects);
checkCUDA(cudaMalloc(&d_denominator_cublas, sizeof(float) * num_defects))
cublas_mat_mul(num_defects, num_complaints, num_complaints, 1, d_defect_posteriors, d_TFDF_SUM,
d_denominator_cublas, num_defects,
1, 1.0, 0);
checkCUDA(cudaMemcpy(h_denominator, d_denominator, sizeof(float) * num_defects, cudaMemcpyDeviceToHost));
checkCUDA(cudaMemcpy(h_denominator_cublas, d_denominator_cublas, sizeof(float) * num_defects,
cudaMemcpyDeviceToHost));
for (int i = 0; i < num_defects; i++) {
temp += (h_denominator[INDX(i, 0, num_defects)] - h_denominator_cublas[INDX(i, 0, num_defects)])
* (h_denominator[INDX(i, 0, num_defects)] - h_denominator_cublas[INDX(i, 0, num_defects)]);
}
printf("error is %f\n", temp);
if (temp > 10) printf("FAIL\n");
else printf("PASSED ACCURACY TEST FOR DENOMINATOR \n");
free(h_denominator_cublas);
checkCUDA(cudaFree(d_denominator_cublas));
}
blocks.x = ceil((vocab_size + threads.x - 1) / threads.x);
blocks.y = ceil((num_defects + threads.y - 1) / threads.y);
update_entities <<< blocks, threads >>> (num_defects, num_complaints,
num_complaints, vocab_size,
d_defect_posteriors, d_TFDF,
d_word_posteriors, num_defects,
vocab_size, d_denominator,
1, vocab_size);
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("YOO Error: %s\n", cudaGetErrorString(err));
exit(0);
}
/* printf("----------------------SUMMING POSTERIOR FOR DEFECT-----------------------\n");*/
blocks.x = num_defects;
blocks.y =1;
threads.x = 1;
threads.y = 256;
checkCUDA(cudaMalloc(&d_defect_posterior_sum, sizeof(float)*num_defects));
reduce_columns<<<blocks, threads, sizeof(float)*threads.y>>>(num_defects, num_complaints,
d_defect_posteriors, d_defect_posterior_sum);
err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
/* printf("-------------------------UPDATING ENTITIES---------------------\n"); */
free(h_denominator);
checkCUDA(cudaFree(d_denominator));
/*Companies */
threads.x = TILE_WIDTH;
threads.y = TILE_WIDTH;
blocks.x = ceil(num_companies + threads.x - 1) / threads.x;
blocks.y = ceil((num_defects + threads.y - 1) / threads.y);
update_entities <<< blocks, threads >>> (num_defects, num_complaints,
num_complaints, num_companies,
d_defect_posteriors, d_expanded_company_vec,
d_company_posteriors, num_defects,
num_companies, d_defect_posterior_sum,
1, num_companies);
/*Issues */
blocks.x = ceil(num_issues + threads.x - 1) / threads.x;
update_entities <<< blocks, threads >>> (num_defects, num_complaints,
num_complaints, num_issues,
d_defect_posteriors, d_expanded_issue_vec,
                                             d_issue_posteriors, num_defects,
num_issues, d_defect_posterior_sum,
1, num_issues);
/*Products */
blocks.x = ceil(num_products + threads.x - 1) / threads.x;
update_entities <<<blocks, threads >>> (num_defects, num_complaints,
num_complaints, num_products,
d_defect_posteriors, d_expanded_product_vec,
                                            d_product_posteriors, num_defects,
num_products, d_defect_posterior_sum,
1, num_products);
/* printf("----------UPDATING Priors--------------\n");*/
h_defect_posterior_sum = (float *) malloc(sizeof(float)*num_defects);
h_defect_prior = (float *) malloc(sizeof(float)*num_defects);
checkCUDA(cudaMemcpy(h_defect_posterior_sum, d_defect_posterior_sum,
sizeof(float)*num_defects,cudaMemcpyDeviceToHost ));
for(int i=0; i < num_defects; i ++){
if (h_defect_posterior_sum[i] < epsilon){
printf("prior %d is too small: %f \n", i,h_defect_posterior_sum[i] );
h_defect_prior[i] = epsilon;
}else{
h_defect_prior[i] = (float) h_defect_posterior_sum[i]/num_complaints;
}
}
checkCUDA(cudaMemcpy(d_defect_priors, h_defect_prior, sizeof(float)*num_defects,cudaMemcpyHostToDevice));
/* printf("------------------------------------ \n");*/
checkCUDA(cudaFree(d_defect_posterior_sum));
free(h_defect_posterior_sum);
free(h_defect_prior);
}
void cublas_mat_mul(const int a_rows, const int a_columns,
const int b_rows, const int b_columns,
float * d_A, float * d_B,
float * d_C, const int c_rows,
const int c_columns, float alpha,
float beta)
{
checkCUBLAS(cublasSgemm(
handle, CUBLAS_OP_N, CUBLAS_OP_N,
a_rows, b_columns, b_rows,
&alpha, d_A,
a_rows, d_B, b_rows,
&beta, d_C, a_rows
));
}
void M_STEP_CUBLAS(int const num_complaints, int const vocab_size,
int const num_defects, float *d_defect_priors,
float *d_defect_posteriors, float *d_company_posteriors,
float *d_issue_posteriors, float *d_product_posteriors,
float *d_word_posteriors, float *d_TFDF,
float *d_expanded_company_vec, float *d_expanded_issue_vec,
float *d_expanded_product_vec, float *d_TFDF_SUM, int const num_companies,
int const num_products, int const num_issues
) {
cudaError_t err;
checkCUBLAS(cublasCreate(&handle));
float epsilon = 1e-6;
float * h_defect_posterior_sum,*h_defect_prior, *d_numerator,
*d_denominator;
/* printf("----------UPDATING THE WORD POSTERIORS ---------------\n"); */
/* Numerator */
checkCUDA(cudaMalloc(&d_numerator, sizeof(float) * num_defects * vocab_size));
cublas_mat_mul(num_defects, num_complaints,
num_complaints, vocab_size,
d_defect_posteriors, d_TFDF,
d_numerator, num_defects,
vocab_size, 1.0, 0.0);
/* Denominator */
checkCUDA(cudaMalloc(&d_denominator, sizeof(float) * num_defects))
cublas_mat_mul(num_defects, num_complaints, num_complaints, 1, d_defect_posteriors, d_TFDF_SUM,
d_denominator, num_defects,
1, 1.0, 0);
/* Elementwise divison */
int threads_per_block = THREADS_PER_BLOCK_Y;
dim3 threads( threads_per_block,num_defects, 1);
dim3 blocks((num_complaints / threads_per_block) + 1, 1, 1);
    elementwise_division<<<blocks, threads >>> (num_defects, vocab_size,
d_denominator, d_numerator,
d_word_posteriors, (float) 1.0,
(float) vocab_size);
err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
checkCUDA(cudaFree(d_numerator));
checkCUDA(cudaFree(d_denominator));
/* printf("----------SUMMING POSTERIOR FOR DEFECT--------------\n");*/
float * h_x;
float *d_x;
h_x = (float *) malloc(sizeof(float) * num_complaints);
for (int i = 0; i < num_complaints; i++) {
h_x[INDX(i, 0, num_complaints)] = (float) 1.0;
}
checkCUDA(cudaMalloc(&d_x, sizeof(float) * num_complaints));
checkCUDA(cudaMemcpy(d_x, h_x, sizeof(float) * num_complaints, cudaMemcpyHostToDevice));
float *d_defect_posterior_sum;
checkCUDA(cudaMalloc(&d_defect_posterior_sum, sizeof(float) * num_defects));
cublas_mat_mul(num_defects, num_complaints,
num_complaints, 1,
d_defect_posteriors, d_x,
d_defect_posterior_sum, num_defects,
1, 1.0, 0);
/* -----------------Updating Entities---------------------------- */
checkCUDA(cudaMalloc(&d_numerator, sizeof(float)*num_companies*num_defects));
/* Companies */
cublas_mat_mul(num_defects, num_complaints,
num_complaints, num_companies,
d_defect_posteriors, d_expanded_company_vec,
d_numerator, num_defects,
1, 1.0, 0);
    elementwise_division <<< blocks, threads >>>(num_defects, num_companies,
d_defect_posterior_sum, d_numerator,
d_company_posteriors, (float) 1.0,
(float) num_companies);
err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
/* Products */
checkCUDA(cudaFree(d_numerator));
checkCUDA(cudaMalloc(&d_numerator, sizeof(float)*num_products*num_defects));
cublas_mat_mul(num_defects, num_complaints,
num_complaints, num_products,
d_defect_posteriors, d_expanded_product_vec,
d_numerator, num_defects,
1, 1.0, 0);
    elementwise_division <<< blocks, threads >>> (num_defects, num_products,
d_defect_posterior_sum, d_numerator,
d_product_posteriors, (float) 1.0,
(float) num_products);
err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
checkCUDA(cudaFree(d_numerator));
/* Issues */
checkCUDA(cudaMalloc(&d_numerator, sizeof(float)*num_issues*num_defects));
cublas_mat_mul(num_defects, num_complaints,
num_complaints, num_issues,
                   d_defect_posteriors, d_expanded_issue_vec,
d_numerator, num_defects,
1, 1.0, 0);
    elementwise_division <<< blocks, threads >>> (num_defects, num_issues,
d_defect_posterior_sum, d_numerator,
d_issue_posteriors, (float) 1.0,
(float) num_issues);
err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
/* -----------------Updating Priors---------------------------- */
h_defect_posterior_sum = (float *) malloc(sizeof(float)*num_defects);
h_defect_prior = (float *) malloc(sizeof(float)*num_defects);
checkCUDA(cudaMemcpy(h_defect_posterior_sum, d_defect_posterior_sum,
sizeof(float)*num_defects,cudaMemcpyDeviceToHost ));
for(int i=0; i < num_defects; i ++){
if (h_defect_posterior_sum[i] < epsilon){
printf("prior %d is too small: %f \n", i,h_defect_posterior_sum[i] );
h_defect_prior[i] = epsilon;
}else{
h_defect_prior[i] = (float) h_defect_posterior_sum[i] / num_complaints;
}
}
checkCUDA(cudaMemcpy(d_defect_priors, h_defect_prior, sizeof(float)*num_defects,cudaMemcpyHostToDevice));
checkCUDA(cudaFree(d_numerator));
checkCUDA(cudaFree(d_defect_posterior_sum));
free(h_defect_posterior_sum);
free(h_defect_prior);
}
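/* Summary of the CUBLAS M-step above (added for clarity, not in the original). With
 * gamma_{d,i} = p(d | x_i), the updates are Laplace-smoothed ratios:
 *     p(w | d) = (1 + sum_i gamma_{d,i} * TFDF_{i,w}) / (vocab_size + sum_i gamma_{d,i} * sum_w TFDF_{i,w})
 *     p(e | d) = (1 + sum_i gamma_{d,i} * 1[entity_i = e]) / (num_entities + sum_i gamma_{d,i})
 *     p(d)     = sum_i gamma_{d,i} / num_complaints, clamped to epsilon when sum_i gamma_{d,i} < epsilon,
 * where e ranges over companies, products, and issues respectively.
 */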
void readMatrixFromFile(char *fileName,
float *matrix,
int const rows,
int const cols,
int const ld) {
FILE *ifp;
ifp = fopen(fileName, "r");
if (ifp == NULL) {
fprintf(stderr, "Error opening file %s\n", fileName);
exit(911);
} /* end if */
for (int row = 0; row < rows; row++) {
for (int col = 0; col < cols; col++) {
if (!fscanf(ifp, " %f",
&matrix[INDX(row, col, ld)])) {
printf("%d\n", INDX(row, col, ld));
printf("error in element %d and %d\n", row, col);
fprintf(stderr, "error reading training matrix file \n");
perror("scanf:");
exit(911);
}
/* end if */
} /* end for row */
} /* end for col */
fclose(ifp);
return;
}
|
60b97143cd66bc4d4a4a823320e7fa3392c33f0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "include/bingham/cuda_wrapper.h"
#include "hiprand/hiprand.h"
#include "bingham/olf.h"
#include <math.h>
#define MAX(x,y) ((x) > (y) ? (x) : (y))
#define MIN(x,y) ((x) < (y) ? (x) : (y))
#define cu_malloc(x, sz, msg) do{ if (hipMalloc(x, sz) != hipSuccess) printf(msg); } while (0)
#define cu_free(x, msg) do{ if (hipFree(x) != hipSuccess) printf(msg); } while (0)
hiprandGenerator_t gen;
//#define CUDA_LAUNCH_BLOCKING 1
__device__ __constant__ int big_primes[100] = {996311, 163573, 481123, 187219, 963323, 103769, 786979, 826363, 874891, 168991, 442501, 318679, 810377, 471073, 914519, 251059, 321983, 220009, 211877, 875339, 605603, 578483, 219619, 860089, 644911, 398819, 544927, 444043, 161717, 301447, 201329, 252731, 301463, 458207, 140053, 906713, 946487, 524389, 522857, 387151, 904283, 415213, 191047, 791543, 433337, 302989, 445853, 178859, 208499, 943589, 957331, 601291, 148439, 296801, 400657, 829637, 112337, 134707, 240047, 669667, 746287, 668243, 488329, 575611, 350219, 758449, 257053, 704287, 252283, 414539, 647771, 791201, 166031, 931313, 787021, 520529, 474667, 484361, 358907, 540271, 542251, 825829, 804709, 664843, 423347, 820367, 562577, 398347, 940349, 880603, 578267, 644783, 611833, 273001, 354329, 506101, 292837, 851017, 262103, 288989};
__device__ __constant__ double b_SR[3] = {0.2878, -5.6214, 7.7247};
__device__ __constant__ double b_SN[3] = {0.1521, -7.1290, 10.7090};
__device__ __constant__ double b_SL[3] = {0.2238, -5.1827, 6.8242};
__device__ __constant__ double b_SA[3] = {0.1618, -6.3992, 8.0207};
__device__ __constant__ double b_SB[3] = {0.2313, -6.3463, 8.0651};
__device__ __constant__ double b_ER[3] = {0.3036, 0.2607, -125.8843};
__device__ __constant__ double b_EN[3] = {0.1246, 1.4406, -185.8350};
__device__ __constant__ double b_EL[3] = {0.2461, 0.2624, -140.0192};
__device__ __constant__ double b_EA[3] = {0.1494, 0.2114, -139.4324};
__device__ __constant__ double b_EB[3] = {0.2165, 0.2600, -135.5203};
__device__ __constant__ double round1_dthresh = .05; //TODO: make this a param
__device__ inline double cu_sigmoid(double x, const double *b)
{
return b[0] + (1 - b[0]) / (1 + exp(-b[1]-b[2]*x));
}
__device__ inline double cu_logistic(double x, double *b)
{
return 1.0 / (1.0 + exp(-x*b[1]-b[0]));
}
/*int gpu_xi[10000000];
int gpu_yi[10000000];
double gpu_cloud[100000000];*/
#define THREADS_KNN 128 // Constant so we can allocate shared memory easier
#define KNN_SIZE 30
const int num_components = 15;
__device__ __constant__ int cu_num_components = num_components;
__device__ __constant__ int xyz_idx = 0, normal_idx = 1, vis_idx = 2, random_walk_idx = 3, edge_idx = 4, edge_vis_idx = 5, edge_occ_idx = 6, L_idx = 7, A_idx = 8, B_idx = 9, fpfh_idx = 10,
specularity_idx = 11, segment_affinity_idx = 12, segment_idx = 13, table_idx = 14;
void copy_double_matrix_to_gpu(cu_double_matrix_t *dev_dest, double **host_src, int n, int m) {
dev_dest->n = n;
dev_dest->m = m;
if (hipMalloc(&(dev_dest->ptr), m*n*sizeof(double)) != hipSuccess) {
printf("double 2d malloc\n");
}
if (hipMemcpy(dev_dest->ptr, host_src[0], n * m * sizeof(double), hipMemcpyHostToDevice) != hipSuccess) {
printf("double 2d copy\n");
}
}
void copy_int_matrix_to_gpu(cu_int_matrix_t *dev_dest, int **host_src, int n, int m) {
dev_dest->n = n;
dev_dest->m = m;
if (hipMalloc(&(dev_dest->ptr), m*n*sizeof(int)) != hipSuccess) {
printf("int 2d malloc \n");
}
if (hipMemcpy(dev_dest->ptr, host_src[0], n * m * sizeof(int), hipMemcpyHostToDevice) != hipSuccess) {
printf("int 2d copy\n");
}
}
void copy_double_matrix3d_to_gpu(cu_double_matrix3d_t *dev_dest, double ***host_src, int n, int m, int p) {
dev_dest->n = n; dev_dest->m = m; dev_dest->p = p;
if (hipMalloc(&(dev_dest->ptr), n * m * p * sizeof(double)) != hipSuccess) {
printf("3d malloc\n");
}
if (hipMemcpy(dev_dest->ptr, host_src[0][0], n * m * p * sizeof(double), hipMemcpyHostToDevice)) {
printf("3d copy\n");
}
}
void copy_double_arr_to_gpu(cu_double_arr_t *dev_dest, double *host_src, int n) {
dev_dest->n = n;
if (hipMalloc(&(dev_dest->ptr), n * sizeof(double)) != hipSuccess) {
printf("double arr malloc\n");
}
if (hipMemcpy(dev_dest->ptr, host_src, n * sizeof(double), hipMemcpyHostToDevice) != hipSuccess) {
printf("double arr copy\n");
}
}
void copy_int_arr_to_gpu(cu_int_arr_t *dev_dest, int *host_src, int n) {
dev_dest->n = n;
if (hipMalloc(&(dev_dest->ptr), n * sizeof(int)) != hipSuccess) {
printf("int arr malloc\n");
}
if (hipMemcpy(dev_dest->ptr, host_src, n * sizeof(int), hipMemcpyHostToDevice) != hipSuccess) {
printf("int arr copy\n");
}
}
__device__ void cu_quaternion_to_rotation_matrix(double R[][3], double q[]) {
double a = q[0];
double b = q[1];
double c = q[2];
double d = q[3];
R[0][0] = a*a + b*b - c*c - d*d;
R[0][1] = 2*b*c - 2*a*d;
R[0][2] = 2*b*d + 2*a*c;
R[1][0] = 2*b*c + 2*a*d;
R[1][1] = a*a - b*b + c*c - d*d;
R[1][2] = 2*c*d - 2*a*b;
R[2][0] = 2*b*d - 2*a*c;
R[2][1] = 2*c*d + 2*a*b;
R[2][2] = a*a - b*b - c*c + d*d;
}
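/* Sanity check (illustrative values, not in the original): the identity quaternion q = (1, 0, 0, 0)
 * yields R = I, and a rotation of pi/2 about the z-axis, q = (cos(pi/4), 0, 0, sin(pi/4)), yields
 * R = [[0, -1, 0], [1, 0, 0], [0, 0, 1]]. */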
__device__ double cu_dot(double x[], double y[], int n) {
int i;
double z = 0.0;
for (i = 0; i < n; i++)
z += x[i]*y[i];
return z;
}
__device__ void cu_matrix_vec_mult_3(double *y, double A[][3], double *x, int n) {
int i;
if (y == x) { // dbug
printf("**************FIX CU_MATRIX_VEC_MULT CALL!\n");
}
for (i = 0; i < n; i++)
y[i] = cu_dot(A[i], x, 3);
}
// matrix multiplication, Z = X*Y, where X is 2-by-3 and Y is 3-by-4 (so Z is 2-by-4)
__device__ void cu_matrix_mult_2_3_4(double Z[][4], double X[][3], double Y[][4])
{
int i, j, k;
for (i = 0; i < 2; i++) { // row i
for (j = 0; j < 4; j++) { // column j
Z[i][j] = 0;
for (k = 0; k < 3; k++)
Z[i][j] += X[i][k]*Y[k][j];
}
}
}
__device__ void cu_vec_matrix_mult_7(double y[], double x[], double A[][7], int n)
{
int i, j;
if (y == x) {
printf("****************FIX vec_matrix_mult call!\n");
}
else {
for (j = 0; j < 7; j++) {
y[j] = 0;
for (i = 0; i < n; i++)
y[j] += x[i]*A[i][j];
}
}
}
__device__ void cu_vec_matrix_mult_4(double y[], double x[], double A[][4], int n)
{
int i, j;
if (y == x) {
printf("****************FIX vec_matrix_mult call!\n");
}
else {
for (j = 0; j < 4; j++) {
y[j] = 0;
for (i = 0; i < n; i++)
y[j] += x[i]*A[i][j];
}
}
}
// adds two vectors, z = x+y
__device__ void cu_add(double z[], double x[], double y[], int n) {
int i;
for (i = 0; i < n; i++)
z[i] = x[i] + y[i];
}
__device__ double cu_norm(double x[], int n) {
double d = 0.0;
int i;
for (i = 0; i < n; i++)
d += x[i]*x[i];
return sqrt(d);
}
__device__ void cu_normalize(double y[], double x[], int n) {
double d = cu_norm(x, n);
int i;
for (i = 0; i < n; i++)
y[i] = x[i]/d;
}
// compute the pdf of a normal random variable
__device__ double cu_normpdf(double x, double mu, double sigma) {
double dx = x - mu;
return exp(-dx*dx / (2*sigma*sigma)) / (sqrt(2*M_PI) * sigma);
}
// invert a quaternion
__device__ void cu_quaternion_inverse(double q_inv[4], double *q) {
q_inv[0] = q[0];
q_inv[1] = -q[1];
q_inv[2] = -q[2];
q_inv[3] = -q[3];
}
// multiplies a vector by a scalar, y = c*x
__device__ void cu_mult(double y[], double x[], double c, int n)
{
int i;
for (i = 0; i < n; i++)
y[i] = c*x[i];
}
__device__ double cu_dist(double *x, double *y, int n) {
double d = 0.0;
int i;
for (i = 0; i < n; i++)
d += (x[i]-y[i])*(x[i]-y[i]);
return sqrt(d);
}
void cu_init() {
hipError_t err = hipInit(0);
//if (err != 0)
printf("Init error: %d\n", err);
}
void cu_init_model(scope_model_data_t *model_data, cu_model_data_t *cu_model) {
// Allocate all the memory
copy_double_matrix_to_gpu(&(cu_model->points), model_data->pcd_model->points, model_data->pcd_model->num_points, 3);
copy_double_matrix_to_gpu(&(cu_model->normals), model_data->pcd_model->normals, model_data->pcd_model->num_points, 3);
copy_double_arr_to_gpu(&(cu_model->normalvar), model_data->pcd_model->normalvar, model_data->pcd_model->num_points);
copy_double_matrix_to_gpu(&(cu_model->lab), model_data->pcd_model->lab, model_data->pcd_model->num_points, 3);
copy_double_matrix_to_gpu(&(cu_model->ved), model_data->pcd_model->ved, model_data->pcd_model->num_points, 66);
/*copy_double_matrix_to_gpu(&(cu_model->color_avg_cov), model_data->color_model->avg_cov, 3, 3);
copy_int_arr_to_gpu(&(cu_model->color_cnts1), model_data->color_model->cnts[0], model_data->color_model->num_points);
copy_int_arr_to_gpu(&(cu_model->color_cnts2), model_data->color_model->cnts[1], model_data->color_model->num_points);
copy_double_matrix_to_gpu(&(cu_model->color_means1), model_data->color_model->means[0], model_data->color_model->num_points, 3);
copy_double_matrix_to_gpu(&(cu_model->color_means2), model_data->color_model->means[1], model_data->color_model->num_points, 3);
copy_double_matrix3d_to_gpu(&(cu_model->color_cov1), model_data->color_model->covs[0], model_data->color_model->num_points, 3, 3);
copy_double_matrix3d_to_gpu(&(cu_model->color_cov2), model_data->color_model->covs[1], model_data->color_model->num_points, 3, 3);*/
copy_double_matrix_to_gpu(&(cu_model->fpfh_points), model_data->fpfh_model->points, model_data->fpfh_model->num_points, 3);
copy_double_matrix_to_gpu(&(cu_model->fpfh_normals), model_data->fpfh_model->normals, model_data->fpfh_model->num_points, 3);
copy_double_matrix_to_gpu(&(cu_model->fpfh), model_data->fpfh_model->fpfh, model_data->fpfh_model->num_points, model_data->fpfh_model->fpfh_length);
copy_double_matrix_to_gpu(&(cu_model->range_edges_model_views), model_data->range_edges_model->views, model_data->range_edges_model->num_views, 3);
copy_int_arr_to_gpu(&(cu_model->range_edges_view_idx), model_data->range_edges_model->view_idx, model_data->range_edges_model->num_views);
copy_int_arr_to_gpu(&(cu_model->range_edges_view_cnt), model_data->range_edges_model->view_cnt, model_data->range_edges_model->num_views);
copy_double_matrix_to_gpu(&(cu_model->range_edges_points), model_data->range_edges_model->pcd->points, model_data->range_edges_model->pcd->num_points, 3);
hipMalloc(&(cu_model->score_comp_models), sizeof(score_comp_models_t));
hipMemcpy(cu_model->score_comp_models, model_data->score_comp_models, sizeof(score_comp_models_t), hipMemcpyHostToDevice);
//memcpy(&cu_model->score_comp_models, model_data->score_comp_models, sizeof(score_comp_models_t));
cu_model->num_points = model_data->pcd_model->num_points;
cu_model->num_views = model_data->range_edges_model->num_views;
int n_edge = arr_max_i(model_data->range_edges_model->view_cnt, model_data->range_edges_model->num_views);
cu_model->max_num_edges = n_edge;
}
void cu_init_all_models(scope_model_data_t model_data[], int num_models, cu_model_data_t cu_model[]) {
for (int i = 0; i < num_models; ++i) {
cu_init_model(&model_data[i], &cu_model[i]);
}
}
void cu_init_obs(scope_obs_data_t *obs_data, cu_obs_data_t *cu_obs, scope_params_t *params) {
copy_double_matrix_to_gpu(&(cu_obs->range_image), obs_data->obs_range_image->image, obs_data->obs_range_image->w, obs_data->obs_range_image->h);
copy_int_matrix_to_gpu(&(cu_obs->range_image_cnt), obs_data->obs_range_image->cnt, obs_data->obs_range_image->w, obs_data->obs_range_image->h);
copy_double_matrix3d_to_gpu(&(cu_obs->range_image_points), obs_data->obs_range_image->points, obs_data->obs_range_image->w, obs_data->obs_range_image->h, 3);
copy_double_matrix3d_to_gpu(&(cu_obs->range_image_normals), obs_data->obs_range_image->normals, obs_data->obs_range_image->w, obs_data->obs_range_image->h, 3);
if (params->use_colors)
copy_double_matrix3d_to_gpu(&(cu_obs->obs_lab_image), obs_data->obs_lab_image, 3, obs_data->obs_range_image->w, obs_data->obs_range_image->h);
copy_int_matrix_to_gpu(&(cu_obs->range_image_idx), obs_data->obs_range_image->idx, obs_data->obs_range_image->w, obs_data->obs_range_image->h);
copy_double_matrix_to_gpu(&(cu_obs->range_image_pcd_obs_lab), obs_data->pcd_obs->lab, obs_data->pcd_obs->num_points, 3);
copy_double_matrix_to_gpu(&(cu_obs->fpfh_obs), obs_data->fpfh_obs->fpfh, obs_data->fpfh_obs->num_points, obs_data->fpfh_obs->fpfh_length);
copy_double_matrix_to_gpu(&(cu_obs->edge_image), obs_data->obs_edge_image, obs_data->obs_range_image->w, obs_data->obs_range_image->h);
copy_double_matrix_to_gpu(&(cu_obs->segment_affinities), obs_data->obs_segment_affinities, obs_data->num_obs_segments, obs_data->num_obs_segments);
copy_int_matrix_to_gpu(&(cu_obs->fg_range_image_cnt), obs_data->obs_fg_range_image->cnt, obs_data->obs_fg_range_image->w, obs_data->obs_fg_range_image->h);
copy_int_matrix_to_gpu(&(cu_obs->fg_range_image_idx), obs_data->obs_fg_range_image->idx, obs_data->obs_fg_range_image->w, obs_data->obs_fg_range_image->h);
cu_obs->fg_range_image_data.res = obs_data->obs_fg_range_image->res;
cu_obs->fg_range_image_data.min0 = obs_data->obs_fg_range_image->min[0];
cu_obs->fg_range_image_data.min1 = obs_data->obs_fg_range_image->min[1];
cu_obs->fg_range_image_data.w = obs_data->obs_fg_range_image->w;
cu_obs->fg_range_image_data.h = obs_data->obs_fg_range_image->h;
cu_obs->range_image_data.res = obs_data->obs_range_image->res;
cu_obs->range_image_data.min0 = obs_data->obs_range_image->min[0];
cu_obs->range_image_data.min1 = obs_data->obs_range_image->min[1];
cu_obs->range_image_data.w = obs_data->obs_range_image->w;
cu_obs->range_image_data.h = obs_data->obs_range_image->h;
cu_obs->num_obs_segments = obs_data->num_obs_segments;
// CONTINUE HERE FOR OBS DATA COPYING ********************************
}
void cu_free_all_the_model_things(cu_model_data_t *cu_model) {
hipFree(cu_model->points.ptr);
hipFree(cu_model->normals.ptr);
hipFree(cu_model->normalvar.ptr);
hipFree(cu_model->lab.ptr);
hipFree(cu_model->ved.ptr);
/*hipFree(cu_model->color_avg_cov.ptr);
hipFree(cu_model->color_means1.ptr);
hipFree(cu_model->color_means2.ptr);
hipFree(cu_model->color_cov1.ptr);
hipFree(cu_model->color_cov2.ptr);
hipFree(cu_model->color_cnts1.ptr);
hipFree(cu_model->color_cnts2.ptr);*/
hipFree(cu_model->fpfh.ptr);
hipFree(cu_model->fpfh_points.ptr);
hipFree(cu_model->fpfh_normals.ptr);
hipFree(cu_model->range_edges_model_views.ptr);
hipFree(cu_model->range_edges_points.ptr);
hipFree(cu_model->range_edges_view_idx.ptr);
hipFree(cu_model->range_edges_view_cnt.ptr);
hipFree(cu_model->score_comp_models);
}
void cu_free_all_the_things_all_models(cu_model_data_t cu_model[], int num_models) {
for (int i = 0; i < num_models; ++i) {
cu_free_all_the_model_things(&cu_model[i]);
}
}
void cu_free_all_the_obs_things(cu_obs_data_t *cu_obs, scope_params_t *params) {
hipFree(cu_obs->range_image.ptr);
hipFree(cu_obs->range_image_idx.ptr);
hipFree(cu_obs->range_image_pcd_obs_lab.ptr);
//hipFree(cu_obs->pcd_obs_fpfh.ptr);
hipFree(cu_obs->edge_image.ptr);
hipFree(cu_obs->range_image_points.ptr);
hipFree(cu_obs->range_image_normals.ptr);
hipFree(cu_obs->range_image_cnt.ptr);
hipFree(cu_obs->fg_range_image_cnt.ptr);
hipFree(cu_obs->fg_range_image_idx.ptr);
if (params->use_colors)
hipFree(cu_obs->obs_lab_image.ptr);
hipFree(cu_obs->segment_affinities.ptr);
}
void cu_free_all_the_things_init(scope_params_t *cu_params) {
cu_free(cu_params, "params");
hiprandDestroyGenerator(gen);
}
void cu_free_all_the_things(cu_model_data_t *cu_model, cu_obs_data_t *cu_obs, scope_params_t *cu_params, scope_params_t *params) {
/*
FREE
,,
';;
''
____ ||
; \ ||
\,---'-,-, ||
/ ( o) ||
(o )__,--'-' \ ||
,,,, ;'uuuuu'' ) ;;
\ \ \ ) ) /\//
'--' \'nnnnn' / \
\\ //'------' \
\\ // \ \
\\ // ) )
\\// | |
\\ / |
ALL THE THINGS
*/
cu_free_all_the_things_init(cu_params);
cu_free_all_the_model_things(cu_model);
cu_free_all_the_obs_things(cu_obs, params);
}
void cu_free_all_the_things_mope(cu_model_data_t cu_model[], cu_obs_data_t *cu_obs, scope_params_t *cu_params, int num_models, scope_params_t *params) {
// Free ALL the things!!!
cu_free_all_the_things_init(cu_params);
cu_free_all_the_things_all_models(cu_model, num_models);
cu_free_all_the_obs_things(cu_obs, params);
}
void cu_init_scoring(scope_params_t **cu_params, scope_params_t *params) {
cu_malloc(cu_params, sizeof(scope_params_t), "params");
hipMemcpy(*cu_params, params, sizeof(scope_params_t), hipMemcpyHostToDevice);
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
int mope_seed = time(NULL); // 1368460607; <--- There is still an unresolved issue with this seed
printf("********* mope seed = %d\n", mope_seed);
hiprandSetPseudoRandomGeneratorSeed(gen, mope_seed);
}
void cu_init_scoring_model_obs(scope_model_data_t *model_data, scope_obs_data_t *obs_data, cu_model_data_t *cu_model, cu_obs_data_t *cu_obs, scope_params_t **cu_params, scope_params_t *params) {
cu_init_scoring(cu_params, params);
cu_init_model(model_data, cu_model);
cu_init_obs(obs_data, cu_obs, params);
}
void cu_init_scoring_mope_models_obs(scope_model_data_t *model_data, scope_obs_data_t *obs_data, int num_models, cu_model_data_t cu_model[], cu_obs_data_t *cu_obs,
scope_params_t **cu_params, scope_params_t *params) {
cu_init_scoring(cu_params, params);
// Allocate all the memory
cu_init_all_models(model_data, num_models, cu_model);
cu_init_obs(obs_data, cu_obs, params);
}
__device__ void cu_range_image_xyz2sub(int *i, int *j, cu_range_image_data_t range_image, double xyz[])
{
//TODO: use range image viewpoint
double d = cu_norm(xyz, 3);
double x = atan2(xyz[0], xyz[2]);
double y = acos(xyz[1] / d);
int cx = (int)floor((x - range_image.min0) / range_image.res);
int cy = (int)floor((y - range_image.min1) / range_image.res);
*i = cx;
*j = cy;
if (!((cx >= 0 && cy>=0) && (cx < range_image.w) && (cy < range_image.h))) {
*i = -1;
*j = -1;
//printf("device res = %lf, min0 = %lf, min1 = %lf, w = %d, h = %d\n", range_image.res, range_image.min0, range_image.min1, range_image.w, range_image.h);
//printf("%lf %lf %lf\n", xyz[0], xyz[1], xyz[2]);
}
}
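/* Projection used above (summarized for clarity, not in the original): a point p = (x, y, z) is
 * mapped to spherical image coordinates u = atan2(x, z), v = acos(y / ||p||), and then to pixel
 * indices i = floor((u - min0) / res), j = floor((v - min1) / res); (-1, -1) is returned when the
 * pixel falls outside the w-by-h range image. */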
/*
* compute viewpoint (in model coordinates) for model placement (x,q) assuming observed viewpoint = (0,0,0)
*/
__device__ void cu_model_pose_to_viewpoint(double *vp, double *x, double *q)
{
double q_inv[4];
cu_quaternion_inverse(q_inv, q);
double R_inv[3][3];
cu_quaternion_to_rotation_matrix(R_inv,q_inv);
cu_matrix_vec_mult_3(vp, R_inv, x, 3);
cu_mult(vp, vp, -1, 3);
}
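/* Equivalent formula (added for clarity, not in the original): since R(q^-1) = R(q)^T, the
 * viewpoint computed above is vp = -R(q)^T * x, i.e. the sensor origin mapped back into model
 * coordinates. */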
__global__ void cu_add_matrix_rows_slow(double *out_array, double *in_matrix, int n, int m, int *m_arr) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n)
return;
int limit = m;
if (m_arr)
limit = m_arr[i];
out_array[i] = 0.0;
for (int j = 0; j < limit; ++j) {
out_array[i] += in_matrix[j + i * m];
}
}
__global__ void cu_add_matrix_rows_medium(double *out_array, double *in_matrix, int n, int m, int *m_arr) {
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n || j >= m)
return;
if (m_arr && j >= m_arr[i])
return;
int limit = m;
if (m_arr)
limit = m_arr[i];
//extern __shared__ double tmps[];
__shared__ double tmps[256];
tmps[threadIdx.x] = 0.0;
for (int k = threadIdx.x; k < limit; k += blockDim.x) {
tmps[threadIdx.x] += in_matrix[k + i * m];
}
__syncthreads();
limit = MIN(limit, blockDim.x);
if (j == 0) {
out_array[i] = 0.0;
for (int k = 0; k < limit; ++k) {
out_array[i] += tmps[k];
}
}
}
__global__ void cu_add_matrix_3d_slow(double *out_array, double *in_matrix, int n, int m, int *m_arr, int p) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n)
return;
int limit = m;
if (m_arr)
limit = m_arr[i];
for (int k = 0; k < p; ++k) {
out_array[i*p + k] = 0.0;
}
for (int j = 0; j < limit; ++j) {
for (int k = 0; k < p; ++k) {
out_array[i*p + k] += in_matrix[j * p + i * m * p + k];
}
}
}
__global__ void cu_add_matrix_3d_medium(double *out_array, double *in_matrix, int n, int m, int *m_arr, int p) {
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n || j >= m)
return;
int limit1 = m;
if (m_arr)
limit1 = m_arr[i];
int limit2 = MIN(limit1, blockDim.x);
//extern __shared__ double tmps[];
__shared__ double tmps[256];
for (int l = 0; l < p; ++l) { // Outer loop to save shared memory
tmps[threadIdx.x] = 0.0;
for (int k = j; k < limit1; k += blockDim.x) {
tmps[threadIdx.x] += in_matrix[k * p + i * m * p + l];
}
__syncthreads();
if (j == 0) {
out_array[i * p + l] = 0.0;
for (int k = 0; k < limit2; ++k) {
out_array[i * p + l] += tmps[k];
}
}
__syncthreads();
}
}
__global__ void cu_divide_matrix_with_vector(double *out_matrix, double *in_matrix, double *scaling_array, int n, int m, int *m_arr) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= n || j >= m)
return;
if (m_arr && j >= m_arr[i])
return;
out_matrix[j + i * m] = in_matrix[j + i * m] / scaling_array[i];
}
__global__ void cu_get_validation_points(int *idx, int total_pts, int needed, int num_samples, uint *rands)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (j >= needed || i >= num_samples)
return;
if (needed == total_pts) { // use all the points
idx[j + i * needed] = j;
} else {
idx[j + i * needed] = ((rands[(i << 1)] % total_pts) + (j * (big_primes[rands[(i << 1) + 1] % 100] % total_pts))) % total_pts;
}
}
__global__ void cu_get_sub_cloud_at_pose(double *cloud, cu_double_matrix_t points, double *x, double *q, int *idx, int num_samples, int n)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (j >= n || i >= num_samples)
return;
int i_arr = j + i * n;
double R[3][3];
cu_quaternion_to_rotation_matrix(R, &q[i * 4]);
double dest[3]; // In local memory so we access global memory less
dest[0] = points.ptr[idx[i_arr] * points.m];
dest[1] = points.ptr[idx[i_arr] * points.m + 1];
dest[2] = points.ptr[idx[i_arr] * points.m + 2];
double tmp[3];
cu_matrix_vec_mult_3(tmp, R, dest, 3);
cu_add(dest, tmp, &x[i * 3], 3);
cloud[3 * i_arr] = dest[0]; cloud[3*i_arr + 1] = dest[1]; cloud[3*i_arr + 2] = dest[2];
}
__global__ void cu_get_sub_cloud_normals_rotated(double *cloud_normals, cu_double_matrix_t normals, double *q, int *idx, int num_samples, int n)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (j >= n || i >= num_samples)
return;
int i_arr = j + i * n;
double R[3][3];
cu_quaternion_to_rotation_matrix(R, &q[i * 4]);
double *row;
double dest[3];
row = &normals.ptr[idx[i_arr] * normals.m];
double tmp[3];
tmp[0] = row[0]; tmp[1] = row[1]; tmp[2] = row[2];
cu_matrix_vec_mult_3(dest, R, tmp, 3);
cloud_normals[3*i_arr] = dest[0]; cloud_normals[3*i_arr+1] = dest[1]; cloud_normals[3*i_arr + 2] = dest[2];
}
__global__ void cu_populate_xi_yi(int *xi, int *yi, double *cloud, cu_range_image_data_t range_image_data, int num_samples, int n, int *n_arr) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples || j >= n)
return;
if (n_arr && j >= n_arr[i])
return;
int i_arr = j + i * n;
double dest[3];
dest[0] = cloud[3*i_arr];
dest[1] = cloud[3*i_arr + 1];
dest[2] = cloud[3*i_arr + 2];
cu_range_image_xyz2sub(&xi[i_arr], &yi[i_arr], range_image_data, dest);
if (0)
printf("%d %d %d\n", i_arr, xi[i_arr], yi[i_arr]);
/*if (j == 0) {
printf("res = %lf, min0 = %lf, min1 = %lf, w = %d, h = %d\n", range_image_data.res, range_image_data.min0, range_image_data.min1, range_image_data.w, range_image_data.h);
}*/
}
__global__ void cu_compute_visibility_prob(double *cu_vis_prob, double *cu_cloud, double *cu_normals, int *cu_xi, int *cu_yi, cu_range_image_data_t ri_data,
cu_double_matrix_t range_image, double vis_thresh, int search_radius, int num_samples, int n, int *n_arr) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples || j >= n)
return;
if (n_arr && j >= n_arr[i])
return;
int i_arr = j + i * n;
int xi = cu_xi[i_arr];
int yi = cu_yi[i_arr];
double V[3];
double pt[3];
pt[0] = cu_cloud[3*i_arr]; pt[1] = cu_cloud[3*i_arr + 1]; pt[2] = cu_cloud[3*i_arr + 2];
cu_normalize(V, pt, 3);
if (cu_normals != NULL && cu_dot(V, &cu_normals[3*i_arr], 3) >= -.1) { // normals pointing away
cu_vis_prob[i_arr] = 0.0;
return;
}
if (xi == -1 && yi == -1) {
cu_vis_prob[i_arr] = 0.0;
return;
}
double model_range = cu_norm(pt, 3);
double obs_range = range_image.ptr[xi * range_image.m + yi];
if (search_radius > 0) {
int x0 = MAX(xi - search_radius, 0);
int x1 = MIN(xi + search_radius, ri_data.w - 1);
int y0 = MAX(yi - search_radius, 0);
int y1 = MIN(yi + search_radius, ri_data.h - 1);
int x, y;
for (x = x0; x <= x1; x++)
for (y = y0; y <= y1; y++)
obs_range = MAX(obs_range, range_image.ptr[x * range_image.m + y]);
}
double dR = model_range - obs_range;
cu_vis_prob[i_arr] = (dR < 0 ? 1.0 : cu_normpdf(dR/vis_thresh, 0, 1) / .3989); // .3989 = normpdf(0,0,1)
}
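// For each sample pose, select the stored model viewpoint whose direction has the largest dot product with the sample's viewpoint.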
__global__ void cu_get_viewpoints(int *vi, int num_samples, double *samples_x, double *samples_q, cu_double_matrix_t range_edges_model_views) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
double vp[3];
cu_model_pose_to_viewpoint(vp, &samples_x[3*i], &samples_q[4*i]);
double vi_max = -(1<<19);
int j;
for (j = 0; j < range_edges_model_views.n; ++j) {
double tmp = cu_dot(&range_edges_model_views.ptr[j * range_edges_model_views.m], vp, 3);
if (tmp > vi_max) {
vi[i] = j;
vi_max = tmp;
}
}
}
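// Per-point noise model: blend surface-angle and edge-distance sigmoids into range, normal and lab-color sigmas;
// the normal sigma is clamped below by the model's per-point normal variance.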
__global__ void cu_get_noise_models(scope_noise_model_t *noise_models, double *cloud, double *normals, int *idx, int *vi, cu_double_matrix_t ved, cu_double_arr_t normalvar, int num_samples, int n) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (j >= n || i >= num_samples)
return;
// prep for lookup edge distances for closest model viewpoint
double surface_angles, edge_dists;
// compute sigmas
int i_arr = i * n + j;
double normalized[3];
cu_normalize(normalized, &cloud[3*i_arr], 3);
surface_angles = 1 + cu_dot(normalized, &normals[3 * i_arr], 3);
edge_dists = ved.ptr[idx[i_arr] * ved.m + vi[i]];
noise_models[i_arr].range_sigma = .5*cu_sigmoid(surface_angles, b_SR) + .5*cu_sigmoid(edge_dists, b_ER);
noise_models[i_arr].normal_sigma = .5*cu_sigmoid(surface_angles, b_SN) + .5*cu_sigmoid(edge_dists, b_EN);
noise_models[i_arr].lab_sigma[0] = .5*cu_sigmoid(surface_angles, b_SL) + .5*cu_sigmoid(edge_dists, b_EL);
noise_models[i_arr].lab_sigma[1] = .5*cu_sigmoid(surface_angles, b_SA) + .5*cu_sigmoid(edge_dists, b_EA);
noise_models[i_arr].lab_sigma[2] = .5*cu_sigmoid(surface_angles, b_SB) + .5*cu_sigmoid(edge_dists, b_EB);
noise_models[i_arr].normal_sigma = MAX(noise_models[i_arr].normal_sigma, normalvar.ptr[idx[i_arr]]);
}
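// Rigid-transform each sample's cloud: rotate by q, then translate by x (translation skipped when x is NULL).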
__global__ void cu_transform_cloud(double *cloud2, double *cloud, double *x, double *q, int num_samples, int n, int *n_arr)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples || j >= n)
return;
if (n_arr && j >= n_arr[i])
return;
int i_arr = j + i * n;
double R[3][3];
cu_quaternion_to_rotation_matrix(R,&q[4*i]);
double tmp[3];
cu_matrix_vec_mult_3(tmp, R, &cloud[i_arr*3], 3);
cloud2[3*i_arr] = tmp[0];
cloud2[3*i_arr+1] = tmp[1];
cloud2[3*i_arr+2] = tmp[2];
if (x != NULL) {
cu_add(&cloud2[i_arr*3], &cloud2[i_arr*3], &x[3*i], 3);
}
}
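// Gather rows of 'points' into 'cloud' according to each sample's index array (p values per row).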
__global__ void cu_reorder_rows(double *cloud, cu_double_matrix_t points, int *idx, int n, int m, int p, int *m_arr) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= n || j >= m)
return;
if (m_arr && j >= m_arr[i])
return;
for (int k = 0; k < p; ++k) {
cloud[i * m * p + j * p + k] = points.ptr[idx[j + i * m] * p + k];
}
}
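// Per-point xyz score: search a (2r+1)x(2r+1) range-image window for the closest observed range and score the residual under a Gaussian, weighted by the visibility pmf.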
__global__ void cu_compute_xyz_score_individual(double *xyz_score, double *cloud, int *xi, int *yi, double *vis_pmf, scope_noise_model_t *noise_models, int num_samples, int num_validation_points,
cu_double_matrix_t range_image, cu_range_image_data_t range_image_data, cu_int_matrix_t range_image_cnt, scope_params_t *params)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (j >= num_validation_points || i >= num_samples)
return;
int xyz_score_window = params->xyz_score_window;
int i_arr = j + i * num_validation_points;
xyz_score[i_arr] = 0.0;
if (vis_pmf[i_arr] > .01/(double)num_validation_points) {
double range_sigma = params->range_sigma * noise_models[i_arr].range_sigma;
double model_range = cu_norm(&cloud[3*i_arr], 3);
double dmax = 2*range_sigma;
double dmin = dmax;
int x, y;
int r = xyz_score_window;
for (x = xi[i_arr] - r; x<=xi[i_arr] + r; ++x) {
for (y = yi[i_arr] - r; y <= yi[i_arr] + r; ++y) {
if (x >= 0 && x < (range_image_data.w) && y>=0 && y<(range_image_data.h) && range_image_cnt.ptr[x * range_image_cnt.m + y] > 0) {
double obs_range = range_image.ptr[x * range_image.m + y];
double d = fabs(model_range - obs_range);
if (d < dmin)
dmin = d;
}
}
}
double d = dmin;
xyz_score[i_arr] = vis_pmf[i_arr] * log(cu_normpdf(d, 0, range_sigma));
}
}
__global__ void cu_compute_xyz_score_final(double *xyz_scores, double *score_comps, int num_samples, double *b_xyz, scope_params_t *params, int score_round) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
xyz_scores[i] -= log(cu_normpdf(0, 0, params->range_sigma));
if ((score_round == 2 && params->score2_use_score_comp_models) || (score_round == 3 && params->score3_use_score_comp_models))
xyz_scores[i] = cu_logistic(xyz_scores[i], b_xyz);
if (score_round == 3 && score_comps)
score_comps[i * num_components + xyz_idx] = xyz_scores[i];
double w = 0;
if (score_round == 2)
w = params->score2_xyz_weight;
else
w = params->score3_xyz_weight;
xyz_scores[i] *= w;
}
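// Per-point normal score: penalize 1 - dot(model normal, range-image normal), weighted by the visibility pmf; also accumulate the weights for later normalization.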
__global__ void cu_compute_normal_score_individual(double *normal_score, double *wtot_individual, double *cloud_normals, double *vis_pmf, scope_noise_model_t *noise_models, int num_samples,
int num_validation_points, int *xi, int *yi, cu_int_matrix_t range_image_cnt, cu_double_matrix3d_t range_image_normals, scope_params_t *params, int score_round)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (j >= num_validation_points || i >= num_samples)
return;
int i_arr = j + i * num_validation_points;
double normalvar_thresh = params->normalvar_thresh;
normal_score[i_arr] = 0.0;
wtot_individual[i_arr] = 0.0;
if (vis_pmf[i_arr] > .01/ (double) num_validation_points && noise_models[i_arr].normal_sigma <= normalvar_thresh) {
double normal_sigma = params->normal_sigma * noise_models[i_arr].normal_sigma;
double dmax = 2*normal_sigma;
double d = dmax;
if ((xi[i_arr] != -1 && yi[i_arr] != -1) && range_image_cnt.ptr[xi[i_arr] * range_image_cnt.m + yi[i_arr]] > 0) {
d = 1.0 - cu_dot(&cloud_normals[3*i_arr], &(range_image_normals.ptr[xi[i_arr] * range_image_normals.m * range_image_normals.p + yi[i_arr] * range_image_normals.p]), 3);
d = MIN(d, dmax);
}
normal_score[i_arr] = vis_pmf[i_arr] * log(cu_normpdf(d, 0, normal_sigma));
wtot_individual[i_arr] = vis_pmf[i_arr];
}
}
__global__ void cu_compute_normal_score_final(double *normal_scores, double *score_comps, double *wtot, int num_samples, double *b_normal, scope_params_t *params, int score_round) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
if (wtot[i] > 0.0)
normal_scores[i] /= wtot[i];
normal_scores[i] -= log(cu_normpdf(0, 0, params->normal_sigma));
if ((score_round == 2 && params->score2_use_score_comp_models) || (score_round == 3 && params->score3_use_score_comp_models))
normal_scores[i] = cu_logistic(normal_scores[i], b_normal);
if (score_round == 3 && score_comps)
score_comps[i * num_components + normal_idx] = normal_scores[i];
double w = 0;
if (score_round == 2)
w = params->score2_normal_weight;
else
w = params->score3_normal_weight;
normal_scores[i] *= w;
}
__global__ void cu_compute_vis_score(double *vis_score, double *score_comps, double *vis_sums, int n, scope_params_t *params, int score_round)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n)
return;
vis_score[i] = log(vis_sums[i] / (double) n);
if (score_round == 3)
score_comps[i * num_components + vis_idx] = vis_score[i];
double w = 0;
if (score_round == 2)
w = params->score2_vis_weight;
else
w = params->score3_vis_weight;
vis_score[i] *= w;
}
__global__ void cu_set_mask_for_segment_affinity(int *mask, int *segments, int *num_segments, int num_obs_segments, int num_samples) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples || j >= num_segments[i])
return;
// Assumes mask is initialized to all zeros before kernel execution
mask[segments[j + i * num_obs_segments] + i * num_obs_segments] = 1;
}
// compute the segment affinity score for a scope sample
__global__ void cu_compute_segment_affinity_score_per_seg(double *seg_affinity_score_per_seg, int *segments, int *num_segments, cu_double_matrix_t segment_affinities, int num_obs_segments, int *mask,
int num_samples)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples || j >= num_obs_segments)
return;
int k;
seg_affinity_score_per_seg[j + i * num_obs_segments] = 0.0;
if (mask[j + i * num_obs_segments] == 0) {
for (k = 0; k < num_segments[i]; ++k) {
int s = segments[k + i * num_obs_segments];
double a = MIN(segment_affinities.ptr[s * segment_affinities.m + j], .9);
if (a > 0.5)
seg_affinity_score_per_seg[j + i * num_obs_segments] += log((1-a)/a);
}
}
}
__global__ void cu_compute_segment_affinity_score_final(double *seg_affinity_score, double *score_comps, scope_params_t *params, int score_round, int num_samples) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
seg_affinity_score[i] *= .05;
if (score_round == 3)
score_comps[i * num_components + segment_affinity_idx] = seg_affinity_score[i];
double weight = 0;
if (score_round == 2)
weight = params->score2_segment_affinity_weight;
else
weight = params->score3_segment_affinity_weight;
seg_affinity_score[i] *= weight;
}
__global__ void cu_generate_n_for_range_edge(int *n_out, int *vi, int num_samples, int num_validation_points, cu_int_arr_t range_edges_view_cnt) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
int v_idx = vi[i];
int num_edge_points = range_edges_view_cnt.ptr[v_idx];
int n = num_validation_points;
if (n >= num_edge_points || n == 0) {
n = num_edge_points;
}
n_out[i] = n;
}
__global__ void cu_get_range_edge_idx(int *idx, int *needed, int num_samples, int total_pts, int n, uint *rands, int *vi,cu_int_arr_t range_edges_view_idx)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples || j >= needed[i])
return;
// NOTE(sanja): This might need some fixing if I use the function in a broader sense, like on a CPU version
if (needed[i] <= n) { // use all the points
idx[j + i * total_pts] = j;
} else {
idx[j + i * total_pts] = ((rands[i << 1] % needed[i]) + (j * (big_primes[rands[(i << 1) + 1] % 100] % needed[i]))) % needed[i];
}
int vp_idx = range_edges_view_idx.ptr[vi[i]];
idx[j + i * total_pts] += vp_idx;
}
/*__global__ void cu_get_range_edge_points(double *P, int num_samples, int *n, int *idx, int n_edge, cu_double_matrix_t range_edges_points)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples)
return;
if (j >= n[i])
return;
// get the actual points in the correct pose
P[3 * i * n_edge + 3 * j] = range_edges_points.ptr[3 * idx[j + i * n_edge]];
P[3 * i * n_edge + 3 * j + 1] = range_edges_points.ptr[3 * idx[j + i * n_edge] + 1];
P[3 * i * n_edge + 3 * j + 2] = range_edges_points.ptr[3 * idx[j + i * n_edge] + 2];
}*/
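// Per-point edge score: look up the edge image at the projected pixel, weighted by the visibility pmf.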
__global__ void cu_compute_edge_score_individual(double *edge_score, double *vis_pmf, int *xi, int *yi, cu_double_matrix_t edge_image, int num_samples, int *n, int n_edge) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples || j >= n[i])
return;
edge_score[j + i * n_edge] = 0.0;
if (xi[j + i *n_edge] != -1 && yi[j + i * n_edge] != -1) {
edge_score[j + i * n_edge] = vis_pmf[j + i * n_edge] * edge_image.ptr[xi[j + i *n_edge]*edge_image.m + yi[j + i *n_edge]];
}
}
__global__ void cu_compute_edge_score_final(double *edge_score, double *score_comps, double *vis_score, double *vis_prob_sums, double *occ_score, int num_samples, int *n_arr, double *b_edge, double *b_edge_occ,
scope_params_t *params, int score_round) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
if ((score_round == 2 && params->score2_use_score_comp_models) || (score_round == 3 && params->score3_use_score_comp_models)) {
edge_score[i] = cu_logistic(edge_score[i], b_edge);
if (occ_score)
occ_score[i] = cu_logistic(occ_score[i], b_edge_occ);
}
vis_score[i] = log(vis_prob_sums[i] / (double) n_arr[i]);
if (score_round == 3 && score_comps) {
score_comps[i * num_components + edge_vis_idx] = vis_score[i];
if (occ_score)
score_comps[i * num_components + edge_occ_idx] = occ_score[i];
else
score_comps[i * num_components + edge_occ_idx] = 0.0;
score_comps[i * num_components + edge_idx] = edge_score[i];
}
double w1 = 1.0, w2 = 1.0, w3 = 1.0;
if (score_round == 2) {
w1 = params->score2_edge_weight;
w2 = params->score2_edge_vis_weight;
w3 = params->score2_edge_occ_weight;
}
else {
w1 = params->score3_edge_weight;
w2 = params->score3_edge_vis_weight;
w3 = params->score3_edge_occ_weight;
}
if (occ_score)
edge_score[i] = (w1 * edge_score[i]) + (w2 * vis_score[i]) + (w3 * occ_score[i]);
else
edge_score[i] = (w1 * edge_score[i]) + (w2 * vis_score[i]);
}
__global__ void cu_score_round1(double *scores_ind, int *xi, int *yi, double *cloud, cu_double_matrix_t range_image, int num_samples, int num_validation_points) {
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples || j >= num_validation_points)
return;
double dest[3];
int i_arr = i*num_validation_points;
scores_ind[i_arr + j] = 0.0;
dest[0] = cloud[3*(i_arr + j)]; dest[1] = cloud[3*(i_arr + j)+1]; dest[2] = cloud[3*(i_arr + j) + 2];
if ((xi[i_arr + j] != -1 && yi[i_arr + j] != -1) && range_image.ptr[xi[i_arr + j]*range_image.m + yi[i_arr + j]] > round1_dthresh + cu_norm(dest, 3))
scores_ind[i_arr + j] = -1.0;
}
__global__ void cu_score_round1_final(double *scores, int num_samples, int num_validation_points) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
scores[i] /= (double)num_validation_points;
}
// TODO(sanja): make this a more general function that takes double** or something like that
__global__ void cu_add_all_scores(double *cu_scores, double *cu_xyz_score, double *cu_normal_score, double *cu_vis_score, double *cu_seg_affinity_score, double *cu_edge_scores,
double *cu_fpfh_score, int num_samples) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
cu_scores[i] = cu_xyz_score[i] + cu_normal_score[i] + cu_vis_score[i] + cu_seg_affinity_score[i] + cu_edge_scores[i] + cu_fpfh_score[i];
}
__global__ void cu_add_3_scores(double *cu_scores, double *cu_xyz_score, double *cu_normal_score, double *cu_edge_scores, int num_samples) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
cu_scores[i] = cu_xyz_score[i] + cu_normal_score[i] + cu_edge_scores[i];
}
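// Compute per-point visibility probabilities, their per-sample sums, and the normalized visibility pmf.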
void get_vis_prob_sums_and_pmf(double *vis_prob, double *vis_prob_sums, double *vis_pmf, double *cloud, double *normals, int *xi, int *yi, cu_double_matrix_t range_image, cu_range_image_data_t range_image_data,
int vis_pixel_radius, int num_samples, int n, int *n_arr, scope_params_t *params, dim3 block, dim3 thread, dim3 block_sum, dim3 thread_sum, int slow_sum) {
hipLaunchKernelGGL(( cu_compute_visibility_prob), dim3(block), dim3(thread), 0, 0, vis_prob, cloud, normals, xi, yi, range_image_data, range_image, params->vis_thresh, vis_pixel_radius, num_samples, n, n_arr);
if ( hipSuccess != hipGetLastError() )
printf( "vis_prob!\n" );
if (slow_sum)
hipLaunchKernelGGL(( cu_add_matrix_rows_slow), dim3(block_sum), dim3(thread_sum), 0, 0, vis_prob_sums, vis_prob, num_samples, n, n_arr);
else
//cu_add_matrix_rows_medium<<<block_sum, thread_sum, thread_sum.x * sizeof(double)>>>(vis_prob_sums, vis_prob, num_samples, n, n_arr);
hipLaunchKernelGGL(( cu_add_matrix_rows_medium), dim3(block_sum), dim3(thread_sum), 0, 0, vis_prob_sums, vis_prob, num_samples, n, n_arr);
// TODO(sanja): Optimize. ArrayFire?
if ( hipSuccess != hipGetLastError() )
printf( "Vis prob sums!\n" );
hipLaunchKernelGGL(( cu_divide_matrix_with_vector), dim3(block), dim3(thread), 0, 0, vis_pmf, vis_prob, vis_prob_sums, num_samples, n, n_arr);
if ( hipSuccess != hipGetLastError() )
printf( "Vis pmf!\n" );
}
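// Copy each sample's pose (x, q) from the host sample array into contiguous device buffers.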
void unpack_x_q(double *cu_x, double *cu_q, scope_sample_t *samples, int num_samples) {
double **samples_x = new_matrix2(num_samples, 3);
double **samples_q = new_matrix2(num_samples, 4);
int i;
for (i = 0; i < num_samples; ++i) {
memcpy(samples_x[i], samples[i].x, 3 * sizeof(double));
}
for (i = 0; i < num_samples; ++i) {
memcpy(samples_q[i], samples[i].q, 4 * sizeof(double));
}
hipMemcpy(cu_x, samples_x[0], 3 * num_samples * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(cu_q, samples_q[0], 4 * num_samples * sizeof(double), hipMemcpyHostToDevice);
free_matrix2(samples_x);
free_matrix2(samples_q);
}
void get_range_edge_points(int *cu_n, double *cu_P, int num_samples, int n_edge, int *n_arr, int num_validation_points, cu_model_data_t *cu_model, int *cu_vi, int find_vi, double *cu_x, double *cu_q,
dim3 block, dim3 thread, dim3 block_small, dim3 thread_small) {
if (find_vi) {
hipLaunchKernelGGL(( cu_get_viewpoints), dim3(block_small), dim3(thread_small), 0, 0, cu_vi, num_samples, cu_x, cu_q, cu_model->range_edges_model_views);
if ( hipSuccess != hipGetLastError() )
printf( "Viewpoints!\n" );
}
int *cu_idx_edge;
cu_malloc(&cu_idx_edge, num_samples * n_edge * sizeof(int), "idx_edge");
uint *cu_rands_edge;
cu_malloc(&cu_rands_edge, 2 * num_samples * sizeof(uint), "rands_edge malloc");
hipLaunchKernelGGL(( cu_generate_n_for_range_edge), dim3(block_small), dim3(thread_small), 0, 0, cu_n, cu_vi, num_samples, num_validation_points, cu_model->range_edges_view_cnt);
if ( hipSuccess != hipGetLastError() )
printf( "find n!\n" );
hiprandGenerate(gen, cu_rands_edge, 2*num_samples);
if ( hipSuccess != hipGetLastError() )
printf( "generate rands_edge!\n" );
hipLaunchKernelGGL(( cu_get_range_edge_idx), dim3(block), dim3(thread), 0, 0, cu_idx_edge, cu_n, num_samples, n_edge, num_validation_points, cu_rands_edge, cu_vi, cu_model->range_edges_view_idx);
if ( hipSuccess != hipGetLastError() )
printf( "idx edge!\n" );
hipLaunchKernelGGL(( cu_reorder_rows), dim3(block), dim3(thread), 0, 0, cu_P, cu_model->range_edges_points, cu_idx_edge, num_samples, n_edge, 3, cu_n);
if ( hipSuccess != hipGetLastError() )
printf( "edge pts\n" );
if (!cu_rands_edge)
printf("rands_edge is NULL!\n");
cu_free(cu_rands_edge, "rands_edge free\n");
cu_free(cu_idx_edge, "idx_edge");
}
void get_validation_points(int *cu_idx, int model_points, int num_validation_points, int num_samples, dim3 block, dim3 thread) {
uint *cu_rands;
cu_malloc(&cu_rands, 2 * num_samples * sizeof(uint), "rands");
if (model_points > num_validation_points) {
hiprandGenerate(gen, cu_rands, 2*num_samples);
}
hipLaunchKernelGGL(( cu_get_validation_points), dim3(block), dim3(thread), 0, 0, cu_idx, model_points, num_validation_points, num_samples, cu_rands);
if ( hipSuccess != hipGetLastError() )
printf( "Validation!\n" );
cu_free(cu_rands, "rands free");
}
void compute_xyz_score(double *cu_xyz_score, double *cu_score_comps, double *cu_cloud, int *cu_xi, int *cu_yi, double *cu_vis_pmf, scope_noise_model_t *cu_noise_models, cu_double_matrix_t range_image,
cu_range_image_data_t range_image_data, cu_int_matrix_t range_image_cnt, int num_samples, int num_validation_points, scope_params_t *cu_params, int round,
double *b_xyz, dim3 block_size, dim3 threads_per_block, dim3 block_size_sum, dim3 thread_size_sum, dim3 block_size_small, dim3 thread_size_small) {
int num_total = num_samples * num_validation_points;
double *cu_xyz_score_per_point;
cu_malloc(&cu_xyz_score_per_point, num_total * sizeof(double), "xyz_scores_pp");
hipLaunchKernelGGL(( cu_compute_xyz_score_individual), dim3(block_size), dim3(threads_per_block), 0, 0, cu_xyz_score_per_point, cu_cloud, cu_xi, cu_yi, cu_vis_pmf, cu_noise_models, num_samples, num_validation_points,
range_image, range_image_data, range_image_cnt, cu_params);
if ( hipSuccess != hipGetLastError() )
printf( "xyz individual!\n" );
hipLaunchKernelGGL(( cu_add_matrix_rows_medium), dim3(block_size_sum), dim3(thread_size_sum), thread_size_sum.x * sizeof(double), 0, cu_xyz_score, cu_xyz_score_per_point, num_samples, num_validation_points, NULL);
if ( hipSuccess != hipGetLastError() )
printf( "xyz sums!\n" );
hipLaunchKernelGGL(( cu_compute_xyz_score_final), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_xyz_score, cu_score_comps, num_samples, b_xyz, cu_params, round);
if ( hipSuccess != hipGetLastError() )
printf( "xyz final!\n" );
cu_free(cu_xyz_score_per_point, "xyz_scores_pp");
}
void compute_normal_score(double *cu_normal_score, double *cu_score_comps, double *cu_normals, double *cu_vis_pmf, scope_noise_model_t *cu_noise_models, int num_samples, int num_validation_points,
int *cu_xi, int *cu_yi, cu_int_matrix_t range_image_cnt, cu_double_matrix3d_t range_image_normals, double *b_normal, scope_params_t *cu_params, int round,
dim3 block_size, dim3 threads_per_block, dim3 block_size_sum, dim3 thread_size_sum, dim3 block_size_small, dim3 thread_size_small) {
int num_total = num_samples * num_validation_points;
double *cu_normal_score_per_point;
cu_malloc(&cu_normal_score_per_point, num_total * sizeof(double), "normal_score_pp");
double *cu_wtot_per_point;
cu_malloc(&cu_wtot_per_point, num_total * sizeof(double), "wtot_pp");
hipLaunchKernelGGL(( cu_compute_normal_score_individual), dim3(block_size), dim3(threads_per_block), 0, 0, cu_normal_score_per_point, cu_wtot_per_point, cu_normals, cu_vis_pmf, cu_noise_models, num_samples, num_validation_points, cu_xi, cu_yi,
range_image_cnt, range_image_normals, cu_params, round);
if ( hipSuccess != hipGetLastError() )
printf( "normal individual!\n" );
double *cu_wtot;
cu_malloc(&cu_wtot, num_samples * sizeof(double), "wtot");
hipLaunchKernelGGL(( cu_add_matrix_rows_medium), dim3(block_size_sum), dim3(thread_size_sum), thread_size_sum.x * sizeof(double), 0, cu_normal_score, cu_normal_score_per_point, num_samples, num_validation_points, NULL);
if ( hipSuccess != hipGetLastError() )
printf( "add 1!\n" );
hipLaunchKernelGGL(( cu_add_matrix_rows_medium), dim3(block_size_sum), dim3(thread_size_sum), thread_size_sum.x * sizeof(double), 0, cu_wtot, cu_wtot_per_point, num_samples, num_validation_points, NULL);
if ( hipSuccess != hipGetLastError() )
printf( "add 2!\n" );
hipLaunchKernelGGL(( cu_compute_normal_score_final), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_normal_score, cu_score_comps, cu_wtot, num_samples, b_normal, cu_params, round);
cu_free(cu_normal_score_per_point, "normal_scores_pp");
cu_free(cu_wtot_per_point, "wtot_pp");
cu_free(cu_wtot, "wtot");
}
void compute_edge_score(double *cu_edge_scores, double *cu_score_comps, double *cu_P, cu_range_image_data_t range_image_data, cu_double_matrix_t range_image, int num_samples, int n_edge, int *cu_n,
cu_double_matrix_t edge_image, double *b_edge, double *b_edge_occ, scope_params_t *params, scope_params_t *cu_params, int round,
dim3 block_size_n_edge, dim3 block_size_sum, dim3 thread_size_sum, dim3 block_size_small, dim3 thread_size_small)
{
double *cu_vis_prob_edge, *cu_vis_prob_sums_edge, *cu_vis_pmf_edge;
cu_malloc(&cu_vis_prob_edge, num_samples * n_edge * sizeof(double), "vis_prob_edge");
cu_malloc(&cu_vis_prob_sums_edge, num_samples * sizeof(double), "vis_prob_sums_edge");
cu_malloc(&cu_vis_pmf_edge, num_samples * n_edge * sizeof(double), "vis_pmf_edge");
int *cu_xi_edge;
cu_malloc(&cu_xi_edge, num_samples * n_edge * sizeof(int), "xi");
int *cu_yi_edge;
cu_malloc(&cu_yi_edge, num_samples * n_edge * sizeof(int), "yi");
hipLaunchKernelGGL(( cu_populate_xi_yi), dim3(block_size_n_edge), dim3(thread_size_sum), 0, 0, cu_xi_edge, cu_yi_edge, cu_P, range_image_data, num_samples, n_edge, cu_n);
if ( hipSuccess != hipGetLastError() )
printf( "edge xi yi!\n" );
int vis_pixel_radius = 2;
get_vis_prob_sums_and_pmf(cu_vis_prob_edge, cu_vis_prob_sums_edge, cu_vis_pmf_edge, cu_P, NULL, cu_xi_edge, cu_yi_edge, range_image, range_image_data, vis_pixel_radius, num_samples, n_edge,
cu_n, params, block_size_n_edge, thread_size_sum, block_size_sum, thread_size_sum, 0); // HERE!!!
//cu_n, params, block_size_n_edge, thread_size_sum, block_size_small, thread_size_small, 1); // HERE!!!
double *cu_edge_score_individual;
cu_malloc(&cu_edge_score_individual, num_samples * n_edge * sizeof(double), "edge_score");
//cu_compute_edge_score_individual<<<block_size_sum, thread_size_sum>>>(cu_edge_score_individual, cu_vis_pmf_edge, cu_xi_edge, cu_yi_edge, edge_image, num_samples, cu_n, n_edge);
hipLaunchKernelGGL(( cu_compute_edge_score_individual), dim3(block_size_n_edge), dim3(thread_size_sum), 0, 0, cu_edge_score_individual, cu_vis_pmf_edge, cu_xi_edge, cu_yi_edge, edge_image, num_samples, cu_n, n_edge); // WEIRD
if ( hipSuccess != hipGetLastError() )
printf( "edge score individual!\n" );
//cu_add_matrix_rows_slow<<<block_size_small, thread_size_small>>>(cu_edge_scores, cu_edge_score_individual, num_samples, n_edge, cu_n);
hipLaunchKernelGGL(( cu_add_matrix_rows_medium), dim3(block_size_sum), dim3(thread_size_sum), 0, 0, cu_edge_scores, cu_edge_score_individual, num_samples, n_edge, cu_n);
double *cu_vis_scores;
cu_malloc(&cu_vis_scores, num_samples * sizeof(double), "vis_scores");
hipLaunchKernelGGL(( cu_compute_edge_score_final), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_edge_scores, cu_score_comps, cu_vis_scores, cu_vis_prob_sums_edge, NULL, num_samples, cu_n, b_edge, b_edge_occ, cu_params, round);
if ( hipSuccess != hipGetLastError() )
printf( "edge score final!\n" );
cu_free(cu_edge_score_individual, "edge_score_individual");
cu_free(cu_vis_scores, "vis_scores");
cu_free(cu_vis_prob_sums_edge, "vis_prob_sums_edge");
cu_free(cu_vis_pmf_edge, "vis_pmf_edge");
cu_free(cu_vis_prob_edge, "vis_prob_edge");
cu_free(cu_xi_edge, "xi_edge");
cu_free(cu_yi_edge, "yi_edge");
}
__global__ void cu_compute_fpfh_score_individual(double *cu_fpfh_score_individual, double *cu_cloud, double *cu_fpfh_cloud_f, double *cu_vis_pmf, int *xi, int *yi, cu_int_matrix_t fg_range_image_cnt,
cu_int_matrix_t fg_range_image_idx, cu_double_matrix_t fpfh_obs, int fpfh_length, int num_samples, int fpfh_num_validation_points, scope_params_t *cu_params) {
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples || j >= MIN(fpfh_num_validation_points, fpfh_obs.n))
return;
int i_arr = i * fpfh_num_validation_points + j;
cu_fpfh_score_individual[i_arr] = 0.0;
if (cu_vis_pmf[i_arr] > .01/(double)fpfh_num_validation_points) {
double f_sigma = cu_params->f_sigma;
double dmax = 2*f_sigma; // * 2*noise_models[i].range_sigma; //TODO: get FPFH noise model
double d = dmax;
if (xi[i_arr] != -1 && yi[i_arr] != -1 && fg_range_image_cnt.ptr[xi[i_arr] * fg_range_image_cnt.m + yi[i_arr]] > 0) { // skip points that project outside the foreground range image
int idx = fg_range_image_idx.ptr[xi[i_arr] * fg_range_image_idx.m + yi[i_arr]];
d = cu_dist(&cu_fpfh_cloud_f[i_arr * fpfh_length], &fpfh_obs.ptr[idx * fpfh_obs.m], fpfh_length);
d = MIN(d, dmax);
}
cu_fpfh_score_individual[i_arr] = cu_vis_pmf[i_arr] * log(cu_normpdf(d, 0, f_sigma));
}
}
__global__ void cu_compute_fpfh_score_final(double *cu_fpfh_scores, double *cu_score_comps, int num_samples, double *b_fpfh, scope_params_t *cu_params, int score_round) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
cu_fpfh_scores[i] -= log(cu_normpdf(0, 0, cu_params->f_sigma));
if ((score_round == 2 && cu_params->score2_use_score_comp_models) || (score_round == 3 && cu_params->score3_use_score_comp_models))
cu_fpfh_scores[i] = cu_logistic(cu_fpfh_scores[i], b_fpfh);
cu_score_comps[i * num_components + fpfh_idx] = cu_fpfh_scores[i];
double w = 0;
if (score_round == 2)
w = cu_params->score2_fpfh_weight;
else
w = cu_params->score3_fpfh_weight;
cu_fpfh_scores[i] *= w;
}
void compute_fpfh_score(double *cu_fpfh_scores, double *cu_score_comps, cu_double_matrix_t cu_fpfh_points, cu_double_matrix_t cu_fpfh_normals, cu_double_matrix_t cu_fpfh, cu_double_matrix_t fpfh_obs,
cu_range_image_data_t range_image_data, cu_double_matrix_t range_image, cu_range_image_data_t fg_range_image_data, cu_int_matrix_t fg_range_image_cnt,
cu_int_matrix_t fg_range_image_idx, double *cu_samples_x, double *cu_samples_q, int num_samples, int num_validation_points, double *b_fpfh,
scope_params_t *params, scope_params_t *cu_params, int round) {
int num_fpfh_points = cu_fpfh.n;
int fpfh_length = cu_fpfh.m;
int fpfh_num_validation_points = (num_validation_points > 0 ? num_validation_points : num_fpfh_points);
dim3 threads_per_block(256, 1, 1);
dim3 block_size(ceil(1.0 * fpfh_num_validation_points / threads_per_block.x), num_samples);
dim3 thread_size_small(64);
dim3 block_size_small(ceil(1.0 * num_samples/thread_size_small.x));
dim3 thread_size_sum(256);
dim3 block_size_sum(1, num_samples);
int *cu_fpfh_idx;
cu_malloc(&cu_fpfh_idx, fpfh_num_validation_points * num_samples * sizeof(int), "fpfh_idx_malloc\n");
get_validation_points(cu_fpfh_idx, num_fpfh_points, fpfh_num_validation_points, num_samples, block_size, threads_per_block);
double *cu_cloud;
cu_malloc(&cu_cloud, 3 * fpfh_num_validation_points * num_samples * sizeof(double), "cloud");
hipLaunchKernelGGL(( cu_get_sub_cloud_at_pose), dim3(block_size), dim3(threads_per_block), 0, 0, cu_cloud, cu_fpfh_points, cu_samples_x, cu_samples_q, cu_fpfh_idx, num_samples, fpfh_num_validation_points);
if ( hipSuccess != hipGetLastError() )
printf( "fpfh Subcloud!\n" );
double *cu_normals;
cu_malloc(&cu_normals, 3 * fpfh_num_validation_points * num_samples * sizeof(double), "normals");
hipLaunchKernelGGL(( cu_get_sub_cloud_normals_rotated), dim3(block_size), dim3(threads_per_block), 0, 0, cu_normals, cu_fpfh_normals, cu_samples_q, cu_fpfh_idx, num_samples, fpfh_num_validation_points);
if ( hipSuccess != hipGetLastError() )
printf( "fpfh Normals!\n" );
double *cu_fpfh_cloud_f;
cu_malloc(&cu_fpfh_cloud_f, fpfh_num_validation_points * num_samples * fpfh_length * sizeof(double), "fpfh_cloud_f alloc\n");
hipLaunchKernelGGL(( cu_reorder_rows), dim3(block_size), dim3(threads_per_block), 0, 0, cu_fpfh_cloud_f, cu_fpfh, cu_fpfh_idx, num_samples, fpfh_num_validation_points, fpfh_length, NULL);
if ( hipSuccess != hipGetLastError() )
printf( "reorder rows fpfh!\n" );
//hipMemcpy(gpu_cloud, cu_fpfh_cloud_f, fpfh_length * fpfh_num_validation_points * num_samples * sizeof(double), hipMemcpyDeviceToHost);
double *cu_vis_prob;
cu_malloc(&cu_vis_prob, num_samples * fpfh_num_validation_points * sizeof(double), "fpfh vis_prob");
double *cu_vis_prob_sums;
cu_malloc(&cu_vis_prob_sums, num_samples * sizeof(double), "fpfh vis_prob_sums");
double *cu_vis_pmf;
cu_malloc(&cu_vis_pmf, num_samples * fpfh_num_validation_points * sizeof(double), "fpfh vis_pmf");
int *cu_xi;
cu_malloc(&cu_xi, num_samples * fpfh_num_validation_points * sizeof(int), "fpfh xi");
int *cu_yi;
cu_malloc(&cu_yi, num_samples * fpfh_num_validation_points * sizeof(int), "fpfh yi");
hipLaunchKernelGGL(( cu_populate_xi_yi), dim3(block_size), dim3(threads_per_block), 0, 0, cu_xi, cu_yi, cu_cloud, range_image_data, num_samples, fpfh_num_validation_points, NULL);
if ( hipSuccess != hipGetLastError() )
printf( "fpfh xi yi, vis_pmf!\n" );
get_vis_prob_sums_and_pmf(cu_vis_prob, cu_vis_prob_sums, cu_vis_pmf, cu_cloud, cu_normals, cu_xi, cu_yi, range_image, range_image_data, 0, num_samples, fpfh_num_validation_points, NULL, params,
block_size, threads_per_block, block_size_sum, thread_size_sum, 0);
hipLaunchKernelGGL(( cu_populate_xi_yi), dim3(block_size), dim3(threads_per_block), 0, 0, cu_xi, cu_yi, cu_cloud, fg_range_image_data, num_samples, fpfh_num_validation_points, NULL);
if ( hipSuccess != hipGetLastError() )
printf( "fpfh xi, yi\n" );
//hipMemcpy(gpu_xi, cu_xi, num_samples * fpfh_num_validation_points * sizeof(int), hipMemcpyDeviceToHost);
//hipMemcpy(gpu_yi, cu_yi, num_samples * fpfh_num_validation_points * sizeof(int), hipMemcpyDeviceToHost);
double *cu_fpfh_score_individual;
cu_malloc(&cu_fpfh_score_individual, num_samples * fpfh_num_validation_points * sizeof(double), "fpfh_individual");
hipLaunchKernelGGL(( cu_compute_fpfh_score_individual), dim3(block_size), dim3(threads_per_block), 0, 0, cu_fpfh_score_individual, cu_cloud, cu_fpfh_cloud_f, cu_vis_pmf, cu_xi, cu_yi, fg_range_image_cnt, fg_range_image_idx,
fpfh_obs, fpfh_length, num_samples, fpfh_num_validation_points, cu_params);
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
hipLaunchKernelGGL(( cu_add_matrix_rows_medium), dim3(block_size_sum), dim3(thread_size_sum), 0, 0, cu_fpfh_scores, cu_fpfh_score_individual, num_samples, fpfh_num_validation_points, NULL);
hipLaunchKernelGGL(( cu_compute_fpfh_score_final), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_fpfh_scores, cu_score_comps, num_samples, b_fpfh, cu_params, round);
cu_free(cu_fpfh_idx, "fpfh_idx");
cu_free(cu_cloud, "fpfh_cloud");
cu_free(cu_normals, "fpfh_normals");
cu_free(cu_fpfh_cloud_f, "fpfh_cloud_f");
cu_free(cu_vis_prob, "fpfh vis_prob");
cu_free(cu_vis_prob_sums, "fpfh vis_prob_sums");
cu_free(cu_vis_pmf, "fpfh vis_pmf");
cu_free(cu_xi, "fpfh xi");
cu_free(cu_yi, "fpfh yi");
cu_free(cu_fpfh_score_individual, "fpfh_score individual");
}
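// Score all pose samples on the GPU. Round 1 is a cheap free-space test; rounds 2 and 3 combine xyz, normal,
// visibility, segment-affinity and (optionally) edge scores, and round 3 also records per-sample score components.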
void score_samples(double *scores, scope_sample_t *samples, int num_samples, cu_model_data_t *cu_model, cu_obs_data_t *cu_obs, scope_params_t *cu_params, scope_params_t *params, int num_validation_points,
int model_points, int num_obs_segments, int edge_scoring, int round) {
if (round == 3)
params->num_validation_points = 0;
dim3 threads_per_block(256, 1, 1);
dim3 block_size(ceil(1.0 * num_validation_points / threads_per_block.x), num_samples);
dim3 thread_size_small(64);
dim3 block_size_small(ceil(1.0 * num_samples/thread_size_small.x));
dim3 thread_size_sum(256);
dim3 block_size_sum(1, num_samples);
dim3 thread_size_sum_small(64);
int num_total = num_samples * num_validation_points;
double *cu_samples_x;
cu_malloc(&cu_samples_x, num_samples * 3 * sizeof(double), "samples_x");
double *cu_samples_q;
cu_malloc(&cu_samples_q, num_samples * 4 * sizeof(double), "samples_q");
unpack_x_q(cu_samples_x, cu_samples_q, samples, num_samples);
int i;
int *cu_idx;
cu_malloc(&cu_idx, num_total * sizeof(int), "idxs");
get_validation_points(cu_idx, model_points, num_validation_points, num_samples, block_size, threads_per_block);
// extract transformed model validation features
double *cu_cloud;
cu_malloc(&cu_cloud, 3 * num_total * sizeof(double), "cloud");
hipLaunchKernelGGL(( cu_get_sub_cloud_at_pose), dim3(block_size), dim3(threads_per_block), 0, 0, cu_cloud, cu_model->points, cu_samples_x, cu_samples_q, cu_idx, num_samples, num_validation_points);
if ( hipSuccess != hipGetLastError() )
printf( "Subcloud!\n" );
int *cu_xi;
cu_malloc(&cu_xi, num_total * sizeof(int), "xi");
int *cu_yi;
cu_malloc(&cu_yi, num_total * sizeof(int), "yi");
hipLaunchKernelGGL(( cu_populate_xi_yi), dim3(block_size), dim3(threads_per_block), 0, 0, cu_xi, cu_yi, cu_cloud, cu_obs->range_image_data, num_samples, num_validation_points, NULL);
double *cu_scores;
cu_malloc(&cu_scores, num_samples * sizeof(double), "scores");
if (round == 1) {
double *cu_scores_ind;
cu_malloc(&cu_scores_ind, num_total * sizeof(double), "scores_ind");
hipLaunchKernelGGL(( cu_score_round1), dim3(block_size), dim3(threads_per_block), 0, 0, cu_scores_ind, cu_xi, cu_yi, cu_cloud, cu_obs->range_image, num_samples, num_validation_points);
if ( hipSuccess != hipGetLastError() )
printf( "Round 1 score!\n" );
hipLaunchKernelGGL(( cu_add_matrix_rows_medium), dim3(block_size_sum), dim3(thread_size_sum), 0, 0, cu_scores, cu_scores_ind, num_samples, num_validation_points, NULL);
hipLaunchKernelGGL(( cu_score_round1_final), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_scores, num_samples, num_validation_points);
cu_free(cu_scores_ind, "scores_ind");
} else {
double *cu_score_comps = NULL;
if (round == 3) {
cu_malloc(&cu_score_comps, num_samples * num_components * sizeof(double), "score_comps");
hipMemset(cu_score_comps, 0, num_samples * num_components * sizeof(double));
}
double *cu_normals;
cu_malloc(&cu_normals, 3 * num_total * sizeof(double), "normals");
hipLaunchKernelGGL(( cu_get_sub_cloud_normals_rotated), dim3(block_size), dim3(threads_per_block), 0, 0, cu_normals, cu_model->normals, cu_samples_q, cu_idx, num_samples, num_validation_points);
if ( hipSuccess != hipGetLastError() )
printf( "Normals!\n" );
double *cu_vis_prob;
cu_malloc(&cu_vis_prob, num_total * sizeof(double), "vis_prob");
double *cu_vis_prob_sums;
cu_malloc(&cu_vis_prob_sums, num_samples * sizeof(double), "vis_prob_sums");
double *cu_vis_pmf;
cu_malloc(&cu_vis_pmf, num_total * sizeof(double), "vis_pmf");
get_vis_prob_sums_and_pmf(cu_vis_prob, cu_vis_prob_sums, cu_vis_pmf, cu_cloud, cu_normals, cu_xi, cu_yi, cu_obs->range_image, cu_obs->range_image_data, 0, num_samples, num_validation_points, NULL, params,
block_size, threads_per_block, block_size_sum, thread_size_sum, 0);
int *cu_vi;
cu_malloc(&cu_vi, num_samples * sizeof(int), "vi");
hipLaunchKernelGGL(( cu_get_viewpoints), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_vi, num_samples, cu_samples_x, cu_samples_q, cu_model->range_edges_model_views);
if ( hipSuccess != hipGetLastError() )
printf( "Viewpoints!\n" );
scope_noise_model_t *cu_noise_models;
cu_malloc(&cu_noise_models, num_total * sizeof(scope_noise_model_t), "noise_models");
hipLaunchKernelGGL(( cu_get_noise_models), dim3(block_size), dim3(threads_per_block), 0, 0, cu_noise_models, cu_cloud, cu_normals, cu_idx, cu_vi, cu_model->ved, cu_model->normalvar, num_samples,
num_validation_points);
if ( hipSuccess != hipGetLastError() )
printf( "Noise model!\n" );
// TODO(sanja): Save results before weights kick in
double *cu_xyz_score;
cu_malloc(&cu_xyz_score, num_samples * sizeof(double), "xyz_scores");
compute_xyz_score(cu_xyz_score, cu_score_comps, cu_cloud, cu_xi, cu_yi, cu_vis_pmf, cu_noise_models, cu_obs->range_image, cu_obs->range_image_data, cu_obs->range_image_cnt, num_samples,
num_validation_points, cu_params, round, cu_model->score_comp_models->b_xyz, block_size, threads_per_block, block_size_sum, thread_size_sum, block_size_small, thread_size_small);
double *cu_normal_score;
cu_malloc(&cu_normal_score, num_samples * sizeof(double), "normal_score");
compute_normal_score(cu_normal_score, cu_score_comps, cu_normals, cu_vis_pmf, cu_noise_models, num_samples, num_validation_points, cu_xi, cu_yi, cu_obs->range_image_cnt, cu_obs->range_image_normals,
cu_model->score_comp_models->b_normal, cu_params, round, block_size, threads_per_block, block_size_sum, thread_size_sum, block_size_small, thread_size_small);
double *cu_vis_score;
cu_malloc(&cu_vis_score, num_samples * sizeof(double), "vis_score");
hipLaunchKernelGGL(( cu_compute_vis_score), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_vis_score, cu_score_comps, cu_vis_prob_sums, num_validation_points, cu_params, round);
if ( hipSuccess != hipGetLastError() )
printf( "vis score!\n" );
double *cu_seg_affinity_score;
cu_malloc(&cu_seg_affinity_score, num_samples * sizeof(double), "seg_aff");
hipMemset(cu_seg_affinity_score, 0, num_samples * sizeof(double));
double *cu_seg_affinity_score_per_seg;
int *cu_mask;
int *cu_num_segments;
int *cu_segments_idx;
double *cu_fpfh_score;
cu_malloc(&cu_fpfh_score, num_samples * sizeof(double), "fpfh_score");
hipMemset(cu_fpfh_score, 0, num_samples * sizeof(double));
// TODO(sanja): Figure out how to speed up the prep for segment calculation
if (round >= 3) {
cu_malloc(&cu_seg_affinity_score_per_seg, num_samples * num_obs_segments * sizeof(double), "seg_aff_per_seg");
cu_malloc(&cu_mask, num_samples * num_obs_segments * sizeof(int), "mask");
hipMemset(cu_mask, 0, num_samples * num_obs_segments * sizeof(int));
int *num_segments;
safe_calloc(num_segments, num_samples, int);
for (i = 0; i < num_samples; ++i) {
num_segments[i] = samples[i].num_segments;
}
cu_malloc(&cu_num_segments, num_samples * sizeof(int), "num_segments");
hipMemcpy(cu_num_segments, num_segments, num_samples * sizeof(int), hipMemcpyHostToDevice);
free(num_segments);
int *tmp_segments_idx;
safe_malloc(tmp_segments_idx, num_samples * num_obs_segments, int);
memset(tmp_segments_idx, -1, num_samples * num_obs_segments * sizeof(int));
for (i = 0; i < num_samples; ++i) {
memcpy(&(tmp_segments_idx[i * num_obs_segments]), samples[i].segments_idx, samples[i].num_segments * sizeof(int));
}
cu_malloc(&cu_segments_idx, num_samples * num_obs_segments * sizeof(int), "segments_idx");
hipMemcpy(cu_segments_idx, tmp_segments_idx, num_samples * num_obs_segments * sizeof(int), hipMemcpyHostToDevice);
if ( hipSuccess != hipGetLastError() )
printf( "seg idx memcpy!\n" );
free(tmp_segments_idx);
dim3 block_size_seg(ceil(1.0 * num_obs_segments / thread_size_sum.x), num_samples);
//cu_set_mask_for_segment_affinity<<<block_size_seg, thread_size_sum>>>(cu_mask, cu_segments_idx, cu_num_segments, num_obs_segments, num_samples);
hipLaunchKernelGGL(( cu_set_mask_for_segment_affinity), dim3(block_size_seg), dim3(thread_size_sum), 0, 0, cu_mask, cu_segments_idx, cu_num_segments, num_obs_segments, num_samples);
if ( hipSuccess != hipGetLastError() )
printf( "seg mask!\n" );
hipLaunchKernelGGL(( cu_compute_segment_affinity_score_per_seg), dim3(block_size), dim3(thread_size_small), 0, 0, cu_seg_affinity_score_per_seg, cu_segments_idx, cu_num_segments, cu_obs->segment_affinities, num_obs_segments,
cu_mask, num_samples);
if ( hipSuccess != hipGetLastError() )
printf( "seg per seg!\n" );
hipLaunchKernelGGL(( cu_add_matrix_rows_slow), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_seg_affinity_score, cu_seg_affinity_score_per_seg, num_samples, num_obs_segments, NULL);
hipLaunchKernelGGL(( cu_compute_segment_affinity_score_final), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_seg_affinity_score, cu_score_comps, cu_params, round, num_samples);
if ( hipSuccess != hipGetLastError() )
printf( "seg affinity!\n" );
/*if (params->use_fpfh)
compute_fpfh_score(cu_fpfh_score, cu_score_comps, cu_model->fpfh_points, cu_model->fpfh_normals, cu_model->fpfh, cu_obs->fpfh_obs, cu_obs->range_image_data, cu_obs->range_image,
cu_obs->fg_range_image_data, cu_obs->fg_range_image_cnt, cu_obs->fg_range_image_idx, cu_samples_x, cu_samples_q, num_samples, params->num_validation_points,
cu_model->score_comp_models->b_fpfh, params, cu_params, round);*/
}
double *cu_edge_scores;
cu_malloc(&cu_edge_scores, num_samples * sizeof(double), "edge_scores");
hipMemset(cu_edge_scores, 0, num_samples * sizeof(double));
if ( hipSuccess != hipGetLastError() )
printf( "memset!\n" );
if (edge_scoring) {
int n_edge = cu_model->max_num_edges;
int *cu_n;
cu_malloc(&cu_n, num_samples * sizeof(int), "n");
dim3 block_size_n_edge(ceil(1.0 * n_edge / thread_size_sum.x), num_samples);
double *cu_P;
cu_malloc(&cu_P, num_samples * n_edge * 3*sizeof(double), "cu_P");
get_range_edge_points(cu_n, cu_P, num_samples, n_edge, cu_n, num_validation_points, cu_model, cu_vi, 0, NULL, NULL, block_size_n_edge, thread_size_sum, block_size_small, thread_size_small);
hipLaunchKernelGGL(( cu_transform_cloud), dim3(block_size_n_edge), dim3(thread_size_sum), 0, 0, cu_P, cu_P, cu_samples_x, cu_samples_q, num_samples, n_edge, cu_n);
if ( hipSuccess != hipGetLastError() )
printf( "transform cloud\n" );
compute_edge_score(cu_edge_scores, cu_score_comps, cu_P, cu_obs->range_image_data, cu_obs->range_image, num_samples, n_edge, cu_n, cu_obs->edge_image, cu_model->score_comp_models->b_edge,
cu_model->score_comp_models->b_edge_occ, params, cu_params, round, block_size_n_edge, block_size_sum, thread_size_sum, block_size_small, thread_size_small);
cu_free(cu_n, "n");
cu_free(cu_P, "P");
}
hipLaunchKernelGGL(( cu_add_all_scores), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_scores, cu_xyz_score, cu_normal_score, cu_vis_score, cu_seg_affinity_score, cu_edge_scores, cu_fpfh_score, num_samples);
if ( hipSuccess != hipGetLastError() )
printf( "Final addition!\n" );
if (round == 3) {
double *sample_scores;
safe_malloc(sample_scores, num_samples * num_components, double);
if (hipMemcpy(sample_scores, cu_score_comps, num_samples * num_components * sizeof(double), hipMemcpyDeviceToHost) != hipSuccess) {
printf("sample_scores\n");
}
for (i = 0; i < num_samples; ++i) {
samples[i].num_scores = num_components;
safe_malloc(samples[i].scores, num_components, double);
memcpy(samples[i].scores, &sample_scores[i * num_components], num_components * sizeof(double));
}
}
// NEXT(sanja): Make calls for each score component async.
cu_free(cu_normals, "normals");
cu_free(cu_vis_prob, "vis_prob");
cu_free(cu_vis_prob_sums, "vis_prob_sums");
cu_free(cu_vis_pmf, "vis_pmf");
cu_free(cu_vi, "vi");
cu_free(cu_noise_models, "noise_models");
cu_free(cu_xyz_score, "xyz_scores");
cu_free(cu_normal_score, "normal_scores");
cu_free(cu_vis_score, "vis_score");
cu_free(cu_seg_affinity_score, "seg_aff");
if (round >= 3) {
cu_free(cu_seg_affinity_score_per_seg, "seg_aff_per_seg");
cu_free(cu_mask, "mask");
cu_free(cu_num_segments, "num_segments");
cu_free(cu_segments_idx, "segments_idx");
}
cu_free(cu_edge_scores, "edge_scores");
if (round == 3)
cu_free(cu_score_comps, "score comps");
}
hipMemcpy(scores, cu_scores, num_samples * sizeof(double), hipMemcpyDeviceToHost);
cu_free(cu_samples_x, "samples_x"); cu_free(cu_samples_q, "samples_q");
cu_free(cu_idx, "idx");
cu_free(cu_cloud, "cloud");
cu_free(cu_xi, "xi"); cu_free(cu_yi, "yi");
cu_free(cu_scores, "scores");
hipDeviceSynchronize();
}
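// Central-difference gradient of matrix X at cell (i,j), with one-sided differences at the borders.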
__device__ void cu_matrix_cell_gradient(double *g, int i, int j, double *X, int n, int m)
{
if (i == 0)
g[0] = X[1*m + j] - X[0*m + j];
else if (i == n-1)
g[0] = X[(n-1)*m + j] - X[(n-2)*m + j];
else
g[0] = (X[(i+1)*m + j] - X[(i-1) * m + j]) / 2.0;
if (j == 0)
g[1] = X[i*m + 1] - X[i*m + 0];
else if (j == m-1)
g[1] = X[i*m + m-1] - X[i*m + m-2];
else
g[1] = (X[i*m + j+1] - X[i*m + j-1]) / 2.0;
}
// get the jacobian of R*x w.r.t. q
__device__ void cu_point_rotation_jacobian(double out[][4], double *q, double *x)
{
double q1 = q[0];
double q2 = q[1];
double q3 = q[2];
double q4 = q[3];
double v1 = x[0];
double v2 = x[1];
double v3 = x[2];
out[0][0] = 2*(q1*v1 + q3*v3 - q4*v2); out[0][1] = 2*(q2*v1 + q3*v2 + q4*v3); out[0][2] = 2*(q1*v3 + q2*v2 - q3*v1); out[0][3] = 2*(q2*v3 - q1*v2 - q4*v1);
out[1][0] = 2*(q1*v2 - q2*v3 + q4*v1); out[1][1] = 2*(q3*v1 - q2*v2 - q1*v3); out[1][2] = 2*(q2*v1 + q3*v2 + q4*v3); out[1][3] = 2*(q1*v1 + q3*v3 - q4*v2);
out[2][0] = 2*(q1*v3 + q2*v2 - q3*v1); out[2][1] = 2*(q1*v2 - q2*v3 + q4*v1); out[2][2] = 2*(q4*v2 - q3*v3 - q1*v1); out[2][3] = 2*(q2*v1 + q3*v2 + q4*v3);
}
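// Jacobian of a model point's range-image pixel (i,j) w.r.t. the model pose (x,q), scaled by the image resolution.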
__device__ void cu_range_image_pixel_pose_jacobian(double dij_dxq[][7], cu_range_image_data_t range_image_data, double *model_point, double *x, double *q)
{
// transform model point by model pose (x,q)
double p[3];
double R[3][3];
cu_quaternion_to_rotation_matrix(R,q);
cu_matrix_vec_mult_3(p, R, model_point, 3);
cu_add(p, p, x, 3);
double p1 = p[0];
double p2 = p[1];
double p3 = p[2];
double r = cu_norm(p,3);
// compute jacobian of (i,j) w.r.t. p (and x)
double ci = 1.0 / (p1*p1 + p3*p3);
double cj = r * sqrt(ci);
double dij_dp[2][3] = {{ci*p3, 0, -ci*p1}, {cj*p1*p2, cj*(p2*p2-r*r), cj*p2*p3}};
// compute jacobian of (i,j) w.r.t. q
double dp_dq[3][4];
cu_point_rotation_jacobian(dp_dq, q, model_point);
double dij_dq[2][4];
cu_matrix_mult_2_3_4(dij_dq, dij_dp, dp_dq);
// copy jacobians into dij_dxq
dij_dxq[0][0] = dij_dp[0][0]; dij_dxq[0][1] = dij_dp[0][1]; dij_dxq[0][2] = dij_dp[0][2];
dij_dxq[1][0] = dij_dp[1][0]; dij_dxq[1][1] = dij_dp[1][1]; dij_dxq[1][2] = dij_dp[1][2];
dij_dxq[0][3] = dij_dq[0][0]; dij_dxq[0][4] = dij_dq[0][1]; dij_dxq[0][5] = dij_dq[0][2]; dij_dxq[0][6] = dij_dq[0][3];
dij_dxq[1][3] = dij_dq[1][0]; dij_dxq[1][4] = dij_dq[1][1]; dij_dxq[1][5] = dij_dq[1][2]; dij_dxq[1][6] = dij_dq[1][3];
// divide by range image resolution
cu_mult(dij_dxq[0], dij_dxq[0], 1.0/range_image_data.res, 7);
cu_mult(dij_dxq[1], dij_dxq[1], 1.0/range_image_data.res, 7); // NOTE(sanja): This can probably be one multiplication of length 14, but I am somewhat defensive right now
}
__global__ void cu_edge_score_gradient_individual(double *edge_gradient_score_individual, double *dI_dxq_individual, double *vis_prob, double *vis_pmf, int *xi, int *yi, double *P, double *x, double *q,
cu_double_matrix_t edge_image, cu_range_image_data_t range_image_data, cu_double_matrix_t range_image, int num_samples, int n, int *n_arr) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples || j >= n_arr[i])
return;
int i_arr = j + i * n;
edge_gradient_score_individual[i_arr] = 0.0;
int k;
for (k = 0; k < 7; ++k)
dI_dxq_individual[k + 7*i_arr] = 0.0;
if (vis_prob[i_arr] < .01)
return;
if (xi[i_arr] == -1 || yi[i_arr] == -1)
return;
// add pixel edge score to total score
edge_gradient_score_individual[i_arr] = vis_pmf[i_arr] * edge_image.ptr[xi[i_arr] * edge_image.m + yi[i_arr]];
// get edge image gradient at current pixel
double dI_dij[2];
cu_matrix_cell_gradient(dI_dij, xi[i_arr], yi[i_arr], edge_image.ptr, range_image_data.w, range_image_data.h);
// get gradient of pixel location w.r.t. model pose (x,q)
double dij_dxq[2][7];
cu_range_image_pixel_pose_jacobian(dij_dxq, range_image_data, &P[3*i_arr], &x[3 * i], &q[4*i]);
// get gradient of this point's edge score w.r.t. model pose (x,q)
double dI_dxq[7];
cu_vec_matrix_mult_7(dI_dxq, dI_dij, dij_dxq, 2);
cu_mult(&dI_dxq_individual[7*i_arr], dI_dxq, vis_pmf[i_arr], 7);
}
__global__ void cu_edge_gradient_score_final(double *edge_score, double *G_edge, int num_samples, int n_edge, scope_params_t *params, int scope_round) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
double w = (scope_round==2 ? params->score2_edge_weight : params->score3_edge_weight);
cu_mult(&(G_edge[7*i]), &(G_edge[7*i]), w, 7);
edge_score[i] *= w;
}
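// Project the sampled edge points, compute their visibility, and accumulate the edge score and its gradient w.r.t. (x,q) for every sample.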
void edge_score_gradient(double *cu_edge_gradient_score, double *cu_G_edge, double *cu_samples_x, double *cu_samples_q, double *cu_P2, double *cu_P, int num_samples, int n_edge, int *cu_n,
cu_double_matrix_t range_image, cu_range_image_data_t range_image_data, cu_double_matrix_t edge_image, scope_params_t *cu_params, scope_params_t *params, int scope_round,
dim3 block, dim3 thread, dim3 block_size_sum, dim3 thread_size_sum, dim3 block_size_small, dim3 thread_size_small) {
hipLaunchKernelGGL(( cu_transform_cloud), dim3(block), dim3(thread), 0, 0, cu_P2, cu_P, cu_samples_x, cu_samples_q, num_samples, n_edge, cu_n);
if ( hipSuccess != hipGetLastError() )
printf( "cloud transform!\n" );
int *cu_xi;
cu_malloc(&cu_xi, num_samples*n_edge * sizeof(int), "xi");
int *cu_yi;
cu_malloc(&cu_yi, num_samples*n_edge * sizeof(int), "yi");
hipLaunchKernelGGL(( cu_populate_xi_yi), dim3(block), dim3(thread), 0, 0, cu_xi, cu_yi, cu_P2, range_image_data, num_samples, n_edge, cu_n);
if ( hipSuccess != hipGetLastError() )
printf( "populate xi yi!\n" );
// compute visibility of sampled model edges
int vis_pixel_radius = 2;
double *cu_vis_prob;
cu_malloc(&cu_vis_prob, num_samples * n_edge * sizeof(double), "vis_prob");
double *cu_vis_prob_sums;
cu_malloc(&cu_vis_prob_sums, num_samples * sizeof(double), "vis_prob_sums");
double *cu_vis_pmf;
cu_malloc(&cu_vis_pmf, num_samples * n_edge * sizeof(double), "vis_pmf");
get_vis_prob_sums_and_pmf(cu_vis_prob, cu_vis_prob_sums, cu_vis_pmf, cu_P2, NULL, cu_xi, cu_yi, range_image, range_image_data, vis_pixel_radius, num_samples, n_edge, cu_n, params, block, thread,
// block_size_small, thread_size_small, 1); // HERE!!!
block_size_sum, thread_size_sum, 0);
double *cu_edge_gradient_score_individual;
double *cu_dI_dxq_individual;
cu_malloc(&cu_edge_gradient_score_individual, num_samples * n_edge * sizeof(double), "edge_gradient_score_individual");
cu_malloc(&cu_dI_dxq_individual, num_samples * n_edge * 7 * sizeof(double), "dI_dxq_individual");
hipLaunchKernelGGL(( cu_edge_score_gradient_individual), dim3(block), dim3(thread), 0, 0, cu_edge_gradient_score_individual, cu_dI_dxq_individual, cu_vis_prob, cu_vis_pmf, cu_xi, cu_yi, cu_P, cu_samples_x, cu_samples_q, edge_image,
range_image_data, range_image, num_samples, n_edge, cu_n);
if ( hipSuccess != hipGetLastError() )
printf( "edge score gradient!\n" );
// sum up edge_scores and gradients <--- HERE!!!
//cu_add_matrix_rows_slow<<<block_size_small, thread_size_small>>>(cu_edge_gradient_score, cu_edge_gradient_score_individual, num_samples, n_edge, cu_n);
hipLaunchKernelGGL(( cu_add_matrix_rows_medium), dim3(block_size_sum), dim3(thread_size_sum), 0, 0, cu_edge_gradient_score, cu_edge_gradient_score_individual, num_samples, n_edge, cu_n);
//cu_add_matrix_3d_slow<<<block_size_small, thread_size_small>>>(cu_G_edge, cu_dI_dxq_individual, num_samples, n_edge, cu_n, 7);
hipLaunchKernelGGL(( cu_add_matrix_3d_medium), dim3(block_size_sum), dim3(thread_size_sum), 0, 0, cu_G_edge, cu_dI_dxq_individual, num_samples, n_edge, cu_n, 7);
hipLaunchKernelGGL(( cu_edge_gradient_score_final), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_edge_gradient_score, cu_G_edge, num_samples, n_edge, cu_params, scope_round);
if ( hipSuccess != hipGetLastError() )
printf( "edge gradient final!\n" );
cu_free(cu_xi, "xi");
cu_free(cu_yi, "yi");
cu_free(cu_vis_prob, "vis_prob");
cu_free(cu_vis_prob_sums, "vis_prob_sums");
cu_free(cu_vis_pmf, "vis_pmf");
cu_free(cu_edge_gradient_score_individual, "edge gradient score individual");
cu_free(cu_dI_dxq_individual, "dI_dxq_individual");
}
/*
* get the plane equation coefficients (c[0]*x + c[1]*y + c[2]*z + c[3] = 0) from (point,normal)
*/
__device__ void cu_xyzn_to_plane(double *c, double *point, double *normal)
{
c[0] = normal[0];
c[1] = normal[1];
c[2] = normal[2];
c[3] = -cu_dot(point, normal, 3);
}
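// Per-point xyz+normal score and its gradient w.r.t. pose (x,q), using the plane fitted to the matching range-image cell.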
__global__ void cu_xyzn_score_gradient_individual(double *xyzn_gradient_score_individual, double *G_xyzn_individual, double *vis_prob, double *vis_pmf, int *cu_xi, int *cu_yi,
double *cloud2, double *cloud, double *normals2, double *normals, double *samples_x, double *samples_q,
cu_int_matrix_t range_image_cnt, cu_double_matrix3d_t range_image_points, cu_double_matrix3d_t range_image_normals, scope_noise_model_t *noise_models,
int num_samples, int num_surface_points, scope_params_t *params, int score_round) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples || j >= num_surface_points)
return;
int i_arr = i * num_surface_points + j;
xyzn_gradient_score_individual[i_arr] = 0.0;
int k;
for (k = 0; k < 7; ++k)
G_xyzn_individual[7*i_arr + k] = 0.0;
if (vis_prob[i_arr] < .01)
return;
int xi = cu_xi[i_arr];
int yi = cu_yi[i_arr];
// get range image cell
if (xi == -1 || yi == -1)
return;
double range_sigma = params->range_sigma * noise_models[i_arr].range_sigma;
double normal_sigma = params->normal_sigma * noise_models[i_arr].normal_sigma;
double dmax_xyz = 2*range_sigma;
double dmax_normal = 2*normal_sigma;
double d_xyz = dmax_xyz;
double d_normal = dmax_normal;
double c[4]; // range image cell plane coeffs
if (range_image_cnt.ptr[xi * range_image_cnt.m + yi] > 0) {
// get distance from model point to range image cell plane
cu_xyzn_to_plane(c, &range_image_points.ptr[xi*range_image_points.m * range_image_points.p + yi * range_image_points.p],
&range_image_normals.ptr[xi*range_image_normals.m * range_image_normals.p + yi * range_image_normals.p]);
d_xyz = fabs(cu_dot(c, &cloud2[3*i_arr], 3) + c[3]);
//d_xyz /= noise_models[i].range_sigma;
d_xyz = MIN(d_xyz, dmax_xyz);
d_normal = 1.0 - cu_dot(&normals2[3*i_arr], &range_image_normals.ptr[xi*range_image_normals.m * range_image_normals.p + yi * range_image_normals.p], 3);
//d_normal /= noise_models[i].normal_sigma;
d_normal = MIN(d_normal, dmax_normal);
}
double xyz_weight = (score_round == 2 ? params->score2_xyz_weight : params->score3_xyz_weight);
double normal_weight = (score_round == 2 ? params->score2_normal_weight : params->score3_normal_weight);
xyzn_gradient_score_individual[i_arr] += xyz_weight * vis_pmf[i_arr] * log(cu_normpdf(d_xyz, 0, range_sigma));
xyzn_gradient_score_individual[i_arr] += normal_weight * vis_pmf[i_arr] * log(cu_normpdf(d_normal, 0, normal_sigma));
// get gradient of this point's xyz score w.r.t. model pose (x,q)
if (d_xyz < dmax_xyz) {
double dp_dq[3][4];
cu_point_rotation_jacobian(dp_dq, &(samples_q[4*i]), &cloud[3*i_arr]);
double df_dp[3];
df_dp[0] = c[0]; df_dp[1] = c[1]; df_dp[2] = c[2];
//double rs = range_sigma * noise_models[i].range_sigma;
cu_mult(df_dp, df_dp, -(c[3] + cu_dot(&cloud2[3*i_arr], c, 3)) / (range_sigma*range_sigma), 3);
G_xyzn_individual[7*i_arr + 0] = df_dp[0]; G_xyzn_individual[7*i_arr + 1] = df_dp[1]; G_xyzn_individual[7*i_arr + 2] = df_dp[2];
cu_vec_matrix_mult_4(&G_xyzn_individual[7*i_arr + 3], df_dp, dp_dq, 3);
cu_mult(&G_xyzn_individual[7*i_arr], &G_xyzn_individual[7*i_arr], xyz_weight * vis_pmf[i_arr], 7);
}
// get gradient of this point's normal score w.r.t. model pose (x,q)
if (d_normal < dmax_normal) {
double dpn_dq[3][4];
cu_point_rotation_jacobian(dpn_dq, &(samples_q[4*i]), &normals[3*i_arr]);
double df_dpn[3];
df_dpn[0] = c[0]; df_dpn[1] = c[1]; df_dpn[2] = c[2];
//double ns = normal_sigma * noise_models[i].normal_sigma;
cu_mult(df_dpn, df_dpn, (1 - cu_dot(&normals2[3*i_arr], c, 3)) / (normal_sigma*normal_sigma), 3);
double G_normal[7] = {0,0,0,0,0,0,0};
cu_vec_matrix_mult_4(&G_normal[3], df_dpn, dpn_dq, 3);
cu_mult(G_normal, G_normal, normal_weight * vis_pmf[i_arr], 7);
cu_add(&G_xyzn_individual[7*i_arr], &G_xyzn_individual[7*i_arr], G_normal, 7);
}
}
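// element-wise sum of two num_samples-by-m matrices: cu_G = cu_G_edge + cu_G_xyzn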
__global__ void cu_matrix_add(double *cu_G, double *cu_G_edge, double *cu_G_xyzn, int num_samples, int m) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
for (int k = 0; k < m; ++k) {
cu_G[m * i + k] = cu_G_edge[m * i + k] + cu_G_xyzn[m * i + k];
}
}
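// fill an array of length num_samples with init_val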
__global__ void cu_init_arr(double *cu_best_score, double init_val, int num_samples) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
cu_best_score[i] = init_val;
}
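/*
 * compute the per-sample xyz + normal score (cu_xyzn_gradient_score) and its 7-dim pose gradient
 * (cu_G_xyzn): optionally transform the cloud/normals, compute visibility probabilities, launch the
 * per-point gradient kernel, then sum the per-point scores and gradients over the surface points
 */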
void xyzn_score_gradient(double *cu_xyzn_gradient_score, double *cu_G_xyzn, double *cu_samples_x, double *cu_samples_q, double *cu_cloud2, double *cu_cloud, double *cu_normals2, double *cu_normals,
int transform, scope_noise_model_t *cu_noise_models, cu_range_image_data_t range_image_data, cu_double_matrix_t range_image, cu_int_matrix_t range_image_cnt,
cu_double_matrix3d_t range_image_points, cu_double_matrix3d_t range_image_normals,
int num_samples, int num_surface_points, scope_params_t *cu_params, scope_params_t *params, int score_round,
dim3 block_size, dim3 threads_per_block, dim3 block_size_small, dim3 thread_size_small, dim3 block_size_sum, dim3 thread_size_sum) {
if (transform) {
hipLaunchKernelGGL(( cu_transform_cloud), dim3(block_size), dim3(threads_per_block), 0, 0, cu_cloud2, cu_cloud, cu_samples_x, cu_samples_q, num_samples, num_surface_points, NULL);
hipLaunchKernelGGL(( cu_transform_cloud), dim3(block_size), dim3(threads_per_block), 0, 0, cu_normals2, cu_normals, NULL, cu_samples_q, num_samples, num_surface_points, NULL);
if ( hipSuccess != hipGetLastError() )
printf( "transform cloud!\n" );
}
int *cu_xi;
cu_malloc(&cu_xi, num_samples*num_surface_points * sizeof(int), "xi");
int *cu_yi;
cu_malloc(&cu_yi, num_samples*num_surface_points * sizeof(int), "yi");
hipLaunchKernelGGL(( cu_populate_xi_yi), dim3(block_size), dim3(threads_per_block), 0, 0, cu_xi, cu_yi, cu_cloud2, range_image_data, num_samples, num_surface_points, NULL);
if ( hipSuccess != hipGetLastError() )
printf( "populate xi yi xyzn gradient!\n" );
double *cu_vis_prob;
cu_malloc(&cu_vis_prob, num_samples * num_surface_points * sizeof(double), "vis_prob");
double *cu_vis_prob_sums;
cu_malloc(&cu_vis_prob_sums, num_samples * sizeof(double), "vis_prob_sums");
double *cu_vis_pmf;
cu_malloc(&cu_vis_pmf, num_samples * num_surface_points * sizeof(double), "vis_pmf");
get_vis_prob_sums_and_pmf(cu_vis_prob, cu_vis_prob_sums, cu_vis_pmf, cu_cloud2, cu_normals2, cu_xi, cu_yi, range_image, range_image_data, 0, num_samples, num_surface_points, NULL, params,
block_size, threads_per_block, block_size_sum, thread_size_sum, 0);
double *cu_xyzn_gradient_score_individual;
cu_malloc(&cu_xyzn_gradient_score_individual, num_samples * num_surface_points * sizeof(double), "xyzn_score_gradient_individual");
double *cu_G_xyzn_individual;
cu_malloc(&cu_G_xyzn_individual, num_samples * num_surface_points * 7 * sizeof(double), "G_xyzn_individual");
hipLaunchKernelGGL(( cu_xyzn_score_gradient_individual), dim3(block_size), dim3(threads_per_block), 0, 0, cu_xyzn_gradient_score_individual, cu_G_xyzn_individual, cu_vis_prob, cu_vis_pmf, cu_xi, cu_yi, cu_cloud2, cu_cloud, cu_normals2,
cu_normals, cu_samples_x, cu_samples_q, range_image_cnt, range_image_points, range_image_normals, cu_noise_models,
num_samples, num_surface_points, cu_params, score_round);
if ( hipSuccess != hipGetLastError() )
printf( "xyzn gradient individual!\n" );
// sum up xyzn_scores and gradients
//cu_add_matrix_rows_slow<<<block_size_small, thread_size_small>>>(cu_xyzn_gradient_score, cu_xyzn_gradient_score_individual, num_samples, num_surface_points, NULL); // HERE
hipLaunchKernelGGL(( cu_add_matrix_rows_medium), dim3(block_size_sum), dim3(thread_size_sum), 0, 0, cu_xyzn_gradient_score, cu_xyzn_gradient_score_individual, num_samples, num_surface_points, NULL);
//cu_add_matrix_3d_slow<<<block_size_small, thread_size_small>>>(cu_G_xyzn, cu_G_xyzn_individual, num_samples, num_surface_points, NULL, 7); // HERE!!!
hipLaunchKernelGGL(( cu_add_matrix_3d_medium), dim3(block_size_sum), dim3(thread_size_sum), 0, 0, cu_G_xyzn, cu_G_xyzn_individual, num_samples, num_surface_points, NULL, 7);
if ( hipSuccess != hipGetLastError() )
printf( "add mat 3d slow!\n" );
cu_free(cu_xi, "xi");
cu_free(cu_yi, "yi");
cu_free(cu_vis_prob, "vis_prob");
cu_free(cu_vis_prob_sums, "vis_prob_sums");
cu_free(cu_vis_pmf, "vis_pmf");
cu_free(cu_xyzn_gradient_score_individual, "xyzn gradient score individual");
cu_free(cu_G_xyzn_individual, "G_xyzn_individual");
}
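// normalize each row (length 'width') of cu_in to unit norm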
__global__ void cu_normalize_matrix_rows(double *cu_out, double *cu_in, int num_samples, int width) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
cu_normalize(&(cu_out[i*width]), &(cu_in[i*width]), width);
}
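// scale each row of cu_in by mult (optionally times the per-row factor cu_mult_arr[i])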
__global__ void cu_mult_matrix_rows(double *cu_out, double *cu_in, double mult, double *cu_mult_arr, int num_samples, int width) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
double f = mult;
if (cu_mult_arr)
f *= cu_mult_arr[i];
cu_mult(&(cu_out[i * width]), &(cu_in[i*width]), f, width);
}
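// apply a 7-dim pose increment dxq = (dx, dq) to each sample's position x and orientation q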
__global__ void cu_add_dxq(double *cu_x2, double *cu_q2, double *cu_samples_x, double *cu_samples_q, double *cu_dxq, int num_samples) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
cu_x2[3*i] = cu_samples_x[3*i] + cu_dxq[7*i]; cu_x2[3*i + 1] = cu_samples_x[3*i + 1] + cu_dxq[7*i + 1]; cu_x2[3*i + 2] = cu_samples_x[3*i + 2] + cu_dxq[7*i + 2];
cu_q2[4*i] = cu_samples_q[4*i] + cu_dxq[7*i + 3]; cu_q2[4*i + 1] = cu_samples_q[4*i + 1] + cu_dxq[7*i + 4];
cu_q2[4*i + 2] = cu_samples_q[4*i + 2] + cu_dxq[7*i + 5]; cu_q2[4*i + 3] = cu_samples_q[4*i + 3] + cu_dxq[7*i + 6];
}
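// keep the best-scoring pose (and the step size that produced it) seen so far for each sample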
__global__ void cu_update_best(double *cu_best_score, double *cu_best_x, double *cu_best_q, double *cu_best_step, double *cu_scores, double *cu_x2, double *cu_q2, double *cu_step, double step, int num_samples) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
if (cu_best_score[i] < cu_scores[i]) {
cu_best_score[i] = cu_scores[i];
cu_best_x[3*i] = cu_x2[3*i]; cu_best_x[3*i+1] = cu_x2[3*i+1]; cu_best_x[3*i+2] = cu_x2[3*i+2];
cu_best_q[4*i] = cu_q2[4*i]; cu_best_q[4*i+1] = cu_q2[4*i+1]; cu_best_q[4*i+2] = cu_q2[4*i+2]; cu_best_q[4*i+3] = cu_q2[4*i+3];
cu_best_step[i] = step * cu_step[i];
}
}
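/*
 * refine sample poses by gradient ascent on the combined edge + xyz + normal score:
 * each iteration computes the score gradient, tries three step sizes along the normalized
 * gradient, scores the resulting poses, and keeps the best per sample; final poses are
 * copied back into samples[]
 */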
void align_models_gradient(scope_sample_t *samples, int num_samples, cu_model_data_t *cu_model, cu_obs_data_t *cu_obs, scope_params_t *cu_params, scope_params_t *params,
int num_points, int model_points, int round) {
if (num_samples == 0) {
printf("Align models, no samples!\n");
}
//TODO(sanja): These dim3s need some serious reorganizing/renaming
int num_surface_points = (num_points > 0 ? num_points : model_points);
dim3 threads_per_block(256, 1, 1);
dim3 block_size(ceil(1.0 * num_surface_points / threads_per_block.x), num_samples);
dim3 thread_size_small(64);
dim3 block_size_small(ceil(1.0 * num_samples/thread_size_small.x));
dim3 thread_size_sum(256);
dim3 block_size_sum(1, num_samples);
dim3 thread_size_sum_small(64);
//TODO: make these params
int max_iter = 20;
// unpack args
double *cu_samples_x;
cu_malloc(&cu_samples_x, num_samples * 3 * sizeof(double), "samples_x");
double *cu_samples_q;
cu_malloc(&cu_samples_q, num_samples * 4 * sizeof(double), "samples_q");
unpack_x_q(cu_samples_x, cu_samples_q, samples, num_samples);
int n_edge = cu_model->max_num_edges;
int *cu_n;
cu_malloc(&cu_n, num_samples * sizeof(int), "n");
dim3 block_size_n_edge(ceil(1.0 * n_edge / thread_size_sum.x), num_samples);
double *cu_P;
cu_malloc(&cu_P, num_samples * n_edge * 3*sizeof(double), "cu_P");
int *cu_vi;
cu_malloc(&cu_vi, num_samples * sizeof(int), "vi");
get_range_edge_points(cu_n, cu_P, num_samples, n_edge, cu_n, num_points, cu_model, cu_vi, 1, cu_samples_x, cu_samples_q, block_size_n_edge, thread_size_sum, block_size_small, thread_size_small);
double *cu_P2;
cu_malloc(&cu_P2, num_samples * n_edge * 3*sizeof(double), "cu_P2");
// get model surface points
int *cu_idx;
cu_malloc(&cu_idx, num_samples * num_surface_points * sizeof(int), "idxs");
get_validation_points(cu_idx, model_points, num_surface_points, num_samples, block_size, threads_per_block);
double *cu_cloud, *cu_cloud2, *cu_normals, *cu_normals2;
cu_malloc(&cu_cloud, num_samples * num_surface_points * 3 * sizeof(double), "cloud");
cu_malloc(&cu_cloud2, num_samples * num_surface_points * 3 * sizeof(double), "cloud2");
cu_malloc(&cu_normals, num_samples * num_surface_points * 3 * sizeof(double), "normals");
cu_malloc(&cu_normals2, num_samples * num_surface_points * 3 * sizeof(double), "normals2");
hipLaunchKernelGGL(( cu_reorder_rows), dim3(block_size), dim3(threads_per_block), 0, 0, cu_cloud, cu_model->points, cu_idx, num_samples, num_surface_points, 3, NULL);
hipLaunchKernelGGL(( cu_reorder_rows), dim3(block_size), dim3(threads_per_block), 0, 0, cu_normals, cu_model->normals, cu_idx, num_samples, num_surface_points, 3, NULL);
if ( hipSuccess != hipGetLastError() )
printf( "reorder rows!\n" );
scope_noise_model_t *cu_noise_models;
cu_malloc(&cu_noise_models, num_samples * num_surface_points * sizeof(scope_noise_model_t), "noise_models");
double *cu_G_edge, *cu_G_xyzn, *cu_G;
cu_malloc(&cu_G_edge, 7*num_samples * sizeof(double), "G_edge");
cu_malloc(&cu_G_xyzn, 7*num_samples * sizeof(double), "G_xyzn");
cu_malloc(&cu_G, 7*num_samples * sizeof(double), "G");
double *cu_edge_gradient_score;
cu_malloc(&cu_edge_gradient_score, num_samples * sizeof(double), "edge_gradient_score");
double *cu_xyzn_gradient_score;
cu_malloc(&cu_xyzn_gradient_score, num_samples * sizeof(double), "xyzn_gradient_score");
int *cu_xi;
cu_malloc(&cu_xi, num_samples*num_surface_points * sizeof(int), "xi");
int *cu_yi;
cu_malloc(&cu_yi, num_samples*num_surface_points * sizeof(int), "yi");
double *cu_vis_prob;
cu_malloc(&cu_vis_prob, num_samples * num_surface_points * sizeof(double), "vis_prob");
double *cu_vis_prob_sums;
cu_malloc(&cu_vis_prob_sums, num_samples * sizeof(double), "vis_prob_sums");
double *cu_vis_pmf;
cu_malloc(&cu_vis_pmf, num_samples * num_surface_points * sizeof(double), "vis_pmf");
double *cu_edge_scores, *cu_xyz_scores, *cu_normal_scores, *cu_scores;
cu_malloc(&cu_edge_scores, num_samples * sizeof(double), "edge_scores");
cu_malloc(&cu_xyz_scores, num_samples * sizeof(double), "xyz_scores");
cu_malloc(&cu_normal_scores, num_samples * sizeof(double), "normal_scores");
cu_malloc(&cu_scores, num_samples * sizeof(double), "scores");
double *cu_x2, *cu_q2, *cu_dxq;
cu_malloc(&cu_x2, num_samples * 3 * sizeof(double), "x2");
cu_malloc(&cu_q2, num_samples * 4 * sizeof(double), "q2");
cu_malloc(&cu_dxq, num_samples * 7 * sizeof(double), "dxq");
double step = .01; // step size in gradient ascent
double step_mult[3] = {.6, 1, 1.6};
double *cu_step;
cu_malloc(&cu_step, num_samples * sizeof(double), "step");
hipLaunchKernelGGL(( cu_init_arr), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_step, step, num_samples);
double *cu_best_score, *cu_best_step;
cu_malloc(&cu_best_score, num_samples * sizeof(double), "best_score");
cu_malloc(&cu_best_step, num_samples * sizeof(double), "best_step");
double *cu_best_x, *cu_best_q;
cu_malloc(&cu_best_x, num_samples * 3 * sizeof(double), "best_x");
cu_malloc(&cu_best_q, num_samples * 4 * sizeof(double), "best_q");
int j, iter;
double init_val = -10000000.0;
for (iter = 0; iter < max_iter; iter++) {
hipLaunchKernelGGL(( cu_transform_cloud), dim3(block_size), dim3(threads_per_block), 0, 0, cu_cloud2, cu_cloud, cu_samples_x, cu_samples_q, num_samples, num_surface_points, NULL);
hipLaunchKernelGGL(( cu_transform_cloud), dim3(block_size), dim3(threads_per_block), 0, 0, cu_normals2, cu_normals, NULL, cu_samples_q, num_samples, num_surface_points, NULL);
if ( hipSuccess != hipGetLastError() )
printf( "transform cloud!\n" );
hipLaunchKernelGGL(( cu_get_viewpoints), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_vi, num_samples, cu_samples_x, cu_samples_q, cu_model->range_edges_model_views);
hipLaunchKernelGGL(( cu_get_noise_models), dim3(block_size), dim3(threads_per_block), 0, 0, cu_noise_models, cu_cloud2, cu_normals2, cu_idx, cu_vi, cu_model->ved, cu_model->normalvar, num_samples, num_surface_points);
if ( hipSuccess != hipGetLastError() )
printf( "get noise models!\n" );
hipMemset(cu_G_edge, 0, 7*num_samples * sizeof(double));
hipMemset(cu_G_xyzn, 0, 7*num_samples * sizeof(double));
edge_score_gradient(cu_edge_gradient_score, cu_G_edge, cu_samples_x, cu_samples_q, cu_P2, cu_P, num_samples, n_edge, cu_n, cu_obs->range_image, cu_obs->range_image_data, cu_obs->edge_image,
cu_params, params, round, block_size_n_edge, thread_size_sum, block_size_sum, thread_size_sum, block_size_small, thread_size_small);
xyzn_score_gradient(cu_xyzn_gradient_score, cu_G_xyzn, cu_samples_x, cu_samples_q, cu_cloud2, cu_cloud, cu_normals2, cu_normals, 0, cu_noise_models, cu_obs->range_image_data, cu_obs->range_image,
cu_obs->range_image_cnt, cu_obs->range_image_points, cu_obs->range_image_normals, num_samples, num_surface_points, cu_params, params, round,
block_size, threads_per_block, block_size_small, thread_size_small, block_size_sum, thread_size_sum);
hipLaunchKernelGGL(( cu_matrix_add), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_G, cu_G_edge, cu_G_xyzn, num_samples, 7);
hipLaunchKernelGGL(( cu_init_arr), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_best_score, init_val, num_samples);
if ( hipSuccess != hipGetLastError() )
printf( "init arr!\n" );
hipMemset(cu_best_step, 0, num_samples * sizeof(double));
for (j = 0; j < 3; ++j) {
// take a step in the direction of the gradient
hipLaunchKernelGGL(( cu_normalize_matrix_rows), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_G, cu_G, num_samples, 7);
hipLaunchKernelGGL(( cu_mult_matrix_rows), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_dxq, cu_G, step_mult[j], cu_step, num_samples, 7);
hipLaunchKernelGGL(( cu_add_dxq), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_x2, cu_q2, cu_samples_x, cu_samples_q, cu_dxq, num_samples);
hipLaunchKernelGGL(( cu_normalize_matrix_rows), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_q2, cu_q2, num_samples, 4);
if ( hipSuccess != hipGetLastError() )
printf( "prep stuff for step direction!\n" );
hipLaunchKernelGGL(( cu_transform_cloud), dim3(block_size), dim3(threads_per_block), 0, 0, cu_cloud2, cu_cloud, cu_x2, cu_q2, num_samples, num_surface_points, NULL);
hipLaunchKernelGGL(( cu_transform_cloud), dim3(block_size), dim3(threads_per_block), 0, 0, cu_normals2, cu_normals, NULL, cu_q2, num_samples, num_surface_points, NULL);
if ( hipSuccess != hipGetLastError() )
printf( "transform clouds!\n" );
hipLaunchKernelGGL(( cu_populate_xi_yi), dim3(block_size), dim3(threads_per_block), 0, 0, cu_xi, cu_yi, cu_cloud2, cu_obs->range_image_data, num_samples, num_surface_points, NULL);
if ( hipSuccess != hipGetLastError() )
printf( "populate xi yi!\n" );
get_vis_prob_sums_and_pmf(cu_vis_prob, cu_vis_prob_sums, cu_vis_pmf, cu_cloud2, cu_normals2, cu_xi, cu_yi, cu_obs->range_image, cu_obs->range_image_data, 0, num_samples, num_surface_points, NULL, params,
block_size, threads_per_block, block_size_sum, thread_size_sum, 0);
// transform edge points
hipLaunchKernelGGL(( cu_transform_cloud), dim3(block_size_n_edge), dim3(thread_size_sum), 0, 0, cu_P2, cu_P, cu_x2, cu_q2, num_samples, n_edge, cu_n);
if ( hipSuccess != hipGetLastError() )
printf( "transform edge!\n" );
// evaluate the score
compute_edge_score(cu_edge_scores, NULL, cu_P2, cu_obs->range_image_data, cu_obs->range_image, num_samples, n_edge, cu_n, cu_obs->edge_image, cu_model->score_comp_models->b_edge,
cu_model->score_comp_models->b_edge_occ, params, cu_params, round, block_size_n_edge, block_size_sum, thread_size_sum, block_size_small, thread_size_small);
compute_xyz_score(cu_xyz_scores, NULL, cu_cloud2, cu_xi, cu_yi, cu_vis_pmf, cu_noise_models, cu_obs->range_image, cu_obs->range_image_data, cu_obs->range_image_cnt, num_samples, num_surface_points,
cu_params, round, cu_model->score_comp_models->b_xyz, block_size, threads_per_block, block_size_sum, thread_size_sum, block_size_small, thread_size_small);
compute_normal_score(cu_normal_scores, NULL, cu_normals2, cu_vis_pmf, cu_noise_models, num_samples, num_surface_points, cu_xi, cu_yi, cu_obs->range_image_cnt, cu_obs->range_image_normals,
cu_model->score_comp_models->b_normal, cu_params, round, block_size, threads_per_block, block_size_sum, thread_size_sum, block_size_small, thread_size_small);
hipLaunchKernelGGL(( cu_add_3_scores), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_scores, cu_xyz_scores, cu_normal_scores, cu_edge_scores, num_samples);
if ( hipSuccess != hipGetLastError() )
printf( "add 3 scores!\n" );
hipLaunchKernelGGL(( cu_update_best), dim3(block_size_small), dim3(thread_size_small), 0, 0, cu_best_score, cu_best_x, cu_best_q, cu_best_step, cu_scores, cu_x2, cu_q2, cu_step, step_mult[j], num_samples);
if ( hipSuccess != hipGetLastError() )
printf( "update best!\n" );
}
hipMemcpy(cu_samples_x, cu_best_x, 3*num_samples*sizeof(double), hipMemcpyDeviceToDevice);
hipMemcpy(cu_samples_q, cu_best_q, 4*num_samples*sizeof(double), hipMemcpyDeviceToDevice);
hipMemcpy(cu_step, cu_best_step, num_samples * sizeof(double), hipMemcpyDeviceToDevice); // NOTE(sanja): According to StackOverflow, might be expensive
}
double **samples_x = new_matrix2(num_samples, 3);
double **samples_q = new_matrix2(num_samples, 4);
hipMemcpy(samples_x[0], cu_samples_x, 3 * num_samples * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(samples_q[0], cu_samples_q, 4 * num_samples * sizeof(double), hipMemcpyDeviceToHost);
int i;
for (i = 0; i < num_samples; ++i) {
memcpy(samples[i].x, samples_x[i], 3 * sizeof(double));
}
for (i = 0; i < num_samples; ++i) {
memcpy(samples[i].q, samples_q[i], 4 * sizeof(double));
}
free_matrix2(samples_x);
free_matrix2(samples_q);
cu_free(cu_samples_x, "x free");
cu_free(cu_samples_q, "q free");
cu_free(cu_n, "n free");
cu_free(cu_P, "P free");
cu_free(cu_vi, "vi free");
cu_free(cu_P2, "P2 free");
cu_free(cu_idx, "idx free");
cu_free(cu_cloud, "cloud free");
cu_free(cu_cloud2, "cloud2 free");
cu_free(cu_normals, "normals free");
cu_free(cu_normals2, "normals2 free");
cu_free(cu_G_edge, "G_edge free");
cu_free(cu_G_xyzn, "G_xyzn free");
cu_free(cu_G, "G free");
cu_free(cu_edge_gradient_score, "edge_gradient_score");
cu_free(cu_xyzn_gradient_score, "xyzn_gradient_score");
cu_free(cu_xi, "xi");
cu_free(cu_yi, "yi");
cu_free(cu_vis_prob, "vis_prob");
cu_free(cu_vis_prob_sums, "vis_prob_sums");
cu_free(cu_vis_pmf, "vis_pmf");
cu_free(cu_edge_scores, "edge_scores");
cu_free(cu_xyz_scores, "xyz_scores");
cu_free(cu_normal_scores, "normal_scores");
cu_free(cu_scores, "scores");
cu_free(cu_best_score, "best_score");
cu_free(cu_best_step, "best_step");
cu_free(cu_best_x, "best_x");
cu_free(cu_best_q, "best_q");
cu_free(cu_step, "step");
hipDeviceSynchronize();
}
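/*
 * brute-force k-nearest-neighbors: one block row per query point; each thread scans a strided
 * subset of the reference points keeping a sorted partial queue of the KNN_SIZE closest, then the
 * partial queues are merged pairwise in shared memory (assumes blockDim.x == THREADS_KNN, a power of two)
 */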
__global__ void cu_knn(float *nn_d2, int *nn_idx, double *ref, double *query, int ref_n, int query_n, int d, int k) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= query_n || j >= ref_n)
return;
__shared__ float shared_d2[THREADS_KNN * KNN_SIZE];
__shared__ int shared_idx[THREADS_KNN * KNN_SIZE];
__shared__ float tmp_d2[THREADS_KNN * KNN_SIZE / 2];
__shared__ int tmp_idx[THREADS_KNN * KNN_SIZE / 2];
for (int k = 0; k < KNN_SIZE; ++k) {
shared_d2[j * KNN_SIZE + k] = 1000000.0;
shared_idx[j * KNN_SIZE + k] = -1;
}
// Scan a strided subset of the reference points, maintaining a sorted partial queue of the closest
float dist;
int last = 0;
for (int kk = j; kk < ref_n; kk += blockDim.x) {
dist = 0.0;
for (int l = 0; l < d; ++l)
dist += (ref[d * kk + l] - query[d * i + l]) * (ref[d * kk + l] - query[d * i + l]);
if (last == KNN_SIZE && dist >= shared_d2[j * KNN_SIZE + last-1])
continue;
if (last < KNN_SIZE)
++last;
if (last < KNN_SIZE || (last == KNN_SIZE && shared_d2[j * KNN_SIZE + last-1] > dist)) {
shared_d2[j * KNN_SIZE + last-1] = dist;
shared_idx[j * KNN_SIZE + last-1] = kk;
}
for (int l = last-1; l > 0; --l) {
if (shared_d2[j * KNN_SIZE +l] < shared_d2[j * KNN_SIZE +l-1]) {
float tmp_d2 = shared_d2[j * KNN_SIZE + l]; shared_d2[j * KNN_SIZE + l] = shared_d2[j * KNN_SIZE + l-1]; shared_d2[j * KNN_SIZE + l-1] = tmp_d2;
int tmp_idx = shared_idx[j * KNN_SIZE + l]; shared_idx[j * KNN_SIZE + l] = shared_idx[j * KNN_SIZE + l-1]; shared_idx[j * KNN_SIZE + l-1] = tmp_idx;
}
}
}
__syncthreads();
// Merge partial queues
for (int num_threads = blockDim.x / 2; num_threads > 0; num_threads >>= 1) {
// Merge two partial queues
if (j < num_threads) {
int a = 0, b = 0;
int other = j + num_threads;
int c = 0;
while (a < KNN_SIZE && b < KNN_SIZE && c < KNN_SIZE) {
if (shared_d2[j * KNN_SIZE + a] < shared_d2[other * KNN_SIZE + b]) {
tmp_d2[j * KNN_SIZE + c] = shared_d2[j * KNN_SIZE + a];
tmp_idx[j * KNN_SIZE + c++] = shared_idx[j * KNN_SIZE + a++];
} else {
tmp_d2[j * KNN_SIZE + c] = shared_d2[other * KNN_SIZE + b];
tmp_idx[j * KNN_SIZE + c++] = shared_idx[other * KNN_SIZE + b++];
}
}
// Because we only have KNN_SIZE things in tmp, once one array runs out, we are done.
// Copy the final list into the correct shared memory location
for (a = 0; a < KNN_SIZE; ++a) {
shared_d2[j * KNN_SIZE + a] = tmp_d2[j * KNN_SIZE + a];
shared_idx[j * KNN_SIZE + a] = tmp_idx[j * KNN_SIZE + a];
}
}
__syncthreads();
}
// Copy data from shared memory into the global memory
if (j < KNN_SIZE) {
nn_d2[i * KNN_SIZE + j] = shared_d2[0 * KNN_SIZE + j];
nn_idx[i * KNN_SIZE + j] = shared_idx[0 * KNN_SIZE + j];
}
}
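// host wrapper: copy reference and query points to the GPU, run cu_knn on a batch of query points,
// and copy back the k nearest squared distances and indices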
void knn(float *nn_d2, int *nn_idx, double *reference, double *query, int ref_n, int d, int k, int start, int batch_size) {
dim3 block_size(1, batch_size, 1);
dim3 threads_per_block(THREADS_KNN, 1, 1);
double *cu_ref;
double *cu_query;
cu_malloc(&cu_ref, ref_n * d * sizeof(double), "ref");
cu_malloc(&cu_query, batch_size * d * sizeof(double), "query");
hipMemcpy(cu_ref, reference, ref_n * d * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(cu_query, query, batch_size * d * sizeof(double), hipMemcpyHostToDevice);
float *cu_nn_d2;
int *cu_nn_idx;
cu_malloc(&cu_nn_d2, batch_size * k * sizeof(float), "nn_d2");
cu_malloc(&cu_nn_idx, batch_size * k * sizeof(int), "nn_idx");
double t_tmp = get_time_ms();
hipLaunchKernelGGL(( cu_knn), dim3(block_size), dim3(threads_per_block), 0, 0, cu_nn_d2, cu_nn_idx, cu_ref, cu_query, ref_n, batch_size, d, k);
if ( hipSuccess != hipGetLastError() )
printf( "knn!\n" );
hipMemcpy(nn_d2, cu_nn_d2, batch_size * k * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(nn_idx, cu_nn_idx, batch_size * k * sizeof(int), hipMemcpyDeviceToHost);
cu_free(cu_ref, "ref");
cu_free(cu_query, "query");
cu_free(cu_nn_d2, "nn_d2");
cu_free(cu_nn_idx, "nn_idx");
}
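// simple host-side test of cu_add_matrix_3d_medium: sums a side x side x side array of ones along
// the middle dimension and prints the side x side result (each entry should equal side)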
void testAdd3d() {
double *A, *B;
int side = 100;
int total = side*side*side;
safe_malloc(A, total, double);
safe_malloc(B, side*side, double);
for (int i = 0; i < total; ++i) {
A[i] = 1;
}
double *cu_A;
double *cu_B;
cu_malloc(&cu_A, total * sizeof(double), "a");
hipMemcpy(cu_A, A, total * sizeof(double), hipMemcpyHostToDevice);
cu_malloc(&cu_B, side*side * sizeof(double), "b");
dim3 block(1, side, 1);
dim3 thread(side/2, 1, 1);
hipLaunchKernelGGL(( cu_add_matrix_3d_medium), dim3(block), dim3(thread), 0, 0, cu_B, cu_A, side, side, NULL, side);
//cu_add_matrix_3d_slow<<<side, 1>>>(cu_B, cu_A, side, side, NULL, side);
hipMemcpy(B, cu_B, side*side*sizeof(double), hipMemcpyDeviceToHost);
for (int i = 0; i < side; ++i) {
for (int j = 0; j < side; ++j) {
printf("%lf ", B[i * side + j]);
}
printf("\n\n");
}
}
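/*
 * sample the first FPFH correspondence for each scope sample: batches of observed FPFH features
 * (in random order) are matched against the model features with the GPU knn, and a model point is
 * drawn with probability proportional to a Gaussian on feature distance
 */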
void sample_all_first_fpfh_correspondences(scope_sample_t *samples, int *num_samples_init, int num_samples, scope_model_data_t *model_data, scope_obs_data_t *obs_data, scope_params_t *params) {
extern double knn_t; // dbug
knn_t = 0.0;
extern int knn_calls;
knn_calls = 0;
int batch_size = 200;
params->knn = KNN_SIZE;
int nn_idx[params->knn * batch_size];
float nn_d2[params->knn * batch_size];
int i;
double t0 = get_time_ms();
// Create random permutation of points to avoid continuous sampling
int idx[obs_data->fpfh_obs->num_points];
for (i = 0; i < obs_data->fpfh_obs->num_points; i++) {
idx[i] = i;
}
randperm(idx, obs_data->fpfh_obs->num_points, obs_data->fpfh_obs->num_points);
int picked = 0;
int start = 0;
int width = obs_data->fpfh_obs->fpfh_length;
while (picked < num_samples && start < obs_data->fpfh_obs->num_points) {
if (start + batch_size > obs_data->fpfh_obs->num_points) {
batch_size = obs_data->fpfh_obs->num_points - start;
}
double query_pts[batch_size * width];
for (i = 0; i < batch_size; ++i) {
memcpy(&query_pts[i * width], obs_data->fpfh_obs->fpfh[idx[i + start]], width * sizeof(double));
}
t0 = get_time_ms();
knn(nn_d2, nn_idx, model_data->fpfh_model->fpfh[0], query_pts, model_data->fpfh_model->num_points, width, params->knn, start, batch_size);
hipDeviceSynchronize();
knn_t += get_time_ms() - t0;
knn_calls += batch_size;
for (i = 0; i < batch_size && picked < num_samples; ++i) {
if (nn_d2[i * params->knn] < params->f_sigma * params->f_sigma) {
int c_obs = idx[i + start];
double p[params->knn];
int j;
for (j = 0; j < params->knn; j++)
p[j] = exp(-.5*nn_d2[i * params->knn + j] / (params->f_sigma * params->f_sigma));
normalize_pmf(p, p, params->knn);
j = pmfrand(p, params->knn);
int c_model = nn_idx[i * params->knn + j];
samples[picked].c_obs[0] = c_obs;
samples[picked].c_model[0] = c_model;
samples[picked].c_type[0] = C_TYPE_FPFH;
samples[picked].nc = 1;
// compute correspondence score
samples[picked].c_score[0] = log(normpdf(sqrt(nn_d2[i * params->knn + j]), 0, params->f_sigma));
++picked;
}
}
start += batch_size;
}
*num_samples_init = picked; // In case we terminated early because we ran out of good points to sample
}
| 60b97143cd66bc4d4a4a823320e7fa3392c33f0d.cu | #include "cuda.h"
#include "include/bingham/cuda_wrapper.h"
#include "curand.h"
#include "bingham/olf.h"
#include <math.h>
#define MAX(x,y) ((x) > (y) ? (x) : (y))
#define MIN(x,y) ((x) < (y) ? (x) : (y))
#define cu_malloc(x, sz, msg) do{ if (cudaMalloc(x, sz) != cudaSuccess) printf(msg); } while (0)
#define cu_free(x, msg) do{ if (cudaFree(x) != cudaSuccess) printf(msg); } while (0)
curandGenerator_t gen;
//#define CUDA_LAUNCH_BLOCKING 1
__device__ __constant__ int big_primes[100] = {996311, 163573, 481123, 187219, 963323, 103769, 786979, 826363, 874891, 168991, 442501, 318679, 810377, 471073, 914519, 251059, 321983, 220009, 211877, 875339, 605603, 578483, 219619, 860089, 644911, 398819, 544927, 444043, 161717, 301447, 201329, 252731, 301463, 458207, 140053, 906713, 946487, 524389, 522857, 387151, 904283, 415213, 191047, 791543, 433337, 302989, 445853, 178859, 208499, 943589, 957331, 601291, 148439, 296801, 400657, 829637, 112337, 134707, 240047, 669667, 746287, 668243, 488329, 575611, 350219, 758449, 257053, 704287, 252283, 414539, 647771, 791201, 166031, 931313, 787021, 520529, 474667, 484361, 358907, 540271, 542251, 825829, 804709, 664843, 423347, 820367, 562577, 398347, 940349, 880603, 578267, 644783, 611833, 273001, 354329, 506101, 292837, 851017, 262103, 288989};
__device__ __constant__ double b_SR[3] = {0.2878, -5.6214, 7.7247};
__device__ __constant__ double b_SN[3] = {0.1521, -7.1290, 10.7090};
__device__ __constant__ double b_SL[3] = {0.2238, -5.1827, 6.8242};
__device__ __constant__ double b_SA[3] = {0.1618, -6.3992, 8.0207};
__device__ __constant__ double b_SB[3] = {0.2313, -6.3463, 8.0651};
__device__ __constant__ double b_ER[3] = {0.3036, 0.2607, -125.8843};
__device__ __constant__ double b_EN[3] = {0.1246, 1.4406, -185.8350};
__device__ __constant__ double b_EL[3] = {0.2461, 0.2624, -140.0192};
__device__ __constant__ double b_EA[3] = {0.1494, 0.2114, -139.4324};
__device__ __constant__ double b_EB[3] = {0.2165, 0.2600, -135.5203};
__device__ __constant__ double round1_dthresh = .05; //TODO: make this a param
__device__ inline double cu_sigmoid(double x, const double *b)
{
return b[0] + (1 - b[0]) / (1 + exp(-b[1]-b[2]*x));
}
__device__ inline double cu_logistic(double x, double *b)
{
return 1.0 / (1.0 + exp(-x*b[1]-b[0]));
}
/*int gpu_xi[10000000];
int gpu_yi[10000000];
double gpu_cloud[100000000];*/
#define THREADS_KNN 128 // Constant so we can allocate shared memory easier
#define KNN_SIZE 30
const int num_components = 15;
__device__ __constant__ int cu_num_components = num_components;
__device__ __constant__ int xyz_idx = 0, normal_idx = 1, vis_idx = 2, random_walk_idx = 3, edge_idx = 4, edge_vis_idx = 5, edge_occ_idx = 6, L_idx = 7, A_idx = 8, B_idx = 9, fpfh_idx = 10,
specularity_idx = 11, segment_affinity_idx = 12, segment_idx = 13, table_idx = 14;
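// copy a contiguous n-by-m host matrix to device memory and record its dimensions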
void copy_double_matrix_to_gpu(cu_double_matrix_t *dev_dest, double **host_src, int n, int m) {
dev_dest->n = n;
dev_dest->m = m;
if (cudaMalloc(&(dev_dest->ptr), m*n*sizeof(double)) != cudaSuccess) {
printf("double 2d malloc\n");
}
if (cudaMemcpy(dev_dest->ptr, host_src[0], n * m * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess) {
printf("double 2d copy\n");
}
}
void copy_int_matrix_to_gpu(cu_int_matrix_t *dev_dest, int **host_src, int n, int m) {
dev_dest->n = n;
dev_dest->m = m;
if (cudaMalloc(&(dev_dest->ptr), m*n*sizeof(int)) != cudaSuccess) {
printf("int 2d malloc \n");
}
if (cudaMemcpy(dev_dest->ptr, host_src[0], n * m * sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) {
printf("int 2d copy\n");
}
}
void copy_double_matrix3d_to_gpu(cu_double_matrix3d_t *dev_dest, double ***host_src, int n, int m, int p) {
dev_dest->n = n; dev_dest->m = m; dev_dest->p = p;
if (cudaMalloc(&(dev_dest->ptr), n * m * p * sizeof(double)) != cudaSuccess) {
printf("3d malloc\n");
}
if (cudaMemcpy(dev_dest->ptr, host_src[0][0], n * m * p * sizeof(double), cudaMemcpyHostToDevice)) {
printf("3d copy\n");
}
}
void copy_double_arr_to_gpu(cu_double_arr_t *dev_dest, double *host_src, int n) {
dev_dest->n = n;
if (cudaMalloc(&(dev_dest->ptr), n * sizeof(double)) != cudaSuccess) {
printf("double arr malloc\n");
}
if (cudaMemcpy(dev_dest->ptr, host_src, n * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess) {
printf("double arr copy\n");
}
}
void copy_int_arr_to_gpu(cu_int_arr_t *dev_dest, int *host_src, int n) {
dev_dest->n = n;
if (cudaMalloc(&(dev_dest->ptr), n * sizeof(int)) != cudaSuccess) {
printf("int arr malloc\n");
}
if (cudaMemcpy(dev_dest->ptr, host_src, n * sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) {
printf("int arr copy\n");
}
}
__device__ void cu_quaternion_to_rotation_matrix(double R[][3], double q[]) {
double a = q[0];
double b = q[1];
double c = q[2];
double d = q[3];
R[0][0] = a*a + b*b - c*c - d*d;
R[0][1] = 2*b*c - 2*a*d;
R[0][2] = 2*b*d + 2*a*c;
R[1][0] = 2*b*c + 2*a*d;
R[1][1] = a*a - b*b + c*c - d*d;
R[1][2] = 2*c*d - 2*a*b;
R[2][0] = 2*b*d - 2*a*c;
R[2][1] = 2*c*d + 2*a*b;
R[2][2] = a*a - b*b - c*c + d*d;
}
__device__ double cu_dot(double x[], double y[], int n) {
int i;
double z = 0.0;
for (i = 0; i < n; i++)
z += x[i]*y[i];
return z;
}
__device__ void cu_matrix_vec_mult_3(double *y, double A[][3], double *x, int n) {
int i;
if (y == x) { // dbug
printf("**************FIX CU_MATRIX_VEC_MULT CALL!\n");
}
for (i = 0; i < n; i++)
y[i] = cu_dot(A[i], x, 3);
}
// matrix multiplication, Z = X*Y, where X is n-by-p and Y is p-by-m
__device__ void cu_matrix_mult_2_3_4(double Z[][4], double X[][3], double Y[][4])
{
int i, j, k;
for (i = 0; i < 2; i++) { // row i
for (j = 0; j < 4; j++) { // column j
Z[i][j] = 0;
for (k = 0; k < 3; k++)
Z[i][j] += X[i][k]*Y[k][j];
}
}
}
__device__ void cu_vec_matrix_mult_7(double y[], double x[], double A[][7], int n)
{
int i, j;
if (y == x) {
printf("****************FIX vec_matrix_mult call!\n");
}
else {
for (j = 0; j < 7; j++) {
y[j] = 0;
for (i = 0; i < n; i++)
y[j] += x[i]*A[i][j];
}
}
}
__device__ void cu_vec_matrix_mult_4(double y[], double x[], double A[][4], int n)
{
int i, j;
if (y == x) {
printf("****************FIX vec_matrix_mult call!\n");
}
else {
for (j = 0; j < 4; j++) {
y[j] = 0;
for (i = 0; i < n; i++)
y[j] += x[i]*A[i][j];
}
}
}
// adds two vectors, z = x+y
__device__ void cu_add(double z[], double x[], double y[], int n) {
int i;
for (i = 0; i < n; i++)
z[i] = x[i] + y[i];
}
__device__ double cu_norm(double x[], int n) {
double d = 0.0;
int i;
for (i = 0; i < n; i++)
d += x[i]*x[i];
return sqrt(d);
}
__device__ void cu_normalize(double y[], double x[], int n) {
double d = cu_norm(x, n);
int i;
for (i = 0; i < n; i++)
y[i] = x[i]/d;
}
// compute the pdf of a normal random variable
__device__ double cu_normpdf(double x, double mu, double sigma) {
double dx = x - mu;
return exp(-dx*dx / (2*sigma*sigma)) / (sqrt(2*M_PI) * sigma);
}
// invert a quaternion
__device__ void cu_quaternion_inverse(double q_inv[4], double *q) {
q_inv[0] = q[0];
q_inv[1] = -q[1];
q_inv[2] = -q[2];
q_inv[3] = -q[3];
}
// multiplies a vector by a scalar, y = c*x
__device__ void cu_mult(double y[], double x[], double c, int n)
{
int i;
for (i = 0; i < n; i++)
y[i] = c*x[i];
}
__device__ double cu_dist(double *x, double *y, int n) {
double d = 0.0;
int i;
for (i = 0; i < n; i++)
d += (x[i]-y[i])*(x[i]-y[i]);
return sqrt(d);
}
void cu_init() {
CUresult err = cuInit(0);
//if (err != 0)
printf("Init error: %d\n", err);
}
void cu_init_model(scope_model_data_t *model_data, cu_model_data_t *cu_model) {
// Allocate all the memory
copy_double_matrix_to_gpu(&(cu_model->points), model_data->pcd_model->points, model_data->pcd_model->num_points, 3);
copy_double_matrix_to_gpu(&(cu_model->normals), model_data->pcd_model->normals, model_data->pcd_model->num_points, 3);
copy_double_arr_to_gpu(&(cu_model->normalvar), model_data->pcd_model->normalvar, model_data->pcd_model->num_points);
copy_double_matrix_to_gpu(&(cu_model->lab), model_data->pcd_model->lab, model_data->pcd_model->num_points, 3);
copy_double_matrix_to_gpu(&(cu_model->ved), model_data->pcd_model->ved, model_data->pcd_model->num_points, 66);
/*copy_double_matrix_to_gpu(&(cu_model->color_avg_cov), model_data->color_model->avg_cov, 3, 3);
copy_int_arr_to_gpu(&(cu_model->color_cnts1), model_data->color_model->cnts[0], model_data->color_model->num_points);
copy_int_arr_to_gpu(&(cu_model->color_cnts2), model_data->color_model->cnts[1], model_data->color_model->num_points);
copy_double_matrix_to_gpu(&(cu_model->color_means1), model_data->color_model->means[0], model_data->color_model->num_points, 3);
copy_double_matrix_to_gpu(&(cu_model->color_means2), model_data->color_model->means[1], model_data->color_model->num_points, 3);
copy_double_matrix3d_to_gpu(&(cu_model->color_cov1), model_data->color_model->covs[0], model_data->color_model->num_points, 3, 3);
copy_double_matrix3d_to_gpu(&(cu_model->color_cov2), model_data->color_model->covs[1], model_data->color_model->num_points, 3, 3);*/
copy_double_matrix_to_gpu(&(cu_model->fpfh_points), model_data->fpfh_model->points, model_data->fpfh_model->num_points, 3);
copy_double_matrix_to_gpu(&(cu_model->fpfh_normals), model_data->fpfh_model->normals, model_data->fpfh_model->num_points, 3);
copy_double_matrix_to_gpu(&(cu_model->fpfh), model_data->fpfh_model->fpfh, model_data->fpfh_model->num_points, model_data->fpfh_model->fpfh_length);
copy_double_matrix_to_gpu(&(cu_model->range_edges_model_views), model_data->range_edges_model->views, model_data->range_edges_model->num_views, 3);
copy_int_arr_to_gpu(&(cu_model->range_edges_view_idx), model_data->range_edges_model->view_idx, model_data->range_edges_model->num_views);
copy_int_arr_to_gpu(&(cu_model->range_edges_view_cnt), model_data->range_edges_model->view_cnt, model_data->range_edges_model->num_views);
copy_double_matrix_to_gpu(&(cu_model->range_edges_points), model_data->range_edges_model->pcd->points, model_data->range_edges_model->pcd->num_points, 3);
cudaMalloc(&(cu_model->score_comp_models), sizeof(score_comp_models_t));
cudaMemcpy(cu_model->score_comp_models, model_data->score_comp_models, sizeof(score_comp_models_t), cudaMemcpyHostToDevice);
//memcpy(&cu_model->score_comp_models, model_data->score_comp_models, sizeof(score_comp_models_t));
cu_model->num_points = model_data->pcd_model->num_points;
cu_model->num_views = model_data->range_edges_model->num_views;
int n_edge = arr_max_i(model_data->range_edges_model->view_cnt, model_data->range_edges_model->num_views);
cu_model->max_num_edges = n_edge;
}
void cu_init_all_models(scope_model_data_t model_data[], int num_models, cu_model_data_t cu_model[]) {
for (int i = 0; i < num_models; ++i) {
cu_init_model(&model_data[i], &cu_model[i]);
}
}
void cu_init_obs(scope_obs_data_t *obs_data, cu_obs_data_t *cu_obs, scope_params_t *params) {
copy_double_matrix_to_gpu(&(cu_obs->range_image), obs_data->obs_range_image->image, obs_data->obs_range_image->w, obs_data->obs_range_image->h);
copy_int_matrix_to_gpu(&(cu_obs->range_image_cnt), obs_data->obs_range_image->cnt, obs_data->obs_range_image->w, obs_data->obs_range_image->h);
copy_double_matrix3d_to_gpu(&(cu_obs->range_image_points), obs_data->obs_range_image->points, obs_data->obs_range_image->w, obs_data->obs_range_image->h, 3);
copy_double_matrix3d_to_gpu(&(cu_obs->range_image_normals), obs_data->obs_range_image->normals, obs_data->obs_range_image->w, obs_data->obs_range_image->h, 3);
if (params->use_colors)
copy_double_matrix3d_to_gpu(&(cu_obs->obs_lab_image), obs_data->obs_lab_image, 3, obs_data->obs_range_image->w, obs_data->obs_range_image->h);
copy_int_matrix_to_gpu(&(cu_obs->range_image_idx), obs_data->obs_range_image->idx, obs_data->obs_range_image->w, obs_data->obs_range_image->h);
copy_double_matrix_to_gpu(&(cu_obs->range_image_pcd_obs_lab), obs_data->pcd_obs->lab, obs_data->pcd_obs->num_points, 3);
copy_double_matrix_to_gpu(&(cu_obs->fpfh_obs), obs_data->fpfh_obs->fpfh, obs_data->fpfh_obs->num_points, obs_data->fpfh_obs->fpfh_length);
copy_double_matrix_to_gpu(&(cu_obs->edge_image), obs_data->obs_edge_image, obs_data->obs_range_image->w, obs_data->obs_range_image->h);
copy_double_matrix_to_gpu(&(cu_obs->segment_affinities), obs_data->obs_segment_affinities, obs_data->num_obs_segments, obs_data->num_obs_segments);
copy_int_matrix_to_gpu(&(cu_obs->fg_range_image_cnt), obs_data->obs_fg_range_image->cnt, obs_data->obs_fg_range_image->w, obs_data->obs_fg_range_image->h);
copy_int_matrix_to_gpu(&(cu_obs->fg_range_image_idx), obs_data->obs_fg_range_image->idx, obs_data->obs_fg_range_image->w, obs_data->obs_fg_range_image->h);
cu_obs->fg_range_image_data.res = obs_data->obs_fg_range_image->res;
cu_obs->fg_range_image_data.min0 = obs_data->obs_fg_range_image->min[0];
cu_obs->fg_range_image_data.min1 = obs_data->obs_fg_range_image->min[1];
cu_obs->fg_range_image_data.w = obs_data->obs_fg_range_image->w;
cu_obs->fg_range_image_data.h = obs_data->obs_fg_range_image->h;
cu_obs->range_image_data.res = obs_data->obs_range_image->res;
cu_obs->range_image_data.min0 = obs_data->obs_range_image->min[0];
cu_obs->range_image_data.min1 = obs_data->obs_range_image->min[1];
cu_obs->range_image_data.w = obs_data->obs_range_image->w;
cu_obs->range_image_data.h = obs_data->obs_range_image->h;
cu_obs->num_obs_segments = obs_data->num_obs_segments;
// CONTINUE HERE FOR OBS DATA COPYING ********************************
}
void cu_free_all_the_model_things(cu_model_data_t *cu_model) {
cudaFree(cu_model->points.ptr);
cudaFree(cu_model->normals.ptr);
cudaFree(cu_model->normalvar.ptr);
cudaFree(cu_model->lab.ptr);
cudaFree(cu_model->ved.ptr);
/*cudaFree(cu_model->color_avg_cov.ptr);
cudaFree(cu_model->color_means1.ptr);
cudaFree(cu_model->color_means2.ptr);
cudaFree(cu_model->color_cov1.ptr);
cudaFree(cu_model->color_cov2.ptr);
cudaFree(cu_model->color_cnts1.ptr);
cudaFree(cu_model->color_cnts2.ptr);*/
cudaFree(cu_model->fpfh.ptr);
cudaFree(cu_model->fpfh_points.ptr);
cudaFree(cu_model->fpfh_normals.ptr);
cudaFree(cu_model->range_edges_model_views.ptr);
cudaFree(cu_model->range_edges_points.ptr);
cudaFree(cu_model->range_edges_view_idx.ptr);
cudaFree(cu_model->range_edges_view_cnt.ptr);
cudaFree(cu_model->score_comp_models);
}
void cu_free_all_the_things_all_models(cu_model_data_t cu_model[], int num_models) {
for (int i = 0; i < num_models; ++i) {
cu_free_all_the_model_things(&cu_model[i]);
}
}
void cu_free_all_the_obs_things(cu_obs_data_t *cu_obs, scope_params_t *params) {
cudaFree(cu_obs->range_image.ptr);
cudaFree(cu_obs->range_image_idx.ptr);
cudaFree(cu_obs->range_image_pcd_obs_lab.ptr);
//cudaFree(cu_obs->pcd_obs_fpfh.ptr);
cudaFree(cu_obs->edge_image.ptr);
cudaFree(cu_obs->range_image_points.ptr);
cudaFree(cu_obs->range_image_normals.ptr);
cudaFree(cu_obs->range_image_cnt.ptr);
cudaFree(cu_obs->fg_range_image_cnt.ptr);
cudaFree(cu_obs->fg_range_image_idx.ptr);
if (params->use_colors)
cudaFree(cu_obs->obs_lab_image.ptr);
cudaFree(cu_obs->segment_affinities.ptr);
}
void cu_free_all_the_things_init(scope_params_t *cu_params) {
cu_free(cu_params, "params");
curandDestroyGenerator(gen);
}
void cu_free_all_the_things(cu_model_data_t *cu_model, cu_obs_data_t *cu_obs, scope_params_t *cu_params, scope_params_t *params) {
/*
FREE
,,
';;
''
____ ||
; \ ||
\,---'-,-, ||
/ ( o) ||
(o )__,--'-' \ ||
,,,, ;'uuuuu'' ) ;;
\ \ \ ) ) /\//
'--' \'nnnnn' / \
\\ //'------' \
\\ // \ \
\\ // ) )
\\// | |
\\ / |
ALL THE THINGS
*/
cu_free_all_the_things_init(cu_params);
cu_free_all_the_model_things(cu_model);
cu_free_all_the_obs_things(cu_obs, params);
}
void cu_free_all_the_things_mope(cu_model_data_t cu_model[], cu_obs_data_t *cu_obs, scope_params_t *cu_params, int num_models, scope_params_t *params) {
// Free ALL the things!!!
cu_free_all_the_things_init(cu_params);
cu_free_all_the_things_all_models(cu_model, num_models);
cu_free_all_the_obs_things(cu_obs, params);
}
void cu_init_scoring(scope_params_t **cu_params, scope_params_t *params) {
cu_malloc(cu_params, sizeof(scope_params_t), "params");
cudaMemcpy(*cu_params, params, sizeof(scope_params_t), cudaMemcpyHostToDevice);
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
int mope_seed = time(NULL); // 1368460607; <--- There is still an unresolved issue with this seed
printf("********* mope seed = %d\n", mope_seed);
curandSetPseudoRandomGeneratorSeed(gen, mope_seed);
}
void cu_init_scoring_model_obs(scope_model_data_t *model_data, scope_obs_data_t *obs_data, cu_model_data_t *cu_model, cu_obs_data_t *cu_obs, scope_params_t **cu_params, scope_params_t *params) {
cu_init_scoring(cu_params, params);
cu_init_model(model_data, cu_model);
cu_init_obs(obs_data, cu_obs, params);
}
void cu_init_scoring_mope_models_obs(scope_model_data_t *model_data, scope_obs_data_t *obs_data, int num_models, cu_model_data_t cu_model[], cu_obs_data_t *cu_obs,
scope_params_t **cu_params, scope_params_t *params) {
cu_init_scoring(cu_params, params);
// Allocate all the memory
cu_init_all_models(model_data, num_models, cu_model);
cu_init_obs(obs_data, cu_obs, params);
}
__device__ void cu_range_image_xyz2sub(int *i, int *j, cu_range_image_data_t range_image, double xyz[])
{
//TODO: use range image viewpoint
double d = cu_norm(xyz, 3);
double x = atan2(xyz[0], xyz[2]);
double y = acos(xyz[1] / d);
int cx = (int)floor((x - range_image.min0) / range_image.res);
int cy = (int)floor((y - range_image.min1) / range_image.res);
*i = cx;
*j = cy;
if (!((cx >= 0 && cy>=0) && (cx < range_image.w) && (cy < range_image.h))) {
*i = -1;
*j = -1;
//printf("device res = %lf, min0 = %lf, min1 = %lf, w = %d, h = %d\n", range_image.res, range_image.min0, range_image.min1, range_image.w, range_image.h);
//printf("%lf %lf %lf\n", xyz[0], xyz[1], xyz[2]);
}
}
/*
* compute viewpoint (in model coordinates) for model placement (x,q) assuming observed viewpoint = (0,0,0)
*/
__device__ void cu_model_pose_to_viewpoint(double *vp, double *x, double *q)
{
double q_inv[4];
cu_quaternion_inverse(q_inv, q);
double R_inv[3][3];
cu_quaternion_to_rotation_matrix(R_inv,q_inv);
cu_matrix_vec_mult_3(vp, R_inv, x, 3);
cu_mult(vp, vp, -1, 3);
}
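// sum each row of an n-by-m matrix (or the first m_arr[i] entries of row i) into out_array, one thread per row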
__global__ void cu_add_matrix_rows_slow(double *out_array, double *in_matrix, int n, int m, int *m_arr) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n)
return;
int limit = m;
if (m_arr)
limit = m_arr[i];
out_array[i] = 0.0;
for (int j = 0; j < limit; ++j) {
out_array[i] += in_matrix[j + i * m];
}
}
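// row sums as above, but with a block of threads per row and a shared-memory partial reduction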
__global__ void cu_add_matrix_rows_medium(double *out_array, double *in_matrix, int n, int m, int *m_arr) {
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n || j >= m)
return;
if (m_arr && j >= m_arr[i])
return;
int limit = m;
if (m_arr)
limit = m_arr[i];
//extern __shared__ double tmps[];
__shared__ double tmps[256];
tmps[threadIdx.x] = 0.0;
for (int k = threadIdx.x; k < limit; k += blockDim.x) {
tmps[threadIdx.x] += in_matrix[k + i * m];
}
__syncthreads();
limit = MIN(limit, blockDim.x);
if (j == 0) {
out_array[i] = 0.0;
for (int k = 0; k < limit; ++k) {
out_array[i] += tmps[k];
}
}
}
__global__ void cu_add_matrix_3d_slow(double *out_array, double *in_matrix, int n, int m, int *m_arr, int p) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n)
return;
int limit = m;
if (m_arr)
limit = m_arr[i];
for (int k = 0; k < p; ++k) {
out_array[i*p + k] = 0.0;
}
for (int j = 0; j < limit; ++j) {
for (int k = 0; k < p; ++k) {
out_array[i*p + k] += in_matrix[j * p + i * m * p + k];
}
}
}
__global__ void cu_add_matrix_3d_medium(double *out_array, double *in_matrix, int n, int m, int *m_arr, int p) {
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n || j >= m)
return;
int limit1 = m;
if (m_arr)
limit1 = m_arr[i];
int limit2 = MIN(limit1, blockDim.x);
//extern __shared__ double tmps[];
__shared__ double tmps[256];
for (int l = 0; l < p; ++l) { // Outer loop to save shared memory
tmps[threadIdx.x] = 0.0;
for (int k = j; k < limit1; k += blockDim.x) {
tmps[threadIdx.x] += in_matrix[k * p + i * m * p + l];
}
__syncthreads();
if (j == 0) {
out_array[i * p + l] = 0.0;
for (int k = 0; k < limit2; ++k) {
out_array[i * p + l] += tmps[k];
}
}
__syncthreads();
}
}
__global__ void cu_divide_matrix_with_vector(double *out_matrix, double *in_matrix, double *scaling_array, int n, int m, int *m_arr) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= n || j >= m)
return;
if (m_arr && j >= m_arr[i])
return;
out_matrix[j + i * m] = in_matrix[j + i * m] / scaling_array[i];
}
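// pick 'needed' model point indices per sample: all points if needed == total_pts, otherwise a
// pseudo-random stride built from big_primes and the supplied random numbers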
__global__ void cu_get_validation_points(int *idx, int total_pts, int needed, int num_samples, uint *rands)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (j >= needed || i >= num_samples)
return;
if (needed == total_pts) { // use all the points
idx[j + i * needed] = j;
} else {
idx[j + i * needed] = ((rands[(i << 1)] % total_pts) + (j * (big_primes[rands[(i << 1) + 1] % 100] % total_pts))) % total_pts;
}
}
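// transform the selected model points by each sample's pose (x,q) into the observed frame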
__global__ void cu_get_sub_cloud_at_pose(double *cloud, cu_double_matrix_t points, double *x, double *q, int *idx, int num_samples, int n)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (j >= n || i >= num_samples)
return;
int i_arr = j + i * n;
double R[3][3];
cu_quaternion_to_rotation_matrix(R, &q[i * 4]);
double dest[3]; // In local memory so we access global memory less
dest[0] = points.ptr[idx[i_arr] * points.m];
dest[1] = points.ptr[idx[i_arr] * points.m + 1];
dest[2] = points.ptr[idx[i_arr] * points.m + 2];
double tmp[3];
cu_matrix_vec_mult_3(tmp, R, dest, 3);
cu_add(dest, tmp, &x[i * 3], 3);
cloud[3 * i_arr] = dest[0]; cloud[3*i_arr + 1] = dest[1]; cloud[3*i_arr + 2] = dest[2];
}
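// rotate the selected model normals by each sample's orientation q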
__global__ void cu_get_sub_cloud_normals_rotated(double *cloud_normals, cu_double_matrix_t normals, double *q, int *idx, int num_samples, int n)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (j >= n || i >= num_samples)
return;
int i_arr = j + i * n;
double R[3][3];
cu_quaternion_to_rotation_matrix(R, &q[i * 4]);
double *row;
double dest[3];
row = &normals.ptr[idx[i_arr] * normals.m];
double tmp[3];
tmp[0] = row[0]; tmp[1] = row[1]; tmp[2] = row[2];
cu_matrix_vec_mult_3(dest, R, tmp, 3);
cloud_normals[3*i_arr] = dest[0]; cloud_normals[3*i_arr+1] = dest[1]; cloud_normals[3*i_arr + 2] = dest[2];
}
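// project each transformed point into range-image cell coordinates (xi,yi); (-1,-1) if out of bounds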
__global__ void cu_populate_xi_yi(int *xi, int *yi, double *cloud, cu_range_image_data_t range_image_data, int num_samples, int n, int *n_arr) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples || j >= n)
return;
if (n_arr && j >= n_arr[i])
return;
int i_arr = j + i * n;
double dest[3];
dest[0] = cloud[3*i_arr];
dest[1] = cloud[3*i_arr + 1];
dest[2] = cloud[3*i_arr + 2];
cu_range_image_xyz2sub(&xi[i_arr], &yi[i_arr], range_image_data, dest);
if (0)
printf("%d %d %d\n", i_arr, xi[i_arr], yi[i_arr]);
/*if (j == 0) {
printf("res = %lf, min0 = %lf, min1 = %lf, w = %d, h = %d\n", range_image_data.res, range_image_data.min0, range_image_data.min1, range_image_data.w, range_image_data.h);
}*/
}
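// per-point visibility probability: 0 for back-facing or out-of-image points, otherwise a Gaussian
// falloff of how far the model range exceeds the (locally max-pooled) observed range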
__global__ void cu_compute_visibility_prob(double *cu_vis_prob, double *cu_cloud, double *cu_normals, int *cu_xi, int *cu_yi, cu_range_image_data_t ri_data,
cu_double_matrix_t range_image, double vis_thresh, int search_radius, int num_samples, int n, int *n_arr) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples || j >= n)
return;
if (n_arr && j >= n_arr[i])
return;
int i_arr = j + i * n;
int xi = cu_xi[i_arr];
int yi = cu_yi[i_arr];
double V[3];
double pt[3];
pt[0] = cu_cloud[3*i_arr]; pt[1] = cu_cloud[3*i_arr + 1]; pt[2] = cu_cloud[3*i_arr + 2];
cu_normalize(V, pt, 3);
if (cu_normals != NULL && cu_dot(V, &cu_normals[3*i_arr], 3) >= -.1) { // normals pointing away
cu_vis_prob[i_arr] = 0.0;
return;
}
if (xi == -1 && yi == -1) {
cu_vis_prob[i_arr] = 0.0;
return;
}
double model_range = cu_norm(pt, 3);
double obs_range = range_image.ptr[xi * range_image.m + yi];
if (search_radius > 0) {
int x0 = MAX(xi - search_radius, 0);
int x1 = MIN(xi + search_radius, ri_data.w - 1);
int y0 = MAX(yi - search_radius, 0);
int y1 = MIN(yi + search_radius, ri_data.h - 1);
int x, y;
for (x = x0; x <= x1; x++)
for (y = y0; y <= y1; y++)
obs_range = MAX(obs_range, range_image.ptr[x * range_image.m + y]);
}
double dR = model_range - obs_range;
cu_vis_prob[i_arr] = (dR < 0 ? 1.0 : cu_normpdf(dR/vis_thresh, 0, 1) / .3989); // .3989 = normpdf(0,0,1)
}
__global__ void cu_get_viewpoints(int *vi, int num_samples, double *samples_x, double *samples_q, cu_double_matrix_t range_edges_model_views) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
double vp[3];
cu_model_pose_to_viewpoint(vp, &samples_x[3*i], &samples_q[4*i]);
double vi_max = -(1<<19);
int j;
for (j = 0; j < range_edges_model_views.n; ++j) {
double tmp = cu_dot(&range_edges_model_views.ptr[j * range_edges_model_views.m], vp, 3);
if (tmp > vi_max) {
vi[i] = j;
vi_max = tmp;
}
}
}
__global__ void cu_get_noise_models(scope_noise_model_t *noise_models, double *cloud, double *normals, int *idx, int *vi, cu_double_matrix_t ved, cu_double_arr_t normalvar, int num_samples, int n) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (j >= n || i >= num_samples)
return;
// prep for lookup edge distances for closest model viewpoint
double surface_angles, edge_dists;
// compute sigmas
int i_arr = i * n + j;
double normalized[3];
cu_normalize(normalized, &cloud[3*i_arr], 3);
surface_angles = 1 + cu_dot(normalized, &normals[3 * i_arr], 3);
edge_dists = ved.ptr[idx[i_arr] * ved.m + vi[i]];
noise_models[i_arr].range_sigma = .5*cu_sigmoid(surface_angles, b_SR) + .5*cu_sigmoid(edge_dists, b_ER);
noise_models[i_arr].normal_sigma = .5*cu_sigmoid(surface_angles, b_SN) + .5*cu_sigmoid(edge_dists, b_EN);
noise_models[i_arr].lab_sigma[0] = .5*cu_sigmoid(surface_angles, b_SL) + .5*cu_sigmoid(edge_dists, b_EL);
noise_models[i_arr].lab_sigma[1] = .5*cu_sigmoid(surface_angles, b_SA) + .5*cu_sigmoid(edge_dists, b_EA);
noise_models[i_arr].lab_sigma[2] = .5*cu_sigmoid(surface_angles, b_SB) + .5*cu_sigmoid(edge_dists, b_EB);
noise_models[i_arr].normal_sigma = MAX(noise_models[i_arr].normal_sigma, normalvar.ptr[idx[i_arr]]);
}
__global__ void cu_transform_cloud(double *cloud2, double *cloud, double *x, double *q, int num_samples, int n, int *n_arr)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples || j >= n)
return;
if (n_arr && j >= n_arr[i])
return;
int i_arr = j + i * n;
double R[3][3];
cu_quaternion_to_rotation_matrix(R,&q[4*i]);
double tmp[3];
cu_matrix_vec_mult_3(tmp, R, &cloud[i_arr*3], 3);
cloud2[3*i_arr] = tmp[0];
cloud2[3*i_arr+1] = tmp[1];
cloud2[3*i_arr+2] = tmp[2];
if (x != NULL) {
cu_add(&cloud2[i_arr*3], &cloud2[i_arr*3], &x[3*i], 3);
}
}
__global__ void cu_reorder_rows(double *cloud, cu_double_matrix_t points, int *idx, int n, int m, int p, int *m_arr) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= n || j >= m)
return;
if (m_arr && j >= m_arr[i])
return;
for (int k = 0; k < p; ++k) {
cloud[i * m * p + j * p + k] = points.ptr[idx[j + i * m] * p + k];
}
}
__global__ void cu_compute_xyz_score_individual(double *xyz_score, double *cloud, int *xi, int *yi, double *vis_pmf, scope_noise_model_t *noise_models, int num_samples, int num_validation_points,
cu_double_matrix_t range_image, cu_range_image_data_t range_image_data, cu_int_matrix_t range_image_cnt, scope_params_t *params)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (j >= num_validation_points || i >= num_samples)
return;
int xyz_score_window = params->xyz_score_window;
int i_arr = j + i * num_validation_points;
xyz_score[i_arr] = 0.0;
if (vis_pmf[i_arr] > .01/(double)num_validation_points) {
double range_sigma = params->range_sigma * noise_models[i_arr].range_sigma;
double model_range = cu_norm(&cloud[3*i_arr], 3);
double dmax = 2*range_sigma;
double dmin = dmax;
int x, y;
int r = xyz_score_window;
for (x = xi[i_arr] - r; x<=xi[i_arr] + r; ++x) {
for (y = yi[i_arr] - r; y <= yi[i_arr] + r; ++y) {
if (x >= 0 && x < (range_image_data.w) && y>=0 && y<(range_image_data.h) && range_image_cnt.ptr[x * range_image_cnt.m + y] > 0) {
double obs_range = range_image.ptr[x * range_image.m + y];
double d = fabs(model_range - obs_range);
if (d < dmin)
dmin = d;
}
}
}
double d = dmin;
xyz_score[i_arr] = vis_pmf[i_arr] * log(cu_normpdf(d, 0, range_sigma));
}
}
__global__ void cu_compute_xyz_score_final(double *xyz_scores, double *score_comps, int num_samples, double *b_xyz, scope_params_t *params, int score_round) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
xyz_scores[i] -= log(cu_normpdf(0, 0, params->range_sigma));
if ((score_round == 2 && params->score2_use_score_comp_models) || (score_round == 3 && params->score3_use_score_comp_models))
xyz_scores[i] = cu_logistic(xyz_scores[i], b_xyz);
if (score_round == 3 && score_comps)
score_comps[i * num_components + xyz_idx] = xyz_scores[i];
double w = 0;
if (score_round == 2)
w = params->score2_xyz_weight;
else
w = params->score3_xyz_weight;
xyz_scores[i] *= w;
}
__global__ void cu_compute_normal_score_individual(double *normal_score, double *wtot_individual, double *cloud_normals, double *vis_pmf, scope_noise_model_t *noise_models, int num_samples,
int num_validation_points, int *xi, int *yi, cu_int_matrix_t range_image_cnt, cu_double_matrix3d_t range_image_normals, scope_params_t *params, int score_round)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (j >= num_validation_points || i >= num_samples)
return;
int i_arr = j + i * num_validation_points;
double normalvar_thresh = params->normalvar_thresh;
normal_score[i_arr] = 0.0;
wtot_individual[i_arr] = 0.0;
if (vis_pmf[i_arr] > .01/ (double) num_validation_points && noise_models[i_arr].normal_sigma <= normalvar_thresh) {
double normal_sigma = params->normal_sigma * noise_models[i_arr].normal_sigma;
double dmax = 2*normal_sigma;
double d = dmax;
if ((xi[i_arr] != -1 && yi[i_arr] != -1) && range_image_cnt.ptr[xi[i_arr] * range_image_cnt.m + yi[i_arr]] > 0) {
d = 1.0 - cu_dot(&cloud_normals[3*i_arr], &(range_image_normals.ptr[xi[i_arr] * range_image_normals.m * range_image_normals.p + yi[i_arr] * range_image_normals.p]), 3);
d = MIN(d, dmax);
}
normal_score[i_arr] = vis_pmf[i_arr] * log(cu_normpdf(d, 0, normal_sigma));
wtot_individual[i_arr] = vis_pmf[i_arr];
}
}
__global__ void cu_compute_normal_score_final(double *normal_scores, double *score_comps, double *wtot, int num_samples, double *b_normal, scope_params_t *params, int score_round) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
if (wtot[i] > 0.0)
normal_scores[i] /= wtot[i];
normal_scores[i] -= log(cu_normpdf(0, 0, params->normal_sigma));
if ((score_round == 2 && params->score2_use_score_comp_models) || (score_round == 3 && params->score3_use_score_comp_models))
normal_scores[i] = cu_logistic(normal_scores[i], b_normal);
if (score_round == 3 && score_comps)
score_comps[i * num_components + normal_idx] = normal_scores[i];
double w = 0;
if (score_round == 2)
w = params->score2_normal_weight;
else
w = params->score3_normal_weight;
normal_scores[i] *= w;
}
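// per-sample visibility score: log of the average visibility probability over validation points, scaled by the round-specific weight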
__global__ void cu_compute_vis_score(double *vis_score, double *score_comps, double *vis_sums, int n, scope_params_t *params, int score_round)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n)
return;
vis_score[i] = log(vis_sums[i] / (double) n);
if (score_round == 3)
score_comps[i * num_components + vis_idx] = vis_score[i];
double w = 0;
if (score_round == 2)
w = params->score2_vis_weight;
else
w = params->score3_vis_weight;
vis_score[i] *= w;
}
__global__ void cu_set_mask_for_segment_affinity(int *mask, int *segments, int *num_segments, int num_obs_segments, int num_samples) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples || j >= num_segments[i])
return;
// Assumes mask is initialized to all zeros before kernel execution
mask[segments[j + i * num_obs_segments] + i * num_obs_segments] = 1;
}
// compute the segment affinity score for a scope sample
__global__ void cu_compute_segment_affinity_score_per_seg(double *seg_affinity_score_per_seg, int *segments, int *num_segments, cu_double_matrix_t segment_affinities, int num_obs_segments, int *mask,
int num_samples)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples || j >= num_obs_segments)
return;
int k;
seg_affinity_score_per_seg[j + i * num_obs_segments] = 0.0;
if (mask[j + i * num_obs_segments] == 0) {
for (k = 0; k < num_segments[i]; ++k) {
int s = segments[k + i * num_obs_segments];
double a = MIN(segment_affinities.ptr[s * segment_affinities.m + j], .9);
if (a > 0.5)
seg_affinity_score_per_seg[j + i * num_obs_segments] += log((1-a)/a);
}
}
}
__global__ void cu_compute_segment_affinity_score_final(double *seg_affinity_score, double *score_comps, scope_params_t *params, int score_round, int num_samples) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
seg_affinity_score[i] *= .05;
if (score_round == 3)
score_comps[i * num_components + segment_affinity_idx] = seg_affinity_score[i];
double weight = 0;
if (score_round == 2)
weight = params->score2_segment_affinity_weight;
else
weight = params->score3_segment_affinity_weight;
seg_affinity_score[i] *= weight;
}
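// per sample, clamp the number of edge validation points to the number of edge points available for its viewpoint (0 means use all of them)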
__global__ void cu_generate_n_for_range_edge(int *n_out, int *vi, int num_samples, int num_validation_points, cu_int_arr_t range_edges_view_cnt) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
int v_idx = vi[i];
int num_edge_points = range_edges_view_cnt.ptr[v_idx];
int n = num_validation_points;
if (n >= num_edge_points || n == 0) {
n = num_edge_points;
}
n_out[i] = n;
}
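// pick needed[i] edge-point indices per sample (all of them, or a pseudo-random strided subset), then offset by the start index of the sample's viewpoint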
__global__ void cu_get_range_edge_idx(int *idx, int *needed, int num_samples, int total_pts, int n, uint *rands, int *vi,cu_int_arr_t range_edges_view_idx)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples || j >= needed[i])
return;
// NOTE(sanja): This might need some fixing if I use the function in a broader sense, like on a CPU version
if (needed[i] <= n) { // use all the points
idx[j + i * total_pts] = j;
} else {
idx[j + i * total_pts] = ((rands[i << 1] % needed[i]) + (j * (big_primes[rands[(i << 1) + 1] % 100] % needed[i]))) % needed[i];
}
int vp_idx = range_edges_view_idx.ptr[vi[i]];
idx[j + i * total_pts] += vp_idx;
}
/*__global__ void cu_get_range_edge_points(double *P, int num_samples, int *n, int *idx, int n_edge, cu_double_matrix_t range_edges_points)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples)
return;
if (j >= n[i])
return;
// get the actual points in the correct pose
P[3 * i * n_edge + 3 * j] = range_edges_points.ptr[3 * idx[j + i * n_edge]];
P[3 * i * n_edge + 3 * j + 1] = range_edges_points.ptr[3 * idx[j + i * n_edge] + 1];
P[3 * i * n_edge + 3 * j + 2] = range_edges_points.ptr[3 * idx[j + i * n_edge] + 2];
}*/
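// per-point edge score: edge image value at the projected pixel, weighted by the visibility pmf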
__global__ void cu_compute_edge_score_individual(double *edge_score, double *vis_pmf, int *xi, int *yi, cu_double_matrix_t edge_image, int num_samples, int *n, int n_edge) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples || j >= n[i])
return;
edge_score[j + i * n_edge] = 0.0;
if (xi[j + i *n_edge] != -1 && yi[j + i * n_edge] != -1) {
edge_score[j + i * n_edge] = vis_pmf[j + i * n_edge] * edge_image.ptr[xi[j + i *n_edge]*edge_image.m + yi[j + i *n_edge]];
}
}
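// per-sample edge score: optionally apply logistic score-comp models, compute the edge visibility term, store score components in round 3, and combine edge, edge-vis and edge-occ terms with round-specific weights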
__global__ void cu_compute_edge_score_final(double *edge_score, double *score_comps, double *vis_score, double *vis_prob_sums, double *occ_score, int num_samples, int *n_arr, double *b_edge, double *b_edge_occ,
scope_params_t *params, int score_round) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
if ((score_round == 2 && params->score2_use_score_comp_models) || (score_round == 3 && params->score3_use_score_comp_models)) {
edge_score[i] = cu_logistic(edge_score[i], b_edge);
if (occ_score)
occ_score[i] = cu_logistic(occ_score[i], b_edge_occ);
}
vis_score[i] = log(vis_prob_sums[i] / (double) n_arr[i]);
if (score_round == 3 && score_comps) {
score_comps[i * num_components + edge_vis_idx] = vis_score[i];
if (occ_score)
score_comps[i * num_components + edge_occ_idx] = occ_score[i];
else
score_comps[i * num_components + edge_occ_idx] = 0.0;
score_comps[i * num_components + edge_idx] = edge_score[i];
}
double w1 = 1.0, w2 = 1.0, w3 = 1.0;
if (score_round == 2) {
w1 = params->score2_edge_weight;
w2 = params->score2_edge_vis_weight;
w3 = params->score2_edge_occ_weight;
}
else {
w1 = params->score3_edge_weight;
w2 = params->score3_edge_vis_weight;
w3 = params->score3_edge_occ_weight;
}
if (occ_score)
edge_score[i] = (w1 * edge_score[i]) + (w2 * vis_score[i]) + (w3 * occ_score[i]);
else
edge_score[i] = (w1 * edge_score[i]) + (w2 * vis_score[i]);
}
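// cheap round-1 score: penalize model points whose projected pixel has an observed range more than round1_dthresh greater than the model point's range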
__global__ void cu_score_round1(double *scores_ind, int *xi, int *yi, double *cloud, cu_double_matrix_t range_image, int num_samples, int num_validation_points) {
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples || j >= num_validation_points)
return;
double dest[3];
int i_arr = i*num_validation_points;
scores_ind[i_arr + j] = 0.0;
dest[0] = cloud[3*(i_arr + j)]; dest[1] = cloud[3*(i_arr + j)+1]; dest[2] = cloud[3*(i_arr + j) + 2];
if ((xi[i_arr + j] != -1 && yi[i_arr + j] != -1) && range_image.ptr[xi[i_arr + j]*range_image.m + yi[i_arr + j]] > round1_dthresh + cu_norm(dest, 3))
scores_ind[i_arr + j] = -1.0;
}
__global__ void cu_score_round1_final(double *scores, int num_samples, int num_validation_points) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
scores[i] /= (double)num_validation_points;
}
// TODO(sanja): make this a more general function that takes double** or something like that
__global__ void cu_add_all_scores(double *cu_scores, double *cu_xyz_score, double *cu_normal_score, double *cu_vis_score, double *cu_seg_affinity_score, double *cu_edge_scores,
double *cu_fpfh_score, int num_samples) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
cu_scores[i] = cu_xyz_score[i] + cu_normal_score[i] + cu_vis_score[i] + cu_seg_affinity_score[i] + cu_edge_scores[i] + cu_fpfh_score[i];
}
__global__ void cu_add_3_scores(double *cu_scores, double *cu_xyz_score, double *cu_normal_score, double *cu_edge_scores, int num_samples) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
cu_scores[i] = cu_xyz_score[i] + cu_normal_score[i] + cu_edge_scores[i];
}
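// host helper: compute per-point visibility probabilities, reduce them to per-sample sums, and normalize each row into a visibility pmf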
void get_vis_prob_sums_and_pmf(double *vis_prob, double *vis_prob_sums, double *vis_pmf, double *cloud, double *normals, int *xi, int *yi, cu_double_matrix_t range_image, cu_range_image_data_t range_image_data,
int vis_pixel_radius, int num_samples, int n, int *n_arr, scope_params_t *params, dim3 block, dim3 thread, dim3 block_sum, dim3 thread_sum, int slow_sum) {
cu_compute_visibility_prob<<<block, thread>>>(vis_prob, cloud, normals, xi, yi, range_image_data, range_image, params->vis_thresh, vis_pixel_radius, num_samples, n, n_arr);
if ( cudaSuccess != cudaGetLastError() )
printf( "vis_prob!\n" );
if (slow_sum)
cu_add_matrix_rows_slow<<<block_sum, thread_sum>>>(vis_prob_sums, vis_prob, num_samples, n, n_arr);
else
//cu_add_matrix_rows_medium<<<block_sum, thread_sum, thread_sum.x * sizeof(double)>>>(vis_prob_sums, vis_prob, num_samples, n, n_arr);
cu_add_matrix_rows_medium<<<block_sum, thread_sum>>>(vis_prob_sums, vis_prob, num_samples, n, n_arr);
// TODO(sanja): Optimize. ArrayFire?
if ( cudaSuccess != cudaGetLastError() )
printf( "Vis prob sums!\n" );
cu_divide_matrix_with_vector<<<block, thread>>>(vis_pmf, vis_prob, vis_prob_sums, num_samples, n, n_arr);
if ( cudaSuccess != cudaGetLastError() )
printf( "Vis pmf!\n" );
}
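// host helper: copy the per-sample poses (x,q) into packed device arrays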
void unpack_x_q(double *cu_x, double *cu_q, scope_sample_t *samples, int num_samples) {
double **samples_x = new_matrix2(num_samples, 3);
double **samples_q = new_matrix2(num_samples, 4);
int i;
for (i = 0; i < num_samples; ++i) {
memcpy(samples_x[i], samples[i].x, 3 * sizeof(double));
}
for (i = 0; i < num_samples; ++i) {
memcpy(samples_q[i], samples[i].q, 4 * sizeof(double));
}
cudaMemcpy(cu_x, samples_x[0], 3 * num_samples * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(cu_q, samples_q[0], 4 * num_samples * sizeof(double), cudaMemcpyHostToDevice);
free_matrix2(samples_x);
free_matrix2(samples_q);
}
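// host helper: (optionally) find the closest model viewpoint for each sample, sample edge-point indices, and gather the corresponding edge points into cu_P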
void get_range_edge_points(int *cu_n, double *cu_P, int num_samples, int n_edge, int *n_arr, int num_validation_points, cu_model_data_t *cu_model, int *cu_vi, int find_vi, double *cu_x, double *cu_q,
dim3 block, dim3 thread, dim3 block_small, dim3 thread_small) {
if (find_vi) {
cu_get_viewpoints<<<block_small, thread_small>>>(cu_vi, num_samples, cu_x, cu_q, cu_model->range_edges_model_views);
if ( cudaSuccess != cudaGetLastError() )
printf( "Viewpoints!\n" );
}
int *cu_idx_edge;
cu_malloc(&cu_idx_edge, num_samples * n_edge * sizeof(int), "idx_edge");
uint *cu_rands_edge;
cu_malloc(&cu_rands_edge, 2 * num_samples * sizeof(uint), "rands_edge malloc");
cu_generate_n_for_range_edge<<<block_small, thread_small>>>(cu_n, cu_vi, num_samples, num_validation_points, cu_model->range_edges_view_cnt);
if ( cudaSuccess != cudaGetLastError() )
printf( "find n!\n" );
curandGenerate(gen, cu_rands_edge, 2*num_samples);
if ( cudaSuccess != cudaGetLastError() )
printf( "generate rands_edge!\n" );
cu_get_range_edge_idx<<<block, thread>>>(cu_idx_edge, cu_n, num_samples, n_edge, num_validation_points, cu_rands_edge, cu_vi, cu_model->range_edges_view_idx);
if ( cudaSuccess != cudaGetLastError() )
printf( "idx edge!\n" );
cu_reorder_rows<<<block, thread>>>(cu_P, cu_model->range_edges_points, cu_idx_edge, num_samples, n_edge, 3, cu_n);
if ( cudaSuccess != cudaGetLastError() )
printf( "edge pts\n" );
if (!cu_rands_edge)
printf("cu_rands_edge is NULL!\n");
cu_free(cu_rands_edge, "rands_edge free\n");
cu_free(cu_idx_edge, "idx_edge");
}
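// host helper: draw num_validation_points model point indices per sample (a random subset when the model has more points than requested)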
void get_validation_points(int *cu_idx, int model_points, int num_validation_points, int num_samples, dim3 block, dim3 thread) {
uint *cu_rands;
cu_malloc(&cu_rands, 2 * num_samples * sizeof(uint), "rands");
if (model_points > num_validation_points) {
curandGenerate(gen, cu_rands, 2*num_samples);
}
cu_get_validation_points<<<block, thread>>>(cu_idx, model_points, num_validation_points, num_samples, cu_rands);
if ( cudaSuccess != cudaGetLastError() )
printf( "Validation!\n" );
cu_free(cu_rands, "rands free");
}
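// host wrapper for the xyz score: per-point kernel, row sums, then the per-sample final kernel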
void compute_xyz_score(double *cu_xyz_score, double *cu_score_comps, double *cu_cloud, int *cu_xi, int *cu_yi, double *cu_vis_pmf, scope_noise_model_t *cu_noise_models, cu_double_matrix_t range_image,
cu_range_image_data_t range_image_data, cu_int_matrix_t range_image_cnt, int num_samples, int num_validation_points, scope_params_t *cu_params, int round,
double *b_xyz, dim3 block_size, dim3 threads_per_block, dim3 block_size_sum, dim3 thread_size_sum, dim3 block_size_small, dim3 thread_size_small) {
int num_total = num_samples * num_validation_points;
double *cu_xyz_score_per_point;
cu_malloc(&cu_xyz_score_per_point, num_total * sizeof(double), "xyz_scores_pp");
cu_compute_xyz_score_individual<<<block_size, threads_per_block>>>(cu_xyz_score_per_point, cu_cloud, cu_xi, cu_yi, cu_vis_pmf, cu_noise_models, num_samples, num_validation_points,
range_image, range_image_data, range_image_cnt, cu_params);
if ( cudaSuccess != cudaGetLastError() )
printf( "xyz individual!\n" );
cu_add_matrix_rows_medium<<<block_size_sum, thread_size_sum, thread_size_sum.x * sizeof(double)>>>(cu_xyz_score, cu_xyz_score_per_point, num_samples, num_validation_points, NULL);
if ( cudaSuccess != cudaGetLastError() )
printf( "xyz sums!\n" );
cu_compute_xyz_score_final<<<block_size_small, thread_size_small>>>(cu_xyz_score, cu_score_comps, num_samples, b_xyz, cu_params, round);
if ( cudaSuccess != cudaGetLastError() )
printf( "xyz final!\n" );
cu_free(cu_xyz_score_per_point, "xyz_scores_pp");
}
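// host wrapper for the normal score: per-point kernel, row sums of the scores and of the visibility weights, then the per-sample final kernel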
void compute_normal_score(double *cu_normal_score, double *cu_score_comps, double *cu_normals, double *cu_vis_pmf, scope_noise_model_t *cu_noise_models, int num_samples, int num_validation_points,
int *cu_xi, int *cu_yi, cu_int_matrix_t range_image_cnt, cu_double_matrix3d_t range_image_normals, double *b_normal, scope_params_t *cu_params, int round,
dim3 block_size, dim3 threads_per_block, dim3 block_size_sum, dim3 thread_size_sum, dim3 block_size_small, dim3 thread_size_small) {
int num_total = num_samples * num_validation_points;
double *cu_normal_score_per_point;
cu_malloc(&cu_normal_score_per_point, num_total * sizeof(double), "normal_score_pp");
double *cu_wtot_per_point;
cu_malloc(&cu_wtot_per_point, num_total * sizeof(double), "wtot_pp");
cu_compute_normal_score_individual<<<block_size, threads_per_block>>>(cu_normal_score_per_point, cu_wtot_per_point, cu_normals, cu_vis_pmf, cu_noise_models, num_samples, num_validation_points, cu_xi, cu_yi,
range_image_cnt, range_image_normals, cu_params, round);
if ( cudaSuccess != cudaGetLastError() )
printf( "normal individual!\n" );
double *cu_wtot;
cu_malloc(&cu_wtot, num_samples * sizeof(double), "wtot");
cu_add_matrix_rows_medium<<<block_size_sum, thread_size_sum, thread_size_sum.x * sizeof(double)>>>(cu_normal_score, cu_normal_score_per_point, num_samples, num_validation_points, NULL);
if ( cudaSuccess != cudaGetLastError() )
printf( "add 1!\n" );
cu_add_matrix_rows_medium<<<block_size_sum, thread_size_sum, thread_size_sum.x * sizeof(double)>>>(cu_wtot, cu_wtot_per_point, num_samples, num_validation_points, NULL);
if ( cudaSuccess != cudaGetLastError() )
printf( "add 2!\n" );
cu_compute_normal_score_final<<<block_size_small, thread_size_small>>>(cu_normal_score, cu_score_comps, cu_wtot, num_samples, b_normal, cu_params, round);
cu_free(cu_normal_score_per_point, "normal_scores_pp");
cu_free(cu_wtot_per_point, "wtot_pp");
cu_free(cu_wtot, "wtot");
}
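// host wrapper for the edge score: project the transformed edge points, compute their visibility pmf, per-point edge scores, row sums, and the final combination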
void compute_edge_score(double *cu_edge_scores, double *cu_score_comps, double *cu_P, cu_range_image_data_t range_image_data, cu_double_matrix_t range_image, int num_samples, int n_edge, int *cu_n,
cu_double_matrix_t edge_image, double *b_edge, double *b_edge_occ, scope_params_t *params, scope_params_t *cu_params, int round,
dim3 block_size_n_edge, dim3 block_size_sum, dim3 thread_size_sum, dim3 block_size_small, dim3 thread_size_small)
{
double *cu_vis_prob_edge, *cu_vis_prob_sums_edge, *cu_vis_pmf_edge;
cu_malloc(&cu_vis_prob_edge, num_samples * n_edge * sizeof(double), "vis_prob_edge");
cu_malloc(&cu_vis_prob_sums_edge, num_samples * sizeof(double), "vis_prob_sums_edge");
cu_malloc(&cu_vis_pmf_edge, num_samples * n_edge * sizeof(double), "vis_pmf_edge");
int *cu_xi_edge;
cu_malloc(&cu_xi_edge, num_samples * n_edge * sizeof(int), "xi");
int *cu_yi_edge;
cu_malloc(&cu_yi_edge, num_samples * n_edge * sizeof(int), "yi");
cu_populate_xi_yi<<<block_size_n_edge, thread_size_sum>>>(cu_xi_edge, cu_yi_edge, cu_P, range_image_data, num_samples, n_edge, cu_n);
if ( cudaSuccess != cudaGetLastError() )
printf( "edge xi yi!\n" );
int vis_pixel_radius = 2;
get_vis_prob_sums_and_pmf(cu_vis_prob_edge, cu_vis_prob_sums_edge, cu_vis_pmf_edge, cu_P, NULL, cu_xi_edge, cu_yi_edge, range_image, range_image_data, vis_pixel_radius, num_samples, n_edge,
cu_n, params, block_size_n_edge, thread_size_sum, block_size_sum, thread_size_sum, 0); // HERE!!!
//cu_n, params, block_size_n_edge, thread_size_sum, block_size_small, thread_size_small, 1); // HERE!!!
double *cu_edge_score_individual;
cu_malloc(&cu_edge_score_individual, num_samples * n_edge * sizeof(double), "edge_score");
//cu_compute_edge_score_individual<<<block_size_sum, thread_size_sum>>>(cu_edge_score_individual, cu_vis_pmf_edge, cu_xi_edge, cu_yi_edge, edge_image, num_samples, cu_n, n_edge);
cu_compute_edge_score_individual<<<block_size_n_edge, thread_size_sum>>>(cu_edge_score_individual, cu_vis_pmf_edge, cu_xi_edge, cu_yi_edge, edge_image, num_samples, cu_n, n_edge); // WEIRD
if ( cudaSuccess != cudaGetLastError() )
printf( "edge score individual!\n" );
//cu_add_matrix_rows_slow<<<block_size_small, thread_size_small>>>(cu_edge_scores, cu_edge_score_individual, num_samples, n_edge, cu_n);
cu_add_matrix_rows_medium<<<block_size_sum, thread_size_sum>>>(cu_edge_scores, cu_edge_score_individual, num_samples, n_edge, cu_n);
double *cu_vis_scores;
cu_malloc(&cu_vis_scores, num_samples * sizeof(double), "vis_scores");
cu_compute_edge_score_final<<<block_size_small, thread_size_small>>>(cu_edge_scores, cu_score_comps, cu_vis_scores, cu_vis_prob_sums_edge, NULL, num_samples, cu_n, b_edge, b_edge_occ, cu_params, round);
if ( cudaSuccess != cudaGetLastError() )
printf( "edge score final!\n" );
cu_free(cu_edge_score_individual, "edge_score_individual");
cu_free(cu_vis_scores, "vis_scores");
cu_free(cu_vis_prob_sums_edge, "vis_prob_sums_edge");
cu_free(cu_vis_pmf_edge, "vis_pmf_edge");
cu_free(cu_vis_prob_edge, "vis_prob_edge");
cu_free(cu_xi_edge, "xi_edge");
cu_free(cu_yi_edge, "yi_edge");
}
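// per-point FPFH score: distance between the model point's FPFH feature and the observed feature at the projected foreground pixel, capped at 2*f_sigma, scored under a Gaussian and weighted by the visibility pmf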
__global__ void cu_compute_fpfh_score_individual(double *cu_fpfh_score_individual, double *cu_cloud, double *cu_fpfh_cloud_f, double *cu_vis_pmf, int *xi, int *yi, cu_int_matrix_t fg_range_image_cnt,
cu_int_matrix_t fg_range_image_idx, cu_double_matrix_t fpfh_obs, int fpfh_length, int num_samples, int fpfh_num_validation_points, scope_params_t *cu_params) {
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples || j >= MIN(fpfh_num_validation_points, fpfh_obs.n))
return;
int i_arr = i * fpfh_num_validation_points + j;
cu_fpfh_score_individual[i_arr] = 0.0;
if (cu_vis_pmf[i_arr] > .01/(double)fpfh_num_validation_points) {
double f_sigma = cu_params->f_sigma;
double dmax = 2*f_sigma; // * 2*noise_models[i].range_sigma; //TODO: get FPFH noise model
double d = dmax;
if (xi[i_arr] != -1 && yi[i_arr] != -1 && fg_range_image_cnt.ptr[xi[i_arr] * fg_range_image_cnt.m + yi[i_arr]] > 0) {
int idx = fg_range_image_idx.ptr[xi[i_arr] * fg_range_image_idx.m + yi[i_arr]];
d = cu_dist(&cu_fpfh_cloud_f[i_arr * fpfh_length], &fpfh_obs.ptr[idx * fpfh_obs.m], fpfh_length);
d = MIN(d, dmax);
}
cu_fpfh_score_individual[i_arr] = cu_vis_pmf[i_arr] * log(cu_normpdf(d, 0, f_sigma));
}
}
__global__ void cu_compute_fpfh_score_final(double *cu_fpfh_scores, double *cu_score_comps, int num_samples, double *b_fpfh, scope_params_t *cu_params, int score_round) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
cu_fpfh_scores[i] -= log(cu_normpdf(0, 0, cu_params->f_sigma));
if ((score_round == 2 && cu_params->score2_use_score_comp_models) || (score_round == 3 && cu_params->score3_use_score_comp_models))
cu_fpfh_scores[i] = cu_logistic(cu_fpfh_scores[i], b_fpfh);
cu_score_comps[i * num_components + fpfh_idx] = cu_fpfh_scores[i];
double w = 0;
if (score_round == 2)
w = cu_params->score2_fpfh_weight;
else
w = cu_params->score3_fpfh_weight;
cu_fpfh_scores[i] *= w;
}
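// host wrapper for the FPFH score: build the FPFH validation cloud, features and visibility pmf, then per-point scores, row sums and the per-sample final kernel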
void compute_fpfh_score(double *cu_fpfh_scores, double *cu_score_comps, cu_double_matrix_t cu_fpfh_points, cu_double_matrix_t cu_fpfh_normals, cu_double_matrix_t cu_fpfh, cu_double_matrix_t fpfh_obs,
cu_range_image_data_t range_image_data, cu_double_matrix_t range_image, cu_range_image_data_t fg_range_image_data, cu_int_matrix_t fg_range_image_cnt,
cu_int_matrix_t fg_range_image_idx, double *cu_samples_x, double *cu_samples_q, int num_samples, int num_validation_points, double *b_fpfh,
scope_params_t *params, scope_params_t *cu_params, int round) {
int num_fpfh_points = cu_fpfh.n;
int fpfh_length = cu_fpfh.m;
int fpfh_num_validation_points = (num_validation_points > 0 ? num_validation_points : num_fpfh_points);
dim3 threads_per_block(256, 1, 1);
dim3 block_size(ceil(1.0 * fpfh_num_validation_points / threads_per_block.x), num_samples);
dim3 thread_size_small(64);
dim3 block_size_small(ceil(1.0 * num_samples/thread_size_small.x));
dim3 thread_size_sum(256);
dim3 block_size_sum(1, num_samples);
int *cu_fpfh_idx;
cu_malloc(&cu_fpfh_idx, fpfh_num_validation_points * num_samples * sizeof(int), "fpfh_idx_malloc\n");
get_validation_points(cu_fpfh_idx, num_fpfh_points, fpfh_num_validation_points, num_samples, block_size, threads_per_block);
double *cu_cloud;
cu_malloc(&cu_cloud, 3 * fpfh_num_validation_points * num_samples * sizeof(double), "cloud");
cu_get_sub_cloud_at_pose<<<block_size, threads_per_block>>>(cu_cloud, cu_fpfh_points, cu_samples_x, cu_samples_q, cu_fpfh_idx, num_samples, fpfh_num_validation_points);
if ( cudaSuccess != cudaGetLastError() )
printf( "fpfh Subcloud!\n" );
double *cu_normals;
cu_malloc(&cu_normals, 3 * fpfh_num_validation_points * num_samples * sizeof(double), "normals");
cu_get_sub_cloud_normals_rotated<<<block_size, threads_per_block>>>(cu_normals, cu_fpfh_normals, cu_samples_q, cu_fpfh_idx, num_samples, fpfh_num_validation_points);
if ( cudaSuccess != cudaGetLastError() )
printf( "fpfh Normals!\n" );
double *cu_fpfh_cloud_f;
cu_malloc(&cu_fpfh_cloud_f, fpfh_num_validation_points * num_samples * fpfh_length * sizeof(double), "fpfh_cloud_f alloc\n");
cu_reorder_rows<<<block_size, threads_per_block>>>(cu_fpfh_cloud_f, cu_fpfh, cu_fpfh_idx, num_samples, fpfh_num_validation_points, fpfh_length, NULL);
if ( cudaSuccess != cudaGetLastError() )
printf( "reorder rows fpfh!\n" );
//cudaMemcpy(gpu_cloud, cu_fpfh_cloud_f, fpfh_length * fpfh_num_validation_points * num_samples * sizeof(double), cudaMemcpyDeviceToHost);
double *cu_vis_prob;
cu_malloc(&cu_vis_prob, num_samples * fpfh_num_validation_points * sizeof(double), "fpfh vis_prob");
double *cu_vis_prob_sums;
cu_malloc(&cu_vis_prob_sums, num_samples * sizeof(double), "fpfh vis_prob_sums");
double *cu_vis_pmf;
cu_malloc(&cu_vis_pmf, num_samples * fpfh_num_validation_points * sizeof(double), "fpfh vis_pmf");
int *cu_xi;
cu_malloc(&cu_xi, num_samples * fpfh_num_validation_points * sizeof(int), "fpfh xi");
int *cu_yi;
cu_malloc(&cu_yi, num_samples * fpfh_num_validation_points * sizeof(int), "fpfh yi");
cu_populate_xi_yi<<<block_size, threads_per_block>>>(cu_xi, cu_yi, cu_cloud, range_image_data, num_samples, fpfh_num_validation_points, NULL);
if ( cudaSuccess != cudaGetLastError() )
printf( "fpfh xi yi, vis_pmf!\n" );
get_vis_prob_sums_and_pmf(cu_vis_prob, cu_vis_prob_sums, cu_vis_pmf, cu_cloud, cu_normals, cu_xi, cu_yi, range_image, range_image_data, 0, num_samples, fpfh_num_validation_points, NULL, params,
block_size, threads_per_block, block_size_sum, thread_size_sum, 0);
cu_populate_xi_yi<<<block_size, threads_per_block>>>(cu_xi, cu_yi, cu_cloud, fg_range_image_data, num_samples, fpfh_num_validation_points, NULL);
if ( cudaSuccess != cudaGetLastError() )
printf( "fpfh xi, yi\n" );
//cudaMemcpy(gpu_xi, cu_xi, num_samples * fpfh_num_validation_points * sizeof(int), cudaMemcpyDeviceToHost);
//cudaMemcpy(gpu_yi, cu_yi, num_samples * fpfh_num_validation_points * sizeof(int), cudaMemcpyDeviceToHost);
double *cu_fpfh_score_individual;
cu_malloc(&cu_fpfh_score_individual, num_samples * fpfh_num_validation_points * sizeof(double), "fpfh_individual");
cu_compute_fpfh_score_individual<<<block_size, threads_per_block>>>(cu_fpfh_score_individual, cu_cloud, cu_fpfh_cloud_f, cu_vis_pmf, cu_xi, cu_yi, fg_range_image_cnt, fg_range_image_idx,
fpfh_obs, fpfh_length, num_samples, fpfh_num_validation_points, cu_params);
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
cu_add_matrix_rows_medium<<<block_size_sum, thread_size_sum>>>(cu_fpfh_scores, cu_fpfh_score_individual, num_samples, fpfh_num_validation_points, NULL);
cu_compute_fpfh_score_final<<<block_size_small, thread_size_small>>>(cu_fpfh_scores, cu_score_comps, num_samples, b_fpfh, cu_params, round);
cu_free(cu_fpfh_idx, "fpfh_idx");
cu_free(cu_cloud, "fpfh_cloud");
cu_free(cu_normals, "fpfh_normals");
cu_free(cu_fpfh_cloud_f, "fpfh_cloud_f");
cu_free(cu_vis_prob, "fpfh vis_prob");
cu_free(cu_vis_prob_sums, "fpfh vis_prob_sums");
cu_free(cu_vis_pmf, "fpfh vis_pmf");
cu_free(cu_xi, "fpfh xi");
cu_free(cu_yi, "fpfh yi");
cu_free(cu_fpfh_score_individual, "fpfh_score individual");
}
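// score a batch of scope samples on the GPU: round 1 uses the cheap range-image score; rounds 2/3 combine xyz, normal, visibility, segment-affinity and edge scores (the FPFH term is currently zeroed since its computation is commented out); round 3 also copies the per-component scores back into the samples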
void score_samples(double *scores, scope_sample_t *samples, int num_samples, cu_model_data_t *cu_model, cu_obs_data_t *cu_obs, scope_params_t *cu_params, scope_params_t *params, int num_validation_points,
int model_points, int num_obs_segments, int edge_scoring, int round) {
if (round == 3)
params->num_validation_points = 0;
dim3 threads_per_block(256, 1, 1);
dim3 block_size(ceil(1.0 * num_validation_points / threads_per_block.x), num_samples);
dim3 thread_size_small(64);
dim3 block_size_small(ceil(1.0 * num_samples/thread_size_small.x));
dim3 thread_size_sum(256);
dim3 block_size_sum(1, num_samples);
dim3 thread_size_sum_small(64);
int num_total = num_samples * num_validation_points;
double *cu_samples_x;
cu_malloc(&cu_samples_x, num_samples * 3 * sizeof(double), "samples_x");
double *cu_samples_q;
cu_malloc(&cu_samples_q, num_samples * 4 * sizeof(double), "samples_q");
unpack_x_q(cu_samples_x, cu_samples_q, samples, num_samples);
int i;
int *cu_idx;
cu_malloc(&cu_idx, num_total * sizeof(int), "idxs");
get_validation_points(cu_idx, model_points, num_validation_points, num_samples, block_size, threads_per_block);
// extract transformed model validation features
double *cu_cloud;
cu_malloc(&cu_cloud, 3 * num_total * sizeof(double), "cloud");
cu_get_sub_cloud_at_pose<<<block_size, threads_per_block>>>(cu_cloud, cu_model->points, cu_samples_x, cu_samples_q, cu_idx, num_samples, num_validation_points);
if ( cudaSuccess != cudaGetLastError() )
printf( "Subcloud!\n" );
int *cu_xi;
cu_malloc(&cu_xi, num_total * sizeof(int), "xi");
int *cu_yi;
cu_malloc(&cu_yi, num_total * sizeof(int), "yi");
cu_populate_xi_yi<<<block_size, threads_per_block>>>(cu_xi, cu_yi, cu_cloud, cu_obs->range_image_data, num_samples, num_validation_points, NULL);
double *cu_scores;
cu_malloc(&cu_scores, num_samples * sizeof(double), "scores");
if (round == 1) {
double *cu_scores_ind;
cu_malloc(&cu_scores_ind, num_total * sizeof(double), "scores_ind");
cu_score_round1<<<block_size, threads_per_block>>>(cu_scores_ind, cu_xi, cu_yi, cu_cloud, cu_obs->range_image, num_samples, num_validation_points);
if ( cudaSuccess != cudaGetLastError() )
printf( "Round 1 score!\n" );
cu_add_matrix_rows_medium<<<block_size_sum, thread_size_sum>>>(cu_scores, cu_scores_ind, num_samples, num_validation_points, NULL);
cu_score_round1_final<<<block_size_small, thread_size_small>>>(cu_scores, num_samples, num_validation_points);
cu_free(cu_scores_ind, "scores_ind");
} else {
double *cu_score_comps = NULL; // allocated and dereferenced only in round 3
if (round == 3) {
cu_malloc(&cu_score_comps, num_samples * num_components * sizeof(double), "score_comps");
cudaMemset(cu_score_comps, 0, num_samples * num_components * sizeof(double));
}
double *cu_normals;
cu_malloc(&cu_normals, 3 * num_total * sizeof(double), "normals");
cu_get_sub_cloud_normals_rotated<<<block_size, threads_per_block>>>(cu_normals, cu_model->normals, cu_samples_q, cu_idx, num_samples, num_validation_points);
if ( cudaSuccess != cudaGetLastError() )
printf( "Normals!\n" );
double *cu_vis_prob;
cu_malloc(&cu_vis_prob, num_total * sizeof(double), "vis_prob");
double *cu_vis_prob_sums;
cu_malloc(&cu_vis_prob_sums, num_samples * sizeof(double), "vis_prob_sums");
double *cu_vis_pmf;
cu_malloc(&cu_vis_pmf, num_total * sizeof(double), "vis_pmf");
get_vis_prob_sums_and_pmf(cu_vis_prob, cu_vis_prob_sums, cu_vis_pmf, cu_cloud, cu_normals, cu_xi, cu_yi, cu_obs->range_image, cu_obs->range_image_data, 0, num_samples, num_validation_points, NULL, params,
block_size, threads_per_block, block_size_sum, thread_size_sum, 0);
int *cu_vi;
cu_malloc(&cu_vi, num_samples * sizeof(int), "vi");
cu_get_viewpoints<<<block_size_small, thread_size_small>>>(cu_vi, num_samples, cu_samples_x, cu_samples_q, cu_model->range_edges_model_views);
if ( cudaSuccess != cudaGetLastError() )
printf( "Viewpoints!\n" );
scope_noise_model_t *cu_noise_models;
cu_malloc(&cu_noise_models, num_total * sizeof(scope_noise_model_t), "noise_models");
cu_get_noise_models<<<block_size, threads_per_block>>>(cu_noise_models, cu_cloud, cu_normals, cu_idx, cu_vi, cu_model->ved, cu_model->normalvar, num_samples,
num_validation_points);
if ( cudaSuccess != cudaGetLastError() )
printf( "Noise model!\n" );
// TODO(sanja): Save results before weights kick in
double *cu_xyz_score;
cu_malloc(&cu_xyz_score, num_samples * sizeof(double), "xyz_scores");
compute_xyz_score(cu_xyz_score, cu_score_comps, cu_cloud, cu_xi, cu_yi, cu_vis_pmf, cu_noise_models, cu_obs->range_image, cu_obs->range_image_data, cu_obs->range_image_cnt, num_samples,
num_validation_points, cu_params, round, cu_model->score_comp_models->b_xyz, block_size, threads_per_block, block_size_sum, thread_size_sum, block_size_small, thread_size_small);
double *cu_normal_score;
cu_malloc(&cu_normal_score, num_samples * sizeof(double), "normal_score");
compute_normal_score(cu_normal_score, cu_score_comps, cu_normals, cu_vis_pmf, cu_noise_models, num_samples, num_validation_points, cu_xi, cu_yi, cu_obs->range_image_cnt, cu_obs->range_image_normals,
cu_model->score_comp_models->b_normal, cu_params, round, block_size, threads_per_block, block_size_sum, thread_size_sum, block_size_small, thread_size_small);
double *cu_vis_score;
cu_malloc(&cu_vis_score, num_samples * sizeof(double), "vis_score");
cu_compute_vis_score<<<block_size_small, thread_size_small>>>(cu_vis_score, cu_score_comps, cu_vis_prob_sums, num_validation_points, cu_params, round);
if ( cudaSuccess != cudaGetLastError() )
printf( "vis score!\n" );
double *cu_seg_affinity_score;
cu_malloc(&cu_seg_affinity_score, num_samples * sizeof(double), "seg_aff");
cudaMemset(cu_seg_affinity_score, 0, num_samples * sizeof(double));
double *cu_seg_affinity_score_per_seg;
int *cu_mask;
int *cu_num_segments;
int *cu_segments_idx;
double *cu_fpfh_score;
cu_malloc(&cu_fpfh_score, num_samples * sizeof(double), "fpfh_score");
cudaMemset(cu_fpfh_score, 0, num_samples * sizeof(double));
// TODO(sanja): Figure out how to speed up the prep for segment calculation
if (round >= 3) {
cu_malloc(&cu_seg_affinity_score_per_seg, num_samples * num_obs_segments * sizeof(double), "seg_aff_per_seg");
cu_malloc(&cu_mask, num_samples * num_obs_segments * sizeof(int), "mask");
cudaMemset(cu_mask, 0, num_samples * num_obs_segments * sizeof(int));
int *num_segments;
safe_calloc(num_segments, num_samples, int);
for (i = 0; i < num_samples; ++i) {
num_segments[i] = samples[i].num_segments;
}
cu_malloc(&cu_num_segments, num_samples * sizeof(int), "num_segments");
cudaMemcpy(cu_num_segments, num_segments, num_samples * sizeof(int), cudaMemcpyHostToDevice);
free(num_segments);
int *tmp_segments_idx;
safe_malloc(tmp_segments_idx, num_samples * num_obs_segments, int);
memset(tmp_segments_idx, -1, num_samples * num_obs_segments * sizeof(int));
for (i = 0; i < num_samples; ++i) {
memcpy(&(tmp_segments_idx[i * num_obs_segments]), samples[i].segments_idx, samples[i].num_segments * sizeof(int));
}
cu_malloc(&cu_segments_idx, num_samples * num_obs_segments * sizeof(int), "segments_idx");
cudaMemcpy(cu_segments_idx, tmp_segments_idx, num_samples * num_obs_segments * sizeof(int), cudaMemcpyHostToDevice);
if ( cudaSuccess != cudaGetLastError() )
printf( "seg idx memcpy!\n" );
free(tmp_segments_idx);
dim3 block_size_seg(ceil(1.0 * num_obs_segments / thread_size_sum.x), num_samples);
cu_set_mask_for_segment_affinity<<<block_size_seg, thread_size_sum>>>(cu_mask, cu_segments_idx, cu_num_segments, num_obs_segments, num_samples);
if ( cudaSuccess != cudaGetLastError() )
printf( "seg mask!\n" );
cu_compute_segment_affinity_score_per_seg<<<block_size, thread_size_small>>>(cu_seg_affinity_score_per_seg, cu_segments_idx, cu_num_segments, cu_obs->segment_affinities, num_obs_segments,
cu_mask, num_samples);
if ( cudaSuccess != cudaGetLastError() )
printf( "seg per seg!\n" );
cu_add_matrix_rows_slow<<<block_size_small, thread_size_small>>>(cu_seg_affinity_score, cu_seg_affinity_score_per_seg, num_samples, num_obs_segments, NULL);
cu_compute_segment_affinity_score_final<<<block_size_small, thread_size_small>>>(cu_seg_affinity_score, cu_score_comps, cu_params, round, num_samples);
if ( cudaSuccess != cudaGetLastError() )
printf( "seg affinity!\n" );
/*if (params->use_fpfh)
compute_fpfh_score(cu_fpfh_score, cu_score_comps, cu_model->fpfh_points, cu_model->fpfh_normals, cu_model->fpfh, cu_obs->fpfh_obs, cu_obs->range_image_data, cu_obs->range_image,
cu_obs->fg_range_image_data, cu_obs->fg_range_image_cnt, cu_obs->fg_range_image_idx, cu_samples_x, cu_samples_q, num_samples, params->num_validation_points,
cu_model->score_comp_models->b_fpfh, params, cu_params, round);*/
}
double *cu_edge_scores;
cu_malloc(&cu_edge_scores, num_samples * sizeof(double), "edge_scores");
cudaMemset(cu_edge_scores, 0, num_samples * sizeof(double));
if ( cudaSuccess != cudaGetLastError() )
printf( "memset!\n" );
if (edge_scoring) {
int n_edge = cu_model->max_num_edges;
int *cu_n;
cu_malloc(&cu_n, num_samples * sizeof(int), "n");
dim3 block_size_n_edge(ceil(1.0 * n_edge / thread_size_sum.x), num_samples);
double *cu_P;
cu_malloc(&cu_P, num_samples * n_edge * 3*sizeof(double), "cu_P");
get_range_edge_points(cu_n, cu_P, num_samples, n_edge, cu_n, num_validation_points, cu_model, cu_vi, 0, NULL, NULL, block_size_n_edge, thread_size_sum, block_size_small, thread_size_small);
cu_transform_cloud<<<block_size_n_edge, thread_size_sum>>>(cu_P, cu_P, cu_samples_x, cu_samples_q, num_samples, n_edge, cu_n);
if ( cudaSuccess != cudaGetLastError() )
printf( "transform cloud\n" );
compute_edge_score(cu_edge_scores, cu_score_comps, cu_P, cu_obs->range_image_data, cu_obs->range_image, num_samples, n_edge, cu_n, cu_obs->edge_image, cu_model->score_comp_models->b_edge,
cu_model->score_comp_models->b_edge_occ, params, cu_params, round, block_size_n_edge, block_size_sum, thread_size_sum, block_size_small, thread_size_small);
cu_free(cu_n, "n");
cu_free(cu_P, "P");
}
cu_add_all_scores<<<block_size_small, thread_size_small>>>(cu_scores, cu_xyz_score, cu_normal_score, cu_vis_score, cu_seg_affinity_score, cu_edge_scores, cu_fpfh_score, num_samples);
if ( cudaSuccess != cudaGetLastError() )
printf( "Final addition!\n" );
if (round == 3) {
double *sample_scores;
safe_malloc(sample_scores, num_samples * num_components, double);
if (cudaMemcpy(sample_scores, cu_score_comps, num_samples * num_components * sizeof(double), cudaMemcpyDeviceToHost) != cudaSuccess) {
printf("sample_scores\n");
}
for (i = 0; i < num_samples; ++i) {
samples[i].num_scores = num_components;
safe_malloc(samples[i].scores, num_components, double);
memcpy(samples[i].scores, &sample_scores[i * num_components], num_components * sizeof(double));
}
}
// NEXT(sanja): Make calls for each score component async.
cu_free(cu_normals, "normals");
cu_free(cu_vis_prob, "vis_prob");
cu_free(cu_vis_prob_sums, "vis_prob_sums");
cu_free(cu_vis_pmf, "vis_pmf");
cu_free(cu_vi, "vi");
cu_free(cu_noise_models, "noise_models");
cu_free(cu_xyz_score, "xyz_scores");
cu_free(cu_normal_score, "normal_scores");
cu_free(cu_vis_score, "vis_score");
cu_free(cu_seg_affinity_score, "seg_aff");
if (round >= 3) {
cu_free(cu_seg_affinity_score_per_seg, "seg_aff_per_seg");
cu_free(cu_mask, "mask");
cu_free(cu_num_segments, "num_segments");
cu_free(cu_segments_idx, "segments_idx");
}
cu_free(cu_edge_scores, "edge_scores");
if (round == 3)
cu_free(cu_score_comps, "score comps");
}
cudaMemcpy(scores, cu_scores, num_samples * sizeof(double), cudaMemcpyDeviceToHost);
cu_free(cu_samples_x, "samples_x"); cu_free(cu_samples_q, "samples_q");
cu_free(cu_idx, "idx");
cu_free(cu_cloud, "cloud");
cu_free(cu_xi, "xi"); cu_free(cu_yi, "yi");
cu_free(cu_scores, "scores");
cudaDeviceSynchronize();
}
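// numerical gradient of a row-major n x m matrix at cell (i,j): central differences inside, one-sided differences at the borders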
__device__ void cu_matrix_cell_gradient(double *g, int i, int j, double *X, int n, int m)
{
if (i == 0)
g[0] = X[1*m + j] - X[0*m + j];
else if (i == n-1)
g[0] = X[(n-1)*m + j] - X[(n-2)*m + j];
else
g[0] = (X[(i+1)*m + j] - X[(i-1) * m + j]) / 2.0;
if (j == 0)
g[1] = X[i*m + 1] - X[i*m + 0];
else if (j == m-1)
g[1] = X[i*m + m-1] - X[i*m + m-2];
else
g[1] = (X[i*m + j+1] - X[i*m + j-1]) / 2.0;
}
// get the jacobian of R*x w.r.t. q
__device__ void cu_point_rotation_jacobian(double out[][4], double *q, double *x)
{
double q1 = q[0];
double q2 = q[1];
double q3 = q[2];
double q4 = q[3];
double v1 = x[0];
double v2 = x[1];
double v3 = x[2];
out[0][0] = 2*(q1*v1 + q3*v3 - q4*v2); out[0][1] = 2*(q2*v1 + q3*v2 + q4*v3); out[0][2] = 2*(q1*v3 + q2*v2 - q3*v1); out[0][3] = 2*(q2*v3 - q1*v2 - q4*v1);
out[1][0] = 2*(q1*v2 - q2*v3 + q4*v1); out[1][1] = 2*(q3*v1 - q2*v2 - q1*v3); out[1][2] = 2*(q2*v1 + q3*v2 + q4*v3); out[1][3] = 2*(q1*v1 + q3*v3 - q4*v2);
out[2][0] = 2*(q1*v3 + q2*v2 - q3*v1); out[2][1] = 2*(q1*v2 - q2*v3 + q4*v1); out[2][2] = 2*(q4*v2 - q3*v3 - q1*v1); out[2][3] = 2*(q2*v1 + q3*v2 + q4*v3);
}
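// jacobian of the projected range-image pixel (i,j) w.r.t. the 7-dof model pose (x,q), scaled by 1/res of the range image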
__device__ void cu_range_image_pixel_pose_jacobian(double dij_dxq[][7], cu_range_image_data_t range_image_data, double *model_point, double *x, double *q)
{
// transform model point by model pose (x,q)
double p[3];
double R[3][3];
cu_quaternion_to_rotation_matrix(R,q);
cu_matrix_vec_mult_3(p, R, model_point, 3);
cu_add(p, p, x, 3);
double p1 = p[0];
double p2 = p[1];
double p3 = p[2];
double r = cu_norm(p,3);
// compute jacobian of (i,j) w.r.t. p (and x)
double ci = 1.0 / (p1*p1 + p3*p3);
double cj = r * sqrt(ci);
double dij_dp[2][3] = {{ci*p3, 0, -ci*p1}, {cj*p1*p2, cj*(p2*p2-r*r), cj*p2*p3}};
// compute jacobian of (i,j) w.r.t. q
double dp_dq[3][4];
cu_point_rotation_jacobian(dp_dq, q, model_point);
double dij_dq[2][4];
cu_matrix_mult_2_3_4(dij_dq, dij_dp, dp_dq);
// copy jacobians into dij_dxq
dij_dxq[0][0] = dij_dp[0][0]; dij_dxq[0][1] = dij_dp[0][1]; dij_dxq[0][2] = dij_dp[0][2];
dij_dxq[1][0] = dij_dp[1][0]; dij_dxq[1][1] = dij_dp[1][1]; dij_dxq[1][2] = dij_dp[1][2];
dij_dxq[0][3] = dij_dq[0][0]; dij_dxq[0][4] = dij_dq[0][1]; dij_dxq[0][5] = dij_dq[0][2]; dij_dxq[0][6] = dij_dq[0][3];
dij_dxq[1][3] = dij_dq[1][0]; dij_dxq[1][4] = dij_dq[1][1]; dij_dxq[1][5] = dij_dq[1][2]; dij_dxq[1][6] = dij_dq[1][3];
// divide by range image resolution
cu_mult(dij_dxq[0], dij_dxq[0], 1.0/range_image_data.res, 7);
cu_mult(dij_dxq[1], dij_dxq[1], 1.0/range_image_data.res, 7); // NOTE(sanja): This can probably be one multiplication of length 14, but I am somewhat defensive right now
}
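// per-point edge score and its gradient w.r.t. the pose: chain rule of the edge-image gradient and the pixel/pose jacobian, weighted by the visibility pmf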
__global__ void cu_edge_score_gradient_individual(double *edge_gradient_score_individual, double *dI_dxq_individual, double *vis_prob, double *vis_pmf, int *xi, int *yi, double *P, double *x, double *q,
cu_double_matrix_t edge_image, cu_range_image_data_t range_image_data, cu_double_matrix_t range_image, int num_samples, int n, int *n_arr) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples || j >= n_arr[i])
return;
int i_arr = j + i * n;
edge_gradient_score_individual[i_arr] = 0.0;
int k;
for (k = 0; k < 7; ++k)
dI_dxq_individual[k + 7*i_arr] = 0.0;
if (vis_prob[i_arr] < .01)
return;
if (xi[i_arr] == -1 || yi[i_arr] == -1)
return;
// add pixel edge score to total score
edge_gradient_score_individual[i_arr] = vis_pmf[i_arr] * edge_image.ptr[xi[i_arr] * edge_image.m + yi[i_arr]];
// get edge image gradient at current pixel
double dI_dij[2];
cu_matrix_cell_gradient(dI_dij, xi[i_arr], yi[i_arr], edge_image.ptr, range_image_data.w, range_image_data.h);
// get gradient of pixel location w.r.t. model pose (x,q)
double dij_dxq[2][7];
cu_range_image_pixel_pose_jacobian(dij_dxq, range_image_data, &P[3*i_arr], &x[3 * i], &q[4*i]);
// get gradient of this point's edge score w.r.t. model pose (x,q)
double dI_dxq[7];
cu_vec_matrix_mult_7(dI_dxq, dI_dij, dij_dxq, 2);
cu_mult(&dI_dxq_individual[7*i_arr], dI_dxq, vis_pmf[i_arr], 7);
}
__global__ void cu_edge_gradient_score_final(double *edge_score, double *G_edge, int num_samples, int n_edge, scope_params_t *params, int scope_round) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
double w = (scope_round==2 ? params->score2_edge_weight : params->score3_edge_weight);
cu_mult(&(G_edge[7*i]), &(G_edge[7*i]), w, 7);
edge_score[i] *= w;
}
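// host wrapper: per-sample edge score and its gradient w.r.t. the pose (x,q)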
void edge_score_gradient(double *cu_edge_gradient_score, double *cu_G_edge, double *cu_samples_x, double *cu_samples_q, double *cu_P2, double *cu_P, int num_samples, int n_edge, int *cu_n,
cu_double_matrix_t range_image, cu_range_image_data_t range_image_data, cu_double_matrix_t edge_image, scope_params_t *cu_params, scope_params_t *params, int scope_round,
dim3 block, dim3 thread, dim3 block_size_sum, dim3 thread_size_sum, dim3 block_size_small, dim3 thread_size_small) {
cu_transform_cloud<<<block, thread>>>(cu_P2, cu_P, cu_samples_x, cu_samples_q, num_samples, n_edge, cu_n);
if ( cudaSuccess != cudaGetLastError() )
printf( "cloud transform!\n" );
int *cu_xi;
cu_malloc(&cu_xi, num_samples*n_edge * sizeof(int), "xi");
int *cu_yi;
cu_malloc(&cu_yi, num_samples*n_edge * sizeof(int), "yi");
cu_populate_xi_yi<<<block, thread>>>(cu_xi, cu_yi, cu_P2, range_image_data, num_samples, n_edge, cu_n);
if ( cudaSuccess != cudaGetLastError() )
printf( "populate xi yi!\n" );
// compute visibility of sampled model edges
int vis_pixel_radius = 2;
double *cu_vis_prob;
cu_malloc(&cu_vis_prob, num_samples * n_edge * sizeof(double), "vis_prob");
double *cu_vis_prob_sums;
cu_malloc(&cu_vis_prob_sums, num_samples * sizeof(double), "vis_prob_sums");
double *cu_vis_pmf;
cu_malloc(&cu_vis_pmf, num_samples * n_edge * sizeof(double), "vis_pmf");
get_vis_prob_sums_and_pmf(cu_vis_prob, cu_vis_prob_sums, cu_vis_pmf, cu_P2, NULL, cu_xi, cu_yi, range_image, range_image_data, vis_pixel_radius, num_samples, n_edge, cu_n, params, block, thread,
// block_size_small, thread_size_small, 1); // HERE!!!
block_size_sum, thread_size_sum, 0);
double *cu_edge_gradient_score_individual;
double *cu_dI_dxq_individual;
cu_malloc(&cu_edge_gradient_score_individual, num_samples * n_edge * sizeof(double), "edge_gradient_score_individual");
cu_malloc(&cu_dI_dxq_individual, num_samples * n_edge * 7 * sizeof(double), "dI_dxq_individual");
cu_edge_score_gradient_individual<<<block, thread>>>(cu_edge_gradient_score_individual, cu_dI_dxq_individual, cu_vis_prob, cu_vis_pmf, cu_xi, cu_yi, cu_P, cu_samples_x, cu_samples_q, edge_image,
range_image_data, range_image, num_samples, n_edge, cu_n);
if ( cudaSuccess != cudaGetLastError() )
printf( "edge score gradient!\n" );
// sum up edge_scores and gradients <--- HERE!!!
//cu_add_matrix_rows_slow<<<block_size_small, thread_size_small>>>(cu_edge_gradient_score, cu_edge_gradient_score_individual, num_samples, n_edge, cu_n);
cu_add_matrix_rows_medium<<<block_size_sum, thread_size_sum>>>(cu_edge_gradient_score, cu_edge_gradient_score_individual, num_samples, n_edge, cu_n);
//cu_add_matrix_3d_slow<<<block_size_small, thread_size_small>>>(cu_G_edge, cu_dI_dxq_individual, num_samples, n_edge, cu_n, 7);
cu_add_matrix_3d_medium<<<block_size_sum, thread_size_sum>>>(cu_G_edge, cu_dI_dxq_individual, num_samples, n_edge, cu_n, 7);
cu_edge_gradient_score_final<<<block_size_small, thread_size_small>>>(cu_edge_gradient_score, cu_G_edge, num_samples, n_edge, cu_params, scope_round);
if ( cudaSuccess != cudaGetLastError() )
printf( "edge gradient final!\n" );
cu_free(cu_xi, "xi");
cu_free(cu_yi, "yi");
cu_free(cu_vis_prob, "vis_prob");
cu_free(cu_vis_prob_sums, "vis_prob_sums");
cu_free(cu_vis_pmf, "vis_pmf");
cu_free(cu_edge_gradient_score_individual, "edge gradient score individual");
cu_free(cu_dI_dxq_individual, "dI_dxq_individual");
}
/*
* get the plane equation coefficients (c[0]*x + c[1]*y + c[2]*z + c[3] = 0) from (point,normal)
*/
__device__ void cu_xyzn_to_plane(double *c, double *point, double *normal)
{
c[0] = normal[0];
c[1] = normal[1];
c[2] = normal[2];
c[3] = -cu_dot(point, normal, 3);
}
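// per-point xyz (point-to-plane) and normal residuals against the range-image cell, their log-Gaussian scores, and the gradients w.r.t. the pose (x,q)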
__global__ void cu_xyzn_score_gradient_individual(double *xyzn_gradient_score_individual, double *G_xyzn_individual, double *vis_prob, double *vis_pmf, int *cu_xi, int *cu_yi,
double *cloud2, double *cloud, double *normals2, double *normals, double *samples_x, double *samples_q,
cu_int_matrix_t range_image_cnt, cu_double_matrix3d_t range_image_points, cu_double_matrix3d_t range_image_normals, scope_noise_model_t *noise_models,
int num_samples, int num_surface_points, scope_params_t *params, int score_round) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= num_samples || j >= num_surface_points)
return;
int i_arr = i * num_surface_points + j;
xyzn_gradient_score_individual[i_arr] = 0.0;
int k;
for (k = 0; k < 7; ++k)
G_xyzn_individual[7*i_arr + k] = 0.0;
if (vis_prob[i_arr] < .01)
return;
int xi = cu_xi[i_arr];
int yi = cu_yi[i_arr];
// get range image cell
if (xi == -1 || yi == -1)
return;
double range_sigma = params->range_sigma * noise_models[i_arr].range_sigma;
double normal_sigma = params->normal_sigma * noise_models[i_arr].normal_sigma;
double dmax_xyz = 2*range_sigma;
double dmax_normal = 2*normal_sigma;
double d_xyz = dmax_xyz;
double d_normal = dmax_normal;
double c[4]; // range image cell plane coeffs
if (range_image_cnt.ptr[xi * range_image_cnt.m + yi] > 0) {
// get distance from model point to range image cell plane
cu_xyzn_to_plane(c, &range_image_points.ptr[xi*range_image_points.m * range_image_points.p + yi * range_image_points.p],
&range_image_normals.ptr[xi*range_image_normals.m * range_image_normals.p + yi * range_image_normals.p]);
d_xyz = fabs(cu_dot(c, &cloud2[3*i_arr], 3) + c[3]);
//d_xyz /= noise_models[i].range_sigma;
d_xyz = MIN(d_xyz, dmax_xyz);
d_normal = 1.0 - cu_dot(&normals2[3*i_arr], &range_image_normals.ptr[xi*range_image_normals.m * range_image_normals.p + yi * range_image_normals.p], 3);
//d_normal /= noise_models[i].normal_sigma;
d_normal = MIN(d_normal, dmax_normal);
}
double xyz_weight = (score_round == 2 ? params->score2_xyz_weight : params->score3_xyz_weight);
double normal_weight = (score_round == 2 ? params->score2_normal_weight : params->score3_normal_weight);
xyzn_gradient_score_individual[i_arr] += xyz_weight * vis_pmf[i_arr] * log(cu_normpdf(d_xyz, 0, range_sigma));
xyzn_gradient_score_individual[i_arr] += normal_weight * vis_pmf[i_arr] * log(cu_normpdf(d_normal, 0, normal_sigma));
// get gradient of this point's xyz score w.r.t. model pose (x,q)
if (d_xyz < dmax_xyz) {
double dp_dq[3][4];
cu_point_rotation_jacobian(dp_dq, &(samples_q[4*i]), &cloud[3*i_arr]);
double df_dp[3];
df_dp[0] = c[0]; df_dp[1] = c[1]; df_dp[2] = c[2];
//double rs = range_sigma * noise_models[i].range_sigma;
cu_mult(df_dp, df_dp, -(c[3] + cu_dot(&cloud2[3*i_arr], c, 3)) / (range_sigma*range_sigma), 3);
G_xyzn_individual[7*i_arr + 0] = df_dp[0]; G_xyzn_individual[7*i_arr + 1] = df_dp[1]; G_xyzn_individual[7*i_arr + 2] = df_dp[2];
cu_vec_matrix_mult_4(&G_xyzn_individual[7*i_arr + 3], df_dp, dp_dq, 3);
cu_mult(&G_xyzn_individual[7*i_arr], &G_xyzn_individual[7*i_arr], xyz_weight * vis_pmf[i_arr], 7);
}
// get gradient of this point's normal score w.r.t. model pose (x,q)
if (d_normal < dmax_normal) {
double dpn_dq[3][4];
cu_point_rotation_jacobian(dpn_dq, &(samples_q[4*i]), &normals[3*i_arr]);
double df_dpn[3];
df_dpn[0] = c[0]; df_dpn[1] = c[1]; df_dpn[2] = c[2];
//double ns = normal_sigma * noise_models[i].normal_sigma;
cu_mult(df_dpn, df_dpn, (1 - cu_dot(&normals2[3*i_arr], c, 3)) / (normal_sigma*normal_sigma), 3);
double G_normal[7] = {0,0,0,0,0,0,0};
cu_vec_matrix_mult_4(&G_normal[3], df_dpn, dpn_dq, 3);
cu_mult(G_normal, G_normal, normal_weight * vis_pmf[i_arr], 7);
cu_add(&G_xyzn_individual[7*i_arr], &G_xyzn_individual[7*i_arr], G_normal, 7);
}
}
__global__ void cu_matrix_add(double *cu_G, double *cu_G_edge, double *cu_G_xyzn, int num_samples, int m) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
for (int k = 0; k < m; ++k) {
cu_G[m * i + k] = cu_G_edge[m * i + k] + cu_G_xyzn[m * i + k];
}
}
__global__ void cu_init_arr(double *cu_best_score, double init_val, int num_samples) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
cu_best_score[i] = init_val;
}
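// host wrapper: per-sample combined xyz/normal score and its gradient w.r.t. the pose (x,q)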
void xyzn_score_gradient(double *cu_xyzn_gradient_score, double *cu_G_xyzn, double *cu_samples_x, double *cu_samples_q, double *cu_cloud2, double *cu_cloud, double *cu_normals2, double *cu_normals,
int transform, scope_noise_model_t *cu_noise_models, cu_range_image_data_t range_image_data, cu_double_matrix_t range_image, cu_int_matrix_t range_image_cnt,
cu_double_matrix3d_t range_image_points, cu_double_matrix3d_t range_image_normals,
int num_samples, int num_surface_points, scope_params_t *cu_params, scope_params_t *params, int score_round,
dim3 block_size, dim3 threads_per_block, dim3 block_size_small, dim3 thread_size_small, dim3 block_size_sum, dim3 thread_size_sum) {
if (transform) {
cu_transform_cloud<<<block_size, threads_per_block>>>(cu_cloud2, cu_cloud, cu_samples_x, cu_samples_q, num_samples, num_surface_points, NULL);
cu_transform_cloud<<<block_size, threads_per_block>>>(cu_normals2, cu_normals, NULL, cu_samples_q, num_samples, num_surface_points, NULL);
if ( cudaSuccess != cudaGetLastError() )
printf( "transform cloud!\n" );
}
int *cu_xi;
cu_malloc(&cu_xi, num_samples*num_surface_points * sizeof(int), "xi");
int *cu_yi;
cu_malloc(&cu_yi, num_samples*num_surface_points * sizeof(int), "yi");
cu_populate_xi_yi<<<block_size, threads_per_block>>>(cu_xi, cu_yi, cu_cloud2, range_image_data, num_samples, num_surface_points, NULL);
if ( cudaSuccess != cudaGetLastError() )
printf( "populate xi yi xyzn gradient!\n" );
double *cu_vis_prob;
cu_malloc(&cu_vis_prob, num_samples * num_surface_points * sizeof(double), "vis_prob");
double *cu_vis_prob_sums;
cu_malloc(&cu_vis_prob_sums, num_samples * sizeof(double), "vis_prob_sums");
double *cu_vis_pmf;
cu_malloc(&cu_vis_pmf, num_samples * num_surface_points * sizeof(double), "vis_pmf");
get_vis_prob_sums_and_pmf(cu_vis_prob, cu_vis_prob_sums, cu_vis_pmf, cu_cloud2, cu_normals2, cu_xi, cu_yi, range_image, range_image_data, 0, num_samples, num_surface_points, NULL, params,
block_size, threads_per_block, block_size_sum, thread_size_sum, 0);
double *cu_xyzn_gradient_score_individual;
cu_malloc(&cu_xyzn_gradient_score_individual, num_samples * num_surface_points * sizeof(double), "xyzn_score_gradient_individual");
double *cu_G_xyzn_individual;
cu_malloc(&cu_G_xyzn_individual, num_samples * num_surface_points * 7 * sizeof(double), "G_xyzn_individual");
cu_xyzn_score_gradient_individual<<<block_size, threads_per_block>>>(cu_xyzn_gradient_score_individual, cu_G_xyzn_individual, cu_vis_prob, cu_vis_pmf, cu_xi, cu_yi, cu_cloud2, cu_cloud, cu_normals2,
cu_normals, cu_samples_x, cu_samples_q, range_image_cnt, range_image_points, range_image_normals, cu_noise_models,
num_samples, num_surface_points, cu_params, score_round);
if ( cudaSuccess != cudaGetLastError() )
printf( "xyzn gradient individual!\n" );
// sum up xyzn_scores and gradients
//cu_add_matrix_rows_slow<<<block_size_small, thread_size_small>>>(cu_xyzn_gradient_score, cu_xyzn_gradient_score_individual, num_samples, num_surface_points, NULL); // HERE
cu_add_matrix_rows_medium<<<block_size_sum, thread_size_sum>>>(cu_xyzn_gradient_score, cu_xyzn_gradient_score_individual, num_samples, num_surface_points, NULL);
//cu_add_matrix_3d_slow<<<block_size_small, thread_size_small>>>(cu_G_xyzn, cu_G_xyzn_individual, num_samples, num_surface_points, NULL, 7); // HERE!!!
cu_add_matrix_3d_medium<<<block_size_sum, thread_size_sum>>>(cu_G_xyzn, cu_G_xyzn_individual, num_samples, num_surface_points, NULL, 7);
if ( cudaSuccess != cudaGetLastError() )
printf( "add mat 3d slow!\n" );
cu_free(cu_xi, "xi");
cu_free(cu_yi, "yi");
cu_free(cu_vis_prob, "vis_prob");
cu_free(cu_vis_prob_sums, "vis_prob_sums");
cu_free(cu_vis_pmf, "vis_pmf");
cu_free(cu_xyzn_gradient_score_individual, "xyzn gradient score individual");
cu_free(cu_G_xyzn_individual, "G_xyzn_individual");
}
__global__ void cu_normalize_matrix_rows(double *cu_out, double *cu_in, int num_samples, int width) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
cu_normalize(&(cu_out[i*width]), &(cu_in[i*width]), width);
}
__global__ void cu_mult_matrix_rows(double *cu_out, double *cu_in, double mult, double *cu_mult_arr, int num_samples, int width) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
double f = mult;
if (cu_mult_arr)
f *= cu_mult_arr[i];
cu_mult(&(cu_out[i * width]), &(cu_in[i*width]), f, width);
}
__global__ void cu_add_dxq(double *cu_x2, double *cu_q2, double *cu_samples_x, double *cu_samples_q, double *cu_dxq, int num_samples) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
cu_x2[3*i] = cu_samples_x[3*i] + cu_dxq[7*i]; cu_x2[3*i + 1] = cu_samples_x[3*i + 1] + cu_dxq[7*i + 1]; cu_x2[3*i + 2] = cu_samples_x[3*i + 2] + cu_dxq[7*i + 2];
cu_q2[4*i] = cu_samples_q[4*i] + cu_dxq[7*i + 3]; cu_q2[4*i + 1] = cu_samples_q[4*i + 1] + cu_dxq[7*i + 4];
cu_q2[4*i + 2] = cu_samples_q[4*i + 2] + cu_dxq[7*i + 5]; cu_q2[4*i + 3] = cu_samples_q[4*i + 3] + cu_dxq[7*i + 6];
}
__global__ void cu_update_best(double *cu_best_score, double *cu_best_x, double *cu_best_q, double *cu_best_step, double *cu_scores, double *cu_x2, double *cu_q2, double *cu_step, double step, int num_samples) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_samples)
return;
if (cu_best_score[i] < cu_scores[i]) {
cu_best_score[i] = cu_scores[i];
cu_best_x[3*i] = cu_x2[3*i]; cu_best_x[3*i+1] = cu_x2[3*i+1]; cu_best_x[3*i+2] = cu_x2[3*i+2];
cu_best_q[4*i] = cu_q2[4*i]; cu_best_q[4*i+1] = cu_q2[4*i+1]; cu_best_q[4*i+2] = cu_q2[4*i+2]; cu_best_q[4*i+3] = cu_q2[4*i+3];
cu_best_step[i] = step * cu_step[i];
}
}
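// gradient-based pose refinement of the samples: uses the edge and xyz/normal score gradients (up to max_iter iterations), keeping the best-scoring (x,q) found per sample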
void align_models_gradient(scope_sample_t *samples, int num_samples, cu_model_data_t *cu_model, cu_obs_data_t *cu_obs, scope_params_t *cu_params, scope_params_t *params,
int num_points, int model_points, int round) {
if (num_samples == 0) {
printf("Align models, no samples!\n");
return;
}
//TODO(sanja): These dim3s need some serious reorganizing/renaming
int num_surface_points = (num_points > 0 ? num_points : model_points);
dim3 threads_per_block(256, 1, 1);
dim3 block_size(ceil(1.0 * num_surface_points / threads_per_block.x), num_samples);
dim3 thread_size_small(64);
dim3 block_size_small(ceil(1.0 * num_samples/thread_size_small.x));
dim3 thread_size_sum(256);
dim3 block_size_sum(1, num_samples);
dim3 thread_size_sum_small(64);
//TODO: make these params
int max_iter = 20;
// unpack args
double *cu_samples_x;
cu_malloc(&cu_samples_x, num_samples * 3 * sizeof(double), "samples_x");
double *cu_samples_q;
cu_malloc(&cu_samples_q, num_samples * 4 * sizeof(double), "samples_q");
unpack_x_q(cu_samples_x, cu_samples_q, samples, num_samples);
int n_edge = cu_model->max_num_edges;
int *cu_n;
cu_malloc(&cu_n, num_samples * sizeof(int), "n");
dim3 block_size_n_edge(ceil(1.0 * n_edge / thread_size_sum.x), num_samples);
double *cu_P;
cu_malloc(&cu_P, num_samples * n_edge * 3*sizeof(double), "cu_P");
int *cu_vi;
cu_malloc(&cu_vi, num_samples * sizeof(int), "vi");
get_range_edge_points(cu_n, cu_P, num_samples, n_edge, cu_n, num_points, cu_model, cu_vi, 1, cu_samples_x, cu_samples_q, block_size_n_edge, thread_size_sum, block_size_small, thread_size_small);
double *cu_P2;
cu_malloc(&cu_P2, num_samples * n_edge * 3*sizeof(double), "cu_P2");
// get model surface points
int *cu_idx;
cu_malloc(&cu_idx, num_samples * num_surface_points * sizeof(int), "idxs");
get_validation_points(cu_idx, model_points, num_surface_points, num_samples, block_size, threads_per_block);
double *cu_cloud, *cu_cloud2, *cu_normals, *cu_normals2;
cu_malloc(&cu_cloud, num_samples * num_surface_points * 3 * sizeof(double), "cloud");
cu_malloc(&cu_cloud2, num_samples * num_surface_points * 3 * sizeof(double), "cloud2");
cu_malloc(&cu_normals, num_samples * num_surface_points * 3 * sizeof(double), "normals");
cu_malloc(&cu_normals2, num_samples * num_surface_points * 3 * sizeof(double), "normals2");
cu_reorder_rows<<<block_size, threads_per_block>>>(cu_cloud, cu_model->points, cu_idx, num_samples, num_surface_points, 3, NULL);
cu_reorder_rows<<<block_size, threads_per_block>>>(cu_normals, cu_model->normals, cu_idx, num_samples, num_surface_points, 3, NULL);
if ( cudaSuccess != cudaGetLastError() )
printf( "reorder rows!\n" );
scope_noise_model_t *cu_noise_models;
cu_malloc(&cu_noise_models, num_samples * num_surface_points * sizeof(scope_noise_model_t), "noise_models");
double *cu_G_edge, *cu_G_xyzn, *cu_G;
cu_malloc(&cu_G_edge, 7*num_samples * sizeof(double), "G_edge");
cu_malloc(&cu_G_xyzn, 7*num_samples * sizeof(double), "G_xyzn");
cu_malloc(&cu_G, 7*num_samples * sizeof(double), "G");
double *cu_edge_gradient_score;
cu_malloc(&cu_edge_gradient_score, num_samples * sizeof(double), "edge_gradient_score");
double *cu_xyzn_gradient_score;
cu_malloc(&cu_xyzn_gradient_score, num_samples * sizeof(double), "xyzn_gradient_score");
int *cu_xi;
cu_malloc(&cu_xi, num_samples*num_surface_points * sizeof(int), "xi");
int *cu_yi;
cu_malloc(&cu_yi, num_samples*num_surface_points * sizeof(int), "yi");
double *cu_vis_prob;
cu_malloc(&cu_vis_prob, num_samples * num_surface_points * sizeof(double), "vis_prob");
double *cu_vis_prob_sums;
cu_malloc(&cu_vis_prob_sums, num_samples * sizeof(double), "vis_prob_sums");
double *cu_vis_pmf;
cu_malloc(&cu_vis_pmf, num_samples * num_surface_points * sizeof(double), "vis_pmf");
double *cu_edge_scores, *cu_xyz_scores, *cu_normal_scores, *cu_scores;
cu_malloc(&cu_edge_scores, num_samples * sizeof(double), "edge_scores");
cu_malloc(&cu_xyz_scores, num_samples * sizeof(double), "xyz_scores");
cu_malloc(&cu_normal_scores, num_samples * sizeof(double), "normal_scores");
cu_malloc(&cu_scores, num_samples * sizeof(double), "scores");
double *cu_x2, *cu_q2, *cu_dxq;
cu_malloc(&cu_x2, num_samples * 3 * sizeof(double), "x2");
cu_malloc(&cu_q2, num_samples * 4 * sizeof(double), "q2");
cu_malloc(&cu_dxq, num_samples * 7 * sizeof(double), "dxq");
double step = .01; // step size in gradient ascent
double step_mult[3] = {.6, 1, 1.6};
double *cu_step;
cu_malloc(&cu_step, num_samples * sizeof(double), "step");
cu_init_arr<<<block_size_small, thread_size_small>>>(cu_step, step, num_samples);
double *cu_best_score, *cu_best_step;
cu_malloc(&cu_best_score, num_samples * sizeof(double), "best_score");
cu_malloc(&cu_best_step, num_samples * sizeof(double), "best_step");
double *cu_best_x, *cu_best_q;
cu_malloc(&cu_best_x, num_samples * 3 * sizeof(double), "best_x");
cu_malloc(&cu_best_q, num_samples * 4 * sizeof(double), "best_q");
int j, iter;
double init_val = -10000000.0;
for (iter = 0; iter < max_iter; iter++) {
cu_transform_cloud<<<block_size, threads_per_block>>>(cu_cloud2, cu_cloud, cu_samples_x, cu_samples_q, num_samples, num_surface_points, NULL);
cu_transform_cloud<<<block_size, threads_per_block>>>(cu_normals2, cu_normals, NULL, cu_samples_q, num_samples, num_surface_points, NULL);
if ( cudaSuccess != cudaGetLastError() )
printf( "transform cloud!\n" );
cu_get_viewpoints<<<block_size_small, thread_size_small>>>(cu_vi, num_samples, cu_samples_x, cu_samples_q, cu_model->range_edges_model_views);
cu_get_noise_models<<<block_size, threads_per_block>>>(cu_noise_models, cu_cloud2, cu_normals2, cu_idx, cu_vi, cu_model->ved, cu_model->normalvar, num_samples, num_surface_points);
if ( cudaSuccess != cudaGetLastError() )
printf( "get noise models!\n" );
cudaMemset(cu_G_edge, 0, 7*num_samples * sizeof(double));
cudaMemset(cu_G_xyzn, 0, 7*num_samples * sizeof(double));
edge_score_gradient(cu_edge_gradient_score, cu_G_edge, cu_samples_x, cu_samples_q, cu_P2, cu_P, num_samples, n_edge, cu_n, cu_obs->range_image, cu_obs->range_image_data, cu_obs->edge_image,
cu_params, params, round, block_size_n_edge, thread_size_sum, block_size_sum, thread_size_sum, block_size_small, thread_size_small);
xyzn_score_gradient(cu_xyzn_gradient_score, cu_G_xyzn, cu_samples_x, cu_samples_q, cu_cloud2, cu_cloud, cu_normals2, cu_normals, 0, cu_noise_models, cu_obs->range_image_data, cu_obs->range_image,
cu_obs->range_image_cnt, cu_obs->range_image_points, cu_obs->range_image_normals, num_samples, num_surface_points, cu_params, params, round,
block_size, threads_per_block, block_size_small, thread_size_small, block_size_sum, thread_size_sum);
cu_matrix_add<<<block_size_small, thread_size_small>>>(cu_G, cu_G_edge, cu_G_xyzn, num_samples, 7);
cu_init_arr<<<block_size_small, thread_size_small>>>(cu_best_score, init_val, num_samples);
if ( cudaSuccess != cudaGetLastError() )
printf( "init arr!\n" );
cudaMemset(cu_best_step, 0, num_samples * sizeof(double));
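// Crude per-sample line search: try three scaled steps along the normalized gradient and keep
// the best-scoring pose. Illustrative trace (numbers assumed, not from a run): with
// cu_step[i] = .01 and step_mult = {.6, 1, 1.6}, the trial updates are dxq = .006*G_hat,
// .01*G_hat and .016*G_hat, and cu_update_best() below retains whichever of the three scores highest.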
for (j = 0; j < 3; ++j) {
// take a step in the direction of the gradient
cu_normalize_matrix_rows<<<block_size_small, thread_size_small>>>(cu_G, cu_G, num_samples, 7);
cu_mult_matrix_rows<<<block_size_small, thread_size_small>>>(cu_dxq, cu_G, step_mult[j], cu_step, num_samples, 7);
cu_add_dxq<<<block_size_small, thread_size_small>>>(cu_x2, cu_q2, cu_samples_x, cu_samples_q, cu_dxq, num_samples);
cu_normalize_matrix_rows<<<block_size_small, thread_size_small>>>(cu_q2, cu_q2, num_samples, 4);
if ( cudaSuccess != cudaGetLastError() )
printf( "prep stuff for step direction!\n" );
cu_transform_cloud<<<block_size, threads_per_block>>>(cu_cloud2, cu_cloud, cu_x2, cu_q2, num_samples, num_surface_points, NULL);
cu_transform_cloud<<<block_size, threads_per_block>>>(cu_normals2, cu_normals, NULL, cu_q2, num_samples, num_surface_points, NULL);
if ( cudaSuccess != cudaGetLastError() )
printf( "transform clouds!\n" );
cu_populate_xi_yi<<<block_size, threads_per_block>>>(cu_xi, cu_yi, cu_cloud2, cu_obs->range_image_data, num_samples, num_surface_points, NULL);
if ( cudaSuccess != cudaGetLastError() )
printf( "populate xi yi!\n" );
get_vis_prob_sums_and_pmf(cu_vis_prob, cu_vis_prob_sums, cu_vis_pmf, cu_cloud2, cu_normals2, cu_xi, cu_yi, cu_obs->range_image, cu_obs->range_image_data, 0, num_samples, num_surface_points, NULL, params,
block_size, threads_per_block, block_size_sum, thread_size_sum, 0);
// transform edge points
cu_transform_cloud<<<block_size_n_edge, thread_size_sum>>>(cu_P2, cu_P, cu_x2, cu_q2, num_samples, n_edge, cu_n);
if ( cudaSuccess != cudaGetLastError() )
printf( "transform edge!\n" );
// evaluate the score
compute_edge_score(cu_edge_scores, NULL, cu_P2, cu_obs->range_image_data, cu_obs->range_image, num_samples, n_edge, cu_n, cu_obs->edge_image, cu_model->score_comp_models->b_edge,
cu_model->score_comp_models->b_edge_occ, params, cu_params, round, block_size_n_edge, block_size_sum, thread_size_sum, block_size_small, thread_size_small);
compute_xyz_score(cu_xyz_scores, NULL, cu_cloud2, cu_xi, cu_yi, cu_vis_pmf, cu_noise_models, cu_obs->range_image, cu_obs->range_image_data, cu_obs->range_image_cnt, num_samples, num_surface_points,
cu_params, round, cu_model->score_comp_models->b_xyz, block_size, threads_per_block, block_size_sum, thread_size_sum, block_size_small, thread_size_small);
compute_normal_score(cu_normal_scores, NULL, cu_normals2, cu_vis_pmf, cu_noise_models, num_samples, num_surface_points, cu_xi, cu_yi, cu_obs->range_image_cnt, cu_obs->range_image_normals,
cu_model->score_comp_models->b_normal, cu_params, round, block_size, threads_per_block, block_size_sum, thread_size_sum, block_size_small, thread_size_small);
cu_add_3_scores<<<block_size_small, thread_size_small>>>(cu_scores, cu_xyz_scores, cu_normal_scores, cu_edge_scores, num_samples);
if ( cudaSuccess != cudaGetLastError() )
printf( "add 3 scores!\n" );
cu_update_best<<<block_size_small, thread_size_small>>>(cu_best_score, cu_best_x, cu_best_q, cu_best_step, cu_scores, cu_x2, cu_q2, cu_step, step_mult[j], num_samples);
if ( cudaSuccess != cudaGetLastError() )
printf( "update best!\n" );
}
cudaMemcpy(cu_samples_x, cu_best_x, 3*num_samples*sizeof(double), cudaMemcpyDeviceToDevice);
cudaMemcpy(cu_samples_q, cu_best_q, 4*num_samples*sizeof(double), cudaMemcpyDeviceToDevice);
cudaMemcpy(cu_step, cu_best_step, num_samples * sizeof(double), cudaMemcpyDeviceToDevice); // NOTE(sanja): According to StackOverflow, might be expensive
}
double **samples_x = new_matrix2(num_samples, 3);
double **samples_q = new_matrix2(num_samples, 4);
cudaMemcpy(samples_x[0], cu_samples_x, 3 * num_samples * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(samples_q[0], cu_samples_q, 4 * num_samples * sizeof(double), cudaMemcpyDeviceToHost);
int i;
for (i = 0; i < num_samples; ++i) {
memcpy(samples[i].x, samples_x[i], 3 * sizeof(double));
}
for (i = 0; i < num_samples; ++i) {
memcpy(samples[i].q, samples_q[i], 4 * sizeof(double));
}
free_matrix2(samples_x);
free_matrix2(samples_q);
cu_free(cu_samples_x, "x free");
cu_free(cu_samples_q, "q free");
cu_free(cu_n, "n free");
cu_free(cu_P, "P free");
cu_free(cu_vi, "vi free");
cu_free(cu_P2, "P2 free");
cu_free(cu_idx, "idx free");
cu_free(cu_cloud, "cloud free");
cu_free(cu_cloud2, "cloud2 free");
cu_free(cu_normals, "normals free");
cu_free(cu_normals2, "normals2 free");
cu_free(cu_G_edge, "G_edge free");
cu_free(cu_G_xyzn, "G_xyzn free");
cu_free(cu_G, "G free");
cu_free(cu_edge_gradient_score, "edge_gradient_score");
cu_free(cu_xyzn_gradient_score, "xyzn_gradient_score");
cu_free(cu_xi, "xi");
cu_free(cu_yi, "yi");
cu_free(cu_vis_prob, "vis_prob");
cu_free(cu_vis_prob_sums, "vis_prob_sums");
cu_free(cu_vis_pmf, "vis_pmf");
cu_free(cu_edge_scores, "edge_scores");
cu_free(cu_xyz_scores, "xyz_scores");
cu_free(cu_normal_scores, "normal_scores");
cu_free(cu_scores, "scores");
cu_free(cu_best_score, "best_score");
cu_free(cu_best_step, "best_step");
cu_free(cu_best_x, "best_x");
cu_free(cu_best_q, "best_q");
cu_free(cu_step, "step");
cudaDeviceSynchronize();
}
__global__ void cu_knn(float *nn_d2, int *nn_idx, double *ref, double *query, int ref_n, int query_n, int d, int k) {
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= query_n || j >= ref_n)
return;
__shared__ float shared_d2[THREADS_KNN * KNN_SIZE];
__shared__ int shared_idx[THREADS_KNN * KNN_SIZE];
__shared__ float tmp_d2[THREADS_KNN * KNN_SIZE / 2];
__shared__ int tmp_idx[THREADS_KNN * KNN_SIZE / 2];
for (int k = 0; k < KNN_SIZE; ++k) {
shared_d2[j * KNN_SIZE + k] = 1000000.0;
shared_idx[j * KNN_SIZE + k] = -1;
}
// Initialize the arrays
float dist;
int last = 0;
for (int kk = j; kk < ref_n; kk += blockDim.x) {
dist = 0.0;
for (int l = 0; l < d; ++l)
dist += (ref[d * kk + l] - query[d * i + l]) * (ref[d * kk + l] - query[d * i + l]);
if (last == KNN_SIZE && dist >= shared_d2[j * KNN_SIZE + last-1])
continue;
if (last < KNN_SIZE)
++last;
if (last < KNN_SIZE || (last == KNN_SIZE && shared_d2[j * KNN_SIZE + last-1] > dist)) {
shared_d2[j * KNN_SIZE + last-1] = dist;
shared_idx[j * KNN_SIZE + last-1] = kk;
}
for (int l = last-1; l > 0; --l) {
if (shared_d2[j * KNN_SIZE +l] < shared_d2[j * KNN_SIZE +l-1]) {
float tmp_d2 = shared_d2[j * KNN_SIZE + l]; shared_d2[j * KNN_SIZE + l] = shared_d2[j * KNN_SIZE + l-1]; shared_d2[j * KNN_SIZE + l-1] = tmp_d2;
int tmp_idx = shared_idx[j * KNN_SIZE + l]; shared_idx[j * KNN_SIZE + l] = shared_idx[j * KNN_SIZE + l-1]; shared_idx[j * KNN_SIZE + l-1] = tmp_idx;
}
}
}
__syncthreads();
// Merge partial queues
for (int num_threads = blockDim.x / 2; num_threads > 0; num_threads >>= 1) {
// Merge two partial queues
if (j < num_threads) {
int a = 0, b = 0;
int other = j + num_threads;
int c = 0;
while (a < KNN_SIZE && b < KNN_SIZE && c < KNN_SIZE) {
if (shared_d2[j * KNN_SIZE + a] < shared_d2[other * KNN_SIZE + b]) {
tmp_d2[j * KNN_SIZE + c] = shared_d2[j * KNN_SIZE + a];
tmp_idx[j * KNN_SIZE + c++] = shared_idx[j * KNN_SIZE + a++];
} else {
tmp_d2[j * KNN_SIZE + c] = shared_d2[other * KNN_SIZE + b];
tmp_idx[j * KNN_SIZE + c++] = shared_idx[other * KNN_SIZE + b++];
}
}
// Because we only have KNN_SIZE things in tmp, once one array runs out, we are done.
// Copy the final list into the correct shared memory location
for (a = 0; a < KNN_SIZE; ++a) {
shared_d2[j * KNN_SIZE + a] = tmp_d2[j * KNN_SIZE + a];
shared_idx[j * KNN_SIZE + a] = tmp_idx[j * KNN_SIZE + a];
}
}
__syncthreads();
}
// Copy data from shared memory into the global memory
if (j < KNN_SIZE) {
nn_d2[i * KNN_SIZE + j] = shared_d2[0 * KNN_SIZE + j];
nn_idx[i * KNN_SIZE + j] = shared_idx[0 * KNN_SIZE + j];
}
}
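// Illustrative merge trace for the reduction above (assumed values, KNN_SIZE = 4):
// thread j holds d2 = [1, 4, 9, 16], thread j+num_threads holds d2 = [2, 3, 20, 25];
// after merging, only the KNN_SIZE smallest survive: [1, 2, 3, 4]. Halving num_threads
// log2(blockDim.x) times leaves the global k nearest neighbors of query i in thread 0's queue,
// which is what the final copy into nn_d2 / nn_idx reads out.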
void knn(float *nn_d2, int *nn_idx, double *reference, double *query, int ref_n, int d, int k, int start, int batch_size) {
dim3 block_size(1, batch_size, 1);
dim3 threads_per_block(THREADS_KNN, 1, 1);
double *cu_ref;
double *cu_query;
cu_malloc(&cu_ref, ref_n * d * sizeof(double), "ref");
cu_malloc(&cu_query, batch_size * d * sizeof(double), "query");
cudaMemcpy(cu_ref, reference, ref_n * d * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(cu_query, query, batch_size * d * sizeof(double), cudaMemcpyHostToDevice);
float *cu_nn_d2;
int *cu_nn_idx;
cu_malloc(&cu_nn_d2, batch_size * k * sizeof(float), "nn_d2");
cu_malloc(&cu_nn_idx, batch_size * k * sizeof(int), "nn_idx");
double t_tmp = get_time_ms();
cu_knn<<<block_size, threads_per_block>>>(cu_nn_d2, cu_nn_idx, cu_ref, cu_query, ref_n, batch_size, d, k);
if ( cudaSuccess != cudaGetLastError() )
printf( "knn!\n" );
cudaMemcpy(nn_d2, cu_nn_d2, batch_size * k * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(nn_idx, cu_nn_idx, batch_size * k * sizeof(int), cudaMemcpyDeviceToHost);
cu_free(cu_ref, "ref");
cu_free(cu_query, "query");
cu_free(cu_nn_d2, "nn_d2");
cu_free(cu_nn_idx, "nn_idx");
}
void testAdd3d() {
double *A, *B;
int side = 100;
int total = side*side*side;
safe_malloc(A, total, double);
safe_malloc(B, side*side, double);
for (int i = 0; i < total; ++i) {
A[i] = 1;
}
double *cu_A;
double *cu_B;
cu_malloc(&cu_A, total * sizeof(double), "a");
cudaMemcpy(cu_A, A, total * sizeof(double), cudaMemcpyHostToDevice);
cu_malloc(&cu_B, side*side * sizeof(double), "a");
dim3 block(1, side, 1);
dim3 thread(side/2, 1, 1);
cu_add_matrix_3d_medium<<<block, thread>>>(cu_B, cu_A, side, side, NULL, side);
//cu_add_matrix_3d_slow<<<side, 1>>>(cu_B, cu_A, side, side, NULL, side);
cudaMemcpy(B, cu_B, side*side*sizeof(double), cudaMemcpyDeviceToHost);
for (int i = 0; i < side; ++i) {
for (int j = 0; j < side; ++j) {
printf("%lf ", B[i * side + j]);
}
printf("\n\n");
}
}
void sample_all_first_fpfh_correspondences(scope_sample_t *samples, int *num_samples_init, int num_samples, scope_model_data_t *model_data, scope_obs_data_t *obs_data, scope_params_t *params) {
extern double knn_t; // dbug
knn_t = 0.0;
extern int knn_calls;
knn_calls = 0;
int batch_size = 200;
params->knn = KNN_SIZE;
int nn_idx[params->knn * batch_size];
float nn_d2[params->knn * batch_size];
int i;
double t0 = get_time_ms();
// Create random permutation of points to avoid continuous sampling
int idx[obs_data->fpfh_obs->num_points];
for (i = 0; i < obs_data->fpfh_obs->num_points; i++) {
idx[i] = i;
}
randperm(idx, obs_data->fpfh_obs->num_points, obs_data->fpfh_obs->num_points);
int picked = 0;
int start = 0;
int width = obs_data->fpfh_obs->fpfh_length;
while (picked < num_samples && start < obs_data->fpfh_obs->num_points) {
if (start + batch_size > obs_data->fpfh_obs->num_points) {
batch_size = obs_data->fpfh_obs->num_points - start;
}
double query_pts[batch_size * width];
for (i = 0; i < batch_size; ++i) {
memcpy(&query_pts[i * width], obs_data->fpfh_obs->fpfh[idx[i + start]], width * sizeof(double));
}
t0 = get_time_ms();
knn(nn_d2, nn_idx, model_data->fpfh_model->fpfh[0], query_pts, model_data->fpfh_model->num_points, width, params->knn, start, batch_size);
cudaDeviceSynchronize();
knn_t += get_time_ms() - t0;
knn_calls += batch_size;
for (i = 0; i < batch_size && picked < num_samples; ++i) {
if (nn_d2[i * params->knn] < params->f_sigma * params->f_sigma) {
int c_obs = idx[i + start];
double p[params->knn];
int j;
for (j = 0; j < params->knn; j++)
p[j] = exp(-.5*nn_d2[i * params->knn + j] / (params->f_sigma * params->f_sigma));
normalize_pmf(p, p, params->knn);
j = pmfrand(p, params->knn);
int c_model = nn_idx[i * params->knn + j];
samples[picked].c_obs[0] = c_obs;
samples[picked].c_model[0] = c_model;
samples[picked].c_type[0] = C_TYPE_FPFH;
samples[picked].nc = 1;
// compute correspondence score
samples[picked].c_score[0] = log(normpdf(sqrt(nn_d2[i * params->knn + j]), 0, params->f_sigma));
++picked;
}
}
start += batch_size;
}
*num_samples_init = picked; // In case we terminated early because we ran out of good points to sample
}
|
ba51badbfaf5e21ba6ee18f5563117720a8f9c26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include <stdint.h>
#include<stdio.h>
#include<fstream>
#include <stdlib.h>
#include <malloc.h>
using namespace std;
#define REPEAT 1
#define STRIDE 1
#define CACHELINE 8
#define ALLIGNMENT 64
typedef unsigned long long Dtype;
//typedef double Dtype;
//typedef int Dtype;
__global__ void VecAdd(Dtype** A, int* N, unsigned long long* d_time, Dtype* xj, Dtype* xi);
int gcf(int a, int b)
{
if (a == 0) return b;
return gcf(b % a, a);
}
int main(int argc, char* argv[])
{
if(argc != 3)
{
std::cout << "Wrong number of argument!! Exiting program !!!";
return 0;
}
// struct timeval tv1, tv2;
int N = atoi(argv[1]);
int stride = atoi(argv[2]);
unsigned long long *d_time, h_time;
Dtype *xj, *xi;
Dtype *h_A, **d_A;
int *d_N;
std::ofstream fp;
srand (time(NULL));
fp.open("/home/hpc/ihpc/ihpc002h/gpu-exp/mThesis/exp5/data/result.txt", std::ofstream::app);
h_A = (Dtype*)memalign(ALLIGNMENT,(N+2)*sizeof(Dtype));
hipMalloc(&d_A, (N+2)*sizeof(Dtype));
hipMalloc(&d_time, sizeof(unsigned long long));
hipMalloc(&xj, sizeof(Dtype));
hipMalloc(&xi, sizeof(Dtype));
hipMalloc(&d_N, sizeof(int));
//int step = gcf (STRIDE, N);
for(unsigned int i=0; i < N ; i++)
{
//stride = rand()%20;
h_A[i] = ((Dtype)(uintptr_t)d_A) + ( (i + stride) % N)*sizeof(Dtype);
// h_A[i] = i+1;
}
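// Each slot of h_A now holds the device address of the slot `stride` places ahead (mod N),
// e.g. with N = 4 and stride = 1 (assumed values): h_A = [&d_A[1], &d_A[2], &d_A[3], &d_A[0]].
// Presumably VecAdd chases this pointer chain on the device, so d_time captures the latency
// of the dependent loads.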
h_A[N]=0.0;
h_A[N+1]=0.0;
hipMemcpy(d_A, h_A, (N+2)*sizeof(Dtype), hipMemcpyHostToDevice );
hipMemcpy(d_N, &N, sizeof(int), hipMemcpyHostToDevice );
hipLaunchKernelGGL(( VecAdd), dim3(1),dim3(1), 0, 0, d_A, d_N, d_time, xj, xi);
hipMemcpy(&h_time, d_time, sizeof(unsigned long long), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
fp << N*8.0/1024.0 << " " << h_time << std::endl;
for(int i =0; i < N ; i++)
{
//printf("%f ",(h_A[i]));
}
hipFree(d_A);
hipFree(d_time);
free(h_A);
fp.close();
}
| ba51badbfaf5e21ba6ee18f5563117720a8f9c26.cu | #include<iostream>
#include <stdint.h>
#include<stdio.h>
#include<fstream>
#include <stdlib.h>
#include <malloc.h>
using namespace std;
#define REPEAT 1
#define STRIDE 1
#define CACHELINE 8
#define ALLIGNMENT 64
typedef unsigned long long Dtype;
//typedef double Dtype;
//typedef int Dtype;
__global__ void VecAdd(Dtype** A, int* N, unsigned long long* d_time, Dtype* xj, Dtype* xi);
int gcf(int a, int b)
{
if (a == 0) return b;
return gcf(b % a, a);
}
int main(int argc, char* argv[])
{
if(argc != 3)
{
std::cout << "Wrong number of argument!! Exiting program !!!";
return 0;
}
// struct timeval tv1, tv2;
int N = atoi(argv[1]);
int stride = atoi(argv[2]);
unsigned long long *d_time, h_time;
Dtype *xj, *xi;
Dtype *h_A, **d_A;
int *d_N;
std::ofstream fp;
srand (time(NULL));
fp.open("/home/hpc/ihpc/ihpc002h/gpu-exp/mThesis/exp5/data/result.txt", std::ofstream::app);
h_A = (Dtype*)memalign(ALLIGNMENT,(N+2)*sizeof(Dtype));
cudaMalloc(&d_A, (N+2)*sizeof(Dtype));
cudaMalloc(&d_time, sizeof(unsigned long long));
cudaMalloc(&xj, sizeof(Dtype));
cudaMalloc(&xi, sizeof(Dtype));
cudaMalloc(&d_N, sizeof(int));
//int step = gcf (STRIDE, N);
for(unsigned int i=0; i < N ; i++)
{
//stride = rand()%20;
h_A[i] = ((Dtype)(uintptr_t)d_A) + ( (i + stride) % N)*sizeof(Dtype);
// h_A[i] = i+1;
}
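// Each slot of h_A now holds the device address of the slot `stride` places ahead (mod N),
// e.g. with N = 4 and stride = 1 (assumed values): h_A = [&d_A[1], &d_A[2], &d_A[3], &d_A[0]].
// Presumably VecAdd chases this pointer chain on the device, so d_time captures the latency
// of the dependent loads.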
h_A[N]=0.0;
h_A[N+1]=0.0;
cudaMemcpy(d_A, h_A, (N+2)*sizeof(Dtype), cudaMemcpyHostToDevice );
cudaMemcpy(d_N, &N, sizeof(int), cudaMemcpyHostToDevice );
VecAdd<<<1,1>>>(d_A, d_N, d_time, xj, xi);
cudaMemcpy(&h_time, d_time, sizeof(double), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
fp << N*8.0/1024.0 << " " << h_time << std::endl;
for(int i =0; i < N ; i++)
{
//printf("%f ",(h_A[i]));
}
cudaFree(d_A);
cudaFree(d_time);
free(h_A);
fp.close();
}
|
d5555a604ecad8e93994724172ac4b92db8ae850.hip | // !!! This is a file automatically generated by hipify!!!
#include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "cudaRenderer.h"
#include "image.h"
#include "noise.h"
#include "sceneLoader.h"
#include "util.h"
#include "CycleTimer.h"
// add thrust
// #include <thrust/scan.h>
// #include <thrust/sort.h>
// #include <thrust/device_ptr.h>
// #include <thrust/device_malloc.h>
// #include <thrust/device_free.h>
// #include <thrust/execution_policy.h>
// #define array_type short
#define array_type unsigned short
#define BLOCK_DIM_X 32
#define BLOCK_DIM_Y 32
__managed__ int num_ones;
/* Helper function to round up to a power of 2.
*/
static inline long long int nextPow2(long long int n)
{
n--;
n |= n >> 1;
n |= n >> 2;
n |= n >> 4;
n |= n >> 8;
n |= n >> 16;
n++;
return n;
}
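// Illustrative values: nextPow2(1) == 1, nextPow2(20) == 32, nextPow2(64) == 64.
// Handy for padding scan lengths to a power of two so the sweep loops further below terminate cleanly.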
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
struct GlobalConstants {
SceneName sceneName;
int numCircles;
float* position;
float* velocity;
float* color;
float* radius;
int imageWidth;
int imageHeight;
float* imageData;
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU. (we didn't talk
// about this type of memory in class, but constant memory is a fast
// place to put read-only variables).
__constant__ GlobalConstants cuConstRendererParams;
// read-only lookup tables used to quickly compute noise (needed by
// advanceAnimation for the snowflake scene)
__constant__ int cuConstNoiseYPermutationTable[256];
__constant__ int cuConstNoiseXPermutationTable[256];
__constant__ float cuConstNoise1DValueTable[256];
// color ramp table needed for the color ramp lookup shader
#define COLOR_MAP_SIZE 5
__constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3];
// including parts of the CUDA code from external files to keep this
// file simpler and to separate code that should not be modified
#include "noiseCuda.cu_inl"
#include "lookupColor.cu_inl"
// kernelClearImageSnowflake -- (CUDA device code)
//
// Clear the image, setting the image to the white-gray gradation that
// is used in the snowflake image
__global__ void kernelClearImageSnowflake() {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float shade = .4f + .45f * static_cast<float>(height-imageY) / height;
float4 value = make_float4(shade, shade, shade, 1.f);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
// up as four separate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelClearImage -- (CUDA device code)
//
// Clear the image, setting all pixels to the specified color rgba
__global__ void kernelClearImage(float r, float g, float b, float a) {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float4 value = make_float4(r, g, b, a);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
// up as four separate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelAdvanceFireWorks
//
// Update the position of the fireworks (if circle is firework)
__global__ void kernelAdvanceFireWorks() {
const float dt = 1.f / 60.f;
const float pi = 3.14159;
const float maxDist = 0.25f;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
float* radius = cuConstRendererParams.radius;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update
return;
}
// determine the fire-work center/spark indices
int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS;
int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS;
int index3i = 3 * fIdx;
int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx;
int index3j = 3 * sIdx;
float cx = position[index3i];
float cy = position[index3i+1];
// update position
position[index3j] += velocity[index3j] * dt;
position[index3j+1] += velocity[index3j+1] * dt;
// fire-work sparks
float sx = position[index3j];
float sy = position[index3j+1];
// compute vector from firework-spark
float cxsx = sx - cx;
float cysy = sy - cy;
// compute distance from fire-work
float dist = sqrt(cxsx * cxsx + cysy * cysy);
if (dist > maxDist) { // restore to starting position
// random starting position on fire-work's rim
float angle = (sfIdx * 2 * pi)/NUM_SPARKS;
float sinA = sin(angle);
float cosA = cos(angle);
float x = cosA * radius[fIdx];
float y = sinA * radius[fIdx];
position[index3j] = position[index3i] + x;
position[index3j+1] = position[index3i+1] + y;
position[index3j+2] = 0.0f;
// travel scaled unit length
velocity[index3j] = cosA/5.0;
velocity[index3j+1] = sinA/5.0;
velocity[index3j+2] = 0.0f;
}
}
// kernelAdvanceHypnosis
//
// Update the radius/color of the circles
__global__ void kernelAdvanceHypnosis() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* radius = cuConstRendererParams.radius;
float cutOff = 0.5f;
// place circle back in center after reaching threshold radius
if (radius[index] > cutOff) {
radius[index] = 0.02f;
} else {
radius[index] += 0.01f;
}
}
// kernelAdvanceBouncingBalls
//
// Update the position of the balls
__global__ void kernelAdvanceBouncingBalls() {
const float dt = 1.f / 60.f;
const float kGravity = -2.8f; // sorry Newton
const float kDragCoeff = -0.8f;
const float epsilon = 0.001f;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
int index3 = 3 * index;
// reverse velocity if center position < 0
float oldVelocity = velocity[index3+1];
float oldPosition = position[index3+1];
if (oldVelocity == 0.f && oldPosition == 0.f) { // stop-condition
return;
}
if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce ball
velocity[index3+1] *= kDragCoeff;
}
// update velocity: v = u + at (only along y-axis)
velocity[index3+1] += kGravity * dt;
// update positions (only along y-axis)
position[index3+1] += velocity[index3+1] * dt;
if (fabsf(velocity[index3+1] - oldVelocity) < epsilon
&& oldPosition < 0.0f
&& fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball
velocity[index3+1] = 0.f;
position[index3+1] = 0.f;
}
}
// kernelAdvanceSnowflake -- (CUDA device code)
//
// move the snowflake animation forward one time step. Updates circle
// positions and velocities. Note how the position of the snowflake
// is reset if it moves off the left, right, or bottom of the screen.
__global__ void kernelAdvanceSnowflake() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
const float dt = 1.f / 60.f;
const float kGravity = -1.8f; // sorry Newton
const float kDragCoeff = 2.f;
int index3 = 3 * index;
float* positionPtr = &cuConstRendererParams.position[index3];
float* velocityPtr = &cuConstRendererParams.velocity[index3];
// loads from global memory
float3 position = *((float3*)positionPtr);
float3 velocity = *((float3*)velocityPtr);
// hack to make farther circles move more slowly, giving the
// illusion of parallax
float forceScaling = fmin(fmax(1.f - position.z, .1f), 1.f); // clamp
// add some noise to the motion to make the snow flutter
float3 noiseInput;
noiseInput.x = 10.f * position.x;
noiseInput.y = 10.f * position.y;
noiseInput.z = 255.f * position.z;
float2 noiseForce = cudaVec2CellNoise(noiseInput, index);
noiseForce.x *= 7.5f;
noiseForce.y *= 5.f;
// drag
float2 dragForce;
dragForce.x = -1.f * kDragCoeff * velocity.x;
dragForce.y = -1.f * kDragCoeff * velocity.y;
// update positions
position.x += velocity.x * dt;
position.y += velocity.y * dt;
// update velocities
velocity.x += forceScaling * (noiseForce.x + dragForce.y) * dt;
velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt;
float radius = cuConstRendererParams.radius[index];
// if the snowflake has moved off the left, right or bottom of
// the screen, place it back at the top and give it a
// pseudorandom x position and velocity.
if ( (position.y + radius < 0.f) ||
(position.x + radius) < -0.f ||
(position.x - radius) > 1.f)
{
noiseInput.x = 255.f * position.x;
noiseInput.y = 255.f * position.y;
noiseInput.z = 255.f * position.z;
noiseForce = cudaVec2CellNoise(noiseInput, index);
position.x = .5f + .5f * noiseForce.x;
position.y = 1.35f + radius;
// restart from 0 vertical velocity. Choose a
// pseudo-random horizontal velocity.
velocity.x = 2.f * noiseForce.y;
velocity.y = 0.f;
}
// store updated positions and velocities to global memory
*((float3*)positionPtr) = position;
*((float3*)velocityPtr) = velocity;
}
// shadePixel -- (CUDA device code)
//
// given a pixel and a circle, determines the contribution to the
// pixel from the circle. Update of the image is done in this
// function. Called by kernelRenderCircles()
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {
float diffX = p.x - pixelCenter.x;
float diffY = p.y - pixelCenter.y;
float pixelDist = diffX * diffX + diffY * diffY;
float rad = cuConstRendererParams.radius[circleIndex];
float maxDist = rad * rad;
// circle does not contribute to the image
if (pixelDist > maxDist)
return;
float3 rgb;
float alpha;
// there is a non-zero contribution. Now compute the shading value
// This conditional is in the inner loop, but it evaluates the
// same direction for all threads so its cost is not so
// bad. Attempting to hoist this conditional is not a required
// student optimization in Assignment 2
if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {
const float kCircleMaxAlpha = .5f;
const float falloffScale = 4.f;
float normPixelDist = sqrt(pixelDist) / rad;
rgb = lookupColor(normPixelDist);
float maxAlpha = .6f + .4f * (1.f-p.z);
maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
} else {
// simple: each circle has an assigned color
int index3 = 3 * circleIndex;
rgb = *(float3*)&(cuConstRendererParams.color[index3]);
alpha = .5f;
}
float oneMinusAlpha = 1.f - alpha;
// BEGIN SHOULD-BE-ATOMIC REGION
// global memory read
float4 existingColor = *imagePtr;
float4 newColor;
newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
newColor.w = alpha + existingColor.w;
// global memory write
*imagePtr = newColor;
// END SHOULD-BE-ATOMIC REGION
}
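// Note on the SHOULD-BE-ATOMIC region above: the pixel read-modify-write is not atomic and
// alpha blending is order-dependent, so a correct renderer must ensure that only one thread
// updates a given pixel at a time and that circles are blended in increasing index order.
// The block-wise kernels below satisfy this by giving each pixel its own thread and walking
// that block's circle list in index order.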
////////////////////////////////////////////////////////////////////////////////////////
CudaRenderer::CudaRenderer() {
image = NULL;
numCircles = 0;
position = NULL;
velocity = NULL;
color = NULL;
radius = NULL;
cudaDevicePosition = NULL;
cudaDeviceVelocity = NULL;
cudaDeviceColor = NULL;
cudaDeviceRadius = NULL;
cudaDeviceImageData = NULL;
}
CudaRenderer::~CudaRenderer() {
if (image) {
delete image;
}
if (position) {
delete [] position;
delete [] velocity;
delete [] color;
delete [] radius;
}
if (cudaDevicePosition) {
hipFree(cudaDevicePosition);
hipFree(cudaDeviceVelocity);
hipFree(cudaDeviceColor);
hipFree(cudaDeviceRadius);
hipFree(cudaDeviceImageData);
}
}
const Image*
CudaRenderer::getImage() {
// need to copy contents of the rendered image from device memory
// before we expose the Image object to the caller
printf("Copying image data from device\n");
hipMemcpy(image->data,
cudaDeviceImageData,
sizeof(float) * 4 * image->width * image->height,
hipMemcpyDeviceToHost);
return image;
}
void
CudaRenderer::loadScene(SceneName scene) {
sceneName = scene;
loadCircleScene(sceneName, numCircles, position, velocity, color, radius);
}
void
CudaRenderer::setup() {
int deviceCount = 0;
std::string name;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Initializing CUDA for CudaRenderer\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
name = deviceProps.name;
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
// By this time the scene should be loaded. Now copy all the key
// data structures into device memory so they are accessible to
// CUDA kernels
//
// See the CUDA Programmer's Guide for descriptions of
// hipMalloc and hipMemcpy
hipMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
hipMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
hipMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, hipMemcpyHostToDevice);
// Initialize parameters in constant memory. We didn't talk about
// constant memory in class, but the use of read-only constant
// memory here is an optimization over just sticking these values
// in device global memory. NVIDIA GPUs have a few special tricks
// for optimizing access to constant memory. Using global memory
// here would have worked just as well. See the Programmer's
// Guide for more information about constant memory.
GlobalConstants params;
params.sceneName = sceneName;
params.numCircles = numCircles;
params.imageWidth = image->width;
params.imageHeight = image->height;
params.position = cudaDevicePosition;
params.velocity = cudaDeviceVelocity;
params.color = cudaDeviceColor;
params.radius = cudaDeviceRadius;
params.imageData = cudaDeviceImageData;
hipMemcpyToSymbol(cuConstRendererParams, ¶ms, sizeof(GlobalConstants));
// also need to copy over the noise lookup tables, so we can
// implement noise on the GPU
int* permX;
int* permY;
float* value1D;
getNoiseTables(&permX, &permY, &value1D);
hipMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256);
hipMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256);
hipMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256);
// last, copy over the color table that's used by the shading
// function for circles in the snowflake demo
float lookupTable[COLOR_MAP_SIZE][3] = {
{1.f, 1.f, 1.f},
{1.f, 1.f, 1.f},
{.8f, .9f, 1.f},
{.8f, .9f, 1.f},
{.8f, 0.8f, 1.f},
};
hipMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE);
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
void
CudaRenderer::allocOutputImage(int width, int height) {
if (image)
delete image;
image = new Image(width, height);
}
// clearImage --
//
// Clears the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
void
CudaRenderer::clearImage() {
// 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
// dim3 blockDim(32, 32, 1);
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) {
hipLaunchKernelGGL(( kernelClearImageSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, );
} else {
hipLaunchKernelGGL(( kernelClearImage), dim3(gridDim), dim3(blockDim), 0, 0, 1.f, 1.f, 1.f, 1.f);
}
hipDeviceSynchronize();
}
// advanceAnimation --
//
// Advance the simulation one time step. Updates all circle positions
// and velocities
void
CudaRenderer::advanceAnimation() {
// 256 threads per block is a healthy number
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
// only the snowflake scene has animation
if (sceneName == SNOWFLAKES) {
hipLaunchKernelGGL(( kernelAdvanceSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, );
} else if (sceneName == BOUNCING_BALLS) {
hipLaunchKernelGGL(( kernelAdvanceBouncingBalls), dim3(gridDim), dim3(blockDim), 0, 0, );
} else if (sceneName == HYPNOSIS) {
hipLaunchKernelGGL(( kernelAdvanceHypnosis), dim3(gridDim), dim3(blockDim), 0, 0, );
} else if (sceneName == FIREWORKS) {
hipLaunchKernelGGL(( kernelAdvanceFireWorks), dim3(gridDim), dim3(blockDim), 0, 0, );
}
hipDeviceSynchronize();
}
//-----------------above haven't changed
__global__ void
incl_sweep_up(int N, int dim, int twod, int twod1, array_type* output) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N && ( (index/dim) % twod1 ==0) )
{output[index+ dim*(twod1 -1)] += output[index+ dim*(twod -1)];}
}
__global__ void
incl_sweep_down(int N, int dim, int twod, int twod1, array_type* output) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if ( (dim-1< index) && (index < N) && ( (index/dim) % twod1 ==0) ){
output[index+ dim*(twod-1)] += output[index- dim*1];}
}
//--- the above should be correct
__global__ void
obtain_seperator(int total_size, int num_circ, int num_circ_true, int num_boxes, array_type* circ_cover_flag, int* separators, int partitionId, int partitionNum){
//
int index = blockIdx.x * blockDim.x + threadIdx.x;
// int circleid = index/num_boxes + partitionId * partitionNum;
//update the separators by the way
if (index<num_boxes) {separators[index]=circ_cover_flag[(num_circ_true-1)*num_boxes+index];}
// printf(" separa %d, loca %d", separators[index], (num_circ_true-1)*num_boxes+index);}
}
__global__ void
concurrent_write_ids(int total_size, int num_circ, int num_circ_true, int num_boxes, array_type* circ_cover_flag, int* circ_cover_id, int* separators, int partitionId, int partitionNum){
//
int index = blockIdx.x * blockDim.x + threadIdx.x;
int circleid = index/num_boxes;
int blockid = index%num_boxes; // index-num_boxes*circleid;
if (index<total_size){
if (circleid==0){
if (circ_cover_flag[index]==1){
int new_loc = num_circ_true*blockid;
//printf("index %d, new_loc %d", index, new_loc);
circ_cover_id[new_loc]=0;}}
else{
// if (circleid>0){
if ( circ_cover_flag[index] - circ_cover_flag[index-num_boxes] ==1){
int new_loc = blockid*num_circ_true +circ_cover_flag[index] -1;
//if (circ_cover_flag[index]==2){ }
circ_cover_id[new_loc] = circleid + partitionId * partitionNum; }
}
}
//update the separators by the way
if (index<num_boxes) {separators[index]=circ_cover_flag[(num_circ_true-1)*num_boxes+index];}
// printf(" separa %d, loca %d", separators[index], (num_circ_true-1)*num_boxes+index);}
}
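// Illustrative compaction trace for one box (assumed values): with 6 circles and cover flags
// [1,0,1,1,0,0], the inclusive scan over the circle dimension gives [1,1,2,3,3,3]; a circle c
// is emitted exactly where its scanned value rises by 1, into slot (scan[c] - 1), producing
// circ_cover_id = [0, 2, 3] for that box, while separators picks up the last scanned value (3),
// i.e. the number of circles covering the box.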
__global__ void
concurrent_write_ids_v2(int total_size, int num_circ, int num_circ_true, int num_box_max, int num_boxes, array_type* circ_cover_flag, int* circ_cover_id, int partitionId, int partitionNum){
//
int index = blockIdx.x * blockDim.x + threadIdx.x;
int circleid = index/num_boxes;
int blockid = index%num_boxes; // index-num_boxes*circleid;
if (index<total_size){
if (circleid==0){
if (circ_cover_flag[index]==1){
int new_loc = num_box_max*blockid;
//printf("index %d, new_loc %d", index, new_loc);
circ_cover_id[new_loc]=0;}}
else{
// if (circleid>0){
if ( circ_cover_flag[index] ==1){
int new_loc = blockid*num_box_max +circ_cover_flag[index] -1;
//if (circ_cover_flag[index]==2){ }
circ_cover_id[new_loc] = circleid + partitionId * partitionNum; }
}
}
// again? need sort? is this efficient? sort: Pair: box_id, circle_id?
}
void multi_dim_inclusive_scan(int N, int lens, int dim, array_type* device_result){
int blocksize = 512;
int num_blocks = (N+blocksize-1)/blocksize;
// printf("N=%d,block size = %d, number of blocks %d \n",N,blocksize,num_blocks);
for (int twod =1; twod <lens; twod *=2){
int twod1 = twod*2;
hipLaunchKernelGGL(( incl_sweep_up), dim3(num_blocks), dim3(blocksize) , 0, 0, N, dim, twod, twod1, device_result);
}
for (int twod = lens/4; twod >=1; twod /=2){
int twod1 = twod*2;
hipLaunchKernelGGL(( incl_sweep_down), dim3(num_blocks), dim3(blocksize) , 0, 0, N, dim, twod, twod1, device_result);
}
}
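// Illustrative trace for lens = 8, dim = 1, input [1,0,1,1,0,0,1,0]:
//   up-sweep:   [1,1,1,2,0,0,1,1] -> [1,1,1,3,0,0,1,1] -> [1,1,1,3,0,0,1,4]
//   down-sweep: [1,1,1,3,0,3,1,4] -> [1,1,2,3,3,3,4,4]
// i.e. the work-efficient sweep pair leaves the inclusive prefix sums in place; lens must be a
// power of two (see nextPow2 above), and dim strides the same pattern across the box dimension.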
#include "circleBoxTest.cu_inl"
__global__ void findCircsInBlock(array_type* circ_cover_flag, int num_total_blocks, int num_blockx, int num_blocky, int partitionId, int partitionNum) {
// step1: find the circle idx and find the block idx
int Idx = blockDim.x * blockIdx.x + threadIdx.x; // B*numCircles
if (Idx>= partitionNum*num_total_blocks) {return;}
int circleId = Idx / num_total_blocks + partitionId * partitionNum; //obtain the circle Id
// int circleId = Idx / num_total_blocks; //obtain the circle Id
int blockId = Idx % num_total_blocks; //obtain the block Id
// step2: justify whether this circle is in this block
// can we use circlesBoxTest?
//step2.1 obtain the block size
// image params
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
// compute the size of block
int blockId_dimX = blockId % num_blockx;
int blockId_dimY = blockId / num_blockx;
short blockMinX = BLOCK_DIM_X * blockId_dimX;
short blockMaxX = BLOCK_DIM_X * (blockId_dimX + 1);
short blockMinY = BLOCK_DIM_Y * blockId_dimY;
short blockMaxY = BLOCK_DIM_Y * (blockId_dimY + 1);
float blockL = blockMinX * invWidth;
float blockR = blockMaxX * invWidth;
float blockB = blockMinY * invHeight;
float blockT = blockMaxY * invHeight;
//step2.2 obtain the circle size
int index3 = 3 * circleId;
// read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[circleId];
// use the circleInBoxConservative in circleBoxTest.cu_inl
if( circleInBoxConservative(p.x, p.y, rad, blockL, blockR, blockT, blockB) ){
circ_cover_flag[Idx] = 1;
}
else{
circ_cover_flag[Idx] = 0;
}
__syncthreads();
}
__global__ void findNumCircsInBlock(int* separators, int num_total_blocks, int num_blockx, int num_blocky, int numPartitions) {
// Aim to find separators not via multi_dim_inclusive_scan
// check sharedMem at https://www.cnblogs.com/xiaoxiaoyibu/p/11402607.html ; to optimize memory access
__shared__ int numCirclesPerPixel[BLOCK_DIM_X * BLOCK_DIM_Y];
int numPixels = BLOCK_DIM_X * BLOCK_DIM_Y;
int blockId = blockIdx.y * num_blockx + blockIdx.x;
if (blockId >= num_total_blocks){return;}
int pixelId = threadIdx.y * BLOCK_DIM_X + threadIdx.x;
//step2.1 obtain the block size
// image params
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
// compute the size of block
int blockId_dimX = blockId % num_blockx;
int blockId_dimY = blockId / num_blockx;
short blockMinX = BLOCK_DIM_X * blockId_dimX;
short blockMaxX = BLOCK_DIM_X * (blockId_dimX + 1);
short blockMinY = BLOCK_DIM_Y * blockId_dimY;
short blockMaxY = BLOCK_DIM_Y * (blockId_dimY + 1);
float blockL = blockMinX * invWidth;
float blockR = blockMaxX * invWidth;
float blockB = blockMinY * invHeight;
float blockT = blockMaxY * invHeight;
//step2.2 obtain the circle size
//Each thread would take responsibility for partition of Circles
int numCirclesPerPartition = (cuConstRendererParams.numCircles + numPartitions - 1) / numPartitions;
// obtain the start and end
int start = numCirclesPerPartition * pixelId;
int end = numCirclesPerPartition * (pixelId+1);
if (pixelId == (numPixels - 1)){
end = cuConstRendererParams.numCircles;
}
int numCirclesInBlockPartition = 0;
// To find whether they are in this block and update separators[blockId]
// How to do???
for (int i = start; i <end; i++){
if (i >= cuConstRendererParams.numCircles){return;}
int index3 = 3 * i;
// read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[i];
// use the circleInBoxConservative in circleBoxTest.cu_inl
if( circleInBoxConservative(p.x, p.y, rad, blockL, blockR, blockT, blockB) ){
numCirclesInBlockPartition += 1;
}
}
// such that we can have in each thread how many circles are in this block
// then we do what? we want to sum up of the numCirclesInBlockPartition
numCirclesPerPixel[pixelId] = numCirclesInBlockPartition;
__syncthreads();
// parallel reduction
for (unsigned int j = numPixels / 2; j > 0; j >>= 1)
{
if (pixelId < j)
numCirclesPerPixel[pixelId] += numCirclesPerPixel[pixelId + j];
__syncthreads();
}
if (pixelId == 0)
separators[blockId] = numCirclesPerPixel[0];
}
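// Illustrative reduction trace (assumed per-thread counts, 8 threads): [3,1,0,2,5,0,1,4]
//   j=4 -> [8,1,1,6,...]   j=2 -> [9,7,...]   j=1 -> [16,...]
// so thread 0 writes 16 -- the total number of circles overlapping this block -- into
// separators[blockId].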
__global__ void findCircCoverIdInBlock(int* separators, int* circ_cover_flag, int num_total_blocks, int num_blockx, int num_blocky, int numPartitions) {
// Failed attempt: find circ_cover_id without multi_dim_inclusive_scan, with the aid of separators
// check sharedMem at https://www.cnblogs.com/xiaoxiaoyibu/p/11402607.html ; to optimize memory access
__shared__ int numCirclesPerPixel[BLOCK_DIM_X * BLOCK_DIM_Y];
int numPixels = BLOCK_DIM_X * BLOCK_DIM_Y;
int blockId = blockIdx.y * num_blockx + blockIdx.x;
if (blockId >= num_total_blocks){return;}
int pixelId = threadIdx.y * BLOCK_DIM_X + threadIdx.x;
int numCirclesWithinThisBlock = 0;
// obtain #circle within this block
if (blockId == 0){numCirclesWithinThisBlock = separators[blockId];}
else{numCirclesWithinThisBlock = separators[blockId];}
// initialize a shared mem with size numCirclesWithinThisBlock
// __shared__ int circCoverIdThisBlock[numCirclesWithinThisBlock];
extern __shared__ int circCoverIdThisBlock[];
// still like findNumCircsInBlock but we need to sort the circle id
//step2.1 obtain the block size
// image params
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
// compute the size of block
int blockId_dimX = blockId % num_blockx;
int blockId_dimY = blockId / num_blockx;
short blockMinX = BLOCK_DIM_X * blockId_dimX;
short blockMaxX = BLOCK_DIM_X * (blockId_dimX + 1);
short blockMinY = BLOCK_DIM_Y * blockId_dimY;
short blockMaxY = BLOCK_DIM_Y * (blockId_dimY + 1);
float blockL = blockMinX * invWidth;
float blockR = blockMaxX * invWidth;
float blockB = blockMinY * invHeight;
float blockT = blockMaxY * invHeight;
//step2.2 obtain the circle size
//Each thread would take responsibility for partition of Circles
int numCirclesPerPartition = (cuConstRendererParams.numCircles + numPartitions - 1) / numPartitions;
// obtain the start and end
int start = numCirclesPerPartition * pixelId;
int end = numCirclesPerPartition * (pixelId+1);
if (pixelId == (BLOCK_DIM_X * BLOCK_DIM_Y - 1)){
end = cuConstRendererParams.numCircles;
}
int numCirclesInBlockPartition = 0;
// To find whether they are in this block and update separators[blockId]
// How to do???
for (int i = start; i < end; i++){
if (i >= cuConstRendererParams.numCircles){return;}
int index3 = 3 * i;
// read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[i];
// use the circleInBoxConservative in circleBoxTest.cu_inl
if( circleInBox(p.x, p.y, rad, blockL, blockR, blockT, blockB) ){
numCirclesInBlockPartition += 1;
}
}
// such that we can have in each thread how many circles are in this block
// then we do what? we want to sum up of the numCirclesInBlockPartition
numCirclesPerPixel[pixelId] = numCirclesInBlockPartition;
__syncthreads();
// parallel reduction
for (unsigned int j = numPixels / 2; j > 0; j >>= 1)
{
if (pixelId < j)
numCirclesPerPixel[pixelId] += numCirclesPerPixel[pixelId + j];
__syncthreads();
}
if (pixelId == 0)
separators[blockId] = numCirclesPerPixel[0];
}
__inline__ __device__ void
incl_scan_shared_mem(int threadIndex, unsigned int* Input, int size){
for(int twod = 1; twod < size; twod <<= 1){
int twod1 = twod*2;
if((threadIndex & (twod1 - 1)) == 0)
{Input[threadIndex+ (twod1 -1)] += Input[threadIndex+ (twod -1)];}
__syncthreads();
}
for(int twod = size/4; twod >=1; twod >>= 1){
int twod1 = twod*2;
if((threadIndex>0) && ((threadIndex & (twod1 - 1)) == 0))
{Input[threadIndex+ twod -1] += Input[threadIndex -1];}
__syncthreads();
}
}
__global__ void kernelRenderCircles_shared_mem(int* separators, int num_total_blocks, int num_blockx, int num_blocky, int numPartitions) {
// Partition the circles across the block's threads; this cannot be made fully parallel because of the multi-dim scan
// Use sharedMem to optimize memory access
__shared__ unsigned int numCirclesPerPixel[BLOCK_DIM_X * BLOCK_DIM_Y];
int numPixels = BLOCK_DIM_X * BLOCK_DIM_Y;
int blockId = blockIdx.y * num_blockx + blockIdx.x;
if (blockId >= num_total_blocks){return;}
int pixelId = threadIdx.y * BLOCK_DIM_X + threadIdx.x;
//step2.1 obtain the block size
// image params
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
// compute the size of block
int blockId_dimX = blockId % num_blockx;
int blockId_dimY = blockId / num_blockx;
short blockMinX = BLOCK_DIM_X * blockId_dimX;
short blockMaxX = BLOCK_DIM_X * (blockId_dimX + 1);
short blockMinY = BLOCK_DIM_Y * blockId_dimY;
short blockMaxY = BLOCK_DIM_Y * (blockId_dimY + 1);
float blockL = blockMinX * invWidth;
float blockR = blockMaxX * invWidth;
float blockB = blockMinY * invHeight;
float blockT = blockMaxY * invHeight;
//step2.2 obtain the circle size
//Each thread would take responsibility for partition of Circles
int numCirclesPerPartition = (cuConstRendererParams.numCircles + numPartitions - 1) / numPartitions;
// obtain the start and end
int start = numCirclesPerPartition * pixelId;
int end = numCirclesPerPartition * (pixelId+1);
if (pixelId == (numPixels - 1)){
end = cuConstRendererParams.numCircles;
}
int numCirclesInBlockPartition = 0;
int circ_cover_id_p[100];
// To find whether they are in this block and update separators[blockId]
// Add local recorder to record the cover_circ_id
for (int i = start; i < end; i++){
if(i < cuConstRendererParams.numCircles){
int index3 = 3 * i;
// read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[i];
// use the circleInBoxConservative in circleBoxTest.cu_inl
if(circleInBoxConservative(p.x, p.y, rad, blockL, blockR, blockT, blockB) ){
circ_cover_id_p[numCirclesInBlockPartition] = i;
numCirclesInBlockPartition += 1;
}
}
}
// such that we can have in each thread how many circles are in this block
// then we do what? we want to sum up of the numCirclesInBlockPartition
numCirclesPerPixel[pixelId] = numCirclesInBlockPartition;
__syncthreads();
// TODO: we need an inclusive scan here and update separators! we can check the separators
incl_scan_shared_mem(pixelId, numCirclesPerPixel,BLOCK_DIM_X * BLOCK_DIM_Y);
__syncthreads();
int totalCircles = numCirclesPerPixel[numPixels - 1];
separators[blockId] = numCirclesPerPixel[numPixels - 1];
__syncthreads();
__shared__ int circ_cover_id_b[3000]; // 2500 is enough for circleInBox()
int startAddr = 0;
if (pixelId != 0) {startAddr = numCirclesPerPixel[pixelId - 1];}
// // how to update? AT! __syncthreads();
for (int i =0; i < numCirclesInBlockPartition; i++){
circ_cover_id_b[i + startAddr] = circ_cover_id_p[i];
}
__syncthreads();
// parallel reduction
// no need for parallel reduction; an inclusive scan is enough
// for (unsigned int j = numPixels / 2; j > 0; j >>= 1)
// {
// if (pixelId < j)
// numCirclesPerPixel[pixelId] += numCirclesPerPixel[pixelId + j];
// __syncthreads();
// }
// if (pixelId == 0)
// separators[blockId] = numCirclesPerPixel[0];
// // directly render is okay! we don't need another render
// // pixel data
int pixelX = blockIdx.x * blockDim.x + threadIdx.x;
int pixelY = blockIdx.y * blockDim.y + threadIdx.y;
if (pixelY >= imageHeight || pixelX >= imageWidth) return;
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelX) + 0.5f),
invHeight * (static_cast<float>(pixelY) + 0.5f));
float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixelY * imageWidth + pixelX)]);
for (int i = 0; i < totalCircles; i ++){
int circleIdx = circ_cover_id_b[i];
int index3 = circleIdx * 3;
// read postion and radius then use shadePixel to update
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
shadePixel(circleIdx, pixelCenterNorm, p, imgPtr);
}
}
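// Minimal launch sketch (an assumption -- the host-side launcher is not shown here): one thread
// per pixel, one CUDA block per BLOCK_DIM_X x BLOCK_DIM_Y image tile, and numPartitions equal to
// the number of threads per block so every circle gets tested exactly once. cu_separators is a
// placeholder name for a device buffer of gridDim.x * gridDim.y ints:
//   dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y);
//   dim3 gridDim((imageWidth + BLOCK_DIM_X - 1) / BLOCK_DIM_X,
//                (imageHeight + BLOCK_DIM_Y - 1) / BLOCK_DIM_Y);
//   kernelRenderCircles_shared_mem<<<gridDim, blockDim>>>(cu_separators, gridDim.x * gridDim.y,
//                                                         gridDim.x, gridDim.y,
//                                                         BLOCK_DIM_X * BLOCK_DIM_Y);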
__global__ void kernelRenderCircles_shared_mem_skip(int* separators, int num_total_blocks, int num_blockx, int num_blocky, int numPartitions) {
// Partition the circles across the block's threads; this cannot be made fully parallel because of the multi-dim scan
// Skip the first several circles because so many circles are overlapped!
// This heuristic is applied to the big scenes: rand + biglittle
__shared__ unsigned int numCirclesPerPixel[BLOCK_DIM_X * BLOCK_DIM_Y];
int numPixels = BLOCK_DIM_X * BLOCK_DIM_Y;
int blockId = blockIdx.y * num_blockx + blockIdx.x;
if (blockId >= num_total_blocks){return;}
int pixelId = threadIdx.y * BLOCK_DIM_X + threadIdx.x;
//step2.1 obtain the block size
// image params
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
// compute the size of block
int blockId_dimX = blockId % num_blockx;
int blockId_dimY = blockId / num_blockx;
short blockMinX = BLOCK_DIM_X * blockId_dimX;
short blockMaxX = BLOCK_DIM_X * (blockId_dimX + 1);
short blockMinY = BLOCK_DIM_Y * blockId_dimY;
short blockMaxY = BLOCK_DIM_Y * (blockId_dimY + 1);
float blockL = blockMinX * invWidth;
float blockR = blockMaxX * invWidth;
float blockB = blockMinY * invHeight;
float blockT = blockMaxY * invHeight;
//step2.2 obtain the circle size
//Each thread would take responsibility for partition of Circles
int numCirclesPerPartition = (cuConstRendererParams.numCircles + numPartitions - 1) / numPartitions;
// obtain the start and end
int start = numCirclesPerPartition * pixelId;
int end = numCirclesPerPartition * (pixelId+1);
if (pixelId == (numPixels - 1)){
end = cuConstRendererParams.numCircles;
}
int numCirclesInBlockPartition = 0;
int circ_cover_id_p[100];
    // Find which circles of this thread's partition fall into this block and update separators[blockId];
    // record the covering circle ids in a local array (circ_cover_id_p).
for (int i = start; i < end; i++){
if(i < cuConstRendererParams.numCircles){
int index3 = 3 * i;
            // read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[i];
// use the circleInBoxConservative in circleBoxTest.cu_inl
if(circleInBoxConservative(p.x, p.y, rad, blockL, blockR, blockT, blockB) ){
circ_cover_id_p[numCirclesInBlockPartition] = i;
numCirclesInBlockPartition += 1;
}
}
}
    // At this point each thread knows how many circles of its partition fall into this block;
    // next, sum the per-thread counts (numCirclesInBlockPartition) across the block.
numCirclesPerPixel[pixelId] = numCirclesInBlockPartition;
__syncthreads();
    // Run an inclusive scan over the per-thread counts and update separators; the separators can be checked afterwards.
incl_scan_shared_mem(pixelId, numCirclesPerPixel,BLOCK_DIM_X * BLOCK_DIM_Y);
__syncthreads();
int totalCircles = numCirclesPerPixel[numPixels - 1];
separators[blockId] = numCirclesPerPixel[numPixels - 1];
__syncthreads();
// // printf("%d ", totalCircles);
// // update block-wise circ_cover_id here
__shared__ int circ_cover_id_b[3000]; // 2500 is enough for circleInBox()
int startAddr = 0;
if (pixelId != 0) {startAddr = numCirclesPerPixel[pixelId - 1];}
    // // note: startAddr reads the scanned counts, which are only valid after the __syncthreads() following the scan
for (int i =0; i < numCirclesInBlockPartition; i++){
circ_cover_id_b[i + startAddr] = circ_cover_id_p[i];
}
__syncthreads();
// parallel reduction
    // no need for a parallel reduction; the inclusive scan above already gives the total
// for (unsigned int j = numPixels / 2; j > 0; j >>= 1)
// {
// if (pixelId < j)
// numCirclesPerPixel[pixelId] += numCirclesPerPixel[pixelId + j];
// __syncthreads();
// }
// if (pixelId == 0)
// separators[blockId] = numCirclesPerPixel[0];
    // // rendering directly here is fine; we don't need a separate render pass
// // pixel data
int pixelX = blockIdx.x * blockDim.x + threadIdx.x;
int pixelY = blockIdx.y * blockDim.y + threadIdx.y;
if (pixelY >= imageHeight || pixelX >= imageWidth) return;
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelX) + 0.5f),
invHeight * (static_cast<float>(pixelY) + 0.5f));
float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixelY * imageWidth + pixelX)]);
    // Tune where we start skipping
int startPlace = 0;
if (totalCircles > 2000) {startPlace = 1700;}
else if (totalCircles > 1500) {startPlace = 1000;}
else if (totalCircles > 1000) {startPlace = 700;}
else if (totalCircles > 700) {startPlace = 500;}
else if (totalCircles > 100) {startPlace = 70;}
for (int i = startPlace; i < totalCircles; i ++){
int circleIdx = circ_cover_id_b[i];
int index3 = circleIdx * 3;
        // read position and radius, then use shadePixel to update the pixel
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
shadePixel(circleIdx, pixelCenterNorm, p, imgPtr);
}
}
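// Worked example of the skip heuristic above (illustrative only): if a block is
// covered by totalCircles = 2300 circles, rendering starts at index 1700, so only
// the last 600 circles are composited for each pixel. This trades exactness for
// speed: it assumes the skipped early circles are visually overwritten by the many
// later ones, which is why it is only intended for the dense rand / biglittle scenes.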
/*
void debug_set1(){
int* debug_flag= new int[20];
int debug[20] = {1,1,0,0,1, 0,0,1,0,0, 0,1,1,0,0, 0,0,0,1,1};
memmove(debug_flag, debug, 20*sizeof(int));
int* debug_flag_result = new int[20];
int* debug_id_result = new int[20];
for (int i = 0; i < 20; i++){
printf("%d ", debug_flag[i]);
}
printf("\n");
int* device_flag;
int* device_id;
int* device_separat;
int N_rd = nextPow2(4);
int B = 5;
int* debug_separators = new int[B];
int total = N_rd*B;
printf("total %d \n",total);
hipMalloc((void **)&device_flag, sizeof(int) * total);
hipMalloc((void **)&device_id, sizeof(int) * total);
hipMalloc((void **)&device_separat, sizeof(int) * B);
hipMemcpy(device_flag, debug_flag, total * sizeof(int),hipMemcpyHostToDevice);
multi_dim_inclusive_scan(total, N_rd, B, device_flag);
concurrent_write_ids<<<10,10>>>(total, N_rd, 4, B, device_flag, device_id, device_separat);
hipMemcpy(debug_flag_result, device_flag, total * sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(debug_id_result, device_id, total * sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(debug_separators, device_separat, B * sizeof(int),hipMemcpyDeviceToHost);
//hipMemcpy(debug_id_result, device_id, total * sizeof(int),hipMemcpyDeviceToHost);
//print
for (int i = 0; i < 20; i++){
printf("%d ", debug_flag_result[i]);
}
printf("\n");
for (int i = 0; i < 20; i++){
printf("%d ", debug_id_result[i]);
}
printf("\n");
for (int i = 0; i < B; i++){
printf("%d ", debug_separators[i]);
}
printf("\n");
}
void debug_set2(){
int* debug_flag= new int[20];
int debug[20] = {1,1,0,0,1, 0,0,1,0,0, 0,1,1,0,0, 0,0,0,1,1};
memmove(debug_flag, debug, 20*sizeof(int));
int* debug_flag_result = new int[20];
int* debug_id_result = new int[20];
for (int i = 0; i < 20; i++){
printf("%d ", debug_flag[i]);
}
printf("\n");
int* device_flag;
int* device_id;
int* device_separat;
int N=5;
int N_rd = nextPow2(N);
int B = 4;
int* debug_separators = new int[B];
int total = N_rd*B;
int total_rd = N_rd*B;
printf("total %d \n",total_rd);
hipMalloc((void **)&device_flag, sizeof(int) * total_rd);
hipMalloc((void **)&device_id, sizeof(int) * total_rd);
hipMalloc((void **)&device_separat, sizeof(int) * B);
hipMemcpy(device_flag, debug_flag, total * sizeof(int),hipMemcpyHostToDevice);
multi_dim_inclusive_scan(total_rd, N_rd, B, device_flag);
concurrent_write_ids<<<10,10>>>(total, N_rd, N, B, device_flag, device_id, device_separat);
hipMemcpy(debug_flag_result, device_flag, total * sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(debug_id_result, device_id, total_rd * sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(debug_separators, device_separat, B * sizeof(int),hipMemcpyDeviceToHost);
//hipMemcpy(debug_id_result, device_id, total * sizeof(int),hipMemcpyDeviceToHost);
//print
for (int i = 0; i < 20; i++){
printf("%d ", debug_flag_result[i]);
}
printf("\n");
for (int i = 0; i < 20; i++){
printf("%d ", debug_id_result[i]);
}
printf("\n");
for (int i = 0; i < B; i++){
printf("%d ", debug_separators[i]);
}
printf("\n");
}
*/
// kernelRenderCircles -- (CUDA device code)
//
// Each thread owns one pixel and composites, in circle index order, the circles
// assigned to its block (via the seperators / circ_cover_id arrays), so updates to
// a pixel are ordered and performed by a single thread.
__global__ void kernelRenderCircles(int* seperators, int* circ_cover_id, int num_blockx, int num_blocky, int numCircles) {
// obtain block id
int blockId = blockIdx.y * num_blockx + blockIdx.x;
// obtain start circle and end circle using the seperators
// int startCirc = seperators[blockId];
// int endCirc = seperators[blockId+1];
// int numCircsForCurrentBlock = endCirc - startCirc;
int numCircsForCurrentBlock = seperators[blockId];
// we can access the circle id through the circ_cover_id array: N*B
int startAddInCoverId = numCircles * blockId;
// startAddInCoverId + numCircForCurrentBlock
// update all the pixels within this blockId
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
int pixelX = blockIdx.x * blockDim.x + threadIdx.x;
int pixelY = blockIdx.y * blockDim.y + threadIdx.y;
if (pixelY >= imageHeight || pixelX >= imageWidth) return;
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelX) + 0.5f),
invHeight * (static_cast<float>(pixelY) + 0.5f));
float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixelY * imageWidth + pixelX)]);
// iterate over all the circles on this block
    // note: circles are applied in increasing index order
for (int idx = 0; idx < numCircsForCurrentBlock; idx++){
int circleIdx = circ_cover_id[startAddInCoverId + idx];
// if ( (threadIdx.x==0) && (threadIdx.y==0)) {printf("%d %d ",blockId,circleIdx);}
int index3 = circleIdx * 3;
        // read position and radius, then use shadePixel to update the pixel
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
shadePixel(circleIdx, pixelCenterNorm, p, imgPtr);
}
}
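// Layout assumed by kernelRenderCircles: circ_cover_id holds one segment of
// numCircles entries per block, so block b's covering circles live at
// circ_cover_id[numCircles*b .. numCircles*b + seperators[b] - 1], already sorted by
// circle index by concurrent_write_ids. Illustrative example: with numCircles = 4
// and block 2 covered by circles {1, 3}, seperators[2] == 2 and entries 8 and 9 of
// circ_cover_id hold 1 and 3.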
__global__ void kernelRenderCircles_simple(int numCircles, int num_blockx, int num_blocky){
    // a simple renderer for the small cases: every pixel iterates over all circles in index order
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
// for all pixels in the region
// update each pixel based on given sequence of circles on each region
int pixelX = blockIdx.x * blockDim.x + threadIdx.x;
int pixelY = blockIdx.y * blockDim.y + threadIdx.y;
if (pixelY >= imageHeight || pixelX >= imageWidth) return;
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelX) + 0.5f),
invHeight * (static_cast<float>(pixelY) + 0.5f));
float4 *imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixelY * imageWidth + pixelX)]);
// iterate over all circles on this region
for (int idx = 0; idx < numCircles; idx++){
// update pixel under circle order
int circleId = idx;
int index3 = 3 * circleId;
// read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
shadePixel(circleId, pixelCenterNorm, p, imgPtr);
}
}
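// Design note: this brute-force kernel (presumably intended for the small scenes)
// has every pixel walk the full circle list in index order, which trivially
// satisfies the ordering requirement and needs none of the scan / compaction
// machinery used by the block-based kernels.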
void
CudaRenderer::render() {
// 256 threads per block is a healthy number
// dim3 blockDim(256, 1);
// dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
//debug_set1();
//debug_set2();
int block_dimx = 32;
int block_dimy = 32;
int num_blockx = (image->width+block_dimx-1)/block_dimx;
int num_blocky = (image->height+block_dimy-1)/block_dimy;
int num_total_blocks = num_blockx*num_blocky;
int* circ_cover_id;
int* separators; // size:num_total_blocks [num_circ per block]
    array_type* circ_cover_flag; // the largest array; holds 0/1 coverage flags
if (numCircles < 10000){
int num_circ_rd = nextPow2(numCircles); //rounded numCircles
long total_size = numCircles*num_total_blocks;
long total_size_rd = num_circ_rd*num_total_blocks;
//int* check_ids = new int[total_size];
// int* check_sps = new int[num_total_blocks];
//int* check_flags = new int[total_size_rd];
// the grid size we process now is total_size;
        int block_size_1d = 512; // can adjust
int num_block_1d = (total_size_rd+block_size_1d-1)/block_size_1d;
// double time0 = CycleTimer::currentSeconds();
hipMalloc((void **)&circ_cover_flag, sizeof(array_type) * total_size_rd);
hipMalloc((void **)&circ_cover_id, sizeof(int) * total_size);
hipMalloc((void **)&separators, sizeof(int) * num_total_blocks);
hipDeviceSynchronize();
// double time1 = CycleTimer::currentSeconds();
// printf("step 0 %f s\n",time1-time0);
// STEP1
// give status 0/1 to the circ_cover_flag based on coverage
hipLaunchKernelGGL(( findCircsInBlock), dim3(num_block_1d),dim3(block_size_1d), 0, 0, circ_cover_flag, num_total_blocks, num_blockx, num_blocky, 0, numCircles);
hipDeviceSynchronize();
// check codes
// array_type* checkarray = NULL;
// checkarray = (array_type*)malloc(sizeof(array_type) * num_total_blocks);
// hipMemcpy(checkarray, circ_cover_flag, sizeof(array_type) * total_size, hipMemcpyDeviceToHost);
// for (long i = 0; i < total_size; i++){
// printf("check circle %d in block %d : %d\n", i / num_total_blocks, i % num_total_blocks, checkarray[i]);
// }
// STEP2
        // use a multi-dimensional scan to find the number of circles in each block and their ids
// save 2 1d arrays: the location increment in the array, the separators.
//(1) scan the array obtained above
// double time2 = CycleTimer::currentSeconds();
// printf("step 1 %f s\n",time2-time1);
multi_dim_inclusive_scan(total_size_rd, num_circ_rd, num_total_blocks, circ_cover_flag); //check circ_cover_flag
hipDeviceSynchronize();
// double time3 = CycleTimer::currentSeconds();
// printf("step 2(1) %f s\n",time3-time2);
//(2) concurrent_write id and separators
hipLaunchKernelGGL(( concurrent_write_ids), dim3(num_block_1d),dim3(block_size_1d), 0, 0, total_size, num_circ_rd, numCircles, num_total_blocks, \
circ_cover_flag, circ_cover_id, separators, 0, numCircles); //check circ_cover_id,separators
hipDeviceSynchronize();
// check codes
// hipMemcpy(check_sps, separators, num_total_blocks * sizeof(int),hipMemcpyDeviceToHost);
// for (int i = 0; i < num_total_blocks; i++){
// if (i%num_blockx==0) {printf("\n");}
// printf("%d ", check_sps[i]);
// }
// STEP3: render
// define dim for block
dim3 blockDimBlock(block_dimx, block_dimy);
dim3 gridDimBlock(num_blockx, num_blocky);
// int* separators2; // size:num_total_blocks [num_circ per block]
// hipMalloc((void **)&separators2, sizeof(int) * num_total_blocks);
// int* check_sps2 = new int[num_total_blocks];
// findNumCircsInBlock<<<gridDimBlock, blockDimBlock>>> (separators2, num_total_blocks, num_blockx, num_blocky, block_dimx*block_dimy);
// hipDeviceSynchronize();
// hipMemcpy(check_sps2, separators2, num_total_blocks * sizeof(int),hipMemcpyDeviceToHost);
// printf("\n");
// for (int i = 0; i < num_total_blocks; i++){
// if (i%num_blockx==0) {printf("\n");}
// printf("%d ", check_sps2[i]);
// }
// double time4 = CycleTimer::currentSeconds();
// printf("step 2(3) %f s\n",time4-time3);
//right now, the last
//hipDeviceSynchronize();
//step3: use the separators and circ_cover_id to render the circle
hipLaunchKernelGGL(( kernelRenderCircles), dim3(gridDimBlock), dim3(blockDimBlock), 0, 0, separators, circ_cover_id, num_blockx, num_blocky, numCircles);
hipDeviceSynchronize();
// double time5 = CycleTimer::currentSeconds();
// printf("step 3 %f s \n",time5-time4);
}
else if (numCircles < 100000){
int partitionNum = 1;
int numCirclesPerPartition = numCircles / partitionNum;
int num_circ_rd_p = nextPow2(numCirclesPerPartition);
long total_size_p = numCirclesPerPartition * num_total_blocks;
long total_size_rd_p = num_circ_rd_p * num_total_blocks;
// the grid size we process now is total_size;
        int block_size_1d = 512; // can adjust
int num_block_1d = (total_size_rd_p + block_size_1d-1)/block_size_1d;
double time0 = CycleTimer::currentSeconds();
hipMalloc((void **)&circ_cover_flag, sizeof(array_type) * total_size_rd_p);
hipMalloc((void **)&circ_cover_id, sizeof(int) * total_size_p);
hipMalloc((void **)&separators, sizeof(int) * num_total_blocks);
// int* check_sps = new int[num_total_blocks];
// double time1 = CycleTimer::currentSeconds();
// printf("step 0 %f s\n",time1-time0);
// double time2_sum = 0;
// double time3_sum = 0;
// double time4_sum = 0;
// double time5_sum = 0;
// double timeC_sum = 0;
for (int i = 0; i < partitionNum; i++){
// double time1_n = CycleTimer::currentSeconds();
//step1: give status 0/1 to the circ_cover_flag based on coverage
hipLaunchKernelGGL(( findCircsInBlock), dim3(num_block_1d),dim3(block_size_1d), 0, 0, circ_cover_flag, num_total_blocks, num_blockx, num_blocky, i, numCirclesPerPartition);
hipDeviceSynchronize();
            //step2: use a multi-dimensional scan to find the number of circles in each block and their ids
//save 2 1d arrays: the location increment in the array, the separators.
//(1) scan the array obtained above
// double time2 = CycleTimer::currentSeconds();
// time2_sum += (time2 - time1_n);
multi_dim_inclusive_scan(total_size_rd_p, num_circ_rd_p, num_total_blocks, circ_cover_flag); //check circ_cover_flag
//(2) concurrent_write id and separators
hipDeviceSynchronize();
// double time3 = CycleTimer::currentSeconds();
// time3_sum += (time3 - time2);
// here we obtain the circ_cover_id
hipLaunchKernelGGL(( concurrent_write_ids), dim3(num_block_1d),dim3(block_size_1d), 0, 0, total_size_p, num_circ_rd_p, numCircles, num_total_blocks, \
circ_cover_flag, circ_cover_id, separators, 0, numCircles); //check circ_cover_id,separators
hipDeviceSynchronize();
// double time4 = CycleTimer::currentSeconds();
// time4_sum += (time4 - time3);
// define dim for block
dim3 blockDimBlock(block_dimx, block_dimy);
dim3 gridDimBlock(num_blockx, num_blocky);
// double timeC = CycleTimer::currentSeconds();
// // time4_sum += (time4 - time3);
// int* separators2; // size:num_total_blocks [num_circ per block]
// hipMalloc((void **)&separators2, sizeof(int) * num_total_blocks);
// int* check_sps2 = new int[num_total_blocks];
// findNumCircsInBlock<<<gridDimBlock, blockDimBlock>>> (separators2, num_total_blocks, num_blockx, num_blocky, block_dimx*block_dimy);
// hipDeviceSynchronize();
// double time4n = CycleTimer::currentSeconds();
// timeC_sum += (time4n - timeC);
// hipMemcpy(check_sps2, separators2, num_total_blocks * sizeof(int),hipMemcpyDeviceToHost);
// printf("\n");
// for (int i = 0; i < num_total_blocks; i++){
// if (i%num_blockx==0) {printf("\n");}
// printf("%d ", check_sps2[i]);
// }
//step3: use the separators and circ_cover_id to render the circle
// define dim for block
hipLaunchKernelGGL(( kernelRenderCircles), dim3(gridDimBlock), dim3(blockDimBlock), 0, 0, separators, circ_cover_id, num_blockx, num_blocky, numCirclesPerPartition);
hipDeviceSynchronize();
double time5 = CycleTimer::currentSeconds();
// time5_sum += (time5 - time4n);
}
// printf("step 1 %f s\n",time2_sum);
// printf("step 2(1) %f s\n",time3_sum);
// printf("step 2(3) %f s\n",time4_sum);
// printf("step 2(c) %f s\n",timeC_sum);
// printf("step 3 %f s \n",time5_sum);
}
else{
int partitionNum = 1;
int numCirclesPerPartition = numCircles / partitionNum;
int num_circ_rd_p = nextPow2(numCirclesPerPartition);
long total_size_p = numCirclesPerPartition * num_total_blocks;
long total_size_rd_p = num_circ_rd_p * num_total_blocks;
// the grid size we process now is total_size;
        int block_size_1d = 512; // can adjust
int num_block_1d = (total_size_rd_p + block_size_1d-1)/block_size_1d;
double time0 = CycleTimer::currentSeconds();
hipMalloc((void **)&circ_cover_flag, sizeof(array_type) * total_size_rd_p);
hipMalloc((void **)&circ_cover_id, sizeof(int) * total_size_p);
hipMalloc((void **)&separators, sizeof(int) * num_total_blocks);
// int* check_sps = new int[num_total_blocks];
// double time1 = CycleTimer::currentSeconds();
// printf("step 0 %f s\n",time1-time0);
// double time2_sum = 0;
// double time3_sum = 0;
// double time4_sum = 0;
// double time5_sum = 0;
// double timeC_sum = 0;
for (int i = 0; i < partitionNum; i++){
// double time1_n = CycleTimer::currentSeconds();
//step1: give status 0/1 to the circ_cover_flag based on coverage
hipLaunchKernelGGL(( findCircsInBlock), dim3(num_block_1d),dim3(block_size_1d), 0, 0, circ_cover_flag, num_total_blocks, num_blockx, num_blocky, i, numCirclesPerPartition);
hipDeviceSynchronize();
            //step2: use a multi-dimensional scan to find the number of circles in each block and their ids
//save 2 1d arrays: the location increment in the array, the separators.
//(1) scan the array obtained above
// double time2 = CycleTimer::currentSeconds();
// time2_sum += (time2 - time1_n);
multi_dim_inclusive_scan(total_size_rd_p, num_circ_rd_p, num_total_blocks, circ_cover_flag); //check circ_cover_flag
//(2) concurrent_write id and separators
hipDeviceSynchronize();
// double time3 = CycleTimer::currentSeconds();
// time3_sum += (time3 - time2);
// here we obtain the circ_cover_id
hipLaunchKernelGGL(( concurrent_write_ids), dim3(num_block_1d),dim3(block_size_1d), 0, 0, total_size_p, num_circ_rd_p, numCircles, num_total_blocks, \
circ_cover_flag, circ_cover_id, separators, 0, numCircles); //check circ_cover_id,separators
hipDeviceSynchronize();
// double time4 = CycleTimer::currentSeconds();
// time4_sum += (time4 - time3);
// define dim for block
dim3 blockDimBlock(block_dimx, block_dimy);
dim3 gridDimBlock(num_blockx, num_blocky);
// double timeC = CycleTimer::currentSeconds();
// // time4_sum += (time4 - time3);
// int* separators2; // size:num_total_blocks [num_circ per block]
// hipMalloc((void **)&separators2, sizeof(int) * num_total_blocks);
// int* check_sps2 = new int[num_total_blocks];
// findNumCircsInBlock<<<gridDimBlock, blockDimBlock>>> (separators2, num_total_blocks, num_blockx, num_blocky, block_dimx*block_dimy);
// hipDeviceSynchronize();
// double time4n = CycleTimer::currentSeconds();
// timeC_sum += (time4n - timeC);
// hipMemcpy(check_sps2, separators2, num_total_blocks * sizeof(int),hipMemcpyDeviceToHost);
// printf("\n");
// for (int i = 0; i < num_total_blocks; i++){
// if (i%num_blockx==0) {printf("\n");}
// printf("%d ", check_sps2[i]);
// }
//step3: use the separators and circ_cover_id to render the circle
// define dim for block
hipLaunchKernelGGL(( kernelRenderCircles), dim3(gridDimBlock), dim3(blockDimBlock), 0, 0, separators, circ_cover_id, num_blockx, num_blocky, numCirclesPerPartition);
hipDeviceSynchronize();
double time5 = CycleTimer::currentSeconds();
// time5_sum += (time5 - time4n);
}
// printf("step 1 %f s\n",time2_sum);
// printf("step 2(1) %f s\n",time3_sum);
// printf("step 2(3) %f s\n",time4_sum);
// printf("step 2(c) %f s\n",timeC_sum);
// printf("step 3 %f s \n",time5_sum);
}
    //step4: small size
//-------
hipFree(circ_cover_flag);
hipFree(circ_cover_id);
hipFree(separators);
}
| d5555a604ecad8e93994724172ac4b92db8ae850.cu | #include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "cudaRenderer.h"
#include "image.h"
#include "noise.h"
#include "sceneLoader.h"
#include "util.h"
#include "CycleTimer.h"
// add thrust
// #include <thrust/scan.h>
// #include <thrust/sort.h>
// #include <thrust/device_ptr.h>
// #include <thrust/device_malloc.h>
// #include <thrust/device_free.h>
// #include <thrust/execution_policy.h>
// #define array_type short
#define array_type unsigned short
#define BLOCK_DIM_X 32
#define BLOCK_DIM_Y 32
__managed__ int num_ones;
/* Helper function to round up to a power of 2.
*/
static inline long long int nextPow2(long long int n)
{
n--;
n |= n >> 1;
n |= n >> 2;
n |= n >> 4;
n |= n >> 8;
n |= n >> 16;
n++;
return n;
}
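/* Example of the bit-smearing trick above: nextPow2(12) first decrements to 11
   (0b1011), the shift/or cascade smears the top bit downward to give 0b1111 = 15,
   and the final increment yields 16. Inputs that are already powers of two are
   returned unchanged, e.g. nextPow2(16) == 16. */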
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
struct GlobalConstants {
SceneName sceneName;
int numCircles;
float* position;
float* velocity;
float* color;
float* radius;
int imageWidth;
int imageHeight;
float* imageData;
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU. (we didn't talk
// about this type of memory in class, but constant memory is a fast
// place to put read-only variables).
__constant__ GlobalConstants cuConstRendererParams;
// read-only lookup tables used to quickly compute noise (needed by
// advanceAnimation for the snowflake scene)
__constant__ int cuConstNoiseYPermutationTable[256];
__constant__ int cuConstNoiseXPermutationTable[256];
__constant__ float cuConstNoise1DValueTable[256];
// color ramp table needed for the color ramp lookup shader
#define COLOR_MAP_SIZE 5
__constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3];
// including parts of the CUDA code from external files to keep this
// file simpler and to separate code that should not be modified
#include "noiseCuda.cu_inl"
#include "lookupColor.cu_inl"
// kernelClearImageSnowflake -- (CUDA device code)
//
// Clear the image, setting the image to the white-gray gradation that
// is used in the snowflake image
__global__ void kernelClearImageSnowflake() {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float shade = .4f + .45f * static_cast<float>(height-imageY) / height;
float4 value = make_float4(shade, shade, shade, 1.f);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
    // up as four separate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelClearImage -- (CUDA device code)
//
// Clear the image, setting all pixels to the specified color rgba
__global__ void kernelClearImage(float r, float g, float b, float a) {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float4 value = make_float4(r, g, b, a);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
    // up as four separate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelAdvanceFireWorks
//
// Update the position of the fireworks (if circle is firework)
__global__ void kernelAdvanceFireWorks() {
const float dt = 1.f / 60.f;
const float pi = 3.14159;
const float maxDist = 0.25f;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
float* radius = cuConstRendererParams.radius;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update
return;
}
// determine the fire-work center/spark indices
int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS;
int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS;
int index3i = 3 * fIdx;
int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx;
int index3j = 3 * sIdx;
float cx = position[index3i];
float cy = position[index3i+1];
// update position
position[index3j] += velocity[index3j] * dt;
position[index3j+1] += velocity[index3j+1] * dt;
// fire-work sparks
float sx = position[index3j];
float sy = position[index3j+1];
// compute vector from firework-spark
float cxsx = sx - cx;
float cysy = sy - cy;
// compute distance from fire-work
float dist = sqrt(cxsx * cxsx + cysy * cysy);
if (dist > maxDist) { // restore to starting position
// random starting position on fire-work's rim
float angle = (sfIdx * 2 * pi)/NUM_SPARKS;
float sinA = sin(angle);
float cosA = cos(angle);
float x = cosA * radius[fIdx];
float y = sinA * radius[fIdx];
position[index3j] = position[index3i] + x;
position[index3j+1] = position[index3i+1] + y;
position[index3j+2] = 0.0f;
// travel scaled unit length
velocity[index3j] = cosA/5.0;
velocity[index3j+1] = sinA/5.0;
velocity[index3j+2] = 0.0f;
}
}
// kernelAdvanceHypnosis
//
// Update the radius/color of the circles
__global__ void kernelAdvanceHypnosis() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* radius = cuConstRendererParams.radius;
float cutOff = 0.5f;
    // place circle back in center after reaching threshold radius
if (radius[index] > cutOff) {
radius[index] = 0.02f;
} else {
radius[index] += 0.01f;
}
}
// kernelAdvanceBouncingBalls
//
// Update the position of the balls
__global__ void kernelAdvanceBouncingBalls() {
const float dt = 1.f / 60.f;
const float kGravity = -2.8f; // sorry Newton
const float kDragCoeff = -0.8f;
const float epsilon = 0.001f;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
int index3 = 3 * index;
// reverse velocity if center position < 0
float oldVelocity = velocity[index3+1];
float oldPosition = position[index3+1];
if (oldVelocity == 0.f && oldPosition == 0.f) { // stop-condition
return;
}
if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce ball
velocity[index3+1] *= kDragCoeff;
}
// update velocity: v = u + at (only along y-axis)
velocity[index3+1] += kGravity * dt;
// update positions (only along y-axis)
position[index3+1] += velocity[index3+1] * dt;
if (fabsf(velocity[index3+1] - oldVelocity) < epsilon
&& oldPosition < 0.0f
&& fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball
velocity[index3+1] = 0.f;
position[index3+1] = 0.f;
}
}
// kernelAdvanceSnowflake -- (CUDA device code)
//
// move the snowflake animation forward one time step. Updates circle
// positions and velocities. Note how the position of the snowflake
// is reset if it moves off the left, right, or bottom of the screen.
__global__ void kernelAdvanceSnowflake() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
const float dt = 1.f / 60.f;
const float kGravity = -1.8f; // sorry Newton
const float kDragCoeff = 2.f;
int index3 = 3 * index;
float* positionPtr = &cuConstRendererParams.position[index3];
float* velocityPtr = &cuConstRendererParams.velocity[index3];
// loads from global memory
float3 position = *((float3*)positionPtr);
float3 velocity = *((float3*)velocityPtr);
// hack to make farther circles move more slowly, giving the
// illusion of parallax
float forceScaling = fmin(fmax(1.f - position.z, .1f), 1.f); // clamp
// add some noise to the motion to make the snow flutter
float3 noiseInput;
noiseInput.x = 10.f * position.x;
noiseInput.y = 10.f * position.y;
noiseInput.z = 255.f * position.z;
float2 noiseForce = cudaVec2CellNoise(noiseInput, index);
noiseForce.x *= 7.5f;
noiseForce.y *= 5.f;
// drag
float2 dragForce;
dragForce.x = -1.f * kDragCoeff * velocity.x;
dragForce.y = -1.f * kDragCoeff * velocity.y;
// update positions
position.x += velocity.x * dt;
position.y += velocity.y * dt;
// update velocities
velocity.x += forceScaling * (noiseForce.x + dragForce.y) * dt;
velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt;
float radius = cuConstRendererParams.radius[index];
// if the snowflake has moved off the left, right or bottom of
// the screen, place it back at the top and give it a
// pseudorandom x position and velocity.
if ( (position.y + radius < 0.f) ||
(position.x + radius) < -0.f ||
(position.x - radius) > 1.f)
{
noiseInput.x = 255.f * position.x;
noiseInput.y = 255.f * position.y;
noiseInput.z = 255.f * position.z;
noiseForce = cudaVec2CellNoise(noiseInput, index);
position.x = .5f + .5f * noiseForce.x;
position.y = 1.35f + radius;
// restart from 0 vertical velocity. Choose a
// pseudo-random horizontal velocity.
velocity.x = 2.f * noiseForce.y;
velocity.y = 0.f;
}
// store updated positions and velocities to global memory
*((float3*)positionPtr) = position;
*((float3*)velocityPtr) = velocity;
}
// shadePixel -- (CUDA device code)
//
// given a pixel and a circle, determines the contribution to the
// pixel from the circle. Update of the image is done in this
// function. Called by kernelRenderCircles()
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {
float diffX = p.x - pixelCenter.x;
float diffY = p.y - pixelCenter.y;
float pixelDist = diffX * diffX + diffY * diffY;
    float rad = cuConstRendererParams.radius[circleIndex];
float maxDist = rad * rad;
// circle does not contribute to the image
if (pixelDist > maxDist)
return;
float3 rgb;
float alpha;
// there is a non-zero contribution. Now compute the shading value
// This conditional is in the inner loop, but it evaluates the
// same direction for all threads so it's cost is not so
// bad. Attempting to hoist this conditional is not a required
// student optimization in Assignment 2
if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {
const float kCircleMaxAlpha = .5f;
const float falloffScale = 4.f;
float normPixelDist = sqrt(pixelDist) / rad;
rgb = lookupColor(normPixelDist);
float maxAlpha = .6f + .4f * (1.f-p.z);
maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
} else {
// simple: each circle has an assigned color
int index3 = 3 * circleIndex;
rgb = *(float3*)&(cuConstRendererParams.color[index3]);
alpha = .5f;
}
float oneMinusAlpha = 1.f - alpha;
// BEGIN SHOULD-BE-ATOMIC REGION
// global memory read
float4 existingColor = *imagePtr;
float4 newColor;
newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
newColor.w = alpha + existingColor.w;
// global memory write
*imagePtr = newColor;
// END SHOULD-BE-ATOMIC REGION
}
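// The blend in shadePixel is standard "over" compositing: for each color channel,
// new = alpha * rgb + (1 - alpha) * existing, while the alpha channel simply
// accumulates. Because the result depends on the order in which circles are
// applied, every kernel that calls shadePixel must visit circles in increasing
// index order, and a given pixel must be updated by only one thread at a time
// (hence the "should-be-atomic" region above).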
////////////////////////////////////////////////////////////////////////////////////////
CudaRenderer::CudaRenderer() {
image = NULL;
numCircles = 0;
position = NULL;
velocity = NULL;
color = NULL;
radius = NULL;
cudaDevicePosition = NULL;
cudaDeviceVelocity = NULL;
cudaDeviceColor = NULL;
cudaDeviceRadius = NULL;
cudaDeviceImageData = NULL;
}
CudaRenderer::~CudaRenderer() {
if (image) {
delete image;
}
if (position) {
delete [] position;
delete [] velocity;
delete [] color;
delete [] radius;
}
if (cudaDevicePosition) {
cudaFree(cudaDevicePosition);
cudaFree(cudaDeviceVelocity);
cudaFree(cudaDeviceColor);
cudaFree(cudaDeviceRadius);
cudaFree(cudaDeviceImageData);
}
}
const Image*
CudaRenderer::getImage() {
// need to copy contents of the rendered image from device memory
// before we expose the Image object to the caller
printf("Copying image data from device\n");
cudaMemcpy(image->data,
cudaDeviceImageData,
sizeof(float) * 4 * image->width * image->height,
cudaMemcpyDeviceToHost);
return image;
}
void
CudaRenderer::loadScene(SceneName scene) {
sceneName = scene;
loadCircleScene(sceneName, numCircles, position, velocity, color, radius);
}
void
CudaRenderer::setup() {
int deviceCount = 0;
std::string name;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Initializing CUDA for CudaRenderer\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
name = deviceProps.name;
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
// By this time the scene should be loaded. Now copy all the key
// data structures into device memory so they are accessible to
// CUDA kernels
//
// See the CUDA Programmer's Guide for descriptions of
// cudaMalloc and cudaMemcpy
cudaMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
cudaMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
cudaMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, cudaMemcpyHostToDevice);
// Initialize parameters in constant memory. We didn't talk about
// constant memory in class, but the use of read-only constant
// memory here is an optimization over just sticking these values
// in device global memory. NVIDIA GPUs have a few special tricks
// for optimizing access to constant memory. Using global memory
// here would have worked just as well. See the Programmer's
// Guide for more information about constant memory.
GlobalConstants params;
params.sceneName = sceneName;
params.numCircles = numCircles;
params.imageWidth = image->width;
params.imageHeight = image->height;
params.position = cudaDevicePosition;
params.velocity = cudaDeviceVelocity;
params.color = cudaDeviceColor;
params.radius = cudaDeviceRadius;
params.imageData = cudaDeviceImageData;
cudaMemcpyToSymbol(cuConstRendererParams, ¶ms, sizeof(GlobalConstants));
// also need to copy over the noise lookup tables, so we can
// implement noise on the GPU
int* permX;
int* permY;
float* value1D;
getNoiseTables(&permX, &permY, &value1D);
cudaMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256);
cudaMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256);
cudaMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256);
// last, copy over the color table that's used by the shading
// function for circles in the snowflake demo
float lookupTable[COLOR_MAP_SIZE][3] = {
{1.f, 1.f, 1.f},
{1.f, 1.f, 1.f},
{.8f, .9f, 1.f},
{.8f, .9f, 1.f},
{.8f, 0.8f, 1.f},
};
cudaMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE);
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
void
CudaRenderer::allocOutputImage(int width, int height) {
if (image)
delete image;
image = new Image(width, height);
}
// clearImage --
//
// Clear's the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
void
CudaRenderer::clearImage() {
// 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
// dim3 blockDim(32, 32, 1);
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) {
kernelClearImageSnowflake<<<gridDim, blockDim>>>();
} else {
kernelClearImage<<<gridDim, blockDim>>>(1.f, 1.f, 1.f, 1.f);
}
cudaDeviceSynchronize();
}
// advanceAnimation --
//
// Advance the simulation one time step. Updates all circle positions
// and velocities
void
CudaRenderer::advanceAnimation() {
// 256 threads per block is a healthy number
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
// only the snowflake scene has animation
if (sceneName == SNOWFLAKES) {
kernelAdvanceSnowflake<<<gridDim, blockDim>>>();
} else if (sceneName == BOUNCING_BALLS) {
kernelAdvanceBouncingBalls<<<gridDim, blockDim>>>();
} else if (sceneName == HYPNOSIS) {
kernelAdvanceHypnosis<<<gridDim, blockDim>>>();
} else if (sceneName == FIREWORKS) {
kernelAdvanceFireWorks<<<gridDim, blockDim>>>();
}
cudaDeviceSynchronize();
}
//----------------- the code above is unchanged starter code
__global__ void
incl_sweep_up(int N, int dim, int twod, int twod1, array_type* output) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N && ( (index/dim) % twod1 ==0) )
{output[index+ dim*(twod1 -1)] += output[index+ dim*(twod -1)];}
}
__global__ void
incl_sweep_down(int N, int dim, int twod, int twod1, array_type* output) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if ( (dim-1< index) && (index < N) && ( (index/dim) % twod1 ==0) ){
output[index+ dim*(twod-1)] += output[index- dim*1];}
}
//--- the above should be correct
__global__ void
obtain_seperator(int total_size, int num_circ, int num_circ_true, int num_boxes, array_type* circ_cover_flag, int* separators, int partitionId, int partitionNum){
//
int index = blockIdx.x * blockDim.x + threadIdx.x;
// int circleid = index/num_boxes + partitionId * partitionNum;
//update the separators by the way
if (index<num_boxes) {separators[index]=circ_cover_flag[(num_circ_true-1)*num_boxes+index];}
// printf(" separa %d, loca %d", separators[index], (num_circ_true-1)*num_boxes+index);}
}
__global__ void
concurrent_write_ids(int total_size, int num_circ, int num_circ_true, int num_boxes, array_type* circ_cover_flag, int* circ_cover_id, int* separators, int partitionId, int partitionNum){
//
int index = blockIdx.x * blockDim.x + threadIdx.x;
int circleid = index/num_boxes;
int blockid = index%num_boxes; // index-num_boxes*circleid;
if (index<total_size){
if (circleid==0){
if (circ_cover_flag[index]==1){
int new_loc = num_circ_true*blockid;
//printf("index %d, new_loc %d", index, new_loc);
circ_cover_id[new_loc]=0;}}
else{
// if (circleid>0){
if ( circ_cover_flag[index] - circ_cover_flag[index-num_boxes] ==1){
int new_loc = blockid*num_circ_true +circ_cover_flag[index] -1;
//if (circ_cover_flag[index]==2){ }
circ_cover_id[new_loc] = circleid + partitionId * partitionNum; }
}
}
//update the separators by the way
if (index<num_boxes) {separators[index]=circ_cover_flag[(num_circ_true-1)*num_boxes+index];}
// printf(" separa %d, loca %d", separators[index], (num_circ_true-1)*num_boxes+index);}
}
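// After multi_dim_inclusive_scan, circ_cover_flag[circle*num_boxes + block] holds
// the number of covering circles with index <= circle for that block.
// concurrent_write_ids turns that into a compaction: a circle whose scanned value is
// one larger than the previous circle's is a new cover, and its id is written to
// circ_cover_id[block*num_circ_true + scanned - 1]; the last scanned value of each
// block becomes separators[block]. Illustrative example for one block with raw flags
// 1,0,1,0 over circles 0..3: the scan gives 1,1,2,2, circles 0 and 2 land in slots 0
// and 1 of that block's segment, and the separator is 2.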
__global__ void
concurrent_write_ids_v2(int total_size, int num_circ, int num_circ_true, int num_box_max, int num_boxes, array_type* circ_cover_flag, int* circ_cover_id, int partitionId, int partitionNum){
//
int index = blockIdx.x * blockDim.x + threadIdx.x;
int circleid = index/num_boxes;
int blockid = index%num_boxes; // index-num_boxes*circleid;
if (index<total_size){
if (circleid==0){
if (circ_cover_flag[index]==1){
int new_loc = num_box_max*blockid;
//printf("index %d, new_loc %d", index, new_loc);
circ_cover_id[new_loc]=0;}}
else{
// if (circleid>0){
if ( circ_cover_flag[index] ==1){
int new_loc = blockid*num_box_max +circ_cover_flag[index] -1;
//if (circ_cover_flag[index]==2){ }
circ_cover_id[new_loc] = circleid + partitionId * partitionNum; }
}
}
    // open question: would sorting (box_id, circle_id) pairs here be more efficient?
}
void multi_dim_inclusive_scan(int N, int lens, int dim, array_type* device_result){
int blocksize = 512;
int num_blocks = (N+blocksize-1)/blocksize;
// printf("N=%d,block size = %d, number of blocks %d \n",N,blocksize,num_blocks);
for (int twod =1; twod <lens; twod *=2){
int twod1 = twod*2;
incl_sweep_up<<< num_blocks, blocksize >>>(N, dim, twod, twod1, device_result);
}
for (int twod = lens/4; twod >=1; twod /=2){
int twod1 = twod*2;
incl_sweep_down<<< num_blocks, blocksize >>>(N, dim, twod, twod1, device_result);
}
}
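// multi_dim_inclusive_scan runs `dim` independent inclusive scans of length `lens`
// in one pass: element j of scan b sits at device_result[j*dim + b], so consecutive
// elements of the same scan are `dim` apart. Small illustrative example with
// dim = 2, lens = 4 and interleaved input a0 b0 a1 b1 a2 b2 a3 b3: the result
// interleaves the running prefix sums of the a's and of the b's. `lens` must be a
// power of two, which is why the callers round with nextPow2.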
#include "circleBoxTest.cu_inl"
__global__ void findCircsInBlock(array_type* circ_cover_flag, int num_total_blocks, int num_blockx, int num_blocky, int partitionId, int partitionNum) {
// step1: find the circle idx and find the block idx
int Idx = blockDim.x * blockIdx.x + threadIdx.x; // B*numCircles
if (Idx>= partitionNum*num_total_blocks) {return;}
int circleId = Idx / num_total_blocks + partitionId * partitionNum; //obtain the circle Id
// int circleId = Idx / num_total_blocks; //obtain the circle Id
int blockId = Idx % num_total_blocks; //obtain the block Id
    // step2: determine whether this circle overlaps this block
    // (using the helpers from circleBoxTest.cu_inl)
//step2.1 obtain the block size
// image params
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
// compute the size of block
int blockId_dimX = blockId % num_blockx;
int blockId_dimY = blockId / num_blockx;
short blockMinX = BLOCK_DIM_X * blockId_dimX;
short blockMaxX = BLOCK_DIM_X * (blockId_dimX + 1);
short blockMinY = BLOCK_DIM_Y * blockId_dimY;
short blockMaxY = BLOCK_DIM_Y * (blockId_dimY + 1);
float blockL = blockMinX * invWidth;
float blockR = blockMaxX * invWidth;
float blockB = blockMinY * invHeight;
float blockT = blockMaxY * invHeight;
//step2.2 obtain the circle size
int index3 = 3 * circleId;
    // read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[circleId];
// use the circleInBoxConservative in circleBoxTest.cu_inl
if( circleInBoxConservative(p.x, p.y, rad, blockL, blockR, blockT, blockB) ){
circ_cover_flag[Idx] = 1;
}
else{
circ_cover_flag[Idx] = 0;
}
__syncthreads();
}
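// circleInBoxConservative (from circleBoxTest.cu_inl) is, as its name suggests, a
// conservative test and may flag circles that do not actually touch the block.
// That is harmless for correctness because shadePixel re-checks the exact
// pixel-to-center distance and skips non-contributing circles; false positives only
// cost extra loop iterations in the render kernels.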
__global__ void findNumCircsInBlock(int* separators, int num_total_blocks, int num_blockx, int num_blocky, int numPartitions) {
// Aim to find separators not via multi_dim_inclusive_scan
// check sharedMem at https://www.cnblogs.com/xiaoxiaoyibu/p/11402607.html ; to optimize memory access
__shared__ int numCirclesPerPixel[BLOCK_DIM_X * BLOCK_DIM_Y];
int numPixels = BLOCK_DIM_X * BLOCK_DIM_Y;
int blockId = blockIdx.y * num_blockx + blockIdx.x;
if (blockId >= num_total_blocks){return;}
int pixelId = threadIdx.y * BLOCK_DIM_X + threadIdx.x;
//step2.1 obtain the block size
// image params
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
// compute the size of block
int blockId_dimX = blockId % num_blockx;
int blockId_dimY = blockId / num_blockx;
short blockMinX = BLOCK_DIM_X * blockId_dimX;
short blockMaxX = BLOCK_DIM_X * (blockId_dimX + 1);
short blockMinY = BLOCK_DIM_Y * blockId_dimY;
short blockMaxY = BLOCK_DIM_Y * (blockId_dimY + 1);
float blockL = blockMinX * invWidth;
float blockR = blockMaxX * invWidth;
float blockB = blockMinY * invHeight;
float blockT = blockMaxY * invHeight;
//step2.2 obtain the circle size
//Each thread would take responsibility for partition of Circles
int numCirclesPerPartition = (cuConstRendererParams.numCircles + numPartitions - 1) / numPartitions;
// obtain the start and end
int start = numCirclesPerPartition * pixelId;
int end = numCirclesPerPartition * (pixelId+1);
if (pixelId == (numPixels - 1)){
end = cuConstRendererParams.numCircles;
}
int numCirclesInBlockPartition = 0;
    // Count how many circles of this thread's partition fall into this block;
    // the per-block total is written to separators[blockId] after the reduction.
for (int i = start; i <end; i++){
if (i >= cuConstRendererParams.numCircles){return;}
int index3 = 3 * i;
        // read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[i];
// use the circleInBoxConservative in circleBoxTest.cu_inl
if( circleInBoxConservative(p.x, p.y, rad, blockL, blockR, blockT, blockB) ){
numCirclesInBlockPartition += 1;
}
}
// such that we can have in each thread how many circles are in this block
// then we do what? we want to sum up of the numCirclesInBlockPartition
numCirclesPerPixel[pixelId] = numCirclesInBlockPartition;
__syncthreads();
// parallel reduction
for (unsigned int j = numPixels / 2; j > 0; j >>= 1)
{
if (pixelId < j)
numCirclesPerPixel[pixelId] += numCirclesPerPixel[pixelId + j];
__syncthreads();
}
if (pixelId == 0)
separators[blockId] = numCirclesPerPixel[0];
}
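// The loop above is a classic tree reduction in shared memory: with
// numPixels = BLOCK_DIM_X * BLOCK_DIM_Y = 1024 threads, the stride j goes
// 512, 256, ..., 1, halving the number of active threads each step, and after
// log2(1024) = 10 iterations numCirclesPerPixel[0] holds the block's total count.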
__global__ void findCircCoverIdInBlock(int* separators, int* circ_cover_flag, int num_total_blocks, int num_blockx, int num_blocky, int numPartitions) {
    // Unfinished attempt: find circ_cover_id without multi_dim_inclusive_scan, using the separators computed earlier
// check sharedMem at https://www.cnblogs.com/xiaoxiaoyibu/p/11402607.html ; to optimize memory access
__shared__ int numCirclesPerPixel[BLOCK_DIM_X * BLOCK_DIM_Y];
int numPixels = BLOCK_DIM_X * BLOCK_DIM_Y;
int blockId = blockIdx.y * num_blockx + blockIdx.x;
if (blockId >= num_total_blocks){return;}
int pixelId = threadIdx.y * BLOCK_DIM_X + threadIdx.x;
int numCirclesWithinThisBlock = 0;
// obtain #circle within this block
    numCirclesWithinThisBlock = separators[blockId];
// initialize a shared mem with size numCirclesWithinThisBlock
// __shared__ int circCoverIdThisBlock[numCirclesWithinThisBlock];
extern __shared__ int circCoverIdThisBlock[];
    // similar to findNumCircsInBlock, but the circle ids also need to be collected in sorted order
//step2.1 obtain the block size
// image params
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
// compute the size of block
int blockId_dimX = blockId % num_blockx;
int blockId_dimY = blockId / num_blockx;
short blockMinX = BLOCK_DIM_X * blockId_dimX;
short blockMaxX = BLOCK_DIM_X * (blockId_dimX + 1);
short blockMinY = BLOCK_DIM_Y * blockId_dimY;
short blockMaxY = BLOCK_DIM_Y * (blockId_dimY + 1);
float blockL = blockMinX * invWidth;
float blockR = blockMaxX * invWidth;
float blockB = blockMinY * invHeight;
float blockT = blockMaxY * invHeight;
//step2.2 obtain the circle size
//Each thread would take responsibility for partition of Circles
int numCirclesPerPartition = (cuConstRendererParams.numCircles + numPartitions - 1) / numPartitions;
// obtain the start and end
int start = numCirclesPerPartition * pixelId;
int end = numCirclesPerPartition * (pixelId+1);
if (pixelId == (BLOCK_DIM_X * BLOCK_DIM_Y - 1)){
end = cuConstRendererParams.numCircles;
}
int numCirclesInBlockPartition = 0;
    // Count how many circles of this thread's partition fall into this block;
    // the per-block total is written to separators[blockId] after the reduction.
for (int i = start; i < end; i++){
if (i >= cuConstRendererParams.numCircles){return;}
int index3 = 3 * i;
        // read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[i];
// use the circleInBoxConservative in circleBoxTest.cu_inl
if( circleInBox(p.x, p.y, rad, blockL, blockR, blockT, blockB) ){
numCirclesInBlockPartition += 1;
}
}
// such that we can have in each thread how many circles are in this block
// then we do what? we want to sum up of the numCirclesInBlockPartition
numCirclesPerPixel[pixelId] = numCirclesInBlockPartition;
__syncthreads();
// parallel reduction
for (unsigned int j = numPixels / 2; j > 0; j >>= 1)
{
if (pixelId < j)
numCirclesPerPixel[pixelId] += numCirclesPerPixel[pixelId + j];
__syncthreads();
}
if (pixelId == 0)
separators[blockId] = numCirclesPerPixel[0];
}
__inline__ __device__ void
incl_scan_shared_mem(int threadIndex, unsigned int* Input, int size){
for(int twod = 1; twod < size; twod <<= 1){
int twod1 = twod*2;
if((threadIndex & (twod1 - 1)) == 0)
{Input[threadIndex+ (twod1 -1)] += Input[threadIndex+ (twod -1)];}
__syncthreads();
}
for(int twod = size/4; twod >=1; twod >>= 1){
int twod1 = twod*2;
if((threadIndex>0) && ((threadIndex & (twod1 - 1)) == 0))
{Input[threadIndex+ twod -1] += Input[threadIndex -1];}
__syncthreads();
}
}
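// incl_scan_shared_mem is an in-place, work-efficient (up-sweep / down-sweep)
// inclusive scan; `size` must be a power of two, which holds here since
// BLOCK_DIM_X * BLOCK_DIM_Y = 1024. Worked example with size = 4 and input
// [a, b, c, d]: the up-sweep produces [a, a+b, c, a+b+c+d] and the down-sweep fills
// in the missing prefix, giving [a, a+b, a+b+c, a+b+c+d].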
__global__ void kernelRenderCircles_shared_mem(int* separators, int num_total_blocks, int num_blockx, int num_blocky, int numPartitions) {
    // Partition numCircles across the threads of the block; this cannot be made fully parallel because of the multi-dimensional scan.
// Use sharedMem to optimize memory access
__shared__ unsigned int numCirclesPerPixel[BLOCK_DIM_X * BLOCK_DIM_Y];
int numPixels = BLOCK_DIM_X * BLOCK_DIM_Y;
int blockId = blockIdx.y * num_blockx + blockIdx.x;
if (blockId >= num_total_blocks){return;}
int pixelId = threadIdx.y * BLOCK_DIM_X + threadIdx.x;
//step2.1 obtain the block size
// image params
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
// compute the size of block
int blockId_dimX = blockId % num_blockx;
int blockId_dimY = blockId / num_blockx;
short blockMinX = BLOCK_DIM_X * blockId_dimX;
short blockMaxX = BLOCK_DIM_X * (blockId_dimX + 1);
short blockMinY = BLOCK_DIM_Y * blockId_dimY;
short blockMaxY = BLOCK_DIM_Y * (blockId_dimY + 1);
float blockL = blockMinX * invWidth;
float blockR = blockMaxX * invWidth;
float blockB = blockMinY * invHeight;
float blockT = blockMaxY * invHeight;
//step2.2 obtain the circle size
//Each thread would take responsibility for partition of Circles
int numCirclesPerPartition = (cuConstRendererParams.numCircles + numPartitions - 1) / numPartitions;
// obtain the start and end
int start = numCirclesPerPartition * pixelId;
int end = numCirclesPerPartition * (pixelId+1);
if (pixelId == (numPixels - 1)){
end = cuConstRendererParams.numCircles;
}
int numCirclesInBlockPartition = 0;
int circ_cover_id_p[100];
    // Find which circles of this thread's partition fall into this block and update separators[blockId];
    // record the covering circle ids in a local array (circ_cover_id_p).
for (int i = start; i < end; i++){
if(i < cuConstRendererParams.numCircles){
int index3 = 3 * i;
            // read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[i];
// use the circleInBoxConservative in circleBoxTest.cu_inl
if(circleInBoxConservative(p.x, p.y, rad, blockL, blockR, blockT, blockB) ){
circ_cover_id_p[numCirclesInBlockPartition] = i;
numCirclesInBlockPartition += 1;
}
}
}
    // At this point each thread knows how many circles of its partition fall into this block;
    // next, sum the per-thread counts (numCirclesInBlockPartition) across the block.
numCirclesPerPixel[pixelId] = numCirclesInBlockPartition;
__syncthreads();
    // Run an inclusive scan over the per-thread counts and update separators; the separators can be checked afterwards.
incl_scan_shared_mem(pixelId, numCirclesPerPixel,BLOCK_DIM_X * BLOCK_DIM_Y);
__syncthreads();
int totalCircles = numCirclesPerPixel[numPixels - 1];
separators[blockId] = numCirclesPerPixel[numPixels - 1];
__syncthreads();
__shared__ int circ_cover_id_b[3000]; // 2500 is enough for circleInBox()
int startAddr = 0;
if (pixelId != 0) {startAddr = numCirclesPerPixel[pixelId - 1];}
    // // note: startAddr reads the scanned counts, which are only valid after the __syncthreads() following the scan
for (int i =0; i < numCirclesInBlockPartition; i++){
circ_cover_id_b[i + startAddr] = circ_cover_id_p[i];
}
__syncthreads();
// parallel reduction
    // no need for a parallel reduction; the inclusive scan above already gives the total
// for (unsigned int j = numPixels / 2; j > 0; j >>= 1)
// {
// if (pixelId < j)
// numCirclesPerPixel[pixelId] += numCirclesPerPixel[pixelId + j];
// __syncthreads();
// }
// if (pixelId == 0)
// separators[blockId] = numCirclesPerPixel[0];
    // // rendering directly here is fine; we don't need a separate render pass
// // pixel data
int pixelX = blockIdx.x * blockDim.x + threadIdx.x;
int pixelY = blockIdx.y * blockDim.y + threadIdx.y;
if (pixelY >= imageHeight || pixelX >= imageWidth) return;
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelX) + 0.5f),
invHeight * (static_cast<float>(pixelY) + 0.5f));
float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixelY * imageWidth + pixelX)]);
for (int i = 0; i < totalCircles; i ++){
int circleIdx = circ_cover_id_b[i];
int index3 = circleIdx * 3;
        // read position and radius, then use shadePixel to update the pixel
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
shadePixel(circleIdx, pixelCenterNorm, p, imgPtr);
}
}
__global__ void kernelRenderCircles_shared_mem_skip(int* separators, int num_total_blocks, int num_blockx, int num_blocky, int numPartitions) {
    // Partition numCircles across the threads of the block; this cannot be made fully parallel because of the multi-dimensional scan.
    // Skip the first several circles, since most of them end up completely covered by later circles.
    // This heuristic is applied to the large scenes: rand and biglittle.
__shared__ unsigned int numCirclesPerPixel[BLOCK_DIM_X * BLOCK_DIM_Y];
int numPixels = BLOCK_DIM_X * BLOCK_DIM_Y;
int blockId = blockIdx.y * num_blockx + blockIdx.x;
if (blockId >= num_total_blocks){return;}
int pixelId = threadIdx.y * BLOCK_DIM_X + threadIdx.x;
//step2.1 obtain the block size
// image params
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
// compute the size of block
int blockId_dimX = blockId % num_blockx;
int blockId_dimY = blockId / num_blockx;
short blockMinX = BLOCK_DIM_X * blockId_dimX;
short blockMaxX = BLOCK_DIM_X * (blockId_dimX + 1);
short blockMinY = BLOCK_DIM_Y * blockId_dimY;
short blockMaxY = BLOCK_DIM_Y * (blockId_dimY + 1);
float blockL = blockMinX * invWidth;
float blockR = blockMaxX * invWidth;
float blockB = blockMinY * invHeight;
float blockT = blockMaxY * invHeight;
//step2.2 obtain the circle size
//Each thread would take responsibility for partition of Circles
int numCirclesPerPartition = (cuConstRendererParams.numCircles + numPartitions - 1) / numPartitions;
// obtain the start and end
int start = numCirclesPerPartition * pixelId;
int end = numCirclesPerPartition * (pixelId+1);
if (pixelId == (numPixels - 1)){
end = cuConstRendererParams.numCircles;
}
int numCirclesInBlockPartition = 0;
int circ_cover_id_p[100];
    // Find which circles of this thread's partition fall into this block and update separators[blockId];
    // record the covering circle ids in a local array (circ_cover_id_p).
for (int i = start; i < end; i++){
if(i < cuConstRendererParams.numCircles){
int index3 = 3 * i;
            // read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[i];
// use the circleInBoxConservative in circleBoxTest.cu_inl
if(circleInBoxConservative(p.x, p.y, rad, blockL, blockR, blockT, blockB) ){
circ_cover_id_p[numCirclesInBlockPartition] = i;
numCirclesInBlockPartition += 1;
}
}
}
    // At this point each thread knows how many circles of its partition fall into this block;
    // next, sum the per-thread counts (numCirclesInBlockPartition) across the block.
numCirclesPerPixel[pixelId] = numCirclesInBlockPartition;
__syncthreads();
    // Run an inclusive scan over the per-thread counts and update separators; the separators can be checked afterwards.
incl_scan_shared_mem(pixelId, numCirclesPerPixel,BLOCK_DIM_X * BLOCK_DIM_Y);
__syncthreads();
int totalCircles = numCirclesPerPixel[numPixels - 1];
separators[blockId] = numCirclesPerPixel[numPixels - 1];
__syncthreads();
// printf("%d ", totalCircles);
// update the block-wise circ_cover_id here
__shared__ int circ_cover_id_b[3000]; // roughly 2500 is enough for circleInBoxConservative()
int startAddr = 0;
if (pixelId != 0) {startAddr = numCirclesPerPixel[pixelId - 1];}
// each thread copies its private ids into the shared list at its scanned offset
for (int i =0; i < numCirclesInBlockPartition; i++){
circ_cover_id_b[i + startAddr] = circ_cover_id_p[i];
}
__syncthreads();
// no need for a parallel reduction here; the inclusive scan above already gives the total
// for (unsigned int j = numPixels / 2; j > 0; j >>= 1)
// {
// if (pixelId < j)
// numCirclesPerPixel[pixelId] += numCirclesPerPixel[pixelId + j];
// __syncthreads();
// }
// if (pixelId == 0)
// separators[blockId] = numCirclesPerPixel[0];
// directly render here; we don't need another render pass
// pixel data
int pixelX = blockIdx.x * blockDim.x + threadIdx.x;
int pixelY = blockIdx.y * blockDim.y + threadIdx.y;
if (pixelY >= imageHeight || pixelX >= imageWidth) return;
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelX) + 0.5f),
invHeight * (static_cast<float>(pixelY) + 0.5f));
float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixelY * imageWidth + pixelX)]);
// Tune where we start skipping circles
int startPlace = 0;
if (totalCircles > 2000) {startPlace = 1700;}
else if (totalCircles > 1500) {startPlace = 1000;}
else if (totalCircles > 1000) {startPlace = 700;}
else if (totalCircles > 700) {startPlace = 500;}
else if (totalCircles > 100) {startPlace = 70;}
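// NOTE: skipping the first startPlace circles assumes they end up fully
// overdrawn by later circles in these heavily overlapped scenes;
// this trades exactness for speed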
for (int i = startPlace; i < totalCircles; i ++){
int circleIdx = circ_cover_id_b[i];
int index3 = circleIdx * 3;
// read position and radius, then use shadePixel to update
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
shadePixel(circleIdx, pixelCenterNorm, p, imgPtr);
}
}
/*
void debug_set1(){
int* debug_flag= new int[20];
int debug[20] = {1,1,0,0,1, 0,0,1,0,0, 0,1,1,0,0, 0,0,0,1,1};
memmove(debug_flag, debug, 20*sizeof(int));
int* debug_flag_result = new int[20];
int* debug_id_result = new int[20];
for (int i = 0; i < 20; i++){
printf("%d ", debug_flag[i]);
}
printf("\n");
int* device_flag;
int* device_id;
int* device_separat;
int N_rd = nextPow2(4);
int B = 5;
int* debug_separators = new int[B];
int total = N_rd*B;
printf("total %d \n",total);
cudaMalloc((void **)&device_flag, sizeof(int) * total);
cudaMalloc((void **)&device_id, sizeof(int) * total);
cudaMalloc((void **)&device_separat, sizeof(int) * B);
cudaMemcpy(device_flag, debug_flag, total * sizeof(int),cudaMemcpyHostToDevice);
multi_dim_inclusive_scan(total, N_rd, B, device_flag);
concurrent_write_ids<<<10,10>>>(total, N_rd, 4, B, device_flag, device_id, device_separat);
cudaMemcpy(debug_flag_result, device_flag, total * sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(debug_id_result, device_id, total * sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(debug_separators, device_separat, B * sizeof(int),cudaMemcpyDeviceToHost);
//cudaMemcpy(debug_id_result, device_id, total * sizeof(int),cudaMemcpyDeviceToHost);
//print
for (int i = 0; i < 20; i++){
printf("%d ", debug_flag_result[i]);
}
printf("\n");
for (int i = 0; i < 20; i++){
printf("%d ", debug_id_result[i]);
}
printf("\n");
for (int i = 0; i < B; i++){
printf("%d ", debug_separators[i]);
}
printf("\n");
}
void debug_set2(){
int* debug_flag= new int[20];
int debug[20] = {1,1,0,0,1, 0,0,1,0,0, 0,1,1,0,0, 0,0,0,1,1};
memmove(debug_flag, debug, 20*sizeof(int));
int* debug_flag_result = new int[20];
int* debug_id_result = new int[20];
for (int i = 0; i < 20; i++){
printf("%d ", debug_flag[i]);
}
printf("\n");
int* device_flag;
int* device_id;
int* device_separat;
int N=5;
int N_rd = nextPow2(N);
int B = 4;
int* debug_separators = new int[B];
int total = N_rd*B;
int total_rd = N_rd*B;
printf("total %d \n",total_rd);
cudaMalloc((void **)&device_flag, sizeof(int) * total_rd);
cudaMalloc((void **)&device_id, sizeof(int) * total_rd);
cudaMalloc((void **)&device_separat, sizeof(int) * B);
cudaMemcpy(device_flag, debug_flag, total * sizeof(int),cudaMemcpyHostToDevice);
multi_dim_inclusive_scan(total_rd, N_rd, B, device_flag);
concurrent_write_ids<<<10,10>>>(total, N_rd, N, B, device_flag, device_id, device_separat);
cudaMemcpy(debug_flag_result, device_flag, total * sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(debug_id_result, device_id, total_rd * sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(debug_separators, device_separat, B * sizeof(int),cudaMemcpyDeviceToHost);
//cudaMemcpy(debug_id_result, device_id, total * sizeof(int),cudaMemcpyDeviceToHost);
//print
for (int i = 0; i < 20; i++){
printf("%d ", debug_flag_result[i]);
}
printf("\n");
for (int i = 0; i < 20; i++){
printf("%d ", debug_id_result[i]);
}
printf("\n");
for (int i = 0; i < B; i++){
printf("%d ", debug_separators[i]);
}
printf("\n");
}
*/
// kernelRenderCircles -- (CUDA device code)
//
// Each thread shades one pixel. The circles overlapping this thread block's
// image tile were precomputed (ids in circ_cover_id, per-block counts in
// separators) and are applied in circle-index order, so the update order
// matches the sequential renderer.
__global__ void kernelRenderCircles(int* seperators, int* circ_cover_id, int num_blockx, int num_blocky, int numCircles) {
// obtain block id
int blockId = blockIdx.y * num_blockx + blockIdx.x;
// obtain the number of circles covering this block from the separators
// int startCirc = seperators[blockId];
// int endCirc = seperators[blockId+1];
// int numCircsForCurrentBlock = endCirc - startCirc;
int numCircsForCurrentBlock = seperators[blockId];
// we can access the circle id through the circ_cover_id array: N*B
int startAddInCoverId = numCircles * blockId;
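// circ_cover_id holds one row of (up to) numCircles ids per block,
// so block b's ids start at b * numCircles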
// startAddInCoverId + numCircForCurrentBlock
// update all the pixels within this blockId
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
int pixelX = blockIdx.x * blockDim.x + threadIdx.x;
int pixelY = blockIdx.y * blockDim.y + threadIdx.y;
if (pixelY >= imageHeight || pixelX >= imageWidth) return;
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelX) + 0.5f),
invHeight * (static_cast<float>(pixelY) + 0.5f));
float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixelY * imageWidth + pixelX)]);
// iterate over all the circles on this block
// AT: update by order
for (int idx = 0; idx < numCircsForCurrentBlock; idx++){
int circleIdx = circ_cover_id[startAddInCoverId + idx];
// if ( (threadIdx.x==0) && (threadIdx.y==0)) {printf("%d %d ",blockId,circleIdx);}
int index3 = circleIdx * 3;
// read position and radius, then use shadePixel to update
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
shadePixel(circleIdx, pixelCenterNorm, p, imgPtr);
}
}
__global__ void kernelRenderCircles_simple(int numCircles, int num_blockx, int num_blocky){
// just use this simple renderer for the simple cases
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
// for all pixels in the region
// update each pixel based on given sequence of circles on each region
int pixelX = blockIdx.x * blockDim.x + threadIdx.x;
int pixelY = blockIdx.y * blockDim.y + threadIdx.y;
if (pixelY >= imageHeight || pixelX >= imageWidth) return;
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelX) + 0.5f),
invHeight * (static_cast<float>(pixelY) + 0.5f));
float4 *imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixelY * imageWidth + pixelX)]);
// iterate over all circles on this region
for (int idx = 0; idx < numCircles; idx++){
// update pixel under circle order
int circleId = idx;
int index3 = 3 * circleId;
// read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
shadePixel(circleId, pixelCenterNorm, p, imgPtr);
}
}
void
CudaRenderer::render() {
// 256 threads per block is a healthy number
// dim3 blockDim(256, 1);
// dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
//debug_set1();
//debug_set2();
int block_dimx = 32;
int block_dimy = 32;
int num_blockx = (image->width+block_dimx-1)/block_dimx;
int num_blocky = (image->height+block_dimy-1)/block_dimy;
int num_total_blocks = num_blockx*num_blocky;
int* circ_cover_id;
int* separators; // size:num_total_blocks [num_circ per block]
array_type* circ_cover_flag; // the largest array: 0/1 coverage flags
if (numCircles < 10000){
int num_circ_rd = nextPow2(numCircles); //rounded numCircles
long total_size = numCircles*num_total_blocks;
long total_size_rd = num_circ_rd*num_total_blocks;
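// circ_cover_flag is laid out as a num_total_blocks x num_circ_rd matrix of
// 0/1 flags (one row per image block); circ_cover_id will hold the compacted
// circle ids per block and separators the per-block counts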
//int* check_ids = new int[total_size];
// int* check_sps = new int[num_total_blocks];
//int* check_flags = new int[total_size_rd];
// the grid size we process now is total_size;
int block_size_1d = 512; // can adjust
int num_block_1d = (total_size_rd+block_size_1d-1)/block_size_1d;
// double time0 = CycleTimer::currentSeconds();
cudaMalloc((void **)&circ_cover_flag, sizeof(array_type) * total_size_rd);
cudaMalloc((void **)&circ_cover_id, sizeof(int) * total_size);
cudaMalloc((void **)&separators, sizeof(int) * num_total_blocks);
cudaDeviceSynchronize();
// double time1 = CycleTimer::currentSeconds();
// printf("step 0 %f s\n",time1-time0);
// STEP1
// give status 0/1 to the circ_cover_flag based on coverage
findCircsInBlock<<<num_block_1d,block_size_1d>>> (circ_cover_flag, num_total_blocks, num_blockx, num_blocky, 0, numCircles);
cudaDeviceSynchronize();
// check codes
// array_type* checkarray = NULL;
// checkarray = (array_type*)malloc(sizeof(array_type) * num_total_blocks);
// cudaMemcpy(checkarray, circ_cover_flag, sizeof(array_type) * total_size, cudaMemcpyDeviceToHost);
// for (long i = 0; i < total_size; i++){
// printf("check circle %d in block %d : %d\n", i / num_total_blocks, i % num_total_blocks, checkarray[i]);
// }
// STEP2
// use a multidimensional scan to find the number of circles in each block and their ids
// save 2 1d arrays: the location increment in the array, the separators.
//(1) scan the array obtained above
// double time2 = CycleTimer::currentSeconds();
// printf("step 1 %f s\n",time2-time1);
multi_dim_inclusive_scan(total_size_rd, num_circ_rd, num_total_blocks, circ_cover_flag); //check circ_cover_flag
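// after the row-wise inclusive scan, entry (b, c) holds how many of circles
// 0..c cover block b; the last entry of each row is that block's total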
cudaDeviceSynchronize();
// double time3 = CycleTimer::currentSeconds();
// printf("step 2(1) %f s\n",time3-time2);
//(2) concurrent_write id and separators
concurrent_write_ids<<<num_block_1d,block_size_1d>>>(total_size, num_circ_rd, numCircles, num_total_blocks, \
circ_cover_flag, circ_cover_id, separators, 0, numCircles); //check circ_cover_id,separators
cudaDeviceSynchronize();
// check codes
// cudaMemcpy(check_sps, separators, num_total_blocks * sizeof(int),cudaMemcpyDeviceToHost);
// for (int i = 0; i < num_total_blocks; i++){
// if (i%num_blockx==0) {printf("\n");}
// printf("%d ", check_sps[i]);
// }
// STEP3: render
// define dim for block
dim3 blockDimBlock(block_dimx, block_dimy);
dim3 gridDimBlock(num_blockx, num_blocky);
// int* separators2; // size:num_total_blocks [num_circ per block]
// cudaMalloc((void **)&separators2, sizeof(int) * num_total_blocks);
// int* check_sps2 = new int[num_total_blocks];
// findNumCircsInBlock<<<gridDimBlock, blockDimBlock>>> (separators2, num_total_blocks, num_blockx, num_blocky, block_dimx*block_dimy);
// cudaDeviceSynchronize();
// cudaMemcpy(check_sps2, separators2, num_total_blocks * sizeof(int),cudaMemcpyDeviceToHost);
// printf("\n");
// for (int i = 0; i < num_total_blocks; i++){
// if (i%num_blockx==0) {printf("\n");}
// printf("%d ", check_sps2[i]);
// }
// double time4 = CycleTimer::currentSeconds();
// printf("step 2(3) %f s\n",time4-time3);
//right now, the last
//cudaDeviceSynchronize();
//step3: use the separators and circ_cover_id to render the circle
kernelRenderCircles<<<gridDimBlock, blockDimBlock>>>(separators, circ_cover_id, num_blockx, num_blocky, numCircles);
cudaDeviceSynchronize();
// double time5 = CycleTimer::currentSeconds();
// printf("step 3 %f s \n",time5-time4);
}
else if (numCircles < 100000){
int partitionNum = 1;
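// partitionNum is kept at 1 here, so the partition loop below makes a single pass over all circles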
int numCirclesPerPartition = numCircles / partitionNum;
int num_circ_rd_p = nextPow2(numCirclesPerPartition);
long total_size_p = numCirclesPerPartition * num_total_blocks;
long total_size_rd_p = num_circ_rd_p * num_total_blocks;
// the grid size we process now is total_size;
int block_size_1d = 512; // can adjust
int num_block_1d = (total_size_rd_p + block_size_1d-1)/block_size_1d;
// double time0 = CycleTimer::currentSeconds();
cudaMalloc((void **)&circ_cover_flag, sizeof(array_type) * total_size_rd_p);
cudaMalloc((void **)&circ_cover_id, sizeof(int) * total_size_p);
cudaMalloc((void **)&separators, sizeof(int) * num_total_blocks);
// int* check_sps = new int[num_total_blocks];
// double time1 = CycleTimer::currentSeconds();
// printf("step 0 %f s\n",time1-time0);
// double time2_sum = 0;
// double time3_sum = 0;
// double time4_sum = 0;
// double time5_sum = 0;
// double timeC_sum = 0;
for (int i = 0; i < partitionNum; i++){
// double time1_n = CycleTimer::currentSeconds();
//step1: give status 0/1 to the circ_cover_flag based on coverage
findCircsInBlock<<<num_block_1d,block_size_1d>>> (circ_cover_flag, num_total_blocks, num_blockx, num_blocky, i, numCirclesPerPartition);
cudaDeviceSynchronize();
//step2: use a multidimensional scan to find the number of circles in each block and their ids
//save 2 1d arrays: the location increment in the array, the separators.
//(1) scan the array obtained above
// double time2 = CycleTimer::currentSeconds();
// time2_sum += (time2 - time1_n);
multi_dim_inclusive_scan(total_size_rd_p, num_circ_rd_p, num_total_blocks, circ_cover_flag); //check circ_cover_flag
//(2) concurrent_write id and separators
cudaDeviceSynchronize();
// double time3 = CycleTimer::currentSeconds();
// time3_sum += (time3 - time2);
// here we obtain the circ_cover_id
concurrent_write_ids<<<num_block_1d,block_size_1d>>>(total_size_p, num_circ_rd_p, numCircles, num_total_blocks, \
circ_cover_flag, circ_cover_id, separators, 0, numCircles); //check circ_cover_id,separators
cudaDeviceSynchronize();
// double time4 = CycleTimer::currentSeconds();
// time4_sum += (time4 - time3);
// define dim for block
dim3 blockDimBlock(block_dimx, block_dimy);
dim3 gridDimBlock(num_blockx, num_blocky);
// double timeC = CycleTimer::currentSeconds();
// // time4_sum += (time4 - time3);
// int* separators2; // size:num_total_blocks [num_circ per block]
// cudaMalloc((void **)&separators2, sizeof(int) * num_total_blocks);
// int* check_sps2 = new int[num_total_blocks];
// findNumCircsInBlock<<<gridDimBlock, blockDimBlock>>> (separators2, num_total_blocks, num_blockx, num_blocky, block_dimx*block_dimy);
// cudaDeviceSynchronize();
// double time4n = CycleTimer::currentSeconds();
// timeC_sum += (time4n - timeC);
// cudaMemcpy(check_sps2, separators2, num_total_blocks * sizeof(int),cudaMemcpyDeviceToHost);
// printf("\n");
// for (int i = 0; i < num_total_blocks; i++){
// if (i%num_blockx==0) {printf("\n");}
// printf("%d ", check_sps2[i]);
// }
//step3: use the separators and circ_cover_id to render the circle
// define dim for block
kernelRenderCircles<<<gridDimBlock, blockDimBlock>>>(separators, circ_cover_id, num_blockx, num_blocky, numCirclesPerPartition);
cudaDeviceSynchronize();
// double time5 = CycleTimer::currentSeconds();
// time5_sum += (time5 - time4n);
}
// printf("step 1 %f s\n",time2_sum);
// printf("step 2(1) %f s\n",time3_sum);
// printf("step 2(3) %f s\n",time4_sum);
// printf("step 2(c) %f s\n",timeC_sum);
// printf("step 3 %f s \n",time5_sum);
}
else{
int partitionNum = 1;
int numCirclesPerPartition = numCircles / partitionNum;
int num_circ_rd_p = nextPow2(numCirclesPerPartition);
long total_size_p = numCirclesPerPartition * num_total_blocks;
long total_size_rd_p = num_circ_rd_p * num_total_blocks;
// the grid size we process now is total_size;
int block_size_1d = 512; // can adjust
int num_block_1d = (total_size_rd_p + block_size_1d-1)/block_size_1d;
// double time0 = CycleTimer::currentSeconds();
cudaMalloc((void **)&circ_cover_flag, sizeof(array_type) * total_size_rd_p);
cudaMalloc((void **)&circ_cover_id, sizeof(int) * total_size_p);
cudaMalloc((void **)&separators, sizeof(int) * num_total_blocks);
// int* check_sps = new int[num_total_blocks];
// double time1 = CycleTimer::currentSeconds();
// printf("step 0 %f s\n",time1-time0);
// double time2_sum = 0;
// double time3_sum = 0;
// double time4_sum = 0;
// double time5_sum = 0;
// double timeC_sum = 0;
for (int i = 0; i < partitionNum; i++){
// double time1_n = CycleTimer::currentSeconds();
//step1: give status 0/1 to the circ_cover_flag based on coverage
findCircsInBlock<<<num_block_1d,block_size_1d>>> (circ_cover_flag, num_total_blocks, num_blockx, num_blocky, i, numCirclesPerPartition);
cudaDeviceSynchronize();
//step2: use a multidimensional scan to find the number of circles in each block and their ids
//save 2 1d arrays: the location increment in the array, the separators.
//(1) scan the array obtained above
// double time2 = CycleTimer::currentSeconds();
// time2_sum += (time2 - time1_n);
multi_dim_inclusive_scan(total_size_rd_p, num_circ_rd_p, num_total_blocks, circ_cover_flag); //check circ_cover_flag
//(2) concurrent_write id and separators
cudaDeviceSynchronize();
// double time3 = CycleTimer::currentSeconds();
// time3_sum += (time3 - time2);
// here we obtain the circ_cover_id
concurrent_write_ids<<<num_block_1d,block_size_1d>>>(total_size_p, num_circ_rd_p, numCircles, num_total_blocks, \
circ_cover_flag, circ_cover_id, separators, 0, numCircles); //check circ_cover_id,separators
cudaDeviceSynchronize();
// double time4 = CycleTimer::currentSeconds();
// time4_sum += (time4 - time3);
// define dim for block
dim3 blockDimBlock(block_dimx, block_dimy);
dim3 gridDimBlock(num_blockx, num_blocky);
// double timeC = CycleTimer::currentSeconds();
// // time4_sum += (time4 - time3);
// int* separators2; // size:num_total_blocks [num_circ per block]
// cudaMalloc((void **)&separators2, sizeof(int) * num_total_blocks);
// int* check_sps2 = new int[num_total_blocks];
// findNumCircsInBlock<<<gridDimBlock, blockDimBlock>>> (separators2, num_total_blocks, num_blockx, num_blocky, block_dimx*block_dimy);
// cudaDeviceSynchronize();
// double time4n = CycleTimer::currentSeconds();
// timeC_sum += (time4n - timeC);
// cudaMemcpy(check_sps2, separators2, num_total_blocks * sizeof(int),cudaMemcpyDeviceToHost);
// printf("\n");
// for (int i = 0; i < num_total_blocks; i++){
// if (i%num_blockx==0) {printf("\n");}
// printf("%d ", check_sps2[i]);
// }
//step3: use the separators and circ_cover_id to render the circle
// define dim for block
kernelRenderCircles<<<gridDimBlock, blockDimBlock>>>(separators, circ_cover_id, num_blockx, num_blocky, numCirclesPerPartition);
cudaDeviceSynchronize();
// double time5 = CycleTimer::currentSeconds();
// time5_sum += (time5 - time4n);
}
// printf("step 1 %f s\n",time2_sum);
// printf("step 2(1) %f s\n",time3_sum);
// printf("step 2(3) %f s\n",time4_sum);
// printf("step 2(c) %f s\n",timeC_sum);
// printf("step 3 %f s \n",time5_sum);
}
//step4: small size
//-------
cudaFree(circ_cover_flag);
cudaFree(circ_cover_id);
cudaFree(separators);
}
|
2fcfee333dbed854a7fbcfe8c092b8dce2398aa8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "FilmGradeKernelA.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *p_Input = NULL;
hipMalloc(&p_Input, XSIZE*YSIZE);
int p_Width = XSIZE;
int p_Height = YSIZE;
float p_Exp = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
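// round the launch extents up to multiples of the block dimensions so the grid fully covers the XSIZE x YSIZE problem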
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((FilmGradeKernelA), dim3(gridBlock), dim3(threadBlock), 0, 0, p_Input, p_Width, p_Height, p_Exp);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((FilmGradeKernelA), dim3(gridBlock), dim3(threadBlock), 0, 0, p_Input, p_Width, p_Height, p_Exp);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((FilmGradeKernelA), dim3(gridBlock), dim3(threadBlock), 0, 0, p_Input, p_Width, p_Height, p_Exp);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
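// usecs is the accumulated time of the 1000 timed launches, in microseconds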
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2fcfee333dbed854a7fbcfe8c092b8dce2398aa8.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "FilmGradeKernelA.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *p_Input = NULL;
cudaMalloc(&p_Input, XSIZE*YSIZE);
int p_Width = XSIZE;
int p_Height = YSIZE;
float p_Exp = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
FilmGradeKernelA<<<gridBlock,threadBlock>>>(p_Input,p_Width,p_Height,p_Exp);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
FilmGradeKernelA<<<gridBlock,threadBlock>>>(p_Input,p_Width,p_Height,p_Exp);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
FilmGradeKernelA<<<gridBlock,threadBlock>>>(p_Input,p_Width,p_Height,p_Exp);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2d18f00a6d242032ff5049a90699730f5003a4d6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// CUDA sample demonstrating a GEMM computation using the Warp Matrix Multiply
// and Accumulate API introduced in CUDA 9.
// In this program, the compute_gemm kernel computes the result of a matrix multiplication
// and addition: D = alpha * A * B + beta * C. The dimensions of both C and D matrices
// are M_GLOBAL x N_GLOBAL. The A matrix is M_GLOBAL x K_GLOBAL (row-major), the B matrix
// is K_GLOBAL x N_GLOBAL (column-major).
// In that kernel, each CTA computes one 128 x 128 tile of the resulting matrix
// per iteration. When the tile is computed, the CTA stores it to the global memory
// and begins a new iteration, selecting a new 128 x 128 tile to compute.
// Each CTA consists of eight warps. For the 128 x 128 tile, each warp computes eight
// 16 x 16 subtiles, organized in a 2 x 4 two-dimensional array.
// Warps compute the 16 x 16 subtiles using nvcuda::wmma::mma_sync operations by
// moving through the K_GLOBAL dimension of the A and B matrices and accumulating
// the intermediate result in the local thread state.
// There are a number of simple optimizations used in the algorithm:
// - The CTA copies the 128 x 128 tile of the C matrix from the global memory to
// shared memory. After that is done, each warp loads the C matrix fragments from
// shared memory, thus avoiding a random global memory access.
// - On each internal iteration, the CTA copies a portion of the A and B matrices from
// global memory to shared memory. After that, all warps in the CTA reuse the A and B
// data from shared memory, thus reducing the number of data copies from global memory.
// - The portions of the A and B matrices are stored in shared memory with an additional
// padding (skew) to reduce the number of shared memory access bank conflicts.
// (See a detailed explanation near the SKEW_HALF macro definition.)
// - When the CTA finishes computing the tiles of the resulting matrix, each warp stores
// its subtiles to shared memory. The CTA then copies the shared memory contents to
// global memory, again avoiding redundant random global memory accesses.
// - Note that the CTA tile size is chosen to maximize the GPU register utilization,
// but carefully enough to avoid local memory use.
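// For the default configuration below this works out to: a CTA tile of
// (BLOCK_COL_TILES * M) x (BLOCK_ROW_TILES * N) = 128 x 128 elements, each of
// the eight warps owning a WARP_COL_TILES x WARP_ROW_TILES = 2 x 4 grid of
// 16 x 16 fragments, and an output matrix of (M_TILES / BLOCK_COL_TILES) x
// (N_TILES / BLOCK_ROW_TILES) = 32 x 32 = 1024 CTA tiles that the persistent
// grid of one block per SM walks through.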
#include <assert.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <mma.h>
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
// GPU configuration.
#define WARP_SIZE 32
// MMA matrix tile dimensions.
#define M 16
#define N 16
#define K 16
// GEMM configuration.
#define M_TILES 256
#define N_TILES 256
#define K_TILES 256
#define M_GLOBAL (M * M_TILES)
#define N_GLOBAL (N * N_TILES)
#define K_GLOBAL (K * K_TILES)
#define C_LAYOUT wmma::mem_row_major
// Implementation constants.
#define WARPS_PER_BLOCK 8
#define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK)
#define CHUNK_K 8
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS)
#define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS)
#define GLOBAL_MEM_STRIDE N_GLOBAL
#define SHMEM_STRIDE (N * BLOCK_ROW_TILES)
#define SHMEM_OFFSET (N * WARP_ROW_TILES)
// The macro below is used to shift rows of the A matrix and columns of the B matrix
// in shared memory to minimize possible bank conflicts.
// Before performing the nvcuda::wmma::mma_sync operation, the warp must load the matrix
// data using the nvcuda::wmma::load_matrix_sync operation. Although the memory access pattern
// is not specified for that function, each lane in the warp can read one or multiple matrix
// elements from different matrix rows or columns.
// For shared memory, such access can result in bank conflicts if different rows / columns
// of the matrix map to the same bank. By shifting each row and column by a few bytes, we
// make sure that they map to different banks, thus reducing the number of possible bank
// conflicts.
// The number of 8 two-byte "half" elements is chosen as the minimum possible shift because
// we must keep each row and column 128-bit aligned, as required by nvcuda::wmma::load_matrix_sync.
#define SKEW_HALF 8
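// With the values above, one shared-memory row is CHUNK_K * K + SKEW_HALF =
// 136 half elements = 272 bytes. Without the skew a row would be exactly 256
// bytes, a multiple of the 128-byte bank period, so every row would start in
// the same bank; the extra 16 bytes stagger consecutive rows by four banks
// while keeping the 128-bit alignment required by load_matrix_sync.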
#define checkKernelErrors(expr) do { \
expr; \
\
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, # expr, hipGetErrorString(__err)); \
abort(); \
} \
} while(0)
using namespace nvcuda;
__host__ void init_host_matrices(float *a, float *b, float *c)
{
for (int i = 0; i < M_GLOBAL; i++) {
for (int j = 0; j < K_GLOBAL; j++) {
a[i*K_GLOBAL+j] = (float)(rand() % 3);
}
}
for (int i = 0; i < N_GLOBAL; i++) {
for (int j = 0; j < K_GLOBAL; j++) {
b[i*K_GLOBAL+j] = (float)(rand() % 3);
}
}
for (int t = 0; t < M_GLOBAL * N_GLOBAL; t++) {
c[t] = (float)(rand() % 3);
}
}
__global__ void init_device_matrices(const float *A_h, const float *B_h, const float *C_h, half *A, half *B, float *C, float *D)
{
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < M_GLOBAL * K_GLOBAL; i += gridDim.x * blockDim.x)
A[i] = __float2half(A_h[i]);
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < N_GLOBAL * K_GLOBAL; i += gridDim.x * blockDim.x)
B[i] = __float2half(B_h[i]);
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < M_GLOBAL * N_GLOBAL; i += gridDim.x * blockDim.x)
C[i] = C_h[i];
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < M_GLOBAL * N_GLOBAL; i += gridDim.x * blockDim.x)
D[i] = 0;
}
__global__ void compute_gemm(const half *A, const half *B, const float *C, float *D, float alpha, float beta)
{
extern __shared__ half shmem[][CHUNK_K * K + SKEW_HALF];
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
// Offset in shared memory from which the B matrix is stored.
const size_t shmem_idx_b_off = BLOCK_COL_TILES * M;
// This pointer is used to access the C and D matrix tiles this warp computes.
float *shmem_warp_tile_ptr = (float*)&shmem[0][0] + (warpId/2) * SHMEM_STRIDE * K * 2 + (warpId%2) * SHMEM_OFFSET;
// This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
float *shmem_warp_stream_ptr = (float*)&shmem[0][0] + warpId * SHMEM_STRIDE * K;
// Adjust the beta scaler, as it'll be multiplied by alpha at the end of
// each tile computation. Technically this is not generally correct (may result
// in a loss of precision). Zero still needs to be specially handled though.
beta /= alpha;
// Each CTA slides along the 128 x 128 tiles from the top left corner of the matrix to the
// right and down, and selects the next tile to compute. Once there's no such tile,
// all warps in this CTA exit.
for(unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i = ((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES);
const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_TILES) {
break;
}
// This warp's pointer to the C matrix data to copy memory from to shared memory.
const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N;
const float *src_gmem_warp_stream_ptr = &C[gmem_idx];
// Stream multiple C tiles to shared memory.
#pragma unroll
for (int i = 0; i < K; i++) {
typedef int4 copy_t;
*((copy_t *)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId) =
*((copy_t *)(src_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId);
}
__syncthreads();
// These fragments will accumulate the result of A and B matrix fragment multiplications
// along the K_GLOBAL dimension.
wmma::fragment<wmma::accumulator, M, N, K, float> c[WARP_COL_TILES][WARP_ROW_TILES];
// Load the C matrix tiles into fragments from shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
const float *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N;
wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// Scale the C matrix.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++) {
c[i][j].x[t] *= beta;
}
}
}
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const half *warp_ptr = (warpId < 4) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % 4) * 2) :
(&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % 4) * 2);
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy the B matrix.
size_t shmem_idx = warpId < (WARPS_PER_BLOCK/2) ? (M * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
(N * (warpId % (WARPS_PER_BLOCK/2)) * 2 + shmem_idx_b_off);
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
int4 *lane_ptr = (int4*)(warp_ptr + tile_k * K + (laneId / (WARP_SIZE/2)) * K_GLOBAL) + (laneId % (WARP_SIZE/2));
// Shift the second half of the warp to the next row / column in the shared memory.
shmem_idx += laneId / (WARP_SIZE/2);
#pragma unroll
for(int i = 0; i < (WARP_SIZE/2); i++) {
// Copy 16 bytes at once in each lane.
*((int4*)&shmem[shmem_idx][0] + (laneId % (WARP_SIZE/2))) = *lane_ptr;
// Advance the global memory pointer and the shared memory index.
lane_ptr = (int4*)((half*)lane_ptr + K_GLOBAL * 2);
shmem_idx += 2;
}
__syncthreads();
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, half, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, half, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId/2) * M * 2 + (i * M);
const half *tile_ptr = &shmem[shmem_idx_a][k_step * K];
wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_HALF);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be reused
// against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId%2) + (j * N);
const half *tile_ptr = &shmem[shmem_idx_b][k_step * K];
wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_HALF);
}
wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]);
}
}
}
__syncthreads();
}
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
#pragma unroll
// Uniform, point-wise transformations of ALL fragment elements by ALL threads in the
// warp are well-defined even though element indices within fragment storage are not defined.
for (int t = 0; t < c[i][j].num_elements; t++)
c[i][j].x[t] *= alpha;
float *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N;
wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// Now that shared memory contains all the D tiles, stream them to global memory.
float *dst_gmem_warp_stream_ptr = &D[gmem_idx];
#pragma unroll
for (int i = 0; i < K; i++) {
*((int4*)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) =
*((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId);
}
__syncthreads();
}
}
int main(int argc, char **argv)
{
printf("Initializing...\n");
int dev = findCudaDevice(argc, (const char **)argv);
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
// Tensor cores require a GPU of Volta (SM7X) architecture or higher.
if (deviceProp.major < 7) {
printf("cudaTensorCoreGemm requires requires SM 7.0 or higher to use Tensor Cores. Exiting...\n");
exit(EXIT_WAIVED);
}
printf("M: %d (%d x %d)\n", M_GLOBAL, M, M_TILES);
printf("N: %d (%d x %d)\n", N_GLOBAL, N, N_TILES);
printf("K: %d (%d x %d)\n", K_GLOBAL, K, K_TILES);
float *A_h = NULL;
float *B_h = NULL;
float *C_h = NULL;
checkCudaErrors(hipMallocManaged((void**)&A_h, sizeof(float) * M_GLOBAL * K_GLOBAL));
checkCudaErrors(hipMallocManaged((void**)&B_h, sizeof(float) * K_GLOBAL * N_GLOBAL));
checkCudaErrors(hipMallocManaged((void**)&C_h, sizeof(float) * M_GLOBAL * N_GLOBAL));
half *A = NULL;
half *B = NULL;
float *C = NULL;
float *D = NULL;
checkCudaErrors(hipMalloc((void**)&A, sizeof(half) * M_GLOBAL * K_GLOBAL));
checkCudaErrors(hipMalloc((void**)&B, sizeof(half) * N_GLOBAL * K_GLOBAL));
checkCudaErrors(hipMalloc((void**)&C, sizeof(float) * M_GLOBAL * N_GLOBAL));
checkCudaErrors(hipMalloc((void**)&D, sizeof(float) * M_GLOBAL * N_GLOBAL));
assert(((unsigned long long)A) % 128 == 0);
assert(((unsigned long long)B) % 128 == 0);
assert(((unsigned long long)C) % 128 == 0);
assert(((unsigned long long)D) % 128 == 0);
init_host_matrices(A_h, B_h, C_h);
printf("Preparing data for GPU...\n");
checkKernelErrors((hipLaunchKernelGGL(init_device_matrices, dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK), 0, 0, A_h, B_h, C_h, A, B, C, D)));
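// one block per SM; init_device_matrices uses grid-stride loops, so every element is covered regardless of SM count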
checkCudaErrors(hipDeviceSynchronize());
enum { SHMEM_SZ = sizeof(half) * (BLOCK_COL_TILES * M) * (CHUNK_K * K + SKEW_HALF) * 2 };
printf("Required shared memory size: %lu Kb\n", SHMEM_SZ / 1024UL);
checkCudaErrors(hipFuncSetAttribute(compute_gemm, hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ));
printf("Computing...\n");
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start));
const float alpha = 1.1f;
const float beta = 1.2f;
checkKernelErrors((hipLaunchKernelGGL(compute_gemm, dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK), SHMEM_SZ, 0, A, B, C, D, alpha, beta)));
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
float milliseconds = 0;
checkCudaErrors(hipEventElapsedTime(&milliseconds, start, stop));
printf("Time: %f ms\n", milliseconds);
printf("TFLOPS: %.2f\n", (((double)M_GLOBAL * N_GLOBAL * K_GLOBAL * 2)/(milliseconds/1000.)) / 1e12);
checkCudaErrors(hipFree((void*)A_h));
checkCudaErrors(hipFree((void*)B_h));
checkCudaErrors(hipFree((void*)C_h));
checkCudaErrors(hipFree((void*)A));
checkCudaErrors(hipFree((void*)B));
checkCudaErrors(hipFree((void*)C));
checkCudaErrors(hipFree((void*)D));
return 0;
}
| 2d18f00a6d242032ff5049a90699730f5003a4d6.cu | /*
* Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// CUDA sample demonstrating a GEMM computation using the Warp Matrix Multiply
// and Accumulate API introduced in CUDA 9.
// In this program, the compute_gemm kernel computes the result of a matrix multiplication
// and addition: D = alpha * A * B + beta * C. The dimensions of both C and D matrices
// are M_GLOBAL x N_GLOBAL. The A matrix is M_GLOBAL x K_GLOBAL (row-major), the B matrix
// is K_GLOBAL x N_GLOBAL (column-major).
// In that kernel, each CTA computes one 128 x 128 tile of the resulting matrix
// per iteration. When the tile is computed, the CTA stores it to the global memory
// and begins a new iteration, selecting a new 128 x 128 tile to compute.
// Each CTA consists of eight warps. For the 128 x 128 tile, each warp computes eight
// 16 x 16 subtiles, organized in a 2 x 4 two-dimensional array.
// Warps compute the 16 x 16 subtiles using nvcuda::wmma::mma_sync operations by
// moving through the K_GLOBAL dimension of the A and B matrices and accumulating
// the intermediate result in the local thread state.
// There are a number of simple optimizations used in the algorithm:
// - The CTA copies the 128 x 128 tile of the C matrix from the global memory to
// shared memory. After that is done, each warp loads the C matrix fragments from
// shared memory, thus avoiding a random global memory access.
// - On each internal iteration, the CTA copies a portion of the A and B matrices from
// global memory to shared memory. After that, all warps in the CTA reuse the A and B
// data from shared memory, thus reducing the number of data copies from global memory.
// - The portions of the A and B matrices are stored in shared memory with an additional
// padding (skew) to reduce the number of shared memory access bank conflicts.
// (See a detailed explanation near the SKEW_HALF macro definition.)
// - When the CTA finishes computing the tiles of the resulting matrix, each warp stores
// its subtiles to shared memory. The CTA then copies the shared memory contents to
// global memory, again avoiding redundant random global memory accesses.
// - Note that the CTA tile size is chosen to maximize the GPU register utilization,
// but carefully enough to avoid local memory use.
#include <assert.h>
#include <stdio.h>
#include <cuda.h>
#include <mma.h>
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
// GPU configuration.
#define WARP_SIZE 32
// MMA matrix tile dimensions.
#define M 16
#define N 16
#define K 16
// GEMM configuration.
#define M_TILES 256
#define N_TILES 256
#define K_TILES 256
#define M_GLOBAL (M * M_TILES)
#define N_GLOBAL (N * N_TILES)
#define K_GLOBAL (K * K_TILES)
#define C_LAYOUT wmma::mem_row_major
// Implementation constants.
#define WARPS_PER_BLOCK 8
#define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK)
#define CHUNK_K 8
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS)
#define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS)
#define GLOBAL_MEM_STRIDE N_GLOBAL
#define SHMEM_STRIDE (N * BLOCK_ROW_TILES)
#define SHMEM_OFFSET (N * WARP_ROW_TILES)
// The macro below is used to shift rows of the A matrix and columns of the B matrix
// in shared memory to minimize possible bank conflicts.
// Before performing the nvcuda::wmma::mma_sync operation, the warp must load the matrix
// data using the nvcuda::wmma::load_matrix_sync operation. Although the memory access pattern
// is not specified for that function, each lane in the warp can read one or multiple matrix
// elements from different matrix rows or columns.
// For shared memory, such access can result in bank conflicts if different rows / columns
// of the matrix map to the same bank. By shifting each row and column by a few bytes, we
// make sure that they map to different banks, thus reducing the number of possible bank
// conflicts.
// The number of 8 two-byte "half" elements is chosen as the minimum possible shift because
// we must keep each row and column 128-bit aligned, as required by nvcuda::wmma::load_matrix_sync.
#define SKEW_HALF 8
#define checkKernelErrors(expr) do { \
expr; \
\
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, # expr, cudaGetErrorString(__err)); \
abort(); \
} \
} while(0)
using namespace nvcuda;
__host__ void init_host_matrices(float *a, float *b, float *c)
{
for (int i = 0; i < M_GLOBAL; i++) {
for (int j = 0; j < K_GLOBAL; j++) {
a[i*K_GLOBAL+j] = (float)(rand() % 3);
}
}
for (int i = 0; i < N_GLOBAL; i++) {
for (int j = 0; j < K_GLOBAL; j++) {
b[i*K_GLOBAL+j] = (float)(rand() % 3);
}
}
for (int t = 0; t < M_GLOBAL * N_GLOBAL; t++) {
c[t] = (float)(rand() % 3);
}
}
__global__ void init_device_matrices(const float *A_h, const float *B_h, const float *C_h, half *A, half *B, float *C, float *D)
{
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < M_GLOBAL * K_GLOBAL; i += gridDim.x * blockDim.x)
A[i] = __float2half(A_h[i]);
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < N_GLOBAL * K_GLOBAL; i += gridDim.x * blockDim.x)
B[i] = __float2half(B_h[i]);
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < M_GLOBAL * N_GLOBAL; i += gridDim.x * blockDim.x)
C[i] = C_h[i];
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < M_GLOBAL * N_GLOBAL; i += gridDim.x * blockDim.x)
D[i] = 0;
}
__global__ void compute_gemm(const half *A, const half *B, const float *C, float *D, float alpha, float beta)
{
extern __shared__ half shmem[][CHUNK_K * K + SKEW_HALF];
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
// Offset in shared memory from which the B matrix is stored.
const size_t shmem_idx_b_off = BLOCK_COL_TILES * M;
// This pointer is used to access the C and D matrix tiles this warp computes.
float *shmem_warp_tile_ptr = (float*)&shmem[0][0] + (warpId/2) * SHMEM_STRIDE * K * 2 + (warpId%2) * SHMEM_OFFSET;
// This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
float *shmem_warp_stream_ptr = (float*)&shmem[0][0] + warpId * SHMEM_STRIDE * K;
// Adjust the beta scaler, as it'll be multiplied by alpha at the end of
// each tile computation. Technically this is not generally correct (may result
// in a loss of precision). Zero still needs to be specially handled though.
beta /= alpha;
// Each CTA slides along the 128 x 128 tiles from the top left corner of the matrix to the
// right and down, and selects the next tile to compute. Once there's no such tile,
// all warps in this CTA exit.
for(unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i = ((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES);
const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_TILES) {
break;
}
// This warp's pointer to the C matrix data to copy memory from to shared memory.
const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N;
const float *src_gmem_warp_stream_ptr = &C[gmem_idx];
// Stream multiple C tiles to shared memory.
#pragma unroll
for (int i = 0; i < K; i++) {
typedef int4 copy_t;
*((copy_t *)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId) =
*((copy_t *)(src_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId);
}
__syncthreads();
// These fragments will accumulate the result of A and B matrix fragment multiplications
// along the K_GLOBAL dimension.
wmma::fragment<wmma::accumulator, M, N, K, float> c[WARP_COL_TILES][WARP_ROW_TILES];
// Load the C matrix tiles into fragments from shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
const float *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N;
wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// Scale the C matrix.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++) {
c[i][j].x[t] *= beta;
}
}
}
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const half *warp_ptr = (warpId < 4) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % 4) * 2) :
(&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % 4) * 2);
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy the B matrix.
size_t shmem_idx = warpId < (WARPS_PER_BLOCK/2) ? (M * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
(N * (warpId % (WARPS_PER_BLOCK/2)) * 2 + shmem_idx_b_off);
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
int4 *lane_ptr = (int4*)(warp_ptr + tile_k * K + (laneId / (WARP_SIZE/2)) * K_GLOBAL) + (laneId % (WARP_SIZE/2));
// Shift the second half of the warp to the next row / column in the shared memory.
shmem_idx += laneId / (WARP_SIZE/2);
#pragma unroll
for(int i = 0; i < (WARP_SIZE/2); i++) {
// Copy 16 bytes at once in each lane.
*((int4*)&shmem[shmem_idx][0] + (laneId % (WARP_SIZE/2))) = *lane_ptr;
// Advance the global memory pointer and the shared memory index.
lane_ptr = (int4*)((half*)lane_ptr + K_GLOBAL * 2);
shmem_idx += 2;
}
__syncthreads();
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, half, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, half, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId/2) * M * 2 + (i * M);
const half *tile_ptr = &shmem[shmem_idx_a][k_step * K];
wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_HALF);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be reused
// against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId%2) + (j * N);
const half *tile_ptr = &shmem[shmem_idx_b][k_step * K];
wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_HALF);
}
wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]);
}
}
}
__syncthreads();
}
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
#pragma unroll
// Uniform, point-wise transformations of ALL fragment elements by ALL threads in the
// warp are well-defined even though element indices within fragment storage are not defined.
for (int t = 0; t < c[i][j].num_elements; t++)
c[i][j].x[t] *= alpha;
float *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N;
wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// Now that shared memory contains all the D tiles, stream them to global memory.
float *dst_gmem_warp_stream_ptr = &D[gmem_idx];
#pragma unroll
for (int i = 0; i < K; i++) {
*((int4*)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) =
*((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId);
}
__syncthreads();
}
}
int main(int argc, char **argv)
{
printf("Initializing...\n");
int dev = findCudaDevice(argc, (const char **)argv);
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
// Tensor cores require a GPU of Volta (SM7X) architecture or higher.
if (deviceProp.major < 7) {
printf("cudaTensorCoreGemm requires requires SM 7.0 or higher to use Tensor Cores. Exiting...\n");
exit(EXIT_WAIVED);
}
printf("M: %d (%d x %d)\n", M_GLOBAL, M, M_TILES);
printf("N: %d (%d x %d)\n", N_GLOBAL, N, N_TILES);
printf("K: %d (%d x %d)\n", K_GLOBAL, K, K_TILES);
float *A_h = NULL;
float *B_h = NULL;
float *C_h = NULL;
checkCudaErrors(cudaMallocManaged((void**)&A_h, sizeof(float) * M_GLOBAL * K_GLOBAL));
checkCudaErrors(cudaMallocManaged((void**)&B_h, sizeof(float) * K_GLOBAL * N_GLOBAL));
checkCudaErrors(cudaMallocManaged((void**)&C_h, sizeof(float) * M_GLOBAL * N_GLOBAL));
half *A = NULL;
half *B = NULL;
float *C = NULL;
float *D = NULL;
checkCudaErrors(cudaMalloc((void**)&A, sizeof(half) * M_GLOBAL * K_GLOBAL));
checkCudaErrors(cudaMalloc((void**)&B, sizeof(half) * N_GLOBAL * K_GLOBAL));
checkCudaErrors(cudaMalloc((void**)&C, sizeof(float) * M_GLOBAL * N_GLOBAL));
checkCudaErrors(cudaMalloc((void**)&D, sizeof(float) * M_GLOBAL * N_GLOBAL));
assert(((unsigned long long)A) % 128 == 0);
assert(((unsigned long long)B) % 128 == 0);
assert(((unsigned long long)C) % 128 == 0);
assert(((unsigned long long)D) % 128 == 0);
init_host_matrices(A_h, B_h, C_h);
printf("Preparing data for GPU...\n");
checkKernelErrors((init_device_matrices<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK>>>(A_h, B_h, C_h, A, B, C, D)));
checkCudaErrors(cudaDeviceSynchronize());
enum { SHMEM_SZ = sizeof(half) * (BLOCK_COL_TILES * M) * (CHUNK_K * K + SKEW_HALF) * 2 };
printf("Required shared memory size: %lu Kb\n", SHMEM_SZ / 1024UL);
checkCudaErrors(cudaFuncSetAttribute(compute_gemm, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ));
printf("Computing...\n");
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start));
const float alpha = 1.1f;
const float beta = 1.2f;
checkKernelErrors((compute_gemm<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK, SHMEM_SZ>>>(A, B, C, D, alpha, beta)));
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
float milliseconds = 0;
checkCudaErrors(cudaEventElapsedTime(&milliseconds, start, stop));
printf("Time: %f ms\n", milliseconds);
printf("TFLOPS: %.2f\n", (((double)M_GLOBAL * N_GLOBAL * K_GLOBAL * 2)/(milliseconds/1000.)) / 1e12);
checkCudaErrors(cudaFree((void*)A_h));
checkCudaErrors(cudaFree((void*)B_h));
checkCudaErrors(cudaFree((void*)C_h));
checkCudaErrors(cudaFree((void*)A));
checkCudaErrors(cudaFree((void*)B));
checkCudaErrors(cudaFree((void*)C));
checkCudaErrors(cudaFree((void*)D));
return 0;
}
|
db516711a1b9dcdf2559b6c8faa1db49c5b6957d.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFFlat.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/FlatIndex.cuh>
#include <faiss/gpu/impl/IVFAppend.cuh>
#include <faiss/gpu/impl/IVFFlatScan.cuh>
#include <faiss/gpu/impl/RemapIndices.h>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/HostTensor.cuh>
#include <faiss/gpu/utils/Transpose.cuh>
#include <limits>
#include <thrust/host_vector.h>
#include <unordered_map>
namespace faiss { namespace gpu {
IVFFlat::IVFFlat(GpuResources* res,
FlatIndex* quantizer,
faiss::MetricType metric,
float metricArg,
bool useResidual,
faiss::ScalarQuantizer* scalarQ,
IndicesOptions indicesOptions,
MemorySpace space) :
IVFBase(res,
metric,
metricArg,
quantizer,
scalarQ ? scalarQ->code_size :
sizeof(float) * quantizer->getDim(),
indicesOptions,
space),
useResidual_(useResidual),
scalarQ_(scalarQ ? new GpuScalarQuantizer(res, *scalarQ) : nullptr) {
}
IVFFlat::~IVFFlat() {
}
void
IVFFlat::addCodeVectorsFromCpu(int listId,
const unsigned char* vecs,
const long* indices,
size_t numVecs) {
// This list must already exist
FAISS_ASSERT(listId < deviceListData_.size());
auto stream = resources_->getDefaultStreamCurrentDevice();
// If there's nothing to add, then there's nothing we have to do
if (numVecs == 0) {
return;
}
size_t lengthInBytes = numVecs * bytesPerVector_;
auto& listData = deviceListData_[listId];
auto prevData = listData->data();
// We only have int32 length representations on the GPU per each
// list; the length is in sizeof(char)
FAISS_ASSERT(listData->size() + lengthInBytes <=
(size_t) std::numeric_limits<int>::max());
listData->append(vecs,
lengthInBytes,
stream,
true /* exact reserved size */);
// Handle the indices as well
addIndicesFromCpu_(listId, indices, numVecs);
// This list address may have changed due to vector resizing, but
// only bother updating it on the device if it has changed
if (prevData != listData->data()) {
deviceListDataPointers_[listId] = listData->data();
}
// And our size has changed too
int listLength = listData->size() / bytesPerVector_;
deviceListLengths_[listId] = listLength;
// We update this as well, since the multi-pass algorithm uses it
maxListLength_ = std::max(maxListLength_, listLength);
// device_vector add is potentially happening on a different stream
// than our default stream
if (stream != 0) {
streamWait({stream}, {0});
}
}
int
IVFFlat::classifyAndAddVectors(Tensor<float, 2, true>& vecs,
Tensor<long, 1, true>& indices) {
FAISS_ASSERT(vecs.getSize(0) == indices.getSize(0));
FAISS_ASSERT(vecs.getSize(1) == dim_);
auto stream = resources_->getDefaultStreamCurrentDevice();
// Number of valid vectors that we actually add; we return this
int numAdded = 0;
DeviceTensor<float, 2, true> listDistance2d(
resources_, makeTempAlloc(AllocType::Other, stream), {vecs.getSize(0), 1});
DeviceTensor<int, 2, true> listIds2d(
resources_, makeTempAlloc(AllocType::Other, stream), {vecs.getSize(0), 1});
auto listIds = listIds2d.view<1>({vecs.getSize(0)});
quantizer_->query(vecs, 1, metric_, metricArg_,
listDistance2d, listIds2d, false);
// Calculate residuals for these vectors, if needed
DeviceTensor<float, 2, true> residuals(
resources_, makeTempAlloc(AllocType::Other, stream), {vecs.getSize(0), dim_});
if (useResidual_) {
quantizer_->computeResidual(vecs, listIds, residuals);
}
// Copy the lists that we wish to append to back to the CPU
// FIXME: really this can be into pinned memory and a true async
// copy on a different stream; we can start the copy early, but it's
// tiny
HostTensor<int, 1, true> listIdsHost(listIds, stream);
// Now we add the encoded vectors to the individual lists
// First, make sure that there is space available for adding the new
// encoded vectors and indices
// list id -> # being added
std::unordered_map<int, int> assignCounts;
// vector id -> offset in list
// (we already have vector id -> list id in listIds)
HostTensor<int, 1, true> listOffsetHost({listIdsHost.getSize(0)});
for (int i = 0; i < listIds.getSize(0); ++i) {
int listId = listIdsHost[i];
// Add vector could be invalid (contains NaNs etc)
if (listId < 0) {
listOffsetHost[i] = -1;
continue;
}
FAISS_ASSERT(listId < numLists_);
++numAdded;
int offset = deviceListData_[listId]->size() / bytesPerVector_;
auto it = assignCounts.find(listId);
if (it != assignCounts.end()) {
offset += it->second;
it->second++;
} else {
assignCounts[listId] = 1;
}
listOffsetHost[i] = offset;
}
// If we didn't add anything (all invalid vectors), no need to
// continue
if (numAdded == 0) {
return 0;
}
// We need to resize the data structures for the inverted lists on
// the GPUs, which means that they might need reallocation, which
// means that their base address may change. Figure out the new base
// addresses, and update those in a batch on the device
{
for (auto& counts : assignCounts) {
auto& data = deviceListData_[counts.first];
data->resize(data->size() + counts.second * bytesPerVector_,
stream);
int newNumVecs = (int) (data->size() / bytesPerVector_);
auto& indices = deviceListIndices_[counts.first];
if ((indicesOptions_ == INDICES_32_BIT) ||
(indicesOptions_ == INDICES_64_BIT)) {
size_t indexSize =
(indicesOptions_ == INDICES_32_BIT) ? sizeof(int) : sizeof(long);
indices->resize(indices->size() + counts.second * indexSize, stream);
} else if (indicesOptions_ == INDICES_CPU) {
// indices are stored on the CPU side
FAISS_ASSERT(counts.first < listOffsetToUserIndex_.size());
auto& userIndices = listOffsetToUserIndex_[counts.first];
userIndices.resize(newNumVecs);
} else {
// indices are not stored on the GPU or CPU side
FAISS_ASSERT(indicesOptions_ == INDICES_IVF);
}
// This is used by the multi-pass query to decide how much scratch
// space to allocate for intermediate results
      maxListLength_ = std::max(maxListLength_, newNumVecs);
}
// Update all pointers to the lists on the device that may have
// changed
{
std::vector<int> listIds(assignCounts.size());
int i = 0;
for (auto& counts : assignCounts) {
listIds[i++] = counts.first;
}
updateDeviceListInfo_(listIds, stream);
}
}
// If we're maintaining the indices on the CPU side, update our
// map. We already resized our map above.
if (indicesOptions_ == INDICES_CPU) {
// We need to maintain the indices on the CPU side
HostTensor<long, 1, true> hostIndices(indices, stream);
for (int i = 0; i < hostIndices.getSize(0); ++i) {
int listId = listIdsHost[i];
// Add vector could be invalid (contains NaNs etc)
if (listId < 0) {
continue;
}
int offset = listOffsetHost[i];
FAISS_ASSERT(listId < listOffsetToUserIndex_.size());
auto& userIndices = listOffsetToUserIndex_[listId];
FAISS_ASSERT(offset < userIndices.size());
userIndices[offset] = hostIndices[i];
}
}
// We similarly need to actually append the new vectors
{
DeviceTensor<int, 1, true> listOffset(
resources_, makeTempAlloc(AllocType::Other, stream), listOffsetHost);
// Now, for each list to which a vector is being assigned, write it
runIVFFlatInvertedListAppend(listIds,
listOffset,
vecs,
indices,
useResidual_,
residuals,
scalarQ_.get(),
deviceListDataPointers_,
deviceListIndexPointers_,
indicesOptions_,
stream);
}
return numAdded;
}
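// Picks the `nprobe` closest inverted lists per query via the coarse
// quantizer, then scans those lists (against reconstructed centroids when
// residuals are in use) to produce the k best distances and indices. When the
// indices are kept on the CPU, list offsets are remapped to user ids at the
// end.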
void
IVFFlat::query(Tensor<float, 2, true>& queries,
int nprobe,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<long, 2, true>& outIndices) {
auto stream = resources_->getDefaultStreamCurrentDevice();
// These are caught at a higher level
FAISS_ASSERT(nprobe <= GPU_MAX_SELECTION_K);
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
  nprobe = std::min(nprobe, quantizer_->getSize());
FAISS_ASSERT(queries.getSize(1) == dim_);
FAISS_ASSERT(outDistances.getSize(0) == queries.getSize(0));
FAISS_ASSERT(outIndices.getSize(0) == queries.getSize(0));
// Reserve space for the quantized information
DeviceTensor<float, 2, true> coarseDistances(
resources_, makeTempAlloc(AllocType::Other, stream), {queries.getSize(0), nprobe});
DeviceTensor<int, 2, true> coarseIndices(
resources_, makeTempAlloc(AllocType::Other, stream), {queries.getSize(0), nprobe});
// Find the `nprobe` closest lists; we can use int indices both
// internally and externally
quantizer_->query(queries,
nprobe,
metric_,
metricArg_,
coarseDistances,
coarseIndices,
false);
DeviceTensor<float, 3, true> residualBase(
resources_, makeTempAlloc(AllocType::Other, stream),
{queries.getSize(0), nprobe, dim_});
if (useResidual_) {
// Reconstruct vectors from the quantizer
quantizer_->reconstruct(coarseIndices, residualBase);
}
runIVFFlatScan(queries,
coarseIndices,
deviceListDataPointers_,
deviceListIndexPointers_,
indicesOptions_,
deviceListLengths_,
maxListLength_,
k,
metric_,
useResidual_,
residualBase,
scalarQ_.get(),
outDistances,
outIndices,
resources_);
// If the GPU isn't storing indices (they are on the CPU side), we
// need to perform the re-mapping here
// FIXME: we might ultimately be calling this function with inputs
// from the CPU, these are unnecessary copies
if (indicesOptions_ == INDICES_CPU) {
HostTensor<long, 2, true> hostOutIndices(outIndices, stream);
ivfOffsetToUserIndex(hostOutIndices.data(),
numLists_,
hostOutIndices.getSize(0),
hostOutIndices.getSize(1),
listOffsetToUserIndex_);
// Copy back to GPU, since the input to this function is on the
// GPU
outIndices.copyFrom(hostOutIndices, stream);
}
}
} } // namespace
| db516711a1b9dcdf2559b6c8faa1db49c5b6957d.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFFlat.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/FlatIndex.cuh>
#include <faiss/gpu/impl/IVFAppend.cuh>
#include <faiss/gpu/impl/IVFFlatScan.cuh>
#include <faiss/gpu/impl/RemapIndices.h>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/HostTensor.cuh>
#include <faiss/gpu/utils/Transpose.cuh>
#include <limits>
#include <thrust/host_vector.h>
#include <unordered_map>
namespace faiss { namespace gpu {
IVFFlat::IVFFlat(GpuResources* res,
FlatIndex* quantizer,
faiss::MetricType metric,
float metricArg,
bool useResidual,
faiss::ScalarQuantizer* scalarQ,
IndicesOptions indicesOptions,
MemorySpace space) :
IVFBase(res,
metric,
metricArg,
quantizer,
scalarQ ? scalarQ->code_size :
sizeof(float) * quantizer->getDim(),
indicesOptions,
space),
useResidual_(useResidual),
scalarQ_(scalarQ ? new GpuScalarQuantizer(res, *scalarQ) : nullptr) {
}
IVFFlat::~IVFFlat() {
}
void
IVFFlat::addCodeVectorsFromCpu(int listId,
const unsigned char* vecs,
const long* indices,
size_t numVecs) {
// This list must already exist
FAISS_ASSERT(listId < deviceListData_.size());
auto stream = resources_->getDefaultStreamCurrentDevice();
// If there's nothing to add, then there's nothing we have to do
if (numVecs == 0) {
return;
}
size_t lengthInBytes = numVecs * bytesPerVector_;
auto& listData = deviceListData_[listId];
auto prevData = listData->data();
// We only have int32 length representations on the GPU per each
// list; the length is in sizeof(char)
FAISS_ASSERT(listData->size() + lengthInBytes <=
(size_t) std::numeric_limits<int>::max());
listData->append(vecs,
lengthInBytes,
stream,
true /* exact reserved size */);
// Handle the indices as well
addIndicesFromCpu_(listId, indices, numVecs);
// This list address may have changed due to vector resizing, but
// only bother updating it on the device if it has changed
if (prevData != listData->data()) {
deviceListDataPointers_[listId] = listData->data();
}
// And our size has changed too
int listLength = listData->size() / bytesPerVector_;
deviceListLengths_[listId] = listLength;
// We update this as well, since the multi-pass algorithm uses it
maxListLength_ = std::max(maxListLength_, listLength);
// device_vector add is potentially happening on a different stream
// than our default stream
if (stream != 0) {
streamWait({stream}, {0});
}
}
int
IVFFlat::classifyAndAddVectors(Tensor<float, 2, true>& vecs,
Tensor<long, 1, true>& indices) {
FAISS_ASSERT(vecs.getSize(0) == indices.getSize(0));
FAISS_ASSERT(vecs.getSize(1) == dim_);
auto stream = resources_->getDefaultStreamCurrentDevice();
// Number of valid vectors that we actually add; we return this
int numAdded = 0;
DeviceTensor<float, 2, true> listDistance2d(
resources_, makeTempAlloc(AllocType::Other, stream), {vecs.getSize(0), 1});
DeviceTensor<int, 2, true> listIds2d(
resources_, makeTempAlloc(AllocType::Other, stream), {vecs.getSize(0), 1});
auto listIds = listIds2d.view<1>({vecs.getSize(0)});
quantizer_->query(vecs, 1, metric_, metricArg_,
listDistance2d, listIds2d, false);
// Calculate residuals for these vectors, if needed
DeviceTensor<float, 2, true> residuals(
resources_, makeTempAlloc(AllocType::Other, stream), {vecs.getSize(0), dim_});
if (useResidual_) {
quantizer_->computeResidual(vecs, listIds, residuals);
}
// Copy the lists that we wish to append to back to the CPU
// FIXME: really this can be into pinned memory and a true async
// copy on a different stream; we can start the copy early, but it's
// tiny
HostTensor<int, 1, true> listIdsHost(listIds, stream);
// Now we add the encoded vectors to the individual lists
// First, make sure that there is space available for adding the new
// encoded vectors and indices
// list id -> # being added
std::unordered_map<int, int> assignCounts;
// vector id -> offset in list
// (we already have vector id -> list id in listIds)
HostTensor<int, 1, true> listOffsetHost({listIdsHost.getSize(0)});
for (int i = 0; i < listIds.getSize(0); ++i) {
int listId = listIdsHost[i];
// Add vector could be invalid (contains NaNs etc)
if (listId < 0) {
listOffsetHost[i] = -1;
continue;
}
FAISS_ASSERT(listId < numLists_);
++numAdded;
int offset = deviceListData_[listId]->size() / bytesPerVector_;
auto it = assignCounts.find(listId);
if (it != assignCounts.end()) {
offset += it->second;
it->second++;
} else {
assignCounts[listId] = 1;
}
listOffsetHost[i] = offset;
}
// If we didn't add anything (all invalid vectors), no need to
// continue
if (numAdded == 0) {
return 0;
}
// We need to resize the data structures for the inverted lists on
// the GPUs, which means that they might need reallocation, which
// means that their base address may change. Figure out the new base
// addresses, and update those in a batch on the device
{
for (auto& counts : assignCounts) {
auto& data = deviceListData_[counts.first];
data->resize(data->size() + counts.second * bytesPerVector_,
stream);
int newNumVecs = (int) (data->size() / bytesPerVector_);
auto& indices = deviceListIndices_[counts.first];
if ((indicesOptions_ == INDICES_32_BIT) ||
(indicesOptions_ == INDICES_64_BIT)) {
size_t indexSize =
(indicesOptions_ == INDICES_32_BIT) ? sizeof(int) : sizeof(long);
indices->resize(indices->size() + counts.second * indexSize, stream);
} else if (indicesOptions_ == INDICES_CPU) {
// indices are stored on the CPU side
FAISS_ASSERT(counts.first < listOffsetToUserIndex_.size());
auto& userIndices = listOffsetToUserIndex_[counts.first];
userIndices.resize(newNumVecs);
} else {
// indices are not stored on the GPU or CPU side
FAISS_ASSERT(indicesOptions_ == INDICES_IVF);
}
// This is used by the multi-pass query to decide how much scratch
// space to allocate for intermediate results
maxListLength_ = std::max(maxListLength_, newNumVecs);
}
// Update all pointers to the lists on the device that may have
// changed
{
std::vector<int> listIds(assignCounts.size());
int i = 0;
for (auto& counts : assignCounts) {
listIds[i++] = counts.first;
}
updateDeviceListInfo_(listIds, stream);
}
}
// If we're maintaining the indices on the CPU side, update our
// map. We already resized our map above.
if (indicesOptions_ == INDICES_CPU) {
// We need to maintain the indices on the CPU side
HostTensor<long, 1, true> hostIndices(indices, stream);
for (int i = 0; i < hostIndices.getSize(0); ++i) {
int listId = listIdsHost[i];
// Add vector could be invalid (contains NaNs etc)
if (listId < 0) {
continue;
}
int offset = listOffsetHost[i];
FAISS_ASSERT(listId < listOffsetToUserIndex_.size());
auto& userIndices = listOffsetToUserIndex_[listId];
FAISS_ASSERT(offset < userIndices.size());
userIndices[offset] = hostIndices[i];
}
}
// We similarly need to actually append the new vectors
{
DeviceTensor<int, 1, true> listOffset(
resources_, makeTempAlloc(AllocType::Other, stream), listOffsetHost);
// Now, for each list to which a vector is being assigned, write it
runIVFFlatInvertedListAppend(listIds,
listOffset,
vecs,
indices,
useResidual_,
residuals,
scalarQ_.get(),
deviceListDataPointers_,
deviceListIndexPointers_,
indicesOptions_,
stream);
}
return numAdded;
}
void
IVFFlat::query(Tensor<float, 2, true>& queries,
int nprobe,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<long, 2, true>& outIndices) {
auto stream = resources_->getDefaultStreamCurrentDevice();
// These are caught at a higher level
FAISS_ASSERT(nprobe <= GPU_MAX_SELECTION_K);
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
nprobe = std::min(nprobe, quantizer_->getSize());
FAISS_ASSERT(queries.getSize(1) == dim_);
FAISS_ASSERT(outDistances.getSize(0) == queries.getSize(0));
FAISS_ASSERT(outIndices.getSize(0) == queries.getSize(0));
// Reserve space for the quantized information
DeviceTensor<float, 2, true> coarseDistances(
resources_, makeTempAlloc(AllocType::Other, stream), {queries.getSize(0), nprobe});
DeviceTensor<int, 2, true> coarseIndices(
resources_, makeTempAlloc(AllocType::Other, stream), {queries.getSize(0), nprobe});
// Find the `nprobe` closest lists; we can use int indices both
// internally and externally
quantizer_->query(queries,
nprobe,
metric_,
metricArg_,
coarseDistances,
coarseIndices,
false);
DeviceTensor<float, 3, true> residualBase(
resources_, makeTempAlloc(AllocType::Other, stream),
{queries.getSize(0), nprobe, dim_});
if (useResidual_) {
// Reconstruct vectors from the quantizer
quantizer_->reconstruct(coarseIndices, residualBase);
}
runIVFFlatScan(queries,
coarseIndices,
deviceListDataPointers_,
deviceListIndexPointers_,
indicesOptions_,
deviceListLengths_,
maxListLength_,
k,
metric_,
useResidual_,
residualBase,
scalarQ_.get(),
outDistances,
outIndices,
resources_);
// If the GPU isn't storing indices (they are on the CPU side), we
// need to perform the re-mapping here
// FIXME: we might ultimately be calling this function with inputs
// from the CPU, these are unnecessary copies
if (indicesOptions_ == INDICES_CPU) {
HostTensor<long, 2, true> hostOutIndices(outIndices, stream);
ivfOffsetToUserIndex(hostOutIndices.data(),
numLists_,
hostOutIndices.getSize(0),
hostOutIndices.getSize(1),
listOffsetToUserIndex_);
// Copy back to GPU, since the input to this function is on the
// GPU
outIndices.copyFrom(hostOutIndices, stream);
}
}
} } // namespace
|
54aea4e27dcaba487dc0b36dfeac3fe78d60e2f6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <sparse/csr.cuh>
#include "csr.h"
#include <common/cudart_utils.h>
#include <random/rng.cuh>
#include "test_utils.h"
#include <iostream>
#include <limits>
namespace MLCommon {
namespace Sparse {
template <typename T>
class CSRTest : public ::testing::TestWithParam<CSRInputs<T>> {
protected:
void SetUp() override {}
void TearDown() override {}
protected:
CSRInputs<T> params;
};
const std::vector<CSRInputs<float>> inputsf = {{5, 10, 5, 1234ULL}};
typedef CSRTest<float> CSRToCOO;
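// Worked example: ex_scan {0, 4, 8, 9} is the row-offset array of a 4-row CSR
// matrix with 10 non-zeros, so the rows own 4, 4, 1 and 1 entries and the
// expanded COO row array is {0,0,0,0, 1,1,1,1, 2, 3} -- exactly verify_h below.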
TEST_P(CSRToCOO, Result) {
hipStream_t stream;
hipStreamCreate(&stream);
int *ex_scan;
int *result, *verify;
int *ex_scan_h = new int[4]{0, 4, 8, 9};
int *verify_h = new int[10]{0, 0, 0, 0, 1, 1, 1, 1, 2, 3};
raft::allocate(verify, 10);
raft::allocate(ex_scan, 4);
raft::allocate(result, 10, true);
raft::update_device(ex_scan, ex_scan_h, 4, stream);
raft::update_device(verify, verify_h, 10, stream);
csr_to_coo<32>(ex_scan, 4, result, 10, stream);
ASSERT_TRUE(
raft::devArrMatch<int>(verify, result, 10, raft::Compare<float>(), stream));
delete[] ex_scan_h;
delete[] verify_h;
CUDA_CHECK(hipFree(ex_scan));
CUDA_CHECK(hipFree(verify));
CUDA_CHECK(hipFree(result));
hipStreamDestroy(stream);
}
typedef CSRTest<float> CSRRowNormalizeMax;
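// Max-normalization divides every row by its largest entry: {5,1,0,0} ->
// {1,0.2,0,0}, {10,1,0,0} -> {1,0.1,0,0}, {1} -> {1}; the all-zero last row
// is left untouched.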
TEST_P(CSRRowNormalizeMax, Result) {
hipStream_t stream;
hipStreamCreate(&stream);
int *ex_scan;
float *in_vals, *result, *verify;
int ex_scan_h[4] = {0, 4, 8, 9};
float in_vals_h[10] = {5.0, 1.0, 0.0, 0.0, 10.0, 1.0, 0.0, 0.0, 1.0, 0.0};
float verify_h[10] = {1.0, 0.2, 0.0, 0.0, 1.0, 0.1, 0.0, 0.0, 1, 0.0};
raft::allocate(in_vals, 10);
raft::allocate(verify, 10);
raft::allocate(ex_scan, 4);
raft::allocate(result, 10, true);
raft::update_device(ex_scan, *&ex_scan_h, 4, stream);
raft::update_device(in_vals, *&in_vals_h, 10, stream);
raft::update_device(verify, *&verify_h, 10, stream);
csr_row_normalize_max<32, float>(ex_scan, in_vals, 10, 4, result, stream);
ASSERT_TRUE(
raft::devArrMatch<float>(verify, result, 10, raft::Compare<float>()));
hipStreamDestroy(stream);
CUDA_CHECK(hipFree(ex_scan));
CUDA_CHECK(hipFree(in_vals));
CUDA_CHECK(hipFree(verify));
CUDA_CHECK(hipFree(result));
}
typedef CSRTest<float> CSRRowNormalizeL1;
TEST_P(CSRRowNormalizeL1, Result) {
int *ex_scan;
float *in_vals, *result, *verify;
int ex_scan_h[4] = {0, 4, 8, 9};
float in_vals_h[10] = {1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0};
float verify_h[10] = {0.5, 0.5, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 1, 0.0};
raft::allocate(in_vals, 10);
raft::allocate(verify, 10);
raft::allocate(ex_scan, 4);
raft::allocate(result, 10, true);
raft::update_device(ex_scan, *&ex_scan_h, 4, 0);
raft::update_device(in_vals, *&in_vals_h, 10, 0);
raft::update_device(verify, *&verify_h, 10, 0);
csr_row_normalize_l1<32, float>(ex_scan, in_vals, 10, 4, result, 0);
hipDeviceSynchronize();
ASSERT_TRUE(
raft::devArrMatch<float>(verify, result, 10, raft::Compare<float>()));
CUDA_CHECK(hipFree(ex_scan));
CUDA_CHECK(hipFree(in_vals));
CUDA_CHECK(hipFree(verify));
CUDA_CHECK(hipFree(result));
}
typedef CSRTest<float> CSRSum;
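// csr_add_calc_inds sizes the output of A + B from the union of the two
// sparsity patterns (5 + 5 + 2 + 2 = 14 entries for the rows below), and
// csr_add_finalize then writes the merged column indices and summed values,
// so coincident entries add up (e.g. 1.0 + 1.0 = 2.0 in row 0).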
TEST_P(CSRSum, Result) {
hipStream_t stream;
hipStreamCreate(&stream);
std::shared_ptr<deviceAllocator> alloc(
new raft::mr::device::default_allocator);
int *ex_scan, *ind_ptr_a, *ind_ptr_b, *verify_indptr;
float *in_vals_a, *in_vals_b, *verify;
int ex_scan_h[4] = {0, 4, 8, 9};
int indptr_a_h[10] = {1, 2, 3, 4, 1, 2, 3, 5, 0, 1};
int indptr_b_h[10] = {1, 2, 5, 4, 0, 2, 3, 5, 1, 0};
float in_vals_h[10] = {1.0, 1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, 1.0, 1.0};
float verify_h[14] = {2.0, 2.0, 0.5, 1.0, 0.5, 1.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
int verify_indptr_h[14] = {1, 2, 3, 4, 5, 1, 2, 3, 5, 0, 0, 1, 1, 0};
raft::allocate(in_vals_a, 10);
raft::allocate(in_vals_b, 10);
raft::allocate(verify, 14);
raft::allocate(ex_scan, 4);
raft::allocate(verify_indptr, 14);
raft::allocate(ind_ptr_a, 10);
raft::allocate(ind_ptr_b, 10);
raft::update_device(ex_scan, *&ex_scan_h, 4, stream);
raft::update_device(in_vals_a, *&in_vals_h, 10, stream);
raft::update_device(in_vals_b, *&in_vals_h, 10, stream);
raft::update_device(verify, *&verify_h, 14, stream);
raft::update_device(verify_indptr, *&verify_indptr_h, 14, stream);
raft::update_device(ind_ptr_a, *&indptr_a_h, 10, stream);
raft::update_device(ind_ptr_b, *&indptr_b_h, 10, stream);
int *result_ind;
raft::allocate(result_ind, 4);
int nnz = csr_add_calc_inds<float, 32>(ex_scan, ind_ptr_a, in_vals_a, 10,
ex_scan, ind_ptr_b, in_vals_b, 10, 4,
result_ind, alloc, stream);
int *result_indptr;
float *result_val;
raft::allocate(result_indptr, nnz);
raft::allocate(result_val, nnz);
csr_add_finalize<float, 32>(ex_scan, ind_ptr_a, in_vals_a, 10, ex_scan,
ind_ptr_b, in_vals_b, 10, 4, result_ind,
result_indptr, result_val, stream);
ASSERT_TRUE(nnz == 14);
ASSERT_TRUE(
raft::devArrMatch<float>(verify, result_val, nnz, raft::Compare<float>()));
ASSERT_TRUE(raft::devArrMatch<int>(verify_indptr, result_indptr, nnz,
raft::Compare<int>()));
hipStreamDestroy(stream);
CUDA_CHECK(hipFree(ex_scan));
CUDA_CHECK(hipFree(in_vals_a));
CUDA_CHECK(hipFree(in_vals_b));
CUDA_CHECK(hipFree(ind_ptr_a));
CUDA_CHECK(hipFree(ind_ptr_b));
CUDA_CHECK(hipFree(verify));
CUDA_CHECK(hipFree(result_indptr));
CUDA_CHECK(hipFree(result_val));
}
typedef CSRTest<float> CSRRowOpTest;
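// csr_row_op calls the device lambda once per CSR row with that row's
// [start_idx, stop_idx) non-zero range; writing the row id into each slot
// therefore reproduces the csr_to_coo expansion checked above.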
TEST_P(CSRRowOpTest, Result) {
hipStream_t stream;
hipStreamCreate(&stream);
int *ex_scan;
float *result, *verify;
int ex_scan_h[4] = {0, 4, 8, 9};
float verify_h[10] = {0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 2.0, 3.0};
raft::allocate(verify, 10);
raft::allocate(ex_scan, 4);
raft::allocate(result, 10, true);
raft::update_device(ex_scan, *&ex_scan_h, 4, stream);
raft::update_device(verify, *&verify_h, 10, stream);
csr_row_op<int, 32>(
ex_scan, 4, 10,
[result] __device__(int row, int start_idx, int stop_idx) {
for (int i = start_idx; i < stop_idx; i++) result[i] = row;
},
stream);
ASSERT_TRUE(
raft::devArrMatch<float>(verify, result, 10, raft::Compare<float>()));
hipStreamDestroy(stream);
CUDA_CHECK(hipFree(ex_scan));
CUDA_CHECK(hipFree(verify));
CUDA_CHECK(hipFree(result));
}
typedef CSRTest<float> AdjGraphTest;
TEST_P(AdjGraphTest, Result) {
hipStream_t stream;
hipStreamCreate(&stream);
int *row_ind, *result, *verify;
bool *adj;
int row_ind_h[3] = {0, 3, 6};
bool adj_h[18] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int verify_h[9] = {0, 1, 2, 0, 1, 2, 0, 1, 2};
raft::allocate(row_ind, 3);
raft::allocate(adj, 18);
raft::allocate(result, 9, true);
raft::allocate(verify, 9);
raft::update_device(row_ind, *&row_ind_h, 3, stream);
raft::update_device(adj, *&adj_h, 18, stream);
raft::update_device(verify, *&verify_h, 9, stream);
csr_adj_graph_batched<int, 32>(row_ind, 6, 9, 3, adj, result, stream);
ASSERT_TRUE(raft::devArrMatch<int>(verify, result, 9, raft::Compare<int>()));
hipStreamDestroy(stream);
CUDA_CHECK(hipFree(row_ind));
CUDA_CHECK(hipFree(adj));
CUDA_CHECK(hipFree(verify));
CUDA_CHECK(hipFree(result));
}
typedef CSRTest<float> WeakCCTest;
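// Weakly-connected components computed over two batches of a 6-vertex graph:
// batch #1 only touches vertices 0-2, which collapse into component 1 while
// the remaining vertices keep the numeric_limits<int>::max() sentinel
// (2147483647); batch #2 then links the rest, which end up as component 5.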
TEST_P(WeakCCTest, Result) {
hipStream_t stream;
hipStreamCreate(&stream);
std::shared_ptr<deviceAllocator> alloc(
new raft::mr::device::default_allocator);
int *row_ind, *row_ind_ptr, *result, *verify;
int row_ind_h1[3] = {0, 3, 6};
int row_ind_ptr_h1[9] = {0, 1, 2, 0, 1, 2, 0, 1, 2};
int verify_h1[6] = {1, 1, 1, 2147483647, 2147483647, 2147483647};
int row_ind_h2[3] = {0, 2, 4};
int row_ind_ptr_h2[5] = {3, 4, 3, 4, 5};
int verify_h2[6] = {1, 1, 1, 5, 5, 5};
raft::allocate(row_ind, 3);
raft::allocate(row_ind_ptr, 9);
raft::allocate(result, 9, true);
raft::allocate(verify, 9);
device_buffer<bool> xa(alloc, stream, 6);
device_buffer<bool> fa(alloc, stream, 6);
device_buffer<bool> m(alloc, stream, 1);
WeakCCState state(xa.data(), fa.data(), m.data());
/**
* Run batch #1
*/
raft::update_device(row_ind, *&row_ind_h1, 3, stream);
raft::update_device(row_ind_ptr, *&row_ind_ptr_h1, 9, stream);
raft::update_device(verify, *&verify_h1, 6, stream);
weak_cc_batched<int, 32>(result, row_ind, row_ind_ptr, 9, 6, 0, 3, &state,
stream);
hipStreamSynchronize(stream);
ASSERT_TRUE(raft::devArrMatch<int>(verify, result, 6, raft::Compare<int>()));
/**
* Run batch #2
*/
raft::update_device(row_ind, *&row_ind_h2, 3, stream);
raft::update_device(row_ind_ptr, *&row_ind_ptr_h2, 5, stream);
raft::update_device(verify, *&verify_h2, 6, stream);
weak_cc_batched<int, 32>(result, row_ind, row_ind_ptr, 5, 6, 4, 3, &state,
stream);
ASSERT_TRUE(raft::devArrMatch<int>(verify, result, 6, raft::Compare<int>()));
hipStreamSynchronize(stream);
hipStreamDestroy(stream);
CUDA_CHECK(hipFree(row_ind));
CUDA_CHECK(hipFree(row_ind_ptr));
CUDA_CHECK(hipFree(verify));
CUDA_CHECK(hipFree(result));
}
INSTANTIATE_TEST_CASE_P(CSRTests, WeakCCTest, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CSRTests, AdjGraphTest, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CSRTests, CSRRowOpTest, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CSRTests, CSRToCOO, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CSRTests, CSRRowNormalizeMax,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CSRTests, CSRRowNormalizeL1,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CSRTests, CSRSum, ::testing::ValuesIn(inputsf));
} // namespace Sparse
} // namespace MLCommon
| 54aea4e27dcaba487dc0b36dfeac3fe78d60e2f6.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <sparse/csr.cuh>
#include "csr.h"
#include <common/cudart_utils.h>
#include <random/rng.cuh>
#include "test_utils.h"
#include <iostream>
#include <limits>
namespace MLCommon {
namespace Sparse {
template <typename T>
class CSRTest : public ::testing::TestWithParam<CSRInputs<T>> {
protected:
void SetUp() override {}
void TearDown() override {}
protected:
CSRInputs<T> params;
};
const std::vector<CSRInputs<float>> inputsf = {{5, 10, 5, 1234ULL}};
typedef CSRTest<float> CSRToCOO;
TEST_P(CSRToCOO, Result) {
cudaStream_t stream;
cudaStreamCreate(&stream);
int *ex_scan;
int *result, *verify;
int *ex_scan_h = new int[4]{0, 4, 8, 9};
int *verify_h = new int[10]{0, 0, 0, 0, 1, 1, 1, 1, 2, 3};
raft::allocate(verify, 10);
raft::allocate(ex_scan, 4);
raft::allocate(result, 10, true);
raft::update_device(ex_scan, ex_scan_h, 4, stream);
raft::update_device(verify, verify_h, 10, stream);
csr_to_coo<32>(ex_scan, 4, result, 10, stream);
ASSERT_TRUE(
raft::devArrMatch<int>(verify, result, 10, raft::Compare<float>(), stream));
delete[] ex_scan_h;
delete[] verify_h;
CUDA_CHECK(cudaFree(ex_scan));
CUDA_CHECK(cudaFree(verify));
CUDA_CHECK(cudaFree(result));
cudaStreamDestroy(stream);
}
typedef CSRTest<float> CSRRowNormalizeMax;
TEST_P(CSRRowNormalizeMax, Result) {
cudaStream_t stream;
cudaStreamCreate(&stream);
int *ex_scan;
float *in_vals, *result, *verify;
int ex_scan_h[4] = {0, 4, 8, 9};
float in_vals_h[10] = {5.0, 1.0, 0.0, 0.0, 10.0, 1.0, 0.0, 0.0, 1.0, 0.0};
float verify_h[10] = {1.0, 0.2, 0.0, 0.0, 1.0, 0.1, 0.0, 0.0, 1, 0.0};
raft::allocate(in_vals, 10);
raft::allocate(verify, 10);
raft::allocate(ex_scan, 4);
raft::allocate(result, 10, true);
raft::update_device(ex_scan, *&ex_scan_h, 4, stream);
raft::update_device(in_vals, *&in_vals_h, 10, stream);
raft::update_device(verify, *&verify_h, 10, stream);
csr_row_normalize_max<32, float>(ex_scan, in_vals, 10, 4, result, stream);
ASSERT_TRUE(
raft::devArrMatch<float>(verify, result, 10, raft::Compare<float>()));
cudaStreamDestroy(stream);
CUDA_CHECK(cudaFree(ex_scan));
CUDA_CHECK(cudaFree(in_vals));
CUDA_CHECK(cudaFree(verify));
CUDA_CHECK(cudaFree(result));
}
typedef CSRTest<float> CSRRowNormalizeL1;
TEST_P(CSRRowNormalizeL1, Result) {
int *ex_scan;
float *in_vals, *result, *verify;
int ex_scan_h[4] = {0, 4, 8, 9};
float in_vals_h[10] = {1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0};
float verify_h[10] = {0.5, 0.5, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 1, 0.0};
raft::allocate(in_vals, 10);
raft::allocate(verify, 10);
raft::allocate(ex_scan, 4);
raft::allocate(result, 10, true);
raft::update_device(ex_scan, *&ex_scan_h, 4, 0);
raft::update_device(in_vals, *&in_vals_h, 10, 0);
raft::update_device(verify, *&verify_h, 10, 0);
csr_row_normalize_l1<32, float>(ex_scan, in_vals, 10, 4, result, 0);
cudaDeviceSynchronize();
ASSERT_TRUE(
raft::devArrMatch<float>(verify, result, 10, raft::Compare<float>()));
CUDA_CHECK(cudaFree(ex_scan));
CUDA_CHECK(cudaFree(in_vals));
CUDA_CHECK(cudaFree(verify));
CUDA_CHECK(cudaFree(result));
}
typedef CSRTest<float> CSRSum;
TEST_P(CSRSum, Result) {
cudaStream_t stream;
cudaStreamCreate(&stream);
std::shared_ptr<deviceAllocator> alloc(
new raft::mr::device::default_allocator);
int *ex_scan, *ind_ptr_a, *ind_ptr_b, *verify_indptr;
float *in_vals_a, *in_vals_b, *verify;
int ex_scan_h[4] = {0, 4, 8, 9};
int indptr_a_h[10] = {1, 2, 3, 4, 1, 2, 3, 5, 0, 1};
int indptr_b_h[10] = {1, 2, 5, 4, 0, 2, 3, 5, 1, 0};
float in_vals_h[10] = {1.0, 1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, 1.0, 1.0};
float verify_h[14] = {2.0, 2.0, 0.5, 1.0, 0.5, 1.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
int verify_indptr_h[14] = {1, 2, 3, 4, 5, 1, 2, 3, 5, 0, 0, 1, 1, 0};
raft::allocate(in_vals_a, 10);
raft::allocate(in_vals_b, 10);
raft::allocate(verify, 14);
raft::allocate(ex_scan, 4);
raft::allocate(verify_indptr, 14);
raft::allocate(ind_ptr_a, 10);
raft::allocate(ind_ptr_b, 10);
raft::update_device(ex_scan, *&ex_scan_h, 4, stream);
raft::update_device(in_vals_a, *&in_vals_h, 10, stream);
raft::update_device(in_vals_b, *&in_vals_h, 10, stream);
raft::update_device(verify, *&verify_h, 14, stream);
raft::update_device(verify_indptr, *&verify_indptr_h, 14, stream);
raft::update_device(ind_ptr_a, *&indptr_a_h, 10, stream);
raft::update_device(ind_ptr_b, *&indptr_b_h, 10, stream);
int *result_ind;
raft::allocate(result_ind, 4);
int nnz = csr_add_calc_inds<float, 32>(ex_scan, ind_ptr_a, in_vals_a, 10,
ex_scan, ind_ptr_b, in_vals_b, 10, 4,
result_ind, alloc, stream);
int *result_indptr;
float *result_val;
raft::allocate(result_indptr, nnz);
raft::allocate(result_val, nnz);
csr_add_finalize<float, 32>(ex_scan, ind_ptr_a, in_vals_a, 10, ex_scan,
ind_ptr_b, in_vals_b, 10, 4, result_ind,
result_indptr, result_val, stream);
ASSERT_TRUE(nnz == 14);
ASSERT_TRUE(
raft::devArrMatch<float>(verify, result_val, nnz, raft::Compare<float>()));
ASSERT_TRUE(raft::devArrMatch<int>(verify_indptr, result_indptr, nnz,
raft::Compare<int>()));
cudaStreamDestroy(stream);
CUDA_CHECK(cudaFree(ex_scan));
CUDA_CHECK(cudaFree(in_vals_a));
CUDA_CHECK(cudaFree(in_vals_b));
CUDA_CHECK(cudaFree(ind_ptr_a));
CUDA_CHECK(cudaFree(ind_ptr_b));
CUDA_CHECK(cudaFree(verify));
CUDA_CHECK(cudaFree(result_indptr));
CUDA_CHECK(cudaFree(result_val));
}
typedef CSRTest<float> CSRRowOpTest;
TEST_P(CSRRowOpTest, Result) {
cudaStream_t stream;
cudaStreamCreate(&stream);
int *ex_scan;
float *result, *verify;
int ex_scan_h[4] = {0, 4, 8, 9};
float verify_h[10] = {0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 2.0, 3.0};
raft::allocate(verify, 10);
raft::allocate(ex_scan, 4);
raft::allocate(result, 10, true);
raft::update_device(ex_scan, *&ex_scan_h, 4, stream);
raft::update_device(verify, *&verify_h, 10, stream);
csr_row_op<int, 32>(
ex_scan, 4, 10,
[result] __device__(int row, int start_idx, int stop_idx) {
for (int i = start_idx; i < stop_idx; i++) result[i] = row;
},
stream);
ASSERT_TRUE(
raft::devArrMatch<float>(verify, result, 10, raft::Compare<float>()));
cudaStreamDestroy(stream);
CUDA_CHECK(cudaFree(ex_scan));
CUDA_CHECK(cudaFree(verify));
CUDA_CHECK(cudaFree(result));
}
typedef CSRTest<float> AdjGraphTest;
TEST_P(AdjGraphTest, Result) {
cudaStream_t stream;
cudaStreamCreate(&stream);
int *row_ind, *result, *verify;
bool *adj;
int row_ind_h[3] = {0, 3, 6};
bool adj_h[18] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int verify_h[9] = {0, 1, 2, 0, 1, 2, 0, 1, 2};
raft::allocate(row_ind, 3);
raft::allocate(adj, 18);
raft::allocate(result, 9, true);
raft::allocate(verify, 9);
raft::update_device(row_ind, *&row_ind_h, 3, stream);
raft::update_device(adj, *&adj_h, 18, stream);
raft::update_device(verify, *&verify_h, 9, stream);
csr_adj_graph_batched<int, 32>(row_ind, 6, 9, 3, adj, result, stream);
ASSERT_TRUE(raft::devArrMatch<int>(verify, result, 9, raft::Compare<int>()));
cudaStreamDestroy(stream);
CUDA_CHECK(cudaFree(row_ind));
CUDA_CHECK(cudaFree(adj));
CUDA_CHECK(cudaFree(verify));
CUDA_CHECK(cudaFree(result));
}
typedef CSRTest<float> WeakCCTest;
TEST_P(WeakCCTest, Result) {
cudaStream_t stream;
cudaStreamCreate(&stream);
std::shared_ptr<deviceAllocator> alloc(
new raft::mr::device::default_allocator);
int *row_ind, *row_ind_ptr, *result, *verify;
int row_ind_h1[3] = {0, 3, 6};
int row_ind_ptr_h1[9] = {0, 1, 2, 0, 1, 2, 0, 1, 2};
int verify_h1[6] = {1, 1, 1, 2147483647, 2147483647, 2147483647};
int row_ind_h2[3] = {0, 2, 4};
int row_ind_ptr_h2[5] = {3, 4, 3, 4, 5};
int verify_h2[6] = {1, 1, 1, 5, 5, 5};
raft::allocate(row_ind, 3);
raft::allocate(row_ind_ptr, 9);
raft::allocate(result, 9, true);
raft::allocate(verify, 9);
device_buffer<bool> xa(alloc, stream, 6);
device_buffer<bool> fa(alloc, stream, 6);
device_buffer<bool> m(alloc, stream, 1);
WeakCCState state(xa.data(), fa.data(), m.data());
/**
* Run batch #1
*/
raft::update_device(row_ind, *&row_ind_h1, 3, stream);
raft::update_device(row_ind_ptr, *&row_ind_ptr_h1, 9, stream);
raft::update_device(verify, *&verify_h1, 6, stream);
weak_cc_batched<int, 32>(result, row_ind, row_ind_ptr, 9, 6, 0, 3, &state,
stream);
cudaStreamSynchronize(stream);
ASSERT_TRUE(raft::devArrMatch<int>(verify, result, 6, raft::Compare<int>()));
/**
* Run batch #2
*/
raft::update_device(row_ind, *&row_ind_h2, 3, stream);
raft::update_device(row_ind_ptr, *&row_ind_ptr_h2, 5, stream);
raft::update_device(verify, *&verify_h2, 6, stream);
weak_cc_batched<int, 32>(result, row_ind, row_ind_ptr, 5, 6, 4, 3, &state,
stream);
ASSERT_TRUE(raft::devArrMatch<int>(verify, result, 6, raft::Compare<int>()));
cudaStreamSynchronize(stream);
cudaStreamDestroy(stream);
CUDA_CHECK(cudaFree(row_ind));
CUDA_CHECK(cudaFree(row_ind_ptr));
CUDA_CHECK(cudaFree(verify));
CUDA_CHECK(cudaFree(result));
}
INSTANTIATE_TEST_CASE_P(CSRTests, WeakCCTest, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CSRTests, AdjGraphTest, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CSRTests, CSRRowOpTest, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CSRTests, CSRToCOO, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CSRTests, CSRRowNormalizeMax,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CSRTests, CSRRowNormalizeL1,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CSRTests, CSRSum, ::testing::ValuesIn(inputsf));
} // namespace Sparse
} // namespace MLCommon
|
a6896da63ccaece0d47f0b3638935de68a65d192.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2013-2014 [Author: Po-Wei Chou]
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <dnn-utility.h>
CURAND_STATE::CURAND_STATE(unsigned seed, int N): _states(NULL) {
hipMalloc ( &_states, N * N * sizeof( hiprandState_t ) );
  hipLaunchKernelGGL(setupCuRandState, dim3(1), dim3(N * N), 0, 0, _states, seed);
CCE(hipDeviceSynchronize());
}
hiprandState_t* CURAND_STATE::get() const {
return _states;
}
CURAND_STATE::~CURAND_STATE() {
hipFree(_states);
}
__global__ void setupCuRandState( hiprandState_t * state, unsigned long seed ) {
int x = blockIdx.x*blockDim.x + threadIdx.x;
hiprand_init ( seed, x, 0, &state[x] );
}
inline __device__ void get_curand_normal(float& x, hiprandState_t* state) {
x = hiprand_normal(state);
}
inline __device__ void get_curand_uniform(float& x, hiprandState_t* state) {
x = hiprand_uniform(state);
}
inline __device__ void sample_gaussian(float& x, hiprandState_t* state) {
x += hiprand_normal(state);
}
inline __device__ void sample_bernoulli(float& x, hiprandState_t* state) {
x = (float) (x >= hiprand_uniform(state));
}
template <Operation op>
__global__ void element_wise_curand_kernel(float* const data, hiprandState_t* globalState, unsigned int rows, unsigned int cols) {
int tx = threadIdx.x;
int ty = threadIdx.y;
// Matrix index
int x = blockIdx.x*blockDim.x + tx;
int y = blockIdx.y*blockDim.y + ty;
if (x >= cols || y >= rows)
return;
int i = x * rows + y;
int j = tx * blockDim.y + ty;
op(data[i], globalState + j);
__syncthreads();
}
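// sample() perturbs the probability matrix in place using a persistent cuRAND
// state: GAUSSIAN adds a standard-normal draw to every entry, BERNOULLI
// replaces each entry with 1 or 0 depending on whether it beats a uniform
// draw; fill_bias() (defined elsewhere) is applied to the result afterwards.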
void sample(mat &prob, UNIT_TYPE type) {
static CURAND_STATE state;
ALLOCATE_GRIDS_AND_THREADS(prob.getRows(), prob.getCols());
switch (type) {
case GAUSSIAN:
      hipLaunchKernelGGL(element_wise_curand_kernel<sample_gaussian>, dim3(grids), dim3(threads), 0, 0, prob.getData(), state.get(), prob.getRows(), prob.getCols());
break;
case BERNOULLI:
      hipLaunchKernelGGL(element_wise_curand_kernel<sample_bernoulli>, dim3(grids), dim3(threads), 0, 0, prob.getData(), state.get(), prob.getRows(), prob.getCols());
break;
}
CCE(hipDeviceSynchronize());
fill_bias(prob);
}
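// randn()/rand() fill an m x n matrix with standard-normal / uniform draws
// from a shared static cuRAND state; DEBUG builds fall back to the seeded
// host-side ext::randn / ext::rand so results stay reproducible.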
mat randn(int m, int n) {
#ifdef DEBUG
// Use ext::randn (which is set to seed 0) to debug.
mat x(m, n);
ext::randn(x);
return x;
#else
static CURAND_STATE state;
mat x(m, n);
ALLOCATE_GRIDS_AND_THREADS(m, n);
  hipLaunchKernelGGL(element_wise_curand_kernel<get_curand_normal>, dim3(grids), dim3(threads), 0, 0, x.getData(), state.get(), m, n);
CCE(hipDeviceSynchronize());
return x;
#endif
}
mat rand(int m, int n) {
#ifdef DEBUG
// Use ext::rand (which is set to seed 0) to debug.
mat x(m, n);
ext::rand(x);
return x;
#else
static CURAND_STATE state;
mat x(m, n);
ALLOCATE_GRIDS_AND_THREADS(m, n);
  hipLaunchKernelGGL(element_wise_curand_kernel<get_curand_uniform>, dim3(grids), dim3(threads), 0, 0, x.getData(), state.get(), m, n);
CCE(hipDeviceSynchronize());
return x;
#endif
}
map<int, int> getLabelMapping(const hmat& labels) {
map<int, int> classes;
for (size_t i=0; i<labels.size(); ++i)
classes[(int) labels[i]] = 1;
int counter = 0;
map<int, int>::iterator itr = classes.begin();
for (; itr != classes.end(); ++itr)
itr->second = ++counter;
return classes;
}
namespace ext {
void rescale(mat& data, float lower, float upper) {
float min = ext::min(data);
float max = ext::max(data);
float ratio = (upper - lower) / (max - min);
data = (data - min) * ratio + lower;
}
float max(const mat& v) {
thrust::device_ptr<float> vPtr(v.getData());
thrust::device_ptr<float> maxPtr = thrust::max_element(vPtr, vPtr + v.size());
thrust::host_vector<float> hMaxPtr(maxPtr, maxPtr + 1);
return hMaxPtr[0];
}
float min(const mat& v) {
thrust::device_ptr<float> vPtr(v.getData());
thrust::device_ptr<float> minPtr = thrust::min_element(vPtr, vPtr + v.size());
thrust::host_vector<float> hMaxPtr(minPtr, minPtr + 1);
return hMaxPtr[0];
}
float max(const hmat& v) {
float* m = thrust::max_element(v.getData(), v.getData() + v.size());
return *m;
}
float min(const hmat& v) {
float* m = thrust::min_element(v.getData(), v.getData() + v.size());
return *m;
}
};
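// Element-wise gradient of softmax + cross-entropy: error = output -
// onehot(target), where target[y] holds the 0-based class index of row y.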
__global__ void dcrossentropy_kernel(float* error, float* const target, float* const output, unsigned int rows, unsigned int cols) {
int tx = threadIdx.x;
int ty = threadIdx.y;
// Matrix index
int x = blockIdx.x*blockDim.x + tx;
int y = blockIdx.y*blockDim.y + ty;
if (x >= cols || y >= rows)
return;
int i = x * rows + y;
// target[y] need to be 0-based
error[i] = output[i] - (float) (target[y] == x);
__syncthreads();
}
void dCrossEntropy(mat& error, const mat &target, const mat& output) {
assert(error.getRows() == output.getRows() && error.getCols() == output.getCols());
ALLOCATE_GRIDS_AND_THREADS(error.getRows(), error.getCols());
  hipLaunchKernelGGL(dcrossentropy_kernel, dim3(grids), dim3(threads), 0, 0,
error.getData(), target.getData(), output.getData(),
error.getRows(), error.getCols());
CCE(hipDeviceSynchronize());
}
mat getError(const mat& target, const mat& output, ERROR_MEASURE errorMeasure) {
mat error(output.getRows(), output.getCols());
switch (errorMeasure) {
case L2ERROR:
// FIXME
/*error = output - target;
error.reserve(error.getRows() * (error.getCols() + 1));
error.resize(error.getRows(), error.getCols() + 1);*/
break;
case CROSS_ENTROPY:
dCrossEntropy(error, target, output);
break;
}
return error;
}
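// Arg-max decoding: copies the (rows x classes) posterior matrix to the host
// and emits, for every row, the column index of its largest probability.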
mat posteriorProb2Label(const mat& prob) {
assert(prob.getCols() > 1);
size_t rows = prob.getRows(),
cols = prob.getCols();
float* h_prob = new float[prob.size()];
float* h_labels = new float[rows];
CCE(hipMemcpy(h_prob, prob.getData(), sizeof(float) * prob.size(), hipMemcpyDeviceToHost));
CCE(hipDeviceSynchronize());
for (size_t i=0; i<rows; ++i) {
float max = -1e10;
size_t maxIdx = 0;
for (size_t j=0; j<cols; ++j) {
if (h_prob[j * rows + i] > max) {
max = h_prob[j * rows + i];
maxIdx = j;
}
}
h_labels[i] = maxIdx;
}
mat labels(h_labels, rows, 1);
delete [] h_prob;
delete [] h_labels;
return labels;
}
vector<float> copyToHost(const mat& m) {
vector<float> hm(m.size());
thrust::device_ptr<float> dPtr(m.getData());
thrust::copy(dPtr, dPtr + m.size(), hm.begin());
return hm;
}
size_t countDifference(const mat& m1, const mat& m2) {
assert(m1.size() == m2.size());
size_t L = m1.size();
thrust::device_ptr<float> ptr1(m1.getData());
thrust::device_ptr<float> ptr2(m2.getData());
size_t nDiff = thrust::inner_product(ptr1, ptr1 + L, ptr2, 0.0, thrust::plus<float>(), thrust::not_equal_to<float>());
return nDiff;
}
size_t zeroOneError(const mat& prob, const mat& label, ERROR_MEASURE errorMeasure) {
assert(prob.getRows() == label.getRows());
assert(label.getCols() == 1);
size_t nError = 0;
if (errorMeasure == L2ERROR) {
// nError = countDifference(label, prob);
}
else {
mat L = posteriorProb2Label(prob);
nError = countDifference(L, label);
}
return nError;
}
template <typename T>
device_matrix<T> MaxPerRow(device_matrix<T>& A) {
device_matrix<T> rmax(A.getRows(), 1);
device_matrix<T> At = ~A;
// allocate storage for per-row results and indices
thrust::device_vector<T> row_indices(A.getRows());
thrust::device_vector<T> row_results(A.getRows());
// compute row sums by summing values with equal row indices
thrust::reduce_by_key
(thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(A.getCols())),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(A.getCols())) + A.size(),
thrust::device_ptr<T>(At.getData()),
row_indices.begin(),
thrust::device_ptr<T>(rmax.getData()),
thrust::equal_to<T>(),
thrust::maximum<T>());
return rmax;
}
template <typename T>
__global__ void substract_max_per_row_kernel(T* const A, T* const rmax, unsigned int rows, unsigned int cols) {
// Matrix index
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= cols || y >= rows)
return;
A[x * rows + y] -= rmax[y];
}
template <typename T>
void SubstractMaxPerRow(device_matrix<T>& x) {
device_matrix<T> rmax = MaxPerRow(x);
ALLOCATE_GRIDS_AND_THREADS(x.getRows(), x.getCols());
  hipLaunchKernelGGL(substract_max_per_row_kernel<float>, dim3(grids), dim3(threads), 0, 0,
x.getData(), rmax.getData(), x.getRows(), x.getCols());
CCE(hipDeviceSynchronize());
}
template <typename T>
void fillLastColumnWith(device_matrix<T>& A, const T value) {
thrust::device_ptr<T> ptr(A.getData());
thrust::fill(ptr + A.size() - A.getRows(), ptr + A.size(), value);
}
template <typename T>
device_matrix<T> operator & (const device_matrix<T>& A, const device_matrix<T>& B) {
assert(A.getRows() == B.getRows() && A.getCols() == B.getCols());
device_matrix<T> C(A.getRows(), A.getCols());
thrust::device_ptr<T> aPtr(A.getData());
thrust::device_ptr<T> bPtr(B.getData());
thrust::device_ptr<T> cPtr(C.getData());
thrust::transform(aPtr, aPtr + A.size(), bPtr, cPtr, thrust::multiplies<T>());
return C;
}
template <typename T>
device_matrix<T> log(const device_matrix<T>& x) {
return transform(x, func::log<T>());
}
template <typename T>
device_matrix<T> log1pexp(const device_matrix<T>& x) {
return transform(x, func::log_of_one_plus_exp<T>());
}
template <typename T>
device_matrix<T> sigmoid(const device_matrix<T>& x) {
return transform(x, func::sigmoid<T>());
}
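// Numerically stable softmax: the trailing (bias) column of x is dropped, the
// per-row maximum is subtracted before exponentiation, each row is divided by
// its sum, and the result is widened back to the original column count.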
template <typename T>
device_matrix<T> softmax(const device_matrix<T>& x) {
mat x2(x);
x2.resize(x2.getRows(), x2.getCols() - 1);
SubstractMaxPerRow(x2);
mat p(x2.getRows(), x2.getCols());
thrust::device_ptr<T> xPtr(x2.getData());
thrust::device_ptr<T> pPtr(p.getData());
thrust::transform(xPtr, xPtr + x2.size(), pPtr, func::exp<T>());
mat sumOfProb = p * mat(p.getCols(), p.getCols(), 1);
mat y(p.getRows(), p.getCols() + 1);
thrust::device_ptr<T> yPtr(y.getData());
thrust::device_ptr<T> sPtr(sumOfProb.getData());
thrust::transform(pPtr, pPtr + p.size(), sPtr, yPtr, thrust::divides<T>());
return y;
}
/* \brief Explicit instantiation definition of template functions
*/
#define register_device_matrix_utility(T) \
template device_matrix<T> operator &<T> (const device_matrix<T>& A, const device_matrix<T>& B); \
template void fillLastColumnWith<T>(device_matrix<T>& A, const T value); \
template device_matrix<T> log<T>(const device_matrix<T>& x); \
template device_matrix<T> log1pexp<T>(const device_matrix<T>& x); \
template device_matrix<T> sigmoid<T>(const device_matrix<T>& x); \
template device_matrix<T> softmax<T>(const device_matrix<T>& x); \
template device_matrix<T> MaxPerRow<T>(device_matrix<T>& A); \
template void SubstractMaxPerRow<T>(device_matrix<T>& x);
register_device_matrix_utility(float);
| a6896da63ccaece0d47f0b3638935de68a65d192.cu | // Copyright 2013-2014 [Author: Po-Wei Chou]
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <dnn-utility.h>
CURAND_STATE::CURAND_STATE(unsigned seed, int N): _states(NULL) {
cudaMalloc ( &_states, N * N * sizeof( curandState ) );
setupCuRandState <<< 1, N * N >>> ( _states, seed );
CCE(cudaDeviceSynchronize());
}
curandState* CURAND_STATE::get() const {
return _states;
}
CURAND_STATE::~CURAND_STATE() {
cudaFree(_states);
}
__global__ void setupCuRandState( curandState * state, unsigned long seed ) {
int x = blockIdx.x*blockDim.x + threadIdx.x;
curand_init ( seed, x, 0, &state[x] );
}
inline __device__ void get_curand_normal(float& x, curandState* state) {
x = curand_normal(state);
}
inline __device__ void get_curand_uniform(float& x, curandState* state) {
x = curand_uniform(state);
}
inline __device__ void sample_gaussian(float& x, curandState* state) {
x += curand_normal(state);
}
inline __device__ void sample_bernoulli(float& x, curandState* state) {
x = (float) (x >= curand_uniform(state));
}
template <Operation op>
__global__ void element_wise_curand_kernel(float* const data, curandState* globalState, unsigned int rows, unsigned int cols) {
int tx = threadIdx.x;
int ty = threadIdx.y;
// Matrix index
int x = blockIdx.x*blockDim.x + tx;
int y = blockIdx.y*blockDim.y + ty;
if (x >= cols || y >= rows)
return;
int i = x * rows + y;
int j = tx * blockDim.y + ty;
op(data[i], globalState + j);
__syncthreads();
}
void sample(mat &prob, UNIT_TYPE type) {
static CURAND_STATE state;
ALLOCATE_GRIDS_AND_THREADS(prob.getRows(), prob.getCols());
switch (type) {
case GAUSSIAN:
element_wise_curand_kernel<sample_gaussian><<< grids, threads >>>(prob.getData(), state.get(), prob.getRows(), prob.getCols());
break;
case BERNOULLI:
element_wise_curand_kernel<sample_bernoulli><<< grids, threads >>>(prob.getData(), state.get(), prob.getRows(), prob.getCols());
break;
}
CCE(cudaDeviceSynchronize());
fill_bias(prob);
}
mat randn(int m, int n) {
#ifdef DEBUG
// Use ext::randn (which is set to seed 0) to debug.
mat x(m, n);
ext::randn(x);
return x;
#else
static CURAND_STATE state;
mat x(m, n);
ALLOCATE_GRIDS_AND_THREADS(m, n);
element_wise_curand_kernel<get_curand_normal><<<grids, threads>>>(x.getData(), state.get(), m, n);
CCE(cudaDeviceSynchronize());
return x;
#endif
}
mat rand(int m, int n) {
#ifdef DEBUG
// Use ext::rand (which is set to seed 0) to debug.
mat x(m, n);
ext::rand(x);
return x;
#else
static CURAND_STATE state;
mat x(m, n);
ALLOCATE_GRIDS_AND_THREADS(m, n);
element_wise_curand_kernel<get_curand_uniform><<<grids, threads>>>(x.getData(), state.get(), m, n);
CCE(cudaDeviceSynchronize());
return x;
#endif
}
map<int, int> getLabelMapping(const hmat& labels) {
map<int, int> classes;
for (size_t i=0; i<labels.size(); ++i)
classes[(int) labels[i]] = 1;
int counter = 0;
map<int, int>::iterator itr = classes.begin();
for (; itr != classes.end(); ++itr)
itr->second = ++counter;
return classes;
}
namespace ext {
void rescale(mat& data, float lower, float upper) {
float min = ext::min(data);
float max = ext::max(data);
float ratio = (upper - lower) / (max - min);
data = (data - min) * ratio + lower;
}
float max(const mat& v) {
thrust::device_ptr<float> vPtr(v.getData());
thrust::device_ptr<float> maxPtr = thrust::max_element(vPtr, vPtr + v.size());
thrust::host_vector<float> hMaxPtr(maxPtr, maxPtr + 1);
return hMaxPtr[0];
}
float min(const mat& v) {
thrust::device_ptr<float> vPtr(v.getData());
thrust::device_ptr<float> minPtr = thrust::min_element(vPtr, vPtr + v.size());
thrust::host_vector<float> hMaxPtr(minPtr, minPtr + 1);
return hMaxPtr[0];
}
float max(const hmat& v) {
float* m = thrust::max_element(v.getData(), v.getData() + v.size());
return *m;
}
float min(const hmat& v) {
float* m = thrust::min_element(v.getData(), v.getData() + v.size());
return *m;
}
};
__global__ void dcrossentropy_kernel(float* error, float* const target, float* const output, unsigned int rows, unsigned int cols) {
int tx = threadIdx.x;
int ty = threadIdx.y;
// Matrix index
int x = blockIdx.x*blockDim.x + tx;
int y = blockIdx.y*blockDim.y + ty;
if (x >= cols || y >= rows)
return;
int i = x * rows + y;
// target[y] need to be 0-based
error[i] = output[i] - (float) (target[y] == x);
__syncthreads();
}
void dCrossEntropy(mat& error, const mat &target, const mat& output) {
assert(error.getRows() == output.getRows() && error.getCols() == output.getCols());
ALLOCATE_GRIDS_AND_THREADS(error.getRows(), error.getCols());
dcrossentropy_kernel<<< grids, threads >>>(
error.getData(), target.getData(), output.getData(),
error.getRows(), error.getCols());
CCE(cudaDeviceSynchronize());
}
mat getError(const mat& target, const mat& output, ERROR_MEASURE errorMeasure) {
mat error(output.getRows(), output.getCols());
switch (errorMeasure) {
case L2ERROR:
// FIXME
/*error = output - target;
error.reserve(error.getRows() * (error.getCols() + 1));
error.resize(error.getRows(), error.getCols() + 1);*/
break;
case CROSS_ENTROPY:
dCrossEntropy(error, target, output);
break;
}
return error;
}
mat posteriorProb2Label(const mat& prob) {
assert(prob.getCols() > 1);
size_t rows = prob.getRows(),
cols = prob.getCols();
float* h_prob = new float[prob.size()];
float* h_labels = new float[rows];
CCE(cudaMemcpy(h_prob, prob.getData(), sizeof(float) * prob.size(), cudaMemcpyDeviceToHost));
CCE(cudaDeviceSynchronize());
for (size_t i=0; i<rows; ++i) {
float max = -1e10;
size_t maxIdx = 0;
for (size_t j=0; j<cols; ++j) {
if (h_prob[j * rows + i] > max) {
max = h_prob[j * rows + i];
maxIdx = j;
}
}
h_labels[i] = maxIdx;
}
mat labels(h_labels, rows, 1);
delete [] h_prob;
delete [] h_labels;
return labels;
}
vector<float> copyToHost(const mat& m) {
vector<float> hm(m.size());
thrust::device_ptr<float> dPtr(m.getData());
thrust::copy(dPtr, dPtr + m.size(), hm.begin());
return hm;
}
size_t countDifference(const mat& m1, const mat& m2) {
assert(m1.size() == m2.size());
size_t L = m1.size();
thrust::device_ptr<float> ptr1(m1.getData());
thrust::device_ptr<float> ptr2(m2.getData());
size_t nDiff = thrust::inner_product(ptr1, ptr1 + L, ptr2, 0.0, thrust::plus<float>(), thrust::not_equal_to<float>());
return nDiff;
}
size_t zeroOneError(const mat& prob, const mat& label, ERROR_MEASURE errorMeasure) {
assert(prob.getRows() == label.getRows());
assert(label.getCols() == 1);
size_t nError = 0;
if (errorMeasure == L2ERROR) {
// nError = countDifference(label, prob);
}
else {
mat L = posteriorProb2Label(prob);
nError = countDifference(L, label);
}
return nError;
}
template <typename T>
device_matrix<T> MaxPerRow(device_matrix<T>& A) {
device_matrix<T> rmax(A.getRows(), 1);
device_matrix<T> At = ~A;
// allocate storage for per-row results and indices
thrust::device_vector<T> row_indices(A.getRows());
thrust::device_vector<T> row_results(A.getRows());
// compute row sums by summing values with equal row indices
thrust::reduce_by_key
(thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(A.getCols())),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(A.getCols())) + A.size(),
thrust::device_ptr<T>(At.getData()),
row_indices.begin(),
thrust::device_ptr<T>(rmax.getData()),
thrust::equal_to<T>(),
thrust::maximum<T>());
return rmax;
}
template <typename T>
__global__ void substract_max_per_row_kernel(T* const A, T* const rmax, unsigned int rows, unsigned int cols) {
// Matrix index
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= cols || y >= rows)
return;
A[x * rows + y] -= rmax[y];
}
template <typename T>
void SubstractMaxPerRow(device_matrix<T>& x) {
device_matrix<T> rmax = MaxPerRow(x);
ALLOCATE_GRIDS_AND_THREADS(x.getRows(), x.getCols());
substract_max_per_row_kernel<float><<< grids, threads >>>
(x.getData(), rmax.getData(), x.getRows(), x.getCols());
CCE(cudaDeviceSynchronize());
}
template <typename T>
void fillLastColumnWith(device_matrix<T>& A, const T value) {
thrust::device_ptr<T> ptr(A.getData());
thrust::fill(ptr + A.size() - A.getRows(), ptr + A.size(), value);
}
template <typename T>
device_matrix<T> operator & (const device_matrix<T>& A, const device_matrix<T>& B) {
assert(A.getRows() == B.getRows() && A.getCols() == B.getCols());
device_matrix<T> C(A.getRows(), A.getCols());
thrust::device_ptr<T> aPtr(A.getData());
thrust::device_ptr<T> bPtr(B.getData());
thrust::device_ptr<T> cPtr(C.getData());
thrust::transform(aPtr, aPtr + A.size(), bPtr, cPtr, thrust::multiplies<T>());
return C;
}
template <typename T>
device_matrix<T> log(const device_matrix<T>& x) {
return transform(x, func::log<T>());
}
template <typename T>
device_matrix<T> log1pexp(const device_matrix<T>& x) {
return transform(x, func::log_of_one_plus_exp<T>());
}
template <typename T>
device_matrix<T> sigmoid(const device_matrix<T>& x) {
return transform(x, func::sigmoid<T>());
}
template <typename T>
device_matrix<T> softmax(const device_matrix<T>& x) {
mat x2(x);
x2.resize(x2.getRows(), x2.getCols() - 1);
SubstractMaxPerRow(x2);
mat p(x2.getRows(), x2.getCols());
thrust::device_ptr<T> xPtr(x2.getData());
thrust::device_ptr<T> pPtr(p.getData());
thrust::transform(xPtr, xPtr + x2.size(), pPtr, func::exp<T>());
mat sumOfProb = p * mat(p.getCols(), p.getCols(), 1);
mat y(p.getRows(), p.getCols() + 1);
thrust::device_ptr<T> yPtr(y.getData());
thrust::device_ptr<T> sPtr(sumOfProb.getData());
thrust::transform(pPtr, pPtr + p.size(), sPtr, yPtr, thrust::divides<T>());
return y;
}
/* \brief Explicit instantiation definition of template functions
*/
#define register_device_matrix_utility(T) \
template device_matrix<T> operator &<T> (const device_matrix<T>& A, const device_matrix<T>& B); \
template void fillLastColumnWith<T>(device_matrix<T>& A, const T value); \
template device_matrix<T> log<T>(const device_matrix<T>& x); \
template device_matrix<T> log1pexp<T>(const device_matrix<T>& x); \
template device_matrix<T> sigmoid<T>(const device_matrix<T>& x); \
template device_matrix<T> softmax<T>(const device_matrix<T>& x); \
template device_matrix<T> MaxPerRow<T>(device_matrix<T>& A); \
template void SubstractMaxPerRow<T>(device_matrix<T>& x);
register_device_matrix_utility(float);
|
0dd27574b7aac6e5c44bb8771de23e91727d5e96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <common/fast_int_div.cuh>
#include "test_utils.h"
namespace MLCommon {
TEST(FastIntDiv, CpuTest) {
for (int i = 0; i < 100; ++i) {
// get a positive divisor
int divisor;
do {
divisor = rand();
} while (divisor <= 0);
FastIntDiv fid(divisor);
// run it against a few random numbers and compare the outputs
for (int i = 0; i < 10000; ++i) {
auto num = rand();
auto correct = num / divisor;
auto computed = num / fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
num = rand();
correct = num % divisor;
computed = num % fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
num = -num;
correct = num / divisor;
computed = num / fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
num = rand();
correct = num % divisor;
computed = num % fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
}
}
}
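// Note: the CPU and GPU tests only verify that FastIntDiv reproduces plain / and % for the
// same divisor; the speedup itself presumably comes from a precomputed multiplicative
// "magic number" inside fast_int_div.cuh, which is not shown here.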
__global__ void fastIntDivTestKernel(int* computed, int* correct, const int* in,
FastIntDiv fid, int divisor, int len) {
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < len) {
computed[tid] = in[tid] % fid;
correct[tid] = in[tid] % divisor;
computed[len + tid] = -in[tid] % fid;
correct[len + tid] = -in[tid] % divisor;
}
}
TEST(FastIntDiv, GpuTest) {
static const int len = 100000;
static const int TPB = 128;
int *computed, *correct, *in;
allocate(computed, len * 2);
allocate(correct, len * 2);
allocate(in, len);
for (int i = 0; i < 100; ++i) {
// get a positive divisor
int divisor;
do {
divisor = rand();
} while (divisor <= 0);
FastIntDiv fid(divisor);
// run it against a few random numbers and compare the outputs
int* h_in = new int[len];
for (int i = 0; i < len; ++i) {
h_in[i] = rand();
}
updateDevice(in, h_in, len, 0);
int nblks = ceildiv(len, TPB);
hipLaunchKernelGGL(( fastIntDivTestKernel), dim3(nblks), dim3(TPB), 0, 0, computed, correct, in, fid,
divisor, len);
CUDA_CHECK(hipStreamSynchronize(0));
delete[] h_in; // free the per-iteration host buffer, which was previously leaked on every iteration
ASSERT_TRUE(devArrMatch(correct, computed, len * 2, Compare<int>()))
<< " divisor=" << divisor;
}
}
FastIntDiv dummyFunc(int num) {
FastIntDiv fd(num);
return fd;
}
TEST(FastIntDiv, IncorrectUsage) {
ASSERT_THROW(dummyFunc(-1), MLCommon::Exception);
ASSERT_THROW(dummyFunc(0), MLCommon::Exception);
}
} // namespace MLCommon
| 0dd27574b7aac6e5c44bb8771de23e91727d5e96.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <common/fast_int_div.cuh>
#include "test_utils.h"
namespace MLCommon {
TEST(FastIntDiv, CpuTest) {
for (int i = 0; i < 100; ++i) {
// get a positive divisor
int divisor;
do {
divisor = rand();
} while (divisor <= 0);
FastIntDiv fid(divisor);
// run it against a few random numbers and compare the outputs
for (int i = 0; i < 10000; ++i) {
auto num = rand();
auto correct = num / divisor;
auto computed = num / fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
num = rand();
correct = num % divisor;
computed = num % fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
num = -num;
correct = num / divisor;
computed = num / fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
num = rand();
correct = num % divisor;
computed = num % fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
}
}
}
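// Note: the tests in this file only check that FastIntDiv reproduces plain / and % for the
// same divisor; the speedup itself presumably comes from a precomputed multiplicative
// "magic number" inside fast_int_div.cuh, which is not shown here.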
__global__ void fastIntDivTestKernel(int* computed, int* correct, const int* in,
FastIntDiv fid, int divisor, int len) {
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < len) {
computed[tid] = in[tid] % fid;
correct[tid] = in[tid] % divisor;
computed[len + tid] = -in[tid] % fid;
correct[len + tid] = -in[tid] % divisor;
}
}
TEST(FastIntDiv, GpuTest) {
static const int len = 100000;
static const int TPB = 128;
int *computed, *correct, *in;
allocate(computed, len * 2);
allocate(correct, len * 2);
allocate(in, len);
for (int i = 0; i < 100; ++i) {
// get a positive divisor
int divisor;
do {
divisor = rand();
} while (divisor <= 0);
FastIntDiv fid(divisor);
// run it against a few random numbers and compare the outputs
int* h_in = new int[len];
for (int i = 0; i < len; ++i) {
h_in[i] = rand();
}
updateDevice(in, h_in, len, 0);
int nblks = ceildiv(len, TPB);
fastIntDivTestKernel<<<nblks, TPB, 0, 0>>>(computed, correct, in, fid,
divisor, len);
CUDA_CHECK(cudaStreamSynchronize(0));
delete[] h_in; // free the per-iteration host buffer, which was previously leaked on every iteration
ASSERT_TRUE(devArrMatch(correct, computed, len * 2, Compare<int>()))
<< " divisor=" << divisor;
}
}
FastIntDiv dummyFunc(int num) {
FastIntDiv fd(num);
return fd;
}
TEST(FastIntDiv, IncorrectUsage) {
ASSERT_THROW(dummyFunc(-1), MLCommon::Exception);
ASSERT_THROW(dummyFunc(0), MLCommon::Exception);
}
} // namespace MLCommon
|
c6854f47e5637d12bd8a5296bc1866c6e7f7d6c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "optixParams.h" // our launch params
extern "C" {
__constant__ LaunchParams optixLaunchParams;
}
// ray types
enum { RAIDANCE=0, SHADOW, RAY_TYPE_COUNT };
struct RadiancePRD {
float3 emitted;
float3 radiance;
float3 attenuation;
float3 origin;
float3 direction;
bool done;
uint32_t seed;
int32_t countEmitted;
} ;
struct shadowPRD {
float shadowAtt;
uint32_t seed;
} ;
extern "C" __global__ void __closesthit__radiance() {
const TriangleMeshSBTData &sbtData
= *(const TriangleMeshSBTData*)optixGetSbtDataPointer();
RadiancePRD &prd = *(RadiancePRD *)getPRD<RadiancePRD>();
// retrieve primitive id and indexes
const int primID = optixGetPrimitiveIndex();
const uint3 index = sbtData.index[primID];
// get barycentric coordinates
const float u = optixGetTriangleBarycentrics().x;
const float v = optixGetTriangleBarycentrics().y;
// compute normal
const float4 n
= (1.f-u-v) * sbtData.vertexD.normal[index.x]
+ u * sbtData.vertexD.normal[index.y]
+ v * sbtData.vertexD.normal[index.z];
const float3 nn = normalize(make_float3(n));
// intersection position
const float3 &rayDir = optixGetWorldRayDirection();
const float3 pos = optixGetWorldRayOrigin() + optixGetRayTmax() * rayDir ;
if (prd.countEmitted && length(sbtData.emission) != 0) {
prd.emitted = sbtData.emission ;
return;
}
else
prd.emitted = make_float3(0.0f);
uint32_t seed = prd.seed;
float3 color;
if (sbtData.hasTexture && sbtData.vertexD.texCoord0) {
const float4 tc
= (1.f-u-v) * sbtData.vertexD.texCoord0[index.x]
+ u * sbtData.vertexD.texCoord0[index.y]
+ v * sbtData.vertexD.texCoord0[index.z];
float4 fromTexture = tex2D<float4>(sbtData.texture,tc.x,tc.y);
color = make_float3(fromTexture);
} else {
color = sbtData.diffuse;
}
{
const float z1 = rnd(seed);
const float z2 = rnd(seed);
float3 w_in;
cosine_sample_hemisphere( z1, z2, w_in );
Onb onb( nn );
onb.inverse_transform( w_in );
prd.direction = w_in;
prd.origin = pos;
prd.attenuation *= color;
prd.countEmitted = false;
}
const float z1 = rnd(seed);
const float z2 = rnd(seed);
prd.seed = seed;
const float3 lightV1 = make_float3(0.47f, 0.0, 0.0f);
const float3 lightV2 = make_float3(0.0f, 0.0, 0.38f);
const float3 light_pos = make_float3(optixLaunchParams.global->lightPos) + lightV1 * z1 + lightV2 * z2;
// Calculate properties of light sample (for area based pdf)
const float Ldist = length(light_pos - pos );
const float3 L = normalize(light_pos - pos );
const float nDl = dot( nn, L );
const float3 Ln = normalize(cross(lightV1, lightV2));
const float LnDl = -dot( Ln, L );
float weight = 0.0f;
if (nDl > 0.0f && LnDl > 0.0f) {
uint32_t occluded = 0u;
optixTrace(optixLaunchParams.traversable,
pos,
L,
0.001f, // tmin
Ldist - 0.01f, // tmax
0.0f, // rayTime
OptixVisibilityMask(1),
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
SHADOW, // SBT offset
RAY_TYPE_COUNT, // SBT stride
SHADOW, // missSBTIndex
occluded);
if (!occluded) {
const float att = Ldist * Ldist;
const float A = length(cross(lightV1, lightV2));
weight = nDl * LnDl * A / att;
}
}
float probability = (sbtData.diffuse.x + sbtData.diffuse.y + sbtData.diffuse.z) / 3;
float random = rnd(seed);
if (random < probability) {
prd.done = false;
} else {
prd.done = true;
}
prd.radiance += (make_float3(5.0f, 5.0f, 5.0f) * weight * optixLaunchParams.global->lightScale) / probability;
}
extern "C" __global__ void __anyhit__radiance() {
}
// miss sets the background color
extern "C" __global__ void __miss__radiance() {
RadiancePRD &prd = *(RadiancePRD*)getPRD<RadiancePRD>();
// set black as background color
prd.radiance = make_float3(0.0f, 0.0f, 0.0f);
prd.done = true;
}
// -----------------------------------------------
// Shadow rays
extern "C" __global__ void __closesthit__shadow() {
optixSetPayload_0( static_cast<uint32_t>(true));
}
// any hit for shadows
extern "C" __global__ void __anyhit__shadow() {
}
// miss for shadows
extern "C" __global__ void __miss__shadow() {
optixSetPayload_0( static_cast<uint32_t>(false));
}
// -----------------------------------------------
// Primary Rays
extern "C" __global__ void __raygen__renderFrame() {
const int ix = optixGetLaunchIndex().x;
const int iy = optixGetLaunchIndex().y;
const auto &camera = optixLaunchParams.camera;
const int &maxDepth = optixLaunchParams.frame.maxDepth;
float squaredRaysPerPixel = float(optixLaunchParams.frame.raysPerPixel);
float2 delta = make_float2(1.0f/squaredRaysPerPixel, 1.0f/squaredRaysPerPixel);
float3 result = make_float3(0.0f);
uint32_t seed = tea<4>( ix * optixGetLaunchDimensions().x + iy, optixLaunchParams.frame.frame );
for (int i = 0; i < squaredRaysPerPixel; ++i) {
for (int j = 0; j < squaredRaysPerPixel; ++j) {
const float2 subpixel_jitter = make_float2( delta.x * (i + rnd(seed)), delta.y * (j + rnd( seed )));
const float2 screen(make_float2(ix + subpixel_jitter.x, iy + subpixel_jitter.y)
/ make_float2(optixGetLaunchDimensions().x, optixGetLaunchDimensions().y) * 2.0 - 1.0);
// note: nau already takes into account the field of view and ratio when computing
// camera horizontal and vertical
float3 origin = camera.position;
float3 rayDir = normalize(camera.direction
+ (screen.x ) * camera.horizontal
+ (screen.y ) * camera.vertical);
RadiancePRD prd;
prd.emitted = make_float3(0.f);
prd.radiance = make_float3(0.f);
prd.attenuation = make_float3(1.f);
prd.countEmitted = true;
prd.done = false;
prd.seed = seed;
uint32_t u0, u1;
packPointer( &prd, u0, u1 );
for (int k = 0; k < maxDepth && !prd.done; ++k) {
optixTrace(optixLaunchParams.traversable,
origin,
rayDir,
0.001f, // tmin
1e20f, // tmax
0.0f, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, RAIDANCE, RAY_TYPE_COUNT, RAIDANCE, u0, u1 );
result += prd.emitted;
result += prd.radiance * prd.attenuation;
origin = prd.origin;
rayDir = prd.direction;
}
}
}
result = result / (squaredRaysPerPixel*squaredRaysPerPixel);
float gamma = optixLaunchParams.global->gamma;
// compute index
const uint32_t fbIndex = ix + iy*optixGetLaunchDimensions().x;
optixLaunchParams.global->accumBuffer[fbIndex] =
(optixLaunchParams.global->accumBuffer[fbIndex] * optixLaunchParams.frame.subFrame +
make_float4(result.x, result.y, result.z, 1)) /(optixLaunchParams.frame.subFrame+1);
float4 rgbaf = optixLaunchParams.global->accumBuffer[fbIndex];
//convert float (0-1) to int (0-255)
const int r = int(255.0f*min(1.0f, pow(rgbaf.x, 1/gamma)));
const int g = int(255.0f*min(1.0f, pow(rgbaf.y, 1/gamma)));
const int b = int(255.0f*min(1.0f, pow(rgbaf.z, 1/gamma))) ;
// convert to 32-bit rgba value
const uint32_t rgba = 0xff000000 | (r<<0) | (g<<8) | (b<<16);
// write to output buffer
optixLaunchParams.frame.colorBuffer[fbIndex] = rgba;
}
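// Accumulation note: accumBuffer keeps a running mean over sub-frames
// ((old * subFrame + new) / (subFrame + 1)), so the image refines progressively; the averaged
// radiance is then gamma-corrected, clamped to [0,1] and packed into an 8-bit RGBA word.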
extern "C" __global__ void __closesthit__phong_metal() {
const TriangleMeshSBTData &sbtData
= *(const TriangleMeshSBTData*)optixGetSbtDataPointer();
// retrieve primitive id and indexes
const int primID = optixGetPrimitiveIndex();
const uint3 index = sbtData.index[primID];
// get barycentric coordinates
const float u = optixGetTriangleBarycentrics().x;
const float v = optixGetTriangleBarycentrics().y;
// compute normal
const float4 n
= (1.f-u-v) * sbtData.vertexD.normal[index.x]
+ u * sbtData.vertexD.normal[index.y]
+ v * sbtData.vertexD.normal[index.z];
// ray payload
float3 normal = normalize(make_float3(n));
// entering glass
//if (dot(optixGetWorldRayDirection(), normal) < 0)
RadiancePRD &prd = *(RadiancePRD*)getPRD<RadiancePRD>();
const float3 pos = optixGetWorldRayOrigin() + optixGetRayTmax()*optixGetWorldRayDirection();
//(1.f-u-v) * A + u * B + v * C;
const float glossiness = 20000.0f;
float3 rayDir;
float3 reflectDir = reflect(optixGetWorldRayDirection(), normal);
unsigned int seed = prd.seed;
const float z1 = rnd(seed);
const float z2 = rnd(seed);
cosine_power_sample_hemisphere( z1, z2, rayDir, glossiness );
Onb onb( reflectDir );
onb.inverse_transform( rayDir );
prd.origin = pos;
prd.direction = rayDir;
prd.seed = seed;
}
extern "C" __global__ void __closesthit__phong_glass() {
const TriangleMeshSBTData &sbtData
= *(const TriangleMeshSBTData*)optixGetSbtDataPointer();
// retrieve primitive id and indexes
const int primID = optixGetPrimitiveIndex();
const uint3 index = sbtData.index[primID];
// get barycentric coordinates
const float u = optixGetTriangleBarycentrics().x;
const float v = optixGetTriangleBarycentrics().y;
// compute normal
const float4 n
= (1.f-u-v) * sbtData.vertexD.normal[index.x]
+ u * sbtData.vertexD.normal[index.y]
+ v * sbtData.vertexD.normal[index.z];
float3 normal = normalize(make_float3(n));
const float3 normRayDir = optixGetWorldRayDirection();
RadiancePRD &prd = *(RadiancePRD*)getPRD<RadiancePRD>();
// new ray direction
float3 refractDir;
float3 reflectDir;
// entering glass
float cosTeta1;
float teta1;
float cosTeta2;
float teta2;
float n1;
float n2;
if (dot(normRayDir, normal) < 0) {
n1 = 1.0;
n2 = 1.5;
cosTeta1 = dot(normRayDir, -normal);
teta1 = acosf(cosTeta1);
teta2 = asinf(((sin(teta1) * n1) / n2));
cosTeta2 = cos(teta2);
refractDir = refract(normRayDir, normal, 0.66);
reflectDir = reflect(normRayDir, normal);
}
// exiting glass
else {
n1 = 1.5;
n2 = 1.0;
cosTeta1 = dot(normRayDir, normal);
teta1 = acosf(cosTeta1);
teta2 = asinf(((sin(teta1) * n1) / n2));
cosTeta2 = cos(teta2);
refractDir = refract(normRayDir, -normal, 1.5);
reflectDir = reflect(normRayDir, normal);
}
const float3 pos = optixGetWorldRayOrigin() + optixGetRayTmax() * optixGetWorldRayDirection();
uint32_t seed = prd.seed;
const float rand = rnd(seed);
prd.seed = seed;
float fr1 = pow((((n2*cosTeta1) - (n1*cosTeta2)) / ((n2*cosTeta1) + (n1*cosTeta2))), 2);
float fr2 = pow((((n1*cosTeta2) - (n2*cosTeta1)) / ((n1*cosTeta2) + (n2*cosTeta1))), 2);
float fr = ((fr1 + fr2) / 2.0);
float3 outDir;
if (fr > rand) {
outDir = reflectDir;
} else {
outDir = refractDir;
}
prd.origin = pos;
prd.direction = outDir;
}
| c6854f47e5637d12bd8a5296bc1866c6e7f7d6c1.cu | #include "optixParams.h" // our launch params
extern "C" {
__constant__ LaunchParams optixLaunchParams;
}
// ray types
enum { RAIDANCE=0, SHADOW, RAY_TYPE_COUNT };
struct RadiancePRD {
float3 emitted;
float3 radiance;
float3 attenuation;
float3 origin;
float3 direction;
bool done;
uint32_t seed;
int32_t countEmitted;
} ;
struct shadowPRD {
float shadowAtt;
uint32_t seed;
} ;
extern "C" __global__ void __closesthit__radiance() {
const TriangleMeshSBTData &sbtData
= *(const TriangleMeshSBTData*)optixGetSbtDataPointer();
RadiancePRD &prd = *(RadiancePRD *)getPRD<RadiancePRD>();
// retrieve primitive id and indexes
const int primID = optixGetPrimitiveIndex();
const uint3 index = sbtData.index[primID];
// get barycentric coordinates
const float u = optixGetTriangleBarycentrics().x;
const float v = optixGetTriangleBarycentrics().y;
// compute normal
const float4 n
= (1.f-u-v) * sbtData.vertexD.normal[index.x]
+ u * sbtData.vertexD.normal[index.y]
+ v * sbtData.vertexD.normal[index.z];
const float3 nn = normalize(make_float3(n));
// intersection position
const float3 &rayDir = optixGetWorldRayDirection();
const float3 pos = optixGetWorldRayOrigin() + optixGetRayTmax() * rayDir ;
if (prd.countEmitted && length(sbtData.emission) != 0) {
prd.emitted = sbtData.emission ;
return;
}
else
prd.emitted = make_float3(0.0f);
uint32_t seed = prd.seed;
float3 color;
if (sbtData.hasTexture && sbtData.vertexD.texCoord0) {
const float4 tc
= (1.f-u-v) * sbtData.vertexD.texCoord0[index.x]
+ u * sbtData.vertexD.texCoord0[index.y]
+ v * sbtData.vertexD.texCoord0[index.z];
float4 fromTexture = tex2D<float4>(sbtData.texture,tc.x,tc.y);
color = make_float3(fromTexture);
} else {
color = sbtData.diffuse;
}
{
const float z1 = rnd(seed);
const float z2 = rnd(seed);
float3 w_in;
cosine_sample_hemisphere( z1, z2, w_in );
Onb onb( nn );
onb.inverse_transform( w_in );
prd.direction = w_in;
prd.origin = pos;
prd.attenuation *= color;
prd.countEmitted = false;
}
const float z1 = rnd(seed);
const float z2 = rnd(seed);
prd.seed = seed;
const float3 lightV1 = make_float3(0.47f, 0.0, 0.0f);
const float3 lightV2 = make_float3(0.0f, 0.0, 0.38f);
const float3 light_pos = make_float3(optixLaunchParams.global->lightPos) + lightV1 * z1 + lightV2 * z2;
// Calculate properties of light sample (for area based pdf)
const float Ldist = length(light_pos - pos );
const float3 L = normalize(light_pos - pos );
const float nDl = dot( nn, L );
const float3 Ln = normalize(cross(lightV1, lightV2));
const float LnDl = -dot( Ln, L );
float weight = 0.0f;
if (nDl > 0.0f && LnDl > 0.0f) {
uint32_t occluded = 0u;
optixTrace(optixLaunchParams.traversable,
pos,
L,
0.001f, // tmin
Ldist - 0.01f, // tmax
0.0f, // rayTime
OptixVisibilityMask(1),
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
SHADOW, // SBT offset
RAY_TYPE_COUNT, // SBT stride
SHADOW, // missSBTIndex
occluded);
if (!occluded) {
const float att = Ldist * Ldist;
const float A = length(cross(lightV1, lightV2));
weight = nDl * LnDl * A / att;
}
}
float probability = (sbtData.diffuse.x + sbtData.diffuse.y + sbtData.diffuse.z) / 3;
float random = rnd(seed);
if (random < probability) {
prd.done = false;
} else {
prd.done = true;
}
prd.radiance += (make_float3(5.0f, 5.0f, 5.0f) * weight * optixLaunchParams.global->lightScale) / probability;
}
extern "C" __global__ void __anyhit__radiance() {
}
// miss sets the background color
extern "C" __global__ void __miss__radiance() {
RadiancePRD &prd = *(RadiancePRD*)getPRD<RadiancePRD>();
// set black as background color
prd.radiance = make_float3(0.0f, 0.0f, 0.0f);
prd.done = true;
}
// -----------------------------------------------
// Shadow rays
extern "C" __global__ void __closesthit__shadow() {
optixSetPayload_0( static_cast<uint32_t>(true));
}
// any hit for shadows
extern "C" __global__ void __anyhit__shadow() {
}
// miss for shadows
extern "C" __global__ void __miss__shadow() {
optixSetPayload_0( static_cast<uint32_t>(false));
}
// -----------------------------------------------
// Primary Rays
extern "C" __global__ void __raygen__renderFrame() {
const int ix = optixGetLaunchIndex().x;
const int iy = optixGetLaunchIndex().y;
const auto &camera = optixLaunchParams.camera;
const int &maxDepth = optixLaunchParams.frame.maxDepth;
float squaredRaysPerPixel = float(optixLaunchParams.frame.raysPerPixel);
float2 delta = make_float2(1.0f/squaredRaysPerPixel, 1.0f/squaredRaysPerPixel);
float3 result = make_float3(0.0f);
uint32_t seed = tea<4>( ix * optixGetLaunchDimensions().x + iy, optixLaunchParams.frame.frame );
for (int i = 0; i < squaredRaysPerPixel; ++i) {
for (int j = 0; j < squaredRaysPerPixel; ++j) {
const float2 subpixel_jitter = make_float2( delta.x * (i + rnd(seed)), delta.y * (j + rnd( seed )));
const float2 screen(make_float2(ix + subpixel_jitter.x, iy + subpixel_jitter.y)
/ make_float2(optixGetLaunchDimensions().x, optixGetLaunchDimensions().y) * 2.0 - 1.0);
// note: nau already takes into account the field of view and ratio when computing
// camera horizontal and vertical
float3 origin = camera.position;
float3 rayDir = normalize(camera.direction
+ (screen.x ) * camera.horizontal
+ (screen.y ) * camera.vertical);
RadiancePRD prd;
prd.emitted = make_float3(0.f);
prd.radiance = make_float3(0.f);
prd.attenuation = make_float3(1.f);
prd.countEmitted = true;
prd.done = false;
prd.seed = seed;
uint32_t u0, u1;
packPointer( &prd, u0, u1 );
for (int k = 0; k < maxDepth && !prd.done; ++k) {
optixTrace(optixLaunchParams.traversable,
origin,
rayDir,
0.001f, // tmin
1e20f, // tmax
0.0f, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, RAIDANCE, RAY_TYPE_COUNT, RAIDANCE, u0, u1 );
result += prd.emitted;
result += prd.radiance * prd.attenuation;
origin = prd.origin;
rayDir = prd.direction;
}
}
}
result = result / (squaredRaysPerPixel*squaredRaysPerPixel);
float gamma = optixLaunchParams.global->gamma;
// compute index
const uint32_t fbIndex = ix + iy*optixGetLaunchDimensions().x;
optixLaunchParams.global->accumBuffer[fbIndex] =
(optixLaunchParams.global->accumBuffer[fbIndex] * optixLaunchParams.frame.subFrame +
make_float4(result.x, result.y, result.z, 1)) /(optixLaunchParams.frame.subFrame+1);
float4 rgbaf = optixLaunchParams.global->accumBuffer[fbIndex];
//convert float (0-1) to int (0-255)
const int r = int(255.0f*min(1.0f, pow(rgbaf.x, 1/gamma)));
const int g = int(255.0f*min(1.0f, pow(rgbaf.y, 1/gamma)));
const int b = int(255.0f*min(1.0f, pow(rgbaf.z, 1/gamma))) ;
// convert to 32-bit rgba value
const uint32_t rgba = 0xff000000 | (r<<0) | (g<<8) | (b<<16);
// write to output buffer
optixLaunchParams.frame.colorBuffer[fbIndex] = rgba;
}
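// Accumulation note: accumBuffer keeps a running mean over sub-frames
// ((old * subFrame + new) / (subFrame + 1)), so the image refines progressively; the averaged
// radiance is then gamma-corrected, clamped to [0,1] and packed into an 8-bit RGBA word.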
extern "C" __global__ void __closesthit__phong_metal() {
const TriangleMeshSBTData &sbtData
= *(const TriangleMeshSBTData*)optixGetSbtDataPointer();
// retrieve primitive id and indexes
const int primID = optixGetPrimitiveIndex();
const uint3 index = sbtData.index[primID];
// get barycentric coordinates
const float u = optixGetTriangleBarycentrics().x;
const float v = optixGetTriangleBarycentrics().y;
// compute normal
const float4 n
= (1.f-u-v) * sbtData.vertexD.normal[index.x]
+ u * sbtData.vertexD.normal[index.y]
+ v * sbtData.vertexD.normal[index.z];
// ray payload
float3 normal = normalize(make_float3(n));
// entering glass
//if (dot(optixGetWorldRayDirection(), normal) < 0)
RadiancePRD &prd = *(RadiancePRD*)getPRD<RadiancePRD>();
const float3 pos = optixGetWorldRayOrigin() + optixGetRayTmax()*optixGetWorldRayDirection();
//(1.f-u-v) * A + u * B + v * C;
const float glossiness = 20000.0f;
float3 rayDir;
float3 reflectDir = reflect(optixGetWorldRayDirection(), normal);
unsigned int seed = prd.seed;
const float z1 = rnd(seed);
const float z2 = rnd(seed);
cosine_power_sample_hemisphere( z1, z2, rayDir, glossiness );
Onb onb( reflectDir );
onb.inverse_transform( rayDir );
prd.origin = pos;
prd.direction = rayDir;
prd.seed = seed;
}
extern "C" __global__ void __closesthit__phong_glass() {
const TriangleMeshSBTData &sbtData
= *(const TriangleMeshSBTData*)optixGetSbtDataPointer();
// retrieve primitive id and indexes
const int primID = optixGetPrimitiveIndex();
const uint3 index = sbtData.index[primID];
// get barycentric coordinates
const float u = optixGetTriangleBarycentrics().x;
const float v = optixGetTriangleBarycentrics().y;
// compute normal
const float4 n
= (1.f-u-v) * sbtData.vertexD.normal[index.x]
+ u * sbtData.vertexD.normal[index.y]
+ v * sbtData.vertexD.normal[index.z];
float3 normal = normalize(make_float3(n));
const float3 normRayDir = optixGetWorldRayDirection();
RadiancePRD &prd = *(RadiancePRD*)getPRD<RadiancePRD>();
// new ray direction
float3 refractDir;
float3 reflectDir;
// entering glass
float cosTeta1;
float teta1;
float cosTeta2;
float teta2;
float n1;
float n2;
if (dot(normRayDir, normal) < 0) {
n1 = 1.0;
n2 = 1.5;
cosTeta1 = dot(normRayDir, -normal);
teta1 = acosf(cosTeta1);
teta2 = asinf(((sin(teta1) * n1) / n2));
cosTeta2 = cos(teta2);
refractDir = refract(normRayDir, normal, 0.66);
reflectDir = reflect(normRayDir, normal);
}
// exiting glass
else {
n1 = 1.5;
n2 = 1.0;
cosTeta1 = dot(normRayDir, normal);
teta1 = acosf(cosTeta1);
teta2 = asinf(((sin(teta1) * n1) / n2));
cosTeta2 = cos(teta2);
refractDir = refract(normRayDir, -normal, 1.5);
reflectDir = reflect(normRayDir, normal);
}
const float3 pos = optixGetWorldRayOrigin() + optixGetRayTmax() * optixGetWorldRayDirection();
uint32_t seed = prd.seed;
const float rand = rnd(seed);
prd.seed = seed;
float fr1 = pow((((n2*cosTeta1) - (n1*cosTeta2)) / ((n2*cosTeta1) + (n1*cosTeta2))), 2);
float fr2 = pow((((n1*cosTeta2) - (n2*cosTeta1)) / ((n1*cosTeta2) + (n2*cosTeta1))), 2);
float fr = ((fr1 + fr2) / 2.0);
float3 outDir;
if (fr > rand) {
outDir = reflectDir;
} else {
outDir = refractDir;
}
prd.origin = pos;
prd.direction = outDir;
}
|
be50409ef1cdbf024cd084476262eea02bdeb76c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "device_atomic_functions.h"
#include "Math_basics.cuh"
#include "AtomicAction.cuh"
#include "Polygon.h"
#include <iostream>
#include <fstream>
#include <iomanip>
using namespace std;
using namespace P_RVD;
//set global variable to store the seeds' position and weight
__device__ double* SeedsInformation;
__device__ int* SeedsPolygon_nb;
const int CUDA_Stack_size = 10;
texture<int2, 1> t_vertex;
texture<int2, 1> t_points;
texture<int, 1> t_points_nn;
struct Cuda_Vertex
{
double x;
double y;
double z;
double w;
int neigh_s = -1;
};
struct Cuda_Polygon
{
Cuda_Vertex vertex[10];
int vertex_nb;
};
/*
process the cuda error
*/
inline void checkCudaErrors(hipError_t err)
{
if (hipSuccess != err)
{
fprintf(stderr, "CUDA Runtime API error : %s.\n", hipGetErrorString(err));
return;
}
}
void CheckCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
__device__
double MyAtomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do{
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
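// MyAtomicAdd emulates atomicAdd for doubles with a compare-and-swap loop on the value's
// 64-bit integer representation; native double-precision atomicAdd only exists on compute
// capability 6.0 and newer, so the retry loop keeps this code usable on older GPUs.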
static __inline__ __device__
double fetch_double(texture<int2, 1> t, int i){
int2 v = tex1Dfetch(t, i);
return __hiloint2double(v.y, v.x);
}
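// Textures cannot be bound to 64-bit doubles directly, so each double is stored as an int2
// (its low/high 32-bit halves) and reassembled here with __hiloint2double.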
/*
device function
compute the centroid of a facet's 3 vertices
*/
__device__ double3 computeCentriod(double3 a, double3 b, double3 c)
{
double3 ret = { (a.x + b.x + c.x) / 3,
(a.y + b.y + c.y) / 3,
(a.z + b.z + c.z) / 3, };
return ret;
}
/*
device function
compute the intersection(stored in a polygon) clipped by a bisector defined by seed i and j
input : polygon ping and its number
output: polygon pong and its number
*/
__device__
void clip_by_plane(Cuda_Polygon& ping, Cuda_Polygon& pong, double3 position_i,
double3 position_j, int j)
{
//reset the pong
pong.vertex_nb = 0;
if (ping.vertex_nb == 0)
return;
// Compute d = n . (2m), where n is the
// normal vector of the bisector [i, j]
// and m the middle point of the bisector.
double d = 0.0;
d = dot(add(position_i, position_j), sub(position_i, position_j));
//The predecessor of the first vertex is the last vertex
int prev_k = ping.vertex_nb - 1;
//get the position data
Cuda_Vertex* prev_vk = &ping.vertex[prev_k];
double3 prev_vertex_position = { prev_vk->x, prev_vk->y, prev_vk->z };
//then we compute prev_vertex_position "cross" n
//prev_l = prev_vertex_position . n
double prev_l = dot(prev_vertex_position, sub(position_i, position_j));
int prev_status = sgn(2.0 * prev_l - d);
//traverse the Vertex in this Polygon
for (int k = 0; k < ping.vertex_nb; ++k)
{
Cuda_Vertex* vk = &ping.vertex[k];
double3 vertex_position = { vk->x, vk->y, vk->z };
double l = dot(vertex_position, sub(position_i, position_j));
int status = sgn(2.0 * l - d);
//If status of edge extremities differ,
//then there is an intersection.
if (status != prev_status && (prev_status) != 0)
{
// create the intersection and update the Polyon
Cuda_Vertex I;
//compute the position and weight
double denom = 2.0 * (prev_l - l);
double lambda1, lambda2;
// Shit happens!
if (m_fabs(denom) < 1e-20)
{
lambda1 = 0.5;
lambda2 = 0.5;
}
else
{
lambda1 = (d - 2.0 * l) / denom;
// Note: lambda2 is also given
// by (2.0*l2-d)/denom
// (but 1.0 - lambda1 is a bit
// faster to compute...)
lambda2 = 1.0 - lambda1;
}
//Set the Position of Vertex
I.x = lambda1 * prev_vertex_position.x + lambda2 * vertex_position.x;
I.y = lambda1 * prev_vertex_position.y + lambda2 * vertex_position.y;
I.z = lambda1 * prev_vertex_position.z + lambda2 * vertex_position.z;
//Set the Weight of Vertex
I.w = (lambda1 * prev_vk->w + lambda2 * vk->w);
if (status > 0)
{
I.neigh_s = (j);
}
else {
I.neigh_s = (vk->neigh_s);
}
//add I to pong
pong.vertex[pong.vertex_nb] = I;
pong.vertex_nb++;
}
if (status > 0)
{
//add vertex to pong
pong.vertex[pong.vertex_nb] = *vk;
pong.vertex_nb++;
}
prev_vk = vk;
prev_vertex_position = vertex_position;
prev_status = status;
prev_l = l;
prev_k = k;
}
return;
}
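// clip_by_plane is essentially one Sutherland-Hodgman clipping step against the bisector of
// seeds i and j: vertices strictly closer to seed i (positive status) are kept, intersection
// points are interpolated where an edge crosses the bisector, and neigh_s records which seed's
// bisector produced each new vertex so the caller can propagate the facet to neighbouring
// Voronoi cells.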
/*
device function
swap the ping and pong to make sure
ping store the result;
*/
__device__
void swap_polygons(Cuda_Polygon& ping, Cuda_Polygon& pong)
{
//!!! could be accelerated
// not sure whether memset/memcpy could be used here instead
Cuda_Polygon t = ping;
ping = pong;
pong = t;
}
/*
*/
//__device__
//void intersection_clip_facet_with_knn(Cuda_Polygon& current_polygon , int current_seed, double* seeds_pointer, int seeds_nb, int *seeds_neighbor_index, int k)
//{
//
// //set a buffer pointer to store the polygon
// Cuda_Polygon polygon_buffer;
//
// for (int i = 0; i < k; ++i)
// {
// int j = seeds_neighbor_index[current_seed * k + i];
// if (current_seed != j)
// {
// clip_by_plane(current_polygon, polygon_buffer, current_seed, j, seeds_pointer, seeds_nb);
// swap_polygons(current_polygon, polygon_buffer);
// }
// }
// return;
//}
/*
*/
__device__
void intersection_clip_facet_SR(Cuda_Polygon& current_polygon, int i, double* seeds_pointer, int seeds_nb,
int *seeds_neighbor_index, int k)
{
Cuda_Polygon polygon_buffer;
double3 pi = {
fetch_double(t_points, i * 3 + 0),
fetch_double(t_points, i * 3 + 1),
fetch_double(t_points, i * 3 + 2)
};
for (int t = 0; t < k; ++t)
{
int j = tex1Dfetch(t_points_nn, i * k + t);
if (i != j)
{
double3 pj = {
fetch_double(t_points, j * 3 + 0),
fetch_double(t_points, j * 3 + 1),
fetch_double(t_points, j * 3 + 2)
};
double dij = distance2(pi, pj);
double R2 = 0.0;
for (int ii = 0; ii < current_polygon.vertex_nb; ++ii)
{
double3 pk = { current_polygon.vertex[ii].x, current_polygon.vertex[ii].y, current_polygon.vertex[ii].z };
double dik = distance2(pi, pk);
R2 = max(R2, dik);
}
if (dij > 4.1 * R2)
{
return;
}
clip_by_plane(current_polygon, polygon_buffer, pi, pj, j);
swap_polygons(current_polygon, polygon_buffer);
}
}
return;
}
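// The early return above is a "security radius" test: R2 is the largest squared distance from
// seed i to any polygon vertex, so once a neighbour is farther than about (2R)^2 away
// (dij > 4.1 * R2, with a small safety margin) its bisector cannot cut the polygon; assuming the
// kNN list is sorted by increasing distance, the remaining candidates can be skipped as well.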
__device__
void action(const Cuda_Polygon polygon, int current_seed)
{
double weight;
double3 position;
int _v1 = 0;
int _v2, _v3;
double3 pos1, pos2, pos3;
double d1, d2, d3;
int triangle_nb = polygon.vertex_nb - 2;
double total_weight = 0.0;
double3 centriodTimesWeight = { 0.0, 0.0, 0.0 };
double current_weight = 0.0;
double3 current_posTimesWeight = { 0.0, 0.0, 0.0 };
for (int i = 1; i < polygon.vertex_nb - 1; ++i)
{
_v2 = i; _v3 = i + 1;
pos1 = { polygon.vertex[_v1].x, polygon.vertex[_v1].y, polygon.vertex[_v1].z };
d1 = polygon.vertex[_v1].w;
pos2 = { polygon.vertex[_v2].x, polygon.vertex[_v2].y, polygon.vertex[_v2].z };
d2 = polygon.vertex[_v2].w;
pos3 = { polygon.vertex[_v3].x, polygon.vertex[_v3].y, polygon.vertex[_v3].z };
d3 = polygon.vertex[_v3].w;
computeTriangleCentriod(pos1, pos2, pos3, d1, d2, d3, centriodTimesWeight, total_weight);
current_weight += total_weight;
current_posTimesWeight.x += centriodTimesWeight.x;
current_posTimesWeight.y += centriodTimesWeight.y;
current_posTimesWeight.z += centriodTimesWeight.z;
total_weight = 0.0;
centriodTimesWeight = { 0.0, 0.0, 0.0 };
}
atomicAdd(&SeedsPolygon_nb[current_seed], 1);
//atomicAdd(&SeedsPolygon_nb[current_seed], 1);
if (triangle_nb > 0){
//atomicAdd(&SeedsPolygon_nb[current_seed], 1);
current_weight /= triangle_nb;
double3 temp_pos;
temp_pos.x = current_posTimesWeight.x / triangle_nb;
temp_pos.y = current_posTimesWeight.y / triangle_nb;
temp_pos.z = current_posTimesWeight.z / triangle_nb;
//try not to use the MyAtomicAdd
//SeedsInformation[current_seed * 4 + 0] += temp_pos.x;
//SeedsInformation[current_seed * 4 + 1] += temp_pos.y;
//SeedsInformation[current_seed * 4 + 2] += temp_pos.z;
//SeedsInformation[current_seed * 4 + 3] += current_weight;
MyAtomicAdd(&SeedsInformation[current_seed * 4 + 0], temp_pos.x);
MyAtomicAdd(&SeedsInformation[current_seed * 4 + 1], temp_pos.y);
MyAtomicAdd(&SeedsInformation[current_seed * 4 + 2], temp_pos.z);
MyAtomicAdd(&SeedsInformation[current_seed * 4 + 3], current_weight);
}
}
__global__
void compute_RVD_with_knn(double* seeds_pointer, int seeds_nb,
double* mesh_vertex, int mesh_vertex_nb,
int* mesh_facet, int mesh_facet_nb,
int* facet_center_neighbor_index, int* seeds_neighbor_index, int k, double* ret_seeds, double* test_seeds)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= mesh_facet_nb) return;
if (tid >= 0 && tid < seeds_nb){
SeedsInformation[tid * 4 + 0] = 0.0;
SeedsInformation[tid * 4 + 1] = 0.0;
SeedsInformation[tid * 4 + 2] = 0.0;
SeedsInformation[tid * 4 + 3] = 0.0;
}
int f_idx1 = mesh_facet[tid * 3 + 0];
int f_idx2 = mesh_facet[tid * 3 + 1];
int f_idx3 = mesh_facet[tid * 3 + 2];
int3 facet_index = { f_idx1, f_idx2, f_idx3 };
double3 v1 = {
fetch_double(t_vertex, facet_index.x * 3 + 0),
fetch_double(t_vertex, facet_index.x * 3 + 1),
fetch_double(t_vertex, facet_index.x * 3 + 2)
};
double3 v2 = {
fetch_double(t_vertex, facet_index.y * 3 + 0),
fetch_double(t_vertex, facet_index.y * 3 + 1),
fetch_double(t_vertex, facet_index.y * 3 + 2)
};
double3 v3 = {
fetch_double(t_vertex, facet_index.z * 3 + 0),
fetch_double(t_vertex, facet_index.z * 3 + 1),
fetch_double(t_vertex, facet_index.z * 3 + 2)
};
Cuda_Polygon current_polygon;
current_polygon.vertex_nb = 3;
//initialize the polygon with the 3 vertices of the current facet
/*
polygon pointer can be apart by several vertex
a vertex is made up with x,y,z,w
w : the weight of a vertex
*/
current_polygon.vertex[0].x = v1.x; current_polygon.vertex[0].y = v1.y; current_polygon.vertex[0].z = v1.z; current_polygon.vertex[0].w = 1.0;
current_polygon.vertex[1].x = v2.x; current_polygon.vertex[1].y = v2.y; current_polygon.vertex[1].z = v2.z; current_polygon.vertex[1].w = 1.0;
current_polygon.vertex[2].x = v3.x; current_polygon.vertex[2].y = v3.y; current_polygon.vertex[2].z = v3.z; current_polygon.vertex[2].w = 1.0;
Cuda_Polygon store = current_polygon;
//doesn't have the stack?
int to_visit[CUDA_Stack_size];
int to_visit_pos = 0;
int has_visited[CUDA_Stack_size];
int has_visited_nb = 0;
bool has_visited_flag = false;
to_visit[to_visit_pos++] = facet_center_neighbor_index[tid];
has_visited[has_visited_nb++] = to_visit[0];
while (to_visit_pos){
int current_seed = to_visit[to_visit_pos - 1];
to_visit_pos--;
intersection_clip_facet_SR(current_polygon, current_seed, seeds_pointer, seeds_nb, seeds_neighbor_index, k);
/*
if (tid == 0){
ret_seeds[0] = current_polygon.vertex_nb;
for (int i = 0; i < ret_seeds[0]; ++i){
ret_seeds[1 + 5 * i + 0] = current_polygon.vertex[i].x;
ret_seeds[1 + 5 * i + 1] = current_polygon.vertex[i].y;
ret_seeds[1 + 5 * i + 2] = current_polygon.vertex[i].z;
ret_seeds[1 + 5 * i + 3] = current_polygon.vertex[i].w;
ret_seeds[1 + 5 * i + 4] = current_polygon.vertex[i].neigh_s;
}
}
return;*/
//use the RVD
//action(current_polygon, current_seed);
//now we get the clipped polygon stored in "polygon"
//take care of the synchronization
//change the polygon data into "weight" and "position"
//Propagate to adjacent seeds
for (int v = 0; v < current_polygon.vertex_nb; ++v)
{
Cuda_Vertex ve = current_polygon.vertex[v];
int ns = ve.neigh_s;
if (ns != -1)
{
for (int ii = 0; ii < has_visited_nb; ++ii)
{
//if the neighbor seed has clipped the polygon
//the flag should be set "true"
if (has_visited[ii] == ns)
has_visited_flag = true;
}
//the neighbor seed is new!
if (!has_visited_flag)
{
to_visit[to_visit_pos++] = ns;
has_visited[has_visited_nb++] = ns;
}
has_visited_flag = false;
}
}
current_polygon = store;
}
//__syncthreads();
/*for (int i = 0; i < seeds_nb * 4; ++i)
{
ret_seeds[i] = seedsinformation[i];
}*/
/*for (int i = 0; i < seeds_nb; ++i)
{
ret_seeds[i] = SeedsPolygon_nb[i];
}*/
return;
}
extern "C" void runRVD(double* host_seeds_pointer, double* host_mesh_vertex_pointer,
int* host_facet_index, int points_nb, int mesh_vertex_nb, int mesh_facet_nb,
std::vector<int> facet_center_neigbors, std::vector<int> seeds_neighbors, std::vector<int>& seeds_polygon_nb)
{
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float elapsed_time;
hipEventRecord(start, 0);
//GPU data
double* dev_seeds_pointer;
double* dev_mesh_vertex_pointer;
int* dev_mesh_facet_index;
double* dev_seedsInformation;
double* ret_seeds;
seeds_polygon_nb.resize(points_nb);
int *dev_facet_center_neighbors, *dev_seeds_neighbors;
/*---------test----------*/
double* test_seeds;
int* dev_seedsPolygonNumber;
//CPU data
double* host_seedsInfo = (double*)malloc(sizeof(double) * points_nb * 16);
//allocate the memory
checkCudaErrors(hipMalloc((void**)&dev_seeds_pointer, sizeof(double) * points_nb * 3));
checkCudaErrors(hipMalloc((void**)&dev_mesh_vertex_pointer, sizeof(double) * mesh_vertex_nb * 3));
checkCudaErrors(hipMalloc((void**)&dev_mesh_facet_index, sizeof(int) * mesh_facet_nb * 3));
checkCudaErrors(hipMalloc((void**)&dev_seedsInformation, sizeof(double) * points_nb * 4));
checkCudaErrors(hipMalloc((void**)&ret_seeds, sizeof(double) * points_nb * 4));
checkCudaErrors(hipMalloc((void**)&test_seeds, sizeof(double) * points_nb * 16));
checkCudaErrors(hipMalloc((void**)&dev_seedsPolygonNumber, sizeof(int) * points_nb));
checkCudaErrors(hipMalloc((void**)&dev_facet_center_neighbors, sizeof(int) * facet_center_neigbors.size()));
checkCudaErrors(hipMalloc((void**)&dev_seeds_neighbors, sizeof(int) * seeds_neighbors.size()));
checkCudaErrors(hipMemcpyToSymbol(SeedsInformation, &dev_seedsInformation, sizeof(double*), size_t(0), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyToSymbol(SeedsPolygon_nb, &dev_seedsPolygonNumber, sizeof(int*), size_t(0), hipMemcpyHostToDevice));
//pass the data from CPU to GPU
hipMemcpy(dev_seeds_pointer, host_seeds_pointer, sizeof(double) * points_nb * 3, hipMemcpyHostToDevice);
hipMemcpy(dev_mesh_vertex_pointer, host_mesh_vertex_pointer, sizeof(double) * mesh_vertex_nb * 3, hipMemcpyHostToDevice);
hipMemcpy(dev_mesh_facet_index, host_facet_index, sizeof(int) * mesh_facet_nb * 3, hipMemcpyHostToDevice);
hipMemcpy(dev_facet_center_neighbors, &facet_center_neigbors[0], sizeof(int) * facet_center_neigbors.size(), hipMemcpyHostToDevice);
hipMemcpy(dev_seeds_neighbors, &seeds_neighbors[0], sizeof(int) * seeds_neighbors.size(), hipMemcpyHostToDevice);
CheckCUDAError("hipMemcpyHostToDevice");
hipBindTexture(NULL, t_vertex, dev_mesh_vertex_pointer, sizeof(double) * mesh_vertex_nb * 3);
hipBindTexture(NULL, t_points, dev_seeds_pointer, sizeof(double) * points_nb * 3);
hipBindTexture(NULL, t_points_nn, dev_seeds_neighbors, sizeof(int) * seeds_neighbors.size());
int threads = 512;
int blocks = mesh_facet_nb / threads + ((mesh_facet_nb % threads) ? 1 : 0);
compute_RVD_with_knn << <threads, blocks >> >(dev_seeds_pointer, points_nb, dev_mesh_vertex_pointer, mesh_vertex_nb, dev_mesh_facet_index, mesh_facet_nb, dev_facet_center_neighbors,
dev_seeds_neighbors, 20, ret_seeds, test_seeds);
CheckCUDAError("kenerl function");
hipMemcpy(host_seedsInfo, ret_seeds, sizeof(double) * points_nb * 4, hipMemcpyDeviceToHost);
CheckCUDAError("pass data back to CPU");
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time, start, stop);
printf("Compute RVD time: %lfms\n", elapsed_time);
//getchar();
//for (int i = 0; i < points_nb; ++i)
//{
// if (fabs(host_seedsInfo[i * 4 + 3]) >= 1e-12){
// host_seedsInfo[i * 4 + 0] /= host_seedsInfo[i * 4 + 3];
// host_seedsInfo[i * 4 + 1] /= host_seedsInfo[i * 4 + 3];
// host_seedsInfo[i * 4 + 2] /= host_seedsInfo[i * 4 + 3];
// }
//}
//std::ofstream fileout("S2new.txt");
//for (int i = 0; i < points_nb ; ++i)
//{
// //printf("Line %d : x : %.17lf, y : %.17lf, z : %.17lf, w : %.17lf\n", i, host_seedsInfo[i * 4 + 0], host_seedsInfo[i * 4 + 1], host_seedsInfo[i * 4 + 2], host_seedsInfo[i * 4 + 3]);
// fileout << "Line " << i << ':' << setprecision(16) << host_seedsInfo[i * 4 + 0] << ' ' << host_seedsInfo[i * 4 + 1] << ' ' << host_seedsInfo[i * 4 + 2] << ' ' << host_seedsInfo[i * 4 + 3] << endl;
// //fileout << "Points " << i << ':' << host_seedsInfo[i] << endl;
// //seeds_polygon_nb[i] = host_seedsInfo[i];
//}
free(host_seedsInfo);
hipFree(dev_seeds_pointer);
hipFree(dev_mesh_vertex_pointer);
hipFree(dev_mesh_facet_index);
hipFree(dev_seedsInformation);
hipFree(ret_seeds);
hipFree(test_seeds);
hipFree(dev_facet_center_neighbors);
hipFree(dev_seeds_neighbors);
hipUnbindTexture(t_vertex);
hipUnbindTexture(t_points);
hipUnbindTexture(t_points_nn);
return;
} | be50409ef1cdbf024cd084476262eea02bdeb76c.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_atomic_functions.h"
#include "Math_basics.cuh"
#include "AtomicAction.cuh"
#include "Polygon.h"
#include <iostream>
#include <fstream>
#include <iomanip>
using namespace std;
using namespace P_RVD;
//set global variable to store the seeds' position and weight
__device__ double* SeedsInformation;
__device__ int* SeedsPolygon_nb;
const int CUDA_Stack_size = 10;
texture<int2, 1> t_vertex;
texture<int2, 1> t_points;
texture<int, 1> t_points_nn;
struct Cuda_Vertex
{
double x;
double y;
double z;
double w;
int neigh_s = -1;
};
struct Cuda_Polygon
{
Cuda_Vertex vertex[10];
int vertex_nb;
};
/*
process the cuda error
*/
inline void checkCudaErrors(cudaError err)
{
if (cudaSuccess != err)
{
fprintf(stderr, "CUDA Runtime API error : %s.\n", cudaGetErrorString(err));
return;
}
}
void CheckCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
__device__
double MyAtomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do{
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
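// MyAtomicAdd emulates atomicAdd for doubles with a compare-and-swap loop on the value's
// 64-bit integer representation; native double-precision atomicAdd only exists on compute
// capability 6.0 and newer, so the retry loop keeps this code usable on older GPUs.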
static __inline__ __device__
double fetch_double(texture<int2, 1> t, int i){
int2 v = tex1Dfetch(t, i);
return __hiloint2double(v.y, v.x);
}
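// Textures cannot be bound to 64-bit doubles directly, so each double is stored as an int2
// (its low/high 32-bit halves) and reassembled here with __hiloint2double.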
/*
device function
compute the centroid of a facet's 3 vertices
*/
__device__ double3 computeCentriod(double3 a, double3 b, double3 c)
{
double3 ret = { (a.x + b.x + c.x) / 3,
(a.y + b.y + c.y) / 3,
(a.z + b.z + c.z) / 3, };
return ret;
}
/*
device function
compute the intersection(stored in a polygon) clipped by a bisector defined by seed i and j
input : polygon ping and its number
output: polygon pong and its number
*/
__device__
void clip_by_plane(Cuda_Polygon& ping, Cuda_Polygon& pong, double3 position_i,
double3 position_j, int j)
{
//reset the pong
pong.vertex_nb = 0;
if (ping.vertex_nb == 0)
return;
// Compute d = n . (2m), where n is the
// normal vector of the bisector [i, j]
// and m the middle point of the bisector.
double d = 0.0;
d = dot(add(position_i, position_j), sub(position_i, position_j));
//The predecessor of the first vertex is the last vertex
int prev_k = ping.vertex_nb - 1;
//get the position data
Cuda_Vertex* prev_vk = &ping.vertex[prev_k];
double3 prev_vertex_position = { prev_vk->x, prev_vk->y, prev_vk->z };
//then we compute prev_vertex_position "cross" n
//prev_l = prev_vertex_position . n
double prev_l = dot(prev_vertex_position, sub(position_i, position_j));
int prev_status = sgn(2.0 * prev_l - d);
//traverse the Vertex in this Polygon
for (int k = 0; k < ping.vertex_nb; ++k)
{
Cuda_Vertex* vk = &ping.vertex[k];
double3 vertex_position = { vk->x, vk->y, vk->z };
double l = dot(vertex_position, sub(position_i, position_j));
int status = sgn(2.0 * l - d);
//If status of edge extremities differ,
//then there is an intersection.
if (status != prev_status && (prev_status) != 0)
{
// create the intersection and update the Polyon
Cuda_Vertex I;
//compute the position and weight
double denom = 2.0 * (prev_l - l);
double lambda1, lambda2;
// Shit happens!
if (m_fabs(denom) < 1e-20)
{
lambda1 = 0.5;
lambda2 = 0.5;
}
else
{
lambda1 = (d - 2.0 * l) / denom;
// Note: lambda2 is also given
// by (2.0*l2-d)/denom
// (but 1.0 - lambda1 is a bit
// faster to compute...)
lambda2 = 1.0 - lambda1;
}
//Set the Position of Vertex
I.x = lambda1 * prev_vertex_position.x + lambda2 * vertex_position.x;
I.y = lambda1 * prev_vertex_position.y + lambda2 * vertex_position.y;
I.z = lambda1 * prev_vertex_position.z + lambda2 * vertex_position.z;
//Set the Weight of Vertex
I.w = (lambda1 * prev_vk->w + lambda2 * vk->w);
if (status > 0)
{
I.neigh_s = (j);
}
else {
I.neigh_s = (vk->neigh_s);
}
//add I to pong
pong.vertex[pong.vertex_nb] = I;
pong.vertex_nb++;
}
if (status > 0)
{
//add vertex to pong
pong.vertex[pong.vertex_nb] = *vk;
pong.vertex_nb++;
}
prev_vk = vk;
prev_vertex_position = vertex_position;
prev_status = status;
prev_l = l;
prev_k = k;
}
return;
}
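// clip_by_plane is essentially one Sutherland-Hodgman clipping step against the bisector of
// seeds i and j: vertices strictly closer to seed i (positive status) are kept, intersection
// points are interpolated where an edge crosses the bisector, and neigh_s records which seed's
// bisector produced each new vertex so the caller can propagate the facet to neighbouring
// Voronoi cells.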
/*
device function
swap the ping and pong to make sure
ping store the result;
*/
__device__
void swap_polygons(Cuda_Polygon& ping, Cuda_Polygon& pong)
{
//!!! could be accelerated
// not sure whether memset/memcpy could be used here instead
Cuda_Polygon t = ping;
ping = pong;
pong = t;
}
/*
*/
//__device__
//void intersection_clip_facet_with_knn(Cuda_Polygon& current_polygon , int current_seed, double* seeds_pointer, int seeds_nb, int *seeds_neighbor_index, int k)
//{
//
// //set a buffer pointer to store the polygon
// Cuda_Polygon polygon_buffer;
//
// for (int i = 0; i < k; ++i)
// {
// int j = seeds_neighbor_index[current_seed * k + i];
// if (current_seed != j)
// {
// clip_by_plane(current_polygon, polygon_buffer, current_seed, j, seeds_pointer, seeds_nb);
// swap_polygons(current_polygon, polygon_buffer);
// }
// }
// return;
//}
/*
*/
__device__
void intersection_clip_facet_SR(Cuda_Polygon& current_polygon, int i, double* seeds_pointer, int seeds_nb,
int *seeds_neighbor_index, int k)
{
Cuda_Polygon polygon_buffer;
double3 pi = {
fetch_double(t_points, i * 3 + 0),
fetch_double(t_points, i * 3 + 1),
fetch_double(t_points, i * 3 + 2)
};
for (int t = 0; t < k; ++t)
{
int j = tex1Dfetch(t_points_nn, i * k + t);
if (i != j)
{
double3 pj = {
fetch_double(t_points, j * 3 + 0),
fetch_double(t_points, j * 3 + 1),
fetch_double(t_points, j * 3 + 2)
};
double dij = distance2(pi, pj);
double R2 = 0.0;
for (int ii = 0; ii < current_polygon.vertex_nb; ++ii)
{
double3 pk = { current_polygon.vertex[ii].x, current_polygon.vertex[ii].y, current_polygon.vertex[ii].z };
double dik = distance2(pi, pk);
R2 = max(R2, dik);
}
if (dij > 4.1 * R2)
{
return;
}
clip_by_plane(current_polygon, polygon_buffer, pi, pj, j);
swap_polygons(current_polygon, polygon_buffer);
}
}
return;
}
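// The early return above is a "security radius" test: R2 is the largest squared distance from
// seed i to any polygon vertex, so once a neighbour is farther than about (2R)^2 away
// (dij > 4.1 * R2, with a small safety margin) its bisector cannot cut the polygon; assuming the
// kNN list is sorted by increasing distance, the remaining candidates can be skipped as well.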
__device__
void action(const Cuda_Polygon polygon, int current_seed)
{
double weight;
double3 position;
int _v1 = 0;
int _v2, _v3;
double3 pos1, pos2, pos3;
double d1, d2, d3;
int triangle_nb = polygon.vertex_nb - 2;
double total_weight = 0.0;
double3 centriodTimesWeight = { 0.0, 0.0, 0.0 };
double current_weight = 0.0;
double3 current_posTimesWeight = { 0.0, 0.0, 0.0 };
for (int i = 1; i < polygon.vertex_nb - 1; ++i)
{
_v2 = i; _v3 = i + 1;
pos1 = { polygon.vertex[_v1].x, polygon.vertex[_v1].y, polygon.vertex[_v1].z };
d1 = polygon.vertex[_v1].w;
pos2 = { polygon.vertex[_v2].x, polygon.vertex[_v2].y, polygon.vertex[_v2].z };
d2 = polygon.vertex[_v2].w;
pos3 = { polygon.vertex[_v3].x, polygon.vertex[_v3].y, polygon.vertex[_v3].z };
d3 = polygon.vertex[_v3].w;
computeTriangleCentriod(pos1, pos2, pos3, d1, d2, d3, centriodTimesWeight, total_weight);
current_weight += total_weight;
current_posTimesWeight.x += centriodTimesWeight.x;
current_posTimesWeight.y += centriodTimesWeight.y;
current_posTimesWeight.z += centriodTimesWeight.z;
total_weight = 0.0;
centriodTimesWeight = { 0.0, 0.0, 0.0 };
}
atomicAdd(&SeedsPolygon_nb[current_seed], 1);
//atomicAdd(&SeedsPolygon_nb[current_seed], 1);
if (triangle_nb > 0){
//atomicAdd(&SeedsPolygon_nb[current_seed], 1);
current_weight /= triangle_nb;
double3 temp_pos;
temp_pos.x = current_posTimesWeight.x / triangle_nb;
temp_pos.y = current_posTimesWeight.y / triangle_nb;
temp_pos.z = current_posTimesWeight.z / triangle_nb;
//try not to use the MyAtomicAdd
//SeedsInformation[current_seed * 4 + 0] += temp_pos.x;
//SeedsInformation[current_seed * 4 + 1] += temp_pos.y;
//SeedsInformation[current_seed * 4 + 2] += temp_pos.z;
//SeedsInformation[current_seed * 4 + 3] += current_weight;
MyAtomicAdd(&SeedsInformation[current_seed * 4 + 0], temp_pos.x);
MyAtomicAdd(&SeedsInformation[current_seed * 4 + 1], temp_pos.y);
MyAtomicAdd(&SeedsInformation[current_seed * 4 + 2], temp_pos.z);
MyAtomicAdd(&SeedsInformation[current_seed * 4 + 3], current_weight);
}
}
__global__
void compute_RVD_with_knn(double* seeds_pointer, int seeds_nb,
double* mesh_vertex, int mesh_vertex_nb,
int* mesh_facet, int mesh_facet_nb,
int* facet_center_neighbor_index, int* seeds_neighbor_index, int k, double* ret_seeds, double* test_seeds)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= mesh_facet_nb) return;
if (tid >= 0 && tid < seeds_nb){
SeedsInformation[tid * 4 + 0] = 0.0;
SeedsInformation[tid * 4 + 1] = 0.0;
SeedsInformation[tid * 4 + 2] = 0.0;
SeedsInformation[tid * 4 + 3] = 0.0;
}
int f_idx1 = mesh_facet[tid * 3 + 0];
int f_idx2 = mesh_facet[tid * 3 + 1];
int f_idx3 = mesh_facet[tid * 3 + 2];
int3 facet_index = { f_idx1, f_idx2, f_idx3 };
double3 v1 = {
fetch_double(t_vertex, facet_index.x * 3 + 0),
fetch_double(t_vertex, facet_index.x * 3 + 1),
fetch_double(t_vertex, facet_index.x * 3 + 2)
};
double3 v2 = {
fetch_double(t_vertex, facet_index.y * 3 + 0),
fetch_double(t_vertex, facet_index.y * 3 + 1),
fetch_double(t_vertex, facet_index.y * 3 + 2)
};
double3 v3 = {
fetch_double(t_vertex, facet_index.z * 3 + 0),
fetch_double(t_vertex, facet_index.z * 3 + 1),
fetch_double(t_vertex, facet_index.z * 3 + 2)
};
Cuda_Polygon current_polygon;
current_polygon.vertex_nb = 3;
//initialize the polygon with the 3 vertices of the current facet
/*
polygon pointer can be apart by several vertex
a vertex is made up with x,y,z,w
w : the weight of a vertex
*/
current_polygon.vertex[0].x = v1.x; current_polygon.vertex[0].y = v1.y; current_polygon.vertex[0].z = v1.z; current_polygon.vertex[0].w = 1.0;
current_polygon.vertex[1].x = v2.x; current_polygon.vertex[1].y = v2.y; current_polygon.vertex[1].z = v2.z; current_polygon.vertex[1].w = 1.0;
current_polygon.vertex[2].x = v3.x; current_polygon.vertex[2].y = v3.y; current_polygon.vertex[2].z = v3.z; current_polygon.vertex[2].w = 1.0;
Cuda_Polygon store = current_polygon;
//doesn't have the stack?
int to_visit[CUDA_Stack_size];
int to_visit_pos = 0;
int has_visited[CUDA_Stack_size];
int has_visited_nb = 0;
bool has_visited_flag = false;
to_visit[to_visit_pos++] = facet_center_neighbor_index[tid];
has_visited[has_visited_nb++] = to_visit[0];
while (to_visit_pos){
int current_seed = to_visit[to_visit_pos - 1];
to_visit_pos--;
intersection_clip_facet_SR(current_polygon, current_seed, seeds_pointer, seeds_nb, seeds_neighbor_index, k);
/*
if (tid == 0){
ret_seeds[0] = current_polygon.vertex_nb;
for (int i = 0; i < ret_seeds[0]; ++i){
ret_seeds[1 + 5 * i + 0] = current_polygon.vertex[i].x;
ret_seeds[1 + 5 * i + 1] = current_polygon.vertex[i].y;
ret_seeds[1 + 5 * i + 2] = current_polygon.vertex[i].z;
ret_seeds[1 + 5 * i + 3] = current_polygon.vertex[i].w;
ret_seeds[1 + 5 * i + 4] = current_polygon.vertex[i].neigh_s;
}
}
return;*/
//use the RVD
//action(current_polygon, current_seed);
//now we get the clipped polygon stored in "polygon"
//take care of the synchronization
//change the polygon data into "weight" and "position"
//Propagate to adjacent seeds
for (int v = 0; v < current_polygon.vertex_nb; ++v)
{
Cuda_Vertex ve = current_polygon.vertex[v];
int ns = ve.neigh_s;
if (ns != -1)
{
for (int ii = 0; ii < has_visited_nb; ++ii)
{
//if the neighbor seed has clipped the polygon
//the flag should be set "true"
if (has_visited[ii] == ns)
has_visited_flag = true;
}
//the neighbor seed is new!
if (!has_visited_flag)
{
to_visit[to_visit_pos++] = ns;
has_visited[has_visited_nb++] = ns;
}
has_visited_flag = false;
}
}
current_polygon = store;
}
//__syncthreads();
/*for (int i = 0; i < seeds_nb * 4; ++i)
{
ret_seeds[i] = seedsinformation[i];
}*/
/*for (int i = 0; i < seeds_nb; ++i)
{
ret_seeds[i] = SeedsPolygon_nb[i];
}*/
return;
}
extern "C" void runRVD(double* host_seeds_pointer, double* host_mesh_vertex_pointer,
int* host_facet_index, int points_nb, int mesh_vertex_nb, int mesh_facet_nb,
std::vector<int> facet_center_neigbors, std::vector<int> seeds_neighbors, std::vector<int>& seeds_polygon_nb)
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float elapsed_time;
cudaEventRecord(start, 0);
//GPU data
double* dev_seeds_pointer;
double* dev_mesh_vertex_pointer;
int* dev_mesh_facet_index;
double* dev_seedsInformation;
double* ret_seeds;
seeds_polygon_nb.resize(points_nb);
int *dev_facet_center_neighbors, *dev_seeds_neighbors;
/*---------test----------*/
double* test_seeds;
int* dev_seedsPolygonNumber;
//CPU data
double* host_seedsInfo = (double*)malloc(sizeof(double) * points_nb * 16);
//allocate the memory
checkCudaErrors(cudaMalloc((void**)&dev_seeds_pointer, sizeof(double) * points_nb * 3));
checkCudaErrors(cudaMalloc((void**)&dev_mesh_vertex_pointer, sizeof(double) * mesh_vertex_nb * 3));
checkCudaErrors(cudaMalloc((void**)&dev_mesh_facet_index, sizeof(int) * mesh_facet_nb * 3));
checkCudaErrors(cudaMalloc((void**)&dev_seedsInformation, sizeof(double) * points_nb * 4));
checkCudaErrors(cudaMalloc((void**)&ret_seeds, sizeof(double) * points_nb * 4));
checkCudaErrors(cudaMalloc((void**)&test_seeds, sizeof(double) * points_nb * 16));
checkCudaErrors(cudaMalloc((void**)&dev_seedsPolygonNumber, sizeof(int) * points_nb));
checkCudaErrors(cudaMalloc((void**)&dev_facet_center_neighbors, sizeof(int) * facet_center_neigbors.size()));
checkCudaErrors(cudaMalloc((void**)&dev_seeds_neighbors, sizeof(int) * seeds_neighbors.size()));
checkCudaErrors(cudaMemcpyToSymbol(SeedsInformation, &dev_seedsInformation, sizeof(double*), size_t(0), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyToSymbol(SeedsPolygon_nb, &dev_seedsPolygonNumber, sizeof(int*), size_t(0), cudaMemcpyHostToDevice));
//pass the data from CPU to GPU
cudaMemcpy(dev_seeds_pointer, host_seeds_pointer, sizeof(double) * points_nb * 3, cudaMemcpyHostToDevice);
cudaMemcpy(dev_mesh_vertex_pointer, host_mesh_vertex_pointer, sizeof(double) * mesh_vertex_nb * 3, cudaMemcpyHostToDevice);
cudaMemcpy(dev_mesh_facet_index, host_facet_index, sizeof(int) * mesh_facet_nb * 3, cudaMemcpyHostToDevice);
cudaMemcpy(dev_facet_center_neighbors, &facet_center_neigbors[0], sizeof(int) * facet_center_neigbors.size(), cudaMemcpyHostToDevice);
cudaMemcpy(dev_seeds_neighbors, &seeds_neighbors[0], sizeof(int) * seeds_neighbors.size(), cudaMemcpyHostToDevice);
CheckCUDAError("cudaMemcpyHostToDevice");
cudaBindTexture(NULL, t_vertex, dev_mesh_vertex_pointer, sizeof(double) * mesh_vertex_nb * 3);
cudaBindTexture(NULL, t_points, dev_seeds_pointer, sizeof(double) * points_nb * 3);
cudaBindTexture(NULL, t_points_nn, dev_seeds_neighbors, sizeof(int) * seeds_neighbors.size());
int threads = 512;
int blocks = mesh_facet_nb / threads + ((mesh_facet_nb % threads) ? 1 : 0);
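	//Grid sizing note: "blocks" is the ceiling of mesh_facet_nb / threads, i.e. one
	//thread per mesh facet; an equivalent one-liner (not in the original) would be
	//	int blocks = (mesh_facet_nb + threads - 1) / threads;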
	compute_RVD_with_knn << <blocks, threads >> >(dev_seeds_pointer, points_nb, dev_mesh_vertex_pointer, mesh_vertex_nb, dev_mesh_facet_index, mesh_facet_nb, dev_facet_center_neighbors,
		dev_seeds_neighbors, 20, ret_seeds, test_seeds);
CheckCUDAError("kenerl function");
cudaMemcpy(host_seedsInfo, ret_seeds, sizeof(double) * points_nb * 4, cudaMemcpyDeviceToHost);
CheckCUDAError("pass data back to CPU");
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsed_time, start, stop);
printf("Compute RVD time: %lfms\n", elapsed_time);
//getchar();
//for (int i = 0; i < points_nb; ++i)
//{
// if (fabs(host_seedsInfo[i * 4 + 3]) >= 1e-12){
// host_seedsInfo[i * 4 + 0] /= host_seedsInfo[i * 4 + 3];
// host_seedsInfo[i * 4 + 1] /= host_seedsInfo[i * 4 + 3];
// host_seedsInfo[i * 4 + 2] /= host_seedsInfo[i * 4 + 3];
// }
//}
//std::ofstream fileout("S2new.txt");
//for (int i = 0; i < points_nb ; ++i)
//{
// //printf("Line %d : x : %.17lf, y : %.17lf, z : %.17lf, w : %.17lf\n", i, host_seedsInfo[i * 4 + 0], host_seedsInfo[i * 4 + 1], host_seedsInfo[i * 4 + 2], host_seedsInfo[i * 4 + 3]);
// fileout << "Line " << i << ':' << setprecision(16) << host_seedsInfo[i * 4 + 0] << ' ' << host_seedsInfo[i * 4 + 1] << ' ' << host_seedsInfo[i * 4 + 2] << ' ' << host_seedsInfo[i * 4 + 3] << endl;
// //fileout << "Points " << i << ':' << host_seedsInfo[i] << endl;
// //seeds_polygon_nb[i] = host_seedsInfo[i];
//}
free(host_seedsInfo);
cudaFree(dev_seeds_pointer);
cudaFree(dev_mesh_vertex_pointer);
cudaFree(dev_mesh_facet_index);
cudaFree(dev_seedsInformation);
cudaFree(ret_seeds);
cudaFree(test_seeds);
cudaFree(dev_facet_center_neighbors);
cudaFree(dev_seeds_neighbors);
cudaUnbindTexture(t_vertex);
cudaUnbindTexture(t_points);
cudaUnbindTexture(t_points_nn);
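	//Release the timing events and the per-seed polygon counter allocated above;
	//this cleanup is not in the original code.
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaFree(dev_seedsPolygonNumber);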
return;
} |
3343d89dcfa1e6c47825dbac6d5985d54178d728.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5) {
if (comp >= (-0.0f + var_3 + var_4)) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
float tmp_1 = coshf((-1.0111E-41f / var_5 / (+1.8573E36f / +1.8043E-37f)));
comp = tmp_1 + tanhf(+1.4164E5f);
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6);
hipDeviceSynchronize();
return 0;
}
| 3343d89dcfa1e6c47825dbac6d5985d54178d728.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5) {
if (comp >= (-0.0f + var_3 + var_4)) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
float tmp_1 = coshf((-1.0111E-41f / var_5 / (+1.8573E36f / +1.8043E-37f)));
comp = tmp_1 + tanhf(+1.4164E5f);
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6);
cudaDeviceSynchronize();
return 0;
}
|