Dataset columns:
  hip_filename   string, length 5 to 84
  hip_content    string, length 79 to 9.69M
  cuda_filename  string, length 4 to 83
  cuda_content   string, length 19 to 9.69M
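Each row below pairs a hipified source file with its original CUDA counterpart, so the four columns map naturally onto a plain record. A minimal sketch in C++ of how one row could be represented (the struct name and comments are illustrative assumptions, not part of the dataset):

#include <string>

// Hypothetical record type for one row of the paired corpus described above;
// field names mirror the dataset columns, the struct itself is only a sketch.
struct HipCudaPair {
    std::string hip_filename;   // e.g. "<hash>.hip"
    std::string hip_content;    // full hipified HIP source text
    std::string cuda_filename;  // e.g. "<hash>.cu"
    std::string cuda_content;   // full original CUDA source text
};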
61f0882c60c54aa86b984e1c3e09ba353b38687e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>

#include "caffe2/core/context_gpu.h"
// #include "caffe2/operators/top_k_heap_selection.cuh"
#include "caffe2/operators/top_k_radix_selection.cuh"
#include "caffe2/utils/math.h"

#include "select_top_n_op.h"

namespace caffe2 {

namespace {

template <typename TIndex>
__global__ void SetIndex(const int nthreads, TIndex* output) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // just set the index
    output[index] = static_cast<TIndex>(index);
  }
}

// Does not work when K is larger than 512
// template <typename T, int kHeapSize, bool kSelectMax = false>
// void RunHeapSelectionImpl(
//     const T* input,
//     const TIndex outer_size,
//     const TIndex inner_size,
//     const int k,
//     T* values,
//     TIndex* indices,
//     CUDAContext* context) {
//   constexpr int kBlockSize = 256;
//   constexpr int kNumWarps = kBlockSize / kWarpSize;
//   constexpr int smem = kNumWarps * kHeapSize * (sizeof(T) + sizeof(TIndex));
//   constexpr T kInitVal = kSelectMax ? std::numeric_limits<T>::lowest()
//                                     : std::numeric_limits<T>::max();
//   selectRowsViaHeap<T, TIndex, TIndex, kBlockSize, kHeapSize, kSelectMax>
//       <<<outer_size, kBlockSize, smem, context->cuda_stream()>>>(
//           input,
//           values,
//           indices,
//           kInitVal,
//           std::numeric_limits<TIndex>::max(),
//           outer_size,
//           inner_size,
//           k);
// }

// Stupid that it only works when selecting the Bottom K
template <typename T, bool kSelectMax = false>
void RunRadixSelectionImpl(
    const T* input,
    const TIndex outer_size,
    const TIndex inner_size,
    const int k,
    T* values,
    TIndex* indices,
    CUDAContext* context) {
  const int block = ::min(
      math::roundUp(static_cast<int>(inner_size), kWarpSize),
      CAFFE_CUDA_NUM_THREADS);
  hipLaunchKernelGGL(( gatherTopK<T, kSelectMax, TIndex>)
      , dim3(outer_size), dim3(block), 0, context->cuda_stream(),
      input, inner_size, k, outer_size, values, indices);
}

} // namespace

template<>
bool SelectTopNOp<float, CUDAContext>::RunOnDevice() {
  auto& X = Input(0); // Input data
  DCHECK_EQ(X.ndim(), 4);
  const int num_images = X.dim32(0);
  const int num_probs = X.dim32(1) * X.dim32(2) * X.dim32(3);
  DCHECK_EQ(num_images * 2, OutputSize());

  if (num_probs <= top_n_) {
    // just select everything
    const float* Xp = X.data<float>();
    for (int i=0; i<num_images * 2; i+=2) {
      auto* Yi = Output(i);
      Yi->Resize(num_probs);
      hipLaunchKernelGGL(( SetIndex<TIndex>), dim3(CAFFE_GET_BLOCKS(num_probs)),
          dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
          num_probs, Yi->mutable_data<TIndex>());
      auto* Yv = Output(i+1);
      Yv->Resize(num_probs);
      context_.Copy<float, CUDAContext, CUDAContext>(num_probs, Xp,
          Yv->mutable_data<float>());
      Xp += num_probs;
    }
    return true;
  }

  const float* Xp = X.data<float>();
  for (int i=0; i<num_images * 2; i+=2) {
    auto* Yi = Output(i);
    auto* Yv = Output(i+1);
    Yi->Resize(top_n_);
    Yv->Resize(top_n_);
    // do the top_k selection thing, heap sort seems not working
    // RunHeapSelectionImpl<float, 1024>(Xp,
    //     1,
    //     num_probs,
    //     top_n_,
    //     Yv->mutable_data<float>(),
    //     Yi->mutable_data<TIndex>(),
    //     &context_);
    RunRadixSelectionImpl<float>(Xp,
        1,
        num_probs,
        top_n_,
        Yv->mutable_data<float>(),
        Yi->mutable_data<TIndex>(),
        &context_);
    Xp += num_probs;
  }

  return true;
}

REGISTER_CUDA_OPERATOR(SelectTopN, SelectTopNOp<float, CUDAContext>);

} // namespace caffe2
61f0882c60c54aa86b984e1c3e09ba353b38687e.cu
#include <cfloat>

#include "caffe2/core/context_gpu.h"
// #include "caffe2/operators/top_k_heap_selection.cuh"
#include "caffe2/operators/top_k_radix_selection.cuh"
#include "caffe2/utils/math.h"

#include "select_top_n_op.h"

namespace caffe2 {

namespace {

template <typename TIndex>
__global__ void SetIndex(const int nthreads, TIndex* output) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // just set the index
    output[index] = static_cast<TIndex>(index);
  }
}

// Does not work when K is larger than 512
// template <typename T, int kHeapSize, bool kSelectMax = false>
// void RunHeapSelectionImpl(
//     const T* input,
//     const TIndex outer_size,
//     const TIndex inner_size,
//     const int k,
//     T* values,
//     TIndex* indices,
//     CUDAContext* context) {
//   constexpr int kBlockSize = 256;
//   constexpr int kNumWarps = kBlockSize / kWarpSize;
//   constexpr int smem = kNumWarps * kHeapSize * (sizeof(T) + sizeof(TIndex));
//   constexpr T kInitVal = kSelectMax ? std::numeric_limits<T>::lowest()
//                                     : std::numeric_limits<T>::max();
//   selectRowsViaHeap<T, TIndex, TIndex, kBlockSize, kHeapSize, kSelectMax>
//       <<<outer_size, kBlockSize, smem, context->cuda_stream()>>>(
//           input,
//           values,
//           indices,
//           kInitVal,
//           std::numeric_limits<TIndex>::max(),
//           outer_size,
//           inner_size,
//           k);
// }

// Stupid that it only works when selecting the Bottom K
template <typename T, bool kSelectMax = false>
void RunRadixSelectionImpl(
    const T* input,
    const TIndex outer_size,
    const TIndex inner_size,
    const int k,
    T* values,
    TIndex* indices,
    CUDAContext* context) {
  const int block = std::min(
      math::roundUp(static_cast<int>(inner_size), kWarpSize),
      CAFFE_CUDA_NUM_THREADS);
  gatherTopK<T, kSelectMax, TIndex>
      <<<outer_size, block, 0, context->cuda_stream()>>>(
          input, inner_size, k, outer_size, values, indices);
}

} // namespace

template<>
bool SelectTopNOp<float, CUDAContext>::RunOnDevice() {
  auto& X = Input(0); // Input data
  DCHECK_EQ(X.ndim(), 4);
  const int num_images = X.dim32(0);
  const int num_probs = X.dim32(1) * X.dim32(2) * X.dim32(3);
  DCHECK_EQ(num_images * 2, OutputSize());

  if (num_probs <= top_n_) {
    // just select everything
    const float* Xp = X.data<float>();
    for (int i=0; i<num_images * 2; i+=2) {
      auto* Yi = Output(i);
      Yi->Resize(num_probs);
      SetIndex<TIndex><<<CAFFE_GET_BLOCKS(num_probs), CAFFE_CUDA_NUM_THREADS,
          0, context_.cuda_stream()>>>(num_probs, Yi->mutable_data<TIndex>());
      auto* Yv = Output(i+1);
      Yv->Resize(num_probs);
      context_.Copy<float, CUDAContext, CUDAContext>(num_probs, Xp,
          Yv->mutable_data<float>());
      Xp += num_probs;
    }
    return true;
  }

  const float* Xp = X.data<float>();
  for (int i=0; i<num_images * 2; i+=2) {
    auto* Yi = Output(i);
    auto* Yv = Output(i+1);
    Yi->Resize(top_n_);
    Yv->Resize(top_n_);
    // do the top_k selection thing, heap sort seems not working
    // RunHeapSelectionImpl<float, 1024>(Xp,
    //     1,
    //     num_probs,
    //     top_n_,
    //     Yv->mutable_data<float>(),
    //     Yi->mutable_data<TIndex>(),
    //     &context_);
    RunRadixSelectionImpl<float>(Xp,
        1,
        num_probs,
        top_n_,
        Yv->mutable_data<float>(),
        Yi->mutable_data<TIndex>(),
        &context_);
    Xp += num_probs;
  }

  return true;
}

REGISTER_CUDA_OPERATOR(SelectTopN, SelectTopNOp<float, CUDAContext>);

} // namespace caffe2
35aeb12c618d4d9a5d5427c5ae5e6287e3b8ea5f.hip
// !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>

/*
 * Demonstrate defining the dimensions of a block of threads and a grid of
 * blocks from the host.
 */

int main(int argc, char **argv)
{
    // define total data element
    int nElem = 1024;

    // define grid and block structure
    dim3 block (1024);
    dim3 grid  ((nElem + block.x - 1) / block.x);
    printf("grid.x %d block.x %d \n", grid.x, block.x);

    // reset block
    block.x = 512;
    grid.x = (nElem + block.x - 1) / block.x;
    printf("grid.x %d block.x %d \n", grid.x, block.x);

    // reset block
    block.x = 256;
    grid.x = (nElem + block.x - 1) / block.x;
    printf("grid.x %d block.x %d \n", grid.x, block.x);

    // reset block
    block.x = 128;
    grid.x = (nElem + block.x - 1) / block.x;
    printf("grid.x %d block.x %d \n", grid.x, block.x);

    // reset device before you leave
    CHECK(hipDeviceReset());

    return(0);
}
35aeb12c618d4d9a5d5427c5ae5e6287e3b8ea5f.cu
#include "../common/common.h" #include <cuda_runtime.h> #include <stdio.h> /* * Demonstrate defining the dimensions of a block of threads and a grid of * blocks from the host. */ int main(int argc, char **argv) { // define total data element int nElem = 1024; // define grid and block structure dim3 block (1024); dim3 grid ((nElem + block.x - 1) / block.x); printf("grid.x %d block.x %d \n", grid.x, block.x); // reset block block.x = 512; grid.x = (nElem + block.x - 1) / block.x; printf("grid.x %d block.x %d \n", grid.x, block.x); // reset block block.x = 256; grid.x = (nElem + block.x - 1) / block.x; printf("grid.x %d block.x %d \n", grid.x, block.x); // reset block block.x = 128; grid.x = (nElem + block.x - 1) / block.x; printf("grid.x %d block.x %d \n", grid.x, block.x); // reset device before you leave CHECK(cudaDeviceReset()); return(0); }
5d9b3acf882cb0f591145e378bec324e25ad2f1f.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <complex> #include <cstdlib> #include <iomanip> #include <iostream> #include <memory> #include <omp.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> using namespace std; #define CUDA_CHECK() \ do { \ if ( hipPeekAtLastError() != hipSuccess ) { \ hipError_t error = hipGetLastError(); \ printf( "cuda error: %i\n", error ); \ printf( " %s\n", hipGetErrorString( error ) ); \ printf( " line: %i\n", (int) __LINE__ ); \ printf( " file: %s\n", __FILE__ ); \ exit( -1 ); \ } \ } while ( 0 ) // Atomic add operation for double #if defined( __CUDA_ARCH__ ) && __CUDA_ARCH__ >= 600 #define atomicAdd2 atomicAdd #else __device__ double atomicAdd2( double *address, double val ) { unsigned long long int *address_as_ull = (unsigned long long int *) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS( address_as_ull, assumed, __double_as_longlong( val + __longlong_as_double( assumed ) ) ); } while ( assumed != old ); return __longlong_as_double( old ); } #endif template<class TYPE> TYPE *allocate( size_t count ) { TYPE *x; hipMallocManaged( (void **) &x, count * sizeof( TYPE ) ); memset( x, 0, count * sizeof( TYPE ) ); return x; } class Complex { public: __device__ __host__ Complex( double r = 0, double i = 0 ) : re( r ), im( i ) {} __device__ __host__ Complex &operator+=( const Complex &x ) { re += x.re; im += x.im; return *this; } __device__ __host__ double &real() { return re; } __device__ __host__ double &imag() { return im; } __device__ __host__ friend Complex operator*( const Complex &a, const Complex &b ) { return Complex( a.re * b.re - a.im * b.im, a.re * b.im + a.im * b.re ); } __device__ __host__ friend Complex operator*( const Complex &a, double b ) { return Complex( a.re * b, a.im * b ); } __device__ __host__ friend Complex operator*( double a, const Complex &b ) { return Complex( a * b.re, a * b.im ); } __device__ __host__ friend Complex operator+( const Complex &a, const Complex &b ) { return Complex( a.re + b.re, a.im + b.im ); } __device__ __host__ friend Complex operator-( const Complex &a, const Complex &b ) { return Complex( a.re - b.re, a.im - b.im ); } __device__ __host__ friend Complex operator-( const Complex &x ) { return Complex( -x.re, -x.im ); } __device__ __host__ friend Complex conj( const Complex &x ) { return Complex( x.re, -x.im ); } __device__ __host__ friend double real( const Complex &x ) { return x.re; } __device__ __host__ friend double imag( const Complex &x ) { return x.im; } __device__ __host__ friend double abs( const Complex &x ) { return sqrt( x.re * x.re + x.im * x.im ); } __device__ __host__ friend double abs2( const Complex &x ) { return x.re * x.re + x.im * x.im; } friend ostream &operator<<( ostream &os, const Complex &x ) { os << "(" << x.re << "," << x.im << ")"; return os; } private: double re; double im; }; __device__ __host__ inline void ssxt_scht_solver( double wxt, int igp, int my_igp, int ig, const Complex& wtilde, const Complex& Omega2, const Complex& matngmatmgp, Complex &ssxa, Complex &scha, const Complex& I_eps_array_igp_myIgp ) { const double to1 = 1e-6; const double limitone = 1.0 / ( to1 * 4.0 ); const double limittwo = 0.25; // 0.5^2 Complex wdiff = wxt - wtilde; Complex cden( wdiff ); double rden = 1 / abs2( cden ); Complex delw = wtilde * conj( cden ) * rden; double delwr = abs2( delw ); double wdiffr = abs2( wdiff ); Complex sch, ssx; if ( ( wdiffr > limittwo ) && ( delwr < 
limitone ) ) { sch = delw * I_eps_array_igp_myIgp; cden = wxt * wxt; rden = abs2( cden ); rden = 1.00 / rden; ssx = Omega2 * conj( cden ) * rden; } else if ( delwr > to1 ) { sch = Complex( 0.0, 0.0 ); cden = 4.0 * wtilde * wtilde * ( delw + 0.50 ); rden = abs2( cden ); rden = 1.00 / rden; ssx = -Omega2 * conj( cden ) * rden * delw; } else { sch = ssx = Complex( 0.0, 0.0 ); } ssxa = matngmatmgp * ssx; scha = matngmatmgp * sch; } /*__device__ __host__ inline void reduce_achstemp( int n1, const int *inv_igp_index, int ncouls, const Complex *aqsmtemp, const Complex *aqsntemp, const Complex *I_eps_array, Complex &achstemp, int ngpown, const double *vcoul ) { double to1 = 1e-6; Complex schstemp( 0.0, 0.0 ); for ( int my_igp = 0; my_igp < ngpown; my_igp++ ) { Complex mygpvar1, mygpvar2; int igp = inv_igp_index[my_igp]; if ( igp >= ncouls ) igp = ncouls - 1; if ( !( igp > ncouls || igp < 0 ) ) { Complex mygpvar1 = conj( aqsmtemp[n1 * ncouls + igp] ); Complex mygpvar2 = aqsntemp[n1 * ncouls + igp]; Complex schs = -I_eps_array[my_igp * ncouls + igp]; Complex matngmatmgp = aqsntemp[n1 * ncouls + igp] * mygpvar1; if ( abs( schs ) > to1 ) schstemp = schstemp + matngmatmgp * schs; } else { for ( int ig = 1; ig < ncouls; ++ig ) schstemp = schstemp - aqsntemp[n1 * ncouls + igp] * I_eps_array[my_igp * ncouls + ig] * mygpvar1; } achstemp += 0.5 * schstemp * vcoul[igp]; } }*/ __device__ __host__ inline void flagOCC_solver( double wxt, const Complex *wtilde_array, int my_igp, int n1, const Complex *aqsmtemp, const Complex *aqsntemp, const Complex *I_eps_array, Complex &ssxt, Complex &scht, int ncouls, int igp ) { Complex ssxa, scha; for ( int ig = 0; ig < ncouls; ++ig ) { Complex wtilde = wtilde_array[my_igp * ncouls + ig]; Complex Omega2 = wtilde * wtilde * I_eps_array[my_igp * ncouls + ig]; Complex mygpvar1 = conj( aqsmtemp[n1 * ncouls + igp] ); Complex matngmatmgp = aqsntemp[n1 * ncouls + ig] * mygpvar1; ssxt_scht_solver( wxt, igp, my_igp, ig, wtilde, Omega2, matngmatmgp, ssxa, scha, I_eps_array[my_igp * ncouls + ig] ); ssxt += ssxa; scht += scha; } } __device__ __host__ inline void noflagOCC_solver( double wxt, const Complex *wtilde_array, int my_igp, int n1, const Complex *aqsmtemp, const Complex *aqsntemp, const Complex *I_eps_array, Complex &scht, int ncouls, int igp ) { Complex mygpvar1 = conj( aqsmtemp[n1 * ncouls + igp] ); Complex scht_loc( 0.00, 0.00 ); for ( int ig = 0; ig < ncouls; ++ig ) { Complex wdiff = wxt - wtilde_array[my_igp * ncouls + ig]; double wdiffr = abs2( wdiff ); double rden = 1.0 / wdiffr; Complex delw = wtilde_array[my_igp * ncouls + ig] * conj( wdiff ) * rden; //*rden double delwr = abs2( delw ); scht_loc += mygpvar1 * aqsntemp[n1 * ncouls + ig] * delw * I_eps_array[my_igp * ncouls + ig]; } scht = scht_loc; } __device__ inline void calculate( int n1, int my_igp, int nvband, const int *inv_igp_index, int ncouls, const double *wx_array, const Complex *aqsmtemp, const Complex *aqsntemp, const Complex *I_eps_array, const Complex *wtilde_array, Complex *achtemp, Complex *asxtemp, const double *vcoul ) { const int nstart = 0, nend = 3; const double occ = 1.0; bool flag_occ = n1 < nvband; int igp = inv_igp_index[my_igp]; if ( igp >= ncouls ) igp = ncouls - 1; if ( flag_occ ) { Complex scht, ssxt; for ( int iw = nstart; iw < nend; iw++ ) { flagOCC_solver( wx_array[iw], wtilde_array, my_igp, n1, aqsmtemp, aqsntemp, I_eps_array, ssxt, scht, ncouls, igp ); asxtemp[iw] += ssxt * ( occ * vcoul[igp] ); achtemp[iw] += (double) 0.5 * scht * vcoul[igp]; } } else { Complex scht; for ( int iw = 
nstart; iw < nend; ++iw ) { noflagOCC_solver( wx_array[iw], wtilde_array, my_igp, n1, aqsmtemp, aqsntemp, I_eps_array, scht, ncouls, igp ); achtemp[iw] += (double) 0.5 * ( scht * vcoul[igp] ); } } } // Get the globally unique thread id __device__ int getGlobalIdx3D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * ( blockDim.x * blockDim.y * blockDim.z ) + ( threadIdx.z * ( blockDim.x * blockDim.y ) ) + ( threadIdx.y * blockDim.x ) + threadIdx.x; return threadId; } // Compute kernel __global__ void call_compute( int number_bands, int ngpown, int nvband, const int *inv_igp_index, int ncouls, const double *wx_array, const Complex *aqsmtemp, const Complex *aqsntemp, const Complex *I_eps_array, const Complex *wtilde_array, Complex *achtemp, Complex *asxtemp, const double *vcoul ) { int tid = getGlobalIdx3D(); int n1 = tid % number_bands; int my_igp = tid / number_bands; // Perform the local computations Complex achtemp2[3], asxtemp2[3]; if ( n1 < number_bands && my_igp < ngpown ) calculate( n1, my_igp, nvband, inv_igp_index, ncouls, wx_array, aqsmtemp, aqsntemp, I_eps_array, wtilde_array, achtemp2, asxtemp2, vcoul ); // Perform the reduce for ( int i = 0; i < 3; i++ ) { atomicAdd2( &achtemp[i].real(), achtemp2[i].real() ); atomicAdd2( &achtemp[i].imag(), achtemp2[i].imag() ); atomicAdd2( &asxtemp[i].real(), asxtemp2[i].real() ); atomicAdd2( &asxtemp[i].imag(), asxtemp2[i].imag() ); } } int main( int argc, char **argv ) { if ( argc != 5 ) { std::cout << "The correct form of input is : " << endl; std::cout << " ./a.out <number_bands> <number_valence_bands> <number_plane_waves> <matrix_divider>\n"; exit( 0 ); } int number_bands = atoi( argv[1] ); int nvband = atoi( argv[2] ); int ncouls = atoi( argv[3] ); int nodes_per_group = atoi( argv[4] ); int npes = 1; // Represents the number of ranks per node int ngpown = ncouls / ( nodes_per_group * npes ); // Number of gvectors per mpi task double e_lk = 10; double dw = 1; const int nstart = 0, nend = 3; double to1 = 1e-6; double gamma = 0.5; double sexcut = 4.0; double limitone = 1.0 / ( to1 * 4.0 ); double limittwo = 0.25; double e_n1kq = 6.0; // Printing out the params passed. 
std::cout << "number_bands = " << number_bands << "\t nvband = " << nvband << "\t ncouls = " << ncouls << "\t nodes_per_group = " << nodes_per_group << "\t ngpown = " << ngpown << "\t nend = " << nend << "\t nstart = " << nstart << "\t gamma = " << gamma << "\t sexcut = " << sexcut << "\t limitone = " << limitone << "\t limittwo = " << limittwo << endl; const Complex expr0( 0.0, 0.0 ); const Complex expr( 0.5, 0.5 ); // Memory allocation Complex *aqsmtemp = allocate<Complex>( number_bands * ncouls ); Complex *aqsntemp = allocate<Complex>( number_bands * ncouls ); Complex *I_eps_array = allocate<Complex>( ngpown * ncouls ); Complex *wtilde_array = allocate<Complex>( ngpown * ncouls ); int *inv_igp_index = allocate<int>( ngpown ); double *vcoul = allocate<double>( ncouls ); Complex *achtemp = allocate<Complex>( 3 ); Complex *asxtemp = allocate<Complex>( 3 ); double *wx_array = allocate<double>( 3 ); CUDA_CHECK(); cout << "Size of wtilde_array = " << ( ncouls * ngpown * 2.0 * 8 ) / 1048576 << " Mbytes" << endl; cout << "Size of aqsntemp = " << ( ncouls * number_bands * 2.0 * 8 ) / 1048576 << " Mbytes" << endl; cout << "Size of I_eps_array array = " << ( ncouls * ngpown * 2.0 * 8 ) / 1048576 << " Mbytes" << endl; for ( int i = 0; i < number_bands; i++ ) for ( int j = 0; j < ncouls; j++ ) { aqsntemp[i * ncouls + j] = ( (double) ( i + j ) ) * expr; aqsmtemp[i * ncouls + j] = ( (double) ( i + j ) ) * expr; } for ( int i = 0; i < ngpown; i++ ) for ( int j = 0; j < ncouls; j++ ) { I_eps_array[i * ncouls + j] = ( (double) ( i + j ) ) * expr; wtilde_array[i * ncouls + j] = ( (double) ( i + j ) ) * expr; } for ( int i = 0; i < ncouls; i++ ) vcoul[i] = 1.0 * i; for ( int ig = 0; ig < ngpown; ++ig ) inv_igp_index[ig] = ( ig + 1 ) * ncouls / ngpown; for ( int iw = nstart; iw < nend; ++iw ) { achtemp[iw] = expr0; wx_array[iw] = e_lk - e_n1kq + dw * ( ( iw + 1 ) - 2 ); if ( abs( wx_array[iw] ) < to1 ) wx_array[iw] = to1; } // Prefetch memory for read only variables int device = -1; hipGetDevice( &device ); hipMemPrefetchAsync( aqsmtemp, number_bands * ncouls * sizeof( Complex ), device ); hipMemPrefetchAsync( aqsntemp, number_bands * ncouls * sizeof( Complex ), device ); hipMemPrefetchAsync( I_eps_array, ngpown * ncouls * sizeof( Complex ), device ); hipMemPrefetchAsync( wtilde_array, ngpown * ncouls * sizeof( Complex ), device ); hipMemPrefetchAsync( inv_igp_index, ngpown * sizeof( int ), device ); hipMemPrefetchAsync( vcoul, ncouls * sizeof( double ), device ); hipMemPrefetchAsync( wx_array, 3 * sizeof( double ), device ); hipDeviceSynchronize(); //Start the timer before the work begins. 
timeval startTimer, endTimer; gettimeofday(&startTimer, NULL); // Complex achstemp(0.0, 0.0); // for(int n1 = 0; n1<number_bands; ++n1) //{ // reduce_achstemp(n1, inv_igp_index, ncouls,aqsmtemp, aqsntemp, I_eps_array, achstemp, // ngpown, vcoul); //} int threads = 256; int block_size = number_bands * ngpown / threads; std::cout << "call_compute<<<" << block_size << ", " << threads << ">>>\n"; hipLaunchKernelGGL(( call_compute), dim3(block_size), dim3(threads), 0, 0, number_bands, ngpown, nvband, inv_igp_index, ncouls, wx_array, aqsmtemp, aqsntemp, I_eps_array, wtilde_array, achtemp, asxtemp, vcoul ); hipDeviceSynchronize(); // Time Taken gettimeofday(&endTimer, NULL); double elapsedTimer = (endTimer.tv_sec - startTimer.tv_sec) +1e-6*(endTimer.tv_usec - startTimer.tv_usec); // Print results for ( int iw = nstart; iw < nend; ++iw ) cout << "achtemp[" << iw << "] = " << std::setprecision( 15 ) << achtemp[iw] << endl; cout << "********** Time Taken **********= " << elapsedTimer << " secs" << endl; // Free memory hipFree( inv_igp_index ); hipFree( vcoul ); hipFree( aqsmtemp ); hipFree( aqsntemp ); hipFree( I_eps_array ); hipFree( wtilde_array ); hipFree( achtemp ); hipFree( asxtemp ); return 0; } // Almost done code
5d9b3acf882cb0f591145e378bec324e25ad2f1f.cu
#include <cmath> #include <complex> #include <cstdlib> #include <iomanip> #include <iostream> #include <memory> #include <omp.h> #include <sys/time.h> #include <cuda.h> #include <cuda_runtime_api.h> using namespace std; #define CUDA_CHECK() \ do { \ if ( cudaPeekAtLastError() != cudaSuccess ) { \ cudaError_t error = cudaGetLastError(); \ printf( "cuda error: %i\n", error ); \ printf( " %s\n", cudaGetErrorString( error ) ); \ printf( " line: %i\n", (int) __LINE__ ); \ printf( " file: %s\n", __FILE__ ); \ exit( -1 ); \ } \ } while ( 0 ) // Atomic add operation for double #if defined( __CUDA_ARCH__ ) && __CUDA_ARCH__ >= 600 #define atomicAdd2 atomicAdd #else __device__ double atomicAdd2( double *address, double val ) { unsigned long long int *address_as_ull = (unsigned long long int *) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS( address_as_ull, assumed, __double_as_longlong( val + __longlong_as_double( assumed ) ) ); } while ( assumed != old ); return __longlong_as_double( old ); } #endif template<class TYPE> TYPE *allocate( size_t count ) { TYPE *x; cudaMallocManaged( (void **) &x, count * sizeof( TYPE ) ); memset( x, 0, count * sizeof( TYPE ) ); return x; } class Complex { public: __device__ __host__ Complex( double r = 0, double i = 0 ) : re( r ), im( i ) {} __device__ __host__ Complex &operator+=( const Complex &x ) { re += x.re; im += x.im; return *this; } __device__ __host__ double &real() { return re; } __device__ __host__ double &imag() { return im; } __device__ __host__ friend Complex operator*( const Complex &a, const Complex &b ) { return Complex( a.re * b.re - a.im * b.im, a.re * b.im + a.im * b.re ); } __device__ __host__ friend Complex operator*( const Complex &a, double b ) { return Complex( a.re * b, a.im * b ); } __device__ __host__ friend Complex operator*( double a, const Complex &b ) { return Complex( a * b.re, a * b.im ); } __device__ __host__ friend Complex operator+( const Complex &a, const Complex &b ) { return Complex( a.re + b.re, a.im + b.im ); } __device__ __host__ friend Complex operator-( const Complex &a, const Complex &b ) { return Complex( a.re - b.re, a.im - b.im ); } __device__ __host__ friend Complex operator-( const Complex &x ) { return Complex( -x.re, -x.im ); } __device__ __host__ friend Complex conj( const Complex &x ) { return Complex( x.re, -x.im ); } __device__ __host__ friend double real( const Complex &x ) { return x.re; } __device__ __host__ friend double imag( const Complex &x ) { return x.im; } __device__ __host__ friend double abs( const Complex &x ) { return sqrt( x.re * x.re + x.im * x.im ); } __device__ __host__ friend double abs2( const Complex &x ) { return x.re * x.re + x.im * x.im; } friend ostream &operator<<( ostream &os, const Complex &x ) { os << "(" << x.re << "," << x.im << ")"; return os; } private: double re; double im; }; __device__ __host__ inline void ssxt_scht_solver( double wxt, int igp, int my_igp, int ig, const Complex& wtilde, const Complex& Omega2, const Complex& matngmatmgp, Complex &ssxa, Complex &scha, const Complex& I_eps_array_igp_myIgp ) { const double to1 = 1e-6; const double limitone = 1.0 / ( to1 * 4.0 ); const double limittwo = 0.25; // 0.5^2 Complex wdiff = wxt - wtilde; Complex cden( wdiff ); double rden = 1 / abs2( cden ); Complex delw = wtilde * conj( cden ) * rden; double delwr = abs2( delw ); double wdiffr = abs2( wdiff ); Complex sch, ssx; if ( ( wdiffr > limittwo ) && ( delwr < limitone ) ) { sch = delw * I_eps_array_igp_myIgp; cden = wxt * wxt; 
rden = abs2( cden ); rden = 1.00 / rden; ssx = Omega2 * conj( cden ) * rden; } else if ( delwr > to1 ) { sch = Complex( 0.0, 0.0 ); cden = 4.0 * wtilde * wtilde * ( delw + 0.50 ); rden = abs2( cden ); rden = 1.00 / rden; ssx = -Omega2 * conj( cden ) * rden * delw; } else { sch = ssx = Complex( 0.0, 0.0 ); } ssxa = matngmatmgp * ssx; scha = matngmatmgp * sch; } /*__device__ __host__ inline void reduce_achstemp( int n1, const int *inv_igp_index, int ncouls, const Complex *aqsmtemp, const Complex *aqsntemp, const Complex *I_eps_array, Complex &achstemp, int ngpown, const double *vcoul ) { double to1 = 1e-6; Complex schstemp( 0.0, 0.0 ); for ( int my_igp = 0; my_igp < ngpown; my_igp++ ) { Complex mygpvar1, mygpvar2; int igp = inv_igp_index[my_igp]; if ( igp >= ncouls ) igp = ncouls - 1; if ( !( igp > ncouls || igp < 0 ) ) { Complex mygpvar1 = conj( aqsmtemp[n1 * ncouls + igp] ); Complex mygpvar2 = aqsntemp[n1 * ncouls + igp]; Complex schs = -I_eps_array[my_igp * ncouls + igp]; Complex matngmatmgp = aqsntemp[n1 * ncouls + igp] * mygpvar1; if ( abs( schs ) > to1 ) schstemp = schstemp + matngmatmgp * schs; } else { for ( int ig = 1; ig < ncouls; ++ig ) schstemp = schstemp - aqsntemp[n1 * ncouls + igp] * I_eps_array[my_igp * ncouls + ig] * mygpvar1; } achstemp += 0.5 * schstemp * vcoul[igp]; } }*/ __device__ __host__ inline void flagOCC_solver( double wxt, const Complex *wtilde_array, int my_igp, int n1, const Complex *aqsmtemp, const Complex *aqsntemp, const Complex *I_eps_array, Complex &ssxt, Complex &scht, int ncouls, int igp ) { Complex ssxa, scha; for ( int ig = 0; ig < ncouls; ++ig ) { Complex wtilde = wtilde_array[my_igp * ncouls + ig]; Complex Omega2 = wtilde * wtilde * I_eps_array[my_igp * ncouls + ig]; Complex mygpvar1 = conj( aqsmtemp[n1 * ncouls + igp] ); Complex matngmatmgp = aqsntemp[n1 * ncouls + ig] * mygpvar1; ssxt_scht_solver( wxt, igp, my_igp, ig, wtilde, Omega2, matngmatmgp, ssxa, scha, I_eps_array[my_igp * ncouls + ig] ); ssxt += ssxa; scht += scha; } } __device__ __host__ inline void noflagOCC_solver( double wxt, const Complex *wtilde_array, int my_igp, int n1, const Complex *aqsmtemp, const Complex *aqsntemp, const Complex *I_eps_array, Complex &scht, int ncouls, int igp ) { Complex mygpvar1 = conj( aqsmtemp[n1 * ncouls + igp] ); Complex scht_loc( 0.00, 0.00 ); for ( int ig = 0; ig < ncouls; ++ig ) { Complex wdiff = wxt - wtilde_array[my_igp * ncouls + ig]; double wdiffr = abs2( wdiff ); double rden = 1.0 / wdiffr; Complex delw = wtilde_array[my_igp * ncouls + ig] * conj( wdiff ) * rden; //*rden double delwr = abs2( delw ); scht_loc += mygpvar1 * aqsntemp[n1 * ncouls + ig] * delw * I_eps_array[my_igp * ncouls + ig]; } scht = scht_loc; } __device__ inline void calculate( int n1, int my_igp, int nvband, const int *inv_igp_index, int ncouls, const double *wx_array, const Complex *aqsmtemp, const Complex *aqsntemp, const Complex *I_eps_array, const Complex *wtilde_array, Complex *achtemp, Complex *asxtemp, const double *vcoul ) { const int nstart = 0, nend = 3; const double occ = 1.0; bool flag_occ = n1 < nvband; int igp = inv_igp_index[my_igp]; if ( igp >= ncouls ) igp = ncouls - 1; if ( flag_occ ) { Complex scht, ssxt; for ( int iw = nstart; iw < nend; iw++ ) { flagOCC_solver( wx_array[iw], wtilde_array, my_igp, n1, aqsmtemp, aqsntemp, I_eps_array, ssxt, scht, ncouls, igp ); asxtemp[iw] += ssxt * ( occ * vcoul[igp] ); achtemp[iw] += (double) 0.5 * scht * vcoul[igp]; } } else { Complex scht; for ( int iw = nstart; iw < nend; ++iw ) { noflagOCC_solver( wx_array[iw], 
wtilde_array, my_igp, n1, aqsmtemp, aqsntemp, I_eps_array, scht, ncouls, igp ); achtemp[iw] += (double) 0.5 * ( scht * vcoul[igp] ); } } } // Get the globally unique thread id __device__ int getGlobalIdx3D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * ( blockDim.x * blockDim.y * blockDim.z ) + ( threadIdx.z * ( blockDim.x * blockDim.y ) ) + ( threadIdx.y * blockDim.x ) + threadIdx.x; return threadId; } // Compute kernel __global__ void call_compute( int number_bands, int ngpown, int nvband, const int *inv_igp_index, int ncouls, const double *wx_array, const Complex *aqsmtemp, const Complex *aqsntemp, const Complex *I_eps_array, const Complex *wtilde_array, Complex *achtemp, Complex *asxtemp, const double *vcoul ) { int tid = getGlobalIdx3D(); int n1 = tid % number_bands; int my_igp = tid / number_bands; // Perform the local computations Complex achtemp2[3], asxtemp2[3]; if ( n1 < number_bands && my_igp < ngpown ) calculate( n1, my_igp, nvband, inv_igp_index, ncouls, wx_array, aqsmtemp, aqsntemp, I_eps_array, wtilde_array, achtemp2, asxtemp2, vcoul ); // Perform the reduce for ( int i = 0; i < 3; i++ ) { atomicAdd2( &achtemp[i].real(), achtemp2[i].real() ); atomicAdd2( &achtemp[i].imag(), achtemp2[i].imag() ); atomicAdd2( &asxtemp[i].real(), asxtemp2[i].real() ); atomicAdd2( &asxtemp[i].imag(), asxtemp2[i].imag() ); } } int main( int argc, char **argv ) { if ( argc != 5 ) { std::cout << "The correct form of input is : " << endl; std::cout << " ./a.out <number_bands> <number_valence_bands> <number_plane_waves> <matrix_divider>\n"; exit( 0 ); } int number_bands = atoi( argv[1] ); int nvband = atoi( argv[2] ); int ncouls = atoi( argv[3] ); int nodes_per_group = atoi( argv[4] ); int npes = 1; // Represents the number of ranks per node int ngpown = ncouls / ( nodes_per_group * npes ); // Number of gvectors per mpi task double e_lk = 10; double dw = 1; const int nstart = 0, nend = 3; double to1 = 1e-6; double gamma = 0.5; double sexcut = 4.0; double limitone = 1.0 / ( to1 * 4.0 ); double limittwo = 0.25; double e_n1kq = 6.0; // Printing out the params passed. 
std::cout << "number_bands = " << number_bands << "\t nvband = " << nvband << "\t ncouls = " << ncouls << "\t nodes_per_group = " << nodes_per_group << "\t ngpown = " << ngpown << "\t nend = " << nend << "\t nstart = " << nstart << "\t gamma = " << gamma << "\t sexcut = " << sexcut << "\t limitone = " << limitone << "\t limittwo = " << limittwo << endl; const Complex expr0( 0.0, 0.0 ); const Complex expr( 0.5, 0.5 ); // Memory allocation Complex *aqsmtemp = allocate<Complex>( number_bands * ncouls ); Complex *aqsntemp = allocate<Complex>( number_bands * ncouls ); Complex *I_eps_array = allocate<Complex>( ngpown * ncouls ); Complex *wtilde_array = allocate<Complex>( ngpown * ncouls ); int *inv_igp_index = allocate<int>( ngpown ); double *vcoul = allocate<double>( ncouls ); Complex *achtemp = allocate<Complex>( 3 ); Complex *asxtemp = allocate<Complex>( 3 ); double *wx_array = allocate<double>( 3 ); CUDA_CHECK(); cout << "Size of wtilde_array = " << ( ncouls * ngpown * 2.0 * 8 ) / 1048576 << " Mbytes" << endl; cout << "Size of aqsntemp = " << ( ncouls * number_bands * 2.0 * 8 ) / 1048576 << " Mbytes" << endl; cout << "Size of I_eps_array array = " << ( ncouls * ngpown * 2.0 * 8 ) / 1048576 << " Mbytes" << endl; for ( int i = 0; i < number_bands; i++ ) for ( int j = 0; j < ncouls; j++ ) { aqsntemp[i * ncouls + j] = ( (double) ( i + j ) ) * expr; aqsmtemp[i * ncouls + j] = ( (double) ( i + j ) ) * expr; } for ( int i = 0; i < ngpown; i++ ) for ( int j = 0; j < ncouls; j++ ) { I_eps_array[i * ncouls + j] = ( (double) ( i + j ) ) * expr; wtilde_array[i * ncouls + j] = ( (double) ( i + j ) ) * expr; } for ( int i = 0; i < ncouls; i++ ) vcoul[i] = 1.0 * i; for ( int ig = 0; ig < ngpown; ++ig ) inv_igp_index[ig] = ( ig + 1 ) * ncouls / ngpown; for ( int iw = nstart; iw < nend; ++iw ) { achtemp[iw] = expr0; wx_array[iw] = e_lk - e_n1kq + dw * ( ( iw + 1 ) - 2 ); if ( abs( wx_array[iw] ) < to1 ) wx_array[iw] = to1; } // Prefetch memory for read only variables int device = -1; cudaGetDevice( &device ); cudaMemPrefetchAsync( aqsmtemp, number_bands * ncouls * sizeof( Complex ), device ); cudaMemPrefetchAsync( aqsntemp, number_bands * ncouls * sizeof( Complex ), device ); cudaMemPrefetchAsync( I_eps_array, ngpown * ncouls * sizeof( Complex ), device ); cudaMemPrefetchAsync( wtilde_array, ngpown * ncouls * sizeof( Complex ), device ); cudaMemPrefetchAsync( inv_igp_index, ngpown * sizeof( int ), device ); cudaMemPrefetchAsync( vcoul, ncouls * sizeof( double ), device ); cudaMemPrefetchAsync( wx_array, 3 * sizeof( double ), device ); cudaDeviceSynchronize(); //Start the timer before the work begins. 
timeval startTimer, endTimer; gettimeofday(&startTimer, NULL); // Complex achstemp(0.0, 0.0); // for(int n1 = 0; n1<number_bands; ++n1) //{ // reduce_achstemp(n1, inv_igp_index, ncouls,aqsmtemp, aqsntemp, I_eps_array, achstemp, // ngpown, vcoul); //} int threads = 256; int block_size = number_bands * ngpown / threads; std::cout << "call_compute<<<" << block_size << ", " << threads << ">>>\n"; call_compute<<<block_size, threads>>>( number_bands, ngpown, nvband, inv_igp_index, ncouls, wx_array, aqsmtemp, aqsntemp, I_eps_array, wtilde_array, achtemp, asxtemp, vcoul ); cudaDeviceSynchronize(); // Time Taken gettimeofday(&endTimer, NULL); double elapsedTimer = (endTimer.tv_sec - startTimer.tv_sec) +1e-6*(endTimer.tv_usec - startTimer.tv_usec); // Print results for ( int iw = nstart; iw < nend; ++iw ) cout << "achtemp[" << iw << "] = " << std::setprecision( 15 ) << achtemp[iw] << endl; cout << "********** Time Taken **********= " << elapsedTimer << " secs" << endl; // Free memory cudaFree( inv_igp_index ); cudaFree( vcoul ); cudaFree( aqsmtemp ); cudaFree( aqsntemp ); cudaFree( I_eps_array ); cudaFree( wtilde_array ); cudaFree( achtemp ); cudaFree( asxtemp ); return 0; } // Almost done code
735d453f5eaa706786f710cace96b6de6b1a3c6a.hip
// !!! This is a file automatically generated by hipify!!! /* Fractal code for CS 4380 / CS 5351 Copyright (c) 2018, Texas State University. All rights reserved. Redistribution in source or binary form, with or without modification, is *not* permitted. Use in source and binary forms, with or without modification, is only permitted for academic use in CS 4380 or CS 5351 at Texas State University. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Author: Martin Burtscher */ #include <cstdlib> #include <cmath> #include <sys/time.h> #include <hip/hip_runtime.h> #include "cs43805351.h" static const float Delta = 0.004f; static const float xMid = 0.2389f; static const float yMid = 0.55267f; static const int ThreadsPerBlock = 512; static __global__ void fractalKernel(const int width, const int frames, unsigned char* pic, int n) { // compute frames const long idx = threadIdx.x + blockIdx.x * (long)blockDim.x; if(idx < n){ const int frame = idx / (width * width); const float delta = Delta * powf(0.98f, frame); const float xMin = xMid - delta; const float yMin = yMid - delta; const float dw = 2.0f * delta / width; const int row = (idx / width) % width; const float cy = yMin + row * dw; const int col = idx % width; const float cx = xMin + col * dw; float x = cx; float y = cy; int depth = 256; float x2, y2; do { x2 = x * x; y2 = y * y; y = 2 * x * y + cy; x = x2 - y2 + cx; depth--; } while ((depth > 0) && ((x2 + y2) < 5.0)); pic[frame * width * width + row * width + col] = (unsigned char)depth; } } //ChecCuda function static void CheckCuda() { hipError_t e; hipDeviceSynchronize(); if (hipSuccess != (e = hipGetLastError())) { fprintf(stderr, "CUDA error %d: %s\n", e, hipGetErrorString(e)); exit(-1); } } int main(int argc, char *argv[]) { printf("Fractal v1.7\n"); // check command line if (argc != 3) {fprintf(stderr, "usage: %s frame_width num_frames\n", argv[0]); exit(-1);} const int width = atoi(argv[1]); if (width < 10) {fprintf(stderr, "error: frame_width must be at least 10\n"); exit(-1);} const int frames = atoi(argv[2]); if (frames < 1) {fprintf(stderr, "error: num_frames must be at least 1\n"); exit(-1);} printf("computing %d frames of %d by %d fractal\n", frames, width, width); // allocate space for pic array on GPU int N = frames * width * width; unsigned char* d_pic; const int size = N * sizeof(char); hipMalloc((void **)&d_pic, size); //allocate space for pic array on host unsigned char* pic = new unsigned char[frames * width * width]; //copying pic value to device if (hipSuccess != hipMemcpy(d_pic, pic, size, hipMemcpyHostToDevice)) {fprintf(stderr, "copying to device failed\n"); exit(-1);} // start time timeval start, end; gettimeofday(&start, NULL); // launch GPU kernel hipLaunchKernelGGL(( fractalKernel), dim3((N + ThreadsPerBlock - 1) / ThreadsPerBlock), dim3(ThreadsPerBlock), 0, 0, width, 
frames, d_pic, N); hipDeviceSynchronize(); // end time gettimeofday(&end, NULL); const double runtime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0; printf("compute time: %.3f s\n", runtime); CheckCuda(); //copy result back to host if (hipSuccess != hipMemcpy(pic, d_pic, size, hipMemcpyDeviceToHost)) {fprintf(stderr, "copying from device failed\n"); exit(-1);} // verify result by writing frames to BMP files if ((width <= 256) && (frames <= 100)) { for (int frame = 0; frame < frames; frame++) { char name[32]; sprintf(name, "fractal%d.bmp", frame + 1000); writeBMP(width, width, &pic[frame * width * width], name); } } delete [] pic; hipFree(d_pic); return 0; }
735d453f5eaa706786f710cace96b6de6b1a3c6a.cu
/* Fractal code for CS 4380 / CS 5351 Copyright (c) 2018, Texas State University. All rights reserved. Redistribution in source or binary form, with or without modification, is *not* permitted. Use in source and binary forms, with or without modification, is only permitted for academic use in CS 4380 or CS 5351 at Texas State University. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Author: Martin Burtscher */ #include <cstdlib> #include <cmath> #include <sys/time.h> #include <cuda.h> #include "cs43805351.h" static const float Delta = 0.004f; static const float xMid = 0.2389f; static const float yMid = 0.55267f; static const int ThreadsPerBlock = 512; static __global__ void fractalKernel(const int width, const int frames, unsigned char* pic, int n) { // compute frames const long idx = threadIdx.x + blockIdx.x * (long)blockDim.x; if(idx < n){ const int frame = idx / (width * width); const float delta = Delta * powf(0.98f, frame); const float xMin = xMid - delta; const float yMin = yMid - delta; const float dw = 2.0f * delta / width; const int row = (idx / width) % width; const float cy = yMin + row * dw; const int col = idx % width; const float cx = xMin + col * dw; float x = cx; float y = cy; int depth = 256; float x2, y2; do { x2 = x * x; y2 = y * y; y = 2 * x * y + cy; x = x2 - y2 + cx; depth--; } while ((depth > 0) && ((x2 + y2) < 5.0)); pic[frame * width * width + row * width + col] = (unsigned char)depth; } } //ChecCuda function static void CheckCuda() { cudaError_t e; cudaDeviceSynchronize(); if (cudaSuccess != (e = cudaGetLastError())) { fprintf(stderr, "CUDA error %d: %s\n", e, cudaGetErrorString(e)); exit(-1); } } int main(int argc, char *argv[]) { printf("Fractal v1.7\n"); // check command line if (argc != 3) {fprintf(stderr, "usage: %s frame_width num_frames\n", argv[0]); exit(-1);} const int width = atoi(argv[1]); if (width < 10) {fprintf(stderr, "error: frame_width must be at least 10\n"); exit(-1);} const int frames = atoi(argv[2]); if (frames < 1) {fprintf(stderr, "error: num_frames must be at least 1\n"); exit(-1);} printf("computing %d frames of %d by %d fractal\n", frames, width, width); // allocate space for pic array on GPU int N = frames * width * width; unsigned char* d_pic; const int size = N * sizeof(char); cudaMalloc((void **)&d_pic, size); //allocate space for pic array on host unsigned char* pic = new unsigned char[frames * width * width]; //copying pic value to device if (cudaSuccess != cudaMemcpy(d_pic, pic, size, cudaMemcpyHostToDevice)) {fprintf(stderr, "copying to device failed\n"); exit(-1);} // start time timeval start, end; gettimeofday(&start, NULL); // launch GPU kernel fractalKernel<<<(N + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>>(width, frames, d_pic, N); cudaDeviceSynchronize(); // end time gettimeofday(&end, NULL); const double runtime 
= end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0; printf("compute time: %.3f s\n", runtime); CheckCuda(); //copy result back to host if (cudaSuccess != cudaMemcpy(pic, d_pic, size, cudaMemcpyDeviceToHost)) {fprintf(stderr, "copying from device failed\n"); exit(-1);} // verify result by writing frames to BMP files if ((width <= 256) && (frames <= 100)) { for (int frame = 0; frame < frames; frame++) { char name[32]; sprintf(name, "fractal%d.bmp", frame + 1000); writeBMP(width, width, &pic[frame * width * width], name); } } delete [] pic; cudaFree(d_pic); return 0; }
f263c3945d92746e5a237462d4df2a859e708cd8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void computeLabFrameMoments(real4* __restrict__ posq, int4* __restrict__ multipoleParticles, float* __restrict__ molecularDipoles, float* __restrict__ molecularQuadrupoles, real* __restrict__ labFrameDipoles, real* __restrict__ labFrameQuadrupoles) { // get coordinates of this atom and the z & x axis atoms // compute the vector between the atoms and 1/sqrt(d2), d2 is distance between // this atom and the axis atom // this atom is referred to as the k-atom in notes below // code common to ZThenX and Bisector for (int atom = blockIdx.x*blockDim.x+threadIdx.x; atom < NUM_ATOMS; atom += gridDim.x*blockDim.x) { int4 particles = multipoleParticles[atom]; if (particles.x >= 0 && particles.z >= 0) { real4 thisParticlePos = posq[atom]; real4 posZ = posq[particles.z]; real3 vectorZ = make_real3(posZ.x-thisParticlePos.x, posZ.y-thisParticlePos.y, posZ.z-thisParticlePos.z); real4 posX = posq[particles.x]; real3 vectorX = make_real3(posX.x-thisParticlePos.x, posX.y-thisParticlePos.y, posX.z-thisParticlePos.z); int axisType = particles.w; /* z-only (1) norm z (2) select random x (3) x = x - (x.z)z (4) norm x z-then-x (1) norm z (2) norm x (not needed) (3) x = x - (x.z)z (4) norm x bisector (1) norm z (2) norm x (3) z = x + z (4) norm z (5) x = x - (x.z)z (6) norm x z-bisect (1) norm z (2) norm x (3) norm y (3) x = x + y (4) norm x (5) x = x - (x.z)z (6) norm x 3-fold (1) norm z (2) norm x (3) norm y (4) z = x + y + z (5) norm z (6) x = x - (x.z)z (7) norm x */ // branch based on axis type vectorZ = normalize(vectorZ); if (axisType == 1) { // bisector vectorX = normalize(vectorX); vectorZ += vectorX; vectorZ = normalize(vectorZ); } else if (axisType == 2 || axisType == 3) { // z-bisect if (particles.y >= 0 && particles.y < NUM_ATOMS) { real4 posY = posq[particles.y]; real3 vectorY = make_real3(posY.x-thisParticlePos.x, posY.y-thisParticlePos.y, posY.z-thisParticlePos.z); vectorY = normalize(vectorY); vectorX = normalize(vectorX); if (axisType == 2) { vectorX += vectorY; vectorX = normalize(vectorX); } else { // 3-fold vectorZ += vectorX + vectorY; vectorZ = normalize(vectorZ); } } } else if (axisType >= 4) vectorX = make_real3((real) 0.1f); // x = x - (x.z)z vectorX -= dot(vectorZ, vectorX)*vectorZ; vectorX = normalize(vectorX); real3 vectorY = cross(vectorZ, vectorX); // use identity rotation matrix for unrecognized axis types if (axisType < 0 || axisType > 4) { vectorX.x = 1; vectorX.y = 0; vectorX.z = 0; vectorY.x = 0; vectorY.y = 1; vectorY.z = 0; vectorZ.x = 0; vectorZ.y = 0; vectorZ.z = 1; } // Check the chirality and see whether it needs to be reversed bool reverse = false; if (axisType != 0 && particles.x >= 0 && particles.y >=0 && particles.z >= 0) { real4 posY = posq[particles.y]; real delta[4][3]; delta[0][0] = thisParticlePos.x - posY.x; delta[0][1] = thisParticlePos.y - posY.y; delta[0][2] = thisParticlePos.z - posY.z; delta[1][0] = posZ.x - posY.x; delta[1][1] = posZ.y - posY.y; delta[1][2] = posZ.z - posY.z; delta[2][0] = posX.x - posY.x; delta[2][1] = posX.y - posY.y; delta[2][2] = posX.z - posY.z; delta[3][0] = delta[1][1]*delta[2][2] - delta[1][2]*delta[2][1]; delta[3][1] = delta[2][1]*delta[0][2] - delta[2][2]*delta[0][1]; delta[3][2] = delta[0][1]*delta[1][2] - delta[0][2]*delta[1][1]; real volume = delta[3][0]*delta[0][0] + delta[3][1]*delta[1][0] + delta[3][2]*delta[2][0]; reverse = (volume < 0); } // Transform the dipole unsigned int offset = 3*atom; real molDipole[3]; 
molDipole[0] = molecularDipoles[offset]; molDipole[1] = molecularDipoles[offset+1]; molDipole[2] = molecularDipoles[offset+2]; if (reverse) molDipole[1] *= -1; labFrameDipoles[offset] = molDipole[0]*vectorX.x + molDipole[1]*vectorY.x + molDipole[2]*vectorZ.x; labFrameDipoles[offset+1] = molDipole[0]*vectorX.y + molDipole[1]*vectorY.y + molDipole[2]*vectorZ.y; labFrameDipoles[offset+2] = molDipole[0]*vectorX.z + molDipole[1]*vectorY.z + molDipole[2]*vectorZ.z; // --------------------------------------------------------------------------------------- // Transform the quadrupole offset = 5*atom; real mPoleXX = molecularQuadrupoles[offset]; real mPoleXY = molecularQuadrupoles[offset+1]; real mPoleXZ = molecularQuadrupoles[offset+2]; real mPoleYY = molecularQuadrupoles[offset+3]; real mPoleYZ = molecularQuadrupoles[offset+4]; real mPoleZZ = -(mPoleXX+mPoleYY); if (reverse) { mPoleXY *= -1; mPoleYZ *= -1; } labFrameQuadrupoles[offset] = vectorX.x*(vectorX.x*mPoleXX + vectorY.x*mPoleXY + vectorZ.x*mPoleXZ) + vectorY.x*(vectorX.x*mPoleXY + vectorY.x*mPoleYY + vectorZ.x*mPoleYZ) + vectorZ.x*(vectorX.x*mPoleXZ + vectorY.x*mPoleYZ + vectorZ.x*mPoleZZ); labFrameQuadrupoles[offset+1] = vectorX.x*(vectorX.y*mPoleXX + vectorY.y*mPoleXY + vectorZ.y*mPoleXZ) + vectorY.x*(vectorX.y*mPoleXY + vectorY.y*mPoleYY + vectorZ.y*mPoleYZ) + vectorZ.x*(vectorX.y*mPoleXZ + vectorY.y*mPoleYZ + vectorZ.y*mPoleZZ); labFrameQuadrupoles[offset+2] = vectorX.x*(vectorX.z*mPoleXX + vectorY.z*mPoleXY + vectorZ.z*mPoleXZ) + vectorY.x*(vectorX.z*mPoleXY + vectorY.z*mPoleYY + vectorZ.z*mPoleYZ) + vectorZ.x*(vectorX.z*mPoleXZ + vectorY.z*mPoleYZ + vectorZ.z*mPoleZZ); labFrameQuadrupoles[offset+3] = vectorX.y*(vectorX.y*mPoleXX + vectorY.y*mPoleXY + vectorZ.y*mPoleXZ) + vectorY.y*(vectorX.y*mPoleXY + vectorY.y*mPoleYY + vectorZ.y*mPoleYZ) + vectorZ.y*(vectorX.y*mPoleXZ + vectorY.y*mPoleYZ + vectorZ.y*mPoleZZ); labFrameQuadrupoles[offset+4] = vectorX.y*(vectorX.z*mPoleXX + vectorY.z*mPoleXY + vectorZ.z*mPoleXZ) + vectorY.y*(vectorX.z*mPoleXY + vectorY.z*mPoleYY + vectorZ.z*mPoleYZ) + vectorZ.y*(vectorX.z*mPoleXZ + vectorY.z*mPoleYZ + vectorZ.z*mPoleZZ); } else { labFrameDipoles[3*atom] = molecularDipoles[3*atom]; labFrameDipoles[3*atom+1] = molecularDipoles[3*atom+1]; labFrameDipoles[3*atom+2] = molecularDipoles[3*atom+2]; labFrameQuadrupoles[5*atom] = molecularQuadrupoles[5*atom]; labFrameQuadrupoles[5*atom+1] = molecularQuadrupoles[5*atom+1]; labFrameQuadrupoles[5*atom+2] = molecularQuadrupoles[5*atom+2]; labFrameQuadrupoles[5*atom+3] = molecularQuadrupoles[5*atom+3]; labFrameQuadrupoles[5*atom+4] = molecularQuadrupoles[5*atom+4]; } } } extern "C" __global__ void recordInducedDipoles(const long long* __restrict__ fieldBuffers, const long long* __restrict__ fieldPolarBuffers, #ifdef USE_GK const long long* __restrict__ gkFieldBuffers, real* __restrict__ inducedDipoleS, real* __restrict__ inducedDipolePolarS, #endif real* __restrict__ inducedDipole, real* __restrict__ inducedDipolePolar, const float* __restrict__ polarizability) { for (int atom = blockIdx.x*blockDim.x+threadIdx.x; atom < NUM_ATOMS; atom += gridDim.x*blockDim.x) { real scale = polarizability[atom]/(real) 0x100000000; inducedDipole[3*atom] = scale*fieldBuffers[atom]; inducedDipole[3*atom+1] = scale*fieldBuffers[atom+PADDED_NUM_ATOMS]; inducedDipole[3*atom+2] = scale*fieldBuffers[atom+PADDED_NUM_ATOMS*2]; inducedDipolePolar[3*atom] = scale*fieldPolarBuffers[atom]; inducedDipolePolar[3*atom+1] = scale*fieldPolarBuffers[atom+PADDED_NUM_ATOMS]; 
inducedDipolePolar[3*atom+2] = scale*fieldPolarBuffers[atom+PADDED_NUM_ATOMS*2]; #ifdef USE_GK inducedDipoleS[3*atom] = scale*(fieldBuffers[atom]+gkFieldBuffers[atom]); inducedDipoleS[3*atom+1] = scale*(fieldBuffers[atom+PADDED_NUM_ATOMS]+gkFieldBuffers[atom+PADDED_NUM_ATOMS]); inducedDipoleS[3*atom+2] = scale*(fieldBuffers[atom+PADDED_NUM_ATOMS*2]+gkFieldBuffers[atom+PADDED_NUM_ATOMS*2]); inducedDipolePolarS[3*atom] = scale*(fieldPolarBuffers[atom]+gkFieldBuffers[atom]); inducedDipolePolarS[3*atom+1] = scale*(fieldPolarBuffers[atom+PADDED_NUM_ATOMS]+gkFieldBuffers[atom+PADDED_NUM_ATOMS]); inducedDipolePolarS[3*atom+2] = scale*(fieldPolarBuffers[atom+PADDED_NUM_ATOMS*2]+gkFieldBuffers[atom+PADDED_NUM_ATOMS*2]); #endif } } /** * Normalize a vector and return what its magnitude was. */ inline __device__ real normVector(real3& v) { real n = SQRT(dot(v, v)); v *= (n > 0 ? RECIP(n) : 0); return n; } /** * Compute the force on each particle due to the torque. */ extern "C" __global__ void mapTorqueToForce(unsigned long long* __restrict__ forceBuffers, const long long* __restrict__ torqueBuffers, const real4* __restrict__ posq, const int4* __restrict__ multipoleParticles) { const int U = 0; const int V = 1; const int W = 2; const int R = 3; const int S = 4; const int UV = 5; const int UW = 6; const int VW = 7; const int UR = 8; const int US = 9; const int VS = 10; const int WS = 11; const int LastVectorIndex = 12; const int X = 0; const int Y = 1; const int Z = 2; const int I = 3; const real torqueScale = RECIP((double) 0x100000000); real3 forces[4]; real norms[LastVectorIndex]; real3 vector[LastVectorIndex]; real angles[LastVectorIndex][2]; for (int atom = blockIdx.x*blockDim.x + threadIdx.x; atom < NUM_ATOMS; atom += gridDim.x*blockDim.x) { int4 particles = multipoleParticles[atom]; int axisAtom = particles.z; int axisType = particles.w; // NoAxisType if (axisType < 5 && particles.z >= 0) { real3 atomPos = trimTo3(posq[atom]); vector[U] = atomPos - trimTo3(posq[axisAtom]); norms[U] = normVector(vector[U]); if (axisType != 4 && particles.x >= 0) vector[V] = atomPos - trimTo3(posq[particles.x]); else vector[V] = make_real3(0.1f); norms[V] = normVector(vector[V]); // W = UxV if (axisType < 2 || axisType > 3) vector[W] = cross(vector[U], vector[V]); else vector[W] = atomPos - trimTo3(posq[particles.y]); norms[W] = normVector(vector[W]); vector[UV] = cross(vector[V], vector[U]); vector[UW] = cross(vector[W], vector[U]); vector[VW] = cross(vector[W], vector[V]); norms[UV] = normVector(vector[UV]); norms[UW] = normVector(vector[UW]); norms[VW] = normVector(vector[VW]); angles[UV][0] = dot(vector[U], vector[V]); angles[UV][1] = SQRT(1 - angles[UV][0]*angles[UV][0]); angles[UW][0] = dot(vector[U], vector[W]); angles[UW][1] = SQRT(1 - angles[UW][0]*angles[UW][0]); angles[VW][0] = dot(vector[V], vector[W]); angles[VW][1] = SQRT(1 - angles[VW][0]*angles[VW][0]); real dphi[3]; real3 torque = make_real3(torqueScale*torqueBuffers[atom], torqueScale*torqueBuffers[atom+PADDED_NUM_ATOMS], torqueScale*torqueBuffers[atom+PADDED_NUM_ATOMS*2]); dphi[U] = -dot(vector[U], torque); dphi[V] = -dot(vector[V], torque); dphi[W] = -dot(vector[W], torque); // z-then-x and bisector if (axisType == 0 || axisType == 1) { real factor1 = dphi[V]/(norms[U]*angles[UV][1]); real factor2 = dphi[W]/(norms[U]); real factor3 = -dphi[U]/(norms[V]*angles[UV][1]); real factor4 = 0; if (axisType == 1) { factor2 *= 0.5f; factor4 = 0.5f*dphi[W]/(norms[V]); } forces[Z] = vector[UV]*factor1 + factor2*vector[UW]; forces[X] = vector[UV]*factor3 + 
factor4*vector[VW]; forces[I] = -(forces[X]+forces[Z]); forces[Y] = make_real3(0); } else if (axisType == 2) { // z-bisect vector[R] = vector[V] + vector[W]; vector[S] = cross(vector[U], vector[R]); norms[R] = normVector(vector[R]); norms[S] = normVector(vector[S]); vector[UR] = cross(vector[R], vector[U]); vector[US] = cross(vector[S], vector[U]); vector[VS] = cross(vector[S], vector[V]); vector[WS] = cross(vector[S], vector[W]); norms[UR] = normVector(vector[UR]); norms[US] = normVector(vector[US]); norms[VS] = normVector(vector[VS]); norms[WS] = normVector(vector[WS]); angles[UR][0] = dot(vector[U], vector[R]); angles[UR][1] = SQRT(1 - angles[UR][0]*angles[UR][0]); angles[US][0] = dot(vector[U], vector[S]); angles[US][1] = SQRT(1 - angles[US][0]*angles[US][0]); angles[VS][0] = dot(vector[V], vector[S]); angles[VS][1] = SQRT(1 - angles[VS][0]*angles[VS][0]); angles[WS][0] = dot(vector[W], vector[S]); angles[WS][1] = SQRT(1 - angles[WS][0]*angles[WS][0]); real3 t1 = vector[V] - vector[S]*angles[VS][0]; real3 t2 = vector[W] - vector[S]*angles[WS][0]; normVector(t1); normVector(t2); real ut1cos = dot(vector[U], t1); real ut1sin = SQRT(1 - ut1cos*ut1cos); real ut2cos = dot(vector[U], t2); real ut2sin = SQRT(1 - ut2cos*ut2cos); real dphiR = -dot(vector[R], torque); real dphiS = -dot(vector[S], torque); real factor1 = dphiR/(norms[U]*angles[UR][1]); real factor2 = dphiS/(norms[U]); real factor3 = dphi[U]/(norms[V]*(ut1sin+ut2sin)); real factor4 = dphi[U]/(norms[W]*(ut1sin+ut2sin)); forces[Z] = vector[UR]*factor1 + factor2*vector[US]; forces[X] = (angles[VS][1]*vector[S] - angles[VS][0]*t1)*factor3; forces[Y] = (angles[WS][1]*vector[S] - angles[WS][0]*t2)*factor4; forces[I] = -(forces[X] + forces[Y] + forces[Z]); } else if (axisType == 3) { // 3-fold forces[Z] = (vector[UW]*dphi[W]/(norms[U]*angles[UW][1]) + vector[UV]*dphi[V]/(norms[U]*angles[UV][1]) - vector[UW]*dphi[U]/(norms[U]*angles[UW][1]) - vector[UV]*dphi[U]/(norms[U]*angles[UV][1]))/3; forces[X] = (vector[VW]*dphi[W]/(norms[V]*angles[VW][1]) - vector[UV]*dphi[U]/(norms[V]*angles[UV][1]) - vector[VW]*dphi[V]/(norms[V]*angles[VW][1]) + vector[UV]*dphi[V]/(norms[V]*angles[UV][1]))/3; forces[Y] = (-vector[UW]*dphi[U]/(norms[W]*angles[UW][1]) - vector[VW]*dphi[V]/(norms[W]*angles[VW][1]) + vector[UW]*dphi[W]/(norms[W]*angles[UW][1]) + vector[VW]*dphi[W]/(norms[W]*angles[VW][1]))/3; forces[I] = -(forces[X] + forces[Y] + forces[Z]); } else if (axisType == 4) { // z-only forces[Z] = vector[UV]*dphi[V]/(norms[U]*angles[UV][1]); forces[X] = make_real3(0); forces[Y] = make_real3(0); forces[I] = -forces[Z]; } else { forces[Z] = make_real3(0); forces[X] = make_real3(0); forces[Y] = make_real3(0); forces[I] = make_real3(0); } // Store results atomicAdd(&forceBuffers[particles.z], static_cast<unsigned long long>((long long) (forces[Z].x*0x100000000))); atomicAdd(&forceBuffers[particles.z+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forces[Z].y*0x100000000))); atomicAdd(&forceBuffers[particles.z+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forces[Z].z*0x100000000))); if (axisType != 4) { atomicAdd(&forceBuffers[particles.x], static_cast<unsigned long long>((long long) (forces[X].x*0x100000000))); atomicAdd(&forceBuffers[particles.x+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forces[X].y*0x100000000))); atomicAdd(&forceBuffers[particles.x+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forces[X].z*0x100000000))); } if ((axisType == 2 || axisType == 3) && particles.y > -1) { 
atomicAdd(&forceBuffers[particles.y], static_cast<unsigned long long>((long long) (forces[Y].x*0x100000000))); atomicAdd(&forceBuffers[particles.y+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forces[Y].y*0x100000000))); atomicAdd(&forceBuffers[particles.y+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forces[Y].z*0x100000000))); } atomicAdd(&forceBuffers[atom], static_cast<unsigned long long>((long long) (forces[I].x*0x100000000))); atomicAdd(&forceBuffers[atom+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forces[I].y*0x100000000))); atomicAdd(&forceBuffers[atom+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forces[I].z*0x100000000))); } } } /** * Compute the electrostatic potential at each of a set of points. */ extern "C" __global__ void computePotentialAtPoints(const real4* __restrict__ posq, const real* __restrict__ labFrameDipole, const real* __restrict__ labFrameQuadrupole, const real* __restrict__ inducedDipole, const real4* __restrict__ points, real* __restrict__ potential, int numPoints, real4 periodicBoxSize, real4 invPeriodicBoxSize) { extern __shared__ real4 localPosq[]; real3* localDipole = (real3*) &localPosq[blockDim.x]; real3* localInducedDipole = (real3*) &localDipole[blockDim.x]; real* localQuadrupole = (real*) &localInducedDipole[blockDim.x]; for (int basePoint = blockIdx.x*blockDim.x; basePoint < numPoints; basePoint += gridDim.x*blockDim.x) { int point = basePoint+threadIdx.x; real4 pointPos = points[point]; real p = 0; for (int baseAtom = 0; baseAtom < NUM_ATOMS; baseAtom += blockDim.x) { int atom = baseAtom+threadIdx.x; // Load data into shared memory. if (atom < NUM_ATOMS) { localPosq[threadIdx.x] = posq[atom]; localDipole[threadIdx.x] = make_real3(labFrameDipole[3*atom], labFrameDipole[3*atom+1], labFrameDipole[3*atom+2]); localInducedDipole[threadIdx.x] = make_real3(inducedDipole[3*atom], inducedDipole[3*atom+1], inducedDipole[3*atom+2]); localQuadrupole[5*threadIdx.x] = labFrameQuadrupole[5*atom]; localQuadrupole[5*threadIdx.x+1] = labFrameQuadrupole[5*atom+1]; localQuadrupole[5*threadIdx.x+2] = labFrameQuadrupole[5*atom+2]; localQuadrupole[5*threadIdx.x+3] = labFrameQuadrupole[5*atom+3]; localQuadrupole[5*threadIdx.x+4] = labFrameQuadrupole[5*atom+4]; } __syncthreads(); // Loop over atoms and compute the potential at this point. if (point < numPoints) { int end = min(blockDim.x, NUM_ATOMS-baseAtom); for (int i = 0; i < end; i++) { real3 delta = trimTo3(localPosq[i]-pointPos); #ifdef USE_PERIODIC delta.x -= floor(delta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; delta.y -= floor(delta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; delta.z -= floor(delta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; #endif real r2 = dot(delta, delta); real rInv = RSQRT(r2); p += localPosq[i].w*rInv; real rr2 = rInv*rInv; real rr3 = rInv*rr2; real scd = dot(localDipole[i], delta); real scu = dot(localInducedDipole[i], delta); p -= (scd+scu)*rr3; real rr5 = 3*rr3*rr2; real scq = delta.x*dot(delta, make_real3(localQuadrupole[5*i+0], localQuadrupole[5*i+1], localQuadrupole[5*i+2])) + delta.y*dot(delta, make_real3(localQuadrupole[5*i+1], localQuadrupole[5*i+3], localQuadrupole[5*i+4])) + delta.z*dot(delta, make_real3(localQuadrupole[5*i+2], localQuadrupole[5*i+4], -localQuadrupole[5*i]-localQuadrupole[5*i+3])); p += scq*rr5; } } __syncthreads(); } potential[point] = p*ENERGY_SCALE_FACTOR; } }
f263c3945d92746e5a237462d4df2a859e708cd8.cu
extern "C" __global__ void computeLabFrameMoments(real4* __restrict__ posq, int4* __restrict__ multipoleParticles, float* __restrict__ molecularDipoles, float* __restrict__ molecularQuadrupoles, real* __restrict__ labFrameDipoles, real* __restrict__ labFrameQuadrupoles) { // get coordinates of this atom and the z & x axis atoms // compute the vector between the atoms and 1/sqrt(d2), d2 is distance between // this atom and the axis atom // this atom is referred to as the k-atom in notes below // code common to ZThenX and Bisector for (int atom = blockIdx.x*blockDim.x+threadIdx.x; atom < NUM_ATOMS; atom += gridDim.x*blockDim.x) { int4 particles = multipoleParticles[atom]; if (particles.x >= 0 && particles.z >= 0) { real4 thisParticlePos = posq[atom]; real4 posZ = posq[particles.z]; real3 vectorZ = make_real3(posZ.x-thisParticlePos.x, posZ.y-thisParticlePos.y, posZ.z-thisParticlePos.z); real4 posX = posq[particles.x]; real3 vectorX = make_real3(posX.x-thisParticlePos.x, posX.y-thisParticlePos.y, posX.z-thisParticlePos.z); int axisType = particles.w; /* z-only (1) norm z (2) select random x (3) x = x - (x.z)z (4) norm x z-then-x (1) norm z (2) norm x (not needed) (3) x = x - (x.z)z (4) norm x bisector (1) norm z (2) norm x (3) z = x + z (4) norm z (5) x = x - (x.z)z (6) norm x z-bisect (1) norm z (2) norm x (3) norm y (3) x = x + y (4) norm x (5) x = x - (x.z)z (6) norm x 3-fold (1) norm z (2) norm x (3) norm y (4) z = x + y + z (5) norm z (6) x = x - (x.z)z (7) norm x */ // branch based on axis type vectorZ = normalize(vectorZ); if (axisType == 1) { // bisector vectorX = normalize(vectorX); vectorZ += vectorX; vectorZ = normalize(vectorZ); } else if (axisType == 2 || axisType == 3) { // z-bisect if (particles.y >= 0 && particles.y < NUM_ATOMS) { real4 posY = posq[particles.y]; real3 vectorY = make_real3(posY.x-thisParticlePos.x, posY.y-thisParticlePos.y, posY.z-thisParticlePos.z); vectorY = normalize(vectorY); vectorX = normalize(vectorX); if (axisType == 2) { vectorX += vectorY; vectorX = normalize(vectorX); } else { // 3-fold vectorZ += vectorX + vectorY; vectorZ = normalize(vectorZ); } } } else if (axisType >= 4) vectorX = make_real3((real) 0.1f); // x = x - (x.z)z vectorX -= dot(vectorZ, vectorX)*vectorZ; vectorX = normalize(vectorX); real3 vectorY = cross(vectorZ, vectorX); // use identity rotation matrix for unrecognized axis types if (axisType < 0 || axisType > 4) { vectorX.x = 1; vectorX.y = 0; vectorX.z = 0; vectorY.x = 0; vectorY.y = 1; vectorY.z = 0; vectorZ.x = 0; vectorZ.y = 0; vectorZ.z = 1; } // Check the chirality and see whether it needs to be reversed bool reverse = false; if (axisType != 0 && particles.x >= 0 && particles.y >=0 && particles.z >= 0) { real4 posY = posq[particles.y]; real delta[4][3]; delta[0][0] = thisParticlePos.x - posY.x; delta[0][1] = thisParticlePos.y - posY.y; delta[0][2] = thisParticlePos.z - posY.z; delta[1][0] = posZ.x - posY.x; delta[1][1] = posZ.y - posY.y; delta[1][2] = posZ.z - posY.z; delta[2][0] = posX.x - posY.x; delta[2][1] = posX.y - posY.y; delta[2][2] = posX.z - posY.z; delta[3][0] = delta[1][1]*delta[2][2] - delta[1][2]*delta[2][1]; delta[3][1] = delta[2][1]*delta[0][2] - delta[2][2]*delta[0][1]; delta[3][2] = delta[0][1]*delta[1][2] - delta[0][2]*delta[1][1]; real volume = delta[3][0]*delta[0][0] + delta[3][1]*delta[1][0] + delta[3][2]*delta[2][0]; reverse = (volume < 0); } // Transform the dipole unsigned int offset = 3*atom; real molDipole[3]; molDipole[0] = molecularDipoles[offset]; molDipole[1] = molecularDipoles[offset+1]; 
molDipole[2] = molecularDipoles[offset+2]; if (reverse) molDipole[1] *= -1; labFrameDipoles[offset] = molDipole[0]*vectorX.x + molDipole[1]*vectorY.x + molDipole[2]*vectorZ.x; labFrameDipoles[offset+1] = molDipole[0]*vectorX.y + molDipole[1]*vectorY.y + molDipole[2]*vectorZ.y; labFrameDipoles[offset+2] = molDipole[0]*vectorX.z + molDipole[1]*vectorY.z + molDipole[2]*vectorZ.z; // --------------------------------------------------------------------------------------- // Transform the quadrupole offset = 5*atom; real mPoleXX = molecularQuadrupoles[offset]; real mPoleXY = molecularQuadrupoles[offset+1]; real mPoleXZ = molecularQuadrupoles[offset+2]; real mPoleYY = molecularQuadrupoles[offset+3]; real mPoleYZ = molecularQuadrupoles[offset+4]; real mPoleZZ = -(mPoleXX+mPoleYY); if (reverse) { mPoleXY *= -1; mPoleYZ *= -1; } labFrameQuadrupoles[offset] = vectorX.x*(vectorX.x*mPoleXX + vectorY.x*mPoleXY + vectorZ.x*mPoleXZ) + vectorY.x*(vectorX.x*mPoleXY + vectorY.x*mPoleYY + vectorZ.x*mPoleYZ) + vectorZ.x*(vectorX.x*mPoleXZ + vectorY.x*mPoleYZ + vectorZ.x*mPoleZZ); labFrameQuadrupoles[offset+1] = vectorX.x*(vectorX.y*mPoleXX + vectorY.y*mPoleXY + vectorZ.y*mPoleXZ) + vectorY.x*(vectorX.y*mPoleXY + vectorY.y*mPoleYY + vectorZ.y*mPoleYZ) + vectorZ.x*(vectorX.y*mPoleXZ + vectorY.y*mPoleYZ + vectorZ.y*mPoleZZ); labFrameQuadrupoles[offset+2] = vectorX.x*(vectorX.z*mPoleXX + vectorY.z*mPoleXY + vectorZ.z*mPoleXZ) + vectorY.x*(vectorX.z*mPoleXY + vectorY.z*mPoleYY + vectorZ.z*mPoleYZ) + vectorZ.x*(vectorX.z*mPoleXZ + vectorY.z*mPoleYZ + vectorZ.z*mPoleZZ); labFrameQuadrupoles[offset+3] = vectorX.y*(vectorX.y*mPoleXX + vectorY.y*mPoleXY + vectorZ.y*mPoleXZ) + vectorY.y*(vectorX.y*mPoleXY + vectorY.y*mPoleYY + vectorZ.y*mPoleYZ) + vectorZ.y*(vectorX.y*mPoleXZ + vectorY.y*mPoleYZ + vectorZ.y*mPoleZZ); labFrameQuadrupoles[offset+4] = vectorX.y*(vectorX.z*mPoleXX + vectorY.z*mPoleXY + vectorZ.z*mPoleXZ) + vectorY.y*(vectorX.z*mPoleXY + vectorY.z*mPoleYY + vectorZ.z*mPoleYZ) + vectorZ.y*(vectorX.z*mPoleXZ + vectorY.z*mPoleYZ + vectorZ.z*mPoleZZ); } else { labFrameDipoles[3*atom] = molecularDipoles[3*atom]; labFrameDipoles[3*atom+1] = molecularDipoles[3*atom+1]; labFrameDipoles[3*atom+2] = molecularDipoles[3*atom+2]; labFrameQuadrupoles[5*atom] = molecularQuadrupoles[5*atom]; labFrameQuadrupoles[5*atom+1] = molecularQuadrupoles[5*atom+1]; labFrameQuadrupoles[5*atom+2] = molecularQuadrupoles[5*atom+2]; labFrameQuadrupoles[5*atom+3] = molecularQuadrupoles[5*atom+3]; labFrameQuadrupoles[5*atom+4] = molecularQuadrupoles[5*atom+4]; } } } extern "C" __global__ void recordInducedDipoles(const long long* __restrict__ fieldBuffers, const long long* __restrict__ fieldPolarBuffers, #ifdef USE_GK const long long* __restrict__ gkFieldBuffers, real* __restrict__ inducedDipoleS, real* __restrict__ inducedDipolePolarS, #endif real* __restrict__ inducedDipole, real* __restrict__ inducedDipolePolar, const float* __restrict__ polarizability) { for (int atom = blockIdx.x*blockDim.x+threadIdx.x; atom < NUM_ATOMS; atom += gridDim.x*blockDim.x) { real scale = polarizability[atom]/(real) 0x100000000; inducedDipole[3*atom] = scale*fieldBuffers[atom]; inducedDipole[3*atom+1] = scale*fieldBuffers[atom+PADDED_NUM_ATOMS]; inducedDipole[3*atom+2] = scale*fieldBuffers[atom+PADDED_NUM_ATOMS*2]; inducedDipolePolar[3*atom] = scale*fieldPolarBuffers[atom]; inducedDipolePolar[3*atom+1] = scale*fieldPolarBuffers[atom+PADDED_NUM_ATOMS]; inducedDipolePolar[3*atom+2] = scale*fieldPolarBuffers[atom+PADDED_NUM_ATOMS*2]; #ifdef USE_GK 
inducedDipoleS[3*atom] = scale*(fieldBuffers[atom]+gkFieldBuffers[atom]); inducedDipoleS[3*atom+1] = scale*(fieldBuffers[atom+PADDED_NUM_ATOMS]+gkFieldBuffers[atom+PADDED_NUM_ATOMS]); inducedDipoleS[3*atom+2] = scale*(fieldBuffers[atom+PADDED_NUM_ATOMS*2]+gkFieldBuffers[atom+PADDED_NUM_ATOMS*2]); inducedDipolePolarS[3*atom] = scale*(fieldPolarBuffers[atom]+gkFieldBuffers[atom]); inducedDipolePolarS[3*atom+1] = scale*(fieldPolarBuffers[atom+PADDED_NUM_ATOMS]+gkFieldBuffers[atom+PADDED_NUM_ATOMS]); inducedDipolePolarS[3*atom+2] = scale*(fieldPolarBuffers[atom+PADDED_NUM_ATOMS*2]+gkFieldBuffers[atom+PADDED_NUM_ATOMS*2]); #endif } } /** * Normalize a vector and return what its magnitude was. */ inline __device__ real normVector(real3& v) { real n = SQRT(dot(v, v)); v *= (n > 0 ? RECIP(n) : 0); return n; } /** * Compute the force on each particle due to the torque. */ extern "C" __global__ void mapTorqueToForce(unsigned long long* __restrict__ forceBuffers, const long long* __restrict__ torqueBuffers, const real4* __restrict__ posq, const int4* __restrict__ multipoleParticles) { const int U = 0; const int V = 1; const int W = 2; const int R = 3; const int S = 4; const int UV = 5; const int UW = 6; const int VW = 7; const int UR = 8; const int US = 9; const int VS = 10; const int WS = 11; const int LastVectorIndex = 12; const int X = 0; const int Y = 1; const int Z = 2; const int I = 3; const real torqueScale = RECIP((double) 0x100000000); real3 forces[4]; real norms[LastVectorIndex]; real3 vector[LastVectorIndex]; real angles[LastVectorIndex][2]; for (int atom = blockIdx.x*blockDim.x + threadIdx.x; atom < NUM_ATOMS; atom += gridDim.x*blockDim.x) { int4 particles = multipoleParticles[atom]; int axisAtom = particles.z; int axisType = particles.w; // NoAxisType if (axisType < 5 && particles.z >= 0) { real3 atomPos = trimTo3(posq[atom]); vector[U] = atomPos - trimTo3(posq[axisAtom]); norms[U] = normVector(vector[U]); if (axisType != 4 && particles.x >= 0) vector[V] = atomPos - trimTo3(posq[particles.x]); else vector[V] = make_real3(0.1f); norms[V] = normVector(vector[V]); // W = UxV if (axisType < 2 || axisType > 3) vector[W] = cross(vector[U], vector[V]); else vector[W] = atomPos - trimTo3(posq[particles.y]); norms[W] = normVector(vector[W]); vector[UV] = cross(vector[V], vector[U]); vector[UW] = cross(vector[W], vector[U]); vector[VW] = cross(vector[W], vector[V]); norms[UV] = normVector(vector[UV]); norms[UW] = normVector(vector[UW]); norms[VW] = normVector(vector[VW]); angles[UV][0] = dot(vector[U], vector[V]); angles[UV][1] = SQRT(1 - angles[UV][0]*angles[UV][0]); angles[UW][0] = dot(vector[U], vector[W]); angles[UW][1] = SQRT(1 - angles[UW][0]*angles[UW][0]); angles[VW][0] = dot(vector[V], vector[W]); angles[VW][1] = SQRT(1 - angles[VW][0]*angles[VW][0]); real dphi[3]; real3 torque = make_real3(torqueScale*torqueBuffers[atom], torqueScale*torqueBuffers[atom+PADDED_NUM_ATOMS], torqueScale*torqueBuffers[atom+PADDED_NUM_ATOMS*2]); dphi[U] = -dot(vector[U], torque); dphi[V] = -dot(vector[V], torque); dphi[W] = -dot(vector[W], torque); // z-then-x and bisector if (axisType == 0 || axisType == 1) { real factor1 = dphi[V]/(norms[U]*angles[UV][1]); real factor2 = dphi[W]/(norms[U]); real factor3 = -dphi[U]/(norms[V]*angles[UV][1]); real factor4 = 0; if (axisType == 1) { factor2 *= 0.5f; factor4 = 0.5f*dphi[W]/(norms[V]); } forces[Z] = vector[UV]*factor1 + factor2*vector[UW]; forces[X] = vector[UV]*factor3 + factor4*vector[VW]; forces[I] = -(forces[X]+forces[Z]); forces[Y] = make_real3(0); } else if 
(axisType == 2) { // z-bisect vector[R] = vector[V] + vector[W]; vector[S] = cross(vector[U], vector[R]); norms[R] = normVector(vector[R]); norms[S] = normVector(vector[S]); vector[UR] = cross(vector[R], vector[U]); vector[US] = cross(vector[S], vector[U]); vector[VS] = cross(vector[S], vector[V]); vector[WS] = cross(vector[S], vector[W]); norms[UR] = normVector(vector[UR]); norms[US] = normVector(vector[US]); norms[VS] = normVector(vector[VS]); norms[WS] = normVector(vector[WS]); angles[UR][0] = dot(vector[U], vector[R]); angles[UR][1] = SQRT(1 - angles[UR][0]*angles[UR][0]); angles[US][0] = dot(vector[U], vector[S]); angles[US][1] = SQRT(1 - angles[US][0]*angles[US][0]); angles[VS][0] = dot(vector[V], vector[S]); angles[VS][1] = SQRT(1 - angles[VS][0]*angles[VS][0]); angles[WS][0] = dot(vector[W], vector[S]); angles[WS][1] = SQRT(1 - angles[WS][0]*angles[WS][0]); real3 t1 = vector[V] - vector[S]*angles[VS][0]; real3 t2 = vector[W] - vector[S]*angles[WS][0]; normVector(t1); normVector(t2); real ut1cos = dot(vector[U], t1); real ut1sin = SQRT(1 - ut1cos*ut1cos); real ut2cos = dot(vector[U], t2); real ut2sin = SQRT(1 - ut2cos*ut2cos); real dphiR = -dot(vector[R], torque); real dphiS = -dot(vector[S], torque); real factor1 = dphiR/(norms[U]*angles[UR][1]); real factor2 = dphiS/(norms[U]); real factor3 = dphi[U]/(norms[V]*(ut1sin+ut2sin)); real factor4 = dphi[U]/(norms[W]*(ut1sin+ut2sin)); forces[Z] = vector[UR]*factor1 + factor2*vector[US]; forces[X] = (angles[VS][1]*vector[S] - angles[VS][0]*t1)*factor3; forces[Y] = (angles[WS][1]*vector[S] - angles[WS][0]*t2)*factor4; forces[I] = -(forces[X] + forces[Y] + forces[Z]); } else if (axisType == 3) { // 3-fold forces[Z] = (vector[UW]*dphi[W]/(norms[U]*angles[UW][1]) + vector[UV]*dphi[V]/(norms[U]*angles[UV][1]) - vector[UW]*dphi[U]/(norms[U]*angles[UW][1]) - vector[UV]*dphi[U]/(norms[U]*angles[UV][1]))/3; forces[X] = (vector[VW]*dphi[W]/(norms[V]*angles[VW][1]) - vector[UV]*dphi[U]/(norms[V]*angles[UV][1]) - vector[VW]*dphi[V]/(norms[V]*angles[VW][1]) + vector[UV]*dphi[V]/(norms[V]*angles[UV][1]))/3; forces[Y] = (-vector[UW]*dphi[U]/(norms[W]*angles[UW][1]) - vector[VW]*dphi[V]/(norms[W]*angles[VW][1]) + vector[UW]*dphi[W]/(norms[W]*angles[UW][1]) + vector[VW]*dphi[W]/(norms[W]*angles[VW][1]))/3; forces[I] = -(forces[X] + forces[Y] + forces[Z]); } else if (axisType == 4) { // z-only forces[Z] = vector[UV]*dphi[V]/(norms[U]*angles[UV][1]); forces[X] = make_real3(0); forces[Y] = make_real3(0); forces[I] = -forces[Z]; } else { forces[Z] = make_real3(0); forces[X] = make_real3(0); forces[Y] = make_real3(0); forces[I] = make_real3(0); } // Store results atomicAdd(&forceBuffers[particles.z], static_cast<unsigned long long>((long long) (forces[Z].x*0x100000000))); atomicAdd(&forceBuffers[particles.z+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forces[Z].y*0x100000000))); atomicAdd(&forceBuffers[particles.z+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forces[Z].z*0x100000000))); if (axisType != 4) { atomicAdd(&forceBuffers[particles.x], static_cast<unsigned long long>((long long) (forces[X].x*0x100000000))); atomicAdd(&forceBuffers[particles.x+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forces[X].y*0x100000000))); atomicAdd(&forceBuffers[particles.x+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forces[X].z*0x100000000))); } if ((axisType == 2 || axisType == 3) && particles.y > -1) { atomicAdd(&forceBuffers[particles.y], static_cast<unsigned long long>((long long) 
(forces[Y].x*0x100000000))); atomicAdd(&forceBuffers[particles.y+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forces[Y].y*0x100000000))); atomicAdd(&forceBuffers[particles.y+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forces[Y].z*0x100000000))); } atomicAdd(&forceBuffers[atom], static_cast<unsigned long long>((long long) (forces[I].x*0x100000000))); atomicAdd(&forceBuffers[atom+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forces[I].y*0x100000000))); atomicAdd(&forceBuffers[atom+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forces[I].z*0x100000000))); } } } /** * Compute the electrostatic potential at each of a set of points. */ extern "C" __global__ void computePotentialAtPoints(const real4* __restrict__ posq, const real* __restrict__ labFrameDipole, const real* __restrict__ labFrameQuadrupole, const real* __restrict__ inducedDipole, const real4* __restrict__ points, real* __restrict__ potential, int numPoints, real4 periodicBoxSize, real4 invPeriodicBoxSize) { extern __shared__ real4 localPosq[]; real3* localDipole = (real3*) &localPosq[blockDim.x]; real3* localInducedDipole = (real3*) &localDipole[blockDim.x]; real* localQuadrupole = (real*) &localInducedDipole[blockDim.x]; for (int basePoint = blockIdx.x*blockDim.x; basePoint < numPoints; basePoint += gridDim.x*blockDim.x) { int point = basePoint+threadIdx.x; real4 pointPos = points[point]; real p = 0; for (int baseAtom = 0; baseAtom < NUM_ATOMS; baseAtom += blockDim.x) { int atom = baseAtom+threadIdx.x; // Load data into shared memory. if (atom < NUM_ATOMS) { localPosq[threadIdx.x] = posq[atom]; localDipole[threadIdx.x] = make_real3(labFrameDipole[3*atom], labFrameDipole[3*atom+1], labFrameDipole[3*atom+2]); localInducedDipole[threadIdx.x] = make_real3(inducedDipole[3*atom], inducedDipole[3*atom+1], inducedDipole[3*atom+2]); localQuadrupole[5*threadIdx.x] = labFrameQuadrupole[5*atom]; localQuadrupole[5*threadIdx.x+1] = labFrameQuadrupole[5*atom+1]; localQuadrupole[5*threadIdx.x+2] = labFrameQuadrupole[5*atom+2]; localQuadrupole[5*threadIdx.x+3] = labFrameQuadrupole[5*atom+3]; localQuadrupole[5*threadIdx.x+4] = labFrameQuadrupole[5*atom+4]; } __syncthreads(); // Loop over atoms and compute the potential at this point. if (point < numPoints) { int end = min(blockDim.x, NUM_ATOMS-baseAtom); for (int i = 0; i < end; i++) { real3 delta = trimTo3(localPosq[i]-pointPos); #ifdef USE_PERIODIC delta.x -= floor(delta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; delta.y -= floor(delta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; delta.z -= floor(delta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; #endif real r2 = dot(delta, delta); real rInv = RSQRT(r2); p += localPosq[i].w*rInv; real rr2 = rInv*rInv; real rr3 = rInv*rr2; real scd = dot(localDipole[i], delta); real scu = dot(localInducedDipole[i], delta); p -= (scd+scu)*rr3; real rr5 = 3*rr3*rr2; real scq = delta.x*dot(delta, make_real3(localQuadrupole[5*i+0], localQuadrupole[5*i+1], localQuadrupole[5*i+2])) + delta.y*dot(delta, make_real3(localQuadrupole[5*i+1], localQuadrupole[5*i+3], localQuadrupole[5*i+4])) + delta.z*dot(delta, make_real3(localQuadrupole[5*i+2], localQuadrupole[5*i+4], -localQuadrupole[5*i]-localQuadrupole[5*i+3])); p += scq*rr5; } } __syncthreads(); } potential[point] = p*ENERGY_SCALE_FACTOR; } }
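// --- Illustrative sketch, not part of either file above or below: the 64-bit fixed-point
// force accumulation convention used by mapTorqueToForce. Real-valued contributions are
// scaled by 2^32, accumulated with integer atomicAdd into the unsigned long long force
// buffers, and converted back with the inverse scale (cf. RECIP((double) 0x100000000)).
// The helper names encodeFixed/decodeFixed are assumptions for this sketch, not OpenMM API.
#include <cstdio>
#include <cuda_runtime.h>

__host__ __device__ inline long long encodeFixed(double v)
{
    return (long long) (v * 0x100000000LL);      // scale by 2^32
}

__host__ __device__ inline double decodeFixed(long long v)
{
    return (double) v / 0x100000000LL;           // inverse scale
}

__global__ void accumulateExample(unsigned long long *buf, double contribution)
{
    // Every thread deposits the same contribution; integer atomics make the sum
    // deterministic regardless of thread ordering, unlike floating-point atomics.
    atomicAdd(buf, static_cast<unsigned long long>(encodeFixed(contribution)));
}

int main()
{
    unsigned long long *d_buf;
    cudaMalloc(&d_buf, sizeof(unsigned long long));
    cudaMemset(d_buf, 0, sizeof(unsigned long long));

    accumulateExample<<<1, 256>>>(d_buf, 0.001953125);   // 256 * (1/512) = 0.5

    unsigned long long h_buf;
    cudaMemcpy(&h_buf, d_buf, sizeof(h_buf), cudaMemcpyDeviceToHost);
    printf("accumulated component = %f\n", decodeFixed((long long) h_buf));  // 0.500000

    cudaFree(d_buf);
    return 0;
}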
709188fbe80f5d786ab73e0f5059df0aad3c75c8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <malloc.h> #include <assert.h> #include <sys/time.h> #define xMin 0.74395 #define xMax 0.74973 #define yMin 0.11321 #define yMax 0.11899 static void WriteBMP(int x, int y, unsigned char *bmp, const char * name) { const unsigned char bmphdr[54] = {66, 77, 255, 255, 255, 255, 0, 0, 0, 0, 54, 4, 0, 0, 40, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 1, 0, 8, 0, 0, 0, 0, 0, 255, 255, 255, 255, 196, 14, 0, 0, 196, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; unsigned char hdr[1078]; int i, j, c, xcorr, diff; FILE *f; xcorr = (x+3) >> 2 << 2; // BMPs have to be a multiple of 4 pixels wide. diff = xcorr - x; for (i = 0; i < 54; i++) hdr[i] = bmphdr[i]; *((int*)(&hdr[18])) = xcorr; *((int*)(&hdr[22])) = y; *((int*)(&hdr[34])) = xcorr*y; *((int*)(&hdr[2])) = xcorr*y + 1078; for (i = 0; i < 256; i++) { j = i*4 + 54; hdr[j+0] = i; // blue hdr[j+1] = i; // green hdr[j+2] = i; // red hdr[j+3] = 0; // dummy } f = fopen(name, "wb"); assert(f != NULL); c = fwrite(hdr, 1, 1078, f); assert(c == 1078); if (diff == 0) { c = fwrite(bmp, 1, x*y, f); assert(c == x*y); } else { *((int*)(&hdr[0])) = 0; // need up to three zero bytes for (j = 0; j < y; j++) { c = fwrite(&bmp[j * x], 1, x, f); assert(c == x); c = fwrite(hdr, 1, diff, f); assert(c == diff); } } fclose(f); } __global__ void fractal(unsigned char *d_cnt, int width, int maxdepth, double dx, double dy) { double x2, y2, x, y, cx, cy; int depth; int index = threadIdx.x + blockIdx.x * blockDim.x; int row, col; if (index < (width * width)) { /* compute fractal */ row = (index / width); //compute row # col = index % width; //compute column # cy = yMin + row * dy; //compute row # cx = xMin + col * dx; //compute column # x = -cx; y = -cy; depth = maxdepth; do { x2 = x * x; y2 = y * y; y = 2 * x * y - cy; x = x2 - y2 - cx; depth--; } while ((depth > 0) && ((x2 + y2) <= 5.0)); d_cnt[index] = depth & 255; } } int main(int argc, char *argv[]) { double dx, dy; int width, maxdepth; unsigned char *cnt; struct timeval start, end; #define THREADS 512 /* check command line */ if(argc != 3) {fprintf(stderr, "usage: exe <width> <depth>\n"); exit(-1);} width = atoi(argv[1]); if (width < 10) {fprintf(stderr, "edge_length must be at least 10\n"); exit(-1);} maxdepth = atoi(argv[2]); if (maxdepth < 10) {fprintf(stderr, "max_depth must be at least 10\n"); exit(-1);} unsigned char *d_cnt; dx = (xMax - xMin) / width; dy = (yMax - yMin) / width; printf("computing %d by %d fractal with a maximum depth of %d\n", width, width, maxdepth); hipHostMalloc((void**)&cnt, (width * width * sizeof(unsigned char)), hipHostMallocDefault); /* allocate space on GPU */ hipMalloc((void**)&d_cnt, width * width * sizeof(unsigned char)); gettimeofday(&start, NULL); hipLaunchKernelGGL(( fractal), dim3(((width * width + THREADS-1) / THREADS)), dim3(THREADS), 0, 0, d_cnt, width, maxdepth, dx, dy); hipDeviceSynchronize(); /* end time */ gettimeofday(&end, NULL); printf("compute time: %.8f s\n", end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0); hipMemcpyAsync(cnt, d_cnt, width * width * sizeof(unsigned char), hipMemcpyDeviceToHost); /* verify result by writing it to a file */ if (width <= 1024) { WriteBMP(width, width, cnt, "fractal.bmp"); } hipHostFree(cnt); hipFree(d_cnt); return 0; }
709188fbe80f5d786ab73e0f5059df0aad3c75c8.cu
#include <stdlib.h> #include <stdio.h> #include <malloc.h> #include <assert.h> #include <sys/time.h> #define xMin 0.74395 #define xMax 0.74973 #define yMin 0.11321 #define yMax 0.11899 static void WriteBMP(int x, int y, unsigned char *bmp, const char * name) { const unsigned char bmphdr[54] = {66, 77, 255, 255, 255, 255, 0, 0, 0, 0, 54, 4, 0, 0, 40, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 1, 0, 8, 0, 0, 0, 0, 0, 255, 255, 255, 255, 196, 14, 0, 0, 196, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; unsigned char hdr[1078]; int i, j, c, xcorr, diff; FILE *f; xcorr = (x+3) >> 2 << 2; // BMPs have to be a multiple of 4 pixels wide. diff = xcorr - x; for (i = 0; i < 54; i++) hdr[i] = bmphdr[i]; *((int*)(&hdr[18])) = xcorr; *((int*)(&hdr[22])) = y; *((int*)(&hdr[34])) = xcorr*y; *((int*)(&hdr[2])) = xcorr*y + 1078; for (i = 0; i < 256; i++) { j = i*4 + 54; hdr[j+0] = i; // blue hdr[j+1] = i; // green hdr[j+2] = i; // red hdr[j+3] = 0; // dummy } f = fopen(name, "wb"); assert(f != NULL); c = fwrite(hdr, 1, 1078, f); assert(c == 1078); if (diff == 0) { c = fwrite(bmp, 1, x*y, f); assert(c == x*y); } else { *((int*)(&hdr[0])) = 0; // need up to three zero bytes for (j = 0; j < y; j++) { c = fwrite(&bmp[j * x], 1, x, f); assert(c == x); c = fwrite(hdr, 1, diff, f); assert(c == diff); } } fclose(f); } __global__ void fractal(unsigned char *d_cnt, int width, int maxdepth, double dx, double dy) { double x2, y2, x, y, cx, cy; int depth; int index = threadIdx.x + blockIdx.x * blockDim.x; int row, col; if (index < (width * width)) { /* compute fractal */ row = (index / width); //compute row # col = index % width; //compute column # cy = yMin + row * dy; //compute row # cx = xMin + col * dx; //compute column # x = -cx; y = -cy; depth = maxdepth; do { x2 = x * x; y2 = y * y; y = 2 * x * y - cy; x = x2 - y2 - cx; depth--; } while ((depth > 0) && ((x2 + y2) <= 5.0)); d_cnt[index] = depth & 255; } } int main(int argc, char *argv[]) { double dx, dy; int width, maxdepth; unsigned char *cnt; struct timeval start, end; #define THREADS 512 /* check command line */ if(argc != 3) {fprintf(stderr, "usage: exe <width> <depth>\n"); exit(-1);} width = atoi(argv[1]); if (width < 10) {fprintf(stderr, "edge_length must be at least 10\n"); exit(-1);} maxdepth = atoi(argv[2]); if (maxdepth < 10) {fprintf(stderr, "max_depth must be at least 10\n"); exit(-1);} unsigned char *d_cnt; dx = (xMax - xMin) / width; dy = (yMax - yMin) / width; printf("computing %d by %d fractal with a maximum depth of %d\n", width, width, maxdepth); cudaHostAlloc((void**)&cnt, (width * width * sizeof(unsigned char)), cudaHostAllocDefault); /* allocate space on GPU */ cudaMalloc((void**)&d_cnt, width * width * sizeof(unsigned char)); gettimeofday(&start, NULL); fractal<<<((width * width + THREADS-1) / THREADS), THREADS>>>( d_cnt, width, maxdepth, dx, dy); cudaDeviceSynchronize(); /* end time */ gettimeofday(&end, NULL); printf("compute time: %.8f s\n", end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0); cudaMemcpyAsync(cnt, d_cnt, width * width * sizeof(unsigned char), cudaMemcpyDeviceToHost); /* verify result by writing it to a file */ if (width <= 1024) { WriteBMP(width, width, cnt, "fractal.bmp"); } cudaFreeHost(cnt); cudaFree(d_cnt); return 0; }
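// --- Illustrative sketch, separate from the fractal pair above: the pinned-host-buffer /
// asynchronous-copy pattern used in main(). Because cnt is allocated with
// cudaHostAlloc/hipHostMalloc, cudaMemcpyAsync returns before the transfer finishes, so
// host code that reads the buffer (e.g. WriteBMP) should synchronize first; this variant
// adds that wait. Buffer names h_img/d_img are placeholders, not from the original file.
#include <stdio.h>
#include <cuda_runtime.h>

int main()
{
    const int n = 1024 * 1024;
    unsigned char *h_img, *d_img;

    cudaHostAlloc((void**)&h_img, n, cudaHostAllocDefault);  // pinned, so async copies really are async
    cudaMalloc((void**)&d_img, n);
    cudaMemset(d_img, 42, n);

    // The device-to-host copy on the default stream returns immediately...
    cudaMemcpyAsync(h_img, d_img, n, cudaMemcpyDeviceToHost);
    // ...so wait for it before the host reads h_img (e.g. before writing a BMP).
    cudaDeviceSynchronize();

    printf("h_img[0] = %d\n", h_img[0]);   // 42

    cudaFreeHost(h_img);
    cudaFree(d_img);
    return 0;
}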
566989e3b5f780aceb48d3219eaf6c2fd9a592dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition (HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of the University of Michigan All rights reserved. HOOMD-blue may contain modifications ("Contributions") provided, and to which copyright is held, by various Contributors who have granted The Regents of the University of Michigan the right to modify and/or distribute such Contributions. You may redistribute, use, and create derivate works of HOOMD-blue, in source and binary forms, provided you abide by the following conditions: * Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer both in the code and prominently in any materials provided with the distribution. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. * All publications and presentations based on HOOMD-blue, including any reports or published results obtained, in whole or in part, with HOOMD-blue, will acknowledge its use according to the terms posted at the time of submission on: http://codeblue.umich.edu/hoomd-blue/citations.html * Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website: http://codeblue.umich.edu/hoomd-blue/ * Apart from the above required attributions, neither the name of the copyright holder nor the names of HOOMD-blue's contributors may be used to endorse or promote products derived from this software without specific prior written permission. Disclaimer THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // Maintainer: dnlebard #include "HarmonicDihedralForceGPU.cuh" #include "TextureTools.h" #include <assert.h> #ifdef SINGLE_PRECISION #define __scalar2int_rn __float2int_rn #else #define __scalar2int_rn __double2int_rn #endif // SMALL a relatively small number #define SMALL Scalar(0.001) /*! \file HarmonicDihedralForceGPU.cu \brief Defines GPU kernel code for calculating the harmonic dihedral forces. Used by HarmonicDihedralForceComputeGPU. */ //! Texture for reading dihedral parameters scalar4_tex_t dihedral_params_tex; //! Kernel for caculating harmonic dihedral forces on the GPU /*! 
\param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch pitch of 2D virial array \param N number of particles \param d_pos particle positions on the device \param d_params Parameters for the angle force \param box Box dimensions for periodic boundary condition handling \param tlist Dihedral data to use in calculating the forces \param dihedral_ABCD List of relative atom positions in the dihedrals \param pitch Pitch of 2D dihedral list \param n_dihedrals_list List of numbers of dihedrals per atom */ extern "C" __global__ void gpu_compute_harmonic_dihedral_forces_kernel(Scalar4* d_force, Scalar* d_virial, const unsigned int virial_pitch, const unsigned int N, const Scalar4 *d_pos, const Scalar4 *d_params, BoxDim box, const group_storage<4> *tlist, const unsigned int *dihedral_ABCD, const unsigned int pitch, const unsigned int *n_dihedrals_list) { // start by identifying which particle we are to handle int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; // load in the length of the list for this thread (MEM TRANSFER: 4 bytes) int n_dihedrals = n_dihedrals_list[idx]; // read in the position of our b-particle from the a-b-c-d set. (MEM TRANSFER: 16 bytes) Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c-d quartet Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z); Scalar3 pos_a,pos_b,pos_c, pos_d; // allocate space for the a,b, and c atoms in the a-b-c-d quartet // initialize the force to 0 Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0)); // initialize the virial to 0 Scalar virial_idx[6]; for (unsigned int i = 0; i < 6; i++) virial_idx[i] = Scalar(0.0); // loop over all dihedrals for (int dihedral_idx = 0; dihedral_idx < n_dihedrals; dihedral_idx++) { group_storage<4> cur_dihedral = tlist[pitch*dihedral_idx + idx]; unsigned int cur_ABCD = dihedral_ABCD[pitch*dihedral_idx + idx]; int cur_dihedral_x_idx = cur_dihedral.idx[0]; int cur_dihedral_y_idx = cur_dihedral.idx[1]; int cur_dihedral_z_idx = cur_dihedral.idx[2]; int cur_dihedral_type = cur_dihedral.idx[3]; int cur_dihedral_abcd = cur_ABCD; // get the a-particle's position (MEM TRANSFER: 16 bytes) Scalar4 x_postype = d_pos[cur_dihedral_x_idx]; Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z); // get the c-particle's position (MEM TRANSFER: 16 bytes) Scalar4 y_postype = d_pos[cur_dihedral_y_idx]; Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z); // get the c-particle's position (MEM TRANSFER: 16 bytes) Scalar4 z_postype = d_pos[cur_dihedral_z_idx]; Scalar3 z_pos = make_scalar3(z_postype.x, z_postype.y, z_postype.z); if (cur_dihedral_abcd == 0) { pos_a = idx_pos; pos_b = x_pos; pos_c = y_pos; pos_d = z_pos; } if (cur_dihedral_abcd == 1) { pos_b = idx_pos; pos_a = x_pos; pos_c = y_pos; pos_d = z_pos; } if (cur_dihedral_abcd == 2) { pos_c = idx_pos; pos_a = x_pos; pos_b = y_pos; pos_d = z_pos; } if (cur_dihedral_abcd == 3) { pos_d = idx_pos; pos_a = x_pos; pos_b = y_pos; pos_c = z_pos; } // calculate dr for a-b,c-b,and a-c Scalar3 dab = pos_a - pos_b; Scalar3 dcb = pos_c - pos_b; Scalar3 ddc = pos_d - pos_c; dab = box.minImage(dab); dcb = box.minImage(dcb); ddc = box.minImage(ddc); Scalar3 dcbm = -dcb; dcbm = box.minImage(dcbm); // get the dihedral parameters (MEM TRANSFER: 12 bytes) Scalar4 params = texFetchScalar4(d_params, dihedral_params_tex, cur_dihedral_type); Scalar K = params.x; Scalar sign = params.y; Scalar multi = 
params.z; Scalar aax = dab.y*dcbm.z - dab.z*dcbm.y; Scalar aay = dab.z*dcbm.x - dab.x*dcbm.z; Scalar aaz = dab.x*dcbm.y - dab.y*dcbm.x; Scalar bbx = ddc.y*dcbm.z - ddc.z*dcbm.y; Scalar bby = ddc.z*dcbm.x - ddc.x*dcbm.z; Scalar bbz = ddc.x*dcbm.y - ddc.y*dcbm.x; Scalar raasq = aax*aax + aay*aay + aaz*aaz; Scalar rbbsq = bbx*bbx + bby*bby + bbz*bbz; Scalar rgsq = dcbm.x*dcbm.x + dcbm.y*dcbm.y + dcbm.z*dcbm.z; Scalar rg = sqrtf(rgsq); Scalar rginv, raa2inv, rbb2inv; rginv = raa2inv = rbb2inv = Scalar(0.0); if (rg > Scalar(0.0)) rginv = Scalar(1.0)/rg; if (raasq > Scalar(0.0)) raa2inv = Scalar(1.0)/raasq; if (rbbsq > Scalar(0.0)) rbb2inv = Scalar(1.0)/rbbsq; Scalar rabinv = sqrtf(raa2inv*rbb2inv); Scalar c_abcd = (aax*bbx + aay*bby + aaz*bbz)*rabinv; Scalar s_abcd = rg*rabinv*(aax*ddc.x + aay*ddc.y + aaz*ddc.z); if (c_abcd > Scalar(1.0)) c_abcd = Scalar(1.0); if (c_abcd < -Scalar(1.0)) c_abcd = -Scalar(1.0); Scalar p = Scalar(1.0); Scalar ddfab; Scalar dfab = Scalar(0.0); int m = __scalar2int_rn(multi); for (int jj = 0; jj < m; jj++) { ddfab = p*c_abcd - dfab*s_abcd; dfab = p*s_abcd + dfab*c_abcd; p = ddfab; } ///////////////////////// // FROM LAMMPS: sin_shift is always 0... so dropping all sin_shift terms!!!! ///////////////////////// p *= sign; dfab *= sign; dfab *= -multi; p += Scalar(1.0); if (multi < Scalar(1.0)) { p = Scalar(1.0) + sign; dfab = Scalar(0.0); } Scalar fg = dab.x*dcbm.x + dab.y*dcbm.y + dab.z*dcbm.z; Scalar hg = ddc.x*dcbm.x + ddc.y*dcbm.y + ddc.z*dcbm.z; Scalar fga = fg*raa2inv*rginv; Scalar hgb = hg*rbb2inv*rginv; Scalar gaa = -raa2inv*rg; Scalar gbb = rbb2inv*rg; Scalar dtfx = gaa*aax; Scalar dtfy = gaa*aay; Scalar dtfz = gaa*aaz; Scalar dtgx = fga*aax - hgb*bbx; Scalar dtgy = fga*aay - hgb*bby; Scalar dtgz = fga*aaz - hgb*bbz; Scalar dthx = gbb*bbx; Scalar dthy = gbb*bby; Scalar dthz = gbb*bbz; //Scalar df = -K * dfab; Scalar df = -K * dfab * Scalar(0.500); // the 0.5 term is for 1/2K in the forces Scalar sx2 = df*dtgx; Scalar sy2 = df*dtgy; Scalar sz2 = df*dtgz; Scalar ffax = df*dtfx; Scalar ffay = df*dtfy; Scalar ffaz = df*dtfz; Scalar ffbx = sx2 - ffax; Scalar ffby = sy2 - ffay; Scalar ffbz = sz2 - ffaz; Scalar ffdx = df*dthx; Scalar ffdy = df*dthy; Scalar ffdz = df*dthz; Scalar ffcx = -sx2 - ffdx; Scalar ffcy = -sy2 - ffdy; Scalar ffcz = -sz2 - ffdz; // Now, apply the force to each individual atom a,b,c,d // and accumlate the energy/virial // compute 1/4 of the energy, 1/4 for each atom in the dihedral //Scalar dihedral_eng = p*K*Scalar(1.0/4.0); Scalar dihedral_eng = p*K*Scalar(1.0/8.0); // the 1/8th term is (1/2)K * 1/4 // compute 1/4 of the virial, 1/4 for each atom in the dihedral // upper triangular version of virial tensor Scalar dihedral_virial[6]; dihedral_virial[0] = Scalar(1./4.)*(dab.x*ffax + dcb.x*ffcx + (ddc.x+dcb.x)*ffdx); dihedral_virial[1] = Scalar(1./4.)*(dab.y*ffax + dcb.y*ffcx + (ddc.y+dcb.y)*ffdx); dihedral_virial[2] = Scalar(1./4.)*(dab.z*ffax + dcb.z*ffcx + (ddc.z+dcb.z)*ffdx); dihedral_virial[3] = Scalar(1./4.)*(dab.y*ffay + dcb.y*ffcy + (ddc.y+dcb.y)*ffdy); dihedral_virial[4] = Scalar(1./4.)*(dab.z*ffay + dcb.z*ffcy + (ddc.z+dcb.z)*ffdy); dihedral_virial[5] = Scalar(1./4.)*(dab.z*ffaz + dcb.z*ffcz + (ddc.z+dcb.z)*ffdz); if (cur_dihedral_abcd == 0) { force_idx.x += ffax; force_idx.y += ffay; force_idx.z += ffaz; } if (cur_dihedral_abcd == 1) { force_idx.x += ffbx; force_idx.y += ffby; force_idx.z += ffbz; } if (cur_dihedral_abcd == 2) { force_idx.x += ffcx; force_idx.y += ffcy; force_idx.z += ffcz; } if (cur_dihedral_abcd == 3) { force_idx.x 
+= ffdx; force_idx.y += ffdy; force_idx.z += ffdz; } force_idx.w += dihedral_eng; for (int k = 0; k < 6; k++) virial_idx[k] += dihedral_virial[k]; } // now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes) d_force[idx] = force_idx; for (int k = 0; k < 6; k++) d_virial[k*virial_pitch+idx] = virial_idx[k]; } /*! \param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch pitch of 2D virial array \param N number of particles \param d_pos particle positions on the GPU \param box Box dimensions (in GPU format) to use for periodic boundary conditions \param tlist Dihedral data to use in calculating the forces \param dihedral_ABCD List of relative atom positions in the dihedrals \param pitch Pitch of 2D dihedral list \param n_dihedrals_list List of numbers of dihedrals per atom \param d_params K, sign,multiplicity params packed as padded Scalar4 variables \param n_dihedral_types Number of dihedral types in d_params \param block_size Block size to use when performing calculations \param compute_capability Compute capability of the device (200, 300, 350, ...) \returns Any error code resulting from the kernel launch \note Always returns hipSuccess in release builds to avoid the hipDeviceSynchronize() \a d_params should include one Scalar4 element per dihedral type. The x component contains K the spring constant and the y component contains sign, and the z component the multiplicity. */ hipError_t gpu_compute_harmonic_dihedral_forces(Scalar4* d_force, Scalar* d_virial, const unsigned int virial_pitch, const unsigned int N, const Scalar4 *d_pos, const BoxDim& box, const group_storage<4> *tlist, const unsigned int *dihedral_ABCD, const unsigned int pitch, const unsigned int *n_dihedrals_list, Scalar4 *d_params, unsigned int n_dihedral_types, int block_size, const unsigned int compute_capability) { assert(d_params); static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void *)gpu_compute_harmonic_dihedral_forces_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); // setup the grid to run the kernel dim3 grid( N / run_block_size + 1, 1, 1); dim3 threads(run_block_size, 1, 1); // bind the texture on pre sm35 devices if (compute_capability < 350) { hipError_t error = hipBindTexture(0, dihedral_params_tex, d_params, sizeof(Scalar4) * n_dihedral_types); if (error != hipSuccess) return error; } // run the kernel hipLaunchKernelGGL(( gpu_compute_harmonic_dihedral_forces_kernel), dim3(grid), dim3(threads), 0, 0, d_force, d_virial, virial_pitch, N, d_pos, d_params, box, tlist, dihedral_ABCD, pitch, n_dihedrals_list); return hipSuccess; }
566989e3b5f780aceb48d3219eaf6c2fd9a592dd.cu
/* Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition (HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of the University of Michigan All rights reserved. HOOMD-blue may contain modifications ("Contributions") provided, and to which copyright is held, by various Contributors who have granted The Regents of the University of Michigan the right to modify and/or distribute such Contributions. You may redistribute, use, and create derivate works of HOOMD-blue, in source and binary forms, provided you abide by the following conditions: * Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer both in the code and prominently in any materials provided with the distribution. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. * All publications and presentations based on HOOMD-blue, including any reports or published results obtained, in whole or in part, with HOOMD-blue, will acknowledge its use according to the terms posted at the time of submission on: http://codeblue.umich.edu/hoomd-blue/citations.html * Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website: http://codeblue.umich.edu/hoomd-blue/ * Apart from the above required attributions, neither the name of the copyright holder nor the names of HOOMD-blue's contributors may be used to endorse or promote products derived from this software without specific prior written permission. Disclaimer THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // Maintainer: dnlebard #include "HarmonicDihedralForceGPU.cuh" #include "TextureTools.h" #include <assert.h> #ifdef SINGLE_PRECISION #define __scalar2int_rn __float2int_rn #else #define __scalar2int_rn __double2int_rn #endif // SMALL a relatively small number #define SMALL Scalar(0.001) /*! \file HarmonicDihedralForceGPU.cu \brief Defines GPU kernel code for calculating the harmonic dihedral forces. Used by HarmonicDihedralForceComputeGPU. */ //! Texture for reading dihedral parameters scalar4_tex_t dihedral_params_tex; //! Kernel for caculating harmonic dihedral forces on the GPU /*! 
\param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch pitch of 2D virial array \param N number of particles \param d_pos particle positions on the device \param d_params Parameters for the angle force \param box Box dimensions for periodic boundary condition handling \param tlist Dihedral data to use in calculating the forces \param dihedral_ABCD List of relative atom positions in the dihedrals \param pitch Pitch of 2D dihedral list \param n_dihedrals_list List of numbers of dihedrals per atom */ extern "C" __global__ void gpu_compute_harmonic_dihedral_forces_kernel(Scalar4* d_force, Scalar* d_virial, const unsigned int virial_pitch, const unsigned int N, const Scalar4 *d_pos, const Scalar4 *d_params, BoxDim box, const group_storage<4> *tlist, const unsigned int *dihedral_ABCD, const unsigned int pitch, const unsigned int *n_dihedrals_list) { // start by identifying which particle we are to handle int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; // load in the length of the list for this thread (MEM TRANSFER: 4 bytes) int n_dihedrals = n_dihedrals_list[idx]; // read in the position of our b-particle from the a-b-c-d set. (MEM TRANSFER: 16 bytes) Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c-d quartet Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z); Scalar3 pos_a,pos_b,pos_c, pos_d; // allocate space for the a,b, and c atoms in the a-b-c-d quartet // initialize the force to 0 Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0)); // initialize the virial to 0 Scalar virial_idx[6]; for (unsigned int i = 0; i < 6; i++) virial_idx[i] = Scalar(0.0); // loop over all dihedrals for (int dihedral_idx = 0; dihedral_idx < n_dihedrals; dihedral_idx++) { group_storage<4> cur_dihedral = tlist[pitch*dihedral_idx + idx]; unsigned int cur_ABCD = dihedral_ABCD[pitch*dihedral_idx + idx]; int cur_dihedral_x_idx = cur_dihedral.idx[0]; int cur_dihedral_y_idx = cur_dihedral.idx[1]; int cur_dihedral_z_idx = cur_dihedral.idx[2]; int cur_dihedral_type = cur_dihedral.idx[3]; int cur_dihedral_abcd = cur_ABCD; // get the a-particle's position (MEM TRANSFER: 16 bytes) Scalar4 x_postype = d_pos[cur_dihedral_x_idx]; Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z); // get the c-particle's position (MEM TRANSFER: 16 bytes) Scalar4 y_postype = d_pos[cur_dihedral_y_idx]; Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z); // get the c-particle's position (MEM TRANSFER: 16 bytes) Scalar4 z_postype = d_pos[cur_dihedral_z_idx]; Scalar3 z_pos = make_scalar3(z_postype.x, z_postype.y, z_postype.z); if (cur_dihedral_abcd == 0) { pos_a = idx_pos; pos_b = x_pos; pos_c = y_pos; pos_d = z_pos; } if (cur_dihedral_abcd == 1) { pos_b = idx_pos; pos_a = x_pos; pos_c = y_pos; pos_d = z_pos; } if (cur_dihedral_abcd == 2) { pos_c = idx_pos; pos_a = x_pos; pos_b = y_pos; pos_d = z_pos; } if (cur_dihedral_abcd == 3) { pos_d = idx_pos; pos_a = x_pos; pos_b = y_pos; pos_c = z_pos; } // calculate dr for a-b,c-b,and a-c Scalar3 dab = pos_a - pos_b; Scalar3 dcb = pos_c - pos_b; Scalar3 ddc = pos_d - pos_c; dab = box.minImage(dab); dcb = box.minImage(dcb); ddc = box.minImage(ddc); Scalar3 dcbm = -dcb; dcbm = box.minImage(dcbm); // get the dihedral parameters (MEM TRANSFER: 12 bytes) Scalar4 params = texFetchScalar4(d_params, dihedral_params_tex, cur_dihedral_type); Scalar K = params.x; Scalar sign = params.y; Scalar multi = 
params.z; Scalar aax = dab.y*dcbm.z - dab.z*dcbm.y; Scalar aay = dab.z*dcbm.x - dab.x*dcbm.z; Scalar aaz = dab.x*dcbm.y - dab.y*dcbm.x; Scalar bbx = ddc.y*dcbm.z - ddc.z*dcbm.y; Scalar bby = ddc.z*dcbm.x - ddc.x*dcbm.z; Scalar bbz = ddc.x*dcbm.y - ddc.y*dcbm.x; Scalar raasq = aax*aax + aay*aay + aaz*aaz; Scalar rbbsq = bbx*bbx + bby*bby + bbz*bbz; Scalar rgsq = dcbm.x*dcbm.x + dcbm.y*dcbm.y + dcbm.z*dcbm.z; Scalar rg = sqrtf(rgsq); Scalar rginv, raa2inv, rbb2inv; rginv = raa2inv = rbb2inv = Scalar(0.0); if (rg > Scalar(0.0)) rginv = Scalar(1.0)/rg; if (raasq > Scalar(0.0)) raa2inv = Scalar(1.0)/raasq; if (rbbsq > Scalar(0.0)) rbb2inv = Scalar(1.0)/rbbsq; Scalar rabinv = sqrtf(raa2inv*rbb2inv); Scalar c_abcd = (aax*bbx + aay*bby + aaz*bbz)*rabinv; Scalar s_abcd = rg*rabinv*(aax*ddc.x + aay*ddc.y + aaz*ddc.z); if (c_abcd > Scalar(1.0)) c_abcd = Scalar(1.0); if (c_abcd < -Scalar(1.0)) c_abcd = -Scalar(1.0); Scalar p = Scalar(1.0); Scalar ddfab; Scalar dfab = Scalar(0.0); int m = __scalar2int_rn(multi); for (int jj = 0; jj < m; jj++) { ddfab = p*c_abcd - dfab*s_abcd; dfab = p*s_abcd + dfab*c_abcd; p = ddfab; } ///////////////////////// // FROM LAMMPS: sin_shift is always 0... so dropping all sin_shift terms!!!! ///////////////////////// p *= sign; dfab *= sign; dfab *= -multi; p += Scalar(1.0); if (multi < Scalar(1.0)) { p = Scalar(1.0) + sign; dfab = Scalar(0.0); } Scalar fg = dab.x*dcbm.x + dab.y*dcbm.y + dab.z*dcbm.z; Scalar hg = ddc.x*dcbm.x + ddc.y*dcbm.y + ddc.z*dcbm.z; Scalar fga = fg*raa2inv*rginv; Scalar hgb = hg*rbb2inv*rginv; Scalar gaa = -raa2inv*rg; Scalar gbb = rbb2inv*rg; Scalar dtfx = gaa*aax; Scalar dtfy = gaa*aay; Scalar dtfz = gaa*aaz; Scalar dtgx = fga*aax - hgb*bbx; Scalar dtgy = fga*aay - hgb*bby; Scalar dtgz = fga*aaz - hgb*bbz; Scalar dthx = gbb*bbx; Scalar dthy = gbb*bby; Scalar dthz = gbb*bbz; //Scalar df = -K * dfab; Scalar df = -K * dfab * Scalar(0.500); // the 0.5 term is for 1/2K in the forces Scalar sx2 = df*dtgx; Scalar sy2 = df*dtgy; Scalar sz2 = df*dtgz; Scalar ffax = df*dtfx; Scalar ffay = df*dtfy; Scalar ffaz = df*dtfz; Scalar ffbx = sx2 - ffax; Scalar ffby = sy2 - ffay; Scalar ffbz = sz2 - ffaz; Scalar ffdx = df*dthx; Scalar ffdy = df*dthy; Scalar ffdz = df*dthz; Scalar ffcx = -sx2 - ffdx; Scalar ffcy = -sy2 - ffdy; Scalar ffcz = -sz2 - ffdz; // Now, apply the force to each individual atom a,b,c,d // and accumlate the energy/virial // compute 1/4 of the energy, 1/4 for each atom in the dihedral //Scalar dihedral_eng = p*K*Scalar(1.0/4.0); Scalar dihedral_eng = p*K*Scalar(1.0/8.0); // the 1/8th term is (1/2)K * 1/4 // compute 1/4 of the virial, 1/4 for each atom in the dihedral // upper triangular version of virial tensor Scalar dihedral_virial[6]; dihedral_virial[0] = Scalar(1./4.)*(dab.x*ffax + dcb.x*ffcx + (ddc.x+dcb.x)*ffdx); dihedral_virial[1] = Scalar(1./4.)*(dab.y*ffax + dcb.y*ffcx + (ddc.y+dcb.y)*ffdx); dihedral_virial[2] = Scalar(1./4.)*(dab.z*ffax + dcb.z*ffcx + (ddc.z+dcb.z)*ffdx); dihedral_virial[3] = Scalar(1./4.)*(dab.y*ffay + dcb.y*ffcy + (ddc.y+dcb.y)*ffdy); dihedral_virial[4] = Scalar(1./4.)*(dab.z*ffay + dcb.z*ffcy + (ddc.z+dcb.z)*ffdy); dihedral_virial[5] = Scalar(1./4.)*(dab.z*ffaz + dcb.z*ffcz + (ddc.z+dcb.z)*ffdz); if (cur_dihedral_abcd == 0) { force_idx.x += ffax; force_idx.y += ffay; force_idx.z += ffaz; } if (cur_dihedral_abcd == 1) { force_idx.x += ffbx; force_idx.y += ffby; force_idx.z += ffbz; } if (cur_dihedral_abcd == 2) { force_idx.x += ffcx; force_idx.y += ffcy; force_idx.z += ffcz; } if (cur_dihedral_abcd == 3) { force_idx.x 
+= ffdx; force_idx.y += ffdy; force_idx.z += ffdz; } force_idx.w += dihedral_eng; for (int k = 0; k < 6; k++) virial_idx[k] += dihedral_virial[k]; } // now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes) d_force[idx] = force_idx; for (int k = 0; k < 6; k++) d_virial[k*virial_pitch+idx] = virial_idx[k]; } /*! \param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch pitch of 2D virial array \param N number of particles \param d_pos particle positions on the GPU \param box Box dimensions (in GPU format) to use for periodic boundary conditions \param tlist Dihedral data to use in calculating the forces \param dihedral_ABCD List of relative atom positions in the dihedrals \param pitch Pitch of 2D dihedral list \param n_dihedrals_list List of numbers of dihedrals per atom \param d_params K, sign,multiplicity params packed as padded Scalar4 variables \param n_dihedral_types Number of dihedral types in d_params \param block_size Block size to use when performing calculations \param compute_capability Compute capability of the device (200, 300, 350, ...) \returns Any error code resulting from the kernel launch \note Always returns cudaSuccess in release builds to avoid the cudaThreadSynchronize() \a d_params should include one Scalar4 element per dihedral type. The x component contains K the spring constant and the y component contains sign, and the z component the multiplicity. */ cudaError_t gpu_compute_harmonic_dihedral_forces(Scalar4* d_force, Scalar* d_virial, const unsigned int virial_pitch, const unsigned int N, const Scalar4 *d_pos, const BoxDim& box, const group_storage<4> *tlist, const unsigned int *dihedral_ABCD, const unsigned int pitch, const unsigned int *n_dihedrals_list, Scalar4 *d_params, unsigned int n_dihedral_types, int block_size, const unsigned int compute_capability) { assert(d_params); static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void *)gpu_compute_harmonic_dihedral_forces_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); // setup the grid to run the kernel dim3 grid( N / run_block_size + 1, 1, 1); dim3 threads(run_block_size, 1, 1); // bind the texture on pre sm35 devices if (compute_capability < 350) { cudaError_t error = cudaBindTexture(0, dihedral_params_tex, d_params, sizeof(Scalar4) * n_dihedral_types); if (error != cudaSuccess) return error; } // run the kernel gpu_compute_harmonic_dihedral_forces_kernel<<< grid, threads>>>(d_force, d_virial, virial_pitch, N, d_pos, d_params, box, tlist, dihedral_ABCD, pitch, n_dihedrals_list); return cudaSuccess; }
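// --- Illustrative sketch, not part of the HOOMD-blue files above: the launch-configuration
// pattern used by gpu_compute_harmonic_dihedral_forces — query the kernel's
// maxThreadsPerBlock once via cudaFuncGetAttributes, clamp the requested block size, and
// derive the grid from N. The kernel body is a trivial stand-in, not the dihedral force code.
#include <algorithm>
#include <stdio.h>
#include <cuda_runtime.h>

__global__ void stubKernel(float *out, unsigned int N)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        out[i] = 2.0f * i;   // stand-in for the per-particle force computation
}

int main()
{
    const unsigned int N = 100000;
    const int requested_block_size = 1024;

    cudaFuncAttributes attr;
    cudaFuncGetAttributes(&attr, (const void *)stubKernel);
    int run_block_size = std::min(requested_block_size, attr.maxThreadsPerBlock);

    dim3 grid(N / run_block_size + 1, 1, 1);
    dim3 threads(run_block_size, 1, 1);

    float *d_out;
    cudaMalloc(&d_out, N * sizeof(float));
    stubKernel<<<grid, threads>>>(d_out, N);
    cudaDeviceSynchronize();

    printf("launched %u blocks of %d threads\n", grid.x, run_block_size);
    cudaFree(d_out);
    return 0;
}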
6b280eb74e3843e16711623a47cbd414a115b07b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

void init(int *a, int N)
{
  int i;
  for (i = 0; i < N; ++i)
  {
    a[i] = i;
  }
}

__global__
void doubleElements(int *a, int N)
{
  /*
   * Use a grid-stride loop so each thread does work
   * on more than one element in the array.
   */
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;

  for (int i = idx; i < N; i += stride)
  {
    a[i] *= 2;
  }
}

bool checkElementsAreDoubled(int *a, int N)
{
  int i;
  for (i = 0; i < N; ++i)
  {
    if (a[i] != i*2) return false;
  }
  return true;
}

int main()
{
  int N = 10000;
  int *a;

  size_t size = N * sizeof(int);
  hipMallocManaged(&a, size);

  init(a, N);

  size_t threads_per_block = 256;
  size_t number_of_blocks = 32;

  hipLaunchKernelGGL(( doubleElements), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a, N);
  hipDeviceSynchronize();

  bool areDoubled = checkElementsAreDoubled(a, N);
  printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");

  hipFree(a);
}
6b280eb74e3843e16711623a47cbd414a115b07b.cu
#include <stdio.h>

void init(int *a, int N)
{
  int i;
  for (i = 0; i < N; ++i)
  {
    a[i] = i;
  }
}

__global__
void doubleElements(int *a, int N)
{
  /*
   * Use a grid-stride loop so each thread does work
   * on more than one element in the array.
   */
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;

  for (int i = idx; i < N; i += stride)
  {
    a[i] *= 2;
  }
}

bool checkElementsAreDoubled(int *a, int N)
{
  int i;
  for (i = 0; i < N; ++i)
  {
    if (a[i] != i*2) return false;
  }
  return true;
}

int main()
{
  int N = 10000;
  int *a;

  size_t size = N * sizeof(int);
  cudaMallocManaged(&a, size);

  init(a, N);

  size_t threads_per_block = 256;
  size_t number_of_blocks = 32;

  doubleElements<<<number_of_blocks, threads_per_block>>>(a, N);
  cudaDeviceSynchronize();

  bool areDoubled = checkElementsAreDoubled(a, N);
  printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");

  cudaFree(a);
}
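// --- Illustrative sketch extending the doubleElements pair above: the same grid-stride
// doubling kernel, but with an occupancy-derived launch configuration and basic error
// checking, neither of which appears in the originals (which hard-code 32 blocks of
// 256 threads). All names below are illustrative.
#include <stdio.h>
#include <cuda_runtime.h>

__global__ void doubleGridStride(int *a, int N)
{
    int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
        a[i] *= 2;
}

int main()
{
    const int N = 10000;
    int *a;
    cudaMallocManaged(&a, N * sizeof(int));
    for (int i = 0; i < N; ++i) a[i] = i;

    int minGridSize = 0, blockSize = 0;
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, doubleGridStride, 0, 0);
    if (blockSize == 0) blockSize = 256;                       // defensive fallback
    int gridSize = (N + blockSize - 1) / blockSize;

    doubleGridStride<<<gridSize, blockSize>>>(a, N);
    cudaError_t err = cudaGetLastError();                      // launch-configuration errors
    if (err == cudaSuccess) err = cudaDeviceSynchronize();     // asynchronous runtime errors
    if (err != cudaSuccess) printf("CUDA error: %s\n", cudaGetErrorString(err));

    bool ok = true;
    for (int i = 0; i < N; ++i) ok = ok && (a[i] == 2 * i);
    printf("All elements were doubled? %s\n", ok ? "TRUE" : "FALSE");

    cudaFree(a);
    return 0;
}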
c2066d2aae28852fc3b53049c5037c232988ceb4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <benchmark/benchmark.h>

#include "init/init.hpp"
#include "unsafe_reduction/args.hpp"
#include "utils/utils.hpp"

#include "kernel_hip.cuh"

using namespace wmma_unsafe_reduction;

template <size_t SEGMENT_SIZE, int WARPS_PER_BLOCK>
static void tryCUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N(benchmark::State &state) {
  const size_t num_segments = state.range(0);
  const size_t segment_size = state.range(1);

  if (segment_size != SEGMENT_SIZE) {
    state.SkipWithError(fmt::format("segment_size={} must be equal to SEGMENT_SIZE={} ",
                                    segment_size, SEGMENT_SIZE)
                            .c_str());
    return;
  }

  if (segment_size % WMMA_TILE_SIZE != 0) {
    state.SkipWithError("segment_size must be multiples of WMMA_TILE_SIZE");
    return;
  }

  const int BLOCK_DIM       = WARPS_PER_BLOCK * WARP_SIZE;
  const size_t num_elements = num_segments * segment_size;

  half *d_in_fp16 = nullptr;
  half *d_out     = nullptr;

  try {
    PRINT_IF_ERROR(hipMalloc(&d_in_fp16, num_elements * sizeof(half)));
    PRINT_IF_ERROR(hipMalloc(&d_out, num_segments * sizeof(half)));

    cuda_memory_set(d_in_fp16, 0.001f, num_elements);

    dim3 gridDim, blockDim;
    blockDim.x = BLOCK_DIM;
    gridDim.x  = (num_segments + WARPS_PER_BLOCK - 1) / WARPS_PER_BLOCK;

    if (gridDim.x >= CUDA_MAX_GRID_SIZE) {
      state.SkipWithError(
          fmt::format("gridDim.x={} is greater than CUDA_MAX_GRID_SIZE", gridDim.x)
              .c_str());
      return;
    }

    hipEvent_t start, stop;
    PRINT_IF_ERROR(hipEventCreate(&start));
    PRINT_IF_ERROR(hipEventCreate(&stop));
    defer(hipEventDestroy(start));
    defer(hipEventDestroy(stop));

    for (auto _ : state) {
      PRINT_IF_ERROR(hipEventRecord(start));

      hipLaunchKernelGGL(
          (compute_wmma_segmented_reduction_256n<half, SEGMENT_SIZE, WARPS_PER_BLOCK,
                                                 BLOCK_DIM>),
          dim3(gridDim), dim3(blockDim), 0, 0, d_in_fp16, d_out, num_segments);

      PRINT_IF_ERROR(hipEventRecord(stop));
      PRINT_IF_ERROR(hipEventSynchronize(stop));

      state.PauseTiming();

      float msecTotal = 0.0f;
      PRINT_IF_ERROR(hipEventElapsedTime(&msecTotal, start, stop));
      state.SetIterationTime(msecTotal / 1000);
      state.ResumeTiming();
    }

    state.counters.insert({{"num_segments", num_segments},
                           {"segment_size", segment_size},
                           {"num_elements", num_segments * segment_size},
                           {"warps_per_block", WARPS_PER_BLOCK},
                           {"flops",
                            {state.iterations() * 1.0 * num_segments * segment_size,
                             benchmark::Counter::kAvgThreadsRate}}});

#if 0
    half h_out[num_segments];
    PRINT_IF_ERROR(hipMemcpy(h_out, d_out, num_segments * sizeof(half),
                             hipMemcpyDeviceToHost));

    int errors = 0;
    for (int j = 0; j < num_segments; j++) {
      float correct_segment_sum = 0;
      for (int i = 0; i < segment_size; i++) {
        correct_segment_sum += h_in[j * segment_size + i];
      }
      if (std::is_same<half, float>::value) {
        if (fabs((h_out[j]) - correct_segment_sum) > 0.1) {
          errors++;
          printf("Expected %f, get h_out[%d] = %f\n", correct_segment_sum, j,
                 (h_out[j]));
        }
      } else {
        if (fabs(half_to_float(h_out[j]) - correct_segment_sum) > 0.1) {
          errors++;
          printf("Expected %f, get h_out[%d] = %f\n", correct_segment_sum, j,
                 (half_to_float(h_out[j])));
        }
      }
    }

    if (errors > 0) {
      printf("CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N does not agree with "
             "SEQUENTIAL! %d "
             "errors!\n",
             errors);
    } else {
      printf("Results verified: they agree.\n\n");
    }
#endif

    hipFree(d_in_fp16);
    hipFree(d_out);
  } catch (...) {
    hipFree(d_in_fp16);
    hipFree(d_out);

    hipDeviceReset();
    const auto p = std::current_exception();
    std::rethrow_exception(p);
  }
}

template <size_t SEGMENT_SIZE, int WARPS_PER_BLOCK>
static void iCUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N(benchmark::State &state) {
  hipDeviceReset();
  try {
    tryCUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N<SEGMENT_SIZE, WARPS_PER_BLOCK>(state);
  } catch (const std::exception &e) {
    state.SkipWithError(e.what());
  } catch (const std::string &e) {
    state.SkipWithError(e.c_str());
  } catch (...) {
    state.SkipWithError("unknown exception");
  }
}

template <int WARPS_PER_BLOCK>
static void CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N(benchmark::State &state) {
  const size_t segment_size = state.range(1);
  switch (segment_size) {
#define Dispatch(N)                                                            \
  case N:                                                                      \
    iCUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N<N, WARPS_PER_BLOCK>(state);     \
    break
    Dispatch(256);
    Dispatch(512);
    Dispatch(1024);
    Dispatch(2048);
    Dispatch(4096);
    Dispatch(8192);
    Dispatch(16384);
    Dispatch(32768);
    Dispatch(65536);
    Dispatch(131072);
    Dispatch(262144);
    Dispatch(524288);
    Dispatch(1048576);
    Dispatch(2097152);
    Dispatch(4194304);
    Dispatch(8388608);
    Dispatch(16777216);
    Dispatch(33554432);
    Dispatch(67108864);
    Dispatch(134217728);
    Dispatch(268435456);
    Dispatch(536870912);
    Dispatch(1073741824);
    default:
      static_assert(true, "invalid segment size");
      state.SkipWithError("invalid segment size");
#undef DISPATCH
  }
}

template <int WARPS_PER_BLOCK>
static void CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_256N(benchmark::State &state) {
  CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N<WARPS_PER_BLOCK>(state);
}

#define RUN_CUDA_UNSAFE_WMMA_TUNE(TUNE_ARGS)                                   \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_256N, 1)        \
      ->Apply(TUNE_ARGS)                                                       \
      ->UseManualTime();                                                       \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_256N, 2)        \
      ->Apply(TUNE_ARGS)                                                       \
      ->UseManualTime();                                                       \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_256N, 4)        \
      ->Apply(TUNE_ARGS)                                                       \
      ->UseManualTime();                                                       \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_256N, 8)        \
      ->Apply(TUNE_ARGS)                                                       \
      ->UseManualTime();                                                       \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_256N, 16)       \
      ->Apply(TUNE_ARGS)                                                       \
      ->UseManualTime();

// RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning256_x_14);
// RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning256_x_18);
// RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning256_x_22);
// RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning256_x_26);
RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning256_x_30);

#define RUN_CUDA_UNSAFE_WMMA(Args)                                             \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N, 1)             \
      ->Args()                                                                 \
      ->UseManualTime();                                                       \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N, 2)             \
      ->Args()                                                                 \
      ->UseManualTime();                                                       \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N, 4)             \
      ->Args()                                                                 \
      ->UseManualTime();                                                       \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N, 8)             \
      ->Args()                                                                 \
      ->UseManualTime();                                                       \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N, 16)            \
      ->Args()                                                                 \
      ->UseManualTime()

RUN_CUDA_UNSAFE_WMMA(SEG_256_ARGS);
RUN_CUDA_UNSAFE_WMMA(SEG_512_ARGS);
RUN_CUDA_UNSAFE_WMMA(SEG_1024_ARGS);
RUN_CUDA_UNSAFE_WMMA(SEG_2048_ARGS);
RUN_CUDA_UNSAFE_WMMA(SEG_4096_ARGS);
RUN_CUDA_UNSAFE_WMMA(SEG_8192_ARGS);
RUN_CUDA_UNSAFE_WMMA(SEG_16384_ARGS);
c2066d2aae28852fc3b53049c5037c232988ceb4.cu
#include <benchmark/benchmark.h>

#include "init/init.hpp"
#include "unsafe_reduction/args.hpp"
#include "utils/utils.hpp"

#include "kernel.cuh"

using namespace wmma_unsafe_reduction;

template <size_t SEGMENT_SIZE, int WARPS_PER_BLOCK>
static void tryCUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N(benchmark::State &state) {
  const size_t num_segments = state.range(0);
  const size_t segment_size = state.range(1);

  if (segment_size != SEGMENT_SIZE) {
    state.SkipWithError(fmt::format("segment_size={} must be equal to SEGMENT_SIZE={} ",
                                    segment_size, SEGMENT_SIZE)
                            .c_str());
    return;
  }

  if (segment_size % WMMA_TILE_SIZE != 0) {
    state.SkipWithError("segment_size must be multiples of WMMA_TILE_SIZE");
    return;
  }

  const int BLOCK_DIM       = WARPS_PER_BLOCK * WARP_SIZE;
  const size_t num_elements = num_segments * segment_size;

  half *d_in_fp16 = nullptr;
  half *d_out     = nullptr;

  try {
    PRINT_IF_ERROR(cudaMalloc(&d_in_fp16, num_elements * sizeof(half)));
    PRINT_IF_ERROR(cudaMalloc(&d_out, num_segments * sizeof(half)));

    cuda_memory_set(d_in_fp16, 0.001f, num_elements);

    dim3 gridDim, blockDim;
    blockDim.x = BLOCK_DIM;
    gridDim.x  = (num_segments + WARPS_PER_BLOCK - 1) / WARPS_PER_BLOCK;

    if (gridDim.x >= CUDA_MAX_GRID_SIZE) {
      state.SkipWithError(
          fmt::format("gridDim.x={} is greater than CUDA_MAX_GRID_SIZE", gridDim.x)
              .c_str());
      return;
    }

    cudaEvent_t start, stop;
    PRINT_IF_ERROR(cudaEventCreate(&start));
    PRINT_IF_ERROR(cudaEventCreate(&stop));
    defer(cudaEventDestroy(start));
    defer(cudaEventDestroy(stop));

    for (auto _ : state) {
      PRINT_IF_ERROR(cudaEventRecord(start));

      compute_wmma_segmented_reduction_256n<half, SEGMENT_SIZE, WARPS_PER_BLOCK,
                                            BLOCK_DIM>
          <<<gridDim, blockDim>>>(d_in_fp16, d_out, num_segments);

      PRINT_IF_ERROR(cudaEventRecord(stop));
      PRINT_IF_ERROR(cudaEventSynchronize(stop));

      state.PauseTiming();

      float msecTotal = 0.0f;
      PRINT_IF_ERROR(cudaEventElapsedTime(&msecTotal, start, stop));
      state.SetIterationTime(msecTotal / 1000);
      state.ResumeTiming();
    }

    state.counters.insert({{"num_segments", num_segments},
                           {"segment_size", segment_size},
                           {"num_elements", num_segments * segment_size},
                           {"warps_per_block", WARPS_PER_BLOCK},
                           {"flops",
                            {state.iterations() * 1.0 * num_segments * segment_size,
                             benchmark::Counter::kAvgThreadsRate}}});

#if 0
    half h_out[num_segments];
    PRINT_IF_ERROR(cudaMemcpy(h_out, d_out, num_segments * sizeof(half),
                              cudaMemcpyDeviceToHost));

    int errors = 0;
    for (int j = 0; j < num_segments; j++) {
      float correct_segment_sum = 0;
      for (int i = 0; i < segment_size; i++) {
        correct_segment_sum += h_in[j * segment_size + i];
      }
      if (std::is_same<half, float>::value) {
        if (fabs((h_out[j]) - correct_segment_sum) > 0.1) {
          errors++;
          printf("Expected %f, get h_out[%d] = %f\n", correct_segment_sum, j,
                 (h_out[j]));
        }
      } else {
        if (fabs(half_to_float(h_out[j]) - correct_segment_sum) > 0.1) {
          errors++;
          printf("Expected %f, get h_out[%d] = %f\n", correct_segment_sum, j,
                 (half_to_float(h_out[j])));
        }
      }
    }

    if (errors > 0) {
      printf("CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N does not agree with "
             "SEQUENTIAL! %d "
             "errors!\n",
             errors);
    } else {
      printf("Results verified: they agree.\n\n");
    }
#endif

    cudaFree(d_in_fp16);
    cudaFree(d_out);
  } catch (...) {
    cudaFree(d_in_fp16);
    cudaFree(d_out);

    cudaDeviceReset();
    const auto p = std::current_exception();
    std::rethrow_exception(p);
  }
}

template <size_t SEGMENT_SIZE, int WARPS_PER_BLOCK>
static void iCUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N(benchmark::State &state) {
  cudaDeviceReset();
  try {
    tryCUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N<SEGMENT_SIZE, WARPS_PER_BLOCK>(state);
  } catch (const std::exception &e) {
    state.SkipWithError(e.what());
  } catch (const std::string &e) {
    state.SkipWithError(e.c_str());
  } catch (...) {
    state.SkipWithError("unknown exception");
  }
}

template <int WARPS_PER_BLOCK>
static void CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N(benchmark::State &state) {
  const size_t segment_size = state.range(1);
  switch (segment_size) {
#define Dispatch(N)                                                            \
  case N:                                                                      \
    iCUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N<N, WARPS_PER_BLOCK>(state);     \
    break
    Dispatch(256);
    Dispatch(512);
    Dispatch(1024);
    Dispatch(2048);
    Dispatch(4096);
    Dispatch(8192);
    Dispatch(16384);
    Dispatch(32768);
    Dispatch(65536);
    Dispatch(131072);
    Dispatch(262144);
    Dispatch(524288);
    Dispatch(1048576);
    Dispatch(2097152);
    Dispatch(4194304);
    Dispatch(8388608);
    Dispatch(16777216);
    Dispatch(33554432);
    Dispatch(67108864);
    Dispatch(134217728);
    Dispatch(268435456);
    Dispatch(536870912);
    Dispatch(1073741824);
    default:
      static_assert(true, "invalid segment size");
      state.SkipWithError("invalid segment size");
#undef DISPATCH
  }
}

template <int WARPS_PER_BLOCK>
static void CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_256N(benchmark::State &state) {
  CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N<WARPS_PER_BLOCK>(state);
}

#define RUN_CUDA_UNSAFE_WMMA_TUNE(TUNE_ARGS)                                   \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_256N, 1)        \
      ->Apply(TUNE_ARGS)                                                       \
      ->UseManualTime();                                                       \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_256N, 2)        \
      ->Apply(TUNE_ARGS)                                                       \
      ->UseManualTime();                                                       \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_256N, 4)        \
      ->Apply(TUNE_ARGS)                                                       \
      ->UseManualTime();                                                       \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_256N, 8)        \
      ->Apply(TUNE_ARGS)                                                       \
      ->UseManualTime();                                                       \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_256N, 16)       \
      ->Apply(TUNE_ARGS)                                                       \
      ->UseManualTime();

// RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning256_x_14);
// RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning256_x_18);
// RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning256_x_22);
// RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning256_x_26);
RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning256_x_30);

#define RUN_CUDA_UNSAFE_WMMA(Args)                                             \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N, 1)             \
      ->Args()                                                                 \
      ->UseManualTime();                                                       \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N, 2)             \
      ->Args()                                                                 \
      ->UseManualTime();                                                       \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N, 4)             \
      ->Args()                                                                 \
      ->UseManualTime();                                                       \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N, 8)             \
      ->Args()                                                                 \
      ->UseManualTime();                                                       \
  BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_256N, 16)            \
      ->Args()                                                                 \
      ->UseManualTime()

RUN_CUDA_UNSAFE_WMMA(SEG_256_ARGS);
RUN_CUDA_UNSAFE_WMMA(SEG_512_ARGS);
RUN_CUDA_UNSAFE_WMMA(SEG_1024_ARGS);
RUN_CUDA_UNSAFE_WMMA(SEG_2048_ARGS);
RUN_CUDA_UNSAFE_WMMA(SEG_4096_ARGS);
RUN_CUDA_UNSAFE_WMMA(SEG_8192_ARGS);
RUN_CUDA_UNSAFE_WMMA(SEG_16384_ARGS);
7ab6a420767d09b401ed848b74173e77b8c42dae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/native/TensorAdvancedIndexing.h> #include <ATen/native/IndexingUtils.h> #include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/ExpandUtils.h> #include <ATen/MemoryOverlap.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/AccumulateType.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/HIPUtils.h> #include <THH/THHDeviceUtils.cuh> #include <THH/THHGeneral.h> #include <THH/THHTensorSort.cuh> #include <ATen/hip/HIPContext.h> #include <THH/THHThrustAllocator.cuh> #include <thrust/execution_policy.h> #include <thrust/sort.h> #include <THH/THHAtomics.cuh> #include <c10/macros/Macros.h> namespace { template <typename scalar_t, int SZ> __global__ void indexing_backward_kernel( int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight, int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim) { //numel is total number of flattened indices, not expanded to dimensions that are not indexed. //stride is the cumulative size of the not-indexed last dimensions //stride_before is the stride of the dimension immediately preceding first indexed dimension //if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case //outer_dim is number of elements in the first unindexed dimensions using accscalar_t = at::acc_type<scalar_t, true>; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same destination index as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. // // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values processed by each thread (grain size) for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){ int64_t idx = blockIdx.x * blockDim.y + threadIdx.y; if (idx < numel && (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){ do { int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before; const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride; const accscalar_t scale = (accscalar_t)1.0; accscalar_t gradient[SZ]; accscalar_t weight[SZ]; while (start_feature < stride) { #pragma unroll for (int ii = 0; ii < SZ; ii++) { int64_t feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]); weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii] * scale; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int64_t feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]); } } start_feature += gridDim.y * blockDim.x * SZ; } idx++; } while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]); } } } } namespace at { namespace native { static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) { //we don't need to check range in backward - if there were out of bounds indices forward should already have errored out if (index.numel() != 0 && check_range) { auto max_idx = 
index.max().item<int64_t>(); auto min_idx = index.min().item<int64_t>(); if (max_idx >= dim_size) { TORCH_CHECK_INDEX(false, "index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size); } if (min_idx < -dim_size) { TORCH_CHECK_INDEX(false, "index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size); } } return index.remainder(dim_size); } static std::vector<int64_t> computeLinearStride(const Tensor & tensor) { // computes the stride as if tensor were contiguous auto sizes = tensor.sizes(); std::vector<int64_t> stride(tensor.dim()); stride[tensor.dim() - 1] = 1; std::partial_sum(sizes.rbegin(), sizes.rend() - 1, stride.rbegin() + 1, std::multiplies<int64_t>()); return stride; } static std::tuple<Tensor, int64_t, int64_t, int64_t> computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) { auto strides = computeLinearStride(src); const auto& device = src.options().device(); // Compute the linear index by multiplying the indexing tensors by the // stride and summing them. All the indexing tensors have the same shape at // this point. We also compute the number of dimensions before and after that // are not being index. Tensor linearIndex; int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0; for (auto i = decltype(src.dim()){0}; i < src.dim(); i++) { if (indices[i].defined()) { // Cast index to the longType matching src's device // This allows us to support ie indexing a cuda tensor with a cpu tensor Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).to(device); if (linearIndex.defined()) { linearIndex += index; } else { linearIndex = index; if (i>0) { strideBefore = src.stride(i-1); // stride after undefined dimensions } } } else if (linearIndex.defined()) { emptyAfter++; nElemAfter *= src.size(i); } else { emptyBefore++; nElemBefore *= src.size(i); } } return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter); } static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, const c10::List<c10::optional<at::Tensor>>& orig, bool check_range) { checkIndexTensorTypes(orig); // first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors auto indices = expandTensors(self, orig); // next broadcast all index tensors together indices = expand_outplace(indices); // add missing null Tensors so that it matches self.dim() while (indices.size() < (size_t)self.dim()) { indices.emplace_back(); } // if the non-null indices are not all adjacent, transpose self and indices // together so that they're adjacent at the front std::vector<int64_t> inversePerm; if (!hasContiguousSubspace(indices)) { std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices); } int64_t nElemBefore, strideBefore, nElemAfter; Tensor linearIndex; std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range); return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm); } namespace { void index_put_accum_kernel(Tensor & self, const c10::List<c10::optional<Tensor>>& indices, const Tensor & value, bool unsafe) { if (indices.size() > (size_t)self.dim()) { TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")"); } auto value_ = value.contiguous(); Tensor linearIndex, expandedValue, src; int64_t nElemBefore, strideBefore, sliceSize; 
std::vector<int64_t> inversePerm; std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe); int64_t num_indices = linearIndex.numel(); if (num_indices > 0 && sliceSize > 0) { const bool permuted = !src.is_contiguous(); auto src_ = permuted ? src.contiguous() : src; linearIndex = linearIndex.reshape(-1); auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT); using device_ptr = thrust::device_ptr<int64_t>; const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); linearIndex.divide_(sliceSize, "trunc"); { sorted_indices.copy_(linearIndex); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); // Fill sortedOrigIndices with sequential indices const auto count_iter = thrust::counting_iterator<int64_t>(0); auto orig_data = device_ptr(orig_indices.data_ptr<int64_t>()); thrust::copy(policy, count_iter, count_iter + num_indices, orig_data); // Sort the inputs into sorted with the corresponding indices; we // don't need a stable or multidimensional sort, so just use Thrust // directly // Sort; a stable sort is not required // NB - not passing comparator causes thrust to use radix sort, and it hurts perf A LOT, at least for medium (few K) sized indices auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>()); thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, ThrustLTOp<int64_t>()); } TORCH_INTERNAL_ASSERT(linearIndex.numel()*sliceSize*nElemBefore == value.numel(), "number of flattened indices did not match number of elements in the value tensor", linearIndex.numel()*sliceSize*nElemBefore, value.numel()); const int UNROLL = 4; const int indices_per_block = 4; dim3 grid(THCCeilDiv(num_indices, (int64_t) indices_per_block), std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], THCCeilDiv(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))), ::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2])); dim3 block(C10_WARP_SIZE, indices_per_block); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, value_.scalar_type(), "indexing_backward", [&] { hipLaunchKernelGGL(( indexing_backward_kernel<scalar_t, UNROLL>), dim3(grid), dim3(block), 0, stream, sorted_indices.data_ptr<int64_t>(), orig_indices.data_ptr<int64_t>(), value_.data_ptr<scalar_t>(), src_.data_ptr<scalar_t>(), num_indices, sliceSize, strideBefore, nElemBefore); C10_HIP_KERNEL_LAUNCH_CHECK(); }); if (permuted) self.copy_(src_.permute(inversePerm)); } } REGISTER_CUDA_DISPATCH(index_put_accum_stub, &index_put_accum_kernel); } //anonymous // Check tensor dimensions for index operations, and return the slice size. 
static ptrdiff_t getSliceSize(const Tensor & dst, int dim, const Tensor & index, const Tensor & src) { int dstDims = dst.dim(); int srcDims = src.dim(); TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar"); ptrdiff_t dstSliceSize = 1; TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds"); for (int d = 0; d < dstDims; d++) { if (d != dim) { dstSliceSize *= dst.size(d); } } TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds"); TORCH_CHECK(index.numel() == src.size(dim), "length of src.size[dim] is not equal to length of indices"); ptrdiff_t srcSliceSize = 1; bool mismatch = false; if (dstDims != srcDims) mismatch = true; for (int d = 0; d < srcDims; d++) { if (d != dim) { srcSliceSize *= src.size(d); if (!mismatch && dst.size(d) != src.size(d)) mismatch = true; } } TORCH_CHECK(dstSliceSize == srcSliceSize, "Source/destination tensor have different slice sizes (%ld vs %ld)", dstSliceSize, srcSliceSize); if (mismatch) { TORCH_WARN_ONCE( "Warning: source/destination slices have same size but different " "shape for an index operation. This behavior is deprecated.\n"); } return dstSliceSize; } // We prefer this kernel to avoid reloading index points if the number // of indices is a small number. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is large, then the // indexAddLargeIndex kernel is a better choice to increase // parallelism. template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim> __global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstAddDim, int srcAddDim, IndexType innerSize, int64_t dstAddDimSize, T alpha) { // In order to avoid reloading the index that we are copying, load // it once to handle all of the points that are being selected, so // it can be reused as much as possible. This kernel is chosen when // this is a good choice (small number of chosen indices), since // re-accessing indices in addition to src elements can be slow. for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) { // Lua indices begin at 1 IndexType dstIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)]; CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize); // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < innerSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst); dstOffset += dstIndex * dst.strides[dstAddDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src); srcOffset += srcIndex * src.strides[srcAddDim]; gpuAtomicAdd(&dst.data[dstOffset], src.data[srcOffset] * alpha); } } } // We prefer this kernel to balance parallelism across index points, // if there are a large number of indices. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is small, then the // indexAddSmallIndex kernel is a better choice to reduce memory // accesses. 
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim, bool IndexIsMajor> __global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstAddDim, int srcAddDim, IndexType totalSize, IndexType innerSize, int64_t dstAddDimSize, T alpha) { // We stride over the output including the indexed dimension // (totalSize), and calculate the destination index point based on that for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalSize; linearIndex += gridDim.x * blockDim.x) { IndexType srcIndex, elementInSlice; if (IndexIsMajor) { srcIndex = linearIndex / innerSize; elementInSlice = linearIndex % innerSize; } else { elementInSlice = linearIndex / innerSize; srcIndex = linearIndex % innerSize; } // Lua indices begin at 1 IndexType dstIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)]; CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize); IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst); dstOffset += dstIndex * dst.strides[dstAddDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src); srcOffset += srcIndex * src.strides[srcAddDim]; gpuAtomicAdd(&dst.data[dstOffset], src.data[srcOffset] * alpha); } } // Compare the stride between adjacent slices (sliceStride) with strides in the // other dimensions (i.e., strides *inside* each slice). // // - Returns true if some dimension inside the slice has lower stride than // sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim // == 0 (that is, each slice is a row). // // In this case, we choose the CUDA kernel that processes the data in // "index-major order". For example, if thread count equals slice size, then // all threads process slice #0 in lockstep, and then slice #1, and so on. // // - Otherwise (i.e., sliceStride has the lowest value), this function returns // false. The simplest example is a 2-D contiguous tensor with sliceDim == 1 // (each slice is a column). // // In this case, we choose the CUDA kernel that processes the data in // "elementInSlice-major order". For example, each thread can process element // #0 of every slice, and then element #1 of every slice, and so on. template <typename scalar_t> bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info, int sliceDim) { // The stride between adjacent slices (e.g., between element #0 of slice #100 // and element #0 of slice #101). 
unsigned int sliceStride = info.strides[sliceDim]; for (int i = 0; i < info.dims; ++i) { if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) { return true; } } return false; } Tensor& index_add_cuda_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const Scalar &alpha) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("index_add_cuda_"); dim = maybe_wrap_dim(dim, self.dim()); TensorArg self_arg{self, "self", 1}, index_arg{index, "index", 3}, source_arg{source, "source", 4}; checkAllSameGPU("index_add", {self_arg, index_arg, source_arg}); TORCH_CHECK_INDEX(index.dim() <= 1, "index_add_(): Index is supposed to be a vector"); TORCH_CHECK(index.scalar_type() == ScalarType::Long || index.scalar_type() == ScalarType::Int, "index_add_(): Expected dtype int32/int64 for index"); TORCH_CHECK(self.scalar_type() == source.scalar_type(), "index_add_(): self and source must have the same scalar type"); TORCH_CHECK(dim == 0 || dim < source.dim(), "index_add_(): Indexing dim ", dim, " is out of bounds of tensor"); TORCH_CHECK(index.numel() == (source.dim() == 0 ? 1 : source.size(dim)), "index_add_(): Number of indices should be equal to self.size(dim)"); at::assert_no_internal_overlap(self); at::assert_no_overlap(self, index); at::assert_no_overlap(self, source); // Scalars are treated as 1-d tensor Tensor self_ = (self.dim() == 0) ? self.view(1) : self; Tensor source_ = (source.dim() == 0) ? source.view(1) : source; TORCH_CHECK(self.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING); TORCH_CHECK(source.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING); TORCH_CHECK(index.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING); at::assert_no_internal_overlap(self); at::assert_no_partial_overlap(self, index); at::assert_no_partial_overlap(self, source); // The `source` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of index we are choosing, which is the total size // of the tensor `index`. ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_); ptrdiff_t sourceTotalSize = source.numel(); int64_t selfAddDimSize = self_.size(dim); ptrdiff_t numIndex = index.numel(); if (sliceSize == 0) { return self; } const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); bool indContig = index.is_contiguous(); int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM) \ hipLaunchKernelGGL(( indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM>) \ , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \ selfInfo, sourceInfo, indexInfo, \ selfAddDim, sourceAddDim, sliceSize, selfAddDimSize, alpha_value); \ C10_HIP_KERNEL_LAUNCH_CHECK(); #define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \ SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR) \ hipLaunchKernelGGL(( indexAddLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \ SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR>) \ , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \ selfInfo, sourceInfo, indexInfo, \ selfAddDim, sourceAddDim, sourceTotalSize, \ (IDX_IS_MAJOR) ? 
sliceSize : numIndex, \ selfAddDimSize, alpha_value); \ C10_HIP_KERNEL_LAUNCH_CHECK(); dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(::min(THCCeilDiv(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(::min(sourceTotalSize, (ptrdiff_t)128)); if (cuda::detail::canUse32BitIndexMath(self) && cuda::detail::canUse32BitIndexMath(source) && cuda::detail::canUse32BitIndexMath(index)) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] { cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo = cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_); int selfAddDim = selfInfo.collapseDims(dim); selfInfo.reduceDim(selfAddDim); auto alpha_value = alpha.to<scalar_t>(); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () { auto sourceInfo = cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_); int sourceAddDim = sourceInfo.collapseDims(dim); sourceInfo.reduceDim(sourceAddDim); auto indexInfo = cuda::detail::getTensorInfo<index_t, unsigned int>(index); indexInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // index to choose if (numIndex <= 16) { if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2); } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2); } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim); if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true); } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false); } } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true); } } }); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] { cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo = cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_); int selfAddDim = selfInfo.collapseDims(dim); selfInfo.reduceDim(selfAddDim); auto alpha_value = alpha.to<scalar_t>(); cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo = cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_); int sourceAddDim = sourceInfo.collapseDims(dim); sourceInfo.reduceDim(sourceAddDim); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () { cuda::detail::TensorInfo<index_t, uint64_t> indexInfo = cuda::detail::getTensorInfo<index_t, uint64_t>(index); indexInfo.collapseDims(); LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true); }); }); } return self; #undef SMALL_INDEX #undef LARGE_INDEX } namespace { // We prefer this kernel to avoid reloading index points if the number // of indices is 
a small number. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is large, then the // indexSelectLargeIndex kernel is a better choice to increase // parallelism. template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim> __global__ void indexSelectSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstSelectDim, int srcSelectDim, IndexType innerSize, int64_t srcSelectDimSize) { // In order to avoid reloading the index that we are copying, load // it once to handle all of the points that are being selected, so // it can be reused as much as possible. This kernel is chosen when // this is a good choice (small number of chosen indices), since // re-accessing indices in addition to src elements can be slow. for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) { IndexType srcIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)]; CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize); // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < innerSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst); dstOffset += dstIndex * dst.strides[dstSelectDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src); srcOffset += srcIndex * src.strides[srcSelectDim]; dst.data[dstOffset] = src.data[srcOffset]; } } } // We prefer this kernel to balance parallelism across index points, // if there are a large number of indices. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is small, then the // indexSelectSmallIndex kernel is a better choice to reduce memory // accesses. 
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim, bool IndexIsMajor> __global__ void indexSelectLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstSelectDim, int srcSelectDim, IndexType totalSize, IndexType innerSize, int64_t srcSelectDimSize) { // We stride over the output including the indexed dimension // (totalSize), and calculate the destination index point based on that for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstIndex, elementInSlice; if (IndexIsMajor) { dstIndex = linearIndex / innerSize; elementInSlice = linearIndex % innerSize; } else { elementInSlice = linearIndex / innerSize; dstIndex = linearIndex % innerSize; } IndexType srcIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)]; CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize); IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst); dstOffset += dstIndex * dst.strides[dstSelectDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src); srcOffset += srcIndex * src.strides[srcSelectDim]; dst.data[dstOffset] = src.data[srcOffset]; } } namespace { // When using a 0-dim scalar tensor, we need the legacy (THC) semantics of // TensorInfo: Pretend that the scalar tensor is in fact a one-element vector. template <typename T, typename IndexType> cuda::detail::TensorInfo<T, IndexType> tensorInfoLegacyIfScalar(cuda::detail::TensorInfo<T, IndexType> ti) { if (ti.dims == 0) { ti.dims = 1; ti.sizes[0] = 1; ti.strides[0] = 1; } return ti; } } template<typename scalar_t> void index_select_out_cuda_impl(Tensor& out, const Tensor& self, long dim, const Tensor& index) { ptrdiff_t numIndices = index.numel(); int selfDims = self.dim() == 0 ? 1 : self.dim(); const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); TORCH_CHECK(index.dim() <= 1, "Index is supposed to be an empty tensor or a vector"); TORCH_CHECK(dim < selfDims, "Indexing dim is out of bounds"); std::vector<int64_t> newSize = self.sizes().vec(); if (self.dim() > 0) { newSize[dim] = numIndices; } at::native::resize_(out, newSize, {}); ptrdiff_t outTotalSize = out.numel(); if (outTotalSize == 0) { return; } bool indContig = index.is_contiguous(); // The `self` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. int64_t selfSelectDimSize = self.dim() == 0 ? 
1 : self.size(dim); ptrdiff_t sliceSize = outTotalSize / numIndices; int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ hipLaunchKernelGGL(( indexSelectSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \ , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \ outInfo, selfInfo, indicesInfo, \ outSelectDim, selfSelectDim, static_cast<TYPE>(sliceSize), \ selfSelectDimSize); \ C10_HIP_KERNEL_LAUNCH_CHECK(); #define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \ hipLaunchKernelGGL(( indexSelectLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \ , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \ outInfo, selfInfo, indicesInfo, \ outSelectDim, selfSelectDim, static_cast<TYPE>(outTotalSize), \ static_cast<TYPE>((IDX_IS_MAJOR) ? sliceSize : numIndices), \ selfSelectDimSize); \ C10_HIP_KERNEL_LAUNCH_CHECK(); dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(::min(THCCeilDiv(outTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(::min(outTotalSize, (ptrdiff_t)128)); if (cuda::detail::canUse32BitIndexMath(out) && cuda::detail::canUse32BitIndexMath(self) && cuda::detail::canUse32BitIndexMath(index)) { auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(out)); int outSelectDim = outInfo.collapseDims(dim); outInfo.reduceDim(outSelectDim); auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(self)); int selfSelectDim = selfInfo.collapseDims(dim); selfInfo.reduceDim(selfSelectDim); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () { auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, unsigned int>(index)); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2); } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2); } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = indexShouldBeMajor(outInfo, outSelectDim); if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true); } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false); } } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true); } } }); } else { auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(out)); int outSelectDim = outInfo.collapseDims(dim); outInfo.reduceDim(outSelectDim); auto selfInfo = 
tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(self)); int selfSelectDim = selfInfo.collapseDims(dim); selfInfo.reduceDim(selfSelectDim); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () { auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, uint64_t>(index)); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true); }); } #undef SMALL_INDEX #undef LARGE_INDEX } } // anonymous namespace Tensor& index_select_out_cuda(const Tensor& self, int64_t dim, const Tensor& index, Tensor& out) { static constexpr string_view DIM_WARNING = "Tensor too large or too many (> 25) dimensions"; TORCH_CHECK(at::cuda::check_device({out, self, index}), "Input, output and indices must be on the current device"); at::assert_no_internal_overlap(out); at::assert_no_overlap(out, self); at::assert_no_overlap(out, index); dim = at::maybe_wrap_dim(dim, self); TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING); TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, out.scalar_type(), "index_select_cuda", [&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); }); return out; } Tensor index_select_cuda(const Tensor& self, int64_t dim, const Tensor& index) { Tensor out = at::empty({0}, self.options()); at::native::index_select_out_cuda(self, dim, index, out); return out; } namespace { template <typename mask_t> void masked_fill_kernel(TensorIterator& iter, const Scalar& value) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( kBool, kHalf, kBFloat16, iter.common_dtype(), "masked_fill_", [&]() { const auto value_ = value.to<scalar_t>(); gpu_kernel( iter, [value_] GPU_LAMBDA(scalar_t self, mask_t mask) -> scalar_t { if (mask) { return value_; } return self; }); }); } } // anonymous namespace Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Scalar& value) { TORCH_CHECK(self.device() == mask.device(), "expected self and mask to be on the same device, but got mask on ", mask.device(), " and self on ", self.device()); TORCH_CHECK(mask.scalar_type() == kByte || mask.scalar_type() == kBool, "expected mask dtype to be Bool but got ", mask.scalar_type()); auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_"); if (at::has_internal_overlap(self) == MemOverlap::YES) { TORCH_WARN( "Use of masked_fill_ on expanded tensors is deprecated. " "Please clone() the tensor before performing this operation. " "This also applies to advanced indexing e.g. 
tensor[mask] = scalar"); } at::assert_no_partial_overlap(self, mask); Tensor b_mask; std::tie(b_mask) = expand_inplace(self, mask, "masked_fill_"); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self) .add_input(self) .add_input(b_mask) .build(); if (b_mask.dtype() == at::ScalarType::Byte) { TORCH_WARN("masked_fill_ received a mask with dtype torch.uint8, this behavior is now deprecated," \ "please use a mask with dtype torch.bool instead."); masked_fill_kernel<uint8_t>(iter, value); } else { masked_fill_kernel<bool>(iter, value); } namedinference::propagate_names_if_nonempty(self, maybe_outnames); return self; } Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Tensor & value) { TORCH_CHECK(value.dim() == 0, "masked_fill_ only supports a 0-dimensional value tensor, but got tensor " "with ", value.dim(), " dimension(s)."); return masked_fill__cuda(self, mask, value.item()); } } // native } // at
7ab6a420767d09b401ed848b74173e77b8c42dae.cu
#include <ATen/native/TensorAdvancedIndexing.h> #include <ATen/native/IndexingUtils.h> #include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/ExpandUtils.h> #include <ATen/MemoryOverlap.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/AccumulateType.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/CUDAUtils.h> #include <THC/THCDeviceUtils.cuh> #include <THC/THCGeneral.h> #include <THC/THCTensorSort.cuh> #include <ATen/cuda/CUDAContext.h> #include <THC/THCThrustAllocator.cuh> #include <thrust/execution_policy.h> #include <thrust/sort.h> #include <THC/THCAtomics.cuh> #include <c10/macros/Macros.h> namespace { template <typename scalar_t, int SZ> __global__ void indexing_backward_kernel( int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight, int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim) { //numel is total number of flattened indices, not expanded to dimensions that are not indexed. //stride is the cumulative size of the not-indexed last dimensions //stride_before is the stride of the dimension immediately preceding first indexed dimension //if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case //outer_dim is number of elements in the first unindexed dimensions using accscalar_t = at::acc_type<scalar_t, true>; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same destination index as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. // // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values processed by each thread (grain size) for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){ int64_t idx = blockIdx.x * blockDim.y + threadIdx.y; if (idx < numel && (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){ do { int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before; const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride; const accscalar_t scale = (accscalar_t)1.0; accscalar_t gradient[SZ]; accscalar_t weight[SZ]; while (start_feature < stride) { #pragma unroll for (int ii = 0; ii < SZ; ii++) { int64_t feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]); weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii] * scale; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int64_t feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]); } } start_feature += gridDim.y * blockDim.x * SZ; } idx++; } while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]); } } } } namespace at { namespace native { static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) { //we don't need to check range in backward - if there were out of bounds indices forward should already have errored out if (index.numel() != 0 && check_range) { auto max_idx = index.max().item<int64_t>(); auto min_idx = index.min().item<int64_t>(); if (max_idx >= dim_size) { 
TORCH_CHECK_INDEX(false, "index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size); } if (min_idx < -dim_size) { TORCH_CHECK_INDEX(false, "index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size); } } return index.remainder(dim_size); } static std::vector<int64_t> computeLinearStride(const Tensor & tensor) { // computes the stride as if tensor were contiguous auto sizes = tensor.sizes(); std::vector<int64_t> stride(tensor.dim()); stride[tensor.dim() - 1] = 1; std::partial_sum(sizes.rbegin(), sizes.rend() - 1, stride.rbegin() + 1, std::multiplies<int64_t>()); return stride; } static std::tuple<Tensor, int64_t, int64_t, int64_t> computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) { auto strides = computeLinearStride(src); const auto& device = src.options().device(); // Compute the linear index by multiplying the indexing tensors by the // stride and summing them. All the indexing tensors have the same shape at // this point. We also compute the number of dimensions before and after that // are not being index. Tensor linearIndex; int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0; for (auto i = decltype(src.dim()){0}; i < src.dim(); i++) { if (indices[i].defined()) { // Cast index to the longType matching src's device // This allows us to support ie indexing a cuda tensor with a cpu tensor Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).to(device); if (linearIndex.defined()) { linearIndex += index; } else { linearIndex = index; if (i>0) { strideBefore = src.stride(i-1); // stride after undefined dimensions } } } else if (linearIndex.defined()) { emptyAfter++; nElemAfter *= src.size(i); } else { emptyBefore++; nElemBefore *= src.size(i); } } return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter); } static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, const c10::List<c10::optional<at::Tensor>>& orig, bool check_range) { checkIndexTensorTypes(orig); // first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors auto indices = expandTensors(self, orig); // next broadcast all index tensors together indices = expand_outplace(indices); // add missing null Tensors so that it matches self.dim() while (indices.size() < (size_t)self.dim()) { indices.emplace_back(); } // if the non-null indices are not all adjacent, transpose self and indices // together so that they're adjacent at the front std::vector<int64_t> inversePerm; if (!hasContiguousSubspace(indices)) { std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices); } int64_t nElemBefore, strideBefore, nElemAfter; Tensor linearIndex; std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range); return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm); } namespace { void index_put_accum_kernel(Tensor & self, const c10::List<c10::optional<Tensor>>& indices, const Tensor & value, bool unsafe) { if (indices.size() > (size_t)self.dim()) { TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")"); } auto value_ = value.contiguous(); Tensor linearIndex, expandedValue, src; int64_t nElemBefore, strideBefore, sliceSize; std::vector<int64_t> inversePerm; std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = 
makeLinearIndex(self, indices, !unsafe); int64_t num_indices = linearIndex.numel(); if (num_indices > 0 && sliceSize > 0) { const bool permuted = !src.is_contiguous(); auto src_ = permuted ? src.contiguous() : src; linearIndex = linearIndex.reshape(-1); auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT); using device_ptr = thrust::device_ptr<int64_t>; const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); linearIndex.divide_(sliceSize, "trunc"); { sorted_indices.copy_(linearIndex); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); // Fill sortedOrigIndices with sequential indices const auto count_iter = thrust::counting_iterator<int64_t>(0); auto orig_data = device_ptr(orig_indices.data_ptr<int64_t>()); thrust::copy(policy, count_iter, count_iter + num_indices, orig_data); // Sort the inputs into sorted with the corresponding indices; we // don't need a stable or multidimensional sort, so just use Thrust // directly // Sort; a stable sort is not required // NB - not passing comparator causes thrust to use radix sort, and it hurts perf A LOT, at least for medium (few K) sized indices auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>()); thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, ThrustLTOp<int64_t>()); } TORCH_INTERNAL_ASSERT(linearIndex.numel()*sliceSize*nElemBefore == value.numel(), "number of flattened indices did not match number of elements in the value tensor", linearIndex.numel()*sliceSize*nElemBefore, value.numel()); const int UNROLL = 4; const int indices_per_block = 4; dim3 grid(THCCeilDiv(num_indices, (int64_t) indices_per_block), std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], THCCeilDiv(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))), std::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2])); dim3 block(C10_WARP_SIZE, indices_per_block); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, value_.scalar_type(), "indexing_backward", [&] { indexing_backward_kernel<scalar_t, UNROLL><<<grid, block, 0, stream>>>( sorted_indices.data_ptr<int64_t>(), orig_indices.data_ptr<int64_t>(), value_.data_ptr<scalar_t>(), src_.data_ptr<scalar_t>(), num_indices, sliceSize, strideBefore, nElemBefore); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); if (permuted) self.copy_(src_.permute(inversePerm)); } } REGISTER_CUDA_DISPATCH(index_put_accum_stub, &index_put_accum_kernel); } //anonymous // Check tensor dimensions for index operations, and return the slice size. 
static ptrdiff_t getSliceSize(const Tensor & dst, int dim, const Tensor & index, const Tensor & src) { int dstDims = dst.dim(); int srcDims = src.dim(); TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar"); ptrdiff_t dstSliceSize = 1; TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds"); for (int d = 0; d < dstDims; d++) { if (d != dim) { dstSliceSize *= dst.size(d); } } TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds"); TORCH_CHECK(index.numel() == src.size(dim), "length of src.size[dim] is not equal to length of indices"); ptrdiff_t srcSliceSize = 1; bool mismatch = false; if (dstDims != srcDims) mismatch = true; for (int d = 0; d < srcDims; d++) { if (d != dim) { srcSliceSize *= src.size(d); if (!mismatch && dst.size(d) != src.size(d)) mismatch = true; } } TORCH_CHECK(dstSliceSize == srcSliceSize, "Source/destination tensor have different slice sizes (%ld vs %ld)", dstSliceSize, srcSliceSize); if (mismatch) { TORCH_WARN_ONCE( "Warning: source/destination slices have same size but different " "shape for an index operation. This behavior is deprecated.\n"); } return dstSliceSize; } // We prefer this kernel to avoid reloading index points if the number // of indices is a small number. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is large, then the // indexAddLargeIndex kernel is a better choice to increase // parallelism. template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim> __global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstAddDim, int srcAddDim, IndexType innerSize, int64_t dstAddDimSize, T alpha) { // In order to avoid reloading the index that we are copying, load // it once to handle all of the points that are being selected, so // it can be reused as much as possible. This kernel is chosen when // this is a good choice (small number of chosen indices), since // re-accessing indices in addition to src elements can be slow. for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) { // Lua indices begin at 1 IndexType dstIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)]; CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize); // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < innerSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst); dstOffset += dstIndex * dst.strides[dstAddDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src); srcOffset += srcIndex * src.strides[srcAddDim]; gpuAtomicAdd(&dst.data[dstOffset], src.data[srcOffset] * alpha); } } } // We prefer this kernel to balance parallelism across index points, // if there are a large number of indices. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is small, then the // indexAddSmallIndex kernel is a better choice to reduce memory // accesses. 
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim, bool IndexIsMajor> __global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstAddDim, int srcAddDim, IndexType totalSize, IndexType innerSize, int64_t dstAddDimSize, T alpha) { // We stride over the output including the indexed dimension // (totalSize), and calculate the destination index point based on that for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalSize; linearIndex += gridDim.x * blockDim.x) { IndexType srcIndex, elementInSlice; if (IndexIsMajor) { srcIndex = linearIndex / innerSize; elementInSlice = linearIndex % innerSize; } else { elementInSlice = linearIndex / innerSize; srcIndex = linearIndex % innerSize; } // Lua indices begin at 1 IndexType dstIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)]; CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize); IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst); dstOffset += dstIndex * dst.strides[dstAddDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src); srcOffset += srcIndex * src.strides[srcAddDim]; gpuAtomicAdd(&dst.data[dstOffset], src.data[srcOffset] * alpha); } } // Compare the stride between adjacent slices (sliceStride) with strides in the // other dimensions (i.e., strides *inside* each slice). // // - Returns true if some dimension inside the slice has lower stride than // sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim // == 0 (that is, each slice is a row). // // In this case, we choose the CUDA kernel that processes the data in // "index-major order". For example, if thread count equals slice size, then // all threads process slice #0 in lockstep, and then slice #1, and so on. // // - Otherwise (i.e., sliceStride has the lowest value), this function returns // false. The simplest example is a 2-D contiguous tensor with sliceDim == 1 // (each slice is a column). // // In this case, we choose the CUDA kernel that processes the data in // "elementInSlice-major order". For example, each thread can process element // #0 of every slice, and then element #1 of every slice, and so on. template <typename scalar_t> bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info, int sliceDim) { // The stride between adjacent slices (e.g., between element #0 of slice #100 // and element #0 of slice #101). 
unsigned int sliceStride = info.strides[sliceDim]; for (int i = 0; i < info.dims; ++i) { if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) { return true; } } return false; } Tensor& index_add_cuda_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const Scalar &alpha) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("index_add_cuda_"); dim = maybe_wrap_dim(dim, self.dim()); TensorArg self_arg{self, "self", 1}, index_arg{index, "index", 3}, source_arg{source, "source", 4}; checkAllSameGPU("index_add", {self_arg, index_arg, source_arg}); TORCH_CHECK_INDEX(index.dim() <= 1, "index_add_(): Index is supposed to be a vector"); TORCH_CHECK(index.scalar_type() == ScalarType::Long || index.scalar_type() == ScalarType::Int, "index_add_(): Expected dtype int32/int64 for index"); TORCH_CHECK(self.scalar_type() == source.scalar_type(), "index_add_(): self and source must have the same scalar type"); TORCH_CHECK(dim == 0 || dim < source.dim(), "index_add_(): Indexing dim ", dim, " is out of bounds of tensor"); TORCH_CHECK(index.numel() == (source.dim() == 0 ? 1 : source.size(dim)), "index_add_(): Number of indices should be equal to self.size(dim)"); at::assert_no_internal_overlap(self); at::assert_no_overlap(self, index); at::assert_no_overlap(self, source); // Scalars are treated as 1-d tensor Tensor self_ = (self.dim() == 0) ? self.view(1) : self; Tensor source_ = (source.dim() == 0) ? source.view(1) : source; TORCH_CHECK(self.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING); TORCH_CHECK(source.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING); TORCH_CHECK(index.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING); at::assert_no_internal_overlap(self); at::assert_no_partial_overlap(self, index); at::assert_no_partial_overlap(self, source); // The `source` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of index we are choosing, which is the total size // of the tensor `index`. ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_); ptrdiff_t sourceTotalSize = source.numel(); int64_t selfAddDimSize = self_.size(dim); ptrdiff_t numIndex = index.numel(); if (sliceSize == 0) { return self; } const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); bool indContig = index.is_contiguous(); int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM) \ indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM> \ <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \ selfInfo, sourceInfo, indexInfo, \ selfAddDim, sourceAddDim, sliceSize, selfAddDimSize, alpha_value); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); #define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \ SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR) \ indexAddLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \ SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR> \ <<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \ selfInfo, sourceInfo, indexInfo, \ selfAddDim, sourceAddDim, sourceTotalSize, \ (IDX_IS_MAJOR) ? 
sliceSize : numIndex, \ selfAddDimSize, alpha_value); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(std::min(THCCeilDiv(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(std::min(sourceTotalSize, (ptrdiff_t)128)); if (cuda::detail::canUse32BitIndexMath(self) && cuda::detail::canUse32BitIndexMath(source) && cuda::detail::canUse32BitIndexMath(index)) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] { cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo = cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_); int selfAddDim = selfInfo.collapseDims(dim); selfInfo.reduceDim(selfAddDim); auto alpha_value = alpha.to<scalar_t>(); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () { auto sourceInfo = cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_); int sourceAddDim = sourceInfo.collapseDims(dim); sourceInfo.reduceDim(sourceAddDim); auto indexInfo = cuda::detail::getTensorInfo<index_t, unsigned int>(index); indexInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // index to choose if (numIndex <= 16) { if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2); } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2); } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim); if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true); } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false); } } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true); } } }); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] { cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo = cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_); int selfAddDim = selfInfo.collapseDims(dim); selfInfo.reduceDim(selfAddDim); auto alpha_value = alpha.to<scalar_t>(); cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo = cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_); int sourceAddDim = sourceInfo.collapseDims(dim); sourceInfo.reduceDim(sourceAddDim); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () { cuda::detail::TensorInfo<index_t, uint64_t> indexInfo = cuda::detail::getTensorInfo<index_t, uint64_t>(index); indexInfo.collapseDims(); LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true); }); }); } return self; #undef SMALL_INDEX #undef LARGE_INDEX } namespace { // We prefer this kernel to avoid reloading index points if the number // 
of indices is a small number. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is large, then the // indexSelectLargeIndex kernel is a better choice to increase // parallelism. template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim> __global__ void indexSelectSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstSelectDim, int srcSelectDim, IndexType innerSize, int64_t srcSelectDimSize) { // In order to avoid reloading the index that we are copying, load // it once to handle all of the points that are being selected, so // it can be reused as much as possible. This kernel is chosen when // this is a good choice (small number of chosen indices), since // re-accessing indices in addition to src elements can be slow. for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) { IndexType srcIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)]; CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize); // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < innerSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst); dstOffset += dstIndex * dst.strides[dstSelectDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src); srcOffset += srcIndex * src.strides[srcSelectDim]; dst.data[dstOffset] = src.data[srcOffset]; } } } // We prefer this kernel to balance parallelism across index points, // if there are a large number of indices. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is small, then the // indexSelectSmallIndex kernel is a better choice to reduce memory // accesses. 
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim, bool IndexIsMajor> __global__ void indexSelectLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstSelectDim, int srcSelectDim, IndexType totalSize, IndexType innerSize, int64_t srcSelectDimSize) { // We stride over the output including the indexed dimension // (totalSize), and calculate the destination index point based on that for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstIndex, elementInSlice; if (IndexIsMajor) { dstIndex = linearIndex / innerSize; elementInSlice = linearIndex % innerSize; } else { elementInSlice = linearIndex / innerSize; dstIndex = linearIndex % innerSize; } IndexType srcIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)]; CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize); IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst); dstOffset += dstIndex * dst.strides[dstSelectDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src); srcOffset += srcIndex * src.strides[srcSelectDim]; dst.data[dstOffset] = src.data[srcOffset]; } } namespace { // When using a 0-dim scalar tensor, we need the legacy (THC) semantics of // TensorInfo: Pretend that the scalar tensor is in fact a one-element vector. template <typename T, typename IndexType> cuda::detail::TensorInfo<T, IndexType> tensorInfoLegacyIfScalar(cuda::detail::TensorInfo<T, IndexType> ti) { if (ti.dims == 0) { ti.dims = 1; ti.sizes[0] = 1; ti.strides[0] = 1; } return ti; } } template<typename scalar_t> void index_select_out_cuda_impl(Tensor& out, const Tensor& self, long dim, const Tensor& index) { ptrdiff_t numIndices = index.numel(); int selfDims = self.dim() == 0 ? 1 : self.dim(); const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); TORCH_CHECK(index.dim() <= 1, "Index is supposed to be an empty tensor or a vector"); TORCH_CHECK(dim < selfDims, "Indexing dim is out of bounds"); std::vector<int64_t> newSize = self.sizes().vec(); if (self.dim() > 0) { newSize[dim] = numIndices; } at::native::resize_(out, newSize, {}); ptrdiff_t outTotalSize = out.numel(); if (outTotalSize == 0) { return; } bool indContig = index.is_contiguous(); // The `self` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. int64_t selfSelectDimSize = self.dim() == 0 ? 
1 : self.size(dim); ptrdiff_t sliceSize = outTotalSize / numIndices; int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ indexSelectSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \ <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \ outInfo, selfInfo, indicesInfo, \ outSelectDim, selfSelectDim, static_cast<TYPE>(sliceSize), \ selfSelectDimSize); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); #define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \ indexSelectLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \ <<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \ outInfo, selfInfo, indicesInfo, \ outSelectDim, selfSelectDim, static_cast<TYPE>(outTotalSize), \ static_cast<TYPE>((IDX_IS_MAJOR) ? sliceSize : numIndices), \ selfSelectDimSize); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(std::min(THCCeilDiv(outTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(std::min(outTotalSize, (ptrdiff_t)128)); if (cuda::detail::canUse32BitIndexMath(out) && cuda::detail::canUse32BitIndexMath(self) && cuda::detail::canUse32BitIndexMath(index)) { auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(out)); int outSelectDim = outInfo.collapseDims(dim); outInfo.reduceDim(outSelectDim); auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(self)); int selfSelectDim = selfInfo.collapseDims(dim); selfInfo.reduceDim(selfSelectDim); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () { auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, unsigned int>(index)); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2); } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2); } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = indexShouldBeMajor(outInfo, outSelectDim); if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true); } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false); } } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true); } } }); } else { auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(out)); int outSelectDim = outInfo.collapseDims(dim); outInfo.reduceDim(outSelectDim); auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(self)); int selfSelectDim = 
selfInfo.collapseDims(dim); selfInfo.reduceDim(selfSelectDim); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () { auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, uint64_t>(index)); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true); }); } #undef SMALL_INDEX #undef LARGE_INDEX } } // anonymous namespace Tensor& index_select_out_cuda(const Tensor& self, int64_t dim, const Tensor& index, Tensor& out) { static constexpr string_view DIM_WARNING = "Tensor too large or too many (> 25) dimensions"; TORCH_CHECK(at::cuda::check_device({out, self, index}), "Input, output and indices must be on the current device"); at::assert_no_internal_overlap(out); at::assert_no_overlap(out, self); at::assert_no_overlap(out, index); dim = at::maybe_wrap_dim(dim, self); TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING); TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, out.scalar_type(), "index_select_cuda", [&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); }); return out; } Tensor index_select_cuda(const Tensor& self, int64_t dim, const Tensor& index) { Tensor out = at::empty({0}, self.options()); at::native::index_select_out_cuda(self, dim, index, out); return out; } namespace { template <typename mask_t> void masked_fill_kernel(TensorIterator& iter, const Scalar& value) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( kBool, kHalf, kBFloat16, iter.common_dtype(), "masked_fill_", [&]() { const auto value_ = value.to<scalar_t>(); gpu_kernel( iter, [value_] GPU_LAMBDA(scalar_t self, mask_t mask) -> scalar_t { if (mask) { return value_; } return self; }); }); } } // anonymous namespace Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Scalar& value) { TORCH_CHECK(self.device() == mask.device(), "expected self and mask to be on the same device, but got mask on ", mask.device(), " and self on ", self.device()); TORCH_CHECK(mask.scalar_type() == kByte || mask.scalar_type() == kBool, "expected mask dtype to be Bool but got ", mask.scalar_type()); auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_"); if (at::has_internal_overlap(self) == MemOverlap::YES) { TORCH_WARN( "Use of masked_fill_ on expanded tensors is deprecated. " "Please clone() the tensor before performing this operation. " "This also applies to advanced indexing e.g. tensor[mask] = scalar"); } at::assert_no_partial_overlap(self, mask); Tensor b_mask; std::tie(b_mask) = expand_inplace(self, mask, "masked_fill_"); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self) .add_input(self) .add_input(b_mask) .build(); if (b_mask.dtype() == at::ScalarType::Byte) { TORCH_WARN("masked_fill_ received a mask with dtype torch.uint8, this behavior is now deprecated," \ "please use a mask with dtype torch.bool instead."); masked_fill_kernel<uint8_t>(iter, value); } else { masked_fill_kernel<bool>(iter, value); } namedinference::propagate_names_if_nonempty(self, maybe_outnames); return self; } Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Tensor & value) { TORCH_CHECK(value.dim() == 0, "masked_fill_ only supports a 0-dimensional value tensor, but got tensor " "with ", value.dim(), " dimension(s)."); return masked_fill__cuda(self, mask, value.item()); } } // native } // at
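The index_add path in the file above picks between a "small index" kernel (serial loop over the chosen indices, threads parallelize over each slice) and a "large index" kernel (grid-stride loop over indices × slice size, accumulating with gpuAtomicAdd, which is why the op is flagged as nondeterministic). The toy kernel below is a minimal sketch of that second strategy for contiguous row-major 2-D data only; all names and shapes are illustrative, and it is not the ATen implementation.

// Toy "large index" index_add for contiguous row-major data (illustrative only).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void toyIndexAdd(float *dst, const float *src, const long *idx,
                            long numIndices, long sliceSize, float alpha)
{
    long total = numIndices * sliceSize;
    for (long linear = blockIdx.x * (long)blockDim.x + threadIdx.x;
         linear < total;
         linear += (long)gridDim.x * blockDim.x) {
        long srcRow = linear / sliceSize;   // which selected source row
        long col    = linear % sliceSize;   // element inside the slice
        long dstRow = idx[srcRow];          // destination row for that source row
        // atomicAdd tolerates duplicate indices (the reason for the nondeterminism alert above)
        atomicAdd(&dst[dstRow * sliceSize + col], alpha * src[srcRow * sliceSize + col]);
    }
}

int main()
{
    const long rows = 4, cols = 3, nIdx = 2;
    float h_dst[rows * cols] = {0}, h_src[nIdx * cols] = {1, 1, 1, 2, 2, 2};
    long  h_idx[nIdx] = {1, 1};             // duplicate index: both source rows add into dst row 1
    float *d_dst, *d_src;
    long  *d_idx;
    cudaMalloc(&d_dst, sizeof(h_dst)); cudaMemcpy(d_dst, h_dst, sizeof(h_dst), cudaMemcpyHostToDevice);
    cudaMalloc(&d_src, sizeof(h_src)); cudaMemcpy(d_src, h_src, sizeof(h_src), cudaMemcpyHostToDevice);
    cudaMalloc(&d_idx, sizeof(h_idx)); cudaMemcpy(d_idx, h_idx, sizeof(h_idx), cudaMemcpyHostToDevice);
    toyIndexAdd<<<2, 128>>>(d_dst, d_src, d_idx, nIdx, cols, 1.0f);
    cudaMemcpy(h_dst, d_dst, sizeof(h_dst), cudaMemcpyDeviceToHost);
    printf("dst row 1: %g %g %g\n", h_dst[3], h_dst[4], h_dst[5]);   // expect 3 3 3
    cudaFree(d_dst); cudaFree(d_src); cudaFree(d_idx);
    return 0;
}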
1a900d56ed377cf37fa9d8a7cc5fe810414377a1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/magma_zpreselect.cu, normal z -> s, Mon Jun 25 18:24:25 2018 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 // kernel copying everything except the last element __global__ void magma_spreselect_gpu0( magma_int_t num_rows, magmaIndex_ptr row, float *val, float *valn) { int tidx = threadIdx.x; int bidx = blockIdx.x; int gtidx = bidx * blockDim.x + tidx; if (gtidx < num_rows) { for (int i=row[gtidx]; i<row[gtidx+1]-1; i++){ valn[i-gtidx] = val[i]; } } } // kernel copying everything except the first element __global__ void magma_spreselect_gpu1( magma_int_t num_rows, magmaIndex_ptr row, float *val, float *valn) { int tidx = threadIdx.x; int bidx = blockIdx.x; int gtidx = bidx * blockDim.x + tidx; if (gtidx < num_rows) { for (int i=row[gtidx]+1; i<row[gtidx+1]; i++){ valn[i-gtidx] = val[i]; } } } /***************************************************************************//** Purpose ------- This function takes a list of candidates with residuals, and selects the largest in every row. The output matrix only contains these largest elements (respectively a zero element if there is no candidate for a certain row). Arguments --------- @param[in] order magma_int_t order==0 lower triangular order==1 upper triangular @param[in] A magma_s_matrix* Matrix where elements are removed. @param[out] oneA magma_s_matrix* Matrix where elements are removed. @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_saux *******************************************************************************/ extern "C" magma_int_t magma_spreselect_gpu( magma_int_t order, magma_s_matrix *A, magma_s_matrix *oneA, magma_queue_t queue ) { magma_int_t info = 0; dim3 block(BLOCK_SIZE, 1, 1); dim3 grid(magma_ceildiv(A->num_rows, BLOCK_SIZE), 1, 1); oneA->num_rows = A->num_rows; oneA->num_cols = A->num_cols; oneA->nnz = A->nnz - A->num_rows; oneA->storage_type = Magma_CSR; oneA->memory_location = Magma_DEV; CHECK( magma_smalloc( &oneA->dval, oneA->nnz ) ); if( order == 1 ){ // don't copy the first hipLaunchKernelGGL(( magma_spreselect_gpu1), dim3(grid), dim3(block), 0, queue->cuda_stream(), A->num_rows, A->drow, A->dval, oneA->dval ); // #pragma omp parallel for // for( magma_int_t row=0; row<A->num_rows; row++){ // for( magma_int_t i=A->row[row]+1; i<A->row[row+1]; i++ ){ // oneA->val[ i-row ] = A->val[i]; // } // } } else { // don't copy the last hipLaunchKernelGGL(( magma_spreselect_gpu0), dim3(grid), dim3(block), 0, queue->cuda_stream(), A->num_rows, A->drow, A->dval, oneA->dval ); // #pragma omp parallel for // for( magma_int_t row=0; row<A->num_rows; row++){ // for( magma_int_t i=A->row[row]; i<A->row[row+1]-1; i++ ){ // oneA->val[ i-row ] = A->val[i]; // } // } } cleanup: return info; }
1a900d56ed377cf37fa9d8a7cc5fe810414377a1.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/magma_zpreselect.cu, normal z -> s, Mon Jun 25 18:24:25 2018 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 // kernel copying everything except the last element __global__ void magma_spreselect_gpu0( magma_int_t num_rows, magmaIndex_ptr row, float *val, float *valn) { int tidx = threadIdx.x; int bidx = blockIdx.x; int gtidx = bidx * blockDim.x + tidx; if (gtidx < num_rows) { for (int i=row[gtidx]; i<row[gtidx+1]-1; i++){ valn[i-gtidx] = val[i]; } } } // kernel copying everything except the first element __global__ void magma_spreselect_gpu1( magma_int_t num_rows, magmaIndex_ptr row, float *val, float *valn) { int tidx = threadIdx.x; int bidx = blockIdx.x; int gtidx = bidx * blockDim.x + tidx; if (gtidx < num_rows) { for (int i=row[gtidx]+1; i<row[gtidx+1]; i++){ valn[i-gtidx] = val[i]; } } } /***************************************************************************//** Purpose ------- This function takes a list of candidates with residuals, and selects the largest in every row. The output matrix only contains these largest elements (respectively a zero element if there is no candidate for a certain row). Arguments --------- @param[in] order magma_int_t order==0 lower triangular order==1 upper triangular @param[in] A magma_s_matrix* Matrix where elements are removed. @param[out] oneA magma_s_matrix* Matrix where elements are removed. @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_saux *******************************************************************************/ extern "C" magma_int_t magma_spreselect_gpu( magma_int_t order, magma_s_matrix *A, magma_s_matrix *oneA, magma_queue_t queue ) { magma_int_t info = 0; dim3 block(BLOCK_SIZE, 1, 1); dim3 grid(magma_ceildiv(A->num_rows, BLOCK_SIZE), 1, 1); oneA->num_rows = A->num_rows; oneA->num_cols = A->num_cols; oneA->nnz = A->nnz - A->num_rows; oneA->storage_type = Magma_CSR; oneA->memory_location = Magma_DEV; CHECK( magma_smalloc( &oneA->dval, oneA->nnz ) ); if( order == 1 ){ // don't copy the first magma_spreselect_gpu1<<<grid, block, 0, queue->cuda_stream()>>> ( A->num_rows, A->drow, A->dval, oneA->dval ); // #pragma omp parallel for // for( magma_int_t row=0; row<A->num_rows; row++){ // for( magma_int_t i=A->row[row]+1; i<A->row[row+1]; i++ ){ // oneA->val[ i-row ] = A->val[i]; // } // } } else { // don't copy the last magma_spreselect_gpu0<<<grid, block, 0, queue->cuda_stream()>>> ( A->num_rows, A->drow, A->dval, oneA->dval ); // #pragma omp parallel for // for( magma_int_t row=0; row<A->num_rows; row++){ // for( magma_int_t i=A->row[row]; i<A->row[row+1]-1; i++ ){ // oneA->val[ i-row ] = A->val[i]; // } // } } cleanup: return info; }
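The main mechanical difference between the .hip and .cu versions of magma_spreselect above is the kernel-launch syntax: the hipified file wraps each triple-chevron launch in the hipLaunchKernelGGL macro. Below is a minimal, self-contained CUDA sketch of that launch form using a hypothetical kernel named scale (not part of MAGMA), with the corresponding HIP macro form shown in a comment.

// Hypothetical kernel, illustrative only: doubles every element of x.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(int n, float *x)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= 2.0f;
}

int main()
{
    const int n = 8;
    float h[n] = {1, 2, 3, 4, 5, 6, 7, 8};
    float *d;
    cudaMalloc(&d, n * sizeof(float));
    cudaMemcpy(d, h, n * sizeof(float), cudaMemcpyHostToDevice);
    dim3 block(256, 1, 1);
    dim3 grid((n + 255) / 256, 1, 1);
    // CUDA form, as in the .cu file above:
    scale<<<grid, block, 0, 0>>>(n, d);
    // Equivalent HIP macro form, as in the .hip file above:
    // hipLaunchKernelGGL(scale, grid, block, 0, 0, n, d);
    cudaMemcpy(h, d, n * sizeof(float), cudaMemcpyDeviceToHost);
    printf("%g ... %g\n", h[0], h[n - 1]);   // expect 2 ... 16
    cudaFree(d);
    return 0;
}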
a32e28db52c84b58948f72d8e6c06ca8bcb38362.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <hip/hip_runtime.h> #include <cusparse_v2.h> #include "rocblas.h" #include <hiprand/hiprand.h> #include <helper_functions.h> #include <helper_cuda.h> #include "mex.h" #include "kcDefs.h" //see for info on anything starting with KC_ #include "kcArrayFunctions.h" //rough summing kernel (does not need to be efficient) __global__ void kcSumLangevinVars(KC_FP_TYPE * der, KC_FP_TYPE * der_sum, KC_FP_TYPE * G, KC_FP_TYPE * G_sum, KC_FP_TYPE * ll, KC_FP_TYPE * ll_sum, int * mBlkIdx, int NT, int NH, KC_FP_TYPE * gPrior, KC_FP_TYPE * lPrior) { int nsum = blockIdx.x*blockDim.x+threadIdx.x; if(nsum == 0) { for(int idx = 0; idx < NH+1; idx++){ der_sum[idx]=lPrior[idx]; } for(int idx = 0; idx < NT; idx++) { for(int idx2 = 0; idx2 < NH+1; idx2++) { der_sum[idx2] += der[idx+NT*idx2]; } } } else if(nsum == 1) { for(int idx = 0; idx < NH+1; idx++) { for(int idx2 = 0; idx2 < NH+1; idx2++) { G_sum[idx*(NH+1)+idx2] = 0; G_sum[idx*(NH+1)+idx2] = gPrior[idx*(NH+1)+idx2]; } } for(int idx = 0; idx < NT; idx++) { for(int idx2 = 0; idx2 < NH+1; idx2++) { for(int idx3 =0; idx3 < NH+1; idx3++) { G_sum[idx2*(NH+1)+idx3] -= G[idx+(idx2*(NH+1)+idx3)*NT]; } } } } else if(nsum == 2) { ll_sum[0] = 0; for(int idx = 0; idx < NT; idx++) { ll_sum[0] += ll[idx]; } } } //derivates of firing rate function w.r.t. gamma and history filters (assuming fixed latent variables) __device__ KC_FP_TYPE h(KC_FP_TYPE lambda, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE modelInd, KC_FP_TYPE sh) { if (modelInd > 1e-10) { return KC_MIN(KC_POW(log(1.0+exp(lambda*gamma+sh)),modelInd)*dt,KC_MAXN); } else { return KC_MIN(exp(lambda*gamma+sh)*dt,KC_MAXN); } } __device__ KC_FP_TYPE dh(KC_FP_TYPE lambda, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE modelInd, KC_FP_TYPE sh, KC_FP_TYPE mult) { if (modelInd > 1e-10) { KC_FP_TYPE logex = KC_MIN(log(1.0+exp(lambda*gamma+sh)),KC_MAXN); KC_FP_TYPE log_der = KC_MIN(mult/(1+exp(-lambda*gamma-sh)),KC_MAXN); KC_FP_TYPE der = modelInd*KC_POW(logex,modelInd-1)*log_der; return KC_MIN(der*dt,KC_MAXN); } else { return KC_MIN(dt*mult*KC_EXP(gamma*lambda+sh),KC_MAXN); } } // computes log p(single trial | gamma, fixed lambdas, spike history) __global__ void kcBoundaryLikelihoodTrialHist(KC_FP_TYPE * y, KC_FP_TYPE * spe, KC_FP_TYPE * lambdas, int * crossingTimes, int * mBlkIdx, KC_FP_TYPE g, KC_FP_TYPE dt, int NT, KC_FP_TYPE * llSum, KC_FP_TYPE * trialSum, KC_FP_TYPE * trialSumRiemann, KC_FP_TYPE * h_filt, KC_FP_TYPE * y_hist, int NH, KC_FP_TYPE modelInd) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx < NT) { for(int jj = 0; jj<NH+1; jj++){ trialSum[idx+jj*NT]=0; for(int kk = 0; kk<NH+1; kk++){ trialSumRiemann[idx+(jj*(NH+1)+kk)*NT]=0; } } llSum[idx] = 0; for(int ii = mBlkIdx[idx]; ii < mBlkIdx[idx+1]; ii++) { KC_FP_TYPE trueLambda = fmin(1, ((ii-mBlkIdx[idx]) < crossingTimes[idx])?lambdas[ii]:1); //KC_FP_TYPE trueLambda = fmin(1, lambdas[ii]); KC_FP_TYPE sh = spe[ii]; KC_FP_TYPE r = KC_MAX(KC_MINN,h(trueLambda,g,1,modelInd,sh)); llSum[idx] += y[ii]*(KC_LOG(r)+KC_LOG(dt)) - dt*r -KC_GAMMALN(y[ii]+1.0); for(int jj = 0; jj < NH+1 ; jj++) { KC_FP_TYPE yh1 = 0; // if index is one of the first NH indices of y, the spike history depends on spikes in the time before the analyzed spike train y // in that case, we want the ii - jj spike of the y history if(jj < NH && ii<(mBlkIdx[idx]+jj+1)) { yh1 = y_hist[NH*(idx+1) + (ii-mBlkIdx[idx]) - jj-1]; } else if(jj < NH) { yh1 = y[ii-jj-1]; 
} else if(jj == NH) { yh1 = trueLambda; } KC_FP_TYPE dr = dh(trueLambda,g,1,modelInd,sh,yh1); trialSum[idx+jj*NT] += (y[ii]/r-dt)*dr; //for(int kk = jj+1; kk < NH+1; kk++) for(int kk = 0; kk < NH+1; kk++) { KC_FP_TYPE yh2 = 0; // if index is one of the first NH indices of y, the spike history depends on spikes in the time before the analyzed spike train y // in that case, we want the ii - jj spike of the y history if(kk < NH && ii<(mBlkIdx[idx]+kk+1)) { yh2 = y_hist[NH*(idx+1) + (ii-mBlkIdx[idx]) - kk -1]; } else if(kk < NH) { yh2 = y[ii-kk-1]; } else if(kk == NH) { yh2 = trueLambda; } KC_FP_TYPE dr2 = dh(trueLambda,g,1,modelInd,sh,yh2); trialSumRiemann[idx+(jj*(NH+1)+kk)*NT] += KC_MIN(-1*dt*dr*dr2/r,KC_MAXN); } } } } } //Computes the the log probability of a set of spike trains under the ramping model given a fixed set of latent variable // as a function of \gamma (the bound height) along with first/second derivates w.r.t. \gamma //args // 0 = lambda (latent variables, on GPU. Same size as y) // 1 = auxillary variable - threshold crossing time (latent variable boundary crossing time, on GPU. vector length number of trials: NT) // 2 = y (observations, on GPU) // 3 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. Includes final value that should be length of y) // 4 = g (absorbing boundary effective height) // 5 = spe (spike history effect, TT x 1) // 6 = dt (bin size in seconds) // 7 = gPrior (Fisher information of log prior probability of filters and gamma) // 8 = spike history filters // 9 = spike history (spikes before start of trials, NH*NT x 1) // 10 = lPrior (derivative of log prior probability of filters and gamma) // 11 = modelInd (power if use log1p transfer function, 0 if using exp) // //outputs (left-hand side) // 0 = log p(y|lambdas,gamma) // 1 = d/dg log p(y|lambdas,gamma) // 2 = d^2/d^2g log p(y|lambdas,gamma) void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { hipError_t ce; //loads up trial information unsigned int TT = kcGetArrayNumEl(prhs[0]); int * crossingTimes = kcGetArrayDataInt(prhs[1]); KC_FP_TYPE * y = kcGetArrayData(prhs[2],TT); int * trIdx = kcGetArrayDataInt(prhs[3]); unsigned int NT = kcGetArrayNumEl(prhs[3])-1; KC_FP_TYPE dt = mxGetScalar(prhs[6]); //loads gamma and latent variables KC_FP_TYPE g = mxGetScalar(prhs[4]); KC_FP_TYPE * lambda = kcGetArrayData(prhs[0]); //loads spike history effect KC_FP_TYPE * spe = kcGetArrayData(prhs[5],TT); int NH = mxGetNumberOfElements(prhs[8]); //loads Fisher information prior if(mxGetClassID(prhs[7]) != KC_FP_TYPE_MATLAB) { mexErrMsgTxt("Prior matrix input wrong floating point type (kcLangevinStep)!"); } KC_FP_TYPE * gPrior; checkCudaErrors(hipMalloc((void**)&gPrior,sizeof(KC_FP_TYPE)*(NH+1)*(NH+1))); checkCudaErrors(hipMemcpy(gPrior,(KC_FP_TYPE*)mxGetPr(prhs[7]),sizeof(KC_FP_TYPE)*((NH+1)*(NH+1)),hipMemcpyHostToDevice)); //loads derivative of log prior probability of parameters if(mxGetClassID(prhs[10]) != KC_FP_TYPE_MATLAB) { mexErrMsgTxt("Prior matrix input wrong floating point type (kcLangevinStep)!"); } KC_FP_TYPE * lPrior; checkCudaErrors(hipMalloc((void**)&lPrior,sizeof(KC_FP_TYPE)*(NH+1))); checkCudaErrors(hipMemcpy(lPrior,(KC_FP_TYPE*)mxGetPr(prhs[10]),sizeof(KC_FP_TYPE)*(NH+1),hipMemcpyHostToDevice)); //loads filter values KC_FP_TYPE * h_filt; checkCudaErrors(hipMalloc((void**)&h_filt,sizeof(KC_FP_TYPE)*NH)); checkCudaErrors(hipMemcpy(h_filt,(KC_FP_TYPE*)mxGetPr(prhs[8]),sizeof(KC_FP_TYPE)*NH,hipMemcpyHostToDevice)); //loads spike history before 
trials KC_FP_TYPE * y_hist = kcGetArrayData(prhs[9],NH*NT); KC_FP_TYPE modelInd = mxGetScalar(prhs[11]); //sets up space for computations on GPU KC_FP_TYPE * der_log_p_y; checkCudaErrors(hipMalloc((void**)&der_log_p_y,sizeof(KC_FP_TYPE)*(NT)*(NH+1))); KC_FP_TYPE * der_log_p_y_sum; checkCudaErrors(hipMalloc((void**)&der_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+1)*1)); KC_FP_TYPE * log_p_y; checkCudaErrors(hipMalloc((void**)&log_p_y,sizeof(KC_FP_TYPE)*NT)); KC_FP_TYPE * log_p_y_sum; checkCudaErrors(hipMalloc((void**)&log_p_y_sum,sizeof(KC_FP_TYPE)*1)); KC_FP_TYPE * G_log_p_y1; checkCudaErrors(hipMalloc((void**)&G_log_p_y1,sizeof(KC_FP_TYPE)*(NT)*(NH+1)*(NH+1))); KC_FP_TYPE * G_log_p_y_sum; checkCudaErrors(hipMalloc((void**)&G_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+1)*(NH+1))); //sets up CUDA variables int blockSize = 2; int numBlocks = (int)NT/(int)blockSize + ((NT%blockSize==0)?0:1); //gets each trials likelihood + derivatives of filter hipLaunchKernelGGL(( kcBoundaryLikelihoodTrialHist), dim3(numBlocks),dim3(blockSize) , 0, 0, y,spe,lambda,crossingTimes,trIdx,g,dt, NT,log_p_y,der_log_p_y,G_log_p_y1,h_filt,y_hist,NH,modelInd); checkCudaErrors(hipDeviceSynchronize()); //sums up all the trials' likelihoods and derivatives with respect to gamma int nBlocksC = 3; int blockSizeC = 1; hipLaunchKernelGGL(( kcSumLangevinVars) , dim3(nBlocksC),dim3(blockSizeC) , 0, 0, der_log_p_y, der_log_p_y_sum, G_log_p_y1, G_log_p_y_sum, log_p_y, log_p_y_sum, trIdx, NT, NH, gPrior, lPrior); checkCudaErrors(hipDeviceSynchronize()); //pushes answers back to MATLAB if(nlhs > 0) { plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(plhs[0]),log_p_y_sum,sizeof(KC_FP_TYPE)*1,hipMemcpyDeviceToHost)); } if(nlhs > 1) { plhs[1] = mxCreateNumericMatrix(NH+1,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(plhs[1]),der_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+1)*(1),hipMemcpyDeviceToHost)); } if(nlhs > 2) { plhs[2] = mxCreateNumericMatrix(NH+1,NH+1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(plhs[2]),G_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+1)*(NH+1),hipMemcpyDeviceToHost)); } //clears up GPU variables checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipFree(log_p_y)); checkCudaErrors(hipFree(log_p_y_sum)); checkCudaErrors(hipFree(der_log_p_y)); checkCudaErrors(hipFree(der_log_p_y_sum)); checkCudaErrors(hipFree(G_log_p_y1)); checkCudaErrors(hipFree(G_log_p_y_sum)); checkCudaErrors(hipFree(h_filt)); checkCudaErrors(hipFree(lPrior)); checkCudaErrors(hipFree(gPrior)); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error at the end of kcLangevinStep.cu "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA errors"); } }
a32e28db52c84b58948f72d8e6c06ca8bcb38362.cu
#include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <cuda_runtime.h> #include <cusparse_v2.h> #include "cublas_v2.h" #include <curand.h> #include <helper_functions.h> #include <helper_cuda.h> #include "mex.h" #include "kcDefs.h" //see for info on anything starting with KC_ #include "kcArrayFunctions.h" //rough summing kernel (does not need to be efficient) __global__ void kcSumLangevinVars(KC_FP_TYPE * der, KC_FP_TYPE * der_sum, KC_FP_TYPE * G, KC_FP_TYPE * G_sum, KC_FP_TYPE * ll, KC_FP_TYPE * ll_sum, int * mBlkIdx, int NT, int NH, KC_FP_TYPE * gPrior, KC_FP_TYPE * lPrior) { int nsum = blockIdx.x*blockDim.x+threadIdx.x; if(nsum == 0) { for(int idx = 0; idx < NH+1; idx++){ der_sum[idx]=lPrior[idx]; } for(int idx = 0; idx < NT; idx++) { for(int idx2 = 0; idx2 < NH+1; idx2++) { der_sum[idx2] += der[idx+NT*idx2]; } } } else if(nsum == 1) { for(int idx = 0; idx < NH+1; idx++) { for(int idx2 = 0; idx2 < NH+1; idx2++) { G_sum[idx*(NH+1)+idx2] = 0; G_sum[idx*(NH+1)+idx2] = gPrior[idx*(NH+1)+idx2]; } } for(int idx = 0; idx < NT; idx++) { for(int idx2 = 0; idx2 < NH+1; idx2++) { for(int idx3 =0; idx3 < NH+1; idx3++) { G_sum[idx2*(NH+1)+idx3] -= G[idx+(idx2*(NH+1)+idx3)*NT]; } } } } else if(nsum == 2) { ll_sum[0] = 0; for(int idx = 0; idx < NT; idx++) { ll_sum[0] += ll[idx]; } } } //derivates of firing rate function w.r.t. gamma and history filters (assuming fixed latent variables) __device__ KC_FP_TYPE h(KC_FP_TYPE lambda, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE modelInd, KC_FP_TYPE sh) { if (modelInd > 1e-10) { return KC_MIN(KC_POW(log(1.0+exp(lambda*gamma+sh)),modelInd)*dt,KC_MAXN); } else { return KC_MIN(exp(lambda*gamma+sh)*dt,KC_MAXN); } } __device__ KC_FP_TYPE dh(KC_FP_TYPE lambda, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE modelInd, KC_FP_TYPE sh, KC_FP_TYPE mult) { if (modelInd > 1e-10) { KC_FP_TYPE logex = KC_MIN(log(1.0+exp(lambda*gamma+sh)),KC_MAXN); KC_FP_TYPE log_der = KC_MIN(mult/(1+exp(-lambda*gamma-sh)),KC_MAXN); KC_FP_TYPE der = modelInd*KC_POW(logex,modelInd-1)*log_der; return KC_MIN(der*dt,KC_MAXN); } else { return KC_MIN(dt*mult*KC_EXP(gamma*lambda+sh),KC_MAXN); } } // computes log p(single trial | gamma, fixed lambdas, spike history) __global__ void kcBoundaryLikelihoodTrialHist(KC_FP_TYPE * y, KC_FP_TYPE * spe, KC_FP_TYPE * lambdas, int * crossingTimes, int * mBlkIdx, KC_FP_TYPE g, KC_FP_TYPE dt, int NT, KC_FP_TYPE * llSum, KC_FP_TYPE * trialSum, KC_FP_TYPE * trialSumRiemann, KC_FP_TYPE * h_filt, KC_FP_TYPE * y_hist, int NH, KC_FP_TYPE modelInd) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx < NT) { for(int jj = 0; jj<NH+1; jj++){ trialSum[idx+jj*NT]=0; for(int kk = 0; kk<NH+1; kk++){ trialSumRiemann[idx+(jj*(NH+1)+kk)*NT]=0; } } llSum[idx] = 0; for(int ii = mBlkIdx[idx]; ii < mBlkIdx[idx+1]; ii++) { KC_FP_TYPE trueLambda = fmin(1, ((ii-mBlkIdx[idx]) < crossingTimes[idx])?lambdas[ii]:1); //KC_FP_TYPE trueLambda = fmin(1, lambdas[ii]); KC_FP_TYPE sh = spe[ii]; KC_FP_TYPE r = KC_MAX(KC_MINN,h(trueLambda,g,1,modelInd,sh)); llSum[idx] += y[ii]*(KC_LOG(r)+KC_LOG(dt)) - dt*r -KC_GAMMALN(y[ii]+1.0); for(int jj = 0; jj < NH+1 ; jj++) { KC_FP_TYPE yh1 = 0; // if index is one of the first NH indices of y, the spike history depends on spikes in the time before the analyzed spike train y // in that case, we want the ii - jj spike of the y history if(jj < NH && ii<(mBlkIdx[idx]+jj+1)) { yh1 = y_hist[NH*(idx+1) + (ii-mBlkIdx[idx]) - jj-1]; } else if(jj < NH) { yh1 = y[ii-jj-1]; } else if(jj == NH) { yh1 = trueLambda; } KC_FP_TYPE dr = 
dh(trueLambda,g,1,modelInd,sh,yh1); trialSum[idx+jj*NT] += (y[ii]/r-dt)*dr; //for(int kk = jj+1; kk < NH+1; kk++) for(int kk = 0; kk < NH+1; kk++) { KC_FP_TYPE yh2 = 0; // if index is one of the first NH indices of y, the spike history depends on spikes in the time before the analyzed spike train y // in that case, we want the ii - jj spike of the y history if(kk < NH && ii<(mBlkIdx[idx]+kk+1)) { yh2 = y_hist[NH*(idx+1) + (ii-mBlkIdx[idx]) - kk -1]; } else if(kk < NH) { yh2 = y[ii-kk-1]; } else if(kk == NH) { yh2 = trueLambda; } KC_FP_TYPE dr2 = dh(trueLambda,g,1,modelInd,sh,yh2); trialSumRiemann[idx+(jj*(NH+1)+kk)*NT] += KC_MIN(-1*dt*dr*dr2/r,KC_MAXN); } } } } } //Computes the the log probability of a set of spike trains under the ramping model given a fixed set of latent variable // as a function of \gamma (the bound height) along with first/second derivates w.r.t. \gamma //args // 0 = lambda (latent variables, on GPU. Same size as y) // 1 = auxillary variable - threshold crossing time (latent variable boundary crossing time, on GPU. vector length number of trials: NT) // 2 = y (observations, on GPU) // 3 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. Includes final value that should be length of y) // 4 = g (absorbing boundary effective height) // 5 = spe (spike history effect, TT x 1) // 6 = dt (bin size in seconds) // 7 = gPrior (Fisher information of log prior probability of filters and gamma) // 8 = spike history filters // 9 = spike history (spikes before start of trials, NH*NT x 1) // 10 = lPrior (derivative of log prior probability of filters and gamma) // 11 = modelInd (power if use log1p transfer function, 0 if using exp) // //outputs (left-hand side) // 0 = log p(y|lambdas,gamma) // 1 = d/dg log p(y|lambdas,gamma) // 2 = d^2/d^2g log p(y|lambdas,gamma) void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { cudaError_t ce; //loads up trial information unsigned int TT = kcGetArrayNumEl(prhs[0]); int * crossingTimes = kcGetArrayDataInt(prhs[1]); KC_FP_TYPE * y = kcGetArrayData(prhs[2],TT); int * trIdx = kcGetArrayDataInt(prhs[3]); unsigned int NT = kcGetArrayNumEl(prhs[3])-1; KC_FP_TYPE dt = mxGetScalar(prhs[6]); //loads gamma and latent variables KC_FP_TYPE g = mxGetScalar(prhs[4]); KC_FP_TYPE * lambda = kcGetArrayData(prhs[0]); //loads spike history effect KC_FP_TYPE * spe = kcGetArrayData(prhs[5],TT); int NH = mxGetNumberOfElements(prhs[8]); //loads Fisher information prior if(mxGetClassID(prhs[7]) != KC_FP_TYPE_MATLAB) { mexErrMsgTxt("Prior matrix input wrong floating point type (kcLangevinStep)!"); } KC_FP_TYPE * gPrior; checkCudaErrors(cudaMalloc((void**)&gPrior,sizeof(KC_FP_TYPE)*(NH+1)*(NH+1))); checkCudaErrors(cudaMemcpy(gPrior,(KC_FP_TYPE*)mxGetPr(prhs[7]),sizeof(KC_FP_TYPE)*((NH+1)*(NH+1)),cudaMemcpyHostToDevice)); //loads derivative of log prior probability of parameters if(mxGetClassID(prhs[10]) != KC_FP_TYPE_MATLAB) { mexErrMsgTxt("Prior matrix input wrong floating point type (kcLangevinStep)!"); } KC_FP_TYPE * lPrior; checkCudaErrors(cudaMalloc((void**)&lPrior,sizeof(KC_FP_TYPE)*(NH+1))); checkCudaErrors(cudaMemcpy(lPrior,(KC_FP_TYPE*)mxGetPr(prhs[10]),sizeof(KC_FP_TYPE)*(NH+1),cudaMemcpyHostToDevice)); //loads filter values KC_FP_TYPE * h_filt; checkCudaErrors(cudaMalloc((void**)&h_filt,sizeof(KC_FP_TYPE)*NH)); checkCudaErrors(cudaMemcpy(h_filt,(KC_FP_TYPE*)mxGetPr(prhs[8]),sizeof(KC_FP_TYPE)*NH,cudaMemcpyHostToDevice)); //loads spike history before trials KC_FP_TYPE * y_hist = 
kcGetArrayData(prhs[9],NH*NT); KC_FP_TYPE modelInd = mxGetScalar(prhs[11]); //sets up space for computations on GPU KC_FP_TYPE * der_log_p_y; checkCudaErrors(cudaMalloc((void**)&der_log_p_y,sizeof(KC_FP_TYPE)*(NT)*(NH+1))); KC_FP_TYPE * der_log_p_y_sum; checkCudaErrors(cudaMalloc((void**)&der_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+1)*1)); KC_FP_TYPE * log_p_y; checkCudaErrors(cudaMalloc((void**)&log_p_y,sizeof(KC_FP_TYPE)*NT)); KC_FP_TYPE * log_p_y_sum; checkCudaErrors(cudaMalloc((void**)&log_p_y_sum,sizeof(KC_FP_TYPE)*1)); KC_FP_TYPE * G_log_p_y1; checkCudaErrors(cudaMalloc((void**)&G_log_p_y1,sizeof(KC_FP_TYPE)*(NT)*(NH+1)*(NH+1))); KC_FP_TYPE * G_log_p_y_sum; checkCudaErrors(cudaMalloc((void**)&G_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+1)*(NH+1))); //sets up CUDA variables int blockSize = 2; int numBlocks = (int)NT/(int)blockSize + ((NT%blockSize==0)?0:1); //gets each trials likelihood + derivatives of filter kcBoundaryLikelihoodTrialHist<<< numBlocks,blockSize >>>(y,spe,lambda,crossingTimes,trIdx,g,dt, NT,log_p_y,der_log_p_y,G_log_p_y1,h_filt,y_hist,NH,modelInd); checkCudaErrors(cudaDeviceSynchronize()); //sums up all the trials' likelihoods and derivatives with respect to gamma int nBlocksC = 3; int blockSizeC = 1; kcSumLangevinVars <<< nBlocksC,blockSizeC >>> (der_log_p_y, der_log_p_y_sum, G_log_p_y1, G_log_p_y_sum, log_p_y, log_p_y_sum, trIdx, NT, NH, gPrior, lPrior); checkCudaErrors(cudaDeviceSynchronize()); //pushes answers back to MATLAB if(nlhs > 0) { plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(plhs[0]),log_p_y_sum,sizeof(KC_FP_TYPE)*1,cudaMemcpyDeviceToHost)); } if(nlhs > 1) { plhs[1] = mxCreateNumericMatrix(NH+1,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(plhs[1]),der_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+1)*(1),cudaMemcpyDeviceToHost)); } if(nlhs > 2) { plhs[2] = mxCreateNumericMatrix(NH+1,NH+1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(plhs[2]),G_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+1)*(NH+1),cudaMemcpyDeviceToHost)); } //clears up GPU variables checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaFree(log_p_y)); checkCudaErrors(cudaFree(log_p_y_sum)); checkCudaErrors(cudaFree(der_log_p_y)); checkCudaErrors(cudaFree(der_log_p_y_sum)); checkCudaErrors(cudaFree(G_log_p_y1)); checkCudaErrors(cudaFree(G_log_p_y_sum)); checkCudaErrors(cudaFree(h_filt)); checkCudaErrors(cudaFree(lPrior)); checkCudaErrors(cudaFree(gPrior)); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error at the end of kcLangevinStep.cu "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA errors"); } }
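Beyond the launch macro, the kcLangevinStep pair above differs only in 1:1 header and runtime-API renames. The comment block below summarizes the substitutions visible in this particular pair (it is a summary of this diff, not a complete hipify table), followed by a minimal CUDA sketch, illustrative only and not part of the MEX file, of the allocate/copy/synchronize/free sequence that the file wraps in checkCudaErrors.

// Renames visible in the pair above (CUDA -> HIP); summary of this diff only:
//   #include <cuda_runtime.h>          ->  #include <hip/hip_runtime.h>
//   #include "cublas_v2.h"             ->  #include "rocblas.h"
//   #include <curand.h>                ->  #include <hiprand/hiprand.h>
//   cudaError_t / cudaSuccess          ->  hipError_t / hipSuccess
//   cudaMalloc / cudaMemcpy / cudaFree ->  hipMalloc / hipMemcpy / hipFree
//   cudaDeviceSynchronize              ->  hipDeviceSynchronize
//   cudaGetErrorString                 ->  hipGetErrorString
// Minimal, self-contained CUDA example of the checked allocate/copy/sync/free pattern:
#include <cstdio>
#include <cuda_runtime.h>

int main()
{
    double host[4] = {1.0, 2.0, 3.0, 4.0}, back[4] = {0.0};
    double *dev = NULL;
    cudaError_t err = cudaMalloc((void **)&dev, sizeof(host));
    if (err != cudaSuccess) {
        printf("cudaMalloc failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    cudaMemcpy(dev, host, sizeof(host), cudaMemcpyHostToDevice);
    cudaMemcpy(back, dev, sizeof(host), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    printf("round trip: %g %g %g %g\n", back[0], back[1], back[2], back[3]);
    cudaFree(dev);
    return 0;
}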
eaa16a5d25623687337dca144d23cbb3c0433a6b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "RMSDKernels.h" __global__ void gpu_correlationMatrix( double *d_coords1, double *d_coords2, double *R, int *num_atoms, int coords_stride){ uint batch_idx = blockIdx.x; uint i = threadIdx.x; uint j = threadIdx.y; int r_index = 9*batch_idx + 3*i+j; int n_atoms = num_atoms[batch_idx]; double *coords1 = d_coords1 + batch_idx*coords_stride; double *coords2 = d_coords2 + batch_idx*coords_stride; R[r_index] = 0.0; for(int k=0; k<n_atoms; k++){ R[r_index] += coords1[3*k + i]*coords2[3*k + j]; } } __global__ void gpu_TMatrix( double *d_R, double *d_T){ uint batch_idx = blockIdx.x; double *R = d_R + batch_idx*9; double *T = d_T + batch_idx*16; T[0] = R[0]+R[4]+R[8]; T[1] = R[5]-R[7]; T[2] = R[6]-R[2]; T[3] = R[1]-R[3]; T[4] = R[5]-R[7]; T[5] = R[0]-R[4]-R[8]; T[6] = R[1]+R[3]; T[7] = R[2]+R[6]; T[8] = R[6]-R[2]; T[9] = R[1]+R[3]; T[10] = -R[0]+R[4]-R[8]; T[11] = R[5]+R[7]; T[12] = R[1]-R[3]; T[13] = R[2]+R[6]; T[14] = R[5]+R[7]; T[15] = -R[0]-R[4]+R[8]; } __global__ void gpu_computeR2( double *d_coordinates, int num_atoms, double *R2){ int dim_index = threadIdx.x; R2[dim_index]=0.0; for(int i=0;i<num_atoms;i++){ R2[dim_index]+=d_coordinates[3*i+dim_index]*d_coordinates[3*i+dim_index]; } } __device__ void mat33Vec3Mul(double *d_m, double *d_v, double *dst){ if(dst == d_v){ double tmp[3]; for(int i=0;i<3;i++){ tmp[i] = 0.0; for(int j=0;j<3;j++){ tmp[i] += d_m[i*3+j]*d_v[j]; } } memcpy(dst, tmp, 3*sizeof(double)); }else{ for(int i=0;i<3;i++){ dst[i] = 0.0; for(int j=0;j<3;j++){ dst[i] += d_m[i*3+j]*d_v[j]; } } } } __global__ void gpu_transformCoordinates( double *d_coordinates_src, double *d_coordinates_dst, double *d_matrix, int atoms_stride){ int atom_idx = blockIdx.x; int batch_idx = threadIdx.x; double *coordinates_src = d_coordinates_src + batch_idx*atoms_stride*3; double *coordinates_dst = d_coordinates_dst + batch_idx*atoms_stride*3; double *matrix = d_matrix + 9*batch_idx; mat33Vec3Mul(matrix, coordinates_src + 3*atom_idx, coordinates_dst + 3*atom_idx); } void cpu_correlationMatrix( double *d_coords1, //input: coordinates 1 double *d_coords2, //input: coordinates 2 double *T, //output: T-correlation matrix int *num_atoms, int batch_size, int coords_stride){ double *R ; hipMalloc( &R, batch_size*9*sizeof(double)); dim3 coords_dim(3, 3, 1); hipLaunchKernelGGL(( gpu_correlationMatrix), dim3(batch_size), dim3(coords_dim), 0, 0, d_coords1, d_coords2, R, num_atoms, coords_stride); hipLaunchKernelGGL(( gpu_TMatrix), dim3(batch_size),dim3(1), 0, 0, R, T); hipFree(R); } void cpu_computeR2( double *d_coordinates, int num_atoms, double *R2){ hipLaunchKernelGGL(( gpu_computeR2), dim3(1),dim3(3), 0, 0, d_coordinates, num_atoms, R2); } void cpu_transformCoordinates( double *d_coordinates_src, //input: coordinates to transform double *d_coordinates_dst, //output: transformed coordinates double *d_matrix, //input: transformation matrix int batch_size, int coords_stride){ int max_num_atoms = coords_stride/3; hipLaunchKernelGGL(( gpu_transformCoordinates), dim3(max_num_atoms), dim3(batch_size), 0, 0, d_coordinates_src, d_coordinates_dst, d_matrix, max_num_atoms); }
eaa16a5d25623687337dca144d23cbb3c0433a6b.cu
#include "RMSDKernels.h" __global__ void gpu_correlationMatrix( double *d_coords1, double *d_coords2, double *R, int *num_atoms, int coords_stride){ uint batch_idx = blockIdx.x; uint i = threadIdx.x; uint j = threadIdx.y; int r_index = 9*batch_idx + 3*i+j; int n_atoms = num_atoms[batch_idx]; double *coords1 = d_coords1 + batch_idx*coords_stride; double *coords2 = d_coords2 + batch_idx*coords_stride; R[r_index] = 0.0; for(int k=0; k<n_atoms; k++){ R[r_index] += coords1[3*k + i]*coords2[3*k + j]; } } __global__ void gpu_TMatrix( double *d_R, double *d_T){ uint batch_idx = blockIdx.x; double *R = d_R + batch_idx*9; double *T = d_T + batch_idx*16; T[0] = R[0]+R[4]+R[8]; T[1] = R[5]-R[7]; T[2] = R[6]-R[2]; T[3] = R[1]-R[3]; T[4] = R[5]-R[7]; T[5] = R[0]-R[4]-R[8]; T[6] = R[1]+R[3]; T[7] = R[2]+R[6]; T[8] = R[6]-R[2]; T[9] = R[1]+R[3]; T[10] = -R[0]+R[4]-R[8]; T[11] = R[5]+R[7]; T[12] = R[1]-R[3]; T[13] = R[2]+R[6]; T[14] = R[5]+R[7]; T[15] = -R[0]-R[4]+R[8]; } __global__ void gpu_computeR2( double *d_coordinates, int num_atoms, double *R2){ int dim_index = threadIdx.x; R2[dim_index]=0.0; for(int i=0;i<num_atoms;i++){ R2[dim_index]+=d_coordinates[3*i+dim_index]*d_coordinates[3*i+dim_index]; } } __device__ void mat33Vec3Mul(double *d_m, double *d_v, double *dst){ if(dst == d_v){ double tmp[3]; for(int i=0;i<3;i++){ tmp[i] = 0.0; for(int j=0;j<3;j++){ tmp[i] += d_m[i*3+j]*d_v[j]; } } memcpy(dst, tmp, 3*sizeof(double)); }else{ for(int i=0;i<3;i++){ dst[i] = 0.0; for(int j=0;j<3;j++){ dst[i] += d_m[i*3+j]*d_v[j]; } } } } __global__ void gpu_transformCoordinates( double *d_coordinates_src, double *d_coordinates_dst, double *d_matrix, int atoms_stride){ int atom_idx = blockIdx.x; int batch_idx = threadIdx.x; double *coordinates_src = d_coordinates_src + batch_idx*atoms_stride*3; double *coordinates_dst = d_coordinates_dst + batch_idx*atoms_stride*3; double *matrix = d_matrix + 9*batch_idx; mat33Vec3Mul(matrix, coordinates_src + 3*atom_idx, coordinates_dst + 3*atom_idx); } void cpu_correlationMatrix( double *d_coords1, //input: coordinates 1 double *d_coords2, //input: coordinates 2 double *T, //output: T-correlation matrix int *num_atoms, int batch_size, int coords_stride){ double *R ; cudaMalloc( &R, batch_size*9*sizeof(double)); dim3 coords_dim(3, 3, 1); gpu_correlationMatrix<<<batch_size, coords_dim>>>(d_coords1, d_coords2, R, num_atoms, coords_stride); gpu_TMatrix<<<batch_size,1>>>(R, T); cudaFree(R); } void cpu_computeR2( double *d_coordinates, int num_atoms, double *R2){ gpu_computeR2<<<1,3>>>( d_coordinates, num_atoms, R2); } void cpu_transformCoordinates( double *d_coordinates_src, //input: coordinates to transform double *d_coordinates_dst, //output: transformed coordinates double *d_matrix, //input: transformation matrix int batch_size, int coords_stride){ int max_num_atoms = coords_stride/3; gpu_transformCoordinates<<<max_num_atoms, batch_size>>>(d_coordinates_src, d_coordinates_dst, d_matrix, max_num_atoms); }
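In gpu_correlationMatrix above, each batch element gets one block and each entry of the 3x3 result gets one thread, with thread (i,j) accumulating sum over k of coords1[3k+i]*coords2[3k+j]. The sketch below shows that per-structure pattern for a single (unbatched) pair of coordinate sets; data and names are illustrative, not taken from the RMSD code.

// Minimal single-structure sketch of the 3x3 correlation-matrix pattern (illustrative only).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void corr3x3(const double *c1, const double *c2, double *R, int nAtoms)
{
    int i = threadIdx.x, j = threadIdx.y;   // row/column of the 3x3 result
    double acc = 0.0;
    for (int k = 0; k < nAtoms; ++k)
        acc += c1[3 * k + i] * c2[3 * k + j];
    R[3 * i + j] = acc;
}

int main()
{
    const int nAtoms = 2;
    double h1[3 * nAtoms] = {1, 0, 0, 0, 1, 0};   // two atoms, xyz interleaved
    double h2[3 * nAtoms] = {1, 0, 0, 0, 1, 0};
    double hR[9];
    double *d1, *d2, *dR;
    cudaMalloc(&d1, sizeof(h1)); cudaMemcpy(d1, h1, sizeof(h1), cudaMemcpyHostToDevice);
    cudaMalloc(&d2, sizeof(h2)); cudaMemcpy(d2, h2, sizeof(h2), cudaMemcpyHostToDevice);
    cudaMalloc(&dR, sizeof(hR));
    corr3x3<<<1, dim3(3, 3)>>>(d1, d2, dR, nAtoms);   // one 3x3 block, like one batch element above
    cudaMemcpy(hR, dR, sizeof(hR), cudaMemcpyDeviceToHost);
    printf("R diag: %g %g %g\n", hR[0], hR[4], hR[8]);   // expect 1 1 0
    cudaFree(d1); cudaFree(d2); cudaFree(dR);
    return 0;
}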
172dc8b707315fabe94402804f323885cb6ecf11.hip
// !!! This is a file automatically generated by hipify!!! /** %%cu * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> #include <helper_cuda.h> /* */ /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements, int operaciones) { for(int j = 0; j < operaciones; j++){ int i = ((blockDim.x * blockIdx.x + threadIdx.x)*operaciones) + j; if (i < numElements) { C[i] = A[i] + B[i]; } } } /** * Host main routine */ int main(void) { // Error code to check return values for CUDA calls hipError_t err = hipSuccess; int dev = 0; hipSetDevice(dev); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); int threadsPerBlock = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor); threadsPerBlock = threadsPerBlock*2; int blocksPerGrid = deviceProp.multiProcessorCount; // Print the vector length to be used, and compute its size int numElements = 5000; int operacionPorHilo = numElements>(blocksPerGrid*threadsPerBlock)?((numElements/blocksPerGrid*threadsPerBlock)+1):1; size_t size = numElements * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A float *h_A = (float *)malloc(size); // Allocate the host input vector B float *h_B = (float *)malloc(size); // Allocate the host output vector C float *h_C = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; } // Allocate the device input vector A float *d_A = NULL; err = hipMalloc((void **)&d_A, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = hipMalloc((void **)&d_B, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = hipMalloc((void **)&d_C, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_B, h_B, size, 
hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel /*int cantidadCores = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount;*/ printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements,operacionPorHilo); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { printf("device_c%f, host_c%f\n", *d_C, *h_C); fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); fprintf(stderr, " esperado: %f, tenemos: %f\n", (h_A[i] + h_B[i]), h_C[i]); exit(EXIT_FAILURE); } } printf("Test PASSED\n"); // Free device global memory err = hipFree(d_A); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_B); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_C); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); // Reset the device and exit // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits err = hipDeviceReset(); if (err != hipSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; }
172dc8b707315fabe94402804f323885cb6ecf11.cu
/** %%cu * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> #include <helper_cuda.h> /* */ /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements, int operaciones) { for(int j = 0; j < operaciones; j++){ int i = ((blockDim.x * blockIdx.x + threadIdx.x)*operaciones) + j; if (i < numElements) { C[i] = A[i] + B[i]; } } } /** * Host main routine */ int main(void) { // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; int dev = 0; cudaSetDevice(dev); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); int threadsPerBlock = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor); threadsPerBlock = threadsPerBlock*2; int blocksPerGrid = deviceProp.multiProcessorCount; // Print the vector length to be used, and compute its size int numElements = 5000; int operacionPorHilo = numElements>(blocksPerGrid*threadsPerBlock)?((numElements/blocksPerGrid*threadsPerBlock)+1):1; size_t size = numElements * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A float *h_A = (float *)malloc(size); // Allocate the host input vector B float *h_B = (float *)malloc(size); // Allocate the host output vector C float *h_C = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; } // Allocate the device input vector A float *d_A = NULL; err = cudaMalloc((void **)&d_A, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = cudaMalloc((void **)&d_B, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = cudaMalloc((void **)&d_C, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { 
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel /*int cantidadCores = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount;*/ printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements,operacionPorHilo); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { printf("device_c%f, host_c%f\n", *d_C, *h_C); fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); fprintf(stderr, " esperado: %f, tenemos: %f\n", (h_A[i] + h_B[i]), h_C[i]); exit(EXIT_FAILURE); } } printf("Test PASSED\n"); // Free device global memory err = cudaFree(d_A); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_B); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_C); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); // Reset the device and exit // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits err = cudaDeviceReset(); if (err != cudaSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; }
897570f8b279e3852fb745e4430c255ebd75930a.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/device_functions.h> #include <hip/hip_runtime_api.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> #include <iostream> #include <algorithm> #include <cstdio> #include <math.h> #include <unistd.h> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } #define TILE_WIDTH 32 #define SYSTEMTIME struct timespec #ifdef __INTELLISENSE__ void __syncthreads(); #endif inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d \n", hipGetErrorString(code), file, line); if (abort){ system("pause"); exit(code); } } } using namespace std; // ------------------------------------ INIT, PRINT & FREE MATRICES; PRINT HEADERS & RESULTS; MAIN LOOP; void initMatrices(double ** pha, double ** phb, double ** phc, int m_ar, int m_br){ long int i, j; *pha = (double *)malloc((m_ar * m_br) * sizeof(double)); *phb = (double *)malloc((m_ar * m_br) * sizeof(double)); *phc = (double *)malloc((m_ar * m_ar) * sizeof(double)); for (i = 0; i< m_ar; i++) for (j = 0; j< m_br; j++) { (*pha)[i*m_br + j] = i == j ? (double)i : (double)0; } for (i = 0; i< m_br; i++) for (j = 0; j< m_ar; j++) { (*phb)[i*m_ar + j] = i == j ? (double)i : (double)0; } for (i = 0; i< m_ar; i++) for (j = 0; j< m_ar; j++) { (*phc)[i*m_ar + j] = 0; } } void freeMatrices(double * pha, double * phb, double * phc){ free(pha); free(phb); free(phc); } void printResultMatrix(double ** phc, int m_br) { int i, j; cout << endl << endl; cout << "Result matrix: " << endl; for (i = 0; i<10; i++) { for (j = 0; j<min(10, m_br); j++) cout << (*phc)[i*m_br + j] << " "; cout << "\n"; } cout << endl << endl; } // ------------------------------------ CUDA HEADERS int getSPcores(hipDeviceProp_t devProp) { int cores = 0; int mp = devProp.multiProcessorCount; switch (devProp.major){ case 2: // Fermi if (devProp.minor == 1) cores = mp * 48; else cores = mp * 32; break; case 3: // Kepler cores = mp * 192; break; case 5: // Maxwell cores = mp * 128; break; default: printf("Unknown device type\n"); break; } return cores; } void DisplayHeader() { const int kb = 1024; const int mb = kb * kb; cout << "GPU" << endl << "=========" << endl; cout << "CUDA version: v" << CUDART_VERSION << endl; //wcout << "Thrust version: v" << THRUST_MAJOR_VERSION << "." << THRUST_MINOR_VERSION << endl << endl; int devCount; hipGetDeviceCount(&devCount); cout << "CUDA Devices: " << endl << endl; for (int i = 0; i < devCount; i++) { hipDeviceProp_t props; hipGetDeviceProperties(&props, i); cout << i << ": " << props.name << ": " << props.major << "." 
<< props.minor << endl; cout << " Global memory: " << props.totalGlobalMem / mb << "mb" << endl; cout << " Shared memory: " << props.sharedMemPerBlock / kb << "kb" << endl; cout << " Constant memory: " << props.totalConstMem / kb << "kb" << endl; cout << " Block registers: " << props.regsPerBlock << endl << endl; cout << " Warp size: " << props.warpSize << endl; cout << " Number of cores: " << getSPcores(props) << endl; cout << " Threads per block: " << props.maxThreadsPerBlock << endl; cout << " Max block dimensions: [ " << props.maxThreadsDim[0] << ", " << props.maxThreadsDim[1] << ", " << props.maxThreadsDim[2] << " ]" << endl; cout << " Max grid dimensions: [ " << props.maxGridSize[0] << ", " << props.maxGridSize[1] << ", " << props.maxGridSize[2] << " ]" << endl; cout << endl; } cout << "=========" << endl; } // ------------------------------------ STAT FUNCTIONS __global__ void MatrixMulKernelNaive(double * Md, double * Nd, double * Pd, int Width){ int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Y = by*TILE_WIDTH + ty; int X = bx*TILE_WIDTH + tx; double Pvalue = 0; for(int m = 0; m < Width; m++){ Pvalue += Md[X*Width + m]* Nd[m*Width + X]; } Pd[Y*Width + X] = Pvalue; } __global__ void MatrixMulKernel(double * Md, double * Nd, double * Pd, /*double * Td,*/ int Width){ __shared__ float Mds[TILE_WIDTH][TILE_WIDTH]; __shared__ float Nds[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Y = by*TILE_WIDTH + ty; int X = bx*TILE_WIDTH + tx; double Pvalue = 0; for (int m = 0; m < Width/TILE_WIDTH; ++m){ Mds[tx][ty] = Md[(m*TILE_WIDTH + tx)*Width + Y]; Nds[tx][ty] = Nd[X*Width + (m*TILE_WIDTH + ty)]; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) Pvalue += Mds[tx][k] * Nds[k][ty]; __syncthreads(); } Pd[Y*Width + X] = Pvalue; __syncthreads(); if(tx == 0 && ty == 0){ } } // ------------------------------------ CUDA OPERATIONS PREPARATION void MatrixMulOnDevice(double * M, double * N, double * P, int Width){ long int size = Width * Width * sizeof(double); double * Md, *Nd, *Pd;// *Td; dim3 dimGrid(Width / TILE_WIDTH, Width / TILE_WIDTH); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); // Allocate and Load M, N to device memory gpuErrchk(hipMalloc(&Md, size)); gpuErrchk(hipMemcpy(Md, M, size, hipMemcpyHostToDevice)); gpuErrchk(hipMalloc(&Nd, size)); gpuErrchk(hipMemcpy(Nd, N, size, hipMemcpyHostToDevice)); // Allocate P on the device gpuErrchk(hipMalloc(&Pd, size)); // gpuErrchk(hipMalloc(&Td, pow((float)Width / TILE_WIDTH, 2) ); SYSTEMTIME time1; if(clock_gettime(CLOCK_MONOTONIC,&time1) < 0) { perror("Failure getting process time (1)"); exit(EXIT_FAILURE); } // Kernel invocation MatrixMulKernel << <dimGrid, dimBlock >> >(Md, Nd, Pd, /*Td,*/ Width); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); SYSTEMTIME time2; if(clock_gettime(CLOCK_MONOTONIC,&time2) < 0) { perror("Failure getting process time (2)"); exit(EXIT_FAILURE); } double delta = (double)(time2.tv_sec - time1.tv_sec) + ((double)(time2.tv_nsec - time1.tv_nsec) / pow(10.f,9)); long long calcNrInsOp = ((long long)3*pow((float)Width,3)); double tPerformance = calcNrInsOp/(double)(delta* pow(10.f,9)); printf("Time : %3.3f seconds\n", delta); printf("T.Prfr: %3.3f GFLOPS\n", tPerformance); // Read P from the device gpuErrchk(hipMemcpy(P, Pd, size, hipMemcpyDeviceToHost)); gpuErrchk(hipDeviceSynchronize()); // Free device matrices hipFree(Md); hipFree(Nd); hipFree(Pd); //hipFree(Td); } int main(int argc, char *argv[]) { 
DisplayHeader(); int Width = argc > 1 ? atoi(argv[1]) : 1024; long int size = Width * Width * sizeof(double); cout << "Width: " << Width << endl; cout << "Global mem: " << (size * 3) / (1024 * 1024) << "mb" << endl; cout << "Shared mem: " << (TILE_WIDTH*TILE_WIDTH*sizeof(double) * 2) / (1024) << "kb" << endl; cout << "dim grid: " << (Width / TILE_WIDTH) << ", " << (Width / TILE_WIDTH) << endl; cout << "dim block: " << TILE_WIDTH << ", " << TILE_WIDTH << endl; double * M, *N, *P; initMatrices(&M, &N, &P, Width, Width); MatrixMulOnDevice(M, N, P, Width); printResultMatrix(&P, Width); freeMatrices(N, M, P); hipDeviceReset(); return 0; }
897570f8b279e3852fb745e4430c255ebd75930a.cu
#include <cuda.h> #include <device_functions.h> #include <cuda_runtime_api.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> #include <iostream> #include <algorithm> #include <cstdio> #include <math.h> #include <unistd.h> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } #define TILE_WIDTH 32 #define SYSTEMTIME struct timespec #ifdef __INTELLISENSE__ void __syncthreads(); #endif inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d \n", cudaGetErrorString(code), file, line); if (abort){ system("pause"); exit(code); } } } using namespace std; // ------------------------------------ INIT, PRINT & FREE MATRICES; PRINT HEADERS & RESULTS; MAIN LOOP; void initMatrices(double ** pha, double ** phb, double ** phc, int m_ar, int m_br){ long int i, j; *pha = (double *)malloc((m_ar * m_br) * sizeof(double)); *phb = (double *)malloc((m_ar * m_br) * sizeof(double)); *phc = (double *)malloc((m_ar * m_ar) * sizeof(double)); for (i = 0; i< m_ar; i++) for (j = 0; j< m_br; j++) { (*pha)[i*m_br + j] = i == j ? (double)i : (double)0; } for (i = 0; i< m_br; i++) for (j = 0; j< m_ar; j++) { (*phb)[i*m_ar + j] = i == j ? (double)i : (double)0; } for (i = 0; i< m_ar; i++) for (j = 0; j< m_ar; j++) { (*phc)[i*m_ar + j] = 0; } } void freeMatrices(double * pha, double * phb, double * phc){ free(pha); free(phb); free(phc); } void printResultMatrix(double ** phc, int m_br) { int i, j; cout << endl << endl; cout << "Result matrix: " << endl; for (i = 0; i<10; i++) { for (j = 0; j<min(10, m_br); j++) cout << (*phc)[i*m_br + j] << " "; cout << "\n"; } cout << endl << endl; } // ------------------------------------ CUDA HEADERS int getSPcores(cudaDeviceProp devProp) { int cores = 0; int mp = devProp.multiProcessorCount; switch (devProp.major){ case 2: // Fermi if (devProp.minor == 1) cores = mp * 48; else cores = mp * 32; break; case 3: // Kepler cores = mp * 192; break; case 5: // Maxwell cores = mp * 128; break; default: printf("Unknown device type\n"); break; } return cores; } void DisplayHeader() { const int kb = 1024; const int mb = kb * kb; cout << "GPU" << endl << "=========" << endl; cout << "CUDA version: v" << CUDART_VERSION << endl; //wcout << "Thrust version: v" << THRUST_MAJOR_VERSION << "." << THRUST_MINOR_VERSION << endl << endl; int devCount; cudaGetDeviceCount(&devCount); cout << "CUDA Devices: " << endl << endl; for (int i = 0; i < devCount; i++) { cudaDeviceProp props; cudaGetDeviceProperties(&props, i); cout << i << ": " << props.name << ": " << props.major << "." 
<< props.minor << endl; cout << " Global memory: " << props.totalGlobalMem / mb << "mb" << endl; cout << " Shared memory: " << props.sharedMemPerBlock / kb << "kb" << endl; cout << " Constant memory: " << props.totalConstMem / kb << "kb" << endl; cout << " Block registers: " << props.regsPerBlock << endl << endl; cout << " Warp size: " << props.warpSize << endl; cout << " Number of cores: " << getSPcores(props) << endl; cout << " Threads per block: " << props.maxThreadsPerBlock << endl; cout << " Max block dimensions: [ " << props.maxThreadsDim[0] << ", " << props.maxThreadsDim[1] << ", " << props.maxThreadsDim[2] << " ]" << endl; cout << " Max grid dimensions: [ " << props.maxGridSize[0] << ", " << props.maxGridSize[1] << ", " << props.maxGridSize[2] << " ]" << endl; cout << endl; } cout << "=========" << endl; } // ------------------------------------ STAT FUNCTIONS __global__ void MatrixMulKernelNaive(double * Md, double * Nd, double * Pd, int Width){ int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Y = by*TILE_WIDTH + ty; int X = bx*TILE_WIDTH + tx; double Pvalue = 0; for(int m = 0; m < Width; m++){ Pvalue += Md[X*Width + m]* Nd[m*Width + X]; } Pd[Y*Width + X] = Pvalue; } __global__ void MatrixMulKernel(double * Md, double * Nd, double * Pd, /*double * Td,*/ int Width){ __shared__ float Mds[TILE_WIDTH][TILE_WIDTH]; __shared__ float Nds[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Y = by*TILE_WIDTH + ty; int X = bx*TILE_WIDTH + tx; double Pvalue = 0; for (int m = 0; m < Width/TILE_WIDTH; ++m){ Mds[tx][ty] = Md[(m*TILE_WIDTH + tx)*Width + Y]; Nds[tx][ty] = Nd[X*Width + (m*TILE_WIDTH + ty)]; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) Pvalue += Mds[tx][k] * Nds[k][ty]; __syncthreads(); } Pd[Y*Width + X] = Pvalue; __syncthreads(); if(tx == 0 && ty == 0){ } } // ------------------------------------ CUDA OPERATIONS PREPARATION void MatrixMulOnDevice(double * M, double * N, double * P, int Width){ long int size = Width * Width * sizeof(double); double * Md, *Nd, *Pd;// *Td; dim3 dimGrid(Width / TILE_WIDTH, Width / TILE_WIDTH); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); // Allocate and Load M, N to device memory gpuErrchk(cudaMalloc(&Md, size)); gpuErrchk(cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc(&Nd, size)); gpuErrchk(cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice)); // Allocate P on the device gpuErrchk(cudaMalloc(&Pd, size)); // gpuErrchk(cudaMalloc(&Td, pow((float)Width / TILE_WIDTH, 2) ); SYSTEMTIME time1; if(clock_gettime(CLOCK_MONOTONIC,&time1) < 0) { perror("Failure getting process time (1)"); exit(EXIT_FAILURE); } // Kernel invocation MatrixMulKernel << <dimGrid, dimBlock >> >(Md, Nd, Pd, /*Td,*/ Width); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); SYSTEMTIME time2; if(clock_gettime(CLOCK_MONOTONIC,&time2) < 0) { perror("Failure getting process time (2)"); exit(EXIT_FAILURE); } double delta = (double)(time2.tv_sec - time1.tv_sec) + ((double)(time2.tv_nsec - time1.tv_nsec) / pow(10.f,9)); long long calcNrInsOp = ((long long)3*pow((float)Width,3)); double tPerformance = calcNrInsOp/(double)(delta* pow(10.f,9)); printf("Time : %3.3f seconds\n", delta); printf("T.Prfr: %3.3f GFLOPS\n", tPerformance); // Read P from the device gpuErrchk(cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost)); gpuErrchk(cudaDeviceSynchronize()); // Free device matrices cudaFree(Md); cudaFree(Nd); cudaFree(Pd); //cudaFree(Td); } int main(int argc, 
char *argv[]) { DisplayHeader(); int Width = argc > 1 ? atoi(argv[1]) : 1024; long int size = Width * Width * sizeof(double); cout << "Width: " << Width << endl; cout << "Global mem: " << (size * 3) / (1024 * 1024) << "mb" << endl; cout << "Shared mem: " << (TILE_WIDTH*TILE_WIDTH*sizeof(double) * 2) / (1024) << "kb" << endl; cout << "dim grid: " << (Width / TILE_WIDTH) << ", " << (Width / TILE_WIDTH) << endl; cout << "dim block: " << TILE_WIDTH << ", " << TILE_WIDTH << endl; double * M, *N, *P; initMatrices(&M, &N, &P, Width, Width); MatrixMulOnDevice(M, N, P, Width); printResultMatrix(&P, Width); freeMatrices(N, M, P); cudaDeviceReset(); return 0; }
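This pair is a shared-memory tiled matrix multiply (TILE_WIDTH = 32) timed with clock_gettime(CLOCK_MONOTONIC, ...) and reported as GFLOPS against an estimate of 3*Width^3 operations. Here hipify left the kernel launch in triple-chevron form, which HIP's clang-based toolchain accepts, and only renamed the runtime calls and includes. Note that MatrixMulKernel stages its tiles as Mds[tx][ty] = Md[(m*TILE_WIDTH + tx)*Width + Y] and accumulates Mds[tx][k] * Nds[k][ty], which differs from the textbook loading pattern; with the diagonal test matrices built in initMatrices() the difference is not visible in the printed result. The kernel below is the conventional row-times-column tiling, offered only as a cross-check for non-diagonal inputs; it assumes Width is a multiple of the tile size and can be launched with the same dimGrid/dimBlock as above.

#include <cuda_runtime.h>

#define TILE 32

// Reference tiling: P = M * N in row-major layout, row indexed by blockIdx.y/threadIdx.y,
// column by blockIdx.x/threadIdx.x.
__global__ void matMulTiledRef(const double *M, const double *N, double *P, int Width)
{
    __shared__ double Ms[TILE][TILE];
    __shared__ double Ns[TILE][TILE];

    int row = blockIdx.y * TILE + threadIdx.y;
    int col = blockIdx.x * TILE + threadIdx.x;
    double acc = 0.0;

    for (int m = 0; m < Width / TILE; ++m) {
        // Each thread stages one element of the M tile and one of the N tile.
        Ms[threadIdx.y][threadIdx.x] = M[row * Width + (m * TILE + threadIdx.x)];
        Ns[threadIdx.y][threadIdx.x] = N[(m * TILE + threadIdx.y) * Width + col];
        __syncthreads();

        for (int k = 0; k < TILE; ++k)
            acc += Ms[threadIdx.y][k] * Ns[k][threadIdx.x];
        __syncthreads();
    }
    P[row * Width + col] = acc;
}

// Usage sketch (same shapes as the file above):
//   dim3 dimGrid(Width / TILE, Width / TILE);
//   dim3 dimBlock(TILE, TILE);
//   matMulTiledRef<<<dimGrid, dimBlock>>>(Md, Nd, Pd, Width);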
6ee7a62fa1889cacc76770bdeef1fef0dffde249.hip
// !!! This is a file automatically generated by hipify!!! /** * For licensing, see ./LICENSE * * Currently, this code allows insertions and lookups, but not * deletions. The data structure is a simple trie, which provides an * O(log n) bound on insertions and lookups. Deletions wouldn't be * tricky to add. * * When performing a lookup, bit comparisons decide left/right * traversal from the head of the tree, and the prefix length defines * a maximum depth when inserting. The lookup function will traverse * the tree until it determines that no more specific match than the * best already found is possible. The code will replace all valid IP * addresses (according to inet_pton()) with the matching prefix, or * "NF" if there was no match. It will not attempt to match tokens * that are not prefixes, but will print them out in the output. * * The code reads the lines to convert from standard input; it reads a * list of prefixes from a file, specified by the "-f" parameter. The * prefix file should contain one prefix per line, with the prefix and * the netmask separated by a space. All output is sent to standard * output. */ #include "cuda-lookup.cuh" #include <stdio.h> //#include <math.h> #include <hip/hip_runtime.h> #ifdef USECUPRINTF #include "cuPrintf_hip.cuh" #if __CUDA_ARCH__ < 200 //Compute capability 1.x architectures #define CUPRINTF cuPrintf #else //Compute capability 2.x architectures #define CUPRINTF(fmt, ...) printf("[%d, %d]:\t" fmt, \ blockIdx.y*gridDim.x+blockIdx.x,\ threadIdx.z*blockDim.x*blockDim.y+threadIdx.y*blockDim.x+threadIdx.x,\ __VA_ARGS__) #endif #endif #ifdef USEDEBUG int gpudebug = 1; #define DEBUG(...) do { if (gpudebug) fprintf(stdout, __VA_ARGS__); } while (0) #else int gpudebug = 0; #define DEBUG(...) (void)0 #endif //void cuda_lookup(struct iplookup_node *ilun, struct internal_node* n); //int cuda_lpm_lookup(struct lpm_tree* tree, struct iplookup_node *ilun); __device__ void cuda_lookup(struct iplookup_node *ilun, struct internal_node* n) { uint32_t b = MAX_BITS; struct internal_node* next = n; ilun->port = (uint16_t) 0; do { n = next; b--; //parent = (struct internal_node*)n; //uint32_t v_bit = ilun->ip & ((uint32_t)pow((double)2, (double)b)); uint32_t v_bit = ilun->ip & ((uint32_t)1 << b); /* If we've found an internal node, determine which direction to descend. 
*/ if (v_bit) { //next = n->r; next = (struct internal_node*)((char*)n + n->r_offset); } else { //next = n->l; next = (struct internal_node*)((char*)n + n->l_offset); } if (n->type == DAT_NODE) { struct data_node* node = (struct data_node*)n; uint32_t mask = 0xFFFFFFFF; //mask = mask - ((uint32_t)pow((double)2, (double)(32 - node->netmask)) - 1); mask = mask - (((uint32_t)1 << (32 - node->netmask)) - 1); if ((ilun->ip & mask) == node->prefix) { ilun->port = node->port; } else { break; } } } while (next != n); //termination when offset is 0 and they are equal //} while (next != NULL); } __global__ void cuda_lpm_lookup(char* d_serializedtree, struct iplookup_node *ilun_array, uint32_t ilunarraysize) { //int i = threadIdx.x; uint16_t iterations = 0; int i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= ilunarraysize) return; //cuda_lookup(&(ilun_array[i]), (struct internal_node*)((char*)d_serializedtree + ((struct lpm_tree*)d_serializedtree)->h_offset)); struct iplookup_node *ilun = &(ilun_array[i]); struct internal_node* n = (struct internal_node*)((char*)d_serializedtree + ((struct lpm_tree*)d_serializedtree)->h_offset); ilun->port = (uint16_t) 0; uint32_t b = MAX_BITS; struct internal_node* next = n; #ifdef USECUPRINTF CUPRINTF("root is:%p, loffset %d roffset %d\n", next, next->l_offset, next->r_offset); #endif do { n = next; b--; iterations++; //parent = (struct internal_node*)n; //uint32_t v_bit = ilun->ip & ((uint32_t)pow((double)2, (double)b)); uint32_t v_bit = ilun->ip & ((uint32_t)1 << b); #ifdef USECUPRINTF CUPRINTF("v_bit %u \t bits %u \t pow2 %u \t mypow %u\n", v_bit, b, (uint32_t)pow((double)2, (double)b), 1<<b); #endif /* If we've found an internal node, determine which direction to descend. */ if (v_bit) { //next = n->r; next = (struct internal_node*)((char*)n + n->r_offset); #ifdef USECUPRINTF CUPRINTF("next right is:%p loffset %d roffset %d\n", next, next->l_offset, next->r_offset); #endif } else { //next = n->l; next = (struct internal_node*)((char*)n + n->l_offset); #ifdef USECUPRINTF CUPRINTF("next left is:%p loffset %d roffset %d\n", next, next->l_offset, next->r_offset); #endif } if (n->type == DAT_NODE) { struct data_node* node = (struct data_node*)n; #ifdef USECUPRINTF CUPRINTF("data node found , iter %d\n", iterations); #endif uint32_t mask = 0xFFFFFFFF; //mask = mask - ((uint32_t)pow((double)2, (double)(32 - node->netmask)) - 1); mask = mask - (((uint32_t)1 << (32 - node->netmask)) - 1); if ((ilun->ip & mask) == node->prefix) { ilun->port = node->port; iterations *=100; } else { iterations *=10; break; } } else { //if(next==n) ilun->port = 0; } } while (next != n); //termination when offset is 0 and they are equal //} while (next != NULL); #ifdef USECUPRINTF CUPRINTF("abandoning , iter %d\n", iterations); #endif ilun->port2 = iterations; } void cuda_lpm_lookup_oncpu(char* d_serializedtree, struct iplookup_node *ilun_array, uint32_t ilunarraysize) { //int i = threadIdx.x; uint16_t iterations = 0; int i = 0; while(i < ilunarraysize) { if(i >= ilunarraysize) return; //cuda_lookup(&(ilun_array[i]), (struct internal_node*)((char*)d_serializedtree + ((struct lpm_tree*)d_serializedtree)->h_offset)); struct iplookup_node *ilun = &(ilun_array[i]); struct internal_node* n = (struct internal_node*)((char*)d_serializedtree + ((struct lpm_tree*)d_serializedtree)->h_offset); ilun->port = (uint16_t) 0; uint32_t b = MAX_BITS; struct internal_node* next = n; do { n = next; b--; iterations++; //parent = (struct internal_node*)n; //uint32_t v_bit = ilun->ip & ((uint32_t)pow((double)2, 
(double)b)); uint32_t v_bit = ilun->ip & ((uint32_t)1 << b); /* If we've found an internal node, determine which direction to descend. */ if (v_bit) { //next = n->r; next = (struct internal_node*)((char*)n + n->r_offset); } else { //next = n->l; next = (struct internal_node*)((char*)n + n->l_offset); } if (n->type == DAT_NODE) { struct data_node* node = (struct data_node*)n; uint32_t mask = 0xFFFFFFFF; //mask = mask - ((uint32_t)pow((double)2, (double)(32 - node->netmask)) - 1); mask = mask - (((uint32_t)1 << (32 - node->netmask)) - 1); if ((ilun->ip & mask) == node->prefix) { ilun->port = node->port; //iterations *=100; } else { //iterations *=10; break; } } else { //if(next==n) ilun->port = 0; } } while (next != n); //termination when offset is 0 and they are equal //} while (next != NULL); ilun->port2 = iterations; i++; } } /* lpm_lookup: * Perform a lookup. Given a string 'ip_string' convert to the * best-matching prefix if the string is a valid IPv4 address * (according to inet_pton), and store it in 'output' and return 1. If * no match is found, store the string "NF" in 'output' and return * 1. If 'ip_string' is not a valid IPv4 address, return 0, and * 'output' is not modified. */ //int lpm_lookup(struct lpm_tree* tree, char* ip_string, char* output) //int lpm_lookup(struct lpm_tree* tree, struct iplookup_node *ilun, char* ip_string, char* output) /* __global__ void cuda_lpm_lookup(char* d_serializedtree, struct iplookup_node *ilun_array, uint32_t ilunarraysize) { //int i = threadIdx.x; int i = blockDim.x * blockIdx.x + threadIdx.x; if(i<ilunarraysize) cuda_lookup(&(ilun_array[i]), (struct internal_node*)((char*)d_serializedtree + ((struct lpm_tree*)d_serializedtree)->h_offset)); //cuPrintf("here\n"); } */ __global__ void cuda_addonip(struct iplookup_node *ilun_array, uint32_t ilunarraysize) { //int i = threadIdx.x; int i = blockDim.x * blockIdx.x + threadIdx.x; if(i<ilunarraysize) (&ilun_array[i])->ip += MAX_BITS; } char* _transfer_to_gpu(char *buffer, uint32_t size) { //allocate space on the device for the tree char *d_buffer = NULL; hipError_t err; err = hipMalloc((void **)&d_buffer, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate memory on GPU (error code %s)!\n", hipGetErrorString(err)); exit(-1); } //copy buffer on the device err = hipMemcpy(d_buffer, buffer, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy tree on the GPU (error code %s)!\n", hipGetErrorString(err)); exit(-2); } DEBUG("Transfer to device (%d bytes from %p) successful.\n", size, buffer); return d_buffer; } void* _transfer_to_host(char *d_buffer, char *buffer, uint32_t size) { hipError_t err; //memset(buffer, '\0', size); //copy device buffer to the host buffer err = hipMemcpy(buffer, d_buffer, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy device buffer to host (error code %s)!\n", hipGetErrorString(err)); exit(-4); } DEBUG("Transfer to host (%d bytes at %p) successful.\n", size, buffer); return (void*)buffer; } char* go_cuda(char *serializedtree, uint32_t treesize, struct iplookup_node *ilun_array, uint32_t ilunarraysize) { char *d_serializedtree = NULL; struct iplookup_node* d_ilun_array = NULL; hipError_t err; #ifdef USECUPRINTF cudaPrintfInit(); #endif //DEBUG("go_cuda received: treeser %p size %d ilun %p ilunarraysize %d\n", serializedtree, treesize, &(ilun_array[0]), ilunarraysize); if(serializedtree != NULL) // the idea is not to transfer if not needed, in that case take old pointer d_serializedtree = 
_transfer_to_gpu(serializedtree, treesize); d_ilun_array = (struct iplookup_node *)_transfer_to_gpu((char *)ilun_array, ilunarraysize * sizeof(struct iplookup_node) ); int threadsPerBlock = 256; int blocksPerGrid =(ilunarraysize + threadsPerBlock - 1) / threadsPerBlock; //int blocksPerGrid =1; printf("CUDA launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( cuda_lpm_lookup), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_serializedtree, d_ilun_array, ilunarraysize); //cuda_addonip<<<blocksPerGrid, threadsPerBlock>>>(d_ilun_array, ilunarraysize); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch lookup kernel (error code %s)!\n", hipGetErrorString(err)); exit(-3); } else printf("CUDA launch successful!\n"); //d_ilun_array->port += 1; _transfer_to_host((char *)d_ilun_array, (char *)ilun_array, ilunarraysize*sizeof(struct iplookup_node)); _transfer_to_host((char *)d_serializedtree, (char *)serializedtree, treesize); #ifdef USECUPRINTF cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); #endif hipFree(d_serializedtree); hipFree(d_ilun_array); hipDeviceSynchronize(); hipDeviceReset(); //cuda_lpm_lookup_oncpu(serializedtree, ilun_array, ilunarraysize); //DEBUG("go_cuda delivers: treeser %p size %ud ilun %p ilunarraysize %ud (sizeof char %lu, sizeof char* %lu)\n", serializedtree, treesize, &(ilun_array[0]), ilunarraysize, sizeof(char), sizeof(char*)); //DEBUG("strustrure sizes: structtree=%d pointertree=%d structintnode=%d structdatanode=%d structilun=%d uint64=%d,uint32=%d, uint16=%d):\n", sizeof(struct lpm_tree),sizeof(struct lpm_tree*), sizeof(struct internal_node),sizeof(struct data_node),sizeof(struct iplookup_node), sizeof(uint64_t), sizeof(uint32_t), sizeof(uint16_t)); return serializedtree; }
6ee7a62fa1889cacc76770bdeef1fef0dffde249.cu
/** * For licensing, see ./LICENSE * * Currently, this code allows insertions and lookups, but not * deletions. The data structure is a simple trie, which provides an * O(log n) bound on insertions and lookups. Deletions wouldn't be * tricky to add. * * When performing a lookup, bit comparisons decide left/right * traversal from the head of the tree, and the prefix length defines * a maximum depth when inserting. The lookup function will traverse * the tree until it determines that no more specific match than the * best already found is possible. The code will replace all valid IP * addresses (according to inet_pton()) with the matching prefix, or * "NF" if there was no match. It will not attempt to match tokens * that are not prefixes, but will print them out in the output. * * The code reads the lines to convert from standard input; it reads a * list of prefixes from a file, specified by the "-f" parameter. The * prefix file should contain one prefix per line, with the prefix and * the netmask separated by a space. All output is sent to standard * output. */ #include "cuda-lookup.cuh" #include <stdio.h> //#include <math.h> #include <cuda_runtime.h> #ifdef USECUPRINTF #include "cuPrintf.cuh" #if __CUDA_ARCH__ < 200 //Compute capability 1.x architectures #define CUPRINTF cuPrintf #else //Compute capability 2.x architectures #define CUPRINTF(fmt, ...) printf("[%d, %d]:\t" fmt, \ blockIdx.y*gridDim.x+blockIdx.x,\ threadIdx.z*blockDim.x*blockDim.y+threadIdx.y*blockDim.x+threadIdx.x,\ __VA_ARGS__) #endif #endif #ifdef USEDEBUG int gpudebug = 1; #define DEBUG(...) do { if (gpudebug) fprintf(stdout, __VA_ARGS__); } while (0) #else int gpudebug = 0; #define DEBUG(...) (void)0 #endif //void cuda_lookup(struct iplookup_node *ilun, struct internal_node* n); //int cuda_lpm_lookup(struct lpm_tree* tree, struct iplookup_node *ilun); __device__ void cuda_lookup(struct iplookup_node *ilun, struct internal_node* n) { uint32_t b = MAX_BITS; struct internal_node* next = n; ilun->port = (uint16_t) 0; do { n = next; b--; //parent = (struct internal_node*)n; //uint32_t v_bit = ilun->ip & ((uint32_t)pow((double)2, (double)b)); uint32_t v_bit = ilun->ip & ((uint32_t)1 << b); /* If we've found an internal node, determine which direction to descend. 
*/ if (v_bit) { //next = n->r; next = (struct internal_node*)((char*)n + n->r_offset); } else { //next = n->l; next = (struct internal_node*)((char*)n + n->l_offset); } if (n->type == DAT_NODE) { struct data_node* node = (struct data_node*)n; uint32_t mask = 0xFFFFFFFF; //mask = mask - ((uint32_t)pow((double)2, (double)(32 - node->netmask)) - 1); mask = mask - (((uint32_t)1 << (32 - node->netmask)) - 1); if ((ilun->ip & mask) == node->prefix) { ilun->port = node->port; } else { break; } } } while (next != n); //termination when offset is 0 and they are equal //} while (next != NULL); } __global__ void cuda_lpm_lookup(char* d_serializedtree, struct iplookup_node *ilun_array, uint32_t ilunarraysize) { //int i = threadIdx.x; uint16_t iterations = 0; int i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= ilunarraysize) return; //cuda_lookup(&(ilun_array[i]), (struct internal_node*)((char*)d_serializedtree + ((struct lpm_tree*)d_serializedtree)->h_offset)); struct iplookup_node *ilun = &(ilun_array[i]); struct internal_node* n = (struct internal_node*)((char*)d_serializedtree + ((struct lpm_tree*)d_serializedtree)->h_offset); ilun->port = (uint16_t) 0; uint32_t b = MAX_BITS; struct internal_node* next = n; #ifdef USECUPRINTF CUPRINTF("root is:%p, loffset %d roffset %d\n", next, next->l_offset, next->r_offset); #endif do { n = next; b--; iterations++; //parent = (struct internal_node*)n; //uint32_t v_bit = ilun->ip & ((uint32_t)pow((double)2, (double)b)); uint32_t v_bit = ilun->ip & ((uint32_t)1 << b); #ifdef USECUPRINTF CUPRINTF("v_bit %u \t bits %u \t pow2 %u \t mypow %u\n", v_bit, b, (uint32_t)pow((double)2, (double)b), 1<<b); #endif /* If we've found an internal node, determine which direction to descend. */ if (v_bit) { //next = n->r; next = (struct internal_node*)((char*)n + n->r_offset); #ifdef USECUPRINTF CUPRINTF("next right is:%p loffset %d roffset %d\n", next, next->l_offset, next->r_offset); #endif } else { //next = n->l; next = (struct internal_node*)((char*)n + n->l_offset); #ifdef USECUPRINTF CUPRINTF("next left is:%p loffset %d roffset %d\n", next, next->l_offset, next->r_offset); #endif } if (n->type == DAT_NODE) { struct data_node* node = (struct data_node*)n; #ifdef USECUPRINTF CUPRINTF("data node found , iter %d\n", iterations); #endif uint32_t mask = 0xFFFFFFFF; //mask = mask - ((uint32_t)pow((double)2, (double)(32 - node->netmask)) - 1); mask = mask - (((uint32_t)1 << (32 - node->netmask)) - 1); if ((ilun->ip & mask) == node->prefix) { ilun->port = node->port; iterations *=100; } else { iterations *=10; break; } } else { //if(next==n) ilun->port = 0; } } while (next != n); //termination when offset is 0 and they are equal //} while (next != NULL); #ifdef USECUPRINTF CUPRINTF("abandoning , iter %d\n", iterations); #endif ilun->port2 = iterations; } void cuda_lpm_lookup_oncpu(char* d_serializedtree, struct iplookup_node *ilun_array, uint32_t ilunarraysize) { //int i = threadIdx.x; uint16_t iterations = 0; int i = 0; while(i < ilunarraysize) { if(i >= ilunarraysize) return; //cuda_lookup(&(ilun_array[i]), (struct internal_node*)((char*)d_serializedtree + ((struct lpm_tree*)d_serializedtree)->h_offset)); struct iplookup_node *ilun = &(ilun_array[i]); struct internal_node* n = (struct internal_node*)((char*)d_serializedtree + ((struct lpm_tree*)d_serializedtree)->h_offset); ilun->port = (uint16_t) 0; uint32_t b = MAX_BITS; struct internal_node* next = n; do { n = next; b--; iterations++; //parent = (struct internal_node*)n; //uint32_t v_bit = ilun->ip & ((uint32_t)pow((double)2, 
(double)b)); uint32_t v_bit = ilun->ip & ((uint32_t)1 << b); /* If we've found an internal node, determine which direction to descend. */ if (v_bit) { //next = n->r; next = (struct internal_node*)((char*)n + n->r_offset); } else { //next = n->l; next = (struct internal_node*)((char*)n + n->l_offset); } if (n->type == DAT_NODE) { struct data_node* node = (struct data_node*)n; uint32_t mask = 0xFFFFFFFF; //mask = mask - ((uint32_t)pow((double)2, (double)(32 - node->netmask)) - 1); mask = mask - (((uint32_t)1 << (32 - node->netmask)) - 1); if ((ilun->ip & mask) == node->prefix) { ilun->port = node->port; //iterations *=100; } else { //iterations *=10; break; } } else { //if(next==n) ilun->port = 0; } } while (next != n); //termination when offset is 0 and they are equal //} while (next != NULL); ilun->port2 = iterations; i++; } } /* lpm_lookup: * Perform a lookup. Given a string 'ip_string' convert to the * best-matching prefix if the string is a valid IPv4 address * (according to inet_pton), and store it in 'output' and return 1. If * no match is found, store the string "NF" in 'output' and return * 1. If 'ip_string' is not a valid IPv4 address, return 0, and * 'output' is not modified. */ //int lpm_lookup(struct lpm_tree* tree, char* ip_string, char* output) //int lpm_lookup(struct lpm_tree* tree, struct iplookup_node *ilun, char* ip_string, char* output) /* __global__ void cuda_lpm_lookup(char* d_serializedtree, struct iplookup_node *ilun_array, uint32_t ilunarraysize) { //int i = threadIdx.x; int i = blockDim.x * blockIdx.x + threadIdx.x; if(i<ilunarraysize) cuda_lookup(&(ilun_array[i]), (struct internal_node*)((char*)d_serializedtree + ((struct lpm_tree*)d_serializedtree)->h_offset)); //cuPrintf("here\n"); } */ __global__ void cuda_addonip(struct iplookup_node *ilun_array, uint32_t ilunarraysize) { //int i = threadIdx.x; int i = blockDim.x * blockIdx.x + threadIdx.x; if(i<ilunarraysize) (&ilun_array[i])->ip += MAX_BITS; } char* _transfer_to_gpu(char *buffer, uint32_t size) { //allocate space on the device for the tree char *d_buffer = NULL; cudaError_t err; err = cudaMalloc((void **)&d_buffer, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate memory on GPU (error code %s)!\n", cudaGetErrorString(err)); exit(-1); } //copy buffer on the device err = cudaMemcpy(d_buffer, buffer, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy tree on the GPU (error code %s)!\n", cudaGetErrorString(err)); exit(-2); } DEBUG("Transfer to device (%d bytes from %p) successful.\n", size, buffer); return d_buffer; } void* _transfer_to_host(char *d_buffer, char *buffer, uint32_t size) { cudaError_t err; //memset(buffer, '\0', size); //copy device buffer to the host buffer err = cudaMemcpy(buffer, d_buffer, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy device buffer to host (error code %s)!\n", cudaGetErrorString(err)); exit(-4); } DEBUG("Transfer to host (%d bytes at %p) successful.\n", size, buffer); return (void*)buffer; } char* go_cuda(char *serializedtree, uint32_t treesize, struct iplookup_node *ilun_array, uint32_t ilunarraysize) { char *d_serializedtree = NULL; struct iplookup_node* d_ilun_array = NULL; cudaError_t err; #ifdef USECUPRINTF cudaPrintfInit(); #endif //DEBUG("go_cuda received: treeser %p size %d ilun %p ilunarraysize %d\n", serializedtree, treesize, &(ilun_array[0]), ilunarraysize); if(serializedtree != NULL) // the idea is not to transfer if not needed, in that case take old pointer 
d_serializedtree = _transfer_to_gpu(serializedtree, treesize); d_ilun_array = (struct iplookup_node *)_transfer_to_gpu((char *)ilun_array, ilunarraysize * sizeof(struct iplookup_node) ); int threadsPerBlock = 256; int blocksPerGrid =(ilunarraysize + threadsPerBlock - 1) / threadsPerBlock; //int blocksPerGrid =1; printf("CUDA launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); cuda_lpm_lookup<<<blocksPerGrid, threadsPerBlock>>>(d_serializedtree, d_ilun_array, ilunarraysize); //cuda_addonip<<<blocksPerGrid, threadsPerBlock>>>(d_ilun_array, ilunarraysize); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch lookup kernel (error code %s)!\n", cudaGetErrorString(err)); exit(-3); } else printf("CUDA launch successful!\n"); //d_ilun_array->port += 1; _transfer_to_host((char *)d_ilun_array, (char *)ilun_array, ilunarraysize*sizeof(struct iplookup_node)); _transfer_to_host((char *)d_serializedtree, (char *)serializedtree, treesize); #ifdef USECUPRINTF cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); #endif cudaFree(d_serializedtree); cudaFree(d_ilun_array); cudaDeviceSynchronize(); cudaDeviceReset(); //cuda_lpm_lookup_oncpu(serializedtree, ilun_array, ilunarraysize); //DEBUG("go_cuda delivers: treeser %p size %ud ilun %p ilunarraysize %ud (sizeof char %lu, sizeof char* %lu)\n", serializedtree, treesize, &(ilun_array[0]), ilunarraysize, sizeof(char), sizeof(char*)); //DEBUG("strustrure sizes: structtree=%d pointertree=%d structintnode=%d structdatanode=%d structilun=%d uint64=%d,uint32=%d, uint16=%d):\n", sizeof(struct lpm_tree),sizeof(struct lpm_tree*), sizeof(struct internal_node),sizeof(struct data_node),sizeof(struct iplookup_node), sizeof(uint64_t), sizeof(uint32_t), sizeof(uint16_t)); return serializedtree; }
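The pair above ships a serialized longest-prefix-match trie to the GPU and walks it in cuda_lpm_lookup: nodes carry l_offset/r_offset byte offsets instead of pointers, so the tree stays valid after a single cudaMemcpy, and the original pow((double)2, (double)b) calls have been replaced with shifts. The two bit manipulations the traversal depends on are easy to misread, so the snippet below restates them as plain host C. The function names and the literal IP are made up for illustration; the real node layout lives in cuda-lookup.cuh, which is not shown here, and netmask is assumed to be in 1..32 (a shift by 32 would be undefined).

#include <stdint.h>
#include <stdio.h>

// Branch decision: test bit b (counting down from MAX_BITS) of the IP.
static int go_right(uint32_t ip, uint32_t b) {
    return (ip & ((uint32_t)1 << b)) != 0;
}

// Prefix match: keep the top `netmask` bits and compare against the stored prefix,
// exactly the mask construction used in the kernels above.
static int prefix_matches(uint32_t ip, uint32_t prefix, uint32_t netmask) {
    uint32_t mask = 0xFFFFFFFFu - (((uint32_t)1 << (32 - netmask)) - 1);
    return (ip & mask) == prefix;
}

int main(void) {
    uint32_t ip = 0xC0A80105u;                              /* 192.168.1.5 */
    printf("%d\n", prefix_matches(ip, 0xC0A80100u, 24));    /* 1: matches 192.168.1.0/24 */
    printf("%d\n", go_right(ip, 31));                       /* 1: top bit of 192.x.x.x is set */
    return 0;
}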
b3269ab7bdd2195338f6e992280a8ba456ff1f06.hip
// !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>

// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.

namespace at { namespace native {

void eq_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "eq_cuda", [&]() {
    AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "eq_cuda", [&] {
      gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
        return a == b;
      });
    });
  });
}

REGISTER_DISPATCH(eq_stub, &eq_kernel_cuda);

}} // namespace at::native
b3269ab7bdd2195338f6e992280a8ba456ff1f06.cu
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>

// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.

namespace at { namespace native {

void eq_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "eq_cuda", [&]() {
    AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "eq_cuda", [&] {
      gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
        return a == b;
      });
    });
  });
}

REGISTER_DISPATCH(eq_stub, &eq_kernel_cuda);

}} // namespace at::native
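This row is the smallest pair in the set: the only differences hipify introduces are the banner comment and the include path (ATen/native/cuda/Loops.cuh versus ATen/native/hip/Loops.cuh), because the comparison itself is a lambda handed to gpu_kernel_with_scalars and dispatched over dtypes by AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3. As a rough mental model only (this is not the real TensorIterator machinery), the pattern boils down to launching an elementwise functor, roughly as below; EqOp and elementwise_compare are invented names for the sketch.

#include <cuda_runtime.h>

// Invented functor standing in for the lambda in the file above.
template <typename scalar_t>
struct EqOp {
    __device__ bool operator()(scalar_t a, scalar_t b) const { return a == b; }
};

// Minimal elementwise launch; the ATen version additionally handles broadcasting,
// scalar operands, dtype dispatch and non-contiguous layouts.
template <typename scalar_t, typename Op>
__global__ void elementwise_compare(const scalar_t *a, const scalar_t *b, bool *out,
                                    long long n, Op op)
{
    long long i = blockIdx.x * (long long)blockDim.x + threadIdx.x;
    if (i < n)
        out[i] = op(a[i], b[i]);
}

// Usage sketch:
//   elementwise_compare<<<(n + 255) / 256, 256>>>(d_a, d_b, d_out, n, EqOp<float>{});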
76ef9be8e5e07580a3b41ff58eed438aee82254b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: dnlebard #include "HarmonicAngleForceGPU.cuh" #include "hoomd/TextureTools.h" #include <assert.h> // SMALL a relatively small number #define SMALL Scalar(0.001) /*! \file HarmonicAngleForceGPU.cu \brief Defines GPU kernel code for calculating the harmonic angle forces. Used by HarmonicAngleForceComputeGPU. */ //! Texture for reading angle parameters scalar2_tex_t angle_params_tex; //! Kernel for calculating harmonic angle forces on the GPU /*! \param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch Pitch of 2D virial array \param N number of particles \param d_pos device array of particle positions \param d_params Parameters for the angle force \param box Box dimensions for periodic boundary condition handling \param alist Angle data to use in calculating the forces \param pitch Pitch of 2D angles list \param n_angles_list List of numbers of angles stored on the GPU */ extern "C" __global__ void gpu_compute_harmonic_angle_forces_kernel(Scalar4* d_force, Scalar* d_virial, const unsigned int virial_pitch, const unsigned int N, const Scalar4 *d_pos, const Scalar2 *d_params, BoxDim box, const group_storage<3> *alist, const unsigned int *apos_list, const unsigned int pitch, const unsigned int *n_angles_list) { // start by identifying which particle we are to handle int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; // load in the length of the list for this thread (MEM TRANSFER: 4 bytes) int n_angles = n_angles_list[idx]; // read in the position of our b-particle from the a-b-c triplet. 
(MEM TRANSFER: 16 bytes) Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c triplet Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z); Scalar3 a_pos,b_pos,c_pos; // allocate space for the a,b, and c atom in the a-b-c triplet // initialize the force to 0 Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0)); Scalar fab[3], fcb[3]; // initialize the virial to 0 Scalar virial[6]; for (int i = 0; i < 6; i++) virial[i] = Scalar(0.0); // loop over all angles for (int angle_idx = 0; angle_idx < n_angles; angle_idx++) { group_storage<3> cur_angle = alist[pitch*angle_idx + idx]; int cur_angle_x_idx = cur_angle.idx[0]; int cur_angle_y_idx = cur_angle.idx[1]; int cur_angle_type = cur_angle.idx[2]; int cur_angle_abc = apos_list[pitch*angle_idx + idx]; // get the a-particle's position (MEM TRANSFER: 16 bytes) Scalar4 x_postype = d_pos[cur_angle_x_idx]; Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z); // get the c-particle's position (MEM TRANSFER: 16 bytes) Scalar4 y_postype = d_pos[cur_angle_y_idx]; Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z); if (cur_angle_abc == 0) { a_pos = idx_pos; b_pos = x_pos; c_pos = y_pos; } if (cur_angle_abc == 1) { b_pos = idx_pos; a_pos = x_pos; c_pos = y_pos; } if (cur_angle_abc == 2) { c_pos = idx_pos; a_pos = x_pos; b_pos = y_pos; } // calculate dr for a-b,c-b,and a-c Scalar3 dab = a_pos - b_pos; Scalar3 dcb = c_pos - b_pos; Scalar3 dac = a_pos - c_pos; // apply periodic boundary conditions dab = box.minImage(dab); dcb = box.minImage(dcb); dac = box.minImage(dac); // get the angle parameters (MEM TRANSFER: 8 bytes) Scalar2 params = texFetchScalar2(d_params, angle_params_tex, cur_angle_type); Scalar K = params.x; Scalar t_0 = params.y; Scalar rsqab = dot(dab, dab); Scalar rab = sqrtf(rsqab); Scalar rsqcb = dot(dcb, dcb); Scalar rcb = sqrtf(rsqcb); Scalar c_abbc = dot(dab, dcb); c_abbc /= rab*rcb; if (c_abbc > Scalar(1.0)) c_abbc = Scalar(1.0); if (c_abbc < -Scalar(1.0)) c_abbc = -Scalar(1.0); Scalar s_abbc = sqrtf(Scalar(1.0) - c_abbc*c_abbc); if (s_abbc < SMALL) s_abbc = SMALL; s_abbc = Scalar(1.0)/s_abbc; // actually calculate the force Scalar dth = fast::acos(c_abbc) - t_0; Scalar tk = K*dth; Scalar a = -Scalar(1.0) * tk * s_abbc; Scalar a11 = a*c_abbc/rsqab; Scalar a12 = -a / (rab*rcb); Scalar a22 = a*c_abbc / rsqcb; fab[0] = a11*dab.x + a12*dcb.x; fab[1] = a11*dab.y + a12*dcb.y; fab[2] = a11*dab.z + a12*dcb.z; fcb[0] = a22*dcb.x + a12*dab.x; fcb[1] = a22*dcb.y + a12*dab.y; fcb[2] = a22*dcb.z + a12*dab.z; // compute 1/3 of the energy, 1/3 for each atom in the angle Scalar angle_eng = tk*dth*Scalar(Scalar(1.0)/Scalar(6.0)); // upper triangular version of virial tensor Scalar angle_virial[6]; angle_virial[0] = Scalar(1./3.)*(dab.x*fab[0] + dcb.x*fcb[0]); angle_virial[1] = Scalar(1./3.)*(dab.y*fab[0] + dcb.y*fcb[0]); angle_virial[2] = Scalar(1./3.)*(dab.z*fab[0] + dcb.z*fcb[0]); angle_virial[3] = Scalar(1./3.)*(dab.y*fab[1] + dcb.y*fcb[1]); angle_virial[4] = Scalar(1./3.)*(dab.z*fab[1] + dcb.z*fcb[1]); angle_virial[5] = Scalar(1./3.)*(dab.z*fab[2] + dcb.z*fcb[2]); if (cur_angle_abc == 0) { force_idx.x += fab[0]; force_idx.y += fab[1]; force_idx.z += fab[2]; } if (cur_angle_abc == 1) { force_idx.x -= fab[0] + fcb[0]; force_idx.y -= fab[1] + fcb[1]; force_idx.z -= fab[2] + fcb[2]; } if (cur_angle_abc == 2) { force_idx.x += fcb[0]; force_idx.y += fcb[1]; force_idx.z += fcb[2]; } force_idx.w += angle_eng; for (int i = 0; i < 6; i++) virial[i] += 
angle_virial[i]; } // now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes) d_force[idx] = force_idx; for (int i = 0; i < 6; i++) d_virial[i*virial_pitch+idx] = virial[i]; } /*! \param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch pitch of 2D virial array \param N number of particles \param d_pos device array of particle positions \param box Box dimensions (in GPU format) to use for periodic boundary conditions \param atable List of angles stored on the GPU \param pitch Pitch of 2D angles list \param n_angles_list List of numbers of angles stored on the GPU \param d_params K and t_0 params packed as Scalar2 variables \param n_angle_types Number of angle types in d_params \param block_size Block size to use when performing calculations \param compute_capability Device compute capability (200, 300, 350, ...) \returns Any error code resulting from the kernel launch \note Always returns hipSuccess in release builds to avoid the hipDeviceSynchronize() \a d_params should include one Scalar2 element per angle type. The x component contains K the spring constant and the y component contains t_0 the equilibrium angle. */ hipError_t gpu_compute_harmonic_angle_forces(Scalar4* d_force, Scalar* d_virial, const unsigned int virial_pitch, const unsigned int N, const Scalar4 *d_pos, const BoxDim& box, const group_storage<3> *atable, const unsigned int *apos_list, const unsigned int pitch, const unsigned int *n_angles_list, Scalar2 *d_params, unsigned int n_angle_types, int block_size, const unsigned int compute_capability) { assert(d_params); static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void *)gpu_compute_harmonic_angle_forces_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); // setup the grid to run the kernel dim3 grid( N / run_block_size + 1, 1, 1); dim3 threads(run_block_size, 1, 1); // bind the texture on pre sm 35 arches if (compute_capability < 350) { hipError_t error = hipBindTexture(0, angle_params_tex, d_params, sizeof(Scalar2) * n_angle_types); if (error != hipSuccess) return error; } // run the kernel hipLaunchKernelGGL(( gpu_compute_harmonic_angle_forces_kernel), dim3(grid), dim3(threads), 0, 0, d_force, d_virial, virial_pitch, N, d_pos, d_params, box, atable, apos_list, pitch, n_angles_list); return hipSuccess; }
76ef9be8e5e07580a3b41ff58eed438aee82254b.cu
// Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: dnlebard #include "HarmonicAngleForceGPU.cuh" #include "hoomd/TextureTools.h" #include <assert.h> // SMALL a relatively small number #define SMALL Scalar(0.001) /*! \file HarmonicAngleForceGPU.cu \brief Defines GPU kernel code for calculating the harmonic angle forces. Used by HarmonicAngleForceComputeGPU. */ //! Texture for reading angle parameters scalar2_tex_t angle_params_tex; //! Kernel for calculating harmonic angle forces on the GPU /*! \param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch Pitch of 2D virial array \param N number of particles \param d_pos device array of particle positions \param d_params Parameters for the angle force \param box Box dimensions for periodic boundary condition handling \param alist Angle data to use in calculating the forces \param pitch Pitch of 2D angles list \param n_angles_list List of numbers of angles stored on the GPU */ extern "C" __global__ void gpu_compute_harmonic_angle_forces_kernel(Scalar4* d_force, Scalar* d_virial, const unsigned int virial_pitch, const unsigned int N, const Scalar4 *d_pos, const Scalar2 *d_params, BoxDim box, const group_storage<3> *alist, const unsigned int *apos_list, const unsigned int pitch, const unsigned int *n_angles_list) { // start by identifying which particle we are to handle int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; // load in the length of the list for this thread (MEM TRANSFER: 4 bytes) int n_angles = n_angles_list[idx]; // read in the position of our b-particle from the a-b-c triplet. (MEM TRANSFER: 16 bytes) Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c triplet Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z); Scalar3 a_pos,b_pos,c_pos; // allocate space for the a,b, and c atom in the a-b-c triplet // initialize the force to 0 Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0)); Scalar fab[3], fcb[3]; // initialize the virial to 0 Scalar virial[6]; for (int i = 0; i < 6; i++) virial[i] = Scalar(0.0); // loop over all angles for (int angle_idx = 0; angle_idx < n_angles; angle_idx++) { group_storage<3> cur_angle = alist[pitch*angle_idx + idx]; int cur_angle_x_idx = cur_angle.idx[0]; int cur_angle_y_idx = cur_angle.idx[1]; int cur_angle_type = cur_angle.idx[2]; int cur_angle_abc = apos_list[pitch*angle_idx + idx]; // get the a-particle's position (MEM TRANSFER: 16 bytes) Scalar4 x_postype = d_pos[cur_angle_x_idx]; Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z); // get the c-particle's position (MEM TRANSFER: 16 bytes) Scalar4 y_postype = d_pos[cur_angle_y_idx]; Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z); if (cur_angle_abc == 0) { a_pos = idx_pos; b_pos = x_pos; c_pos = y_pos; } if (cur_angle_abc == 1) { b_pos = idx_pos; a_pos = x_pos; c_pos = y_pos; } if (cur_angle_abc == 2) { c_pos = idx_pos; a_pos = x_pos; b_pos = y_pos; } // calculate dr for a-b,c-b,and a-c Scalar3 dab = a_pos - b_pos; Scalar3 dcb = c_pos - b_pos; Scalar3 dac = a_pos - c_pos; // apply periodic boundary conditions dab = box.minImage(dab); dcb = box.minImage(dcb); dac = box.minImage(dac); // get the angle parameters (MEM TRANSFER: 8 bytes) Scalar2 params = texFetchScalar2(d_params, angle_params_tex, cur_angle_type); Scalar K = 
params.x; Scalar t_0 = params.y; Scalar rsqab = dot(dab, dab); Scalar rab = sqrtf(rsqab); Scalar rsqcb = dot(dcb, dcb); Scalar rcb = sqrtf(rsqcb); Scalar c_abbc = dot(dab, dcb); c_abbc /= rab*rcb; if (c_abbc > Scalar(1.0)) c_abbc = Scalar(1.0); if (c_abbc < -Scalar(1.0)) c_abbc = -Scalar(1.0); Scalar s_abbc = sqrtf(Scalar(1.0) - c_abbc*c_abbc); if (s_abbc < SMALL) s_abbc = SMALL; s_abbc = Scalar(1.0)/s_abbc; // actually calculate the force Scalar dth = fast::acos(c_abbc) - t_0; Scalar tk = K*dth; Scalar a = -Scalar(1.0) * tk * s_abbc; Scalar a11 = a*c_abbc/rsqab; Scalar a12 = -a / (rab*rcb); Scalar a22 = a*c_abbc / rsqcb; fab[0] = a11*dab.x + a12*dcb.x; fab[1] = a11*dab.y + a12*dcb.y; fab[2] = a11*dab.z + a12*dcb.z; fcb[0] = a22*dcb.x + a12*dab.x; fcb[1] = a22*dcb.y + a12*dab.y; fcb[2] = a22*dcb.z + a12*dab.z; // compute 1/3 of the energy, 1/3 for each atom in the angle Scalar angle_eng = tk*dth*Scalar(Scalar(1.0)/Scalar(6.0)); // upper triangular version of virial tensor Scalar angle_virial[6]; angle_virial[0] = Scalar(1./3.)*(dab.x*fab[0] + dcb.x*fcb[0]); angle_virial[1] = Scalar(1./3.)*(dab.y*fab[0] + dcb.y*fcb[0]); angle_virial[2] = Scalar(1./3.)*(dab.z*fab[0] + dcb.z*fcb[0]); angle_virial[3] = Scalar(1./3.)*(dab.y*fab[1] + dcb.y*fcb[1]); angle_virial[4] = Scalar(1./3.)*(dab.z*fab[1] + dcb.z*fcb[1]); angle_virial[5] = Scalar(1./3.)*(dab.z*fab[2] + dcb.z*fcb[2]); if (cur_angle_abc == 0) { force_idx.x += fab[0]; force_idx.y += fab[1]; force_idx.z += fab[2]; } if (cur_angle_abc == 1) { force_idx.x -= fab[0] + fcb[0]; force_idx.y -= fab[1] + fcb[1]; force_idx.z -= fab[2] + fcb[2]; } if (cur_angle_abc == 2) { force_idx.x += fcb[0]; force_idx.y += fcb[1]; force_idx.z += fcb[2]; } force_idx.w += angle_eng; for (int i = 0; i < 6; i++) virial[i] += angle_virial[i]; } // now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes) d_force[idx] = force_idx; for (int i = 0; i < 6; i++) d_virial[i*virial_pitch+idx] = virial[i]; } /*! \param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch pitch of 2D virial array \param N number of particles \param d_pos device array of particle positions \param box Box dimensions (in GPU format) to use for periodic boundary conditions \param atable List of angles stored on the GPU \param pitch Pitch of 2D angles list \param n_angles_list List of numbers of angles stored on the GPU \param d_params K and t_0 params packed as Scalar2 variables \param n_angle_types Number of angle types in d_params \param block_size Block size to use when performing calculations \param compute_capability Device compute capability (200, 300, 350, ...) \returns Any error code resulting from the kernel launch \note Always returns cudaSuccess in release builds to avoid the cudaThreadSynchronize() \a d_params should include one Scalar2 element per angle type. The x component contains K the spring constant and the y component contains t_0 the equilibrium angle. 
*/ cudaError_t gpu_compute_harmonic_angle_forces(Scalar4* d_force, Scalar* d_virial, const unsigned int virial_pitch, const unsigned int N, const Scalar4 *d_pos, const BoxDim& box, const group_storage<3> *atable, const unsigned int *apos_list, const unsigned int pitch, const unsigned int *n_angles_list, Scalar2 *d_params, unsigned int n_angle_types, int block_size, const unsigned int compute_capability) { assert(d_params); static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void *)gpu_compute_harmonic_angle_forces_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); // setup the grid to run the kernel dim3 grid( N / run_block_size + 1, 1, 1); dim3 threads(run_block_size, 1, 1); // bind the texture on pre sm 35 arches if (compute_capability < 350) { cudaError_t error = cudaBindTexture(0, angle_params_tex, d_params, sizeof(Scalar2) * n_angle_types); if (error != cudaSuccess) return error; } // run the kernel gpu_compute_harmonic_angle_forces_kernel<<< grid, threads>>>(d_force, d_virial, virial_pitch, N, d_pos, d_params, box, atable, apos_list, pitch, n_angles_list); return cudaSuccess; }
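The kernel above evaluates the harmonic angle potential U(theta) = 0.5*K*(theta - t_0)^2 for every a-b-c triplet a particle belongs to, crediting each of the three atoms with one third of the energy. A minimal host-side sketch of the same per-angle math follows (the struct and function names here are invented for illustration and are not part of the HOOMD-blue sources):

// Host-side illustration of the per-angle math used by the kernel above.
// Vec3, dot and computeHarmonicAngle are invented names for this sketch.
#include <algorithm>
#include <cmath>

struct Vec3 { double x, y, z; };

static double dot(const Vec3& a, const Vec3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; }

// dab = r_a - r_b, dcb = r_c - r_b (already minimum-imaged); K and t_0 as in d_params.
// Fills fab/fcb with the forces on atoms a and c; the force on b is -(fab + fcb),
// exactly as the kernel's cur_angle_abc == 1 branch applies it.
double computeHarmonicAngle(const Vec3& dab, const Vec3& dcb,
                            double K, double t_0, Vec3& fab, Vec3& fcb)
{
    const double SMALL = 0.001;
    double rsqab = dot(dab, dab), rab = std::sqrt(rsqab);
    double rsqcb = dot(dcb, dcb), rcb = std::sqrt(rsqcb);

    double c = dot(dab, dcb) / (rab * rcb);        // cos(theta)
    c = std::max(-1.0, std::min(1.0, c));
    double s = std::sqrt(1.0 - c * c);             // sin(theta), floored to avoid 1/0
    if (s < SMALL) s = SMALL;
    s = 1.0 / s;

    double dth = std::acos(c) - t_0;               // theta - theta_0
    double tk  = K * dth;                          // dU/dtheta
    double a   = -tk * s;                          // prefactor -(dU/dtheta)/sin(theta)
    double a11 = a * c / rsqab, a12 = -a / (rab * rcb), a22 = a * c / rsqcb;

    fab = { a11*dab.x + a12*dcb.x, a11*dab.y + a12*dcb.y, a11*dab.z + a12*dcb.z };
    fcb = { a22*dcb.x + a12*dab.x, a22*dcb.y + a12*dab.y, a22*dcb.z + a12*dab.z };

    // Total angle energy; the kernel stores one third of this per participating atom.
    return 0.5 * K * dth * dth;
}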
5a4fc548f57e913e6d52d9d73eaa2b3a8e26912d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * main.cu * * Created on: Nov 14, 2019 * Author: cuda-s01 */ #include <stdio.h> const int TILE_WIDTH = 2; __global__ void matrixMultiplicationKernel(float* M, float* N, float* P, int Width) { // Calculate the row index of the P element and M int Row = blockIdx.y*blockDim.y+threadIdx.y; // Calculate the column index of P and N int Col = blockIdx.x*blockDim.x+threadIdx.x; __shared__ float sum_M[TILE_WIDTH][TILE_WIDTH]; __shared__ float sum_N[TILE_WIDTH][TILE_WIDTH]; sum_M[threadIdx.y][threadIdx.x]=0.0; sum_N[threadIdx.y][threadIdx.x]=0.0; float Pval = 0; for(int k=0; k<((Width - 1)/TILE_WIDTH + 1); k++) { //printf("Col:%d, Row:%d, k:%d, th:(%d,%d), "); if(k*TILE_WIDTH + threadIdx.x < Width && Row < Width) sum_M[threadIdx.y][threadIdx.x] = M[Row*Width + k*TILE_WIDTH + threadIdx.x]; else sum_M[threadIdx.y][threadIdx.x] = 0.0; if(k*TILE_WIDTH + threadIdx.y < Width && Col < Width) sum_N[threadIdx.y][threadIdx.x] = N[(k*TILE_WIDTH + threadIdx.y)*Width + Col]; else sum_N[threadIdx.y][threadIdx.x] = 0.0; __syncthreads(); for(int n=0; n<TILE_WIDTH;++n) Pval += sum_M[threadIdx.y][n] * sum_N[n][threadIdx.x]; __syncthreads(); } if(Row < Width && Col < Width) { P[Row * Width + Col] = Pval; //printf("(%d,%d)=%f\n",Row,Col,P[Row*Width+Col]); } } void matrixMultiplication(float *M, float *N, float *P, int Width){ // declare the number of blocks per grid and the number of threads per block int th = TILE_WIDTH; int bl = (Width/TILE_WIDTH) + 1; dim3 threadsPerBlock(th,th,1); dim3 blocksPerGrid(bl,bl,1); printf("Kernel started: (%d,%d,1) grid, (%d,%d,1) blocks.\n", bl,bl, th,th); hipLaunchKernelGGL(( matrixMultiplicationKernel), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, M, N, P, Width); } void PrintMatrix(float* M, int Width) { for(int i = 0; i < Width; i++) { for(int j = 0; j < Width; j++) printf("%f ",M[i*Width+j]); printf("\n"); } printf("\n"); } int main(void) { printf("Starting the program:\n"); hipError_t err = hipSuccess; int matrix_size = 8; int num_of_elements = matrix_size * matrix_size; size_t size = num_of_elements * sizeof(float); printf("matrix [%d x %d] multiplication.\n", matrix_size, matrix_size); //==========================Shared Memory============================================ //allocate matrixes on the device: printf("Started variables allocation for the device.\n"); printf("First matrix.\n"); float *M; err = hipMallocManaged((void**)&M, size); if(err != hipSuccess) { fprintf(stderr, "Failed to allocate M matrix!\n"); exit(EXIT_FAILURE); } else printf("Allocation successful.\n"); printf("Second matrix.\n"); float *N; err = hipMallocManaged((void**)&N, size); if(err != hipSuccess) { fprintf(stderr, "Failed to allocate N matrix!\n"); exit(EXIT_FAILURE); } else printf("Allocation successful.\n"); printf("Third matrix.\n"); float *P; err = hipMallocManaged((void**)&P, size); if(err != hipSuccess) { fprintf(stderr, "Failed to allocate P matrix!\n"); exit(EXIT_FAILURE); } else printf("Allocation successful.\n"); //initialisation: for(int i=0; i<num_of_elements; i++) { M[i] = rand()/(float)RAND_MAX; N[i] = rand()/(float)RAND_MAX; } printf("Initialisation finished.\n"); //calculations: matrixMultiplication(M, N, P, matrix_size); err = hipGetLastError(); if(err != hipSuccess) { fprintf(stderr, "Failed to launch kernel. 
Error: %s.\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } else printf("Kerel operations successful.\n"); //==========================TEST=============================================== PrintMatrix(M, matrix_size); PrintMatrix(N, matrix_size); PrintMatrix(P, matrix_size); for(int i = 0; i < matrix_size; i++) { for(int j = 0; j < matrix_size; j++) { float tmp = 0; for(int k = 0; k < matrix_size; k++) tmp += M[i*matrix_size + k] * N[k*matrix_size + j]; //debug line: //printf("%f ",tmp); if(fabs(tmp - P[i*matrix_size + j]) > 1e-3) { fprintf(stderr, "Verification test failed.!\nElement at index (%d, %d) should be %f, but is %f. \n", i,j,tmp,P[i*matrix_size + j]); exit(EXIT_FAILURE); } } } printf("Test PASSED\n"); // Free device global memory err = hipFree(M); if (err != hipSuccess) { fprintf(stderr, "Failed to free device matrix M (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(N); if (err != hipSuccess) { fprintf(stderr, "Failed to free device matrix N (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(P); if (err != hipSuccess) { fprintf(stderr, "Failed to free device matrix P (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; }
5a4fc548f57e913e6d52d9d73eaa2b3a8e26912d.cu
/* * main.cu * * Created on: Nov 14, 2019 * Author: cuda-s01 */ #include <stdio.h> const int TILE_WIDTH = 2; __global__ void matrixMultiplicationKernel(float* M, float* N, float* P, int Width) { // Calculate the row index of the P element and M int Row = blockIdx.y*blockDim.y+threadIdx.y; // Calculate the column index of P and N int Col = blockIdx.x*blockDim.x+threadIdx.x; __shared__ float sum_M[TILE_WIDTH][TILE_WIDTH]; __shared__ float sum_N[TILE_WIDTH][TILE_WIDTH]; sum_M[threadIdx.y][threadIdx.x]=0.0; sum_N[threadIdx.y][threadIdx.x]=0.0; float Pval = 0; for(int k=0; k<((Width - 1)/TILE_WIDTH + 1); k++) { //printf("Col:%d, Row:%d, k:%d, th:(%d,%d), "); if(k*TILE_WIDTH + threadIdx.x < Width && Row < Width) sum_M[threadIdx.y][threadIdx.x] = M[Row*Width + k*TILE_WIDTH + threadIdx.x]; else sum_M[threadIdx.y][threadIdx.x] = 0.0; if(k*TILE_WIDTH + threadIdx.y < Width && Col < Width) sum_N[threadIdx.y][threadIdx.x] = N[(k*TILE_WIDTH + threadIdx.y)*Width + Col]; else sum_N[threadIdx.y][threadIdx.x] = 0.0; __syncthreads(); for(int n=0; n<TILE_WIDTH;++n) Pval += sum_M[threadIdx.y][n] * sum_N[n][threadIdx.x]; __syncthreads(); } if(Row < Width && Col < Width) { P[Row * Width + Col] = Pval; //printf("(%d,%d)=%f\n",Row,Col,P[Row*Width+Col]); } } void matrixMultiplication(float *M, float *N, float *P, int Width){ // declare the number of blocks per grid and the number of threads per block int th = TILE_WIDTH; int bl = (Width/TILE_WIDTH) + 1; dim3 threadsPerBlock(th,th,1); dim3 blocksPerGrid(bl,bl,1); printf("Kernel started: (%d,%d,1) grid, (%d,%d,1) blocks.\n", bl,bl, th,th); matrixMultiplicationKernel<<<blocksPerGrid,threadsPerBlock>>>(M, N, P, Width); } void PrintMatrix(float* M, int Width) { for(int i = 0; i < Width; i++) { for(int j = 0; j < Width; j++) printf("%f ",M[i*Width+j]); printf("\n"); } printf("\n"); } int main(void) { printf("Starting the program:\n"); cudaError_t err = cudaSuccess; int matrix_size = 8; int num_of_elements = matrix_size * matrix_size; size_t size = num_of_elements * sizeof(float); printf("matrix [%d x %d] multiplication.\n", matrix_size, matrix_size); //==========================Shared Memory============================================ //allocate matrixes on the device: printf("Started variables allocation for the device.\n"); printf("First matrix.\n"); float *M; err = cudaMallocManaged((void**)&M, size); if(err != cudaSuccess) { fprintf(stderr, "Failed to allocate M matrix!\n"); exit(EXIT_FAILURE); } else printf("Allocation successful.\n"); printf("Second matrix.\n"); float *N; err = cudaMallocManaged((void**)&N, size); if(err != cudaSuccess) { fprintf(stderr, "Failed to allocate N matrix!\n"); exit(EXIT_FAILURE); } else printf("Allocation successful.\n"); printf("Third matrix.\n"); float *P; err = cudaMallocManaged((void**)&P, size); if(err != cudaSuccess) { fprintf(stderr, "Failed to allocate P matrix!\n"); exit(EXIT_FAILURE); } else printf("Allocation successful.\n"); //initialisation: for(int i=0; i<num_of_elements; i++) { M[i] = rand()/(float)RAND_MAX; N[i] = rand()/(float)RAND_MAX; } printf("Initialisation finished.\n"); //calculations: matrixMultiplication(M, N, P, matrix_size); err = cudaGetLastError(); if(err != cudaSuccess) { fprintf(stderr, "Failed to launch kernel. 
Error: %s.\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } else printf("Kerel operations successful.\n"); //==========================TEST=============================================== PrintMatrix(M, matrix_size); PrintMatrix(N, matrix_size); PrintMatrix(P, matrix_size); for(int i = 0; i < matrix_size; i++) { for(int j = 0; j < matrix_size; j++) { float tmp = 0; for(int k = 0; k < matrix_size; k++) tmp += M[i*matrix_size + k] * N[k*matrix_size + j]; //debug line: //printf("%f ",tmp); if(fabs(tmp - P[i*matrix_size + j]) > 1e-3) { fprintf(stderr, "Verification test failed.!\nElement at index (%d, %d) should be %f, but is %f. \n", i,j,tmp,P[i*matrix_size + j]); exit(EXIT_FAILURE); } } } printf("Test PASSED\n"); // Free device global memory err = cudaFree(M); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device matrix M (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(N); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device matrix N (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(P); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device matrix P (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; }
a835324f79caf2e6a2e2b8e41fa1c82ab90d4aa7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" { typedef struct { int e0; char* e1; } struct_Buffer_4844; typedef struct { struct_Buffer_4844 e0; struct_Buffer_4844 e1; int e2; int e3; } struct_image_4847; __device__ inline int threadIdx_x() { return threadIdx.x; } __device__ inline int threadIdx_y() { return threadIdx.y; } __device__ inline int threadIdx_z() { return threadIdx.z; } __device__ inline int blockIdx_x() { return blockIdx.x; } __device__ inline int blockIdx_y() { return blockIdx.y; } __device__ inline int blockIdx_z() { return blockIdx.z; } __device__ inline int blockDim_x() { return blockDim.x; } __device__ inline int blockDim_y() { return blockDim.y; } __device__ inline int blockDim_z() { return blockDim.z; } __device__ inline int gridDim_x() { return gridDim.x; } __device__ inline int gridDim_y() { return gridDim.y; } __device__ inline int gridDim_z() { return gridDim.z; } __global__ void lambda_17870(double*, struct_Buffer_4844, struct_image_4847, double*); __global__ __launch_bounds__ (128 * 1 * 1) void lambda_17870(double* _17873_19351, struct_Buffer_4844 _17874_19352, struct_image_4847 _17875_19353, double* _17876_19354) { int _19360; int p_19360; int _19366; int p_19366; int _19372; int p_19372; int _19378; int p_19378; int _19384; int p_19384; int _19390; int p_19390; int _19415; int p_19415; double sum_19417; double psum_19417; int _19422; int p_19422; double sum_19424; double psum_19424; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _19360 = blockIdx_x(); p_19360 = _19360; l19358: ; _19360 = p_19360; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _19366 = blockDim_x(); p_19366 = _19366; l19364: ; _19366 = p_19366; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _19372 = threadIdx_x(); p_19372 = _19372; l19370: ; _19372 = p_19372; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _19378 = blockIdx_y(); p_19378 = _19378; l19376: ; _19378 = p_19378; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _19384 = blockDim_y(); p_19384 = _19384; l19382: ; _19384 = p_19384; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _19390 = threadIdx_y(); p_19390 = _19390; l19388: ; _19390 = p_19390; #line 11 "main.impala" int _19394; _19394 = _17875_19353.e2; #line 119 "gpu_device.impala" int _19391; _19391 = _19360 * _19366; #line 119 "gpu_device.impala" int gid_x_19392; gid_x_19392 = _19391 + _19372; #line 124 "gpu_device.impala" bool _19395; _19395 = gid_x_19392 < _19394; #line 124 "gpu_device.impala" if (_19395) goto l19396; else goto l19476; l19476: ; #line 127 "gpu_device.impala" goto l19475; l19396: ; #line 121 "gpu_device.impala" int _19397; _19397 = _19378 * _19384; #line 121 "gpu_device.impala" int gid_y_19398; gid_y_19398 = _19397 + _19390; #line 11 "main.impala" int _19400; _19400 = _17875_19353.e3; #line 124 "gpu_device.impala" bool _19401; _19401 = gid_y_19398 < _19400; #line 124 "gpu_device.impala" if (_19401) goto l19402; else goto l19474; l19474: ; #line 127 "gpu_device.impala" goto l19475; l19475: ; return ; l19402: ; #line 68 "gaussian.impala" bool _19404; _19404 = 3 <= gid_x_19392; #line 49 "gpu_device.impala" int _19459; _19459 = gid_y_19398 * _19394; #line 49 "gpu_device.impala" int _19460; _19460 = _19459 + gid_x_19392; #line 44 "gpu_device.impala" char* _19457; _19457 = _17874_19352.e1; #line 49 
"gpu_device.impala" struct_Buffer_4844 _19439; _19439 = _17875_19353.e1; #line 44 "gpu_device.impala" double* _19458; union { double* dst; char* src; } u_19458; u_19458.src = _19457; _19458 = u_19458.dst; #line 44 "gpu_device.impala" double* _19461; _19461 = _19458 + _19460; #line 49 "gpu_device.impala" char* _19440; _19440 = _19439.e1; #line 49 "gpu_device.impala" double* _19441; union { double* dst; char* src; } u_19441; u_19441.src = _19440; _19441 = u_19441.dst; #line 68 "gaussian.impala" if (_19404) goto l19405; else goto l19473; l19473: ; #line 80 "gaussian.impala" goto l19465; l19405: ; #line 68 "gaussian.impala" int _19406; _19406 = _19394 - 3; #line 68 "gaussian.impala" bool _19407; _19407 = gid_x_19392 < _19406; #line 68 "gaussian.impala" if (_19407) goto l19408; else goto l19472; l19472: ; #line 80 "gaussian.impala" goto l19465; l19408: ; #line 68 "gaussian.impala" bool _19409; _19409 = 3 <= gid_y_19398; #line 68 "gaussian.impala" if (_19409) goto l19410; else goto l19471; l19471: ; #line 80 "gaussian.impala" goto l19465; l19410: ; #line 68 "gaussian.impala" int _19411; _19411 = _19400 - 3; #line 68 "gaussian.impala" bool _19412; _19412 = gid_y_19398 < _19411; #line 68 "gaussian.impala" if (_19412) goto l19413; else goto l19464; l19464: ; #line 80 "gaussian.impala" goto l19465; l19465: ; #line 49 "gpu_device.impala" double* _19466; _19466 = _19441 + _19460; #line 49 "gpu_device.impala" double _19467; _19467 = *_19466; #line 49 "gpu_device.impala" double _19469; _19469 = _19467; #line 44 "gpu_device.impala" *_19461 = _19469; return ; l19413: ; #line 18 "gpu_device.impala" p_19415 = -3; psum_19417 = 0.000000e+00; goto l19414; l19414: ; _19415 = p_19415; sum_19417 = psum_19417; #line 18 "gpu_device.impala" bool _19419; _19419 = _19415 < 4; #line 18 "gpu_device.impala" if (_19419) goto l19420; else goto l19456; l19456: ; #line 44 "gpu_device.impala" *_19461 = sum_19417; return ; l19420: ; #line 73 "gaussian.impala" int _19444; _19444 = gid_x_19392 + _19415; #line 18 "gpu_device.impala" p_19422 = -3; psum_19424 = sum_19417; goto l19421; l19421: ; _19422 = p_19422; sum_19424 = psum_19424; #line 18 "gpu_device.impala" bool _19425; _19425 = _19422 < 4; #line 18 "gpu_device.impala" if (_19425) goto l19426; else goto l19453; l19453: ; #line 22 "gpu_device.impala" int _19454; _19454 = 1 + _19415; #line 18 "gpu_device.impala" p_19415 = _19454; psum_19417 = sum_19424; goto l19414; l19426: ; #line 73 "gaussian.impala" int _19442; _19442 = gid_y_19398 + _19422; #line 73 "gaussian.impala" int _19430; _19430 = 3 + _19422; #line 22 "gpu_device.impala" int _19428; _19428 = 1 + _19422; #line 59 "gpu_device.impala" int _19431; _19431 = 7 * _19430; #line 49 "gpu_device.impala" int _19443; _19443 = _19442 * _19394; #line 59 "gpu_device.impala" int _19432; _19432 = _19431 + _19415; #line 49 "gpu_device.impala" int _19445; _19445 = _19443 + _19444; #line 59 "gpu_device.impala" int _19433; _19433 = 3 + _19432; #line 49 "gpu_device.impala" double* _19446; _19446 = _19441 + _19445; #line 59 "gpu_device.impala" double* _19434; _19434 = _17876_19354 + _19433; #line 59 "gpu_device.impala" double _19435; _19435 = *_19434; #line 59 "gpu_device.impala" double _19449; _19449 = _19435; #line 49 "gpu_device.impala" double _19447; _19447 = *_19446; #line 49 "gpu_device.impala" double _19450; _19450 = _19447; #line 73 "gaussian.impala" double _19451; _19451 = _19449 * _19450; #line 73 "gaussian.impala" double _19452; _19452 = sum_19424 + _19451; #line 18 "gpu_device.impala" p_19422 = _19428; psum_19424 = _19452; goto 
l19421; } }
a835324f79caf2e6a2e2b8e41fa1c82ab90d4aa7.cu
extern "C" { typedef struct { int e0; char* e1; } struct_Buffer_4844; typedef struct { struct_Buffer_4844 e0; struct_Buffer_4844 e1; int e2; int e3; } struct_image_4847; __device__ inline int threadIdx_x() { return threadIdx.x; } __device__ inline int threadIdx_y() { return threadIdx.y; } __device__ inline int threadIdx_z() { return threadIdx.z; } __device__ inline int blockIdx_x() { return blockIdx.x; } __device__ inline int blockIdx_y() { return blockIdx.y; } __device__ inline int blockIdx_z() { return blockIdx.z; } __device__ inline int blockDim_x() { return blockDim.x; } __device__ inline int blockDim_y() { return blockDim.y; } __device__ inline int blockDim_z() { return blockDim.z; } __device__ inline int gridDim_x() { return gridDim.x; } __device__ inline int gridDim_y() { return gridDim.y; } __device__ inline int gridDim_z() { return gridDim.z; } __global__ void lambda_17870(double*, struct_Buffer_4844, struct_image_4847, double*); __global__ __launch_bounds__ (128 * 1 * 1) void lambda_17870(double* _17873_19351, struct_Buffer_4844 _17874_19352, struct_image_4847 _17875_19353, double* _17876_19354) { int _19360; int p_19360; int _19366; int p_19366; int _19372; int p_19372; int _19378; int p_19378; int _19384; int p_19384; int _19390; int p_19390; int _19415; int p_19415; double sum_19417; double psum_19417; int _19422; int p_19422; double sum_19424; double psum_19424; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _19360 = blockIdx_x(); p_19360 = _19360; l19358: ; _19360 = p_19360; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _19366 = blockDim_x(); p_19366 = _19366; l19364: ; _19366 = p_19366; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _19372 = threadIdx_x(); p_19372 = _19372; l19370: ; _19372 = p_19372; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _19378 = blockIdx_y(); p_19378 = _19378; l19376: ; _19378 = p_19378; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _19384 = blockDim_y(); p_19384 = _19384; l19382: ; _19384 = p_19384; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _19390 = threadIdx_y(); p_19390 = _19390; l19388: ; _19390 = p_19390; #line 11 "main.impala" int _19394; _19394 = _17875_19353.e2; #line 119 "gpu_device.impala" int _19391; _19391 = _19360 * _19366; #line 119 "gpu_device.impala" int gid_x_19392; gid_x_19392 = _19391 + _19372; #line 124 "gpu_device.impala" bool _19395; _19395 = gid_x_19392 < _19394; #line 124 "gpu_device.impala" if (_19395) goto l19396; else goto l19476; l19476: ; #line 127 "gpu_device.impala" goto l19475; l19396: ; #line 121 "gpu_device.impala" int _19397; _19397 = _19378 * _19384; #line 121 "gpu_device.impala" int gid_y_19398; gid_y_19398 = _19397 + _19390; #line 11 "main.impala" int _19400; _19400 = _17875_19353.e3; #line 124 "gpu_device.impala" bool _19401; _19401 = gid_y_19398 < _19400; #line 124 "gpu_device.impala" if (_19401) goto l19402; else goto l19474; l19474: ; #line 127 "gpu_device.impala" goto l19475; l19475: ; return ; l19402: ; #line 68 "gaussian.impala" bool _19404; _19404 = 3 <= gid_x_19392; #line 49 "gpu_device.impala" int _19459; _19459 = gid_y_19398 * _19394; #line 49 "gpu_device.impala" int _19460; _19460 = _19459 + gid_x_19392; #line 44 "gpu_device.impala" char* _19457; _19457 = _17874_19352.e1; #line 49 "gpu_device.impala" struct_Buffer_4844 _19439; _19439 = _17875_19353.e1; #line 44 
"gpu_device.impala" double* _19458; union { double* dst; char* src; } u_19458; u_19458.src = _19457; _19458 = u_19458.dst; #line 44 "gpu_device.impala" double* _19461; _19461 = _19458 + _19460; #line 49 "gpu_device.impala" char* _19440; _19440 = _19439.e1; #line 49 "gpu_device.impala" double* _19441; union { double* dst; char* src; } u_19441; u_19441.src = _19440; _19441 = u_19441.dst; #line 68 "gaussian.impala" if (_19404) goto l19405; else goto l19473; l19473: ; #line 80 "gaussian.impala" goto l19465; l19405: ; #line 68 "gaussian.impala" int _19406; _19406 = _19394 - 3; #line 68 "gaussian.impala" bool _19407; _19407 = gid_x_19392 < _19406; #line 68 "gaussian.impala" if (_19407) goto l19408; else goto l19472; l19472: ; #line 80 "gaussian.impala" goto l19465; l19408: ; #line 68 "gaussian.impala" bool _19409; _19409 = 3 <= gid_y_19398; #line 68 "gaussian.impala" if (_19409) goto l19410; else goto l19471; l19471: ; #line 80 "gaussian.impala" goto l19465; l19410: ; #line 68 "gaussian.impala" int _19411; _19411 = _19400 - 3; #line 68 "gaussian.impala" bool _19412; _19412 = gid_y_19398 < _19411; #line 68 "gaussian.impala" if (_19412) goto l19413; else goto l19464; l19464: ; #line 80 "gaussian.impala" goto l19465; l19465: ; #line 49 "gpu_device.impala" double* _19466; _19466 = _19441 + _19460; #line 49 "gpu_device.impala" double _19467; _19467 = *_19466; #line 49 "gpu_device.impala" double _19469; _19469 = _19467; #line 44 "gpu_device.impala" *_19461 = _19469; return ; l19413: ; #line 18 "gpu_device.impala" p_19415 = -3; psum_19417 = 0.000000e+00; goto l19414; l19414: ; _19415 = p_19415; sum_19417 = psum_19417; #line 18 "gpu_device.impala" bool _19419; _19419 = _19415 < 4; #line 18 "gpu_device.impala" if (_19419) goto l19420; else goto l19456; l19456: ; #line 44 "gpu_device.impala" *_19461 = sum_19417; return ; l19420: ; #line 73 "gaussian.impala" int _19444; _19444 = gid_x_19392 + _19415; #line 18 "gpu_device.impala" p_19422 = -3; psum_19424 = sum_19417; goto l19421; l19421: ; _19422 = p_19422; sum_19424 = psum_19424; #line 18 "gpu_device.impala" bool _19425; _19425 = _19422 < 4; #line 18 "gpu_device.impala" if (_19425) goto l19426; else goto l19453; l19453: ; #line 22 "gpu_device.impala" int _19454; _19454 = 1 + _19415; #line 18 "gpu_device.impala" p_19415 = _19454; psum_19417 = sum_19424; goto l19414; l19426: ; #line 73 "gaussian.impala" int _19442; _19442 = gid_y_19398 + _19422; #line 73 "gaussian.impala" int _19430; _19430 = 3 + _19422; #line 22 "gpu_device.impala" int _19428; _19428 = 1 + _19422; #line 59 "gpu_device.impala" int _19431; _19431 = 7 * _19430; #line 49 "gpu_device.impala" int _19443; _19443 = _19442 * _19394; #line 59 "gpu_device.impala" int _19432; _19432 = _19431 + _19415; #line 49 "gpu_device.impala" int _19445; _19445 = _19443 + _19444; #line 59 "gpu_device.impala" int _19433; _19433 = 3 + _19432; #line 49 "gpu_device.impala" double* _19446; _19446 = _19441 + _19445; #line 59 "gpu_device.impala" double* _19434; _19434 = _17876_19354 + _19433; #line 59 "gpu_device.impala" double _19435; _19435 = *_19434; #line 59 "gpu_device.impala" double _19449; _19449 = _19435; #line 49 "gpu_device.impala" double _19447; _19447 = *_19446; #line 49 "gpu_device.impala" double _19450; _19450 = _19447; #line 73 "gaussian.impala" double _19451; _19451 = _19449 * _19450; #line 73 "gaussian.impala" double _19452; _19452 = sum_19424 + _19451; #line 18 "gpu_device.impala" p_19422 = _19428; psum_19424 = _19452; goto l19421; } }
af051a5558477761ea94ec3f93f80d96b409332c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "timer.hpp" #include <algorithm> #include <iostream> #include <stdio.h> #include <vector> #include <iostream> __global__ void shuffleKernel(const int* x, int* y, const int N) { int sum = 0; int maxSum = 0; int sqrSum = 0; int maxMod = 0; int min = x[0]; int max = 0; int zeros = 0; for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < N; tid += gridDim.x * blockDim.x) { if (tid < N) { // this if is actually important for when N is smaller than gridsize*blocksize int val = x[tid]; sum += val; maxSum += std::abs(val); sqrSum += val*val; maxMod = std::abs(val) > maxMod ? val : maxMod; min = val < min ? val : min; max = val > max ? val :max; zeros += val == 0 ? 1 : 0; } } int tid = threadIdx.x; for (int i = warpSize / 2; i != 0; i /= 2) { sum += __shfl_down_sync(0xffffffff, sum, i); maxSum += __shfl_down_sync(0xffffffff, maxSum, i); sqrSum += __shfl_down_sync(0xffffffff, sqrSum, i); int temporary = __shfl_down_sync(0xffffffff, maxMod, i); maxMod = temporary > maxMod ? temporary : maxMod; temporary = __shfl_down_sync(0xffffffff, min, i); min = temporary < min ? temporary : min; temporary = __shfl_down_sync(0xffffffff, max, i); max = temporary > max ? temporary : max; zeros += __shfl_down_sync(0xffffffff, zeros, i); } __syncthreads(); if (tid % warpSize == 0) { atomicAdd(y, sum); atomicAdd(y+1, maxSum); atomicAdd(y+2, sqrSum); atomicMax(y+3, maxMod); atomicMin(y+4, min); atomicMax(y+5, max); atomicAdd(y+6, zeros); } } template <typename T> void printContainer(T container, int N) { for (int i = 0; i < N; i++) { std::cout << container[i] << " | "; } } int main() { Timer timer; int N = 1000; int *x = (int *)malloc(sizeof(int) * N); int *y = (int *)malloc(sizeof(int) * 7); for (int i = 0; i < N; i++) { x[i] = i - N/2; } int *cuda_x; int *cuda_y; hipMalloc(&cuda_x, sizeof(int) * N); hipMalloc(&cuda_y, sizeof(int) * 7); hipMemcpy(cuda_x, x, sizeof(int) * N, hipMemcpyHostToDevice); std::vector<double> timings; for(int reps=0; reps < 10; ++reps) { timer.reset(); hipLaunchKernelGGL(( shuffleKernel), dim3(N/256), dim3(128), 0, 0, cuda_x, cuda_y, N); hipMemcpy(y, cuda_y, sizeof(int) * 7, hipMemcpyDeviceToHost); timings.push_back(timer.get()); } std::sort(timings.begin(), timings.end()); double time_elapsed = timings[10/2]; std::cout << "Time elapsed: " << time_elapsed << std::endl << std::endl; return EXIT_SUCCESS; }
af051a5558477761ea94ec3f93f80d96b409332c.cu
#include "timer.hpp" #include <algorithm> #include <iostream> #include <stdio.h> #include <vector> #include <iostream> __global__ void shuffleKernel(const int* x, int* y, const int N) { int sum = 0; int maxSum = 0; int sqrSum = 0; int maxMod = 0; int min = x[0]; int max = 0; int zeros = 0; for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < N; tid += gridDim.x * blockDim.x) { if (tid < N) { // this if is actually important for when N is smaller than gridsize*blocksize int val = x[tid]; sum += val; maxSum += std::abs(val); sqrSum += val*val; maxMod = std::abs(val) > maxMod ? val : maxMod; min = val < min ? val : min; max = val > max ? val :max; zeros += val == 0 ? 1 : 0; } } int tid = threadIdx.x; for (int i = warpSize / 2; i != 0; i /= 2) { sum += __shfl_down_sync(0xffffffff, sum, i); maxSum += __shfl_down_sync(0xffffffff, maxSum, i); sqrSum += __shfl_down_sync(0xffffffff, sqrSum, i); int temporary = __shfl_down_sync(0xffffffff, maxMod, i); maxMod = temporary > maxMod ? temporary : maxMod; temporary = __shfl_down_sync(0xffffffff, min, i); min = temporary < min ? temporary : min; temporary = __shfl_down_sync(0xffffffff, max, i); max = temporary > max ? temporary : max; zeros += __shfl_down_sync(0xffffffff, zeros, i); } __syncthreads(); if (tid % warpSize == 0) { atomicAdd(y, sum); atomicAdd(y+1, maxSum); atomicAdd(y+2, sqrSum); atomicMax(y+3, maxMod); atomicMin(y+4, min); atomicMax(y+5, max); atomicAdd(y+6, zeros); } } template <typename T> void printContainer(T container, int N) { for (int i = 0; i < N; i++) { std::cout << container[i] << " | "; } } int main() { Timer timer; int N = 1000; int *x = (int *)malloc(sizeof(int) * N); int *y = (int *)malloc(sizeof(int) * 7); for (int i = 0; i < N; i++) { x[i] = i - N/2; } int *cuda_x; int *cuda_y; cudaMalloc(&cuda_x, sizeof(int) * N); cudaMalloc(&cuda_y, sizeof(int) * 7); cudaMemcpy(cuda_x, x, sizeof(int) * N, cudaMemcpyHostToDevice); std::vector<double> timings; for(int reps=0; reps < 10; ++reps) { timer.reset(); shuffleKernel<<<N/256, 128>>>(cuda_x, cuda_y, N); cudaMemcpy(y, cuda_y, sizeof(int) * 7, cudaMemcpyDeviceToHost); timings.push_back(timer.get()); } std::sort(timings.begin(), timings.end()); double time_elapsed = timings[10/2]; std::cout << "Time elapsed: " << time_elapsed << std::endl << std::endl; return EXIT_SUCCESS; }
5f856560013382a7d328a5c0f43ae2cf2442a87b.hip
// !!! This is a file automatically generated by hipify!!!
/* Includes, system */
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

#define N 1023

/* DEVICE CODE */
__global__ void suma_2_enteros(int *d1, int *d2, int *sum){
    int idBloque = blockIdx.y * gridDim.x + blockIdx.x;
    int idThread = idBloque * blockDim.z * blockDim.y * blockDim.x
                 + threadIdx.z * blockDim.y * blockDim.x
                 + threadIdx.y * blockDim.x + threadIdx.x;
    if (idThread < N){ sum[idThread] = d1[idThread] + d2[idThread];}
}

/* HOST CODE*/
int main(int argc, char** argv)
{
    int DeviceCount = 0,i;
    int *h_d1,*h_d2,*h_sum;
    int *d_d1,*d_d2,*d_sum;
    dim3 dimGrid(8,2);
    dim3 dimBlock(8,4,2);

    h_d1  = (int*)malloc(N * sizeof(h_d1[0]));
    h_d2  = (int*)malloc(N * sizeof(h_d2[0]));
    h_sum = (int*)malloc(N * sizeof(h_sum[0]));

    for (i=0;i<N;i++){h_d1[i]=i;h_d2[i]=10*i;h_sum[i]=0;}

    /* Initialize CUDA */
    if (hipInit(0) != 0){
        printf("ERROR de inicializacion\n");
        exit(0);
    }
    hipGetDeviceCount(&DeviceCount);
    if (DeviceCount == 0){
        printf("ERROR ningun dispositivo soporta CUDA\n");
        exit(0);
    }

    hipMalloc((void**)&d_d1,N*sizeof(d_d1));  hipMemset(d_d1,0,N*sizeof(d_d1));
    hipMalloc((void**)&d_d2,N*sizeof(d_d2));  hipMemset(d_d2,0,N*sizeof(d_d2));
    hipMalloc((void**)&d_sum,N*sizeof(d_sum));hipMemset(d_sum,0,N*sizeof(d_sum));

    hipMemcpy(d_d1,h_d1,N*sizeof(h_d1[0]),hipMemcpyHostToDevice);
    hipMemcpy(d_d2,h_d2,N*sizeof(h_d2[0]),hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( suma_2_enteros), dim3(dimGrid),dim3(dimBlock), 0, 0, d_d1,d_d2,d_sum);

    hipMemcpy(h_sum,d_sum,N*sizeof(h_sum[0]),hipMemcpyDeviceToHost);

    for (i=1020;i<1024;i++) printf("Resultado: %d \n",h_sum[i]);

    hipFree(d_d1);hipFree(d_d2);hipFree(d_sum);
}
5f856560013382a7d328a5c0f43ae2cf2442a87b.cu
/* Includes, system */
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define N 1023

/* DEVICE CODE */
__global__ void suma_2_enteros(int *d1, int *d2, int *sum){
    int idBloque = blockIdx.y * gridDim.x + blockIdx.x;
    int idThread = idBloque * blockDim.z * blockDim.y * blockDim.x
                 + threadIdx.z * blockDim.y * blockDim.x
                 + threadIdx.y * blockDim.x + threadIdx.x;
    if (idThread < N){ sum[idThread] = d1[idThread] + d2[idThread];}
}

/* HOST CODE*/
int main(int argc, char** argv)
{
    int DeviceCount = 0,i;
    int *h_d1,*h_d2,*h_sum;
    int *d_d1,*d_d2,*d_sum;
    dim3 dimGrid(8,2);
    dim3 dimBlock(8,4,2);

    h_d1  = (int*)malloc(N * sizeof(h_d1[0]));
    h_d2  = (int*)malloc(N * sizeof(h_d2[0]));
    h_sum = (int*)malloc(N * sizeof(h_sum[0]));

    for (i=0;i<N;i++){h_d1[i]=i;h_d2[i]=10*i;h_sum[i]=0;}

    /* Initialize CUDA */
    if (cuInit(0) != 0){
        printf("ERROR de inicializacion\n");
        exit(0);
    }
    cuDeviceGetCount(&DeviceCount);
    if (DeviceCount == 0){
        printf("ERROR ningun dispositivo soporta CUDA\n");
        exit(0);
    }

    cudaMalloc((void**)&d_d1,N*sizeof(d_d1));  cudaMemset(d_d1,0,N*sizeof(d_d1));
    cudaMalloc((void**)&d_d2,N*sizeof(d_d2));  cudaMemset(d_d2,0,N*sizeof(d_d2));
    cudaMalloc((void**)&d_sum,N*sizeof(d_sum));cudaMemset(d_sum,0,N*sizeof(d_sum));

    cudaMemcpy(d_d1,h_d1,N*sizeof(h_d1[0]),cudaMemcpyHostToDevice);
    cudaMemcpy(d_d2,h_d2,N*sizeof(h_d2[0]),cudaMemcpyHostToDevice);

    suma_2_enteros<<<dimGrid,dimBlock>>>(d_d1,d_d2,d_sum);

    cudaMemcpy(h_sum,d_sum,N*sizeof(h_sum[0]),cudaMemcpyDeviceToHost);

    for (i=1020;i<1024;i++) printf("Resultado: %d \n",h_sum[i]);

    cudaFree(d_d1);cudaFree(d_d2);cudaFree(d_sum);
}
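The launch configuration in this pair is easy to misread, so the arithmetic is worth spelling out: the grid is 8*2 = 16 blocks, each block is 8*4*2 = 64 threads, so 16*64 = 1024 threads cover the N = 1023 elements and the idThread < N guard drops the single surplus thread. A sketch of the index flattening under those same dimensions (globalThreadId is an invented name):

// Global linear thread id for a 2D grid of 3D blocks, as computed in suma_2_enteros.
// dimGrid(8,2), dimBlock(8,4,2)  ->  16 blocks * 64 threads = 1024 >= N = 1023.
__device__ int globalThreadId()
{
    int blockId  = blockIdx.y * gridDim.x + blockIdx.x;           // 0 .. 15
    int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
                 + threadIdx.z * (blockDim.y * blockDim.x)
                 + threadIdx.y * blockDim.x
                 + threadIdx.x;                                   // 0 .. 1023
    return threadId;
}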
15cce884b1a07aca1a2538dc98b1d8019a88cf4d.hip
// !!! This is a file automatically generated by hipify!!! /*------------------------------------------------------------------------- * * CUDA functions for ray-voxel intersection based projection * * This file has the necesary fucntiosn to perform X-ray CBCT projection * operation given a geaometry, angles and image. It usesthe so-called * Jacobs algorithm to compute efficiently the length of the x-rays over * voxel space. * * CODE by Ander Biguri * * --------------------------------------------------------------------------- * --------------------------------------------------------------------------- * Copyright (c) 2015, University of Bath and CERN- European Organization for * Nuclear Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * --------------------------------------------------------------------------- * * Contact: [email protected] * Codes : https://github.com/CERN/TIGRE * --------------------------------------------------------------------------- */ #include <algorithm> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include "Siddon_projection.hpp" #include "mex.h" #include <math.h> #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ mexPrintf("%s \n",msg);\ mexErrMsgIdAndTxt("CBCT:CUDA:Atb",hipGetErrorString(__err));\ } \ } while (0) // Declare the texture reference. 
texture<float, hipTextureType3D , hipReadModeElementType> tex; #define MAXTREADS 1024 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | * | / /| | * A Z | / / |*D | * | | +--------+ | | * | | | | | | * | | | *O | + | * --->y | | | / | * / | | |/ | * V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ __global__ void kernelPixelDetector( Geometry geo, float* detector, Point3D source , Point3D deltaU, Point3D deltaV, Point3D uvOrigin){ // size_t idx = threadIdx.x + blockIdx.x * blockDim.x; unsigned long y = blockIdx.y * blockDim.y + threadIdx.y; unsigned long x = blockIdx.x * blockDim.x + threadIdx.x; unsigned long idx = x * geo.nDetecV + y; if ((x>= geo.nDetecU) | (y>= geo.nDetecV)) return; /////// Get coordinates XYZ of pixel UV int pixelV = geo.nDetecV-y-1; int pixelU = x; Point3D pixel1D; pixel1D.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x); pixel1D.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y); pixel1D.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z); /////// // Siddon's ray-voxel intersection, optimized as in doi=10.1.1.55.7516 ////// // Also called Jacobs algorithms Point3D ray; // vector of Xray ray.x=pixel1D.x-source.x; ray.y=pixel1D.y-source.y; ray.z=pixel1D.z-source.z; // This variables are ommited because // bx,by,bz ={0,0,0} // dx,dy,dz ={1,1,1} // compute parameter values for x-ray parametric equation. eq(3-10) float axm,aym,azm; float axM,ayM,azM; // In the paper Nx= number of X planes-> Nvoxel+1 axm=min(-source.x/ray.x,(geo.nVoxelX-source.x)/ray.x); aym=min(-source.y/ray.y,(geo.nVoxelY-source.y)/ray.y); azm=min(-source.z/ray.z,(geo.nVoxelZ-source.z)/ray.z); axM=max(-source.x/ray.x,(geo.nVoxelX-source.x)/ray.x); ayM=max(-source.y/ray.y,(geo.nVoxelY-source.y)/ray.y); azM=max(-source.z/ray.z,(geo.nVoxelZ-source.z)/ray.z); float am=max(max(axm,aym),azm); float aM=min(min(axM,ayM),azM); // line intersects voxel space -> am<aM if (am>=aM) detector[idx]=0; // Compute max/min image INDEX for intersection eq(11-19) // Discussion about ternary operator in CUDA: https://stackoverflow.com/questions/7104384/in-cuda-why-is-a-b010-more-efficient-than-an-if-else-version float imin,imax,jmin,jmax,kmin,kmax; // for X if( source.x<pixel1D.x){ imin=(am==axm)? 1 : ceil (source.x+am*ray.x); imax=(aM==axM)? geo.nVoxelX : floor(source.x+aM*ray.x); }else{ imax=(am==axm)? geo.nVoxelX-1 : floor(source.x+am*ray.x); imin=(aM==axM)? 0 : ceil (source.x+aM*ray.x); } // for Y if( source.y<pixel1D.y){ jmin=(am==aym)? 1 : ceil (source.y+am*ray.y); jmax=(aM==ayM)? geo.nVoxelY : floor(source.y+aM*ray.y); }else{ jmax=(am==aym)? geo.nVoxelY-1 : floor(source.y+am*ray.y); jmin=(aM==ayM)? 0 : ceil (source.y+aM*ray.y); } // for Z if( source.z<pixel1D.z){ kmin=(am==azm)? 1 : ceil (source.z+am*ray.z); kmax=(aM==azM)? geo.nVoxelZ : floor(source.z+aM*ray.z); }else{ kmax=(am==azm)? geo.nVoxelZ-1 : floor(source.z+am*ray.z); kmin=(aM==azM)? 0 : ceil (source.z+aM*ray.z); } // get intersection point N1. eq(20-21) [(also eq 9-10)] float ax,ay,az; ax=(source.x<pixel1D.x)? (imin-source.x)/ray.x : (imax-source.x)/ray.x; ay=(source.y<pixel1D.y)? (jmin-source.y)/ray.y : (jmax-source.y)/ray.y; az=(source.z<pixel1D.z)? (kmin-source.z)/ray.z : (kmax-source.z)/ray.z; // get index of first intersection. 
eq (26) and (19) int i,j,k; float aminc=min(min(ax,ay),az); i=(int)floor(source.x+ (aminc+am)/2*ray.x); j=(int)floor(source.y+ (aminc+am)/2*ray.y); k=(int)floor(source.z+ (aminc+am)/2*ray.z); // Initialize float ac=am; //eq (28), unit alphas float axu,ayu,azu; axu=1/abs(ray.x); ayu=1/abs(ray.y); azu=1/abs(ray.z); // eq(29), direction of update float iu,ju,ku; iu=(source.x< pixel1D.x)? 1 : -1; ju=(source.y< pixel1D.y)? 1 : -1; ku=(source.z< pixel1D.z)? 1 : -1; float maxlength=sqrt(ray.x*ray.x*geo.dVoxelX*geo.dVoxelX+ray.y*ray.y*geo.dVoxelY*geo.dVoxelY+ray.z*ray.z*geo.dVoxelZ*geo.dVoxelZ); float sum=0; unsigned int Np=(imax-imin+1)+(jmax-jmin+1)+(kmax-kmin+1); // Number of intersections // Go iterating over the line, intersection by intersection. If double point, no worries, 0 will be computed for (unsigned int ii=0;ii<Np;ii++){ if (ax==aminc){ sum+=(ax-ac)*tex3D(tex, i+0.5, j+0.5, k+0.5); i=i+iu; ac=ax; ax+=axu; }else if(ay==aminc){ sum+=(ay-ac)*tex3D(tex, i+0.5, j+0.5, k+0.5); j=j+ju; ac=ay; ay+=ayu; }else if(az==aminc){ sum+=(az-ac)*tex3D(tex, i+0.5, j+0.5, k+0.5); k=k+ku; ac=az; az+=azu; } aminc=min(min(ax,ay),az); } detector[idx]=sum*maxlength; } int siddon_ray_projection(float const * const img, Geometry geo, float** result,float const * const alphas,int nalpha){ //DONE, Tesla found // copy data to CUDA memory hipArray *d_imagedata = 0; const hipExtent extent = make_hipExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); hipMalloc3DArray(&d_imagedata, &channelDesc, extent); cudaCheckErrors("hipMalloc3D error 3D tex"); hipMemcpy3DParms copyParams = { 0 }; copyParams.srcPtr = make_hipPitchedPtr((void*)img, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_imagedata; copyParams.extent = extent; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams); cudaCheckErrors("hipMemcpy3D fail"); // Configure texture options tex.normalized = false; tex.filterMode = hipFilterModePoint; //we dont want interpolation tex.addressMode[0] = hipAddressModeBorder; tex.addressMode[1] = hipAddressModeBorder; tex.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex, d_imagedata, channelDesc); cudaCheckErrors("3D texture memory bind fail"); //Done! Image put into texture memory. size_t num_bytes = geo.nDetecU*geo.nDetecV * sizeof(float); float* dProjection; hipMalloc((void**)&dProjection, num_bytes); hipMemset(dProjection,0,num_bytes); cudaCheckErrors("hipMalloc fail"); bool timekernel=false; // For debuggin purposes hipEvent_t start, stop; float elapsedTime; Point3D source, deltaU, deltaV, uvOrigin; // 16x16 gave the best performance empirically // Funnily that makes it compatible with most GPUs..... int divU,divV; divU=16; divV=16; dim3 grid((geo.nDetecU+divU-1)/divU,(geo.nDetecV+divV-1)/divV,1); dim3 block(divU,divV,1); for (unsigned int i=0;i<nalpha;i++){ geo.alpha=alphas[i]; //precomute distances for faster execution //Precompute per angle constant stuff for speed computeDeltas_Siddon(geo,geo.alpha,i, &uvOrigin, &deltaU, &deltaV, &source); //Ray tracing! 
if (timekernel){ hipEventCreate(&start); hipEventRecord(start,0); } hipLaunchKernelGGL(( kernelPixelDetector), dim3(grid),dim3(block), 0, 0, geo,dProjection, source, deltaU, deltaV, uvOrigin); cudaCheckErrors("Kernel fail"); if (timekernel){ hipEventCreate(&stop); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start,stop); mexPrintf("%f\n" ,elapsedTime); } // copy result to host hipMemcpy(result[i], dProjection, num_bytes, hipMemcpyDeviceToHost); cudaCheckErrors("hipMemcpy fail"); } hipUnbindTexture(tex); cudaCheckErrors("Unbind fail"); hipFree(dProjection); hipFreeArray(d_imagedata); cudaCheckErrors("hipFree d_imagedata fail"); hipDeviceReset(); return 0; } /* This code precomputes The location of the source and the Delta U and delta V (in the warped space) * to compute the locations of the x-rays. While it seems verbose and overly-optimized, * it does saves about 30% of each of the kernel calls. Thats something! **/ void computeDeltas_Siddon(Geometry geo, float alpha,int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){ Point3D S; S.x=geo.DSO; S.y=0; S.z=0; //End point Point3D P,Pu0,Pv0; P.x =-(geo.DSD-geo.DSO); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pu0.x=-(geo.DSD-geo.DSO); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pv0.x=-(geo.DSD-geo.DSO); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1); // Geomtric trasnformations: // Now we have the Real world (OXYZ) coordinates of the bottom corner and its two neighbours. // The obkjective is to get a position of the detector in a coordinate system where: // 1-units are voxel size (in each direction can be different) // 2-The image has the its first voxel at (0,0,0) // 3-The image never rotates // To do that, we need to compute the "deltas" the detector, or "by how much // (in new xyz) does the voxels change when and index is added". To do that // several geometric steps needs to be changed //1.Roll,pitch,jaw // The detector can have a small rotation. // according to //"A geometric calibration method for cone beam CT systems" Yang K1, Kwan AL, Miller DF, Boone JM. Med Phys. 2006 Jun;33(6):1695-706. // Only the Z rotation will have a big influence in the image quality when they are small. // Still all rotations are supported // To roll pitch jaw, the detector has to be in centered in OXYZ. P.x=0;Pu0.x=0;Pv0.x=0; // Roll pitch yaw rollPitchYaw(geo,i,&P); rollPitchYaw(geo,i,&Pu0); rollPitchYaw(geo,i,&Pv0); //Now ltes translate the points where they shoudl be: P.x=P.x-(geo.DSD-geo.DSO); Pu0.x=Pu0.x-(geo.DSD-geo.DSO); Pv0.x=Pv0.x-(geo.DSD-geo.DSO); //1: Offset detector //P.x P.y =P.y +geo.offDetecU[i]; P.z =P.z +geo.offDetecV[i]; Pu0.y=Pu0.y+geo.offDetecU[i]; Pu0.z=Pu0.z+geo.offDetecV[i]; Pv0.y=Pv0.y+geo.offDetecU[i]; Pv0.z=Pv0.z+geo.offDetecV[i]; //S doesnt need to chagne //3: Rotate (around z)! 
Point3D Pfinal, Pfinalu0, Pfinalv0; Pfinal.x =P.x*cos(geo.alpha)-P.y*sin(geo.alpha); Pfinal.y =P.y*cos(geo.alpha)+P.x*sin(geo.alpha); Pfinal.z =P.z; Pfinalu0.x=Pu0.x*cos(geo.alpha)-Pu0.y*sin(geo.alpha); Pfinalu0.y=Pu0.y*cos(geo.alpha)+Pu0.x*sin(geo.alpha); Pfinalu0.z=Pu0.z; Pfinalv0.x=Pv0.x*cos(geo.alpha)-Pv0.y*sin(geo.alpha); Pfinalv0.y=Pv0.y*cos(geo.alpha)+Pv0.x*sin(geo.alpha); Pfinalv0.z=Pv0.z; Point3D S2; S2.x=S.x*cos(geo.alpha)-S.y*sin(geo.alpha); S2.y=S.y*cos(geo.alpha)+S.x*sin(geo.alpha); S2.z=S.z; //2: Offset image (instead of offseting image, -offset everything else) Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i]; Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i]; Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i]; S2.x=S2.x-geo.offOrigX[i]; S2.y=S2.y-geo.offOrigY[i]; S2.z=S2.z-geo.offOrigZ[i]; // As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation); Pfinal.x =Pfinal.x+geo.sVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2; Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2; Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2; S2.x =S2.x+geo.sVoxelX/2; S2.y =S2.y+geo.sVoxelY/2; S2.z =S2.z +geo.sVoxelZ/2; //4. Scale everything so dVoxel==1 Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ; Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ; Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ; S2.x =S2.x/geo.dVoxelX; S2.y =S2.y/geo.dVoxelY; S2.z =S2.z/geo.dVoxelZ; //mexPrintf("COR: %f \n",geo.COR[i]); //5. apply COR. Wherever everything was, now its offesetd by a bit float CORx, CORy; CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX; CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY; Pfinal.x+=CORx; Pfinal.y+=CORy; Pfinalu0.x+=CORx; Pfinalu0.y+=CORy; Pfinalv0.x+=CORx; Pfinalv0.y+=CORy; S2.x+=CORx; S2.y+=CORy; // return *uvorigin=Pfinal; deltaU->x=Pfinalu0.x-Pfinal.x; deltaU->y=Pfinalu0.y-Pfinal.y; deltaU->z=Pfinalu0.z-Pfinal.z; deltaV->x=Pfinalv0.x-Pfinal.x; deltaV->y=Pfinalv0.y-Pfinal.y; deltaV->z=Pfinalv0.z-Pfinal.z; *source=S2; } #ifndef PROJECTION_HPP float maxDistanceCubeXY(Geometry geo, float alpha,int i){ /////////// // Compute initial "t" so we access safely as less as out of bounds as possible. 
////////// float maxCubX,maxCubY; // Forgetting Z, compute max distance: diagonal+offset maxCubX=(geo.sVoxelX/2+ abs(geo.offOrigX[i]))/geo.dVoxelX; maxCubY=(geo.sVoxelY/2+ abs(geo.offOrigY[i]))/geo.dVoxelY; return geo.DSO/geo.dVoxelX-sqrt(maxCubX*maxCubX+maxCubY*maxCubY); } void rollPitchYaw(Geometry geo,int i, Point3D* point){ Point3D auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x +(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y +(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z; point->y=sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z; point->z=-sin(geo.dPitch[i])*auxPoint.x +cos(geo.dPitch[1])*sin(geo.dYaw[i])*auxPoint.y +cos(geo.dPitch[1])*cos(geo.dYaw[i])*auxPoint.z; } #endif
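The heart of the Jacobs/Siddon kernel above is the parametric clipping of each ray against the voxel grid (the axm/axM ... am/aM block, eq. 3-10 of the cited paper). A standalone sketch of just that clipping step, assuming as the kernel does that the grid starts at the origin with unit voxels (Ray and clipRayToGrid are invented names, and like the kernel it assumes no ray component is exactly zero):

// Parametric clipping of the ray  p(t) = source + t * ray  against the box
// [0,nx] x [0,ny] x [0,nz].  The ray crosses voxels only for t in [tmin, tmax];
// if tmin >= tmax the ray misses the volume and the projection value is 0.
#include <algorithm>

struct Ray { double sx, sy, sz, rx, ry, rz; };

bool clipRayToGrid(const Ray& r, int nx, int ny, int nz, double& tmin, double& tmax)
{
    double ax0 = -r.sx / r.rx, ax1 = (nx - r.sx) / r.rx;
    double ay0 = -r.sy / r.ry, ay1 = (ny - r.sy) / r.ry;
    double az0 = -r.sz / r.rz, az1 = (nz - r.sz) / r.rz;

    tmin = std::max({std::min(ax0, ax1), std::min(ay0, ay1), std::min(az0, az1)});
    tmax = std::min({std::max(ax0, ax1), std::max(ay0, ay1), std::max(az0, az1)});
    return tmin < tmax;   // true only if the ray actually intersects the voxel grid
}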
15cce884b1a07aca1a2538dc98b1d8019a88cf4d.cu
/*------------------------------------------------------------------------- * * CUDA functions for ray-voxel intersection based projection * * This file has the necesary fucntiosn to perform X-ray CBCT projection * operation given a geaometry, angles and image. It usesthe so-called * Jacobs algorithm to compute efficiently the length of the x-rays over * voxel space. * * CODE by Ander Biguri * * --------------------------------------------------------------------------- * --------------------------------------------------------------------------- * Copyright (c) 2015, University of Bath and CERN- European Organization for * Nuclear Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * --------------------------------------------------------------------------- * * Contact: [email protected] * Codes : https://github.com/CERN/TIGRE * --------------------------------------------------------------------------- */ #include <algorithm> #include <cuda_runtime_api.h> #include <cuda.h> #include "Siddon_projection.hpp" #include "mex.h" #include <math.h> #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ mexPrintf("%s \n",msg);\ mexErrMsgIdAndTxt("CBCT:CUDA:Atb",cudaGetErrorString(__err));\ } \ } while (0) // Declare the texture reference. 
texture<float, cudaTextureType3D , cudaReadModeElementType> tex; #define MAXTREADS 1024 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | * | / /| | * A Z | / / |*D | * | | +--------+ | | * | | | | | | * | | | *O | + | * --->y | | | / | * / | | |/ | * V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ __global__ void kernelPixelDetector( Geometry geo, float* detector, Point3D source , Point3D deltaU, Point3D deltaV, Point3D uvOrigin){ // size_t idx = threadIdx.x + blockIdx.x * blockDim.x; unsigned long y = blockIdx.y * blockDim.y + threadIdx.y; unsigned long x = blockIdx.x * blockDim.x + threadIdx.x; unsigned long idx = x * geo.nDetecV + y; if ((x>= geo.nDetecU) | (y>= geo.nDetecV)) return; /////// Get coordinates XYZ of pixel UV int pixelV = geo.nDetecV-y-1; int pixelU = x; Point3D pixel1D; pixel1D.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x); pixel1D.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y); pixel1D.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z); /////// // Siddon's ray-voxel intersection, optimized as in doi=10.1.1.55.7516 ////// // Also called Jacobs algorithms Point3D ray; // vector of Xray ray.x=pixel1D.x-source.x; ray.y=pixel1D.y-source.y; ray.z=pixel1D.z-source.z; // This variables are ommited because // bx,by,bz ={0,0,0} // dx,dy,dz ={1,1,1} // compute parameter values for x-ray parametric equation. eq(3-10) float axm,aym,azm; float axM,ayM,azM; // In the paper Nx= number of X planes-> Nvoxel+1 axm=min(-source.x/ray.x,(geo.nVoxelX-source.x)/ray.x); aym=min(-source.y/ray.y,(geo.nVoxelY-source.y)/ray.y); azm=min(-source.z/ray.z,(geo.nVoxelZ-source.z)/ray.z); axM=max(-source.x/ray.x,(geo.nVoxelX-source.x)/ray.x); ayM=max(-source.y/ray.y,(geo.nVoxelY-source.y)/ray.y); azM=max(-source.z/ray.z,(geo.nVoxelZ-source.z)/ray.z); float am=max(max(axm,aym),azm); float aM=min(min(axM,ayM),azM); // line intersects voxel space -> am<aM if (am>=aM) detector[idx]=0; // Compute max/min image INDEX for intersection eq(11-19) // Discussion about ternary operator in CUDA: https://stackoverflow.com/questions/7104384/in-cuda-why-is-a-b010-more-efficient-than-an-if-else-version float imin,imax,jmin,jmax,kmin,kmax; // for X if( source.x<pixel1D.x){ imin=(am==axm)? 1 : ceil (source.x+am*ray.x); imax=(aM==axM)? geo.nVoxelX : floor(source.x+aM*ray.x); }else{ imax=(am==axm)? geo.nVoxelX-1 : floor(source.x+am*ray.x); imin=(aM==axM)? 0 : ceil (source.x+aM*ray.x); } // for Y if( source.y<pixel1D.y){ jmin=(am==aym)? 1 : ceil (source.y+am*ray.y); jmax=(aM==ayM)? geo.nVoxelY : floor(source.y+aM*ray.y); }else{ jmax=(am==aym)? geo.nVoxelY-1 : floor(source.y+am*ray.y); jmin=(aM==ayM)? 0 : ceil (source.y+aM*ray.y); } // for Z if( source.z<pixel1D.z){ kmin=(am==azm)? 1 : ceil (source.z+am*ray.z); kmax=(aM==azM)? geo.nVoxelZ : floor(source.z+aM*ray.z); }else{ kmax=(am==azm)? geo.nVoxelZ-1 : floor(source.z+am*ray.z); kmin=(aM==azM)? 0 : ceil (source.z+aM*ray.z); } // get intersection point N1. eq(20-21) [(also eq 9-10)] float ax,ay,az; ax=(source.x<pixel1D.x)? (imin-source.x)/ray.x : (imax-source.x)/ray.x; ay=(source.y<pixel1D.y)? (jmin-source.y)/ray.y : (jmax-source.y)/ray.y; az=(source.z<pixel1D.z)? (kmin-source.z)/ray.z : (kmax-source.z)/ray.z; // get index of first intersection. 
eq (26) and (19) int i,j,k; float aminc=min(min(ax,ay),az); i=(int)floor(source.x+ (aminc+am)/2*ray.x); j=(int)floor(source.y+ (aminc+am)/2*ray.y); k=(int)floor(source.z+ (aminc+am)/2*ray.z); // Initialize float ac=am; //eq (28), unit alphas float axu,ayu,azu; axu=1/abs(ray.x); ayu=1/abs(ray.y); azu=1/abs(ray.z); // eq(29), direction of update float iu,ju,ku; iu=(source.x< pixel1D.x)? 1 : -1; ju=(source.y< pixel1D.y)? 1 : -1; ku=(source.z< pixel1D.z)? 1 : -1; float maxlength=sqrt(ray.x*ray.x*geo.dVoxelX*geo.dVoxelX+ray.y*ray.y*geo.dVoxelY*geo.dVoxelY+ray.z*ray.z*geo.dVoxelZ*geo.dVoxelZ); float sum=0; unsigned int Np=(imax-imin+1)+(jmax-jmin+1)+(kmax-kmin+1); // Number of intersections // Go iterating over the line, intersection by intersection. If double point, no worries, 0 will be computed for (unsigned int ii=0;ii<Np;ii++){ if (ax==aminc){ sum+=(ax-ac)*tex3D(tex, i+0.5, j+0.5, k+0.5); i=i+iu; ac=ax; ax+=axu; }else if(ay==aminc){ sum+=(ay-ac)*tex3D(tex, i+0.5, j+0.5, k+0.5); j=j+ju; ac=ay; ay+=ayu; }else if(az==aminc){ sum+=(az-ac)*tex3D(tex, i+0.5, j+0.5, k+0.5); k=k+ku; ac=az; az+=azu; } aminc=min(min(ax,ay),az); } detector[idx]=sum*maxlength; } int siddon_ray_projection(float const * const img, Geometry geo, float** result,float const * const alphas,int nalpha){ //DONE, Tesla found // copy data to CUDA memory cudaArray *d_imagedata = 0; const cudaExtent extent = make_cudaExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); cudaMalloc3DArray(&d_imagedata, &channelDesc, extent); cudaCheckErrors("cudaMalloc3D error 3D tex"); cudaMemcpy3DParms copyParams = { 0 }; copyParams.srcPtr = make_cudaPitchedPtr((void*)img, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_imagedata; copyParams.extent = extent; copyParams.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams); cudaCheckErrors("cudaMemcpy3D fail"); // Configure texture options tex.normalized = false; tex.filterMode = cudaFilterModePoint; //we dont want interpolation tex.addressMode[0] = cudaAddressModeBorder; tex.addressMode[1] = cudaAddressModeBorder; tex.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex, d_imagedata, channelDesc); cudaCheckErrors("3D texture memory bind fail"); //Done! Image put into texture memory. size_t num_bytes = geo.nDetecU*geo.nDetecV * sizeof(float); float* dProjection; cudaMalloc((void**)&dProjection, num_bytes); cudaMemset(dProjection,0,num_bytes); cudaCheckErrors("cudaMalloc fail"); bool timekernel=false; // For debuggin purposes cudaEvent_t start, stop; float elapsedTime; Point3D source, deltaU, deltaV, uvOrigin; // 16x16 gave the best performance empirically // Funnily that makes it compatible with most GPUs..... int divU,divV; divU=16; divV=16; dim3 grid((geo.nDetecU+divU-1)/divU,(geo.nDetecV+divV-1)/divV,1); dim3 block(divU,divV,1); for (unsigned int i=0;i<nalpha;i++){ geo.alpha=alphas[i]; //precomute distances for faster execution //Precompute per angle constant stuff for speed computeDeltas_Siddon(geo,geo.alpha,i, &uvOrigin, &deltaU, &deltaV, &source); //Ray tracing! 
if (timekernel){
            cudaEventCreate(&start);
            cudaEventRecord(start,0);
        }
        kernelPixelDetector<<<grid,block>>>(geo,dProjection, source, deltaU, deltaV, uvOrigin);
        cudaCheckErrors("Kernel fail");
        if (timekernel){
            cudaEventCreate(&stop);
            cudaEventRecord(stop,0);
            cudaEventSynchronize(stop);
            cudaEventElapsedTime(&elapsedTime, start,stop);
            mexPrintf("%f\n" ,elapsedTime);
        }
        // copy result to host
        cudaMemcpy(result[i], dProjection, num_bytes, cudaMemcpyDeviceToHost);
        cudaCheckErrors("cudaMemcpy fail");
    }
    cudaUnbindTexture(tex);
    cudaCheckErrors("Unbind fail");
    cudaFree(dProjection);
    cudaFreeArray(d_imagedata);
    cudaCheckErrors("cudaFree d_imagedata fail");
    cudaDeviceReset();
    return 0;
}

/* This code precomputes the location of the source and the delta U and delta V (in the warped space)
 * to compute the locations of the x-rays. While it seems verbose and overly-optimized,
 * it saves about 30% of each of the kernel calls. That's something!
 **/
void computeDeltas_Siddon(Geometry geo, float alpha,int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
    Point3D S;
    S.x=geo.DSO;
    S.y=0;
    S.z=0;
    
    //End point
    Point3D P,Pu0,Pv0;
    P.x  =-(geo.DSD-geo.DSO);   P.y  = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5);   P.z  = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
    Pu0.x=-(geo.DSD-geo.DSO);   Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5);   Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
    Pv0.x=-(geo.DSD-geo.DSO);   Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5);   Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
    // Geometric transformations:
    // Now we have the real-world (OXYZ) coordinates of the bottom corner and its two neighbours.
    // The objective is to get a position of the detector in a coordinate system where:
    // 1-units are voxel size (in each direction can be different)
    // 2-The image has its first voxel at (0,0,0)
    // 3-The image never rotates
    // To do that, we need to compute the "deltas" of the detector, or "by how much
    // (in new xyz) do the voxels change when an index is added". To do that,
    // several geometric steps need to be applied.
    
    //1.Roll,pitch,yaw
    // The detector can have a small rotation.
    // according to
    //"A geometric calibration method for cone beam CT systems" Yang K1, Kwan AL, Miller DF, Boone JM. Med Phys. 2006 Jun;33(6):1695-706.
    // Only the Z rotation will have a big influence in the image quality when they are small.
    // Still all rotations are supported
    
    // To roll, pitch and yaw, the detector has to be centered in OXYZ.
    P.x=0;Pu0.x=0;Pv0.x=0;
    
    // Roll pitch yaw
    rollPitchYaw(geo,i,&P);
    rollPitchYaw(geo,i,&Pu0);
    rollPitchYaw(geo,i,&Pv0);
    //Now let's translate the points to where they should be:
    P.x=P.x-(geo.DSD-geo.DSO);
    Pu0.x=Pu0.x-(geo.DSD-geo.DSO);
    Pv0.x=Pv0.x-(geo.DSD-geo.DSO);
    
    //1: Offset detector
    //P.x
    P.y  =P.y  +geo.offDetecU[i];    P.z  =P.z  +geo.offDetecV[i];
    Pu0.y=Pu0.y+geo.offDetecU[i];    Pu0.z=Pu0.z+geo.offDetecV[i];
    Pv0.y=Pv0.y+geo.offDetecU[i];    Pv0.z=Pv0.z+geo.offDetecV[i];
    //S doesn't need to change
    
    //3: Rotate (around z)!
Point3D Pfinal, Pfinalu0, Pfinalv0; Pfinal.x =P.x*cos(geo.alpha)-P.y*sin(geo.alpha); Pfinal.y =P.y*cos(geo.alpha)+P.x*sin(geo.alpha); Pfinal.z =P.z; Pfinalu0.x=Pu0.x*cos(geo.alpha)-Pu0.y*sin(geo.alpha); Pfinalu0.y=Pu0.y*cos(geo.alpha)+Pu0.x*sin(geo.alpha); Pfinalu0.z=Pu0.z; Pfinalv0.x=Pv0.x*cos(geo.alpha)-Pv0.y*sin(geo.alpha); Pfinalv0.y=Pv0.y*cos(geo.alpha)+Pv0.x*sin(geo.alpha); Pfinalv0.z=Pv0.z; Point3D S2; S2.x=S.x*cos(geo.alpha)-S.y*sin(geo.alpha); S2.y=S.y*cos(geo.alpha)+S.x*sin(geo.alpha); S2.z=S.z; //2: Offset image (instead of offseting image, -offset everything else) Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i]; Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i]; Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i]; S2.x=S2.x-geo.offOrigX[i]; S2.y=S2.y-geo.offOrigY[i]; S2.z=S2.z-geo.offOrigZ[i]; // As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation); Pfinal.x =Pfinal.x+geo.sVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2; Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2; Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2; S2.x =S2.x+geo.sVoxelX/2; S2.y =S2.y+geo.sVoxelY/2; S2.z =S2.z +geo.sVoxelZ/2; //4. Scale everything so dVoxel==1 Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ; Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ; Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ; S2.x =S2.x/geo.dVoxelX; S2.y =S2.y/geo.dVoxelY; S2.z =S2.z/geo.dVoxelZ; //mexPrintf("COR: %f \n",geo.COR[i]); //5. apply COR. Wherever everything was, now its offesetd by a bit float CORx, CORy; CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX; CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY; Pfinal.x+=CORx; Pfinal.y+=CORy; Pfinalu0.x+=CORx; Pfinalu0.y+=CORy; Pfinalv0.x+=CORx; Pfinalv0.y+=CORy; S2.x+=CORx; S2.y+=CORy; // return *uvorigin=Pfinal; deltaU->x=Pfinalu0.x-Pfinal.x; deltaU->y=Pfinalu0.y-Pfinal.y; deltaU->z=Pfinalu0.z-Pfinal.z; deltaV->x=Pfinalv0.x-Pfinal.x; deltaV->y=Pfinalv0.y-Pfinal.y; deltaV->z=Pfinalv0.z-Pfinal.z; *source=S2; } #ifndef PROJECTION_HPP float maxDistanceCubeXY(Geometry geo, float alpha,int i){ /////////// // Compute initial "t" so we access safely as less as out of bounds as possible. 
//////////
    float maxCubX,maxCubY;
    // Forgetting Z, compute max distance: diagonal+offset
    maxCubX=(geo.sVoxelX/2+ abs(geo.offOrigX[i]))/geo.dVoxelX;
    maxCubY=(geo.sVoxelY/2+ abs(geo.offOrigY[i]))/geo.dVoxelY;
    
    return geo.DSO/geo.dVoxelX-sqrt(maxCubX*maxCubX+maxCubY*maxCubY);
    
}
void rollPitchYaw(Geometry geo,int i, Point3D* point){
    Point3D auxPoint;
    auxPoint.x=point->x;
    auxPoint.y=point->y;
    auxPoint.z=point->z;
    
    // Apply the roll-pitch-yaw rotation of projection i to the point.
    point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
    +(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
    +(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z;
    
    point->y=sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
    +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
    +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z;
    
    // Third row of the rotation matrix; the pitch of projection i is used here as well.
    point->z=-sin(geo.dPitch[i])*auxPoint.x
    +cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.y
    +cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
    
}
#endif
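/*
 * Illustrative stand-alone sketch, not part of the TIGRE file above: the kernel's
 * axm/axM/aym/ayM/azm/azM block is a slab test that yields the parametric entry/exit
 * range of the ray against the voxel grid before the voxel walk starts. A minimal host
 * version of just that step, assuming the same voxel-unit box [0,nx]x[0,ny]x[0,nz] and
 * a made-up Vec3 type, might look like this:
 */
#include <algorithm>
#include <cstdio>

struct Vec3 { float x, y, z; };

// Fills [am,aM] with the parameter range for which source + a*ray lies inside the
// axis-aligned box [0,nx]x[0,ny]x[0,nz]; returns false when the ray misses the box.
// Like the kernel, it relies on IEEE inf semantics when a ray component is zero.
static bool rayBoxRange(Vec3 source, Vec3 ray, float nx, float ny, float nz,
                        float* am, float* aM){
    float axm = std::min(-source.x/ray.x, (nx-source.x)/ray.x);
    float axM = std::max(-source.x/ray.x, (nx-source.x)/ray.x);
    float aym = std::min(-source.y/ray.y, (ny-source.y)/ray.y);
    float ayM = std::max(-source.y/ray.y, (ny-source.y)/ray.y);
    float azm = std::min(-source.z/ray.z, (nz-source.z)/ray.z);
    float azM = std::max(-source.z/ray.z, (nz-source.z)/ray.z);
    *am = std::max(std::max(axm, aym), azm);   // latest entry over the three slabs
    *aM = std::min(std::min(axM, ayM), azM);   // earliest exit over the three slabs
    return *am < *aM;
}

int main(){
    Vec3 s = {-10.f, 5.f, 5.f};   // made-up source position, in voxel units
    Vec3 r = { 20.f, 0.f, 0.f};   // made-up ray from the source towards a detector pixel
    float am, aM;
    if (rayBoxRange(s, r, 10.f, 10.f, 10.f, &am, &aM))
        std::printf("ray is inside the volume for a in [%.2f, %.2f]\n", am, aM);
    return 0;
}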
f81d35e60303c89ef41265b920bceb22f5e91849.hip
// !!! This is a file automatically generated by hipify!!! #include <sys/time.h> #include <hip/hip_runtime.h> #include "utils.h" #define CUCHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __device__ unsigned char clamp(float value, int min, int max) { if(value < min) return 0; else if(value > max) return max; else return value; } __global__ void RGBToYCBCR(const unsigned char *pixels, float *ycbcrimg, unsigned char *yimg, int width, int height) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < width && y < height) { /* const float KB = 0.0593; const float KR = 0.2627; const float KG = 1 - KB - KR; unsigned char R = pixels[(y * width + x) * 3 + 0]; unsigned char G = pixels[(y * width + x) * 3 + 1]; unsigned char B = pixels[(y * width + x) * 3 + 2]; float Y = KR * R + (1 - KR - KB) * G + KB * B; float CB = (B - Y) / (1 + KR + KG - KB); float CR = (R - Y) / (1 - KR + KG + KB); yimg[y * width + x] = clamp(Y, 0, 255); ycbcrimg[(y * width + x)*3 + 0] = Y; ycbcrimg[(y * width + x)*3 + 1] = CB; ycbcrimg[(y * width + x)*3 + 2] = CR; */ unsigned char R = pixels[(y * width + x) * 3 + 0]; unsigned char G = pixels[(y * width + x) * 3 + 1]; unsigned char B = pixels[(y * width + x) * 3 + 2]; float Y = 0.2627f * R + 0.678f * G + 0.0593f * B; float CB = (B - Y) / 1.8814f; float CR = (R - Y) / 1.4746f; yimg[y * width + x] = clamp(Y, 0, 255); //ycbcrimg[(y * width + x)*3 + 0] = Y; ycbcrimg[(y * width + x)*3 + 1] = CB; ycbcrimg[(y * width + x)*3 + 2] = CR; } } __global__ void YCBCRToRGB(const float *ycbcrimg, unsigned char *yimg, float *lut, unsigned char *pixels, int width, int height) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < width && y < height) { /* const float KB = 0.0593; const float KR = 0.2627; const float KG = 1 - KB - KR; float Y = lut[yimg[y * width + x]]; float CB = ycbcrimg[(y * width + x) * 3 + 1]; float CR = ycbcrimg[(y * width + x) * 3 + 2]; unsigned char R = clamp((Y + CR * (1 - KR + KG + KB)), 0, 255); unsigned char B = clamp((Y + CB * (1 + KR + KG - KB)), 0, 255); unsigned char G = clamp((Y - KR * R - KB * B) / (1 - KB - KR), 0, 255); yimg[y * width + x] = Y; pixels[(y * width + x) * 3 + 0] = R; pixels[(y * width + x) * 3 + 1] = G; pixels[(y * width + x) * 3 + 2] = B; */ float Y = lut[yimg[y * width + x]]; float CB = ycbcrimg[(y * width + x) * 3 + 1]; float CR = ycbcrimg[(y * width + x) * 3 + 2]; unsigned char R = clamp(Y + CR * 1.4746f, 0, 255); unsigned char B = clamp(Y + CB * 1.8814f, 0, 255); unsigned char G = clamp((Y - 0.2627f * R - 0.0593f * B) / 0.678, 0, 255); pixels[(y * width + x) * 3 + 0] = R; pixels[(y * width + x) * 3 + 1] = G; pixels[(y * width + x) * 3 + 2] = B; } } __global__ void histogram(unsigned char *yimg, unsigned int *yhist, int width, int height) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < width && y < height) { atomicAdd(&yhist[yimg[y * width + x]], 1); } } int main(int argc, char **argv) { struct timeval start, last, now, computation; gettimeofday(&start, 0); if(argc < 2) { printf("usage: %s image\n", argv[0]); return 0; } ilInit(); ilEnable(IL_ORIGIN_SET); ilEnable(IL_FILE_OVERWRITE); ILboolean result = ilLoadImage(argv[1]); 
if(!result) { ILenum err = ilGetError() ; printf("Failed to load %s\n", argv[1]); printf("Error: %s\n", ilGetString(err)); } ilConvertImage(IL_RGB, IL_UNSIGNED_BYTE); ilOriginFunc(IL_ORIGIN_UPPER_LEFT); ILuint width = ilGetInteger(IL_IMAGE_WIDTH); ILuint height = ilGetInteger(IL_IMAGE_HEIGHT); unsigned int size = width * height; ILubyte *ILpixels = ilGetData(); gettimeofday(&now, 0); computation = last = now; unsigned char *pixels = 0; hipHostMalloc((void**)&pixels, size * 3 * sizeof(unsigned char)); memcpy(pixels, ILpixels, size * 3 * sizeof(unsigned char)); printf("Image (%d * %d) loaded in %f\n", width, height, get_time(start, now)); unsigned char *eqpixels = (unsigned char*)malloc(size * 3 * sizeof(unsigned char)); printf("Size: %d * %d\n", width, height); const int nStreams = 4; int streamSize = (height / nStreams) * width; hipStream_t streams[nStreams]; for(int i=0; i<nStreams; i++) CUCHECK(hipStreamCreate(&streams[i])); ///================================================================= unsigned char *d_pixels = 0, *d_eqpixels = 0; unsigned char *d_yimg = 0; unsigned int *d_yhist = 0; float *d_ycbcrimg = 0; float *d_lut = 0; CUCHECK(hipMalloc((void**)&d_pixels, size * 3 * sizeof(unsigned char))); CUCHECK(hipMalloc((void**)&d_ycbcrimg, size * 3 * sizeof(float))); CUCHECK(hipMalloc((void**)&d_yimg, size * sizeof(unsigned char))); CUCHECK(hipMalloc((void**)&d_yhist, 256 * sizeof(unsigned int))); CUCHECK(hipMalloc((void**)&d_lut, 256 * sizeof(float))); hipMemset(d_yhist, 0, 256 * sizeof(unsigned int)); // RGB -> YCBCR dim3 blockDim(16, 16, 1); dim3 gridDim((width + blockDim.x - 1) / blockDim.x, (height / nStreams + blockDim.y - 1) / blockDim.y, 1); for(int i=0; i<nStreams; i++) { int offset = i * streamSize; CUCHECK(hipMemcpyAsync(&d_pixels[offset * 3], &pixels[offset * 3], streamSize * 3 * sizeof(unsigned char), hipMemcpyHostToDevice, streams[i])); } for(int i=0; i<nStreams; i++) { int offset = i * streamSize; hipLaunchKernelGGL(( RGBToYCBCR), dim3(gridDim), dim3(blockDim), 0, streams[i], &d_pixels[offset * 3], &d_ycbcrimg[offset * 3], &d_yimg[offset], width, height / nStreams); } for(int i=0; i<nStreams; i++) { int offset = i * streamSize; hipLaunchKernelGGL(( histogram), dim3(gridDim), dim3(blockDim), 0, streams[i], &d_yimg[offset], d_yhist, width, height / nStreams); } hipFree(d_pixels); for(int i=0; i<nStreams; i++) CUCHECK(hipStreamDestroy(streams[i])); hipDeviceSynchronize(); gettimeofday(&now, 0); printf("\tHistogram computed in %f\n", get_time(last, now)); last = now; unsigned int *yhist = (unsigned int*)malloc(256 * sizeof(unsigned int)); hipMemcpy(yhist, d_yhist, 256 * sizeof(unsigned int), hipMemcpyDeviceToHost); // Equalization float ylut[256]; double ysum = 0; for(unsigned int i = 0; i < 256; ++i) { ysum += (float)yhist[i] / size; ylut[i] = ysum * 255; } hipMemcpy(d_lut, ylut, 256 * sizeof(float), hipMemcpyHostToDevice); free(yhist); hipDeviceSynchronize(); gettimeofday(&now, 0); printf("\tEqualization computed in %f\n", get_time(last, now)); last = now; CUCHECK(hipMalloc((void**)&d_eqpixels, size * 3 * sizeof(unsigned char))); // YCBCR -> RGB blockDim = dim3(16, 16, 1); gridDim = dim3((width + blockDim.x - 1) / blockDim.x, (height + blockDim.y - 1) / blockDim.y, 1); hipLaunchKernelGGL(( YCBCRToRGB), dim3(gridDim), dim3(blockDim), 0, 0, d_ycbcrimg, d_yimg, d_lut, d_eqpixels, width, height); hipMemcpy(eqpixels, d_eqpixels, size * 3 * sizeof(unsigned char), hipMemcpyDeviceToHost); hipFree(d_eqpixels); hipFree(d_ycbcrimg); hipFree(d_yimg); hipFree(d_yhist); hipFree(d_lut); 
hipDeviceSynchronize(); gettimeofday(&now, 0); printf("\tYCBCR -> RGb in %f\n", get_time(last, now)); printf("Equalization computed in %f\n", get_time(computation, now)); last = now; ///================================================================= // Save images save_image("histo.jpg", eqpixels, width, height); free(eqpixels); gettimeofday(&now, 0); printf("Result saved in %f\n", get_time(last, now)); printf("Total time %f\n", get_time(start, now)); return 0; }
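/*
 * Illustrative stand-alone sketch, not part of the file above: the equalization step
 * builds a 256-entry look-up table lut[v] = 255 * CDF(v) from the luma histogram, which
 * is exactly what the ylut loop does. A minimal host version of just that step, with a
 * made-up 100-sample histogram, might look like this:
 */
#include <cstdio>

// Builds a 256-entry equalization LUT from a luma histogram with `total` samples.
static void buildEqualizationLUT(const unsigned int hist[256], unsigned int total,
                                 float lut[256]){
    double cdf = 0.0;
    for (int v = 0; v < 256; ++v){
        cdf += (double)hist[v] / total;   // running fraction of pixels with luma <= v
        lut[v] = (float)(cdf * 255.0);    // stretch the CDF over the full output range
    }
}

int main(){
    unsigned int hist[256] = {0};
    hist[10] = 60; hist[11] = 20; hist[200] = 15; hist[255] = 5;   // made-up histogram
    float lut[256];
    buildEqualizationLUT(hist, 100, lut);
    // the crowded dark values 10 and 11 get spread out to ~153 and ~204
    std::printf("lut[10]=%.1f lut[11]=%.1f lut[200]=%.1f lut[255]=%.1f\n",
                lut[10], lut[11], lut[200], lut[255]);
    return 0;
}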
f81d35e60303c89ef41265b920bceb22f5e91849.cu
#include <sys/time.h> #include <cuda_runtime.h> #include "utils.h" #define CUCHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __device__ unsigned char clamp(float value, int min, int max) { if(value < min) return 0; else if(value > max) return max; else return value; } __global__ void RGBToYCBCR(const unsigned char *pixels, float *ycbcrimg, unsigned char *yimg, int width, int height) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < width && y < height) { /* const float KB = 0.0593; const float KR = 0.2627; const float KG = 1 - KB - KR; unsigned char R = pixels[(y * width + x) * 3 + 0]; unsigned char G = pixels[(y * width + x) * 3 + 1]; unsigned char B = pixels[(y * width + x) * 3 + 2]; float Y = KR * R + (1 - KR - KB) * G + KB * B; float CB = (B - Y) / (1 + KR + KG - KB); float CR = (R - Y) / (1 - KR + KG + KB); yimg[y * width + x] = clamp(Y, 0, 255); ycbcrimg[(y * width + x)*3 + 0] = Y; ycbcrimg[(y * width + x)*3 + 1] = CB; ycbcrimg[(y * width + x)*3 + 2] = CR; */ unsigned char R = pixels[(y * width + x) * 3 + 0]; unsigned char G = pixels[(y * width + x) * 3 + 1]; unsigned char B = pixels[(y * width + x) * 3 + 2]; float Y = 0.2627f * R + 0.678f * G + 0.0593f * B; float CB = (B - Y) / 1.8814f; float CR = (R - Y) / 1.4746f; yimg[y * width + x] = clamp(Y, 0, 255); //ycbcrimg[(y * width + x)*3 + 0] = Y; ycbcrimg[(y * width + x)*3 + 1] = CB; ycbcrimg[(y * width + x)*3 + 2] = CR; } } __global__ void YCBCRToRGB(const float *ycbcrimg, unsigned char *yimg, float *lut, unsigned char *pixels, int width, int height) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < width && y < height) { /* const float KB = 0.0593; const float KR = 0.2627; const float KG = 1 - KB - KR; float Y = lut[yimg[y * width + x]]; float CB = ycbcrimg[(y * width + x) * 3 + 1]; float CR = ycbcrimg[(y * width + x) * 3 + 2]; unsigned char R = clamp((Y + CR * (1 - KR + KG + KB)), 0, 255); unsigned char B = clamp((Y + CB * (1 + KR + KG - KB)), 0, 255); unsigned char G = clamp((Y - KR * R - KB * B) / (1 - KB - KR), 0, 255); yimg[y * width + x] = Y; pixels[(y * width + x) * 3 + 0] = R; pixels[(y * width + x) * 3 + 1] = G; pixels[(y * width + x) * 3 + 2] = B; */ float Y = lut[yimg[y * width + x]]; float CB = ycbcrimg[(y * width + x) * 3 + 1]; float CR = ycbcrimg[(y * width + x) * 3 + 2]; unsigned char R = clamp(Y + CR * 1.4746f, 0, 255); unsigned char B = clamp(Y + CB * 1.8814f, 0, 255); unsigned char G = clamp((Y - 0.2627f * R - 0.0593f * B) / 0.678, 0, 255); pixels[(y * width + x) * 3 + 0] = R; pixels[(y * width + x) * 3 + 1] = G; pixels[(y * width + x) * 3 + 2] = B; } } __global__ void histogram(unsigned char *yimg, unsigned int *yhist, int width, int height) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < width && y < height) { atomicAdd(&yhist[yimg[y * width + x]], 1); } } int main(int argc, char **argv) { struct timeval start, last, now, computation; gettimeofday(&start, 0); if(argc < 2) { printf("usage: %s image\n", argv[0]); return 0; } ilInit(); ilEnable(IL_ORIGIN_SET); ilEnable(IL_FILE_OVERWRITE); ILboolean result = ilLoadImage(argv[1]); if(!result) { ILenum err = ilGetError() ; printf("Failed to 
load %s\n", argv[1]); printf("Error: %s\n", ilGetString(err)); } ilConvertImage(IL_RGB, IL_UNSIGNED_BYTE); ilOriginFunc(IL_ORIGIN_UPPER_LEFT); ILuint width = ilGetInteger(IL_IMAGE_WIDTH); ILuint height = ilGetInteger(IL_IMAGE_HEIGHT); unsigned int size = width * height; ILubyte *ILpixels = ilGetData(); gettimeofday(&now, 0); computation = last = now; unsigned char *pixels = 0; cudaMallocHost((void**)&pixels, size * 3 * sizeof(unsigned char)); memcpy(pixels, ILpixels, size * 3 * sizeof(unsigned char)); printf("Image (%d * %d) loaded in %f\n", width, height, get_time(start, now)); unsigned char *eqpixels = (unsigned char*)malloc(size * 3 * sizeof(unsigned char)); printf("Size: %d * %d\n", width, height); const int nStreams = 4; int streamSize = (height / nStreams) * width; cudaStream_t streams[nStreams]; for(int i=0; i<nStreams; i++) CUCHECK(cudaStreamCreate(&streams[i])); ///================================================================= unsigned char *d_pixels = 0, *d_eqpixels = 0; unsigned char *d_yimg = 0; unsigned int *d_yhist = 0; float *d_ycbcrimg = 0; float *d_lut = 0; CUCHECK(cudaMalloc((void**)&d_pixels, size * 3 * sizeof(unsigned char))); CUCHECK(cudaMalloc((void**)&d_ycbcrimg, size * 3 * sizeof(float))); CUCHECK(cudaMalloc((void**)&d_yimg, size * sizeof(unsigned char))); CUCHECK(cudaMalloc((void**)&d_yhist, 256 * sizeof(unsigned int))); CUCHECK(cudaMalloc((void**)&d_lut, 256 * sizeof(float))); cudaMemset(d_yhist, 0, 256 * sizeof(unsigned int)); // RGB -> YCBCR dim3 blockDim(16, 16, 1); dim3 gridDim((width + blockDim.x - 1) / blockDim.x, (height / nStreams + blockDim.y - 1) / blockDim.y, 1); for(int i=0; i<nStreams; i++) { int offset = i * streamSize; CUCHECK(cudaMemcpyAsync(&d_pixels[offset * 3], &pixels[offset * 3], streamSize * 3 * sizeof(unsigned char), cudaMemcpyHostToDevice, streams[i])); } for(int i=0; i<nStreams; i++) { int offset = i * streamSize; RGBToYCBCR<<<gridDim, blockDim, 0, streams[i]>>>(&d_pixels[offset * 3], &d_ycbcrimg[offset * 3], &d_yimg[offset], width, height / nStreams); } for(int i=0; i<nStreams; i++) { int offset = i * streamSize; histogram<<<gridDim, blockDim, 0, streams[i]>>>(&d_yimg[offset], d_yhist, width, height / nStreams); } cudaFree(d_pixels); for(int i=0; i<nStreams; i++) CUCHECK(cudaStreamDestroy(streams[i])); cudaThreadSynchronize(); gettimeofday(&now, 0); printf("\tHistogram computed in %f\n", get_time(last, now)); last = now; unsigned int *yhist = (unsigned int*)malloc(256 * sizeof(unsigned int)); cudaMemcpy(yhist, d_yhist, 256 * sizeof(unsigned int), cudaMemcpyDeviceToHost); // Equalization float ylut[256]; double ysum = 0; for(unsigned int i = 0; i < 256; ++i) { ysum += (float)yhist[i] / size; ylut[i] = ysum * 255; } cudaMemcpy(d_lut, ylut, 256 * sizeof(float), cudaMemcpyHostToDevice); free(yhist); cudaThreadSynchronize(); gettimeofday(&now, 0); printf("\tEqualization computed in %f\n", get_time(last, now)); last = now; CUCHECK(cudaMalloc((void**)&d_eqpixels, size * 3 * sizeof(unsigned char))); // YCBCR -> RGB blockDim = dim3(16, 16, 1); gridDim = dim3((width + blockDim.x - 1) / blockDim.x, (height + blockDim.y - 1) / blockDim.y, 1); YCBCRToRGB<<<gridDim, blockDim, 0>>>(d_ycbcrimg, d_yimg, d_lut, d_eqpixels, width, height); cudaMemcpy(eqpixels, d_eqpixels, size * 3 * sizeof(unsigned char), cudaMemcpyDeviceToHost); cudaFree(d_eqpixels); cudaFree(d_ycbcrimg); cudaFree(d_yimg); cudaFree(d_yhist); cudaFree(d_lut); cudaThreadSynchronize(); gettimeofday(&now, 0); printf("\tYCBCR -> RGb in %f\n", get_time(last, now)); printf("Equalization 
computed in %f\n", get_time(computation, now)); last = now; ///================================================================= // Save images save_image("histo.jpg", eqpixels, width, height); free(eqpixels); gettimeofday(&now, 0); printf("Result saved in %f\n", get_time(last, now)); printf("Total time %f\n", get_time(start, now)); return 0; }
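/*
 * Illustrative stand-alone sketch, not part of the file above: the copy/kernel overlap
 * in this program relies on the host buffer being pinned (cudaMallocHost) and on each
 * row-chunk getting its own stream. A minimal version of that pattern, with a
 * hypothetical scale() kernel standing in for the real RGBToYCBCR/histogram kernels,
 * might look like this:
 */
#include <cuda_runtime.h>
#include <cstdio>

__global__ void scale(float* data, int n, float factor){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

int main(){
    const int nStreams = 4;
    const int chunk = 1 << 20;            // elements handled per stream
    const int n = nStreams * chunk;

    float *h = 0, *d = 0;
    cudaMallocHost((void**)&h, n * sizeof(float));   // pinned, required for true async copies
    cudaMalloc((void**)&d, n * sizeof(float));
    for (int i = 0; i < n; i++) h[i] = 1.0f;

    cudaStream_t streams[nStreams];
    for (int s = 0; s < nStreams; s++) cudaStreamCreate(&streams[s]);

    // Each stream copies its chunk in, processes it, and copies it back; work in
    // different streams can overlap.
    for (int s = 0; s < nStreams; s++){
        int off = s * chunk;
        cudaMemcpyAsync(d + off, h + off, chunk * sizeof(float), cudaMemcpyHostToDevice, streams[s]);
        scale<<<(chunk + 255) / 256, 256, 0, streams[s]>>>(d + off, chunk, 2.0f);
        cudaMemcpyAsync(h + off, d + off, chunk * sizeof(float), cudaMemcpyDeviceToHost, streams[s]);
    }
    cudaDeviceSynchronize();
    std::printf("h[0]=%f h[n-1]=%f\n", h[0], h[n - 1]);   // both should be 2.0

    for (int s = 0; s < nStreams; s++) cudaStreamDestroy(streams[s]);
    cudaFree(d);
    cudaFreeHost(h);
    return 0;
}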
f6660dbe7464a4b3094f2ee1e1e41fce0686a805.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> struct City { int x, y; char name; }; inline void GPUassert(hipError_t code, char * file, int line, bool Abort=true) { if (code != 0) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code),file,line); if (Abort) exit(code); } } #define GPUerrchk(ans) { GPUassert((ans), __FILE__, __LINE__); } #define NUMCITIES 9 #define CITYSIZE sizeof(struct City) // #define CITYSIZE 1 __host__ __device__ void printCity(struct City c){ printf("[x=%i, y=%i]\n", c.x, c.y); } __host__ __device__ void swap(struct City *a, int x, int y){ struct City temp; temp = a[x]; a[x] = a[y]; a[y] = temp; } __device__ double get_distance(struct City c1, struct City c2){ double x = double(c1.x - c2.x); double y = double(c1.y - c2.y); return sqrt((x*x) + (y*y)); } __device__ long total_distance(struct City *path){ long distance = 0; for(int i = 0; i < NUMCITIES - 1; i++){ distance += get_distance(path[i], path[i+1]); } return (long)distance; } __device__ void shortest_path(struct City *path){ int best_path_idx = 0; for(int i = 0; i < NUMCITIES - 1; i++){ printCity(path[i]); } } __device__ void print_path(struct City *path){ for(int i = 0; i < NUMCITIES; i++){ printf("%c>", path[i].name); } printf("\n"); } __device__ void format_path(struct City *path, char *str){ for(int i = 0; i < NUMCITIES; i++){ *str = path[i].name; str++; *str = '>'; str++; } str--; *str = 0; } __device__ void permutations_kernel(struct City *a, char **paths, double *distances, int i, int length, int tid, int *count) { if (length == i){ long distance = total_distance(a); //format_path(a, paths[count[0]]); count[0] = count[0] + 1; } else { for (int j = i; j < length; j++) { swap(a, i, j); // CUDA // permutations(a, i+1, length, tid, count); permutations_kernel(a, paths, distances, i+1, length, tid, count); swap(a, i, j); } } } __global__ void permute_kernel(struct City *dev_cities, char **paths, double *distances, int size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int count[1]; count[0] = 0; struct City local_array[NUMCITIES]; for (int i=0; i<size; i++){ local_array[i] = dev_cities[i]; } //swap(local_array + threadIdx.x, local_array); //swap(local_array, threadIdx.x, 0); permutations_kernel(local_array, paths, distances, 0, NUMCITIES, tid, count); } long factorial(int i) { long result = 1; while(i > 0) { result *= i; i--; } return result; } int main(){ struct City host_cities[NUMCITIES]; for(int c = 0; c < NUMCITIES; c++){ host_cities[c].name = 'A' + c; host_cities[c].x = rand() % 20 + 5; host_cities[c].y = rand() % 20 + 5; } //char host_paths [ factorial(NUMCITIES) ][ NUMCITIES*NUMCITIES ]; char host_paths [0][0]; char **device_paths; //double host_distances[ factorial(NUMCITIES) ]; double host_distances[0]; double *device_distances; float time; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); struct City *device_cities; hipMalloc((void**) &device_cities, sizeof(host_cities)); //hipMalloc((void**) &device_paths, sizeof(host_paths)); hipMalloc((void**) &device_paths, sizeof(char) * NUMCITIES * NUMCITIES * factorial(NUMCITIES)); hipMalloc((void**) &device_distances, sizeof(host_distances)); GPUerrchk(hipMemcpy(device_distances, host_distances, sizeof(host_distances), hipMemcpyHostToDevice)); GPUerrchk(hipMemcpy(device_cities, host_cities, sizeof(host_cities), hipMemcpyHostToDevice)); //GPUerrchk(hipMemcpy(device_paths, host_paths, sizeof(host_paths), hipMemcpyHostToDevice)); GPUerrchk(hipMemcpy(device_paths, 
host_paths, sizeof(char) * NUMCITIES * NUMCITIES * factorial(NUMCITIES), hipMemcpyHostToDevice));
    
    hipEventRecord(start,0);
    hipLaunchKernelGGL(( permute_kernel), dim3(1), dim3(NUMCITIES), 0, 0, device_cities, device_paths, device_distances, NUMCITIES);
    hipEventRecord(stop,0);
    
    GPUerrchk(hipPeekAtLastError());
    GPUerrchk(hipDeviceSynchronize());
    
    hipEventElapsedTime( &time, start, stop );
    printf("\nExecution time: %f ms\n\n", time);
    
    hipEventDestroy( start );
    hipEventDestroy( stop );
    
    hipFree(device_cities);
    return 0;
}
f6660dbe7464a4b3094f2ee1e1e41fce0686a805.cu
#include <stdio.h> struct City { int x, y; char name; }; inline void GPUassert(cudaError_t code, char * file, int line, bool Abort=true) { if (code != 0) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code),file,line); if (Abort) exit(code); } } #define GPUerrchk(ans) { GPUassert((ans), __FILE__, __LINE__); } #define NUMCITIES 9 #define CITYSIZE sizeof(struct City) // #define CITYSIZE 1 __host__ __device__ void printCity(struct City c){ printf("[x=%i, y=%i]\n", c.x, c.y); } __host__ __device__ void swap(struct City *a, int x, int y){ struct City temp; temp = a[x]; a[x] = a[y]; a[y] = temp; } __device__ double get_distance(struct City c1, struct City c2){ double x = double(c1.x - c2.x); double y = double(c1.y - c2.y); return sqrt((x*x) + (y*y)); } __device__ long total_distance(struct City *path){ long distance = 0; for(int i = 0; i < NUMCITIES - 1; i++){ distance += get_distance(path[i], path[i+1]); } return (long)distance; } __device__ void shortest_path(struct City *path){ int best_path_idx = 0; for(int i = 0; i < NUMCITIES - 1; i++){ printCity(path[i]); } } __device__ void print_path(struct City *path){ for(int i = 0; i < NUMCITIES; i++){ printf("%c>", path[i].name); } printf("\n"); } __device__ void format_path(struct City *path, char *str){ for(int i = 0; i < NUMCITIES; i++){ *str = path[i].name; str++; *str = '>'; str++; } str--; *str = 0; } __device__ void permutations_kernel(struct City *a, char **paths, double *distances, int i, int length, int tid, int *count) { if (length == i){ long distance = total_distance(a); //format_path(a, paths[count[0]]); count[0] = count[0] + 1; } else { for (int j = i; j < length; j++) { swap(a, i, j); // CUDA // permutations(a, i+1, length, tid, count); permutations_kernel(a, paths, distances, i+1, length, tid, count); swap(a, i, j); } } } __global__ void permute_kernel(struct City *dev_cities, char **paths, double *distances, int size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int count[1]; count[0] = 0; struct City local_array[NUMCITIES]; for (int i=0; i<size; i++){ local_array[i] = dev_cities[i]; } //swap(local_array + threadIdx.x, local_array); //swap(local_array, threadIdx.x, 0); permutations_kernel(local_array, paths, distances, 0, NUMCITIES, tid, count); } long factorial(int i) { long result = 1; while(i > 0) { result *= i; i--; } return result; } int main(){ struct City host_cities[NUMCITIES]; for(int c = 0; c < NUMCITIES; c++){ host_cities[c].name = 'A' + c; host_cities[c].x = rand() % 20 + 5; host_cities[c].y = rand() % 20 + 5; } //char host_paths [ factorial(NUMCITIES) ][ NUMCITIES*NUMCITIES ]; char host_paths [0][0]; char **device_paths; //double host_distances[ factorial(NUMCITIES) ]; double host_distances[0]; double *device_distances; float time; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); struct City *device_cities; cudaMalloc((void**) &device_cities, sizeof(host_cities)); //cudaMalloc((void**) &device_paths, sizeof(host_paths)); cudaMalloc((void**) &device_paths, sizeof(char) * NUMCITIES * NUMCITIES * factorial(NUMCITIES)); cudaMalloc((void**) &device_distances, sizeof(host_distances)); GPUerrchk(cudaMemcpy(device_distances, host_distances, sizeof(host_distances), cudaMemcpyHostToDevice)); GPUerrchk(cudaMemcpy(device_cities, host_cities, sizeof(host_cities), cudaMemcpyHostToDevice)); //GPUerrchk(cudaMemcpy(device_paths, host_paths, sizeof(host_paths), cudaMemcpyHostToDevice)); GPUerrchk(cudaMemcpy(device_paths, host_paths, sizeof(char) * NUMCITIES * NUMCITIES * 
factorial(NUMCITIES), cudaMemcpyHostToDevice));
    
    cudaEventRecord(start,0);
    permute_kernel<<<1, NUMCITIES>>>(device_cities, device_paths, device_distances, NUMCITIES);
    cudaEventRecord(stop,0);
    
    GPUerrchk(cudaPeekAtLastError());
    GPUerrchk(cudaDeviceSynchronize());
    
    cudaEventElapsedTime( &time, start, stop );
    printf("\nExecution time: %f ms\n\n", time);
    
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    
    cudaFree(device_cities);
    return 0;
}
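/*
 * Illustrative stand-alone sketch, not part of the file above: the path buffer is sized
 * as NUMCITIES*NUMCITIES*factorial(NUMCITIES) bytes and the recursive kernel visits every
 * permutation of the city array, so both memory and work grow factorially with the city
 * count. A quick host-side check of that sizing for a few values of n:
 */
#include <cstdio>

static long long factorial(int i){
    long long result = 1;
    while (i > 0){ result *= i; i--; }
    return result;
}

int main(){
    for (int n = 8; n <= 12; n++){
        long long perms = factorial(n);              // permutations of the city array
        long long bytes = (long long)n * n * perms;  // formatted-path buffer, as allocated above
        std::printf("n=%2d permutations=%12lld path buffer=%9.1f MiB\n",
                    n, perms, bytes / (1024.0 * 1024.0));
    }
    return 0;
}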
2503992f46a68e9dd57b13b054ad7c4eac95ace0.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without *modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright *notice, this list of conditions and the following disclaimer in the *documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its *contributors may be used to endorse or promote products derived from this *software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT, *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY *OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, *EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Execution environment */ #include <iostream> #include <stdexcept> #include <iomanip> #include <ios> #include "cublas_helpers.h" #include "sparse_gemm_operation_profiler.h" #include "gpu_timer.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Ctor SparseGemmOperationProfiler::SparseGemmOperationProfiler(Options const& options) : OperationProfiler(options, library::OperationKind::kSparseGemm, { {ArgumentTypeID::kEnumerated, {"gemm_kind"}, "Variant of GEMM (e.g. gemm, planar " "complex, batched, ...)"}, {ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the GEMM problem space"}, {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the GEMM problem space"}, {ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the GEMM problem space"}, {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"}, {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"}, {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"}, {ArgumentTypeID::kTensor, {"E"}, "Tensor storing the E operand"}, {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, {ArgumentTypeID::kInteger, {"split_k_slices"}, "Number of partitions of K dimension"}, {ArgumentTypeID::kInteger, {"batch_count"}, "Number of GEMMs computed in one batch"}, }) { description_ = " Structured sparse GEMM. 
D = alpha * A*B + beta * C"; } /// Destructor SparseGemmOperationProfiler::~SparseGemmOperationProfiler() {} /// Prints usage statement for the math function void SparseGemmOperationProfiler::print_usage(std::ostream& out) const { out << "Sparse GEMM" << "\n\n"; OperationProfiler::print_usage(out); } /// Prints examples void SparseGemmOperationProfiler::print_examples(std::ostream& out) const { out << "\nExamples:\n\n" << "Profile a particular problem size:\n" << " $ cutlass_profiler --operation=SparseGemm --m=1024 --n=1024 " "--k=128\n\n" << "Schmoo over problem size and beta:\n" << " $ cutlass_profiler --operation=SparseGemm --m=1024:4096:256 " "--n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n" << "Schmoo over accumulator types:\n" << " $ cutlass_profiler --operation=SparseGemm " "--accumulator-type=f16,f32\n\n" << "Run when A is f16 with column-major and B is any datatype with " "row-major (For column major, use column, col, or n. For row major " "use, row or t):\n" << " $ cutlass_profiler --operation=SparseGemm --A=f16:column " "--B=*:row\n\n" << "Using various input value distribution:\n" << " $ cutlass_profiler --operation=SparseGemm " "--dist=uniform,min:0,max:3\n" << " $ cutlass_profiler --operation=SparseGemm " "--dist=gaussian,mean:0,stddev:3\n" << " $ cutlass_profiler --operation=SparseGemm " "--dist=sequential,start:0,delta:1\n\n" << "Run a kernel with cta tile size of 256x128x32 and save workspace " "if results are incorrect (note that --cta-tile::k=32 is default " "cta-tile size):\n" << " $ cutlass_profiler --operation=SparseGemm --cta_m=256 --cta_n=128 " " --cta_k=32 --save-workspace=incorrect\n\n" << "Test your changes to gemm kernels with a quick functional test and " "save results in functional-test.csv:\n" << " $ cutlass_profiler --operation=SparseGemm \\ \n" << " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n" << " --beta=0,1,2 --profiling-iterations=1 \\ \n" << " --providers=cutlass --output=functional-test.csv\n\n"; } ///////////////////////////////////////////////////////////////////////////////////////////////// Status SparseGemmOperationProfiler::SparseGemmProblem::parse( library::SparseGemmDescription const& operation_desc, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { if (!arg_as_int(this->m, "m", problem_space, problem)) { // default value this->m = 1024; } if (!arg_as_int(this->n, "n", problem_space, problem)) { // default value this->n = 1024; } if (!arg_as_int(this->k, "k", problem_space, problem)) { // default value this->k = 1024; } if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) { // default value this->split_k_slices = 1; } if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) { // default value this->batch_count = 1; } if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.E, "E", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!arg_as_scalar(this->alpha, operation_desc.element_epilogue, "alpha", problem_space, problem)) { if 
(!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) { return Status::kErrorInternal; } } if (!arg_as_scalar(this->beta, operation_desc.element_epilogue, "beta", problem_space, problem)) { if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) { return Status::kErrorInternal; } } this->elements_per_128b = 128 / library::sizeof_bits(operation_desc.A.element); this->lda = DeviceAllocation::get_packed_layout( operation_desc.A.layout, {int(this->m), int(this->k) / int(this->sparse)}) .front(); this->ldb = DeviceAllocation::get_packed_layout( operation_desc.B.layout, {int(this->k), int(this->n)}) .front(); this->ldc = DeviceAllocation::get_packed_layout( operation_desc.C.layout, {int(this->m), int(this->n)}) .front(); this->lde = DeviceAllocation::get_packed_layout( operation_desc.E.layout, {int(this->m), int(this->k / this->sparse / this->elements_per_128b)}) .front(); return Status::kSuccess; } /// Initializes a performance result void SparseGemmOperationProfiler::SparseGemmProblem::initialize_result( PerformanceResult& result, library::SparseGemmDescription const& operation_desc, ProblemSpace const& problem_space) { result.arguments.resize(problem_space.rank()); set_argument(result, "gemm_kind", problem_space, library::to_string(operation_desc.gemm_kind)); set_argument(result, "A", problem_space, std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout)); set_argument(result, "B", problem_space, std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout)); set_argument(result, "C", problem_space, std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout)); set_argument(result, "E", problem_space, std::string(library::to_string(operation_desc.E.element)) + ":" + library::to_string(operation_desc.E.layout)); set_argument(result, "m", problem_space, m); set_argument(result, "n", problem_space, n); set_argument(result, "k", problem_space, k); set_argument(result, "split_k_slices", problem_space, split_k_slices); set_argument(result, "batch_count", problem_space, batch_count); set_argument(result, "alpha", problem_space, library::lexical_cast(alpha, operation_desc.element_epilogue)); set_argument(result, "beta", problem_space, library::lexical_cast(beta, operation_desc.element_epilogue)); } /// Extracts the problem dimensions Status SparseGemmOperationProfiler::initialize_configuration( Options const& options, PerformanceReport& report, DeviceContext& device_context, library::Operation const* operation, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { library::SparseGemmDescription const& operation_desc = static_cast<library::SparseGemmDescription const&>( operation->description()); if (operation_desc.gemm_kind != library::GemmKind::kSparse) { return Status::kErrorInvalidProblem; } Status status = problem_.parse(operation_desc, problem_space, problem); if (status != Status::kSuccess) { return status; } gemm_workspace_.configuration.problem_size.m() = int(problem_.m); gemm_workspace_.configuration.problem_size.n() = int(problem_.n); gemm_workspace_.configuration.problem_size.k() = int(problem_.k); gemm_workspace_.configuration.lda = problem_.lda; gemm_workspace_.configuration.ldb = problem_.ldb; gemm_workspace_.configuration.ldc = problem_.ldc; gemm_workspace_.configuration.ldd = problem_.ldc; gemm_workspace_.configuration.lde = problem_.lde; gemm_workspace_.arguments.A = nullptr; 
gemm_workspace_.arguments.B = nullptr; gemm_workspace_.arguments.C = nullptr; gemm_workspace_.arguments.D = nullptr; gemm_workspace_.arguments.E = nullptr; gemm_workspace_.arguments.alpha = problem_.alpha.data(); gemm_workspace_.arguments.beta = problem_.beta.data(); gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; initialize_result_(this->model_result_, options, operation_desc, problem_space); return operation->can_implement(&gemm_workspace_.configuration, &gemm_workspace_.arguments); } /// Initializes the performance result void SparseGemmOperationProfiler::initialize_result_( PerformanceResult& result, Options const& options, library::SparseGemmDescription const& operation_desc, ProblemSpace const& problem_space) { result.provider = library::Provider::kCUTLASS; result.disposition = Disposition::kNotRun; result.status = Status::kSuccess; result.operation_name = operation_desc.name; problem_.initialize_result(result, operation_desc, problem_space); OperationProfiler::initialize_result_(result, operation_desc, problem_space); // Input bytes read and Output bytes written for the gemm problem result.bytes = int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.m / 8) * problem_.k / problem_.sparse + int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.n / 8) * problem_.k + int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) * problem_.n + int64_t(library::sizeof_bits(operation_desc.E.element) * problem_.m / 8) * problem_.k / problem_.sparse / problem_.elements_per_128b; // Set is_beta_zero true if beta is zero bool is_beta_zero = std::all_of(problem_.beta.begin(), problem_.beta.end(), [](uint8_t i) { return i == 0; }); // Output bytes read for the gemm problem for non-zero beta values if (!is_beta_zero) { result.bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) * problem_.n; } result.flops = 2 * (problem_.m * problem_.n * problem_.k + problem_.m * problem_.n); result.runtime = 0; } /// Initializes workspace Status SparseGemmOperationProfiler::initialize_workspace( Options const& options, PerformanceReport& report, DeviceContext& device_context, library::Operation const* operation, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { library::SparseGemmDescription const& operation_desc = static_cast<library::SparseGemmDescription const&>( operation->description()); if (options.execution_mode != ExecutionMode::kDryRun) { gemm_workspace_.A = device_context.allocate_tensor( options, "A", operation_desc.A.element, operation_desc.A.layout, {int(problem_.m), int(problem_.k) / int(problem_.sparse)}, {int(problem_.lda)}); gemm_workspace_.B = device_context.allocate_tensor( options, "B", operation_desc.B.element, operation_desc.B.layout, {int(problem_.k), int(problem_.n)}, {int(problem_.ldb)}); gemm_workspace_.C = device_context.allocate_tensor( options, "C", operation_desc.C.element, operation_desc.C.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldc)}); gemm_workspace_.Computed = device_context.allocate_tensor( "D", operation_desc.C.element, operation_desc.C.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldc)}); gemm_workspace_.E = device_context.allocate_sparsemeta_tensor( options, "E", operation_desc.E.element, operation_desc.E.layout, operation_desc.A.element, {int(problem_.m), int(problem_.k) / int(problem_.sparse) / int(problem_.elements_per_128b)}, {int(problem_.lde)}); gemm_workspace_.Reference = device_context.allocate_tensor( "Reference", 
operation_desc.C.element, operation_desc.C.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldc)}); gemm_workspace_.Reference->copy_from_device(gemm_workspace_.C->data()); } // // Initialize the CUTLASS operation // Status status = Status::kSuccess; if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { if (options.execution_mode != ExecutionMode::kDryRun) { uint64_t workspace_size = operation->get_host_workspace_size( &gemm_workspace_.configuration); gemm_workspace_.host_workspace.resize(workspace_size, 0); workspace_size = operation->get_device_workspace_size( &gemm_workspace_.configuration); gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); status = operation->initialize( &gemm_workspace_.configuration, gemm_workspace_.host_workspace.data(), gemm_workspace_.device_workspace.data()); } // // If CUTLASS is enabled, generate a result for it // results_.push_back(model_result_); results_.back().provider = library::Provider::kCUTLASS; results_.back().op_kind = library::OperationKind::kSparseGemm; results_.back().disposition = Disposition::kNotRun; for (auto& verification_provider : options.verification.providers) { results_.back().verification_map[verification_provider] = Disposition::kNotRun; } } return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool SparseGemmOperationProfiler::verify_cutlass( Options const& options, PerformanceReport& report, DeviceContext& device_context, library::Operation const* operation, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { return true; } if (options.execution_mode == ExecutionMode::kDryRun) { return true; } // Initialize structure containing GEMM arguments gemm_workspace_.arguments.A = gemm_workspace_.A->data(); gemm_workspace_.arguments.B = gemm_workspace_.B->data(); gemm_workspace_.arguments.C = gemm_workspace_.C->data(); gemm_workspace_.arguments.D = gemm_workspace_.Computed->data(); gemm_workspace_.arguments.E = gemm_workspace_.E->data(); gemm_workspace_.arguments.alpha = problem_.alpha.data(); gemm_workspace_.arguments.beta = problem_.beta.data(); gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Run the CUTLASS operation // results_.back().status = operation->run( &gemm_workspace_.arguments, gemm_workspace_.host_workspace.data(), gemm_workspace_.device_workspace.data()); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } hipError_t result = hipDeviceSynchronize(); if (result != hipSuccess) { results_.back().disposition = Disposition::kFailed; return false; } // CUTLASS op ran the but not yet verified against any verification provider results_.back().disposition = Disposition::kNotVerified; // // Run verification providers // if (options.verification.enabled) { // Update disposition to worst case verification outcome among all // verification providers which are supported bool is_any_verification_run_passed = false; for (auto& m : results_.back().verification_map) { if (m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { results_.back().disposition = m.second; return true; } if (!is_any_verification_run_passed && m.second == Disposition::kPassed) { is_any_verification_run_passed = true; } } if (is_any_verification_run_passed) { results_.back().disposition = 
Disposition::kPassed; } } // Return true means continue profiling return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Measures performance results bool SparseGemmOperationProfiler::profile( Options const& options, PerformanceReport& report, DeviceContext& device_context, library::Operation const* operation, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { // Initialize structure containing GEMM arguments gemm_workspace_.arguments.A = gemm_workspace_.A->data(); gemm_workspace_.arguments.B = gemm_workspace_.B->data(); gemm_workspace_.arguments.C = gemm_workspace_.C->data(); gemm_workspace_.arguments.D = gemm_workspace_.Computed->data(); gemm_workspace_.arguments.E = gemm_workspace_.E->data(); gemm_workspace_.arguments.alpha = problem_.alpha.data(); gemm_workspace_.arguments.beta = problem_.beta.data(); gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; results_.back().status = profile_cutlass_(results_.back().runtime, options, operation, &gemm_workspace_.arguments, gemm_workspace_.host_workspace.data(), gemm_workspace_.device_workspace.data()); } return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
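/*
 * Illustrative stand-alone sketch, not part of the profiler source above: the
 * initialize_result_ routine models traffic as bytes(compressed A) + bytes(B) + bytes(D)
 * [+ bytes(C) again when beta != 0] + bytes(E metadata), and work as
 * flops = 2*(m*n*k + m*n). Restated as a small standalone helper; the element widths
 * and the structured-sparsity factor of 2 passed in main() are assumptions of this
 * sketch, not values read from the profiler:
 */
#include <cstdio>
#include <cstdint>

struct SparseGemmModel { int64_t bytes; int64_t flops; };

static SparseGemmModel model(int64_t m, int64_t n, int64_t k,
                             int bitsA, int bitsB, int bitsC, int bitsE,
                             int sparse, bool beta_nonzero){
    int64_t elements_per_128b = 128 / bitsA;                // metadata packing, as in the profiler
    SparseGemmModel r;
    r.bytes = (bitsA * m / 8) * (k / sparse)                // compressed A read
            + (bitsB * n / 8) * k                           // dense B read
            + (bitsC * m / 8) * n                           // D written
            + (bitsE * m / 8) * (k / sparse / elements_per_128b);   // E metadata read
    if (beta_nonzero) r.bytes += (bitsC * m / 8) * n;       // C read only when beta != 0
    r.flops = 2 * (m * n * k + m * n);
    return r;
}

int main(){
    // Assumed problem: 4096^3, 16-bit A/B/C, 32-bit metadata, 2:4 sparsity, beta == 0.
    SparseGemmModel r = model(4096, 4096, 4096, 16, 16, 16, 32, 2, false);
    std::printf("bytes=%lld flops=%lld arithmetic intensity=%.1f flop/byte\n",
                (long long)r.bytes, (long long)r.flops, (double)r.flops / r.bytes);
    return 0;
}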
2503992f46a68e9dd57b13b054ad7c4eac95ace0.cu
/*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without *modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright *notice, this list of conditions and the following disclaimer in the *documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its *contributors may be used to endorse or promote products derived from this *software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT, *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY *OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, *EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Execution environment */ #include <iostream> #include <stdexcept> #include <iomanip> #include <ios> #include "cublas_helpers.h" #include "sparse_gemm_operation_profiler.h" #include "gpu_timer.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Ctor SparseGemmOperationProfiler::SparseGemmOperationProfiler(Options const& options) : OperationProfiler(options, library::OperationKind::kSparseGemm, { {ArgumentTypeID::kEnumerated, {"gemm_kind"}, "Variant of GEMM (e.g. gemm, planar " "complex, batched, ...)"}, {ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the GEMM problem space"}, {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the GEMM problem space"}, {ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the GEMM problem space"}, {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"}, {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"}, {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"}, {ArgumentTypeID::kTensor, {"E"}, "Tensor storing the E operand"}, {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, {ArgumentTypeID::kInteger, {"split_k_slices"}, "Number of partitions of K dimension"}, {ArgumentTypeID::kInteger, {"batch_count"}, "Number of GEMMs computed in one batch"}, }) { description_ = " Structured sparse GEMM. 
D = alpha * A*B + beta * C"; } /// Destructor SparseGemmOperationProfiler::~SparseGemmOperationProfiler() {} /// Prints usage statement for the math function void SparseGemmOperationProfiler::print_usage(std::ostream& out) const { out << "Sparse GEMM" << "\n\n"; OperationProfiler::print_usage(out); } /// Prints examples void SparseGemmOperationProfiler::print_examples(std::ostream& out) const { out << "\nExamples:\n\n" << "Profile a particular problem size:\n" << " $ cutlass_profiler --operation=SparseGemm --m=1024 --n=1024 " "--k=128\n\n" << "Schmoo over problem size and beta:\n" << " $ cutlass_profiler --operation=SparseGemm --m=1024:4096:256 " "--n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n" << "Schmoo over accumulator types:\n" << " $ cutlass_profiler --operation=SparseGemm " "--accumulator-type=f16,f32\n\n" << "Run when A is f16 with column-major and B is any datatype with " "row-major (For column major, use column, col, or n. For row major " "use, row or t):\n" << " $ cutlass_profiler --operation=SparseGemm --A=f16:column " "--B=*:row\n\n" << "Using various input value distribution:\n" << " $ cutlass_profiler --operation=SparseGemm " "--dist=uniform,min:0,max:3\n" << " $ cutlass_profiler --operation=SparseGemm " "--dist=gaussian,mean:0,stddev:3\n" << " $ cutlass_profiler --operation=SparseGemm " "--dist=sequential,start:0,delta:1\n\n" << "Run a kernel with cta tile size of 256x128x32 and save workspace " "if results are incorrect (note that --cta-tile::k=32 is default " "cta-tile size):\n" << " $ cutlass_profiler --operation=SparseGemm --cta_m=256 --cta_n=128 " " --cta_k=32 --save-workspace=incorrect\n\n" << "Test your changes to gemm kernels with a quick functional test and " "save results in functional-test.csv:\n" << " $ cutlass_profiler --operation=SparseGemm \\ \n" << " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n" << " --beta=0,1,2 --profiling-iterations=1 \\ \n" << " --providers=cutlass --output=functional-test.csv\n\n"; } ///////////////////////////////////////////////////////////////////////////////////////////////// Status SparseGemmOperationProfiler::SparseGemmProblem::parse( library::SparseGemmDescription const& operation_desc, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { if (!arg_as_int(this->m, "m", problem_space, problem)) { // default value this->m = 1024; } if (!arg_as_int(this->n, "n", problem_space, problem)) { // default value this->n = 1024; } if (!arg_as_int(this->k, "k", problem_space, problem)) { // default value this->k = 1024; } if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) { // default value this->split_k_slices = 1; } if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) { // default value this->batch_count = 1; } if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.E, "E", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!arg_as_scalar(this->alpha, operation_desc.element_epilogue, "alpha", problem_space, problem)) { if 
(!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) { return Status::kErrorInternal; } } if (!arg_as_scalar(this->beta, operation_desc.element_epilogue, "beta", problem_space, problem)) { if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) { return Status::kErrorInternal; } } this->elements_per_128b = 128 / library::sizeof_bits(operation_desc.A.element); this->lda = DeviceAllocation::get_packed_layout( operation_desc.A.layout, {int(this->m), int(this->k) / int(this->sparse)}) .front(); this->ldb = DeviceAllocation::get_packed_layout( operation_desc.B.layout, {int(this->k), int(this->n)}) .front(); this->ldc = DeviceAllocation::get_packed_layout( operation_desc.C.layout, {int(this->m), int(this->n)}) .front(); this->lde = DeviceAllocation::get_packed_layout( operation_desc.E.layout, {int(this->m), int(this->k / this->sparse / this->elements_per_128b)}) .front(); return Status::kSuccess; } /// Initializes a performance result void SparseGemmOperationProfiler::SparseGemmProblem::initialize_result( PerformanceResult& result, library::SparseGemmDescription const& operation_desc, ProblemSpace const& problem_space) { result.arguments.resize(problem_space.rank()); set_argument(result, "gemm_kind", problem_space, library::to_string(operation_desc.gemm_kind)); set_argument(result, "A", problem_space, std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout)); set_argument(result, "B", problem_space, std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout)); set_argument(result, "C", problem_space, std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout)); set_argument(result, "E", problem_space, std::string(library::to_string(operation_desc.E.element)) + ":" + library::to_string(operation_desc.E.layout)); set_argument(result, "m", problem_space, m); set_argument(result, "n", problem_space, n); set_argument(result, "k", problem_space, k); set_argument(result, "split_k_slices", problem_space, split_k_slices); set_argument(result, "batch_count", problem_space, batch_count); set_argument(result, "alpha", problem_space, library::lexical_cast(alpha, operation_desc.element_epilogue)); set_argument(result, "beta", problem_space, library::lexical_cast(beta, operation_desc.element_epilogue)); } /// Extracts the problem dimensions Status SparseGemmOperationProfiler::initialize_configuration( Options const& options, PerformanceReport& report, DeviceContext& device_context, library::Operation const* operation, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { library::SparseGemmDescription const& operation_desc = static_cast<library::SparseGemmDescription const&>( operation->description()); if (operation_desc.gemm_kind != library::GemmKind::kSparse) { return Status::kErrorInvalidProblem; } Status status = problem_.parse(operation_desc, problem_space, problem); if (status != Status::kSuccess) { return status; } gemm_workspace_.configuration.problem_size.m() = int(problem_.m); gemm_workspace_.configuration.problem_size.n() = int(problem_.n); gemm_workspace_.configuration.problem_size.k() = int(problem_.k); gemm_workspace_.configuration.lda = problem_.lda; gemm_workspace_.configuration.ldb = problem_.ldb; gemm_workspace_.configuration.ldc = problem_.ldc; gemm_workspace_.configuration.ldd = problem_.ldc; gemm_workspace_.configuration.lde = problem_.lde; gemm_workspace_.arguments.A = nullptr; 
gemm_workspace_.arguments.B = nullptr; gemm_workspace_.arguments.C = nullptr; gemm_workspace_.arguments.D = nullptr; gemm_workspace_.arguments.E = nullptr; gemm_workspace_.arguments.alpha = problem_.alpha.data(); gemm_workspace_.arguments.beta = problem_.beta.data(); gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; initialize_result_(this->model_result_, options, operation_desc, problem_space); return operation->can_implement(&gemm_workspace_.configuration, &gemm_workspace_.arguments); } /// Initializes the performance result void SparseGemmOperationProfiler::initialize_result_( PerformanceResult& result, Options const& options, library::SparseGemmDescription const& operation_desc, ProblemSpace const& problem_space) { result.provider = library::Provider::kCUTLASS; result.disposition = Disposition::kNotRun; result.status = Status::kSuccess; result.operation_name = operation_desc.name; problem_.initialize_result(result, operation_desc, problem_space); OperationProfiler::initialize_result_(result, operation_desc, problem_space); // Input bytes read and Output bytes written for the gemm problem result.bytes = int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.m / 8) * problem_.k / problem_.sparse + int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.n / 8) * problem_.k + int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) * problem_.n + int64_t(library::sizeof_bits(operation_desc.E.element) * problem_.m / 8) * problem_.k / problem_.sparse / problem_.elements_per_128b; // Set is_beta_zero true if beta is zero bool is_beta_zero = std::all_of(problem_.beta.begin(), problem_.beta.end(), [](uint8_t i) { return i == 0; }); // Output bytes read for the gemm problem for non-zero beta values if (!is_beta_zero) { result.bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) * problem_.n; } result.flops = 2 * (problem_.m * problem_.n * problem_.k + problem_.m * problem_.n); result.runtime = 0; } /// Initializes workspace Status SparseGemmOperationProfiler::initialize_workspace( Options const& options, PerformanceReport& report, DeviceContext& device_context, library::Operation const* operation, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { library::SparseGemmDescription const& operation_desc = static_cast<library::SparseGemmDescription const&>( operation->description()); if (options.execution_mode != ExecutionMode::kDryRun) { gemm_workspace_.A = device_context.allocate_tensor( options, "A", operation_desc.A.element, operation_desc.A.layout, {int(problem_.m), int(problem_.k) / int(problem_.sparse)}, {int(problem_.lda)}); gemm_workspace_.B = device_context.allocate_tensor( options, "B", operation_desc.B.element, operation_desc.B.layout, {int(problem_.k), int(problem_.n)}, {int(problem_.ldb)}); gemm_workspace_.C = device_context.allocate_tensor( options, "C", operation_desc.C.element, operation_desc.C.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldc)}); gemm_workspace_.Computed = device_context.allocate_tensor( "D", operation_desc.C.element, operation_desc.C.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldc)}); gemm_workspace_.E = device_context.allocate_sparsemeta_tensor( options, "E", operation_desc.E.element, operation_desc.E.layout, operation_desc.A.element, {int(problem_.m), int(problem_.k) / int(problem_.sparse) / int(problem_.elements_per_128b)}, {int(problem_.lde)}); gemm_workspace_.Reference = device_context.allocate_tensor( "Reference", 
operation_desc.C.element, operation_desc.C.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldc)}); gemm_workspace_.Reference->copy_from_device(gemm_workspace_.C->data()); } // // Initialize the CUTLASS operation // Status status = Status::kSuccess; if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { if (options.execution_mode != ExecutionMode::kDryRun) { uint64_t workspace_size = operation->get_host_workspace_size( &gemm_workspace_.configuration); gemm_workspace_.host_workspace.resize(workspace_size, 0); workspace_size = operation->get_device_workspace_size( &gemm_workspace_.configuration); gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); status = operation->initialize( &gemm_workspace_.configuration, gemm_workspace_.host_workspace.data(), gemm_workspace_.device_workspace.data()); } // // If CUTLASS is enabled, generate a result for it // results_.push_back(model_result_); results_.back().provider = library::Provider::kCUTLASS; results_.back().op_kind = library::OperationKind::kSparseGemm; results_.back().disposition = Disposition::kNotRun; for (auto& verification_provider : options.verification.providers) { results_.back().verification_map[verification_provider] = Disposition::kNotRun; } } return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool SparseGemmOperationProfiler::verify_cutlass( Options const& options, PerformanceReport& report, DeviceContext& device_context, library::Operation const* operation, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { return true; } if (options.execution_mode == ExecutionMode::kDryRun) { return true; } // Initialize structure containing GEMM arguments gemm_workspace_.arguments.A = gemm_workspace_.A->data(); gemm_workspace_.arguments.B = gemm_workspace_.B->data(); gemm_workspace_.arguments.C = gemm_workspace_.C->data(); gemm_workspace_.arguments.D = gemm_workspace_.Computed->data(); gemm_workspace_.arguments.E = gemm_workspace_.E->data(); gemm_workspace_.arguments.alpha = problem_.alpha.data(); gemm_workspace_.arguments.beta = problem_.beta.data(); gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Run the CUTLASS operation // results_.back().status = operation->run( &gemm_workspace_.arguments, gemm_workspace_.host_workspace.data(), gemm_workspace_.device_workspace.data()); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } cudaError_t result = cudaDeviceSynchronize(); if (result != cudaSuccess) { results_.back().disposition = Disposition::kFailed; return false; } // CUTLASS op ran the but not yet verified against any verification provider results_.back().disposition = Disposition::kNotVerified; // // Run verification providers // if (options.verification.enabled) { // Update disposition to worst case verification outcome among all // verification providers which are supported bool is_any_verification_run_passed = false; for (auto& m : results_.back().verification_map) { if (m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { results_.back().disposition = m.second; return true; } if (!is_any_verification_run_passed && m.second == Disposition::kPassed) { is_any_verification_run_passed = true; } } if (is_any_verification_run_passed) { results_.back().disposition = 
Disposition::kPassed; } } // Return true means continue profiling return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Measures performance results bool SparseGemmOperationProfiler::profile( Options const& options, PerformanceReport& report, DeviceContext& device_context, library::Operation const* operation, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { // Initialize structure containing GEMM arguments gemm_workspace_.arguments.A = gemm_workspace_.A->data(); gemm_workspace_.arguments.B = gemm_workspace_.B->data(); gemm_workspace_.arguments.C = gemm_workspace_.C->data(); gemm_workspace_.arguments.D = gemm_workspace_.Computed->data(); gemm_workspace_.arguments.E = gemm_workspace_.E->data(); gemm_workspace_.arguments.alpha = problem_.alpha.data(); gemm_workspace_.arguments.beta = problem_.beta.data(); gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; results_.back().status = profile_cutlass_(results_.back().runtime, options, operation, &gemm_workspace_.arguments, gemm_workspace_.host_workspace.data(), gemm_workspace_.device_workspace.data()); } return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
0354dc53803794ea539dda174632c3af965f65bb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by lidan on 2020/9/20.
//

#include "book.cuh"

__device__ int addem(int a ,int b)
{
    return a + b ;
}

__global__ void add1(int a,int b,int *c)
{
    *c = addem(a,b) ;
}

int main(void)
{
    int c ;
    int *dev_c ;
    HANDLE_ERROR(hipMalloc((void**)&dev_c,sizeof(int) ) ) ;

    hipLaunchKernelGGL(( add1), dim3(1),dim3(1), 0, 0, 2,7,dev_c) ;
    HANDLE_ERROR(hipMemcpy(&c,dev_c,sizeof(int),hipMemcpyDeviceToHost)) ;

    printf( "2 + 7 = %d\n", c );
    HANDLE_ERROR( hipFree( dev_c ) );

    return 0;
}
0354dc53803794ea539dda174632c3af965f65bb.cu
//
// Created by lidan on 2020/9/20.
//

#include "book.cuh"

__device__ int addem(int a ,int b)
{
    return a + b ;
}

__global__ void add1(int a,int b,int *c)
{
    *c = addem(a,b) ;
}

int main(void)
{
    int c ;
    int *dev_c ;
    HANDLE_ERROR(cudaMalloc((void**)&dev_c,sizeof(int) ) ) ;

    add1<<<1,1>>>(2,7,dev_c) ;
    HANDLE_ERROR(cudaMemcpy(&c,dev_c,sizeof(int),cudaMemcpyDeviceToHost)) ;

    printf( "2 + 7 = %d\n", c );
    HANDLE_ERROR( cudaFree( dev_c ) );

    return 0;
}
3b032d1fab7d0efd27e85e0eb305f531882480c7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2017 XGBoost contributors */ #include <xgboost/tree_updater.h> #include <memory> #include <utility> #include <vector> #include "../common/compressed_iterator.h" #include "../common/device_helpers.cuh" #include "../common/hist_util.h" #include "param.h" #include "updater_gpu_common.cuh" namespace xgboost { namespace tree { DMLC_REGISTRY_FILE_TAG(updater_gpu_hist); typedef bst_gpair_integer gpair_sum_t; static const ncclDataType_t nccl_sum_t = ncclInt64; // Helper for explicit template specialisation template <int N> struct Int {}; struct DeviceGMat { dh::dvec<common::compressed_byte_t> gidx_buffer; common::CompressedIterator<uint32_t> gidx; dh::dvec<size_t> row_ptr; void Init(int device_idx, const common::GHistIndexMatrix& gmat, bst_ulong element_begin, bst_ulong element_end, bst_ulong row_begin, bst_ulong row_end, int n_bins) { dh::safe_cuda(hipSetDevice(device_idx)); CHECK(gidx_buffer.size()) << "gidx_buffer must be externally allocated"; CHECK_EQ(row_ptr.size(), (row_end - row_begin) + 1) << "row_ptr must be externally allocated"; common::CompressedBufferWriter cbw(n_bins); std::vector<common::compressed_byte_t> host_buffer(gidx_buffer.size()); cbw.Write(host_buffer.data(), gmat.index.begin() + element_begin, gmat.index.begin() + element_end); gidx_buffer = host_buffer; gidx = common::CompressedIterator<uint32_t>(gidx_buffer.data(), n_bins); // row_ptr dh::safe_cuda(hipMemcpy(row_ptr.data(), gmat.row_ptr.data() + row_begin, row_ptr.size() * sizeof(size_t), hipMemcpyHostToDevice)); // normalise row_ptr size_t start = gmat.row_ptr[row_begin]; auto d_row_ptr = row_ptr.data(); dh::launch_n(row_ptr.device_idx(), row_ptr.size(), [=] __device__(size_t idx) { d_row_ptr[idx] -= start; }); } }; struct HistHelper { gpair_sum_t* d_hist; int n_bins; __host__ __device__ HistHelper(gpair_sum_t* ptr, int n_bins) : d_hist(ptr), n_bins(n_bins) {} __device__ void Add(bst_gpair gpair, int gidx, int nidx) const { int hist_idx = nidx * n_bins + gidx; auto dst_ptr = reinterpret_cast<unsigned long long int*>(&d_hist[hist_idx]); // NOLINT gpair_sum_t tmp(gpair.GetGrad(), gpair.GetHess()); auto src_ptr = reinterpret_cast<gpair_sum_t::value_t*>(&tmp); atomicAdd(dst_ptr, static_cast<unsigned long long int>(*src_ptr)); // NOLINT atomicAdd(dst_ptr + 1, static_cast<unsigned long long int>(*(src_ptr + 1))); // NOLINT } __device__ gpair_sum_t Get(int gidx, int nidx) const { return d_hist[nidx * n_bins + gidx]; } }; struct DeviceHist { int n_bins; dh::dvec<gpair_sum_t> data; void Init(int n_bins_in) { this->n_bins = n_bins_in; CHECK(!data.empty()) << "DeviceHist must be externally allocated"; } void Reset(int device_idx) { hipSetDevice(device_idx); data.fill(gpair_sum_t()); } HistHelper GetBuilder() { return HistHelper(data.data(), n_bins); } gpair_sum_t* GetLevelPtr(int depth) { return data.data() + n_nodes(depth - 1) * n_bins; } int LevelSize(int depth) { return n_bins * n_nodes_level(depth); } }; template <int BLOCK_THREADS> __global__ void find_split_kernel( const gpair_sum_t* d_level_hist, int* d_feature_segments, int depth, int n_features, int n_bins, DeviceNodeStats* d_nodes, int nodes_offset_device, float* d_fidx_min_map, float* d_gidx_fvalue_map, GPUTrainingParam gpu_param, bool* d_left_child_smallest_temp, bool colsample, int* d_feature_flags) { typedef hipcub::KeyValuePair<int, float> ArgMaxT; typedef hipcub::BlockScan<gpair_sum_t, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS> BlockScanT; typedef 
hipcub::BlockReduce<ArgMaxT, BLOCK_THREADS> MaxReduceT; typedef hipcub::BlockReduce<gpair_sum_t, BLOCK_THREADS> SumReduceT; union TempStorage { typename BlockScanT::TempStorage scan; typename MaxReduceT::TempStorage max_reduce; typename SumReduceT::TempStorage sum_reduce; }; __shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split; DeviceSplitCandidate& split = uninitialized_split.Alias(); __shared__ cub::Uninitialized<gpair_sum_t> uninitialized_sum; gpair_sum_t& shared_sum = uninitialized_sum.Alias(); __shared__ ArgMaxT block_max; __shared__ TempStorage temp_storage; if (threadIdx.x == 0) { split = DeviceSplitCandidate(); } __syncthreads(); // below two are for accessing full-sized node list stored on each device // always one block per node, BLOCK_THREADS threads per block int level_node_idx = blockIdx.x + nodes_offset_device; int node_idx = n_nodes(depth - 1) + level_node_idx; for (int fidx = 0; fidx < n_features; fidx++) { if (colsample && d_feature_flags[fidx] == 0) continue; int begin = d_feature_segments[level_node_idx * n_features + fidx]; int end = d_feature_segments[level_node_idx * n_features + fidx + 1]; gpair_sum_t feature_sum = gpair_sum_t(); for (int reduce_begin = begin; reduce_begin < end; reduce_begin += BLOCK_THREADS) { bool thread_active = reduce_begin + threadIdx.x < end; // Scan histogram gpair_sum_t bin = thread_active ? d_level_hist[reduce_begin + threadIdx.x] : gpair_sum_t(); feature_sum += SumReduceT(temp_storage.sum_reduce).Reduce(bin, hipcub::Sum()); } if (threadIdx.x == 0) { shared_sum = feature_sum; } // __syncthreads(); // no need to synch because below there is a Scan auto prefix_op = SumCallbackOp<gpair_sum_t>(); for (int scan_begin = begin; scan_begin < end; scan_begin += BLOCK_THREADS) { bool thread_active = scan_begin + threadIdx.x < end; gpair_sum_t bin = thread_active ? d_level_hist[scan_begin + threadIdx.x] : gpair_sum_t(); BlockScanT(temp_storage.scan) .ExclusiveScan(bin, bin, hipcub::Sum(), prefix_op); // Calculate gain gpair_sum_t parent_sum = gpair_sum_t(d_nodes[node_idx].sum_gradients); float parent_gain = d_nodes[node_idx].root_gain; gpair_sum_t missing = parent_sum - shared_sum; bool missing_left; float gain = thread_active ? loss_chg_missing(bin, missing, parent_sum, parent_gain, gpu_param, missing_left) : -FLT_MAX; __syncthreads(); // Find thread with best gain ArgMaxT tuple(threadIdx.x, gain); ArgMaxT best = MaxReduceT(temp_storage.max_reduce).Reduce(tuple, hipcub::ArgMax()); if (threadIdx.x == 0) { block_max = best; } __syncthreads(); // Best thread updates split if (threadIdx.x == block_max.key) { float fvalue; int gidx = (scan_begin - (level_node_idx * n_bins)) + threadIdx.x; if (threadIdx.x == 0 && begin == scan_begin) { // check at start of first tile fvalue = d_fidx_min_map[fidx]; } else { fvalue = d_gidx_fvalue_map[gidx - 1]; } gpair_sum_t left = missing_left ? bin + missing : bin; gpair_sum_t right = parent_sum - left; split.Update(gain, missing_left ? 
LeftDir : RightDir, fvalue, fidx, left, right, gpu_param); } __syncthreads(); } // end scan } // end over features // Create node if (threadIdx.x == 0 && split.IsValid()) { d_nodes[node_idx].SetSplit(split); DeviceNodeStats& left_child = d_nodes[left_child_nidx(node_idx)]; DeviceNodeStats& right_child = d_nodes[right_child_nidx(node_idx)]; bool& left_child_smallest = d_left_child_smallest_temp[node_idx]; left_child = DeviceNodeStats(split.left_sum, left_child_nidx(node_idx), gpu_param); right_child = DeviceNodeStats(split.right_sum, right_child_nidx(node_idx), gpu_param); // Record smallest node if (split.left_sum.GetHess() <= split.right_sum.GetHess()) { left_child_smallest = true; } else { left_child_smallest = false; } } } class GPUHistMaker : public TreeUpdater { public: GPUHistMaker() : initialised(false), is_dense(false), p_last_fmat_(nullptr), prediction_cache_initialised(false) {} ~GPUHistMaker() { if (initialised) { for (int d_idx = 0; d_idx < n_devices; ++d_idx) { ncclCommDestroy(comms[d_idx]); dh::safe_cuda(hipSetDevice(dList[d_idx])); dh::safe_cuda(hipStreamDestroy(*(streams[d_idx]))); } for (int num_d = 1; num_d <= n_devices; ++num_d) { // loop over number of devices used for (int d_idx = 0; d_idx < n_devices; ++d_idx) { ncclCommDestroy(find_split_comms[num_d - 1][d_idx]); } } } } void Init( const std::vector<std::pair<std::string, std::string>>& args) override { param.InitAllowUnknown(args); CHECK(param.max_depth < 16) << "Tree depth too large."; CHECK(param.max_depth != 0) << "Tree depth cannot be 0."; CHECK(param.grow_policy != TrainParam::kLossGuide) << "Loss guided growth policy not supported. Use CPU algorithm."; this->param = param; CHECK(param.n_gpus != 0) << "Must have at least one device"; } void Update(const std::vector<bst_gpair>& gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) override { GradStats::CheckInfo(dmat->info()); // rescale learning rate according to size of trees float lr = param.learning_rate; param.learning_rate = lr / trees.size(); // build tree try { for (size_t i = 0; i < trees.size(); ++i) { this->UpdateTree(gpair, dmat, trees[i]); } } catch (const std::exception& e) { LOG(FATAL) << "GPU plugin exception: " << e.what() << std::endl; } param.learning_rate = lr; } void InitData(const std::vector<bst_gpair>& gpair, DMatrix& fmat, // NOLINT const RegTree& tree) { dh::Timer time1; // set member num_rows and n_devices for rest of GPUHistBuilder members info = &fmat.info(); num_rows = info->num_row; n_devices = dh::n_devices(param.n_gpus, num_rows); if (!initialised) { // reset static timers used across iterations cpu_init_time = 0; gpu_init_time = 0; cpu_time.Reset(); gpu_time = 0; // set dList member dList.resize(n_devices); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { int device_idx = (param.gpu_id + d_idx) % dh::n_visible_devices(); dList[d_idx] = device_idx; } // initialize nccl comms.resize(n_devices); streams.resize(n_devices); dh::safe_nccl(ncclCommInitAll(comms.data(), n_devices, dList.data())); // initialize communicator // (One communicator per // process) // printf("# NCCL: Using devices\n"); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { streams[d_idx] = reinterpret_cast<hipStream_t*>(malloc(sizeof(hipStream_t))); dh::safe_cuda(hipSetDevice(dList[d_idx])); dh::safe_cuda(hipStreamCreate(streams[d_idx])); int cudaDev; int rank; hipDeviceProp_t prop; dh::safe_nccl(ncclCommCuDevice(comms[d_idx], &cudaDev)); dh::safe_nccl(ncclCommUserRank(comms[d_idx], &rank)); dh::safe_cuda(hipGetDeviceProperties(&prop, cudaDev)); // printf("# 
Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev, // prop.pciBusID, prop.name); // hipDriverGetVersion(&driverVersion); // hipRuntimeGetVersion(&runtimeVersion); std::ostringstream oss; oss << "CUDA Capability Major/Minor version number: " << prop.major << "." << prop.minor << " is insufficient. Need >=3.5."; int failed = prop.major < 3 || prop.major == 3 && prop.minor < 5; CHECK(failed == 0) << oss.str(); } // local find_split group of comms for each case of reduced number of // GPUs to use find_split_comms.resize( n_devices, std::vector<ncclComm_t>(n_devices)); // TODO(JCM): Excessive, but // ok, and best to do // here instead of // repeatedly for (int num_d = 1; num_d <= n_devices; ++num_d) { // loop over number of devices used dh::safe_nccl( ncclCommInitAll(find_split_comms[num_d - 1].data(), num_d, dList.data())); // initialize communicator // (One communicator per // process) } is_dense = info->num_nonzero == info->num_col * info->num_row; dh::Timer time0; hmat_.Init(&fmat, param.max_bin); cpu_init_time += time0.ElapsedSeconds(); if (param.debug_verbose) { // Only done once for each training session LOG(CONSOLE) << "[GPU Plug-in] CPU Time for hmat_.Init " << time0.ElapsedSeconds() << " sec"; fflush(stdout); } time0.Reset(); gmat_.cut = &hmat_; cpu_init_time += time0.ElapsedSeconds(); if (param.debug_verbose) { // Only done once for each training session LOG(CONSOLE) << "[GPU Plug-in] CPU Time for gmat_.cut " << time0.ElapsedSeconds() << " sec"; fflush(stdout); } time0.Reset(); gmat_.Init(&fmat); cpu_init_time += time0.ElapsedSeconds(); if (param.debug_verbose) { // Only done once for each training session LOG(CONSOLE) << "[GPU Plug-in] CPU Time for gmat_.Init() " << time0.ElapsedSeconds() << " sec"; fflush(stdout); } time0.Reset(); if (param.debug_verbose) { // Only done once for each training session LOG(CONSOLE) << "[GPU Plug-in] CPU Time for hmat_.Init, gmat_.cut, gmat_.Init " << cpu_init_time << " sec"; fflush(stdout); } int n_bins = hmat_.row_ptr.back(); int n_features = hmat_.row_ptr.size() - 1; // deliniate data onto multiple gpus device_row_segments.push_back(0); device_element_segments.push_back(0); bst_uint offset = 0; bst_uint shard_size = ::ceil(static_cast<double>(num_rows) / n_devices); for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; offset += shard_size; offset = ::min(offset, num_rows); device_row_segments.push_back(offset); device_element_segments.push_back(gmat_.row_ptr[offset]); } // Build feature segments std::vector<int> h_feature_segments; for (int node = 0; node < n_nodes_level(param.max_depth - 1); node++) { for (int fidx = 0; fidx < n_features; fidx++) { h_feature_segments.push_back(hmat_.row_ptr[fidx] + node * n_bins); } } h_feature_segments.push_back(n_nodes_level(param.max_depth - 1) * n_bins); // Construct feature map std::vector<int> h_gidx_feature_map(n_bins); for (int fidx = 0; fidx < n_features; fidx++) { for (int i = hmat_.row_ptr[fidx]; i < hmat_.row_ptr[fidx + 1]; i++) { h_gidx_feature_map[i] = fidx; } } int level_max_bins = n_nodes_level(param.max_depth - 1) * n_bins; // allocate unique common data that reside on master device (NOTE: None // currently) // int master_device=dList[0]; // ba.allocate(master_device, ); // allocate vectors across all devices temp_memory.resize(n_devices); hist_vec.resize(n_devices); nodes.resize(n_devices); nodes_temp.resize(n_devices); nodes_child_temp.resize(n_devices); left_child_smallest.resize(n_devices); left_child_smallest_temp.resize(n_devices); 
feature_flags.resize(n_devices); fidx_min_map.resize(n_devices); feature_segments.resize(n_devices); prediction_cache.resize(n_devices); position.resize(n_devices); position_tmp.resize(n_devices); device_matrix.resize(n_devices); device_gpair.resize(n_devices); gidx_feature_map.resize(n_devices); gidx_fvalue_map.resize(n_devices); int find_split_n_devices = ::pow(2, ::floor(std::log2(n_devices))); find_split_n_devices = ::min(n_nodes_level(param.max_depth), find_split_n_devices); int max_num_nodes_device = n_nodes_level(param.max_depth) / find_split_n_devices; // num_rows_segment: for sharding rows onto gpus for splitting data // num_elements_segment: for sharding rows (of elements) onto gpus for // splitting data // max_num_nodes_device: for sharding nodes onto gpus for split finding // All other variables have full copy on gpu, with copy either being // identical or just current portion (like for histogram) before // AllReduce for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; bst_uint num_rows_segment = device_row_segments[d_idx + 1] - device_row_segments[d_idx]; bst_ulong num_elements_segment = device_element_segments[d_idx + 1] - device_element_segments[d_idx]; ba.allocate( device_idx, param.silent, &(hist_vec[d_idx].data), n_nodes(param.max_depth - 1) * n_bins, &nodes[d_idx], n_nodes(param.max_depth), &nodes_temp[d_idx], max_num_nodes_device, &nodes_child_temp[d_idx], max_num_nodes_device, &left_child_smallest[d_idx], n_nodes(param.max_depth), &left_child_smallest_temp[d_idx], max_num_nodes_device, &feature_flags[d_idx], n_features, // may change but same on all devices &fidx_min_map[d_idx], hmat_.min_val.size(), // constant and same on all devices &feature_segments[d_idx], h_feature_segments.size(), // constant and same on all devices &prediction_cache[d_idx], num_rows_segment, &position[d_idx], num_rows_segment, &position_tmp[d_idx], num_rows_segment, &device_gpair[d_idx], num_rows_segment, &device_matrix[d_idx].gidx_buffer, common::CompressedBufferWriter::CalculateBufferSize( num_elements_segment, n_bins), // constant and same on all devices &device_matrix[d_idx].row_ptr, num_rows_segment + 1, &gidx_feature_map[d_idx], n_bins, // constant and same on all devices &gidx_fvalue_map[d_idx], hmat_.cut.size()); // constant and same on all devices // Copy Host to Device (assumes comes after ba.allocate that sets // device) device_matrix[d_idx].Init( device_idx, gmat_, device_element_segments[d_idx], device_element_segments[d_idx + 1], device_row_segments[d_idx], device_row_segments[d_idx + 1], n_bins); gidx_feature_map[d_idx] = h_gidx_feature_map; gidx_fvalue_map[d_idx] = hmat_.cut; feature_segments[d_idx] = h_feature_segments; fidx_min_map[d_idx] = hmat_.min_val; // Initialize, no copy hist_vec[d_idx].Init(n_bins); // init host object prediction_cache[d_idx].fill(0); // init device object (assumes comes // after ba.allocate that sets device) feature_flags[d_idx].fill( 1); // init device object (assumes comes after // ba.allocate that sets device) } } // copy or init to do every iteration for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); nodes[d_idx].fill(DeviceNodeStats()); nodes_temp[d_idx].fill(DeviceNodeStats()); nodes_child_temp[d_idx].fill(DeviceNodeStats()); position[d_idx].fill(0); device_gpair[d_idx].copy(gpair.begin() + device_row_segments[d_idx], gpair.begin() + device_row_segments[d_idx + 1]); subsample_gpair(&device_gpair[d_idx], param.subsample, device_row_segments[d_idx]); 
hist_vec[d_idx].Reset(device_idx); // left_child_smallest and left_child_smallest_temp don't need to be // initialized } dh::synchronize_n_devices(n_devices, dList); if (!initialised) { gpu_init_time = time1.ElapsedSeconds() - cpu_init_time; gpu_time = -cpu_init_time; if (param.debug_verbose) { // Only done once for each training session LOG(CONSOLE) << "[GPU Plug-in] Time for GPU operations during First " "Call to InitData() " << gpu_init_time << " sec"; fflush(stdout); } } p_last_fmat_ = &fmat; initialised = true; } void BuildHist(int depth) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; size_t begin = device_element_segments[d_idx]; size_t end = device_element_segments[d_idx + 1]; size_t row_begin = device_row_segments[d_idx]; size_t row_end = device_row_segments[d_idx + 1]; auto d_gidx = device_matrix[d_idx].gidx; auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin(); auto d_position = position[d_idx].data(); auto d_gpair = device_gpair[d_idx].data(); auto d_left_child_smallest = left_child_smallest[d_idx].data(); auto hist_builder = hist_vec[d_idx].GetBuilder(); dh::TransformLbs( device_idx, &temp_memory[d_idx], end - begin, d_row_ptr, row_end - row_begin, is_dense, [=] __device__(size_t local_idx, int local_ridx) { int nidx = d_position[local_ridx]; // OPTMARK: latency if (!is_active(nidx, depth)) return; // Only increment smallest node bool is_smallest = (d_left_child_smallest[parent_nidx(nidx)] && is_left_child(nidx)) || (!d_left_child_smallest[parent_nidx(nidx)] && !is_left_child(nidx)); if (!is_smallest && depth > 0) return; int gidx = d_gidx[local_idx]; bst_gpair gpair = d_gpair[local_ridx]; hist_builder.Add(gpair, gidx, nidx); // OPTMARK: This is slow, could use // shared memory or cache results // intead of writing to global // memory every time in atomic way. 
}); } dh::synchronize_n_devices(n_devices, dList); // time.printElapsed("Add Time"); // (in-place) reduce each element of histogram (for only current level) // across multiple gpus // TODO(JCM): use out of place with pre-allocated buffer, but then have to // copy // back on device // fprintf(stderr,"sizeof(bst_gpair)/sizeof(float)=%d\n",sizeof(bst_gpair)/sizeof(float)); for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); dh::safe_nccl(ncclAllReduce( reinterpret_cast<const void*>(hist_vec[d_idx].GetLevelPtr(depth)), reinterpret_cast<void*>(hist_vec[d_idx].GetLevelPtr(depth)), hist_vec[d_idx].LevelSize(depth) * sizeof(gpair_sum_t) / sizeof(gpair_sum_t::value_t), nccl_sum_t, ncclSum, comms[d_idx], *(streams[d_idx]))); } for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); dh::safe_cuda(hipStreamSynchronize(*(streams[d_idx]))); } // if no NCCL, then presume only 1 GPU, then already correct // time.printElapsed("Reduce-Add Time"); // Subtraction trick (applied to all devices in same way -- to avoid doing // on master and then Bcast) if (depth > 0) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); auto hist_builder = hist_vec[d_idx].GetBuilder(); auto d_left_child_smallest = left_child_smallest[d_idx].data(); int n_sub_bins = (n_nodes_level(depth) / 2) * hist_builder.n_bins; dh::launch_n(device_idx, n_sub_bins, [=] __device__(int idx) { int nidx = n_nodes(depth - 1) + ((idx / hist_builder.n_bins) * 2); bool left_smallest = d_left_child_smallest[parent_nidx(nidx)]; if (left_smallest) { nidx++; // If left is smallest switch to right child } int gidx = idx % hist_builder.n_bins; gpair_sum_t parent = hist_builder.Get(gidx, parent_nidx(nidx)); int other_nidx = left_smallest ? nidx - 1 : nidx + 1; gpair_sum_t other = hist_builder.Get(gidx, other_nidx); gpair_sum_t sub = parent - other; hist_builder.Add( bst_gpair(sub.GetGrad(), sub.GetHess()), gidx, nidx); // OPTMARK: This is slow, could use shared // memory or cache results intead of writing to // global memory every time in atomic way. 
}); } dh::synchronize_n_devices(n_devices, dList); } } #define MIN_BLOCK_THREADS 128 #define CHUNK_BLOCK_THREADS 128 // MAX_BLOCK_THREADS of 1024 is hard-coded maximum block size due // to CUDA capability 35 and above requirement // for Maximum number of threads per block #define MAX_BLOCK_THREADS 512 void FindSplit(int depth) { // Specialised based on max_bins this->FindSplitSpecialize(depth, Int<MIN_BLOCK_THREADS>()); } template <int BLOCK_THREADS> void FindSplitSpecialize(int depth, Int<BLOCK_THREADS>) { if (param.max_bin <= BLOCK_THREADS) { LaunchFindSplit<BLOCK_THREADS>(depth); } else { this->FindSplitSpecialize(depth, Int<BLOCK_THREADS + CHUNK_BLOCK_THREADS>()); } } void FindSplitSpecialize(int depth, Int<MAX_BLOCK_THREADS>) { this->LaunchFindSplit<MAX_BLOCK_THREADS>(depth); } template <int BLOCK_THREADS> void LaunchFindSplit(int depth) { bool colsample = param.colsample_bylevel < 1.0 || param.colsample_bytree < 1.0; int num_nodes_device = n_nodes_level(depth); const int GRID_SIZE = num_nodes_device; // all GPUs do same work for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); int nodes_offset_device = 0; hipLaunchKernelGGL(( find_split_kernel<BLOCK_THREADS>), dim3(GRID_SIZE), dim3(BLOCK_THREADS), 0, 0, hist_vec[d_idx].GetLevelPtr(depth), feature_segments[d_idx].data(), depth, (info->num_col), (hmat_.row_ptr.back()), nodes[d_idx].data(), nodes_offset_device, fidx_min_map[d_idx].data(), gidx_fvalue_map[d_idx].data(), GPUTrainingParam(param), left_child_smallest[d_idx].data(), colsample, feature_flags[d_idx].data()); } // NOTE: No need to syncrhonize with host as all above pure P2P ops or // on-device ops } void InitFirstNode(const std::vector<bst_gpair>& gpair) { // Perform asynchronous reduction on each gpu std::vector<bst_gpair> device_sums(n_devices); #pragma omp parallel for num_threads(n_devices) for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); auto begin = device_gpair[d_idx].tbegin(); auto end = device_gpair[d_idx].tend(); bst_gpair init = bst_gpair(); auto binary_op = thrust::plus<bst_gpair>(); device_sums[d_idx] = thrust::reduce(begin, end, init, binary_op); } bst_gpair sum = bst_gpair(); for (int d_idx = 0; d_idx < n_devices; d_idx++) { sum += device_sums[d_idx]; } // Setup first node so all devices have same first node (here done same on // all devices, or could have done one device and Bcast if worried about // exact precision issues) for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; auto d_nodes = nodes[d_idx].data(); auto gpu_param = GPUTrainingParam(param); dh::launch_n(device_idx, 1, [=] __device__(int idx) { bst_gpair sum_gradients = sum; d_nodes[idx] = DeviceNodeStats(sum_gradients, 0, gpu_param); }); } // synch all devices to host before moving on (No, can avoid because // BuildHist calls another kernel in default stream) // dh::synchronize_n_devices(n_devices, dList); } void UpdatePosition(int depth) { if (is_dense) { this->UpdatePositionDense(depth); } else { this->UpdatePositionSparse(depth); } } void UpdatePositionDense(int depth) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; auto d_position = position[d_idx].data(); DeviceNodeStats* d_nodes = nodes[d_idx].data(); auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data(); auto d_gidx = device_matrix[d_idx].gidx; int n_columns = info->num_col; size_t begin = device_row_segments[d_idx]; size_t end = 
device_row_segments[d_idx + 1]; dh::launch_n(device_idx, end - begin, [=] __device__(size_t local_idx) { int pos = d_position[local_idx]; if (!is_active(pos, depth)) { return; } DeviceNodeStats node = d_nodes[pos]; if (node.IsLeaf()) { return; } int gidx = d_gidx[local_idx * static_cast<size_t>(n_columns) + static_cast<size_t>(node.fidx)]; float fvalue = d_gidx_fvalue_map[gidx]; if (fvalue <= node.fvalue) { d_position[local_idx] = left_child_nidx(pos); } else { d_position[local_idx] = right_child_nidx(pos); } }); } dh::synchronize_n_devices(n_devices, dList); // dh::safe_cuda(hipDeviceSynchronize()); } void UpdatePositionSparse(int depth) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; auto d_position = position[d_idx].data(); auto d_position_tmp = position_tmp[d_idx].data(); DeviceNodeStats* d_nodes = nodes[d_idx].data(); auto d_gidx_feature_map = gidx_feature_map[d_idx].data(); auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data(); auto d_gidx = device_matrix[d_idx].gidx; auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin(); size_t row_begin = device_row_segments[d_idx]; size_t row_end = device_row_segments[d_idx + 1]; size_t element_begin = device_element_segments[d_idx]; size_t element_end = device_element_segments[d_idx + 1]; // Update missing direction dh::launch_n(device_idx, row_end - row_begin, [=] __device__(int local_idx) { int pos = d_position[local_idx]; if (!is_active(pos, depth)) { d_position_tmp[local_idx] = pos; return; } DeviceNodeStats node = d_nodes[pos]; if (node.IsLeaf()) { d_position_tmp[local_idx] = pos; return; } else if (node.dir == LeftDir) { d_position_tmp[local_idx] = pos * 2 + 1; } else { d_position_tmp[local_idx] = pos * 2 + 2; } }); // Update node based on fvalue where exists // OPTMARK: This kernel is very inefficient for both compute and memory, // dominated by memory dependency / access patterns dh::TransformLbs( device_idx, &temp_memory[d_idx], element_end - element_begin, d_row_ptr, row_end - row_begin, is_dense, [=] __device__(size_t local_idx, int local_ridx) { int pos = d_position[local_ridx]; if (!is_active(pos, depth)) { return; } DeviceNodeStats node = d_nodes[pos]; if (node.IsLeaf()) { return; } int gidx = d_gidx[local_idx]; int findex = d_gidx_feature_map[gidx]; // OPTMARK: slowest global // memory access, maybe setup // position, gidx, etc. as // combined structure? 
if (findex == node.fidx) { float fvalue = d_gidx_fvalue_map[gidx]; if (fvalue <= node.fvalue) { d_position_tmp[local_ridx] = left_child_nidx(pos); } else { d_position_tmp[local_ridx] = right_child_nidx(pos); } } }); position[d_idx] = position_tmp[d_idx]; } dh::synchronize_n_devices(n_devices, dList); } void ColSampleTree() { if (param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0) return; feature_set_tree.resize(info->num_col); std::iota(feature_set_tree.begin(), feature_set_tree.end(), 0); feature_set_tree = col_sample(feature_set_tree, param.colsample_bytree); } void ColSampleLevel() { if (param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0) return; feature_set_level.resize(feature_set_tree.size()); feature_set_level = col_sample(feature_set_tree, param.colsample_bylevel); std::vector<int> h_feature_flags(info->num_col, 0); for (auto fidx : feature_set_level) { h_feature_flags[fidx] = 1; } for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); feature_flags[d_idx] = h_feature_flags; } dh::synchronize_n_devices(n_devices, dList); } bool UpdatePredictionCache(const DMatrix* data, std::vector<bst_float>* p_out_preds) override { std::vector<bst_float>& out_preds = *p_out_preds; if (nodes.empty() || !p_last_fmat_ || data != p_last_fmat_) { return false; } if (!prediction_cache_initialised) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; size_t row_begin = device_row_segments[d_idx]; size_t row_end = device_row_segments[d_idx + 1]; prediction_cache[d_idx].copy(out_preds.begin() + row_begin, out_preds.begin() + row_end); } prediction_cache_initialised = true; } dh::synchronize_n_devices(n_devices, dList); float eps = param.learning_rate; for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; size_t row_begin = device_row_segments[d_idx]; size_t row_end = device_row_segments[d_idx + 1]; auto d_nodes = nodes[d_idx].data(); auto d_position = position[d_idx].data(); auto d_prediction_cache = prediction_cache[d_idx].data(); dh::launch_n(device_idx, prediction_cache[d_idx].size(), [=] __device__(int local_idx) { int pos = d_position[local_idx]; d_prediction_cache[local_idx] += d_nodes[pos].weight * eps; }); dh::safe_cuda( hipMemcpy(&out_preds[row_begin], prediction_cache[d_idx].data(), prediction_cache[d_idx].size() * sizeof(bst_float), hipMemcpyDeviceToHost)); } dh::synchronize_n_devices(n_devices, dList); return true; } void UpdateTree(const std::vector<bst_gpair>& gpair, DMatrix* p_fmat, RegTree* p_tree) { dh::Timer time0; this->InitData(gpair, *p_fmat, *p_tree); this->InitFirstNode(gpair); this->ColSampleTree(); for (int depth = 0; depth < param.max_depth; depth++) { this->ColSampleLevel(); this->BuildHist(depth); this->FindSplit(depth); this->UpdatePosition(depth); } // done with multi-GPU, pass back result from master to tree on host int master_device = dList[0]; dh::safe_cuda(hipSetDevice(master_device)); dense2sparse_tree(p_tree, nodes[0], param); gpu_time += time0.ElapsedSeconds(); if (param.debug_verbose) { LOG(CONSOLE) << "[GPU Plug-in] Cumulative GPU Time excluding initial time " << (gpu_time - gpu_init_time) << " sec"; fflush(stdout); } if (param.debug_verbose) { LOG(CONSOLE) << "[GPU Plug-in] Cumulative CPU Time " << cpu_time.ElapsedSeconds() << " sec"; LOG(CONSOLE) << "[GPU Plug-in] Cumulative CPU Time excluding initial time " << (cpu_time.ElapsedSeconds() - cpu_init_time - gpu_time) << " sec"; fflush(stdout); } } protected: TrainParam 
param; // std::unique_ptr<GPUHistBuilder> builder; common::HistCutMatrix hmat_; common::GHistIndexMatrix gmat_; MetaInfo* info; bool initialised; bool is_dense; const DMatrix* p_last_fmat_; bool prediction_cache_initialised; dh::bulk_allocator<dh::memory_type::DEVICE> ba; std::vector<int> feature_set_tree; std::vector<int> feature_set_level; bst_uint num_rows; int n_devices; // below vectors are for each devices used std::vector<int> dList; std::vector<int> device_row_segments; std::vector<size_t> device_element_segments; std::vector<dh::CubMemory> temp_memory; std::vector<DeviceHist> hist_vec; std::vector<dh::dvec<DeviceNodeStats>> nodes; std::vector<dh::dvec<DeviceNodeStats>> nodes_temp; std::vector<dh::dvec<DeviceNodeStats>> nodes_child_temp; std::vector<dh::dvec<bool>> left_child_smallest; std::vector<dh::dvec<bool>> left_child_smallest_temp; std::vector<dh::dvec<int>> feature_flags; std::vector<dh::dvec<float>> fidx_min_map; std::vector<dh::dvec<int>> feature_segments; std::vector<dh::dvec<bst_float>> prediction_cache; std::vector<dh::dvec<int>> position; std::vector<dh::dvec<int>> position_tmp; std::vector<DeviceGMat> device_matrix; std::vector<dh::dvec<bst_gpair>> device_gpair; std::vector<dh::dvec<int>> gidx_feature_map; std::vector<dh::dvec<float>> gidx_fvalue_map; std::vector<hipStream_t*> streams; std::vector<ncclComm_t> comms; std::vector<std::vector<ncclComm_t>> find_split_comms; double cpu_init_time; double gpu_init_time; dh::Timer cpu_time; double gpu_time; }; XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist") .describe("Grow tree with GPU.") .set_body([]() { return new GPUHistMaker(); }); } // namespace tree } // namespace xgboost
3b032d1fab7d0efd27e85e0eb305f531882480c7.cu
/*! * Copyright 2017 XGBoost contributors */ #include <xgboost/tree_updater.h> #include <memory> #include <utility> #include <vector> #include "../common/compressed_iterator.h" #include "../common/device_helpers.cuh" #include "../common/hist_util.h" #include "param.h" #include "updater_gpu_common.cuh" namespace xgboost { namespace tree { DMLC_REGISTRY_FILE_TAG(updater_gpu_hist); typedef bst_gpair_integer gpair_sum_t; static const ncclDataType_t nccl_sum_t = ncclInt64; // Helper for explicit template specialisation template <int N> struct Int {}; struct DeviceGMat { dh::dvec<common::compressed_byte_t> gidx_buffer; common::CompressedIterator<uint32_t> gidx; dh::dvec<size_t> row_ptr; void Init(int device_idx, const common::GHistIndexMatrix& gmat, bst_ulong element_begin, bst_ulong element_end, bst_ulong row_begin, bst_ulong row_end, int n_bins) { dh::safe_cuda(cudaSetDevice(device_idx)); CHECK(gidx_buffer.size()) << "gidx_buffer must be externally allocated"; CHECK_EQ(row_ptr.size(), (row_end - row_begin) + 1) << "row_ptr must be externally allocated"; common::CompressedBufferWriter cbw(n_bins); std::vector<common::compressed_byte_t> host_buffer(gidx_buffer.size()); cbw.Write(host_buffer.data(), gmat.index.begin() + element_begin, gmat.index.begin() + element_end); gidx_buffer = host_buffer; gidx = common::CompressedIterator<uint32_t>(gidx_buffer.data(), n_bins); // row_ptr dh::safe_cuda(cudaMemcpy(row_ptr.data(), gmat.row_ptr.data() + row_begin, row_ptr.size() * sizeof(size_t), cudaMemcpyHostToDevice)); // normalise row_ptr size_t start = gmat.row_ptr[row_begin]; auto d_row_ptr = row_ptr.data(); dh::launch_n(row_ptr.device_idx(), row_ptr.size(), [=] __device__(size_t idx) { d_row_ptr[idx] -= start; }); } }; struct HistHelper { gpair_sum_t* d_hist; int n_bins; __host__ __device__ HistHelper(gpair_sum_t* ptr, int n_bins) : d_hist(ptr), n_bins(n_bins) {} __device__ void Add(bst_gpair gpair, int gidx, int nidx) const { int hist_idx = nidx * n_bins + gidx; auto dst_ptr = reinterpret_cast<unsigned long long int*>(&d_hist[hist_idx]); // NOLINT gpair_sum_t tmp(gpair.GetGrad(), gpair.GetHess()); auto src_ptr = reinterpret_cast<gpair_sum_t::value_t*>(&tmp); atomicAdd(dst_ptr, static_cast<unsigned long long int>(*src_ptr)); // NOLINT atomicAdd(dst_ptr + 1, static_cast<unsigned long long int>(*(src_ptr + 1))); // NOLINT } __device__ gpair_sum_t Get(int gidx, int nidx) const { return d_hist[nidx * n_bins + gidx]; } }; struct DeviceHist { int n_bins; dh::dvec<gpair_sum_t> data; void Init(int n_bins_in) { this->n_bins = n_bins_in; CHECK(!data.empty()) << "DeviceHist must be externally allocated"; } void Reset(int device_idx) { cudaSetDevice(device_idx); data.fill(gpair_sum_t()); } HistHelper GetBuilder() { return HistHelper(data.data(), n_bins); } gpair_sum_t* GetLevelPtr(int depth) { return data.data() + n_nodes(depth - 1) * n_bins; } int LevelSize(int depth) { return n_bins * n_nodes_level(depth); } }; template <int BLOCK_THREADS> __global__ void find_split_kernel( const gpair_sum_t* d_level_hist, int* d_feature_segments, int depth, int n_features, int n_bins, DeviceNodeStats* d_nodes, int nodes_offset_device, float* d_fidx_min_map, float* d_gidx_fvalue_map, GPUTrainingParam gpu_param, bool* d_left_child_smallest_temp, bool colsample, int* d_feature_flags) { typedef cub::KeyValuePair<int, float> ArgMaxT; typedef cub::BlockScan<gpair_sum_t, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS> BlockScanT; typedef cub::BlockReduce<ArgMaxT, BLOCK_THREADS> MaxReduceT; typedef cub::BlockReduce<gpair_sum_t, 
BLOCK_THREADS> SumReduceT; union TempStorage { typename BlockScanT::TempStorage scan; typename MaxReduceT::TempStorage max_reduce; typename SumReduceT::TempStorage sum_reduce; }; __shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split; DeviceSplitCandidate& split = uninitialized_split.Alias(); __shared__ cub::Uninitialized<gpair_sum_t> uninitialized_sum; gpair_sum_t& shared_sum = uninitialized_sum.Alias(); __shared__ ArgMaxT block_max; __shared__ TempStorage temp_storage; if (threadIdx.x == 0) { split = DeviceSplitCandidate(); } __syncthreads(); // below two are for accessing full-sized node list stored on each device // always one block per node, BLOCK_THREADS threads per block int level_node_idx = blockIdx.x + nodes_offset_device; int node_idx = n_nodes(depth - 1) + level_node_idx; for (int fidx = 0; fidx < n_features; fidx++) { if (colsample && d_feature_flags[fidx] == 0) continue; int begin = d_feature_segments[level_node_idx * n_features + fidx]; int end = d_feature_segments[level_node_idx * n_features + fidx + 1]; gpair_sum_t feature_sum = gpair_sum_t(); for (int reduce_begin = begin; reduce_begin < end; reduce_begin += BLOCK_THREADS) { bool thread_active = reduce_begin + threadIdx.x < end; // Scan histogram gpair_sum_t bin = thread_active ? d_level_hist[reduce_begin + threadIdx.x] : gpair_sum_t(); feature_sum += SumReduceT(temp_storage.sum_reduce).Reduce(bin, cub::Sum()); } if (threadIdx.x == 0) { shared_sum = feature_sum; } // __syncthreads(); // no need to synch because below there is a Scan auto prefix_op = SumCallbackOp<gpair_sum_t>(); for (int scan_begin = begin; scan_begin < end; scan_begin += BLOCK_THREADS) { bool thread_active = scan_begin + threadIdx.x < end; gpair_sum_t bin = thread_active ? d_level_hist[scan_begin + threadIdx.x] : gpair_sum_t(); BlockScanT(temp_storage.scan) .ExclusiveScan(bin, bin, cub::Sum(), prefix_op); // Calculate gain gpair_sum_t parent_sum = gpair_sum_t(d_nodes[node_idx].sum_gradients); float parent_gain = d_nodes[node_idx].root_gain; gpair_sum_t missing = parent_sum - shared_sum; bool missing_left; float gain = thread_active ? loss_chg_missing(bin, missing, parent_sum, parent_gain, gpu_param, missing_left) : -FLT_MAX; __syncthreads(); // Find thread with best gain ArgMaxT tuple(threadIdx.x, gain); ArgMaxT best = MaxReduceT(temp_storage.max_reduce).Reduce(tuple, cub::ArgMax()); if (threadIdx.x == 0) { block_max = best; } __syncthreads(); // Best thread updates split if (threadIdx.x == block_max.key) { float fvalue; int gidx = (scan_begin - (level_node_idx * n_bins)) + threadIdx.x; if (threadIdx.x == 0 && begin == scan_begin) { // check at start of first tile fvalue = d_fidx_min_map[fidx]; } else { fvalue = d_gidx_fvalue_map[gidx - 1]; } gpair_sum_t left = missing_left ? bin + missing : bin; gpair_sum_t right = parent_sum - left; split.Update(gain, missing_left ? 
LeftDir : RightDir, fvalue, fidx, left, right, gpu_param); } __syncthreads(); } // end scan } // end over features // Create node if (threadIdx.x == 0 && split.IsValid()) { d_nodes[node_idx].SetSplit(split); DeviceNodeStats& left_child = d_nodes[left_child_nidx(node_idx)]; DeviceNodeStats& right_child = d_nodes[right_child_nidx(node_idx)]; bool& left_child_smallest = d_left_child_smallest_temp[node_idx]; left_child = DeviceNodeStats(split.left_sum, left_child_nidx(node_idx), gpu_param); right_child = DeviceNodeStats(split.right_sum, right_child_nidx(node_idx), gpu_param); // Record smallest node if (split.left_sum.GetHess() <= split.right_sum.GetHess()) { left_child_smallest = true; } else { left_child_smallest = false; } } } class GPUHistMaker : public TreeUpdater { public: GPUHistMaker() : initialised(false), is_dense(false), p_last_fmat_(nullptr), prediction_cache_initialised(false) {} ~GPUHistMaker() { if (initialised) { for (int d_idx = 0; d_idx < n_devices; ++d_idx) { ncclCommDestroy(comms[d_idx]); dh::safe_cuda(cudaSetDevice(dList[d_idx])); dh::safe_cuda(cudaStreamDestroy(*(streams[d_idx]))); } for (int num_d = 1; num_d <= n_devices; ++num_d) { // loop over number of devices used for (int d_idx = 0; d_idx < n_devices; ++d_idx) { ncclCommDestroy(find_split_comms[num_d - 1][d_idx]); } } } } void Init( const std::vector<std::pair<std::string, std::string>>& args) override { param.InitAllowUnknown(args); CHECK(param.max_depth < 16) << "Tree depth too large."; CHECK(param.max_depth != 0) << "Tree depth cannot be 0."; CHECK(param.grow_policy != TrainParam::kLossGuide) << "Loss guided growth policy not supported. Use CPU algorithm."; this->param = param; CHECK(param.n_gpus != 0) << "Must have at least one device"; } void Update(const std::vector<bst_gpair>& gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) override { GradStats::CheckInfo(dmat->info()); // rescale learning rate according to size of trees float lr = param.learning_rate; param.learning_rate = lr / trees.size(); // build tree try { for (size_t i = 0; i < trees.size(); ++i) { this->UpdateTree(gpair, dmat, trees[i]); } } catch (const std::exception& e) { LOG(FATAL) << "GPU plugin exception: " << e.what() << std::endl; } param.learning_rate = lr; } void InitData(const std::vector<bst_gpair>& gpair, DMatrix& fmat, // NOLINT const RegTree& tree) { dh::Timer time1; // set member num_rows and n_devices for rest of GPUHistBuilder members info = &fmat.info(); num_rows = info->num_row; n_devices = dh::n_devices(param.n_gpus, num_rows); if (!initialised) { // reset static timers used across iterations cpu_init_time = 0; gpu_init_time = 0; cpu_time.Reset(); gpu_time = 0; // set dList member dList.resize(n_devices); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { int device_idx = (param.gpu_id + d_idx) % dh::n_visible_devices(); dList[d_idx] = device_idx; } // initialize nccl comms.resize(n_devices); streams.resize(n_devices); dh::safe_nccl(ncclCommInitAll(comms.data(), n_devices, dList.data())); // initialize communicator // (One communicator per // process) // printf("# NCCL: Using devices\n"); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { streams[d_idx] = reinterpret_cast<cudaStream_t*>(malloc(sizeof(cudaStream_t))); dh::safe_cuda(cudaSetDevice(dList[d_idx])); dh::safe_cuda(cudaStreamCreate(streams[d_idx])); int cudaDev; int rank; cudaDeviceProp prop; dh::safe_nccl(ncclCommCuDevice(comms[d_idx], &cudaDev)); dh::safe_nccl(ncclCommUserRank(comms[d_idx], &rank)); dh::safe_cuda(cudaGetDeviceProperties(&prop, cudaDev)); // 
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev, // prop.pciBusID, prop.name); // cudaDriverGetVersion(&driverVersion); // cudaRuntimeGetVersion(&runtimeVersion); std::ostringstream oss; oss << "CUDA Capability Major/Minor version number: " << prop.major << "." << prop.minor << " is insufficient. Need >=3.5."; int failed = prop.major < 3 || prop.major == 3 && prop.minor < 5; CHECK(failed == 0) << oss.str(); } // local find_split group of comms for each case of reduced number of // GPUs to use find_split_comms.resize( n_devices, std::vector<ncclComm_t>(n_devices)); // TODO(JCM): Excessive, but // ok, and best to do // here instead of // repeatedly for (int num_d = 1; num_d <= n_devices; ++num_d) { // loop over number of devices used dh::safe_nccl( ncclCommInitAll(find_split_comms[num_d - 1].data(), num_d, dList.data())); // initialize communicator // (One communicator per // process) } is_dense = info->num_nonzero == info->num_col * info->num_row; dh::Timer time0; hmat_.Init(&fmat, param.max_bin); cpu_init_time += time0.ElapsedSeconds(); if (param.debug_verbose) { // Only done once for each training session LOG(CONSOLE) << "[GPU Plug-in] CPU Time for hmat_.Init " << time0.ElapsedSeconds() << " sec"; fflush(stdout); } time0.Reset(); gmat_.cut = &hmat_; cpu_init_time += time0.ElapsedSeconds(); if (param.debug_verbose) { // Only done once for each training session LOG(CONSOLE) << "[GPU Plug-in] CPU Time for gmat_.cut " << time0.ElapsedSeconds() << " sec"; fflush(stdout); } time0.Reset(); gmat_.Init(&fmat); cpu_init_time += time0.ElapsedSeconds(); if (param.debug_verbose) { // Only done once for each training session LOG(CONSOLE) << "[GPU Plug-in] CPU Time for gmat_.Init() " << time0.ElapsedSeconds() << " sec"; fflush(stdout); } time0.Reset(); if (param.debug_verbose) { // Only done once for each training session LOG(CONSOLE) << "[GPU Plug-in] CPU Time for hmat_.Init, gmat_.cut, gmat_.Init " << cpu_init_time << " sec"; fflush(stdout); } int n_bins = hmat_.row_ptr.back(); int n_features = hmat_.row_ptr.size() - 1; // deliniate data onto multiple gpus device_row_segments.push_back(0); device_element_segments.push_back(0); bst_uint offset = 0; bst_uint shard_size = std::ceil(static_cast<double>(num_rows) / n_devices); for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; offset += shard_size; offset = std::min(offset, num_rows); device_row_segments.push_back(offset); device_element_segments.push_back(gmat_.row_ptr[offset]); } // Build feature segments std::vector<int> h_feature_segments; for (int node = 0; node < n_nodes_level(param.max_depth - 1); node++) { for (int fidx = 0; fidx < n_features; fidx++) { h_feature_segments.push_back(hmat_.row_ptr[fidx] + node * n_bins); } } h_feature_segments.push_back(n_nodes_level(param.max_depth - 1) * n_bins); // Construct feature map std::vector<int> h_gidx_feature_map(n_bins); for (int fidx = 0; fidx < n_features; fidx++) { for (int i = hmat_.row_ptr[fidx]; i < hmat_.row_ptr[fidx + 1]; i++) { h_gidx_feature_map[i] = fidx; } } int level_max_bins = n_nodes_level(param.max_depth - 1) * n_bins; // allocate unique common data that reside on master device (NOTE: None // currently) // int master_device=dList[0]; // ba.allocate(master_device, ); // allocate vectors across all devices temp_memory.resize(n_devices); hist_vec.resize(n_devices); nodes.resize(n_devices); nodes_temp.resize(n_devices); nodes_child_temp.resize(n_devices); left_child_smallest.resize(n_devices); left_child_smallest_temp.resize(n_devices); 
feature_flags.resize(n_devices); fidx_min_map.resize(n_devices); feature_segments.resize(n_devices); prediction_cache.resize(n_devices); position.resize(n_devices); position_tmp.resize(n_devices); device_matrix.resize(n_devices); device_gpair.resize(n_devices); gidx_feature_map.resize(n_devices); gidx_fvalue_map.resize(n_devices); int find_split_n_devices = std::pow(2, std::floor(std::log2(n_devices))); find_split_n_devices = std::min(n_nodes_level(param.max_depth), find_split_n_devices); int max_num_nodes_device = n_nodes_level(param.max_depth) / find_split_n_devices; // num_rows_segment: for sharding rows onto gpus for splitting data // num_elements_segment: for sharding rows (of elements) onto gpus for // splitting data // max_num_nodes_device: for sharding nodes onto gpus for split finding // All other variables have full copy on gpu, with copy either being // identical or just current portion (like for histogram) before // AllReduce for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; bst_uint num_rows_segment = device_row_segments[d_idx + 1] - device_row_segments[d_idx]; bst_ulong num_elements_segment = device_element_segments[d_idx + 1] - device_element_segments[d_idx]; ba.allocate( device_idx, param.silent, &(hist_vec[d_idx].data), n_nodes(param.max_depth - 1) * n_bins, &nodes[d_idx], n_nodes(param.max_depth), &nodes_temp[d_idx], max_num_nodes_device, &nodes_child_temp[d_idx], max_num_nodes_device, &left_child_smallest[d_idx], n_nodes(param.max_depth), &left_child_smallest_temp[d_idx], max_num_nodes_device, &feature_flags[d_idx], n_features, // may change but same on all devices &fidx_min_map[d_idx], hmat_.min_val.size(), // constant and same on all devices &feature_segments[d_idx], h_feature_segments.size(), // constant and same on all devices &prediction_cache[d_idx], num_rows_segment, &position[d_idx], num_rows_segment, &position_tmp[d_idx], num_rows_segment, &device_gpair[d_idx], num_rows_segment, &device_matrix[d_idx].gidx_buffer, common::CompressedBufferWriter::CalculateBufferSize( num_elements_segment, n_bins), // constant and same on all devices &device_matrix[d_idx].row_ptr, num_rows_segment + 1, &gidx_feature_map[d_idx], n_bins, // constant and same on all devices &gidx_fvalue_map[d_idx], hmat_.cut.size()); // constant and same on all devices // Copy Host to Device (assumes comes after ba.allocate that sets // device) device_matrix[d_idx].Init( device_idx, gmat_, device_element_segments[d_idx], device_element_segments[d_idx + 1], device_row_segments[d_idx], device_row_segments[d_idx + 1], n_bins); gidx_feature_map[d_idx] = h_gidx_feature_map; gidx_fvalue_map[d_idx] = hmat_.cut; feature_segments[d_idx] = h_feature_segments; fidx_min_map[d_idx] = hmat_.min_val; // Initialize, no copy hist_vec[d_idx].Init(n_bins); // init host object prediction_cache[d_idx].fill(0); // init device object (assumes comes // after ba.allocate that sets device) feature_flags[d_idx].fill( 1); // init device object (assumes comes after // ba.allocate that sets device) } } // copy or init to do every iteration for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); nodes[d_idx].fill(DeviceNodeStats()); nodes_temp[d_idx].fill(DeviceNodeStats()); nodes_child_temp[d_idx].fill(DeviceNodeStats()); position[d_idx].fill(0); device_gpair[d_idx].copy(gpair.begin() + device_row_segments[d_idx], gpair.begin() + device_row_segments[d_idx + 1]); subsample_gpair(&device_gpair[d_idx], param.subsample, 
device_row_segments[d_idx]); hist_vec[d_idx].Reset(device_idx); // left_child_smallest and left_child_smallest_temp don't need to be // initialized } dh::synchronize_n_devices(n_devices, dList); if (!initialised) { gpu_init_time = time1.ElapsedSeconds() - cpu_init_time; gpu_time = -cpu_init_time; if (param.debug_verbose) { // Only done once for each training session LOG(CONSOLE) << "[GPU Plug-in] Time for GPU operations during First " "Call to InitData() " << gpu_init_time << " sec"; fflush(stdout); } } p_last_fmat_ = &fmat; initialised = true; } void BuildHist(int depth) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; size_t begin = device_element_segments[d_idx]; size_t end = device_element_segments[d_idx + 1]; size_t row_begin = device_row_segments[d_idx]; size_t row_end = device_row_segments[d_idx + 1]; auto d_gidx = device_matrix[d_idx].gidx; auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin(); auto d_position = position[d_idx].data(); auto d_gpair = device_gpair[d_idx].data(); auto d_left_child_smallest = left_child_smallest[d_idx].data(); auto hist_builder = hist_vec[d_idx].GetBuilder(); dh::TransformLbs( device_idx, &temp_memory[d_idx], end - begin, d_row_ptr, row_end - row_begin, is_dense, [=] __device__(size_t local_idx, int local_ridx) { int nidx = d_position[local_ridx]; // OPTMARK: latency if (!is_active(nidx, depth)) return; // Only increment smallest node bool is_smallest = (d_left_child_smallest[parent_nidx(nidx)] && is_left_child(nidx)) || (!d_left_child_smallest[parent_nidx(nidx)] && !is_left_child(nidx)); if (!is_smallest && depth > 0) return; int gidx = d_gidx[local_idx]; bst_gpair gpair = d_gpair[local_ridx]; hist_builder.Add(gpair, gidx, nidx); // OPTMARK: This is slow, could use // shared memory or cache results // intead of writing to global // memory every time in atomic way. 
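// Each element handled by this lambda adds its row's gradient pair to the
// histogram bin (gidx) of the node (nidx) the row currently occupies.
// Rows sitting in the larger sibling are skipped on purpose: their
// histogram is reconstructed afterwards by the subtraction trick below.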
}); } dh::synchronize_n_devices(n_devices, dList); // time.printElapsed("Add Time"); // (in-place) reduce each element of histogram (for only current level) // across multiple gpus // TODO(JCM): use out of place with pre-allocated buffer, but then have to // copy // back on device // fprintf(stderr,"sizeof(bst_gpair)/sizeof(float)=%d\n",sizeof(bst_gpair)/sizeof(float)); for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); dh::safe_nccl(ncclAllReduce( reinterpret_cast<const void*>(hist_vec[d_idx].GetLevelPtr(depth)), reinterpret_cast<void*>(hist_vec[d_idx].GetLevelPtr(depth)), hist_vec[d_idx].LevelSize(depth) * sizeof(gpair_sum_t) / sizeof(gpair_sum_t::value_t), nccl_sum_t, ncclSum, comms[d_idx], *(streams[d_idx]))); } for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); dh::safe_cuda(cudaStreamSynchronize(*(streams[d_idx]))); } // if no NCCL, then presume only 1 GPU, then already correct // time.printElapsed("Reduce-Add Time"); // Subtraction trick (applied to all devices in same way -- to avoid doing // on master and then Bcast) if (depth > 0) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); auto hist_builder = hist_vec[d_idx].GetBuilder(); auto d_left_child_smallest = left_child_smallest[d_idx].data(); int n_sub_bins = (n_nodes_level(depth) / 2) * hist_builder.n_bins; dh::launch_n(device_idx, n_sub_bins, [=] __device__(int idx) { int nidx = n_nodes(depth - 1) + ((idx / hist_builder.n_bins) * 2); bool left_smallest = d_left_child_smallest[parent_nidx(nidx)]; if (left_smallest) { nidx++; // If left is smallest switch to right child } int gidx = idx % hist_builder.n_bins; gpair_sum_t parent = hist_builder.Get(gidx, parent_nidx(nidx)); int other_nidx = left_smallest ? nidx - 1 : nidx + 1; gpair_sum_t other = hist_builder.Get(gidx, other_nidx); gpair_sum_t sub = parent - other; hist_builder.Add( bst_gpair(sub.GetGrad(), sub.GetHess()), gidx, nidx); // OPTMARK: This is slow, could use shared // memory or cache results intead of writing to // global memory every time in atomic way. 
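// Subtraction trick, per bin:
// hist[larger child] = hist[parent] - hist[smaller child].
// nidx was adjusted above to point at the child that was NOT accumulated
// directly during BuildHist, and `sub` is accumulated into that child's
// still-empty histogram slot.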
}); } dh::synchronize_n_devices(n_devices, dList); } } #define MIN_BLOCK_THREADS 128 #define CHUNK_BLOCK_THREADS 128 // MAX_BLOCK_THREADS of 1024 is hard-coded maximum block size due // to CUDA capability 35 and above requirement // for Maximum number of threads per block #define MAX_BLOCK_THREADS 512 void FindSplit(int depth) { // Specialised based on max_bins this->FindSplitSpecialize(depth, Int<MIN_BLOCK_THREADS>()); } template <int BLOCK_THREADS> void FindSplitSpecialize(int depth, Int<BLOCK_THREADS>) { if (param.max_bin <= BLOCK_THREADS) { LaunchFindSplit<BLOCK_THREADS>(depth); } else { this->FindSplitSpecialize(depth, Int<BLOCK_THREADS + CHUNK_BLOCK_THREADS>()); } } void FindSplitSpecialize(int depth, Int<MAX_BLOCK_THREADS>) { this->LaunchFindSplit<MAX_BLOCK_THREADS>(depth); } template <int BLOCK_THREADS> void LaunchFindSplit(int depth) { bool colsample = param.colsample_bylevel < 1.0 || param.colsample_bytree < 1.0; int num_nodes_device = n_nodes_level(depth); const int GRID_SIZE = num_nodes_device; // all GPUs do same work for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); int nodes_offset_device = 0; find_split_kernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS>>>( hist_vec[d_idx].GetLevelPtr(depth), feature_segments[d_idx].data(), depth, (info->num_col), (hmat_.row_ptr.back()), nodes[d_idx].data(), nodes_offset_device, fidx_min_map[d_idx].data(), gidx_fvalue_map[d_idx].data(), GPUTrainingParam(param), left_child_smallest[d_idx].data(), colsample, feature_flags[d_idx].data()); } // NOTE: No need to syncrhonize with host as all above pure P2P ops or // on-device ops } void InitFirstNode(const std::vector<bst_gpair>& gpair) { // Perform asynchronous reduction on each gpu std::vector<bst_gpair> device_sums(n_devices); #pragma omp parallel for num_threads(n_devices) for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); auto begin = device_gpair[d_idx].tbegin(); auto end = device_gpair[d_idx].tend(); bst_gpair init = bst_gpair(); auto binary_op = thrust::plus<bst_gpair>(); device_sums[d_idx] = thrust::reduce(begin, end, init, binary_op); } bst_gpair sum = bst_gpair(); for (int d_idx = 0; d_idx < n_devices; d_idx++) { sum += device_sums[d_idx]; } // Setup first node so all devices have same first node (here done same on // all devices, or could have done one device and Bcast if worried about // exact precision issues) for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; auto d_nodes = nodes[d_idx].data(); auto gpu_param = GPUTrainingParam(param); dh::launch_n(device_idx, 1, [=] __device__(int idx) { bst_gpair sum_gradients = sum; d_nodes[idx] = DeviceNodeStats(sum_gradients, 0, gpu_param); }); } // synch all devices to host before moving on (No, can avoid because // BuildHist calls another kernel in default stream) // dh::synchronize_n_devices(n_devices, dList); } void UpdatePosition(int depth) { if (is_dense) { this->UpdatePositionDense(depth); } else { this->UpdatePositionSparse(depth); } } void UpdatePositionDense(int depth) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; auto d_position = position[d_idx].data(); DeviceNodeStats* d_nodes = nodes[d_idx].data(); auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data(); auto d_gidx = device_matrix[d_idx].gidx; int n_columns = info->num_col; size_t begin = device_row_segments[d_idx]; size_t end = device_row_segments[d_idx + 1]; 
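// Dense layout: the compressed bin index of (row, feature) lives at
// row * n_columns + fidx, which is how the lambda below fetches the split
// feature's bin for each local row before routing it left or right.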
dh::launch_n(device_idx, end - begin, [=] __device__(size_t local_idx) { int pos = d_position[local_idx]; if (!is_active(pos, depth)) { return; } DeviceNodeStats node = d_nodes[pos]; if (node.IsLeaf()) { return; } int gidx = d_gidx[local_idx * static_cast<size_t>(n_columns) + static_cast<size_t>(node.fidx)]; float fvalue = d_gidx_fvalue_map[gidx]; if (fvalue <= node.fvalue) { d_position[local_idx] = left_child_nidx(pos); } else { d_position[local_idx] = right_child_nidx(pos); } }); } dh::synchronize_n_devices(n_devices, dList); // dh::safe_cuda(cudaDeviceSynchronize()); } void UpdatePositionSparse(int depth) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; auto d_position = position[d_idx].data(); auto d_position_tmp = position_tmp[d_idx].data(); DeviceNodeStats* d_nodes = nodes[d_idx].data(); auto d_gidx_feature_map = gidx_feature_map[d_idx].data(); auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data(); auto d_gidx = device_matrix[d_idx].gidx; auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin(); size_t row_begin = device_row_segments[d_idx]; size_t row_end = device_row_segments[d_idx + 1]; size_t element_begin = device_element_segments[d_idx]; size_t element_end = device_element_segments[d_idx + 1]; // Update missing direction dh::launch_n(device_idx, row_end - row_begin, [=] __device__(int local_idx) { int pos = d_position[local_idx]; if (!is_active(pos, depth)) { d_position_tmp[local_idx] = pos; return; } DeviceNodeStats node = d_nodes[pos]; if (node.IsLeaf()) { d_position_tmp[local_idx] = pos; return; } else if (node.dir == LeftDir) { d_position_tmp[local_idx] = pos * 2 + 1; } else { d_position_tmp[local_idx] = pos * 2 + 2; } }); // Update node based on fvalue where exists // OPTMARK: This kernel is very inefficient for both compute and memory, // dominated by memory dependency / access patterns dh::TransformLbs( device_idx, &temp_memory[d_idx], element_end - element_begin, d_row_ptr, row_end - row_begin, is_dense, [=] __device__(size_t local_idx, int local_ridx) { int pos = d_position[local_ridx]; if (!is_active(pos, depth)) { return; } DeviceNodeStats node = d_nodes[pos]; if (node.IsLeaf()) { return; } int gidx = d_gidx[local_idx]; int findex = d_gidx_feature_map[gidx]; // OPTMARK: slowest global // memory access, maybe setup // position, gidx, etc. as // combined structure? 
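// Only the element whose feature matches the split feature (node.fidx)
// overrides the default missing-direction position written by the kernel
// above; rows with no stored value for that feature keep the default.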
if (findex == node.fidx) { float fvalue = d_gidx_fvalue_map[gidx]; if (fvalue <= node.fvalue) { d_position_tmp[local_ridx] = left_child_nidx(pos); } else { d_position_tmp[local_ridx] = right_child_nidx(pos); } } }); position[d_idx] = position_tmp[d_idx]; } dh::synchronize_n_devices(n_devices, dList); } void ColSampleTree() { if (param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0) return; feature_set_tree.resize(info->num_col); std::iota(feature_set_tree.begin(), feature_set_tree.end(), 0); feature_set_tree = col_sample(feature_set_tree, param.colsample_bytree); } void ColSampleLevel() { if (param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0) return; feature_set_level.resize(feature_set_tree.size()); feature_set_level = col_sample(feature_set_tree, param.colsample_bylevel); std::vector<int> h_feature_flags(info->num_col, 0); for (auto fidx : feature_set_level) { h_feature_flags[fidx] = 1; } for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); feature_flags[d_idx] = h_feature_flags; } dh::synchronize_n_devices(n_devices, dList); } bool UpdatePredictionCache(const DMatrix* data, std::vector<bst_float>* p_out_preds) override { std::vector<bst_float>& out_preds = *p_out_preds; if (nodes.empty() || !p_last_fmat_ || data != p_last_fmat_) { return false; } if (!prediction_cache_initialised) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; size_t row_begin = device_row_segments[d_idx]; size_t row_end = device_row_segments[d_idx + 1]; prediction_cache[d_idx].copy(out_preds.begin() + row_begin, out_preds.begin() + row_end); } prediction_cache_initialised = true; } dh::synchronize_n_devices(n_devices, dList); float eps = param.learning_rate; for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; size_t row_begin = device_row_segments[d_idx]; size_t row_end = device_row_segments[d_idx + 1]; auto d_nodes = nodes[d_idx].data(); auto d_position = position[d_idx].data(); auto d_prediction_cache = prediction_cache[d_idx].data(); dh::launch_n(device_idx, prediction_cache[d_idx].size(), [=] __device__(int local_idx) { int pos = d_position[local_idx]; d_prediction_cache[local_idx] += d_nodes[pos].weight * eps; }); dh::safe_cuda( cudaMemcpy(&out_preds[row_begin], prediction_cache[d_idx].data(), prediction_cache[d_idx].size() * sizeof(bst_float), cudaMemcpyDeviceToHost)); } dh::synchronize_n_devices(n_devices, dList); return true; } void UpdateTree(const std::vector<bst_gpair>& gpair, DMatrix* p_fmat, RegTree* p_tree) { dh::Timer time0; this->InitData(gpair, *p_fmat, *p_tree); this->InitFirstNode(gpair); this->ColSampleTree(); for (int depth = 0; depth < param.max_depth; depth++) { this->ColSampleLevel(); this->BuildHist(depth); this->FindSplit(depth); this->UpdatePosition(depth); } // done with multi-GPU, pass back result from master to tree on host int master_device = dList[0]; dh::safe_cuda(cudaSetDevice(master_device)); dense2sparse_tree(p_tree, nodes[0], param); gpu_time += time0.ElapsedSeconds(); if (param.debug_verbose) { LOG(CONSOLE) << "[GPU Plug-in] Cumulative GPU Time excluding initial time " << (gpu_time - gpu_init_time) << " sec"; fflush(stdout); } if (param.debug_verbose) { LOG(CONSOLE) << "[GPU Plug-in] Cumulative CPU Time " << cpu_time.ElapsedSeconds() << " sec"; LOG(CONSOLE) << "[GPU Plug-in] Cumulative CPU Time excluding initial time " << (cpu_time.ElapsedSeconds() - cpu_init_time - gpu_time) << " sec"; fflush(stdout); } } protected: 
TrainParam param; // std::unique_ptr<GPUHistBuilder> builder; common::HistCutMatrix hmat_; common::GHistIndexMatrix gmat_; MetaInfo* info; bool initialised; bool is_dense; const DMatrix* p_last_fmat_; bool prediction_cache_initialised; dh::bulk_allocator<dh::memory_type::DEVICE> ba; std::vector<int> feature_set_tree; std::vector<int> feature_set_level; bst_uint num_rows; int n_devices; // below vectors are for each devices used std::vector<int> dList; std::vector<int> device_row_segments; std::vector<size_t> device_element_segments; std::vector<dh::CubMemory> temp_memory; std::vector<DeviceHist> hist_vec; std::vector<dh::dvec<DeviceNodeStats>> nodes; std::vector<dh::dvec<DeviceNodeStats>> nodes_temp; std::vector<dh::dvec<DeviceNodeStats>> nodes_child_temp; std::vector<dh::dvec<bool>> left_child_smallest; std::vector<dh::dvec<bool>> left_child_smallest_temp; std::vector<dh::dvec<int>> feature_flags; std::vector<dh::dvec<float>> fidx_min_map; std::vector<dh::dvec<int>> feature_segments; std::vector<dh::dvec<bst_float>> prediction_cache; std::vector<dh::dvec<int>> position; std::vector<dh::dvec<int>> position_tmp; std::vector<DeviceGMat> device_matrix; std::vector<dh::dvec<bst_gpair>> device_gpair; std::vector<dh::dvec<int>> gidx_feature_map; std::vector<dh::dvec<float>> gidx_fvalue_map; std::vector<cudaStream_t*> streams; std::vector<ncclComm_t> comms; std::vector<std::vector<ncclComm_t>> find_split_comms; double cpu_init_time; double gpu_init_time; dh::Timer cpu_time; double gpu_time; }; XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist") .describe("Grow tree with GPU.") .set_body([]() { return new GPUHistMaker(); }); } // namespace tree } // namespace xgboost
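// Note: the registration above exposes this updater to XGBoost under the
// name "grow_gpu_hist"; selecting it is typically a matter of passing that
// string through the booster's updater configuration (illustrative only --
// the parameter plumbing lives outside this file).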
dfc3ae1786baef7b24c1ef6a84f10d4eb53be62e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/mv_grad_kernel.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/blas/blas.h" namespace phi { template <typename T> __global__ void MVGradDxCUDAKernel( const int m, const int n, const T *dout, const T *vec, T *dx) { int idx = blockDim.x * blockIdx.x + threadIdx.x; for (; idx < m * n; idx += blockDim.x * gridDim.x) { int i = idx / n; int j = idx % n; dx[idx] = dout[i] * vec[j]; } } template <typename T, typename Context> void MvGradKernel(const Context &dev_ctx, const DenseTensor &x, const DenseTensor &vec, const DenseTensor &out_grad, DenseTensor *x_grad, DenseTensor *vec_grad) { auto dout = out_grad; auto dx = x_grad; auto dvec = vec_grad; auto dim_x = x.dims(); int m = dim_x[0]; int n = dim_x[1]; // get data ptr const T *x_data = x.data<T>(); const T *vec_data = vec.data<T>(); const T *dout_data = dout.data<T>(); auto blas = phi::funcs::GetBlas<Context, T>(dev_ctx); auto stream = dev_ctx.stream(); auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, m * n); if (dx) { T *dx_data = dev_ctx.template Alloc<T>(dx); hipLaunchKernelGGL(( MVGradDxCUDAKernel<T>) , dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, stream, m, n, dout_data, vec_data, dx_data); } if (dvec) { T *dvec_data = dev_ctx.template Alloc<T>(dvec); blas.GEMV(true, dim_x[0], dim_x[1], static_cast<T>(1), x_data, dout_data, static_cast<T>(0), dvec_data); } } } // namespace phi PD_REGISTER_KERNEL(mv_grad, GPU, ALL_LAYOUT, phi::MvGradKernel, float, double) { }
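// Gradient sketch for out = x * vec, with x of shape (m, n) and vec of length n:
//   dx[i][j] = dout[i] * vec[j]   (outer product, MVGradDxCUDAKernel above)
//   dvec     = x^T * dout         (the transposed GEMV call above)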
dfc3ae1786baef7b24c1ef6a84f10d4eb53be62e.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/mv_grad_kernel.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/blas/blas.h" namespace phi { template <typename T> __global__ void MVGradDxCUDAKernel( const int m, const int n, const T *dout, const T *vec, T *dx) { int idx = blockDim.x * blockIdx.x + threadIdx.x; for (; idx < m * n; idx += blockDim.x * gridDim.x) { int i = idx / n; int j = idx % n; dx[idx] = dout[i] * vec[j]; } } template <typename T, typename Context> void MvGradKernel(const Context &dev_ctx, const DenseTensor &x, const DenseTensor &vec, const DenseTensor &out_grad, DenseTensor *x_grad, DenseTensor *vec_grad) { auto dout = out_grad; auto dx = x_grad; auto dvec = vec_grad; auto dim_x = x.dims(); int m = dim_x[0]; int n = dim_x[1]; // get data ptr const T *x_data = x.data<T>(); const T *vec_data = vec.data<T>(); const T *dout_data = dout.data<T>(); auto blas = phi::funcs::GetBlas<Context, T>(dev_ctx); auto stream = dev_ctx.stream(); auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, m * n); if (dx) { T *dx_data = dev_ctx.template Alloc<T>(dx); MVGradDxCUDAKernel<T> <<<config.block_per_grid.x, config.thread_per_block.x, 0, stream>>>( m, n, dout_data, vec_data, dx_data); } if (dvec) { T *dvec_data = dev_ctx.template Alloc<T>(dvec); blas.GEMV(true, dim_x[0], dim_x[1], static_cast<T>(1), x_data, dout_data, static_cast<T>(0), dvec_data); } } } // namespace phi PD_REGISTER_KERNEL(mv_grad, GPU, ALL_LAYOUT, phi::MvGradKernel, float, double) { }
e3c665cb9bbbcad657fe0fcf666c34bd6de990bd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2016 University of Cordoba and University of Illinois * All rights reserved. * * Developed by: IMPACT Research Group * University of Cordoba and University of Illinois * http://impact.crhc.illinois.edu/ * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * with the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * > Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimers. * > Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimers in the * documentation and/or other materials provided with the distribution. * > Neither the names of IMPACT Research Group, University of Cordoba, * University of Illinois nor the names of its contributors may be used * to endorse or promote products derived from this Software without * specific prior written permission. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH * THE SOFTWARE. 
* */ #define _CUDA_COMPILER_ #include "support/common.h" #include "support/partitioner.h" // Device auxiliary functions __device__ void reduce(int *l_count, int local_cnt, int *l_data) { const int tid = threadIdx.x; const int localSize = blockDim.x; // load shared mem l_data[tid] = local_cnt; __syncthreads(); // do reduction in shared mem for(int s = localSize >> 1; s > 0; s >>= 1) { if(tid < s) { l_data[tid] += l_data[tid + s]; } __syncthreads(); } // write result for this block to global mem if(tid == 0) *l_count = l_data[0]; } __device__ int block_binary_prefix_sums(int *l_count, int x, int *l_data) { l_data[threadIdx.x] = x; const int length = blockDim.x; // Build up tree int offset = 1; for(int l = length >> 1; l > 0; l >>= 1) { __syncthreads(); if(threadIdx.x < l) { int ai = offset * (2 * threadIdx.x + 1) - 1; int bi = offset * (2 * threadIdx.x + 2) - 1; l_data[bi] += l_data[ai]; } offset <<= 1; } if(offset < length) { offset <<= 1; } // Build down tree int maxThread = offset >> 1; for(int d = 0; d < maxThread; d <<= 1) { d += 1; offset >>= 1; __syncthreads(); if(threadIdx.x < d) { int ai = offset * (threadIdx.x + 1) - 1; int bi = ai + (offset >> 1); l_data[bi] += l_data[ai]; } } __syncthreads(); int output = l_data[threadIdx.x] + *l_count - x; __syncthreads(); if(threadIdx.x == blockDim.x - 1) *l_count += l_data[threadIdx.x]; return output; } // CUDA kernel ------------------------------------------------------------------------------------------ __global__ void StreamCompaction_kernel(int size, T value, int n_tasks, float alpha, T *output, T *input, int *flags #ifdef CUDA_8_0 , int *worklist #endif ) { extern __shared__ int l_mem[]; int* l_data = l_mem; int* l_count = &l_data[blockDim.x]; #ifdef CUDA_8_0 int* l_tmp = &l_count[1]; #endif #ifdef CUDA_8_0 Partitioner p = partitioner_create(n_tasks, alpha, worklist, l_tmp); #else Partitioner p = partitioner_create(n_tasks, alpha); #endif for(int my_s = gpu_first(&p); gpu_more(&p); my_s = gpu_next(&p)) { if(threadIdx.x == 0) { l_count[0] = 0; } __syncthreads(); int local_cnt = 0; // Declare on-chip memory T reg[REGS]; #ifdef CUDA_8_0 int pos = my_s * REGS * blockDim.x + threadIdx.x; #else int pos = (my_s - p.cut) * REGS * blockDim.x + threadIdx.x; #endif // Load in on-chip memory #pragma unroll for(int j = 0; j < REGS; j++) { if(pos < size) { reg[j] = input[pos]; if(reg[j] != value) local_cnt++; } else reg[j] = value; pos += blockDim.x; } reduce(&l_count[0], local_cnt, &l_data[0]); // Set global synch if(threadIdx.x == 0) { int p_count; #ifdef CUDA_8_0 while((p_count = atomicAdd_system(&flags[my_s], 0)) == 0) { } atomicAdd_system(&flags[my_s + 1], p_count + l_count[0]); #else while((p_count = atomicAdd(&flags[my_s], 0)) == 0) { } atomicAdd(&flags[my_s + 1], p_count + l_count[0]); #endif l_count[0] = p_count - 1; } __syncthreads(); // Store to global memory #pragma unroll for(int j = 0; j < REGS; j++) { pos = block_binary_prefix_sums(&l_count[0], (int)((reg[j] != value) ? 1 : 0), &l_data[0]); if(reg[j] != value) { output[pos] = reg[j]; } } } } hipError_t call_StreamCompaction_kernel(int blocks, int threads, int size, T value, int n_tasks, float alpha, T *output, T *input, int *flags, int l_mem_size #ifdef CUDA_8_0 , int *worklist #endif ){ dim3 dimGrid(blocks); dim3 dimBlock(threads); hipLaunchKernelGGL(( StreamCompaction_kernel), dim3(dimGrid), dim3(dimBlock), l_mem_size, 0, size, value, n_tasks, alpha, output, input, flags #ifdef CUDA_8_0 , worklist #endif ); hipError_t err = hipGetLastError(); return err; }
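// Shape of the kernel above, for each chunk my_s handed out by the partitioner:
//   1. every thread loads REGS elements and counts its local survivors (!= value);
//   2. reduce() folds those counts into l_count[0];
//   3. thread 0 spins on flags[my_s] until the previous chunk has published its
//      running total, then publishes flags[my_s + 1] for the next chunk;
//   4. block_binary_prefix_sums() turns each survivor's 0/1 flag into its final
//      write position in `output`.
// This is the usual chained-scan / adjacent-synchronization pattern for stream
// compaction across thread blocks.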
e3c665cb9bbbcad657fe0fcf666c34bd6de990bd.cu
/* * Copyright (c) 2016 University of Cordoba and University of Illinois * All rights reserved. * * Developed by: IMPACT Research Group * University of Cordoba and University of Illinois * http://impact.crhc.illinois.edu/ * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * with the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * > Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimers. * > Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimers in the * documentation and/or other materials provided with the distribution. * > Neither the names of IMPACT Research Group, University of Cordoba, * University of Illinois nor the names of its contributors may be used * to endorse or promote products derived from this Software without * specific prior written permission. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH * THE SOFTWARE. * */ #define _CUDA_COMPILER_ #include "support/common.h" #include "support/partitioner.h" // Device auxiliary functions __device__ void reduce(int *l_count, int local_cnt, int *l_data) { const int tid = threadIdx.x; const int localSize = blockDim.x; // load shared mem l_data[tid] = local_cnt; __syncthreads(); // do reduction in shared mem for(int s = localSize >> 1; s > 0; s >>= 1) { if(tid < s) { l_data[tid] += l_data[tid + s]; } __syncthreads(); } // write result for this block to global mem if(tid == 0) *l_count = l_data[0]; } __device__ int block_binary_prefix_sums(int *l_count, int x, int *l_data) { l_data[threadIdx.x] = x; const int length = blockDim.x; // Build up tree int offset = 1; for(int l = length >> 1; l > 0; l >>= 1) { __syncthreads(); if(threadIdx.x < l) { int ai = offset * (2 * threadIdx.x + 1) - 1; int bi = offset * (2 * threadIdx.x + 2) - 1; l_data[bi] += l_data[ai]; } offset <<= 1; } if(offset < length) { offset <<= 1; } // Build down tree int maxThread = offset >> 1; for(int d = 0; d < maxThread; d <<= 1) { d += 1; offset >>= 1; __syncthreads(); if(threadIdx.x < d) { int ai = offset * (threadIdx.x + 1) - 1; int bi = ai + (offset >> 1); l_data[bi] += l_data[ai]; } } __syncthreads(); int output = l_data[threadIdx.x] + *l_count - x; __syncthreads(); if(threadIdx.x == blockDim.x - 1) *l_count += l_data[threadIdx.x]; return output; } // CUDA kernel ------------------------------------------------------------------------------------------ __global__ void StreamCompaction_kernel(int size, T value, int n_tasks, float alpha, T *output, T *input, int *flags #ifdef CUDA_8_0 , int *worklist #endif ) { extern __shared__ int l_mem[]; int* l_data = l_mem; int* l_count = &l_data[blockDim.x]; #ifdef CUDA_8_0 int* l_tmp = &l_count[1]; #endif #ifdef CUDA_8_0 Partitioner p = 
partitioner_create(n_tasks, alpha, worklist, l_tmp); #else Partitioner p = partitioner_create(n_tasks, alpha); #endif for(int my_s = gpu_first(&p); gpu_more(&p); my_s = gpu_next(&p)) { if(threadIdx.x == 0) { l_count[0] = 0; } __syncthreads(); int local_cnt = 0; // Declare on-chip memory T reg[REGS]; #ifdef CUDA_8_0 int pos = my_s * REGS * blockDim.x + threadIdx.x; #else int pos = (my_s - p.cut) * REGS * blockDim.x + threadIdx.x; #endif // Load in on-chip memory #pragma unroll for(int j = 0; j < REGS; j++) { if(pos < size) { reg[j] = input[pos]; if(reg[j] != value) local_cnt++; } else reg[j] = value; pos += blockDim.x; } reduce(&l_count[0], local_cnt, &l_data[0]); // Set global synch if(threadIdx.x == 0) { int p_count; #ifdef CUDA_8_0 while((p_count = atomicAdd_system(&flags[my_s], 0)) == 0) { } atomicAdd_system(&flags[my_s + 1], p_count + l_count[0]); #else while((p_count = atomicAdd(&flags[my_s], 0)) == 0) { } atomicAdd(&flags[my_s + 1], p_count + l_count[0]); #endif l_count[0] = p_count - 1; } __syncthreads(); // Store to global memory #pragma unroll for(int j = 0; j < REGS; j++) { pos = block_binary_prefix_sums(&l_count[0], (int)((reg[j] != value) ? 1 : 0), &l_data[0]); if(reg[j] != value) { output[pos] = reg[j]; } } } } cudaError_t call_StreamCompaction_kernel(int blocks, int threads, int size, T value, int n_tasks, float alpha, T *output, T *input, int *flags, int l_mem_size #ifdef CUDA_8_0 , int *worklist #endif ){ dim3 dimGrid(blocks); dim3 dimBlock(threads); StreamCompaction_kernel<<<dimGrid, dimBlock, l_mem_size>>>(size, value, n_tasks, alpha, output, input, flags #ifdef CUDA_8_0 , worklist #endif ); cudaError_t err = cudaGetLastError(); return err; }
7a1762d148a57e5e0c5f28421de60b6007f616f3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2011-2016 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "prefix_sum_layer_updater_cuda.h" #include "../prefix_sum_layer.h" #include "util_cuda.h" namespace nnforge { namespace cuda { extern __shared__ float arr_sh[]; __global__ void prefix_sum_upd_kernel( float * __restrict output, const float * __restrict input, int feature_map_segment_length, int neuron_count_per_entry, int neuron_count_per_feature_map, float clamp_min, float clamp_max, int iteration_count) { int threadblock_size = blockDim.x; int thread_id = threadIdx.x; int neuron_id = blockIdx.x; int feature_map_segment_id = blockIdx.y; int entry_id = blockIdx.z; int current_feature_map_local_id = thread_id; int offset = entry_id * neuron_count_per_entry + (feature_map_segment_id * feature_map_segment_length + current_feature_map_local_id) * neuron_count_per_feature_map + neuron_id; float running_sum = 0.0F; for(int i = 0; i < iteration_count; ++i, current_feature_map_local_id += threadblock_size) { float val = 0.0F; if (current_feature_map_local_id < feature_map_segment_length) val = input[offset]; if (thread_id == 0) val += running_sum; arr_sh[thread_id] = val; __syncthreads(); for(int d = 1; d < threadblock_size; d = d << 1) { if (thread_id >= d) val += arr_sh[thread_id - d]; __syncthreads(); if (thread_id >= d) arr_sh[thread_id] = val; __syncthreads(); } if (thread_id == 0) running_sum = arr_sh[threadblock_size - 1]; __syncthreads(); if (current_feature_map_local_id < feature_map_segment_length) output[offset] = min(max(val, clamp_min), clamp_max); offset += threadblock_size * neuron_count_per_feature_map; } } template<bool add_update_to_destination> __global__ void prefix_sum_backprop_upd_kernel( float * __restrict input_errors, const float * __restrict output_errors, int feature_map_segment_length, int neuron_count_per_entry, int neuron_count_per_feature_map, float clamp_min, float clamp_max, int iteration_count) { int threadblock_size = blockDim.x; int thread_id = threadIdx.x; int neuron_id = blockIdx.x; int feature_map_segment_id = blockIdx.y; int entry_id = blockIdx.z; int current_feature_map_local_id = thread_id; int offset = entry_id * neuron_count_per_entry + (feature_map_segment_id * feature_map_segment_length + feature_map_segment_length - 1 - current_feature_map_local_id) * neuron_count_per_feature_map + neuron_id; float running_sum = 0.0F; for(int i = 0; i < iteration_count; ++i, current_feature_map_local_id += threadblock_size) { float val = 0.0F; if (current_feature_map_local_id < feature_map_segment_length) val = output_errors[offset]; if (thread_id == 0) val += running_sum; arr_sh[thread_id] = val; __syncthreads(); for(int d = 1; d < threadblock_size; d = d << 1) { if (thread_id >= d) val += arr_sh[thread_id - d]; __syncthreads(); if (thread_id >= d) arr_sh[thread_id] = val; __syncthreads(); } if (thread_id == 0) running_sum = arr_sh[threadblock_size - 1]; __syncthreads(); if 
(current_feature_map_local_id < feature_map_segment_length) { if (add_update_to_destination) input_errors[offset] += val; else input_errors[offset] = val; } offset -= threadblock_size * neuron_count_per_feature_map; } } void prefix_sum_layer_updater_cuda::enqueue_forward_propagation( hipStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::ptr temporary_fixed_buffer, cuda_linear_buffer_device::ptr temporary_per_entry_buffer, unsigned int entry_count) { int threadblock_size = get_threadblock_size(feature_map_segment_length); int smem_size = threadblock_size * sizeof(float); int feature_map_segment_count = output_configuration_specific.feature_map_count / feature_map_segment_length; int iteration_count = (feature_map_segment_length + threadblock_size - 1) / threadblock_size; hipLaunchKernelGGL(( prefix_sum_upd_kernel), dim3(dim3(output_elem_count_per_feature_map, feature_map_segment_count, entry_count)), dim3(threadblock_size), smem_size, stream_id, *output_buffer, *input_buffers[0], feature_map_segment_length, output_elem_count_per_entry, output_elem_count_per_feature_map, clamp_min, clamp_max, iteration_count); } void prefix_sum_layer_updater_cuda::enqueue_backward_data_propagation( hipStream_t stream_id, unsigned int input_index, cuda_linear_buffer_device::ptr input_errors_buffer, cuda_linear_buffer_device::const_ptr output_errors_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers, cuda_linear_buffer_device::const_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::const_ptr temporary_fixed_buffer, cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer, bool add_update_to_destination, unsigned int entry_count) { int threadblock_size = get_threadblock_size(feature_map_segment_length); int smem_size = threadblock_size * sizeof(float); int feature_map_segment_count = output_configuration_specific.feature_map_count / feature_map_segment_length; int iteration_count = (feature_map_segment_length + threadblock_size - 1) / threadblock_size; if (add_update_to_destination) hipLaunchKernelGGL(( prefix_sum_backprop_upd_kernel<true>), dim3(dim3(output_elem_count_per_feature_map, feature_map_segment_count, entry_count)), dim3(threadblock_size), smem_size, stream_id, *input_errors_buffer, *output_errors_buffer, feature_map_segment_length, output_elem_count_per_entry, output_elem_count_per_feature_map, clamp_min, clamp_max, iteration_count); else hipLaunchKernelGGL(( prefix_sum_backprop_upd_kernel<false>), dim3(dim3(output_elem_count_per_feature_map, feature_map_segment_count, entry_count)), dim3(threadblock_size), smem_size, 
stream_id, *input_errors_buffer, *output_errors_buffer, feature_map_segment_length, output_elem_count_per_entry, output_elem_count_per_feature_map, clamp_min, clamp_max, iteration_count); } void prefix_sum_layer_updater_cuda::updater_configured() { std::shared_ptr<const prefix_sum_layer> layer_derived = std::dynamic_pointer_cast<const prefix_sum_layer>(layer_schema); feature_map_segment_length = layer_derived->feature_map_segment_length; clamp_min = layer_derived->clamp_min; clamp_max = layer_derived->clamp_max; } int prefix_sum_layer_updater_cuda::get_input_index_layer_can_write(const layer_action& action) const { return 0; } bool prefix_sum_layer_updater_cuda::is_backward_data_dependent_on_input_buffer(unsigned int action_input_index, unsigned int data_input_index) const { return false; } bool prefix_sum_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const { return false; } int prefix_sum_layer_updater_cuda::get_threadblock_size(int feature_map_segment_length) { int threadblock_size; if (feature_map_segment_length < 256) { threadblock_size = (feature_map_segment_length + 32 - 1) / 32 * 32; } else { int threadblock_count = (feature_map_segment_length + 256 - 1) / 256; threadblock_size = (feature_map_segment_length + threadblock_count - 1) / threadblock_count; threadblock_size = (threadblock_size + 32 - 1) / 32 * 32; } return threadblock_size; } } }
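// Both kernels above run an inclusive Hillis-Steele scan in shared memory over
// threadblock_size feature maps at a time, carrying running_sum across chunks so
// segments longer than one block still receive a full prefix sum (clamped to
// [clamp_min, clamp_max] on the forward pass only). The backprop kernel walks the
// segment in reverse because, ignoring the clamp,
//   d loss / d input[f] = sum over f' >= f of output_errors[f'],
// i.e. the gradient of a prefix sum is the corresponding suffix sum.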
7a1762d148a57e5e0c5f28421de60b6007f616f3.cu
/* * Copyright 2011-2016 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "prefix_sum_layer_updater_cuda.h" #include "../prefix_sum_layer.h" #include "util_cuda.h" namespace nnforge { namespace cuda { extern __shared__ float arr_sh[]; __global__ void prefix_sum_upd_kernel( float * __restrict output, const float * __restrict input, int feature_map_segment_length, int neuron_count_per_entry, int neuron_count_per_feature_map, float clamp_min, float clamp_max, int iteration_count) { int threadblock_size = blockDim.x; int thread_id = threadIdx.x; int neuron_id = blockIdx.x; int feature_map_segment_id = blockIdx.y; int entry_id = blockIdx.z; int current_feature_map_local_id = thread_id; int offset = entry_id * neuron_count_per_entry + (feature_map_segment_id * feature_map_segment_length + current_feature_map_local_id) * neuron_count_per_feature_map + neuron_id; float running_sum = 0.0F; for(int i = 0; i < iteration_count; ++i, current_feature_map_local_id += threadblock_size) { float val = 0.0F; if (current_feature_map_local_id < feature_map_segment_length) val = input[offset]; if (thread_id == 0) val += running_sum; arr_sh[thread_id] = val; __syncthreads(); for(int d = 1; d < threadblock_size; d = d << 1) { if (thread_id >= d) val += arr_sh[thread_id - d]; __syncthreads(); if (thread_id >= d) arr_sh[thread_id] = val; __syncthreads(); } if (thread_id == 0) running_sum = arr_sh[threadblock_size - 1]; __syncthreads(); if (current_feature_map_local_id < feature_map_segment_length) output[offset] = min(max(val, clamp_min), clamp_max); offset += threadblock_size * neuron_count_per_feature_map; } } template<bool add_update_to_destination> __global__ void prefix_sum_backprop_upd_kernel( float * __restrict input_errors, const float * __restrict output_errors, int feature_map_segment_length, int neuron_count_per_entry, int neuron_count_per_feature_map, float clamp_min, float clamp_max, int iteration_count) { int threadblock_size = blockDim.x; int thread_id = threadIdx.x; int neuron_id = blockIdx.x; int feature_map_segment_id = blockIdx.y; int entry_id = blockIdx.z; int current_feature_map_local_id = thread_id; int offset = entry_id * neuron_count_per_entry + (feature_map_segment_id * feature_map_segment_length + feature_map_segment_length - 1 - current_feature_map_local_id) * neuron_count_per_feature_map + neuron_id; float running_sum = 0.0F; for(int i = 0; i < iteration_count; ++i, current_feature_map_local_id += threadblock_size) { float val = 0.0F; if (current_feature_map_local_id < feature_map_segment_length) val = output_errors[offset]; if (thread_id == 0) val += running_sum; arr_sh[thread_id] = val; __syncthreads(); for(int d = 1; d < threadblock_size; d = d << 1) { if (thread_id >= d) val += arr_sh[thread_id - d]; __syncthreads(); if (thread_id >= d) arr_sh[thread_id] = val; __syncthreads(); } if (thread_id == 0) running_sum = arr_sh[threadblock_size - 1]; __syncthreads(); if (current_feature_map_local_id < feature_map_segment_length) { if (add_update_to_destination) input_errors[offset] 
+= val; else input_errors[offset] = val; } offset -= threadblock_size * neuron_count_per_feature_map; } } void prefix_sum_layer_updater_cuda::enqueue_forward_propagation( cudaStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::ptr temporary_fixed_buffer, cuda_linear_buffer_device::ptr temporary_per_entry_buffer, unsigned int entry_count) { int threadblock_size = get_threadblock_size(feature_map_segment_length); int smem_size = threadblock_size * sizeof(float); int feature_map_segment_count = output_configuration_specific.feature_map_count / feature_map_segment_length; int iteration_count = (feature_map_segment_length + threadblock_size - 1) / threadblock_size; prefix_sum_upd_kernel<<<dim3(output_elem_count_per_feature_map, feature_map_segment_count, entry_count), threadblock_size, smem_size, stream_id>>>( *output_buffer, *input_buffers[0], feature_map_segment_length, output_elem_count_per_entry, output_elem_count_per_feature_map, clamp_min, clamp_max, iteration_count); } void prefix_sum_layer_updater_cuda::enqueue_backward_data_propagation( cudaStream_t stream_id, unsigned int input_index, cuda_linear_buffer_device::ptr input_errors_buffer, cuda_linear_buffer_device::const_ptr output_errors_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers, cuda_linear_buffer_device::const_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::const_ptr temporary_fixed_buffer, cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer, bool add_update_to_destination, unsigned int entry_count) { int threadblock_size = get_threadblock_size(feature_map_segment_length); int smem_size = threadblock_size * sizeof(float); int feature_map_segment_count = output_configuration_specific.feature_map_count / feature_map_segment_length; int iteration_count = (feature_map_segment_length + threadblock_size - 1) / threadblock_size; if (add_update_to_destination) prefix_sum_backprop_upd_kernel<true><<<dim3(output_elem_count_per_feature_map, feature_map_segment_count, entry_count), threadblock_size, smem_size, stream_id>>>( *input_errors_buffer, *output_errors_buffer, feature_map_segment_length, output_elem_count_per_entry, output_elem_count_per_feature_map, clamp_min, clamp_max, iteration_count); else prefix_sum_backprop_upd_kernel<false><<<dim3(output_elem_count_per_feature_map, feature_map_segment_count, entry_count), threadblock_size, smem_size, stream_id>>>( *input_errors_buffer, *output_errors_buffer, feature_map_segment_length, output_elem_count_per_entry, output_elem_count_per_feature_map, clamp_min, clamp_max, iteration_count); } void 
prefix_sum_layer_updater_cuda::updater_configured() { std::shared_ptr<const prefix_sum_layer> layer_derived = std::dynamic_pointer_cast<const prefix_sum_layer>(layer_schema); feature_map_segment_length = layer_derived->feature_map_segment_length; clamp_min = layer_derived->clamp_min; clamp_max = layer_derived->clamp_max; } int prefix_sum_layer_updater_cuda::get_input_index_layer_can_write(const layer_action& action) const { return 0; } bool prefix_sum_layer_updater_cuda::is_backward_data_dependent_on_input_buffer(unsigned int action_input_index, unsigned int data_input_index) const { return false; } bool prefix_sum_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const { return false; } int prefix_sum_layer_updater_cuda::get_threadblock_size(int feature_map_segment_length) { int threadblock_size; if (feature_map_segment_length < 256) { threadblock_size = (feature_map_segment_length + 32 - 1) / 32 * 32; } else { int threadblock_count = (feature_map_segment_length + 256 - 1) / 256; threadblock_size = (feature_map_segment_length + threadblock_count - 1) / threadblock_count; threadblock_size = (threadblock_size + 32 - 1) / 32 * 32; } return threadblock_size; } } }
c9c276b8b39aa707825f46caa343f77c8fd7536a.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2022 by XGBoost Contributors * * \brief Some components of GPU Hist evaluator, this file only exist to reduce nvcc * compilation time. */ #include <thrust/logical.h> // thrust::any_of #include <thrust/sort.h> // thrust::stable_sort #include "../../common/device_helpers.cuh" #include "../../common/hist_util.h" // common::HistogramCuts #include "evaluate_splits.cuh" #include "xgboost/data.h" namespace xgboost { namespace tree { template <typename GradientSumT> void GPUHistEvaluator<GradientSumT>::Reset(common::HistogramCuts const &cuts, common::Span<FeatureType const> ft, bst_feature_t n_features, TrainParam const &param, int32_t device) { param_ = param; tree_evaluator_ = TreeEvaluator{param, n_features, device}; has_categoricals_ = cuts.HasCategorical(); if (cuts.HasCategorical()) { dh::XGBCachingDeviceAllocator<char> alloc; auto ptrs = cuts.cut_ptrs_.ConstDeviceSpan(); auto beg = thrust::make_counting_iterator<size_t>(1ul); auto end = thrust::make_counting_iterator<size_t>(ptrs.size()); auto to_onehot = param.max_cat_to_onehot; // This condition avoids sort-based split function calls if the users want // onehot-encoding-based splits. // For some reason, any_of adds 1.5 minutes to compilation time for CUDA 11.x. need_sort_histogram_ = thrust::any_of(thrust::hip::par(alloc), beg, end, [=] XGBOOST_DEVICE(size_t i) { auto idx = i - 1; if (common::IsCat(ft, idx)) { auto n_bins = ptrs[i] - ptrs[idx]; bool use_sort = !common::UseOneHot(n_bins, to_onehot); return use_sort; } return false; }); node_categorical_storage_size_ = common::CatBitField::ComputeStorageSize(cuts.MaxCategory() + 1); CHECK_NE(node_categorical_storage_size_, 0); split_cats_.resize(node_categorical_storage_size_); h_split_cats_.resize(node_categorical_storage_size_); dh::safe_cuda( hipMemsetAsync(split_cats_.data().get(), '\0', split_cats_.size() * sizeof(CatST))); cat_sorted_idx_.resize(cuts.cut_values_.Size() * 2); // evaluate 2 nodes at a time. 
sort_input_.resize(cat_sorted_idx_.size()); /** * cache feature index binary search result */ feature_idx_.resize(cat_sorted_idx_.size()); auto d_fidxes = dh::ToSpan(feature_idx_); auto it = thrust::make_counting_iterator(0ul); auto values = cuts.cut_values_.ConstDeviceSpan(); thrust::transform(thrust::hip::par(alloc), it, it + feature_idx_.size(), feature_idx_.begin(), [=] XGBOOST_DEVICE(size_t i) { auto fidx = dh::SegmentId(ptrs, i); return fidx; }); } } template <typename GradientSumT> common::Span<bst_feature_t const> GPUHistEvaluator<GradientSumT>::SortHistogram( common::Span<const EvaluateSplitInputs> d_inputs, EvaluateSplitSharedInputs shared_inputs, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator) { dh::XGBCachingDeviceAllocator<char> alloc; auto sorted_idx = this->SortedIdx(d_inputs.size(), shared_inputs.feature_values.size()); dh::Iota(sorted_idx); auto data = this->SortInput(d_inputs.size(), shared_inputs.feature_values.size()); auto it = thrust::make_counting_iterator(0u); auto d_feature_idx = dh::ToSpan(feature_idx_); auto total_bins = shared_inputs.feature_values.size(); thrust::transform(thrust::hip::par(alloc), it, it + data.size(), dh::tbegin(data), [=] XGBOOST_DEVICE(uint32_t i) { auto const &input = d_inputs[i / total_bins]; auto j = i % total_bins; auto fidx = d_feature_idx[j]; if (common::IsCat(shared_inputs.feature_types, fidx)) { auto lw = evaluator.CalcWeightCat(shared_inputs.param, input.gradient_histogram[j]); return thrust::make_tuple(i, lw); } return thrust::make_tuple(i, 0.0); }); // Sort an array segmented according to // - nodes // - features within each node // - gradients within each feature thrust::stable_sort_by_key(thrust::hip::par(alloc), dh::tbegin(data), dh::tend(data), dh::tbegin(sorted_idx), [=] XGBOOST_DEVICE(SortPair const &l, SortPair const &r) { auto li = thrust::get<0>(l); auto ri = thrust::get<0>(r); auto l_node = li / total_bins; auto r_node = ri / total_bins; if (l_node != r_node) { return l_node < r_node; // not the same node } li = li % total_bins; ri = ri % total_bins; auto lfidx = d_feature_idx[li]; auto rfidx = d_feature_idx[ri]; if (lfidx != rfidx) { return lfidx < rfidx; // not the same feature } if (common::IsCat(shared_inputs.feature_types, lfidx)) { auto lw = thrust::get<1>(l); auto rw = thrust::get<1>(r); return lw < rw; } return li < ri; }); return dh::ToSpan(cat_sorted_idx_); } template class GPUHistEvaluator<GradientPair>; template class GPUHistEvaluator<GradientPairPrecise>; } // namespace tree } // namespace xgboost
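// SortHistogram above builds one (flat index, weight) key per histogram bin and
// candidate node, then stable-sorts so that bins stay grouped by node and by
// feature; within a categorical feature the tie-break is the CalcWeightCat
// weight, which is the ordering the sort-based categorical split search
// downstream relies on.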
c9c276b8b39aa707825f46caa343f77c8fd7536a.cu
/*!
 * Copyright 2022 by XGBoost Contributors
 *
 * \brief Some components of GPU Hist evaluator, this file only exist to reduce nvcc
 * compilation time.
 */
#include <thrust/logical.h>  // thrust::any_of
#include <thrust/sort.h>     // thrust::stable_sort

#include "../../common/device_helpers.cuh"
#include "../../common/hist_util.h"  // common::HistogramCuts
#include "evaluate_splits.cuh"
#include "xgboost/data.h"

namespace xgboost {
namespace tree {
template <typename GradientSumT>
void GPUHistEvaluator<GradientSumT>::Reset(common::HistogramCuts const &cuts,
                                           common::Span<FeatureType const> ft,
                                           bst_feature_t n_features, TrainParam const &param,
                                           int32_t device) {
  param_ = param;
  tree_evaluator_ = TreeEvaluator{param, n_features, device};
  has_categoricals_ = cuts.HasCategorical();
  if (cuts.HasCategorical()) {
    dh::XGBCachingDeviceAllocator<char> alloc;
    auto ptrs = cuts.cut_ptrs_.ConstDeviceSpan();
    auto beg = thrust::make_counting_iterator<size_t>(1ul);
    auto end = thrust::make_counting_iterator<size_t>(ptrs.size());
    auto to_onehot = param.max_cat_to_onehot;
    // This condition avoids sort-based split function calls if the users want
    // onehot-encoding-based splits.
    // For some reason, any_of adds 1.5 minutes to compilation time for CUDA 11.x.
    need_sort_histogram_ =
        thrust::any_of(thrust::cuda::par(alloc), beg, end, [=] XGBOOST_DEVICE(size_t i) {
          auto idx = i - 1;
          if (common::IsCat(ft, idx)) {
            auto n_bins = ptrs[i] - ptrs[idx];
            bool use_sort = !common::UseOneHot(n_bins, to_onehot);
            return use_sort;
          }
          return false;
        });

    node_categorical_storage_size_ =
        common::CatBitField::ComputeStorageSize(cuts.MaxCategory() + 1);
    CHECK_NE(node_categorical_storage_size_, 0);
    split_cats_.resize(node_categorical_storage_size_);
    h_split_cats_.resize(node_categorical_storage_size_);
    dh::safe_cuda(
        cudaMemsetAsync(split_cats_.data().get(), '\0', split_cats_.size() * sizeof(CatST)));

    cat_sorted_idx_.resize(cuts.cut_values_.Size() * 2);  // evaluate 2 nodes at a time.
    sort_input_.resize(cat_sorted_idx_.size());

    /**
     * cache feature index binary search result
     */
    feature_idx_.resize(cat_sorted_idx_.size());
    auto d_fidxes = dh::ToSpan(feature_idx_);
    auto it = thrust::make_counting_iterator(0ul);
    auto values = cuts.cut_values_.ConstDeviceSpan();
    thrust::transform(thrust::cuda::par(alloc), it, it + feature_idx_.size(),
                      feature_idx_.begin(), [=] XGBOOST_DEVICE(size_t i) {
                        auto fidx = dh::SegmentId(ptrs, i);
                        return fidx;
                      });
  }
}

template <typename GradientSumT>
common::Span<bst_feature_t const> GPUHistEvaluator<GradientSumT>::SortHistogram(
    common::Span<const EvaluateSplitInputs> d_inputs, EvaluateSplitSharedInputs shared_inputs,
    TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator) {
  dh::XGBCachingDeviceAllocator<char> alloc;
  auto sorted_idx = this->SortedIdx(d_inputs.size(), shared_inputs.feature_values.size());
  dh::Iota(sorted_idx);
  auto data = this->SortInput(d_inputs.size(), shared_inputs.feature_values.size());
  auto it = thrust::make_counting_iterator(0u);
  auto d_feature_idx = dh::ToSpan(feature_idx_);
  auto total_bins = shared_inputs.feature_values.size();
  thrust::transform(thrust::cuda::par(alloc), it, it + data.size(), dh::tbegin(data),
                    [=] XGBOOST_DEVICE(uint32_t i) {
                      auto const &input = d_inputs[i / total_bins];
                      auto j = i % total_bins;
                      auto fidx = d_feature_idx[j];
                      if (common::IsCat(shared_inputs.feature_types, fidx)) {
                        auto lw = evaluator.CalcWeightCat(shared_inputs.param,
                                                          input.gradient_histogram[j]);
                        return thrust::make_tuple(i, lw);
                      }
                      return thrust::make_tuple(i, 0.0);
                    });
  // Sort an array segmented according to
  // - nodes
  // - features within each node
  // - gradients within each feature
  thrust::stable_sort_by_key(thrust::cuda::par(alloc), dh::tbegin(data), dh::tend(data),
                             dh::tbegin(sorted_idx),
                             [=] XGBOOST_DEVICE(SortPair const &l, SortPair const &r) {
                               auto li = thrust::get<0>(l);
                               auto ri = thrust::get<0>(r);
                               auto l_node = li / total_bins;
                               auto r_node = ri / total_bins;
                               if (l_node != r_node) {
                                 return l_node < r_node;  // not the same node
                               }
                               li = li % total_bins;
                               ri = ri % total_bins;
                               auto lfidx = d_feature_idx[li];
                               auto rfidx = d_feature_idx[ri];
                               if (lfidx != rfidx) {
                                 return lfidx < rfidx;  // not the same feature
                               }
                               if (common::IsCat(shared_inputs.feature_types, lfidx)) {
                                 auto lw = thrust::get<1>(l);
                                 auto rw = thrust::get<1>(r);
                                 return lw < rw;
                               }
                               return li < ri;
                             });
  return dh::ToSpan(cat_sorted_idx_);
}

template class GPUHistEvaluator<GradientPair>;
template class GPUHistEvaluator<GradientPairPrecise>;
}  // namespace tree
}  // namespace xgboost
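Note on the pair above: in the portion shown, the hipified copy and the CUDA original differ only in the Thrust execution-policy namespace (thrust::hip::par versus thrust::cuda::par) plus the hipify banner comment. A minimal, self-contained sketch of that policy swap follows; the function and variable names are illustrative and not taken from the files above.

#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>

// Negate a device vector in place using an explicit backend execution policy.
void negate_on_device(thrust::device_vector<int> &v) {
  // CUDA build: algorithms are dispatched with thrust::cuda::par;
  // the hipified counterpart of this call reads thrust::hip::par instead.
  thrust::transform(thrust::cuda::par, v.begin(), v.end(), v.begin(),
                    thrust::negate<int>());
}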
930cd768a8c9a9c73ea3c225b8059e60a2c31ed4.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <hip/hip_runtime.h>

#define CHUNK_S 4096

typedef struct {
  float x, y, z;
} kdata;

__constant__ kdata k[CHUNK_S];

__global__ void cmpfhd(const float*__restrict__ rmu,
                       const float*__restrict__ imu,
                       float*__restrict__ rfhd,
                       float*__restrict__ ifhd,
                       const float*__restrict__ x,
                       const float*__restrict__ y,
                       const float*__restrict__ z,
                       const int samples,
                       const int voxels)
{
  int n = blockIdx.x * blockDim.x + threadIdx.x;
  if (n < samples) {
    float xn = x[n], yn = y[n], zn = z[n];
    float rfhdn = rfhd[n], ifhdn = ifhd[n];
    for (int m = 0; m < voxels; m++) {
      float e = 2.f * (float)M_PI * (k[m].x * xn + k[m].y * yn + k[m].z * zn);
      float c = __cosf(e);
      float s = __sinf(e);
      rfhdn += rmu[m] * c - imu[m] * s;
      ifhdn += imu[m] * c + rmu[m] * s;
    }
    rfhd[n] = rfhdn, ifhd[n] = ifhdn;
  }
}

int main(int argc, char* argv[]) {
  if (argc != 4) {
    printf("Usage: %s <#samples> <#voxels> <verify>\n", argv[0]);
    exit(1);
  }
  const int samples = atoi(argv[1]); // in the order of 100000
  const int voxels = atoi(argv[2]);  // cube(128)/2097152
  const int verify = atoi(argv[3]);

  const int sampleSize = samples * sizeof(float);
  const int voxelSize = voxels * sizeof(float);

  float *h_rmu = (float*) malloc (voxelSize);
  float *h_imu = (float*) malloc (voxelSize);
  float *h_kx = (float*) malloc (voxelSize);
  float *h_ky = (float*) malloc (voxelSize);
  float *h_kz = (float*) malloc (voxelSize);
  kdata *h_k = (kdata*) malloc (voxels * sizeof(kdata));

  float *h_rfhd = (float*) malloc (sampleSize);
  float *h_ifhd = (float*) malloc (sampleSize);
  float *h_x = (float*) malloc (sampleSize);
  float *h_y = (float*) malloc (sampleSize);
  float *h_z = (float*) malloc (sampleSize);

  // For device results
  float *rfhd = (float*) malloc (sampleSize);
  float *ifhd = (float*) malloc (sampleSize);

  srand(2);
  for (int i = 0; i < samples; i++) {
    h_rfhd[i] = (float)i/samples;
    h_ifhd[i] = (float)i/samples;
    h_x[i] = 0.3f + (rand()%2 ? 0.1 : -0.1);
    h_y[i] = 0.2f + (rand()%2 ? 0.1 : -0.1);
    h_z[i] = 0.1f + (rand()%2 ? 0.1 : -0.1);
  }

  for (int i = 0; i < voxels; i++) {
    h_rmu[i] = (float)i/voxels;
    h_imu[i] = (float)i/voxels;
    h_k[i].x = h_kx[i] = 0.1f + (rand()%2 ? 0.1 : -0.1);
    h_k[i].y = h_ky[i] = 0.2f + (rand()%2 ? 0.1 : -0.1);
    h_k[i].z = h_kz[i] = 0.3f + (rand()%2 ? 0.1 : -0.1);
  }

  printf("Run FHd on a device\n");

  float *d_rmu, *d_imu;
  float *d_rfhd, *d_ifhd;
  float *d_x, *d_y, *d_z;

  hipMalloc((void**)&d_rmu, voxelSize);
  hipMemcpy(d_rmu, h_rmu, voxelSize, hipMemcpyHostToDevice);

  hipMalloc((void**)&d_imu, voxelSize);
  hipMemcpy(d_imu, h_imu, voxelSize, hipMemcpyHostToDevice);

  hipMalloc((void**)&d_rfhd, sampleSize);
  hipMemcpy(d_rfhd, h_rfhd, sampleSize, hipMemcpyHostToDevice);

  hipMalloc((void**)&d_ifhd, sampleSize);
  hipMemcpy(d_ifhd, h_ifhd, sampleSize, hipMemcpyHostToDevice);

  hipMalloc((void**)&d_x, sampleSize);
  hipMemcpy(d_x, h_x, sampleSize, hipMemcpyHostToDevice);

  hipMalloc((void**)&d_y, sampleSize);
  hipMemcpy(d_y, h_y, sampleSize, hipMemcpyHostToDevice);

  hipMalloc((void**)&d_z, sampleSize);
  hipMemcpy(d_z, h_z, sampleSize, hipMemcpyHostToDevice);

  const int ntpb = 256;
  const int nblks = (samples + ntpb - 1) / ntpb;
  dim3 grid (nblks);
  dim3 block (ntpb);

  int c = CHUNK_S;
  int s = sizeof(kdata) * c;
  int nchunks = (voxels + c - 1) / c;

  hipDeviceSynchronize();
  auto start = std::chrono::steady_clock::now();

  for (int i = 0; i < nchunks; i++) {
    if (i == nchunks - 1) {
      c = voxels - CHUNK_S * i;
      s = sizeof(kdata) * c;
    }
    hipMemcpyToSymbol(k, &h_k[i * CHUNK_S], s);

    hipLaunchKernelGGL(( cmpfhd), dim3(grid), dim3(block), 0, 0, d_rmu + i*CHUNK_S, d_imu + i*CHUNK_S,
                                        d_rfhd, d_ifhd, d_x, d_y, d_z, samples, c);
  }

  hipDeviceSynchronize();
  auto end = std::chrono::steady_clock::now();
  auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  printf("Device execution time %f (s)\n", time * 1e-9f);

  hipMemcpy(rfhd, d_rfhd, sampleSize, hipMemcpyDeviceToHost);
  hipMemcpy(ifhd, d_ifhd, sampleSize, hipMemcpyDeviceToHost);

  if (verify) {
    printf("Computing root mean square error between host and device results.\n");
    printf("This will take a while..\n");
    #pragma omp parallel for
    for (int n = 0; n < samples; n++) {
      float r = h_rfhd[n];
      float i = h_ifhd[n];
      #pragma omp parallel for simd reduction(+:r,i)
      for (int m = 0; m < voxels; m++) {
        float e = 2.f * (float)M_PI * (h_kx[m] * h_x[n] + h_ky[m] * h_y[n] + h_kz[m] * h_z[n]);
        float c = cosf(e);
        float s = sinf(e);
        r += h_rmu[m] * c - h_imu[m] * s;
        i += h_imu[m] * c + h_rmu[m] * s;
      }
      h_rfhd[n] = r;
      h_ifhd[n] = i;
    }

    float err = 0.f;
    for (int i = 0; i < samples; i++) {
      err += (h_rfhd[i] - rfhd[i]) * (h_rfhd[i] - rfhd[i]) +
             (h_ifhd[i] - ifhd[i]) * (h_ifhd[i] - ifhd[i]) ;
    }
    printf("RMSE = %f\n", sqrtf(err / (2*samples)));
  }

  hipFree(d_rmu);
  hipFree(d_imu);
  hipFree(d_rfhd);
  hipFree(d_ifhd);
  hipFree(d_x);
  hipFree(d_y);
  hipFree(d_z);

  free(h_rmu);
  free(h_imu);
  free(h_kx);
  free(h_ky);
  free(h_kz);
  free(h_k);
  free(h_rfhd);
  free(h_ifhd);
  free(rfhd);
  free(ifhd);
  free(h_x);
  free(h_y);
  free(h_z);

  return 0;
}
930cd768a8c9a9c73ea3c225b8059e60a2c31ed4.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <hip/hip_runtime.h>

#define CHUNK_S 4096

typedef struct {
  float x, y, z;
} kdata;

__constant__ kdata k[CHUNK_S];

__global__ void cmpfhd(const float*__restrict__ rmu,
                       const float*__restrict__ imu,
                       float*__restrict__ rfhd,
                       float*__restrict__ ifhd,
                       const float*__restrict__ x,
                       const float*__restrict__ y,
                       const float*__restrict__ z,
                       const int samples,
                       const int voxels)
{
  int n = blockIdx.x * blockDim.x + threadIdx.x;
  if (n < samples) {
    float xn = x[n], yn = y[n], zn = z[n];
    float rfhdn = rfhd[n], ifhdn = ifhd[n];
    for (int m = 0; m < voxels; m++) {
      float e = 2.f * (float)M_PI * (k[m].x * xn + k[m].y * yn + k[m].z * zn);
      float c = __cosf(e);
      float s = __sinf(e);
      rfhdn += rmu[m] * c - imu[m] * s;
      ifhdn += imu[m] * c + rmu[m] * s;
    }
    rfhd[n] = rfhdn, ifhd[n] = ifhdn;
  }
}

int main(int argc, char* argv[]) {
  if (argc != 4) {
    printf("Usage: %s <#samples> <#voxels> <verify>\n", argv[0]);
    exit(1);
  }
  const int samples = atoi(argv[1]); // in the order of 100000
  const int voxels = atoi(argv[2]);  // cube(128)/2097152
  const int verify = atoi(argv[3]);

  const int sampleSize = samples * sizeof(float);
  const int voxelSize = voxels * sizeof(float);

  float *h_rmu = (float*) malloc (voxelSize);
  float *h_imu = (float*) malloc (voxelSize);
  float *h_kx = (float*) malloc (voxelSize);
  float *h_ky = (float*) malloc (voxelSize);
  float *h_kz = (float*) malloc (voxelSize);
  kdata *h_k = (kdata*) malloc (voxels * sizeof(kdata));

  float *h_rfhd = (float*) malloc (sampleSize);
  float *h_ifhd = (float*) malloc (sampleSize);
  float *h_x = (float*) malloc (sampleSize);
  float *h_y = (float*) malloc (sampleSize);
  float *h_z = (float*) malloc (sampleSize);

  // For device results
  float *rfhd = (float*) malloc (sampleSize);
  float *ifhd = (float*) malloc (sampleSize);

  srand(2);
  for (int i = 0; i < samples; i++) {
    h_rfhd[i] = (float)i/samples;
    h_ifhd[i] = (float)i/samples;
    h_x[i] = 0.3f + (rand()%2 ? 0.1 : -0.1);
    h_y[i] = 0.2f + (rand()%2 ? 0.1 : -0.1);
    h_z[i] = 0.1f + (rand()%2 ? 0.1 : -0.1);
  }

  for (int i = 0; i < voxels; i++) {
    h_rmu[i] = (float)i/voxels;
    h_imu[i] = (float)i/voxels;
    h_k[i].x = h_kx[i] = 0.1f + (rand()%2 ? 0.1 : -0.1);
    h_k[i].y = h_ky[i] = 0.2f + (rand()%2 ? 0.1 : -0.1);
    h_k[i].z = h_kz[i] = 0.3f + (rand()%2 ? 0.1 : -0.1);
  }

  printf("Run FHd on a device\n");

  float *d_rmu, *d_imu;
  float *d_rfhd, *d_ifhd;
  float *d_x, *d_y, *d_z;

  hipMalloc((void**)&d_rmu, voxelSize);
  hipMemcpy(d_rmu, h_rmu, voxelSize, hipMemcpyHostToDevice);

  hipMalloc((void**)&d_imu, voxelSize);
  hipMemcpy(d_imu, h_imu, voxelSize, hipMemcpyHostToDevice);

  hipMalloc((void**)&d_rfhd, sampleSize);
  hipMemcpy(d_rfhd, h_rfhd, sampleSize, hipMemcpyHostToDevice);

  hipMalloc((void**)&d_ifhd, sampleSize);
  hipMemcpy(d_ifhd, h_ifhd, sampleSize, hipMemcpyHostToDevice);

  hipMalloc((void**)&d_x, sampleSize);
  hipMemcpy(d_x, h_x, sampleSize, hipMemcpyHostToDevice);

  hipMalloc((void**)&d_y, sampleSize);
  hipMemcpy(d_y, h_y, sampleSize, hipMemcpyHostToDevice);

  hipMalloc((void**)&d_z, sampleSize);
  hipMemcpy(d_z, h_z, sampleSize, hipMemcpyHostToDevice);

  const int ntpb = 256;
  const int nblks = (samples + ntpb - 1) / ntpb;
  dim3 grid (nblks);
  dim3 block (ntpb);

  int c = CHUNK_S;
  int s = sizeof(kdata) * c;
  int nchunks = (voxels + c - 1) / c;

  hipDeviceSynchronize();
  auto start = std::chrono::steady_clock::now();

  for (int i = 0; i < nchunks; i++) {
    if (i == nchunks - 1) {
      c = voxels - CHUNK_S * i;
      s = sizeof(kdata) * c;
    }
    hipMemcpyToSymbol(k, &h_k[i * CHUNK_S], s);

    cmpfhd<<<grid, block>>>(d_rmu + i*CHUNK_S, d_imu + i*CHUNK_S,
                            d_rfhd, d_ifhd, d_x, d_y, d_z, samples, c);
  }

  hipDeviceSynchronize();
  auto end = std::chrono::steady_clock::now();
  auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  printf("Device execution time %f (s)\n", time * 1e-9f);

  hipMemcpy(rfhd, d_rfhd, sampleSize, hipMemcpyDeviceToHost);
  hipMemcpy(ifhd, d_ifhd, sampleSize, hipMemcpyDeviceToHost);

  if (verify) {
    printf("Computing root mean square error between host and device results.\n");
    printf("This will take a while..\n");
    #pragma omp parallel for
    for (int n = 0; n < samples; n++) {
      float r = h_rfhd[n];
      float i = h_ifhd[n];
      #pragma omp parallel for simd reduction(+:r,i)
      for (int m = 0; m < voxels; m++) {
        float e = 2.f * (float)M_PI * (h_kx[m] * h_x[n] + h_ky[m] * h_y[n] + h_kz[m] * h_z[n]);
        float c = cosf(e);
        float s = sinf(e);
        r += h_rmu[m] * c - h_imu[m] * s;
        i += h_imu[m] * c + h_rmu[m] * s;
      }
      h_rfhd[n] = r;
      h_ifhd[n] = i;
    }

    float err = 0.f;
    for (int i = 0; i < samples; i++) {
      err += (h_rfhd[i] - rfhd[i]) * (h_rfhd[i] - rfhd[i]) +
             (h_ifhd[i] - ifhd[i]) * (h_ifhd[i] - ifhd[i]) ;
    }
    printf("RMSE = %f\n", sqrtf(err / (2*samples)));
  }

  hipFree(d_rmu);
  hipFree(d_imu);
  hipFree(d_rfhd);
  hipFree(d_ifhd);
  hipFree(d_x);
  hipFree(d_y);
  hipFree(d_z);

  free(h_rmu);
  free(h_imu);
  free(h_kx);
  free(h_ky);
  free(h_kz);
  free(h_k);
  free(h_rfhd);
  free(h_ifhd);
  free(rfhd);
  free(ifhd);
  free(h_x);
  free(h_y);
  free(h_z);

  return 0;
}
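Note on the pair above: both copies already use the HIP runtime API; the change hipify introduced is the banner comment and the kernel launch, rewriting cmpfhd<<<grid, block>>>(...) into the hipLaunchKernelGGL macro with explicit shared-memory and stream arguments. A minimal sketch of that rewrite follows; the kernel and names are illustrative, not taken from the files above.

#include <hip/hip_runtime.h>

__global__ void scale(float *p, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) p[i] *= a;
}

void launch_scale(float *d_p, float a, int n) {
  dim3 block(256), grid((n + block.x - 1) / block.x);
  // Triple-chevron form: scale<<<grid, block>>>(d_p, a, n);
  // hipify rewrites it to the macro form, passing 0 bytes of dynamic shared
  // memory and the default (null) stream explicitly:
  hipLaunchKernelGGL(scale, grid, block, 0, 0, d_p, a, n);
}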
fc28bc4688a42745d5bd1314b750ba9da7c6e391.hip
// !!! This is a file automatically generated by hipify!!!
#include "hashing/CompactionHashSet.h"
#include "hashing/hash_ops.h"
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <hipcub/hipcub.hpp>
#include <cmath>

namespace hashing { namespace device {

  __global__ void markValidHashEntryKernel(
    const unsigned* hash_entry,
    const unsigned table_size,
    unsigned* valid_indicator
  ) {
    const auto idx = threadIdx.x + blockDim.x * blockIdx.x;
    if(idx < table_size) {
      unsigned valid = 0;
      if(hash_entry[idx] != EmptyKey) {
        valid = 1;
      }
      valid_indicator[idx] = valid;
    }
  }

  __global__ void buildCompactedIndexKernel(
    const unsigned* valid_indicator,
    const unsigned table_size,
    unsigned* compacted_index
  ) {
    const auto idx = threadIdx.x + blockDim.x * blockIdx.x;
    if(idx < table_size) {
      unsigned offset = 0xffffffffu;
      if(valid_indicator[idx] > 0) {
        offset = compacted_index[idx] - 1;
      }
      compacted_index[idx] = offset;
    }
  }

}; /* End of namespace device */
}; /* End of namespace hashing */

/* The default contructor */
hashing::CompactionHashSet::CompactionHashSet()
  : table(nullptr), compacted_index(nullptr), table_size(0),
    m_temp_storage(nullptr), m_valid_indicator(nullptr), m_temp_storage_bytes(0)
{
}

/* The buffer management method */
void hashing::CompactionHashSet::AllocateBuffer(const unsigned max_unique_keys, const float factor) {
  //The size for main table
  const auto max_table_size = unsigned(::ceil(factor * max_unique_keys));
  table_size = max_table_size;
  hipMalloc((void**)(&table), table_size * sizeof(unsigned));
  hipMalloc((void**)(&compacted_index), table_size * sizeof(unsigned));
  hipMalloc((void**)(&m_valid_indicator), table_size * sizeof(unsigned));

  //Query the required bytes for temp storage
  size_t required_temp_bytes = 0;
  hipcub::DeviceScan::InclusiveSum(
    NULL, required_temp_bytes,
    m_valid_indicator, compacted_index,
    table_size
  );
  m_temp_storage_bytes = required_temp_bytes;
  hipMalloc((void**)&m_temp_storage, m_temp_storage_bytes);

  //Check allocate error
  cudaSafeCall(hipDeviceSynchronize());
  cudaSafeCall(hipGetLastError());
}

void hashing::CompactionHashSet::ReleaseBuffer() {
  hipFree(table);
  hipFree(compacted_index);
  hipFree(m_temp_storage);
  hipFree(m_valid_indicator);

  //Check free error
  cudaSafeCall(hipDeviceSynchronize());
  cudaSafeCall(hipGetLastError());

  table_size = 0;
  m_temp_storage_bytes = 0;
}

void hashing::CompactionHashSet::ResetTable(hipStream_t stream) {
  cudaSafeCall(hipMemsetAsync(table, 0xff, sizeof(unsigned) * table_size, stream));
  build_hash_constants(primary_hash, step_hash);
}

/* The compaction method */
void hashing::CompactionHashSet::BuildIndex(hipStream_t stream) {
  BuildCompactedHashIndex(
    table, table_size,
    m_valid_indicator, compacted_index,
    m_temp_storage, m_temp_storage_bytes,
    stream
  );
}

void hashing::CompactionHashSet::BuildCompactedHashIndex(
  const unsigned * table_entry, unsigned table_size,
  unsigned * valid_indicator,
  unsigned* compacted_index,
  unsigned char * temp_storage, unsigned temp_stroage_bytes,
  hipStream_t stream
) {
  dim3 blk(128);
  dim3 grid(divUp(table_size, blk.x));

  //First mark it
  hipLaunchKernelGGL(( device::markValidHashEntryKernel), dim3(grid), dim3(blk), 0, stream,
    table_entry,
    table_size,
    valid_indicator
  );

  //Do a prefix sum
  size_t required_bytes = temp_stroage_bytes;
  hipcub::DeviceScan::InclusiveSum(
    (void*)temp_storage, required_bytes,
    valid_indicator, compacted_index,
    table_size, stream
  );

  //Build the compacted index
  hipLaunchKernelGGL(( device::buildCompactedIndexKernel), dim3(grid), dim3(blk), 0, stream,
    valid_indicator,
    table_size,
    compacted_index
  );
}

void hashing::CompactionHashSet::BuildCompactedIndex(
  const unsigned *valid_indicator,
  unsigned *compacted_index,
  unsigned table_size,
  unsigned char *temp_storage, unsigned temp_stroage_bytes,
  hipStream_t stream
) {
  //Do a prefix sum
  size_t required_bytes = temp_stroage_bytes;
  hipcub::DeviceScan::InclusiveSum(
    (void*)temp_storage, required_bytes,
    valid_indicator, compacted_index,
    table_size, stream
  );

  //Build the compacted index
  dim3 blk(128);
  dim3 grid(divUp(table_size, blk.x));
  hipLaunchKernelGGL(( device::buildCompactedIndexKernel), dim3(grid), dim3(blk), 0, stream,
    valid_indicator,
    table_size,
    compacted_index
  );
}
fc28bc4688a42745d5bd1314b750ba9da7c6e391.cu
#include "hashing/CompactionHashSet.h"
#include "hashing/hash_ops.h"
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <cub/cub.cuh>
#include <cmath>

namespace hashing { namespace device {

  __global__ void markValidHashEntryKernel(
    const unsigned* hash_entry,
    const unsigned table_size,
    unsigned* valid_indicator
  ) {
    const auto idx = threadIdx.x + blockDim.x * blockIdx.x;
    if(idx < table_size) {
      unsigned valid = 0;
      if(hash_entry[idx] != EmptyKey) {
        valid = 1;
      }
      valid_indicator[idx] = valid;
    }
  }

  __global__ void buildCompactedIndexKernel(
    const unsigned* valid_indicator,
    const unsigned table_size,
    unsigned* compacted_index
  ) {
    const auto idx = threadIdx.x + blockDim.x * blockIdx.x;
    if(idx < table_size) {
      unsigned offset = 0xffffffffu;
      if(valid_indicator[idx] > 0) {
        offset = compacted_index[idx] - 1;
      }
      compacted_index[idx] = offset;
    }
  }

}; /* End of namespace device */
}; /* End of namespace hashing */

/* The default contructor */
hashing::CompactionHashSet::CompactionHashSet()
  : table(nullptr), compacted_index(nullptr), table_size(0),
    m_temp_storage(nullptr), m_valid_indicator(nullptr), m_temp_storage_bytes(0)
{
}

/* The buffer management method */
void hashing::CompactionHashSet::AllocateBuffer(const unsigned max_unique_keys, const float factor) {
  //The size for main table
  const auto max_table_size = unsigned(std::ceil(factor * max_unique_keys));
  table_size = max_table_size;
  cudaMalloc((void**)(&table), table_size * sizeof(unsigned));
  cudaMalloc((void**)(&compacted_index), table_size * sizeof(unsigned));
  cudaMalloc((void**)(&m_valid_indicator), table_size * sizeof(unsigned));

  //Query the required bytes for temp storage
  size_t required_temp_bytes = 0;
  cub::DeviceScan::InclusiveSum(
    NULL, required_temp_bytes,
    m_valid_indicator, compacted_index,
    table_size
  );
  m_temp_storage_bytes = required_temp_bytes;
  cudaMalloc((void**)&m_temp_storage, m_temp_storage_bytes);

  //Check allocate error
  cudaSafeCall(cudaDeviceSynchronize());
  cudaSafeCall(cudaGetLastError());
}

void hashing::CompactionHashSet::ReleaseBuffer() {
  cudaFree(table);
  cudaFree(compacted_index);
  cudaFree(m_temp_storage);
  cudaFree(m_valid_indicator);

  //Check free error
  cudaSafeCall(cudaDeviceSynchronize());
  cudaSafeCall(cudaGetLastError());

  table_size = 0;
  m_temp_storage_bytes = 0;
}

void hashing::CompactionHashSet::ResetTable(cudaStream_t stream) {
  cudaSafeCall(cudaMemsetAsync(table, 0xff, sizeof(unsigned) * table_size, stream));
  build_hash_constants(primary_hash, step_hash);
}

/* The compaction method */
void hashing::CompactionHashSet::BuildIndex(cudaStream_t stream) {
  BuildCompactedHashIndex(
    table, table_size,
    m_valid_indicator, compacted_index,
    m_temp_storage, m_temp_storage_bytes,
    stream
  );
}

void hashing::CompactionHashSet::BuildCompactedHashIndex(
  const unsigned * table_entry, unsigned table_size,
  unsigned * valid_indicator,
  unsigned* compacted_index,
  unsigned char * temp_storage, unsigned temp_stroage_bytes,
  cudaStream_t stream
) {
  dim3 blk(128);
  dim3 grid(divUp(table_size, blk.x));

  //First mark it
  device::markValidHashEntryKernel<<<grid, blk, 0, stream>>>(
    table_entry,
    table_size,
    valid_indicator
  );

  //Do a prefix sum
  size_t required_bytes = temp_stroage_bytes;
  cub::DeviceScan::InclusiveSum(
    (void*)temp_storage, required_bytes,
    valid_indicator, compacted_index,
    table_size, stream
  );

  //Build the compacted index
  device::buildCompactedIndexKernel<<<grid, blk, 0, stream>>>(
    valid_indicator,
    table_size,
    compacted_index
  );
}

void hashing::CompactionHashSet::BuildCompactedIndex(
  const unsigned *valid_indicator,
  unsigned *compacted_index,
  unsigned table_size,
  unsigned char *temp_storage, unsigned temp_stroage_bytes,
  cudaStream_t stream
) {
  //Do a prefix sum
  size_t required_bytes = temp_stroage_bytes;
  cub::DeviceScan::InclusiveSum(
    (void*)temp_storage, required_bytes,
    valid_indicator, compacted_index,
    table_size, stream
  );

  //Build the compacted index
  dim3 blk(128);
  dim3 grid(divUp(table_size, blk.x));
  device::buildCompactedIndexKernel<<<grid, blk, 0, stream>>>(
    valid_indicator,
    table_size,
    compacted_index
  );
}
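Note on the pair above: besides the runtime renames (cudaMalloc, cudaFree, cudaMemsetAsync and the stream type to their hip equivalents), hipify maps the CUB header and namespace to hipCUB while keeping the two-phase DeviceScan call shape. A minimal sketch of that pattern on the HIP side; the function and buffer names are illustrative, not from the files above.

#include <hip/hip_runtime.h>
#include <hipcub/hipcub.hpp>

void inclusive_sum(const unsigned *d_in, unsigned *d_out, int num_items) {
  void *d_temp = nullptr;
  size_t temp_bytes = 0;
  // First call only queries the required temporary-storage size
  // (cub::DeviceScan::InclusiveSum in the CUDA copy).
  hipcub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_in, d_out, num_items);
  hipMalloc(&d_temp, temp_bytes);
  // Second call performs the scan on the default stream.
  hipcub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_in, d_out, num_items);
  hipFree(d_temp);
}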
a4060c4e0b4aeec1d70d8c9ca8a2347a6d23d3c9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <random>
#include <Windows.h>
#include <stdlib.h>

using namespace std;

__global__ void Addition(double* answer, double* mass1, double* mass2, int* n)
{
  int row = blockIdx.x * 10000 + threadIdx.x;
  int column = blockIdx.y * 10000 + threadIdx.y;
  int border = n[0] * n[0];
  answer[row * n[0] + column] = 0;
  for (int p = 0; p < n[0]; p++)
  {
    answer[row * n[0] + column] = (mass1[row * n[0] + p] + mass2[p * n[0] + column]);
  }
}

double* Addition_CPU(double* mass1, double* mass2, int* n)
{
  double* answer = new double[n[0] * n[0]];
  for (int p = 0; p < n[0]; p++)
  {
    for (int q = 0; q < n[0]; q++)
    {
      answer[p * n[0] + q] = 0;
      for (int r = 0; r < n[0]; r++)
      {
        answer[p * n[0] + q] = (mass1[(p * n[0] + r)] + mass2[r * n[0] + q]);
      }
    }
  }
  return answer;
}

void main()
{
  int e = 1;
  int start;
  int* n = new int[1];
  n[0] = 1000;
  srand((unsigned)time(NULL));

  //printf("Generating first matr\n");
  start = GetTickCount();
  double* mass1 = new double[n[0] * n[0]];
  for (int p = 0; p < n[0] * n[0]; p++)
  {
    mass1[p] = (double)rand() / (double)rand();
  }
  //printf("Matr 1 ready\n");
  //printf("Generating time: %i\n", GetTickCount() - start);

  //printf("\nGenerating second massive\n");
  double* mass2 = new double[n[0] * n[0]];
  start = GetTickCount();
  for (int p = 0; p < n[0] * n[0]; p++)
  {
    mass2[p] = (double)rand() / (double)rand();
  }
  //printf("Matr 2 ready\n");
  //printf("Generating time: %i\n\n", GetTickCount() - start);

  printf("CPU working\n");
  start = GetTickCount();
  double* answer_CPU = Addition_CPU(mass1, mass2, n);
  int CPU_time = GetTickCount() - start;
  printf("CPU compute time: %i\n\n", CPU_time);

  printf("GPU working\n");
  start = GetTickCount();
  double* cuda_answer;
  hipMalloc(&cuda_answer, sizeof(double) * n[0] * n[0]);
  double* cuda_mass1;
  hipMalloc(&cuda_mass1, sizeof(double) * n[0] * n[0]);
  hipMemcpy(cuda_mass1, mass1, sizeof(double) * n[0] * n[0], hipMemcpyHostToDevice);
  double* cuda_mass2;
  hipMalloc(&cuda_mass2, sizeof(double) * n[0] * n[0]);
  hipMemcpy(cuda_mass2, mass2, sizeof(double) * n[0] * n[0], hipMemcpyHostToDevice);
  int* cuda_n;
  hipMalloc(&cuda_n, sizeof(int));
  hipMemcpy(cuda_n, n, sizeof(int), hipMemcpyHostToDevice);
  double* answer = new double[n[0] * n[0]];

  Addition << <1,1000 >> >(cuda_answer, cuda_mass1, cuda_mass2, cuda_n);
  hipDeviceSynchronize();

  hipMemcpy(answer, cuda_answer, sizeof(double) * n[0] * n[0], hipMemcpyDeviceToHost);
  int GPU_time = GetTickCount() - start;
  printf("GPU compute time: %i\n", GPU_time);

  bool correct = true;
  for (int p = 0; p < n[0] * n[0]; p++)
  {
    if (abs(answer[p] - answer_CPU[p] > e))
    {
      correct = false;
      break;
    }
  }
  if (correct)
  {
    printf("\nAnswers are equal\n");
  }
  else
  {
    printf("\nAnswers aren't equal\n");
  }

  printf("\nCoefficient: %f\n", ((double)CPU_time / (double)GPU_time));
  scanf("%d");
}
a4060c4e0b4aeec1d70d8c9ca8a2347a6d23d3c9.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <random>
#include <Windows.h>
#include <stdlib.h>

using namespace std;

__global__ void Addition(double* answer, double* mass1, double* mass2, int* n)
{
  int row = blockIdx.x * 10000 + threadIdx.x;
  int column = blockIdx.y * 10000 + threadIdx.y;
  int border = n[0] * n[0];
  answer[row * n[0] + column] = 0;
  for (int p = 0; p < n[0]; p++)
  {
    answer[row * n[0] + column] = (mass1[row * n[0] + p] + mass2[p * n[0] + column]);
  }
}

double* Addition_CPU(double* mass1, double* mass2, int* n)
{
  double* answer = new double[n[0] * n[0]];
  for (int p = 0; p < n[0]; p++)
  {
    for (int q = 0; q < n[0]; q++)
    {
      answer[p * n[0] + q] = 0;
      for (int r = 0; r < n[0]; r++)
      {
        answer[p * n[0] + q] = (mass1[(p * n[0] + r)] + mass2[r * n[0] + q]);
      }
    }
  }
  return answer;
}

void main()
{
  int e = 1;
  int start;
  int* n = new int[1];
  n[0] = 1000;
  srand((unsigned)time(NULL));

  //printf("Generating first matr\n");
  start = GetTickCount();
  double* mass1 = new double[n[0] * n[0]];
  for (int p = 0; p < n[0] * n[0]; p++)
  {
    mass1[p] = (double)rand() / (double)rand();
  }
  //printf("Matr 1 ready\n");
  //printf("Generating time: %i\n", GetTickCount() - start);

  //printf("\nGenerating second massive\n");
  double* mass2 = new double[n[0] * n[0]];
  start = GetTickCount();
  for (int p = 0; p < n[0] * n[0]; p++)
  {
    mass2[p] = (double)rand() / (double)rand();
  }
  //printf("Matr 2 ready\n");
  //printf("Generating time: %i\n\n", GetTickCount() - start);

  printf("CPU working\n");
  start = GetTickCount();
  double* answer_CPU = Addition_CPU(mass1, mass2, n);
  int CPU_time = GetTickCount() - start;
  printf("CPU compute time: %i\n\n", CPU_time);

  printf("GPU working\n");
  start = GetTickCount();
  double* cuda_answer;
  cudaMalloc(&cuda_answer, sizeof(double) * n[0] * n[0]);
  double* cuda_mass1;
  cudaMalloc(&cuda_mass1, sizeof(double) * n[0] * n[0]);
  cudaMemcpy(cuda_mass1, mass1, sizeof(double) * n[0] * n[0], cudaMemcpyHostToDevice);
  double* cuda_mass2;
  cudaMalloc(&cuda_mass2, sizeof(double) * n[0] * n[0]);
  cudaMemcpy(cuda_mass2, mass2, sizeof(double) * n[0] * n[0], cudaMemcpyHostToDevice);
  int* cuda_n;
  cudaMalloc(&cuda_n, sizeof(int));
  cudaMemcpy(cuda_n, n, sizeof(int), cudaMemcpyHostToDevice);
  double* answer = new double[n[0] * n[0]];

  Addition << <1,1000 >> >(cuda_answer, cuda_mass1, cuda_mass2, cuda_n);
  cudaDeviceSynchronize();

  cudaMemcpy(answer, cuda_answer, sizeof(double) * n[0] * n[0], cudaMemcpyDeviceToHost);
  int GPU_time = GetTickCount() - start;
  printf("GPU compute time: %i\n", GPU_time);

  bool correct = true;
  for (int p = 0; p < n[0] * n[0]; p++)
  {
    if (abs(answer[p] - answer_CPU[p] > e))
    {
      correct = false;
      break;
    }
  }
  if (correct)
  {
    printf("\nAnswers are equal\n");
  }
  else
  {
    printf("\nAnswers aren't equal\n");
  }

  printf("\nCoefficient: %f\n", ((double)CPU_time / (double)GPU_time));
  scanf("%d");
}
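Note on the pair above: here hipify only renamed the runtime entry points (cudaMalloc, cudaMemcpy, cudaDeviceSynchronize and the memcpy-kind enums) and left the triple-chevron launch untouched, which current hipcc toolchains also accept. A minimal sketch of those 1:1 renames; the helper name, buffer, and size below are illustrative, not from the files above.

#include <hip/hip_runtime.h>

// Round-trip a host buffer through device memory using the renamed runtime calls.
void round_trip(double *host, int n) {
  double *dev = nullptr;
  hipMalloc(&dev, n * sizeof(double));                              // was cudaMalloc
  hipMemcpy(dev, host, n * sizeof(double), hipMemcpyHostToDevice);  // was cudaMemcpy, cudaMemcpyHostToDevice
  hipDeviceSynchronize();                                           // was cudaDeviceSynchronize
  hipMemcpy(host, dev, n * sizeof(double), hipMemcpyDeviceToHost);  // was cudaMemcpyDeviceToHost
  hipFree(dev);                                                     // was cudaFree
}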
54e826b95f87f20c379758d313b9caeddf0cdfbb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathMagma.cu" #else #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) #ifdef USE_MAGMA static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, real *src, int k) { int64_t size[1] = { k }; int64_t stride[1] = { 1 }; THCTensor_(resizeNd)(state, self, 1, size, stride); size_t len = k * sizeof(real); THCudaCheck(hipMemcpy(THCStorage_(data)(state, self->storage) + self->storageOffset, src, len, hipMemcpyHostToDevice)); } static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, real *src, int m, int n) { int64_t size[2] = { m, n }; int64_t stride[2] = { 1, m }; THCTensor_(resizeNd)(state, self, 2, size, stride); size_t len = m * n * sizeof(real); THCudaCheck(hipMemcpy(THCStorage_(data)(state, self->storage) + self->storageOffset, src, len, hipMemcpyHostToDevice)); } static void THCTensor_(copyTensor2d)(THCState *state, real *dst, THCTensor *self) { THAssert(self->_dim() == 2); size_t len = THCTensor_(nElement)(state, self)*sizeof(real); THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1); THCTensor *selfc = THCTensor_(newContiguous)(state, temp); THCudaCheck(hipMemcpy(dst, THCStorage_(data)(state, selfc->storage) + selfc->storageOffset, len, hipMemcpyDeviceToHost)); THCTensor_(free)(state, temp); THCTensor_(free)(state, selfc); } #endif // USE_MAGMA static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src) { THAssert(src->_dim() == 2); if (self == src && self->stride(0) == 1 && self->stride(1) == self->size(0)) { THCTensor_(retain)(state, self); return self; } if (self == src) self = THCTensor_(new)(state); else THCTensor_(retain)(state, self); int64_t size[2] = { src->size(0), src->size(1) }; int64_t stride[2] = { 1, src->size(0) }; THCTensor_(resizeNd)(state, self, 2, size, stride); THCTensor_(copy)(state, self, src); return self; } THC_API void THCTensor_(gesv)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional"); THArgCheck(!b_->is_empty() && b_->dim() == 2, 2, "b should be (non-empty) 2 dimensional"); THArgCheck(a_->size(0) == a_->size(1), 1, "A should be square"); THArgCheck(b_->size(0) == a_->size(0), 2, "A,b size incompatible"); int64_t n = a_->size(0); int64_t nrhs = b_->size(1); THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_); real *a_data = THCTensor_(data)(state, a); real *b_data = THCTensor_(data)(state, b); int *ipiv = th_magma_malloc_pinned<int>(n); int info; #if defined(THC_REAL_IS_FLOAT) magma_sgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info); #else magma_dgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info); #endif if (info < 0) THError("MAGMA gesv : Argument %d : illegal value", -info); else if (info > 0) THError("MAGMA gesv : U(%d,%d) is zero, singular U.", info, info); magma_free_pinned(ipiv); THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(freeCopyTo)(state, b, rb_); #else THError(NoMagma(gesv)); #endif } THC_API void THCTensor_(trtrs)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_, const char *uplo, const char *trans, const char *diag) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional"); THArgCheck(!b_->is_empty() && b_->dim() == 2, 2, "b 
should be (non-empty) 2 dimensional"); THArgCheck(a_->size(0) == a_->size(1), 1, "A should be square"); THArgCheck(b_->size(0) == a_->size(0), 2, "A,b size incompatible"); magma_side_t sz = MagmaLeft; magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower; magma_trans_t ts = trans[0] == 'N' ? MagmaNoTrans : MagmaTrans; magma_diag_t dg = diag[0] == 'U' ? MagmaUnit : MagmaNonUnit; real alpha = 1; int64_t n = a_->size(0); int64_t nrhs = b_->size(1); THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_); real *a_data = THCTensor_(data)(state, a); real *b_data = THCTensor_(data)(state, b); #if defined(THC_REAL_IS_FLOAT) magma_strsm(sz, ul, ts, dg, n, nrhs, alpha, a_data, n, b_data, n); #else magma_dtrsm(sz, ul, ts, dg, n, nrhs, alpha, a_data, n, b_data, n); #endif THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(freeCopyTo)(state, b, rb_); #else THError(NoMagma(trtrs)); #endif } THC_API void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional"); THArgCheck(!b_->is_empty() && b_->dim() == 2, 1, "b should be (non-empty) 2 dimensional"); THArgCheck(a_->size(0) == b_->size(0), 2, "Expected A and b to have same size " "at dim 0, but they have incompatible sizes"); THArgCheck(a_->size(0) >= a_->size(1), 2, "Expected A with shape (m x n) to have " "m >= n. The case for m < n is not implemented yet."); THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_); real *a_data = THCTensor_(data)(state, a); real *b_data = THCTensor_(data)(state, b); int64_t m = a->size(0); int64_t n = a->size(1); int64_t nrhs = b->size(1); real wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info); #else magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info); #endif real *hwork = th_magma_malloc_pinned<real>((size_t)wkopt); #if defined(THC_REAL_IS_FLOAT) magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info); #else magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info); #endif magma_free_pinned(hwork); if (info != 0) THError("MAGMA gels : Argument %d : illegal value", -info); THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(freeCopyTo)(state, b, rb_); #else THError(NoMagma(gels)); #endif } THC_API void THCTensor_(syev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a, const char *jobzs, const char *uplos) { #ifdef USE_MAGMA int64_t n = a->size(0); int64_t lda = n; magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower; magma_vec_t jobz = jobzs[0] == 'N' ? 
MagmaNoVec : MagmaVec; THCTensor *input = THCTensor_(newColumnMajor)(state, rv_, a); real *input_data = THCTensor_(data)(state, input); // eigen values and workspace real *w = th_magma_malloc_pinned<real>(n); real *wA = th_magma_malloc_pinned<real>(lda * n); // compute optimal size of work array int info; real lwork; int liwork; #if defined(THC_REAL_IS_FLOAT) magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info); #else magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info); #endif real *work = th_magma_malloc_pinned<real>((size_t)lwork); int *iwork = th_magma_malloc_pinned<int>(liwork); // compute eigenvalues and, optionally, eigenvectors #if defined(THC_REAL_IS_FLOAT) magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info); #else magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info); #endif // copy eigen values from w to re_ if (info == 0) THCTensor_(copyArray1d)(state, re_, w, n); magma_free_pinned(iwork); magma_free_pinned(work); magma_free_pinned(wA); magma_free_pinned(w); // check error value if (info > 0) THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info); else if (info < 0) THError("MAGMA syev : Argument %d : illegal value", -info); THCTensor_(freeCopyTo)(state, input, rv_); #else THError(NoMagma(syev)); #endif } THC_API void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a_, const char *jobvrs) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 3, "A should be (non-empty) 2 dimensional"); THArgCheck(a_->size(0) == a_->size(1), 3, "A should be square"); magma_vec_t jobvr = jobvrs[0] == 'N' ? MagmaNoVec : MagmaVec; int64_t n = a_->size(0); real *a_data = th_magma_malloc_pinned<real>(n * n); THCTensor_(copyTensor2d)(state, a_data, a_); real *wr = th_magma_malloc_pinned<real>(n); real *wi = th_magma_malloc_pinned<real>(n); real *vr_data = NULL; int64_t ldvr = 1; if (jobvr == MagmaVec) { vr_data = th_magma_malloc_pinned<real>(n * n); ldvr = n; } real wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info); #else magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info); #endif int lwork = (int) wkopt; real *work_data = th_magma_malloc_pinned<real>(lwork); #if defined(THC_REAL_IS_FLOAT) magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info); #else magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info); #endif if (info > 0) THError("MAGMA geev : Failed to converge. 
%d off-diagonal elements of an didn't converge to zero", info); else if (info < 0) THError("MAGMA geev : Argument %d : illegal value", -info); { THCTensor_(resize2d)(state, re_, 2, n); THCTensor *re = THCTensor_(newContiguous)(state, re_); THCudaCheck(hipMemcpy(THCStorage_(data)(state, re->storage) + re->storageOffset, wr, n*sizeof(real), hipMemcpyHostToDevice)); THCudaCheck(hipMemcpy(THCStorage_(data)(state, re->storage) + re->storageOffset + n, wi, n*sizeof(real), hipMemcpyHostToDevice)); THCTensor_(freeCopyTo)(state, re, re_); THCTensor_(transpose)(state, re_, NULL, 0, 1); } if (jobvr == MagmaVec) THCTensor_(copyArray2d)(state, rv_, vr_data, n, n); magma_free_pinned(work_data); magma_free_pinned(vr_data); magma_free_pinned(wi); magma_free_pinned(wr); magma_free_pinned(a_data); #else THError(NoMagma(geev)); #endif } THC_API void THCTensor_(gesvd)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *a, const char *jobu) { #ifdef USE_MAGMA THCTensor *ra_ = THCTensor_(new)(state); THCTensor_(gesvd2)(state, ru_, rs_, rv_, ra_, a, jobu); THCTensor_(free)(state, ra_); #else THError(NoMagma(gesvd)); #endif } THC_API void THCTensor_(gesvd2)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *ra_, THCTensor *a, const char *jobus) { #ifdef USE_MAGMA THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional"); magma_vec_t jobz = jobus[0] == 'A' ? MagmaAllVec : jobus[0] == 'S' ? MagmaSomeVec : jobus[0] == 'O' ? MagmaOverwriteVec : MagmaNoVec; int iunused[1]; int64_t m = a->size(0); int64_t n = a->size(1); int64_t k = m < n ? m : n; int64_t j = (jobz == MagmaAllVec) ? m : k; int64_t jv = (jobz == MagmaAllVec) ? n : k; real *a_data = th_magma_malloc_pinned<real>(m * n); THCTensor_(copyTensor2d)(state, a_data, a); real *rs_data = th_magma_malloc_pinned<real>(k); real *ru_data = th_magma_malloc_pinned<real>(m * j); real *rv_data = th_magma_malloc_pinned<real>(n * n); real wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info); #else magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info); #endif int lwork = (int) wkopt; real *work_data = th_magma_malloc_pinned<real>(lwork); int *iwork = th_magma_malloc_pinned<int>(8 * k); #if defined(THC_REAL_IS_FLOAT) magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info); #else magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info); #endif if (info > 0) THError("MAGMA gesdd : the updating process of SBDSDC did not converge (error: %d)", info); else if (info < 0) THError("MAGMA gesdd : Argument %d : illegal value", -info); THCTensor_(copyArray2d)(state, rv_, rv_data, n, n); THCTensor_(transpose)(state, rv_, NULL, 0, 1); if (jobz != MagmaAllVec) THCTensor_(narrow)(state, rv_, rv_, 1, 0, jv); THCTensor_(copyArray2d)(state, ru_, ru_data, m, j); THCTensor_(copyArray1d)(state, rs_, rs_data, k); THCTensor_(copyArray2d)(state, ra_, a_data, m, n); magma_free_pinned(work_data); magma_free_pinned(iwork); magma_free_pinned(rv_data); magma_free_pinned(ru_data); magma_free_pinned(rs_data); magma_free_pinned(a_data); #else THError(NoMagma(gesvd2)); #endif } THC_API void THCTensor_(getri)(THCState *state, THCTensor *ra_, THCTensor *a) { THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional"); THArgCheck(a->size(0) == a->size(1), 2, "A should be square"); #ifdef 
USE_MAGMA int info; int64_t n = a->size(0); int lwork = n * magma_get_sgetri_nb(n); THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); real *input_data = THCTensor_(data)(state, input); int *ipiv = th_magma_malloc_pinned<int>(n); THCTensor *work = THCTensor_(newWithSize1d)(state, lwork); real *work_data = THCTensor_(data)(state, work); // Run LU #if defined(THC_REAL_IS_FLOAT) magma_sgetrf_gpu(n, n, input_data, n, ipiv, &info); #else magma_dgetrf_gpu(n, n, input_data, n, ipiv, &info); #endif if (info > 0) THError("MAGMA getrf : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("MAGMA getrf : Argument %d : illegal value", -info); // Inverse #if defined(THC_REAL_IS_FLOAT) magma_sgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info); #else magma_dgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info); #endif if (info > 0) THError("MAGMA getri : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("MAGMA getri : Argument %d : illegal value", -info); THCTensor_(free)(state, work); magma_free_pinned(ipiv); THCTensor_(freeCopyTo)(state, input, ra_); #else int64_t n = a->size(0); // input THCTensor *input = THCTensor_(newColumnMajor)(state, a, a); THCTensor_(resizeNd)(state, ra_, 2, THTensor_getSizePtr(input), THTensor_getStridePtr(input)); real *matrices1[1] = { THCTensor_(data)(state, input) }; real *matrices2[1] = { THCTensor_(data)(state, ra_) }; // Copy pointers to device. auto d_matrices1 = static_cast<real**>(THCudaMalloc(state, sizeof(real*))); auto d_matrices2 = static_cast<real**>(THCudaMalloc(state, sizeof(real*))); THCudaCheck(hipMemcpyAsync(d_matrices1, matrices1, sizeof(real*), hipMemcpyHostToDevice, THCState_getCurrentStream(state))); THCudaCheck(hipMemcpyAsync(d_matrices2, matrices2, sizeof(real*), hipMemcpyHostToDevice, THCState_getCurrentStream(state))); int info; auto info_gpu = static_cast<int*>(THCudaMalloc(state, sizeof(int))); auto ipiv_gpu = static_cast<int*>(THCudaMalloc(state, n * sizeof(int))); // Run LU #if defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1); #else THCudaBlas_Dgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1); #endif THCudaCheck(hipMemcpy(&info, info_gpu, sizeof(int), hipMemcpyDeviceToHost)); if (info > 0) THError("CUBLAS getrf : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("CUBLAS getrf : Argument %d : illegal value", -info); // Inverse #if defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); #else THCudaBlas_Dgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); #endif THCudaCheck(hipMemcpy(&info, info_gpu, sizeof(int), hipMemcpyDeviceToHost)); if (info > 0) THError("CUBLAS getri : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("CUBLAS getri : Argument %d : illegal value", -info); THCudaFree(state, ipiv_gpu); THCudaFree(state, info_gpu); THCudaFree(state, d_matrices1); THCudaFree(state, d_matrices2); THCTensor_(free)(state, input); #endif } __global__ void THCTensor_(copyUpperSymmetric)(real *input, int n, int len) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) { const int r = idx % n; const int c = idx / n; if (r > c) { input[idx] = input[r*n + c]; } } } __global__ void THCTensor_(copyLowerSymmetric)(real *input, int n, int len) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) { const int r = idx % n; const int c = idx / n; if (r < 
c) { input[idx] = input[r*n + c]; } } } THC_API void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo) { #ifdef USE_MAGMA THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional"); THArgCheck(a->size(0) == a->size(1), 2, "A should be square"); int64_t n = a->size(0); magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower; THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); real *input_data = THCTensor_(data)(state, input); int info; #if defined(THC_REAL_IS_FLOAT) magma_spotri_gpu(ul, n, input_data, n, &info); #else magma_dpotri_gpu(ul, n, input_data, n, &info); #endif if (info > 0) THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info); else if (info < 0) THError("MAGMA potri : Argument %d : illegal value", -info); hipStream_t stream = THCState_getCurrentStream(state); const int len = n*n; dim3 blocks(::min(DIVUP(len, 128), 65535)); dim3 threads(128); if (uplo[0] == 'U') { hipLaunchKernelGGL(( THCTensor_(copyUpperSymmetric)), dim3(blocks), dim3(threads), 0, stream, input_data, n, len); } else { hipLaunchKernelGGL(( THCTensor_(copyLowerSymmetric)), dim3(blocks), dim3(threads), 0, stream, input_data, n, len); } THCTensor_(freeCopyTo)(state, input, ra_); #else THError(NoMagma(potri)); #endif } THC_API void THCTensor_(potrf)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo) { #ifdef USE_MAGMA THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be (non-empty) 2 dimensional"); THArgCheck(a->size(0) == a->size(1), 2, "A should be square"); int64_t n = a->size(0); magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower; THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); real *input_data = THCTensor_(data)(state, input); int info; #if defined(THC_REAL_IS_FLOAT) magma_spotrf_gpu(ul, n, input_data, n, &info); #else magma_dpotrf_gpu(ul, n, input_data, n, &info); #endif // check error value if (info > 0) THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info); else if (info < 0) THError("MAGMA potrf : Argument %d : illegal value", -info); if (uplo[0] == 'U') { THCTensor_(triu)(state, ra_, input, 0); } else { THCTensor_(tril)(state, ra_, input, 0); } THCTensor_(free)(state, input); #else THError(NoMagma(potrf)); #endif } THC_API void THCTensor_(potrs)(THCState *state, THCTensor *rb_, THCTensor *b, THCTensor *a, const char *uplo) { #ifdef USE_MAGMA THArgCheck(a->size(0) == a->size(1), 2, "A should be square"); int64_t n = a->size(0); int64_t nrhs = b->size(1); magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower; THCTensor *b_ = THCTensor_(newColumnMajor)(state, rb_, b); real *b_data = THCTensor_(data)(state, b_); THCTensor *a_ = THCTensor_(newColumnMajor)(state, a, a); real *a_data = THCTensor_(data)(state, a_); int info; #if defined(THC_REAL_IS_FLOAT) magma_spotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info); #else magma_dpotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info); #endif // check error value if (info < 0) THError("MAGMA potrs : Argument %d : illegal value", -info); THCTensor_(freeCopyTo)(state, b_, rb_); THCTensor_(free)(state, a_); #else THError(NoMagma(potrs)); #endif } THC_API void THCTensor_(geqrf)(THCState *state, THCTensor *ra_, THCTensor *rtau_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 2, "A should be non-empty 2 dimensional"); THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); int64_t m = a->size(0); int64_t n = a->size(1); int64_t k = (m < n ? 
m : n); #if defined(THC_REAL_IS_FLOAT) int64_t nb = magma_get_sgeqrf_nb(m, n); #else int64_t nb = magma_get_dgeqrf_nb(m, n); #endif real *rtau_data = th_magma_malloc_pinned<real>(k); real *a_data = THCTensor_(data)(state, a); int info; #if defined(THC_REAL_IS_FLOAT) magma_sgeqrf2_gpu(m, n, a_data, m, rtau_data, &info); #else magma_dgeqrf2_gpu(m, n, a_data, m, rtau_data, &info); #endif if (info != 0) THError("MAGMA geqrf2 : Argument %d : illegal value.", -info); THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(copyArray1d)(state, rtau_, rtau_data, k); magma_free_pinned(rtau_data); #else THError(NoMagma(geqrf)); #endif } THC_API void THCTensor_(qr)(THCState *state, THCTensor *rq_, THCTensor *rr_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 2, "A should be non-empty 2 dimensional"); THCTensor *a = THCTensor_(newColumnMajor)(state, rr_, a_); int64_t m = a->size(0); int64_t n = a->size(1); int64_t k = (m < n ? m : n); #if defined(THC_REAL_IS_FLOAT) int64_t nb = magma_get_sgeqrf_nb(m, n); #else int64_t nb = magma_get_dgeqrf_nb(m, n); #endif real *a_data = THCTensor_(data)(state, a); real *tau_data = th_magma_malloc_pinned<real>(k); THCTensor *work = THCTensor_(newWithSize1d)(state, (2*k + magma_roundup(n, 32))*nb); real *work_data = THCTensor_(data)(state, work); int info; // We need to call two different versions of ?geqrf: // ?geqrf_gpu allows fast computation of Q via ?orqrf_gpu, but doesn't give // R properly. Note that the MAGMA documentation for this method is wrong. // http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800 // ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orqrf_gpu #if defined(THC_REAL_IS_FLOAT) magma_sgeqrf2_gpu(m, n, a_data, m, tau_data, &info); #else magma_dgeqrf2_gpu(m, n, a_data, m, tau_data, &info); #endif if (info != 0) THError("MAGMA geqrf2 : Argument %d : illegal value.", -info); THCTensor_(narrow)(state, a, a, 0, 0, k); THCTensor_(triu)(state, rr_, a, 0); THCTensor_(free)(state, a); a = THCTensor_(newColumnMajor)(state, rq_, a_); a_data = THCTensor_(data)(state, a); #if defined(THC_REAL_IS_FLOAT) magma_sgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info); #else magma_dgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info); #endif if (info != 0) THError("MAGMA geqrf : Argument %d : illegal value.", -info); THCTensor *q = THCTensor_(newColumnMajor)(state, rq_, a); real *q_data = THCTensor_(data)(state, q); #if defined(THC_REAL_IS_FLOAT) magma_sorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info); #else magma_dorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info); #endif if (info != 0) THError("MAGMA orgqr : Argument %d : illegal value.", -info); THCTensor_(free)(state, a); THCTensor_(free)(state, work); magma_free_pinned(tau_data); THCTensor_(narrow)(state, q, q, 1, 0, k); THCTensor_(freeCopyTo)(state, q, rq_); #else THError(NoMagma(qr)); #endif } #endif #endif
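Note on the file above: the MAGMA host calls (magma_sgesv_gpu, magma_dgeqrf_gpu, and so on) are left untouched by hipify; only the CUDA runtime calls around them are renamed, including the stream-carrying copies wrapped in the THCudaCheck macro. A minimal sketch of the stream-aware rename; the function name is illustrative, not from the file above.

#include <hip/hip_runtime.h>
#include <cstddef>

// CUDA original: cudaMemcpyAsync(dst, src, bytes, cudaMemcpyHostToDevice, stream);
inline hipError_t copy_h2d_async(void *dst, const void *src, size_t bytes,
                                 hipStream_t stream) {
  return hipMemcpyAsync(dst, src, bytes, hipMemcpyHostToDevice, stream);
}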
54e826b95f87f20c379758d313b9caeddf0cdfbb.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathMagma.cu" #else #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) #ifdef USE_MAGMA static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, real *src, int k) { int64_t size[1] = { k }; int64_t stride[1] = { 1 }; THCTensor_(resizeNd)(state, self, 1, size, stride); size_t len = k * sizeof(real); THCudaCheck(cudaMemcpy(THCStorage_(data)(state, self->storage) + self->storageOffset, src, len, cudaMemcpyHostToDevice)); } static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, real *src, int m, int n) { int64_t size[2] = { m, n }; int64_t stride[2] = { 1, m }; THCTensor_(resizeNd)(state, self, 2, size, stride); size_t len = m * n * sizeof(real); THCudaCheck(cudaMemcpy(THCStorage_(data)(state, self->storage) + self->storageOffset, src, len, cudaMemcpyHostToDevice)); } static void THCTensor_(copyTensor2d)(THCState *state, real *dst, THCTensor *self) { THAssert(self->_dim() == 2); size_t len = THCTensor_(nElement)(state, self)*sizeof(real); THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1); THCTensor *selfc = THCTensor_(newContiguous)(state, temp); THCudaCheck(cudaMemcpy(dst, THCStorage_(data)(state, selfc->storage) + selfc->storageOffset, len, cudaMemcpyDeviceToHost)); THCTensor_(free)(state, temp); THCTensor_(free)(state, selfc); } #endif // USE_MAGMA static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src) { THAssert(src->_dim() == 2); if (self == src && self->stride(0) == 1 && self->stride(1) == self->size(0)) { THCTensor_(retain)(state, self); return self; } if (self == src) self = THCTensor_(new)(state); else THCTensor_(retain)(state, self); int64_t size[2] = { src->size(0), src->size(1) }; int64_t stride[2] = { 1, src->size(0) }; THCTensor_(resizeNd)(state, self, 2, size, stride); THCTensor_(copy)(state, self, src); return self; } THC_API void THCTensor_(gesv)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional"); THArgCheck(!b_->is_empty() && b_->dim() == 2, 2, "b should be (non-empty) 2 dimensional"); THArgCheck(a_->size(0) == a_->size(1), 1, "A should be square"); THArgCheck(b_->size(0) == a_->size(0), 2, "A,b size incompatible"); int64_t n = a_->size(0); int64_t nrhs = b_->size(1); THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_); real *a_data = THCTensor_(data)(state, a); real *b_data = THCTensor_(data)(state, b); int *ipiv = th_magma_malloc_pinned<int>(n); int info; #if defined(THC_REAL_IS_FLOAT) magma_sgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info); #else magma_dgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info); #endif if (info < 0) THError("MAGMA gesv : Argument %d : illegal value", -info); else if (info > 0) THError("MAGMA gesv : U(%d,%d) is zero, singular U.", info, info); magma_free_pinned(ipiv); THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(freeCopyTo)(state, b, rb_); #else THError(NoMagma(gesv)); #endif } THC_API void THCTensor_(trtrs)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_, const char *uplo, const char *trans, const char *diag) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional"); THArgCheck(!b_->is_empty() && b_->dim() == 2, 2, "b should be (non-empty) 2 dimensional"); THArgCheck(a_->size(0) == a_->size(1), 1, "A 
should be square"); THArgCheck(b_->size(0) == a_->size(0), 2, "A,b size incompatible"); magma_side_t sz = MagmaLeft; magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower; magma_trans_t ts = trans[0] == 'N' ? MagmaNoTrans : MagmaTrans; magma_diag_t dg = diag[0] == 'U' ? MagmaUnit : MagmaNonUnit; real alpha = 1; int64_t n = a_->size(0); int64_t nrhs = b_->size(1); THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_); real *a_data = THCTensor_(data)(state, a); real *b_data = THCTensor_(data)(state, b); #if defined(THC_REAL_IS_FLOAT) magma_strsm(sz, ul, ts, dg, n, nrhs, alpha, a_data, n, b_data, n); #else magma_dtrsm(sz, ul, ts, dg, n, nrhs, alpha, a_data, n, b_data, n); #endif THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(freeCopyTo)(state, b, rb_); #else THError(NoMagma(trtrs)); #endif } THC_API void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional"); THArgCheck(!b_->is_empty() && b_->dim() == 2, 1, "b should be (non-empty) 2 dimensional"); THArgCheck(a_->size(0) == b_->size(0), 2, "Expected A and b to have same size " "at dim 0, but they have incompatible sizes"); THArgCheck(a_->size(0) >= a_->size(1), 2, "Expected A with shape (m x n) to have " "m >= n. The case for m < n is not implemented yet."); THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_); real *a_data = THCTensor_(data)(state, a); real *b_data = THCTensor_(data)(state, b); int64_t m = a->size(0); int64_t n = a->size(1); int64_t nrhs = b->size(1); real wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info); #else magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info); #endif real *hwork = th_magma_malloc_pinned<real>((size_t)wkopt); #if defined(THC_REAL_IS_FLOAT) magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info); #else magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info); #endif magma_free_pinned(hwork); if (info != 0) THError("MAGMA gels : Argument %d : illegal value", -info); THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(freeCopyTo)(state, b, rb_); #else THError(NoMagma(gels)); #endif } THC_API void THCTensor_(syev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a, const char *jobzs, const char *uplos) { #ifdef USE_MAGMA int64_t n = a->size(0); int64_t lda = n; magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower; magma_vec_t jobz = jobzs[0] == 'N' ? 
MagmaNoVec : MagmaVec; THCTensor *input = THCTensor_(newColumnMajor)(state, rv_, a); real *input_data = THCTensor_(data)(state, input); // eigen values and workspace real *w = th_magma_malloc_pinned<real>(n); real *wA = th_magma_malloc_pinned<real>(lda * n); // compute optimal size of work array int info; real lwork; int liwork; #if defined(THC_REAL_IS_FLOAT) magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info); #else magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info); #endif real *work = th_magma_malloc_pinned<real>((size_t)lwork); int *iwork = th_magma_malloc_pinned<int>(liwork); // compute eigenvalues and, optionally, eigenvectors #if defined(THC_REAL_IS_FLOAT) magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info); #else magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info); #endif // copy eigen values from w to re_ if (info == 0) THCTensor_(copyArray1d)(state, re_, w, n); magma_free_pinned(iwork); magma_free_pinned(work); magma_free_pinned(wA); magma_free_pinned(w); // check error value if (info > 0) THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info); else if (info < 0) THError("MAGMA syev : Argument %d : illegal value", -info); THCTensor_(freeCopyTo)(state, input, rv_); #else THError(NoMagma(syev)); #endif } THC_API void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a_, const char *jobvrs) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 3, "A should be (non-empty) 2 dimensional"); THArgCheck(a_->size(0) == a_->size(1), 3, "A should be square"); magma_vec_t jobvr = jobvrs[0] == 'N' ? MagmaNoVec : MagmaVec; int64_t n = a_->size(0); real *a_data = th_magma_malloc_pinned<real>(n * n); THCTensor_(copyTensor2d)(state, a_data, a_); real *wr = th_magma_malloc_pinned<real>(n); real *wi = th_magma_malloc_pinned<real>(n); real *vr_data = NULL; int64_t ldvr = 1; if (jobvr == MagmaVec) { vr_data = th_magma_malloc_pinned<real>(n * n); ldvr = n; } real wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info); #else magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info); #endif int lwork = (int) wkopt; real *work_data = th_magma_malloc_pinned<real>(lwork); #if defined(THC_REAL_IS_FLOAT) magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info); #else magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info); #endif if (info > 0) THError("MAGMA geev : Failed to converge. 
%d off-diagonal elements of an didn't converge to zero", info); else if (info < 0) THError("MAGMA geev : Argument %d : illegal value", -info); { THCTensor_(resize2d)(state, re_, 2, n); THCTensor *re = THCTensor_(newContiguous)(state, re_); THCudaCheck(cudaMemcpy(THCStorage_(data)(state, re->storage) + re->storageOffset, wr, n*sizeof(real), cudaMemcpyHostToDevice)); THCudaCheck(cudaMemcpy(THCStorage_(data)(state, re->storage) + re->storageOffset + n, wi, n*sizeof(real), cudaMemcpyHostToDevice)); THCTensor_(freeCopyTo)(state, re, re_); THCTensor_(transpose)(state, re_, NULL, 0, 1); } if (jobvr == MagmaVec) THCTensor_(copyArray2d)(state, rv_, vr_data, n, n); magma_free_pinned(work_data); magma_free_pinned(vr_data); magma_free_pinned(wi); magma_free_pinned(wr); magma_free_pinned(a_data); #else THError(NoMagma(geev)); #endif } THC_API void THCTensor_(gesvd)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *a, const char *jobu) { #ifdef USE_MAGMA THCTensor *ra_ = THCTensor_(new)(state); THCTensor_(gesvd2)(state, ru_, rs_, rv_, ra_, a, jobu); THCTensor_(free)(state, ra_); #else THError(NoMagma(gesvd)); #endif } THC_API void THCTensor_(gesvd2)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *ra_, THCTensor *a, const char *jobus) { #ifdef USE_MAGMA THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional"); magma_vec_t jobz = jobus[0] == 'A' ? MagmaAllVec : jobus[0] == 'S' ? MagmaSomeVec : jobus[0] == 'O' ? MagmaOverwriteVec : MagmaNoVec; int iunused[1]; int64_t m = a->size(0); int64_t n = a->size(1); int64_t k = m < n ? m : n; int64_t j = (jobz == MagmaAllVec) ? m : k; int64_t jv = (jobz == MagmaAllVec) ? n : k; real *a_data = th_magma_malloc_pinned<real>(m * n); THCTensor_(copyTensor2d)(state, a_data, a); real *rs_data = th_magma_malloc_pinned<real>(k); real *ru_data = th_magma_malloc_pinned<real>(m * j); real *rv_data = th_magma_malloc_pinned<real>(n * n); real wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info); #else magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info); #endif int lwork = (int) wkopt; real *work_data = th_magma_malloc_pinned<real>(lwork); int *iwork = th_magma_malloc_pinned<int>(8 * k); #if defined(THC_REAL_IS_FLOAT) magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info); #else magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info); #endif if (info > 0) THError("MAGMA gesdd : the updating process of SBDSDC did not converge (error: %d)", info); else if (info < 0) THError("MAGMA gesdd : Argument %d : illegal value", -info); THCTensor_(copyArray2d)(state, rv_, rv_data, n, n); THCTensor_(transpose)(state, rv_, NULL, 0, 1); if (jobz != MagmaAllVec) THCTensor_(narrow)(state, rv_, rv_, 1, 0, jv); THCTensor_(copyArray2d)(state, ru_, ru_data, m, j); THCTensor_(copyArray1d)(state, rs_, rs_data, k); THCTensor_(copyArray2d)(state, ra_, a_data, m, n); magma_free_pinned(work_data); magma_free_pinned(iwork); magma_free_pinned(rv_data); magma_free_pinned(ru_data); magma_free_pinned(rs_data); magma_free_pinned(a_data); #else THError(NoMagma(gesvd2)); #endif } THC_API void THCTensor_(getri)(THCState *state, THCTensor *ra_, THCTensor *a) { THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional"); THArgCheck(a->size(0) == a->size(1), 2, "A should be square"); 
#ifdef USE_MAGMA int info; int64_t n = a->size(0); int lwork = n * magma_get_sgetri_nb(n); THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); real *input_data = THCTensor_(data)(state, input); int *ipiv = th_magma_malloc_pinned<int>(n); THCTensor *work = THCTensor_(newWithSize1d)(state, lwork); real *work_data = THCTensor_(data)(state, work); // Run LU #if defined(THC_REAL_IS_FLOAT) magma_sgetrf_gpu(n, n, input_data, n, ipiv, &info); #else magma_dgetrf_gpu(n, n, input_data, n, ipiv, &info); #endif if (info > 0) THError("MAGMA getrf : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("MAGMA getrf : Argument %d : illegal value", -info); // Inverse #if defined(THC_REAL_IS_FLOAT) magma_sgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info); #else magma_dgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info); #endif if (info > 0) THError("MAGMA getri : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("MAGMA getri : Argument %d : illegal value", -info); THCTensor_(free)(state, work); magma_free_pinned(ipiv); THCTensor_(freeCopyTo)(state, input, ra_); #else int64_t n = a->size(0); // input THCTensor *input = THCTensor_(newColumnMajor)(state, a, a); THCTensor_(resizeNd)(state, ra_, 2, THTensor_getSizePtr(input), THTensor_getStridePtr(input)); real *matrices1[1] = { THCTensor_(data)(state, input) }; real *matrices2[1] = { THCTensor_(data)(state, ra_) }; // Copy pointers to device. auto d_matrices1 = static_cast<real**>(THCudaMalloc(state, sizeof(real*))); auto d_matrices2 = static_cast<real**>(THCudaMalloc(state, sizeof(real*))); THCudaCheck(cudaMemcpyAsync(d_matrices1, matrices1, sizeof(real*), cudaMemcpyHostToDevice, THCState_getCurrentStream(state))); THCudaCheck(cudaMemcpyAsync(d_matrices2, matrices2, sizeof(real*), cudaMemcpyHostToDevice, THCState_getCurrentStream(state))); int info; auto info_gpu = static_cast<int*>(THCudaMalloc(state, sizeof(int))); auto ipiv_gpu = static_cast<int*>(THCudaMalloc(state, n * sizeof(int))); // Run LU #if defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1); #else THCudaBlas_Dgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1); #endif THCudaCheck(cudaMemcpy(&info, info_gpu, sizeof(int), cudaMemcpyDeviceToHost)); if (info > 0) THError("CUBLAS getrf : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("CUBLAS getrf : Argument %d : illegal value", -info); // Inverse #if defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); #else THCudaBlas_Dgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); #endif THCudaCheck(cudaMemcpy(&info, info_gpu, sizeof(int), cudaMemcpyDeviceToHost)); if (info > 0) THError("CUBLAS getri : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("CUBLAS getri : Argument %d : illegal value", -info); THCudaFree(state, ipiv_gpu); THCudaFree(state, info_gpu); THCudaFree(state, d_matrices1); THCudaFree(state, d_matrices2); THCTensor_(free)(state, input); #endif } __global__ void THCTensor_(copyUpperSymmetric)(real *input, int n, int len) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) { const int r = idx % n; const int c = idx / n; if (r > c) { input[idx] = input[r*n + c]; } } } __global__ void THCTensor_(copyLowerSymmetric)(real *input, int n, int len) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) { const int r = idx % n; const int c = 
idx / n; if (r < c) { input[idx] = input[r*n + c]; } } } THC_API void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo) { #ifdef USE_MAGMA THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional"); THArgCheck(a->size(0) == a->size(1), 2, "A should be square"); int64_t n = a->size(0); magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower; THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); real *input_data = THCTensor_(data)(state, input); int info; #if defined(THC_REAL_IS_FLOAT) magma_spotri_gpu(ul, n, input_data, n, &info); #else magma_dpotri_gpu(ul, n, input_data, n, &info); #endif if (info > 0) THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info); else if (info < 0) THError("MAGMA potri : Argument %d : illegal value", -info); cudaStream_t stream = THCState_getCurrentStream(state); const int len = n*n; dim3 blocks(std::min(DIVUP(len, 128), 65535)); dim3 threads(128); if (uplo[0] == 'U') { THCTensor_(copyUpperSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len); } else { THCTensor_(copyLowerSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len); } THCTensor_(freeCopyTo)(state, input, ra_); #else THError(NoMagma(potri)); #endif } THC_API void THCTensor_(potrf)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo) { #ifdef USE_MAGMA THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be (non-empty) 2 dimensional"); THArgCheck(a->size(0) == a->size(1), 2, "A should be square"); int64_t n = a->size(0); magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower; THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); real *input_data = THCTensor_(data)(state, input); int info; #if defined(THC_REAL_IS_FLOAT) magma_spotrf_gpu(ul, n, input_data, n, &info); #else magma_dpotrf_gpu(ul, n, input_data, n, &info); #endif // check error value if (info > 0) THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info); else if (info < 0) THError("MAGMA potrf : Argument %d : illegal value", -info); if (uplo[0] == 'U') { THCTensor_(triu)(state, ra_, input, 0); } else { THCTensor_(tril)(state, ra_, input, 0); } THCTensor_(free)(state, input); #else THError(NoMagma(potrf)); #endif } THC_API void THCTensor_(potrs)(THCState *state, THCTensor *rb_, THCTensor *b, THCTensor *a, const char *uplo) { #ifdef USE_MAGMA THArgCheck(a->size(0) == a->size(1), 2, "A should be square"); int64_t n = a->size(0); int64_t nrhs = b->size(1); magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower; THCTensor *b_ = THCTensor_(newColumnMajor)(state, rb_, b); real *b_data = THCTensor_(data)(state, b_); THCTensor *a_ = THCTensor_(newColumnMajor)(state, a, a); real *a_data = THCTensor_(data)(state, a_); int info; #if defined(THC_REAL_IS_FLOAT) magma_spotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info); #else magma_dpotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info); #endif // check error value if (info < 0) THError("MAGMA potrs : Argument %d : illegal value", -info); THCTensor_(freeCopyTo)(state, b_, rb_); THCTensor_(free)(state, a_); #else THError(NoMagma(potrs)); #endif } THC_API void THCTensor_(geqrf)(THCState *state, THCTensor *ra_, THCTensor *rtau_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 2, "A should be non-empty 2 dimensional"); THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); int64_t m = a->size(0); int64_t n = a->size(1); int64_t k = (m < n ? 
m : n); #if defined(THC_REAL_IS_FLOAT) int64_t nb = magma_get_sgeqrf_nb(m, n); #else int64_t nb = magma_get_dgeqrf_nb(m, n); #endif real *rtau_data = th_magma_malloc_pinned<real>(k); real *a_data = THCTensor_(data)(state, a); int info; #if defined(THC_REAL_IS_FLOAT) magma_sgeqrf2_gpu(m, n, a_data, m, rtau_data, &info); #else magma_dgeqrf2_gpu(m, n, a_data, m, rtau_data, &info); #endif if (info != 0) THError("MAGMA geqrf2 : Argument %d : illegal value.", -info); THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(copyArray1d)(state, rtau_, rtau_data, k); magma_free_pinned(rtau_data); #else THError(NoMagma(geqrf)); #endif } THC_API void THCTensor_(qr)(THCState *state, THCTensor *rq_, THCTensor *rr_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 2, "A should be non-empty 2 dimensional"); THCTensor *a = THCTensor_(newColumnMajor)(state, rr_, a_); int64_t m = a->size(0); int64_t n = a->size(1); int64_t k = (m < n ? m : n); #if defined(THC_REAL_IS_FLOAT) int64_t nb = magma_get_sgeqrf_nb(m, n); #else int64_t nb = magma_get_dgeqrf_nb(m, n); #endif real *a_data = THCTensor_(data)(state, a); real *tau_data = th_magma_malloc_pinned<real>(k); THCTensor *work = THCTensor_(newWithSize1d)(state, (2*k + magma_roundup(n, 32))*nb); real *work_data = THCTensor_(data)(state, work); int info; // We need to call two different versions of ?geqrf: // ?geqrf_gpu allows fast computation of Q via ?orqrf_gpu, but doesn't give // R properly. Note that the MAGMA documentation for this method is wrong. // http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800 // ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orqrf_gpu #if defined(THC_REAL_IS_FLOAT) magma_sgeqrf2_gpu(m, n, a_data, m, tau_data, &info); #else magma_dgeqrf2_gpu(m, n, a_data, m, tau_data, &info); #endif if (info != 0) THError("MAGMA geqrf2 : Argument %d : illegal value.", -info); THCTensor_(narrow)(state, a, a, 0, 0, k); THCTensor_(triu)(state, rr_, a, 0); THCTensor_(free)(state, a); a = THCTensor_(newColumnMajor)(state, rq_, a_); a_data = THCTensor_(data)(state, a); #if defined(THC_REAL_IS_FLOAT) magma_sgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info); #else magma_dgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info); #endif if (info != 0) THError("MAGMA geqrf : Argument %d : illegal value.", -info); THCTensor *q = THCTensor_(newColumnMajor)(state, rq_, a); real *q_data = THCTensor_(data)(state, q); #if defined(THC_REAL_IS_FLOAT) magma_sorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info); #else magma_dorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info); #endif if (info != 0) THError("MAGMA orgqr : Argument %d : illegal value.", -info); THCTensor_(free)(state, a); THCTensor_(free)(state, work); magma_free_pinned(tau_data); THCTensor_(narrow)(state, q, q, 1, 0, k); THCTensor_(freeCopyTo)(state, q, rq_); #else THError(NoMagma(qr)); #endif } #endif #endif
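The gels, syev and qr wrappers above all rely on the same MAGMA workspace convention: the routine is first called with a workspace length of -1, which only reports the optimal size, and is then called again with pinned memory of that size. A compact sketch of the idiom, reusing the magma_sgels_gpu call already present in this file (the variable names are illustrative, not additional library API):

/* Workspace-query idiom used throughout the file above (sketch). */
real wkopt;   /* receives the optimal workspace size */
int info;
/* Pass lwork = -1: MAGMA only writes the required workspace size into wkopt. */
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
/* Allocate pinned host memory of that size, then run the real solve. */
real *hwork = th_magma_malloc_pinned<real>((size_t)wkopt);
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
magma_free_pinned(hwork);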
c09769e51341558dea9c5ea2c2ffae548014cffa.hip
// !!! This is a file automatically generated by hipify!!!
/*
  Oleg Grishin, NYU, 2014
  Parallel Computing, CSCI-UA.0480-003
*/

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>

#define SIZE 8192
#define THREADS 512
#define BLOCKS 16

/* Function declarations */

void fill_array(int* a);
/* Fills array with random ints */

void find_max(int* a, int* max);
/* Sets up cuda for kernel code execution */

__global__ void maxReduce(int* in);
/* Kernel function */

__global__ void findMaxKernelAcrossBlocks(int* l_a);
/* Kernel function for finding max element when blocks are done executing their local maxima */

void fill_array(int* a)
{
  int i;
  srand(time(NULL));
  for (i = 0; i < SIZE; i++)
  {
    a[i] = rand() % 100000;
  }
}

void find_max(int* a, int* max)
{
  int size = SIZE * sizeof(int);
  int *l_a;

  hipMalloc((void**)&l_a, size);
  hipMemcpy(l_a, a, size, hipMemcpyHostToDevice);

  hipLaunchKernelGGL(( maxReduce), dim3(BLOCKS), dim3(THREADS), 0, 0, l_a);
  hipLaunchKernelGGL(( findMaxKernelAcrossBlocks), dim3(1), dim3(1), 0, 0, l_a);

  hipMemcpy(max, l_a, sizeof(int), hipMemcpyDeviceToHost);
  hipMemcpy(a, l_a, size, hipMemcpyDeviceToHost);

  hipFree(l_a);
}

__global__ void maxReduce(int* in)
{
  int i, idx = blockIdx.x*blockDim.x + threadIdx.x, a, b;

  /* sequential addressing: split in half, */
  for(i=blockDim.x/2; i > 0; i>>=1)
  {
    if (threadIdx.x < i)
    {
      a = in[idx];
      b = in[idx + i];
      in[idx] = a > b ? a : b;
    }
    __syncthreads();
  }
}

__global__ void findMaxKernelAcrossBlocks(int* l_a)
{
  int block_width = SIZE/BLOCKS, i, elem, max = l_a[0];
  for (i = 1; i < BLOCKS; i++)
  {
    elem = l_a[i*block_width];
    if (max < elem)
    {
      max = elem;
    }
  }
  l_a[0] = max;
}

int main(int argc, char *argv[])
{
  int* a = (int *) malloc(SIZE * sizeof(int));
  fill_array(a);

  int max;
  find_max(a, &max);

  hipError_t error = hipGetLastError();
  if(error != hipSuccess)
  {
    printf("CUDA error: %s\n", hipGetErrorString(error));
    exit(1);
  }

  printf("%d\n", max);
  exit(0);
}
c09769e51341558dea9c5ea2c2ffae548014cffa.cu
/*
  Oleg Grishin, NYU, 2014
  Parallel Computing, CSCI-UA.0480-003
*/

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>

#define SIZE 8192
#define THREADS 512
#define BLOCKS 16

/* Function declarations */

void fill_array(int* a);
/* Fills array with random ints */

void find_max(int* a, int* max);
/* Sets up cuda for kernel code execution */

__global__ void maxReduce(int* in);
/* Kernel function */

__global__ void findMaxKernelAcrossBlocks(int* l_a);
/* Kernel function for finding max element when blocks are done executing their local maxima */

void fill_array(int* a)
{
  int i;
  srand(time(NULL));
  for (i = 0; i < SIZE; i++)
  {
    a[i] = rand() % 100000;
  }
}

void find_max(int* a, int* max)
{
  int size = SIZE * sizeof(int);
  int *l_a;

  cudaMalloc((void**)&l_a, size);
  cudaMemcpy(l_a, a, size, cudaMemcpyHostToDevice);

  maxReduce<<<BLOCKS, THREADS>>>(l_a);
  findMaxKernelAcrossBlocks<<<1, 1>>>(l_a);

  cudaMemcpy(max, l_a, sizeof(int), cudaMemcpyDeviceToHost);
  cudaMemcpy(a, l_a, size, cudaMemcpyDeviceToHost);

  cudaFree(l_a);
}

__global__ void maxReduce(int* in)
{
  int i, idx = blockIdx.x*blockDim.x + threadIdx.x, a, b;

  /* sequential addressing: split in half, */
  for(i=blockDim.x/2; i > 0; i>>=1)
  {
    if (threadIdx.x < i)
    {
      a = in[idx];
      b = in[idx + i];
      in[idx] = a > b ? a : b;
    }
    __syncthreads();
  }
}

__global__ void findMaxKernelAcrossBlocks(int* l_a)
{
  int block_width = SIZE/BLOCKS, i, elem, max = l_a[0];
  for (i = 1; i < BLOCKS; i++)
  {
    elem = l_a[i*block_width];
    if (max < elem)
    {
      max = elem;
    }
  }
  l_a[0] = max;
}

int main(int argc, char *argv[])
{
  int* a = (int *) malloc(SIZE * sizeof(int));
  fill_array(a);

  int max;
  find_max(a, &max);

  cudaError_t error = cudaGetLastError();
  if(error != cudaSuccess)
  {
    printf("CUDA error: %s\n", cudaGetErrorString(error));
    exit(1);
  }

  printf("%d\n", max);
  exit(0);
}
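The pair above implements a two-stage max reduction: maxReduce folds each block's 512-element slice in place with sequential addressing, and findMaxKernelAcrossBlocks then scans the 16 per-block results with a single thread. A host-side reference check is a natural companion; the sketch below is not part of the original program, and because find_max copies the partially reduced device array back into a, the CPU maximum has to be taken before find_max runs (or on a copy).

/* Hypothetical CPU reference for validating find_max (sketch, not in the original). */
int cpu_max(const int* a)
{
  int i, m = a[0];
  for (i = 1; i < SIZE; i++)
    if (a[i] > m)
      m = a[i];
  return m;
}

/* Usage sketch inside main(), before find_max() overwrites a:
     int expected = cpu_max(a);
     find_max(a, &max);
     if (max != expected) printf("mismatch: %d vs %d\n", max, expected);
*/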
d8772222b274989c9e4ad8c5d91e6f5969e01de5.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "cuda_common.cuh" __global__ void lots_of_float_compute(float *inputs, int N, size_t niters, float *outputs) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; size_t nthreads = gridDim.x * blockDim.x; for (; tid < N; tid += nthreads) { size_t iter; float val = inputs[tid]; for (iter = 0; iter < niters; iter++) { val = (val + 5.0f) - 101.0f; val = (val / 3.0f) + 102.0f; val = (val + 1.07f) - 103.0f; val = (val / 1.037f) + 104.0f; val = (val + 3.00f) - 105.0f; val = (val / 0.22f) + 106.0f; } outputs[tid] = val; } } __global__ void lots_of_double_compute(double *inputs, int N, size_t niters, double *outputs) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; size_t nthreads = gridDim.x * blockDim.x; for (; tid < N; tid += nthreads) { size_t iter; double val = inputs[tid]; for (iter = 0; iter < niters; iter++) { val = (val + 5.0) - 101.0; val = (val / 3.0) + 102.0; val = (val + 1.07) - 103.0; val = (val / 1.037) + 104.0; val = (val + 3.00) - 105.0; val = (val / 0.22) + 106.0; } outputs[tid] = val; } } static void run_float_test(size_t N, int niters, int blocksPerGrid,int threadsPerBlock, long* to_device_clock_cyl,long * kernel_clock_cyl, long* from_device_clock_cyl, float *sample, int sampleLength) { int i; float *h_floatInputs, *h_floatOutputs; float *d_floatInputs, *d_floatOutputs; h_floatInputs = (float *)malloc(sizeof(float) * N); h_floatOutputs = (float *)malloc(sizeof(float) * N); gpuErrchk(hipMalloc((void **)&d_floatInputs, sizeof(float) * N)); gpuErrchk(hipMalloc((void **)&d_floatOutputs, sizeof(float) * N)); for (i = 0; i < N; i++) { h_floatInputs[i] = (float)i; } clock_t ops_start, ops_end; ops_start = clock(); gpuErrchk(hipMemcpy(d_floatInputs, h_floatInputs, sizeof(float) * N,hipMemcpyHostToDevice)); ops_end = clock(); *to_device_clock_cyl = ops_end - ops_start; ops_start = clock(); lots_of_float_compute << <blocksPerGrid, threadsPerBlock >> >(d_floatInputs,N, niters, d_floatOutputs); gpuErrchk(hipDeviceSynchronize()); ops_end = clock(); *kernel_clock_cyl = ops_end - ops_start; ops_start = clock(); gpuErrchk(hipMemcpy(h_floatOutputs, d_floatOutputs, sizeof(float) * N,hipMemcpyDeviceToHost)); ops_end = clock(); *from_device_clock_cyl = ops_end - ops_start; for (i = 0; i < sampleLength; i++) { sample[i] = h_floatOutputs[i]; } gpuErrchk(hipFree(d_floatInputs)); gpuErrchk(hipFree(d_floatOutputs)); free(h_floatInputs); free(h_floatOutputs); } static void run_double_test(size_t N, int niters, int blocksPerGrid,int threadsPerBlock, long* to_device_clock_cyl,long * kernel_clock_cyl, long* from_device_clock_cyl, double *sample, int sampleLength) { int i; double *h_doubleInputs, *h_doubleOutputs; double *d_doubleInputs, *d_doubleOutputs; h_doubleInputs = (double *)malloc(sizeof(double) * N); h_doubleOutputs = (double *)malloc(sizeof(double) * N); gpuErrchk(hipMalloc((void **)&d_doubleInputs, sizeof(double) * N)); gpuErrchk(hipMalloc((void **)&d_doubleOutputs, sizeof(double) * N)); for (i = 0; i < N; i++) { h_doubleInputs[i] = (double)i; } clock_t ops_start, ops_end; ops_start = clock(); gpuErrchk(hipMemcpy(d_doubleInputs, h_doubleInputs, sizeof(double) * N, hipMemcpyHostToDevice)); ops_end = clock(); *to_device_clock_cyl = ops_end - ops_start; ops_start = clock(); lots_of_double_compute << <blocksPerGrid, threadsPerBlock >> >(d_doubleInputs, N, niters, d_doubleOutputs); 
gpuErrchk(hipDeviceSynchronize()); ops_end = clock(); *kernel_clock_cyl = ops_end - ops_start; ops_start = clock(); gpuErrchk(hipMemcpy(h_doubleOutputs, d_doubleOutputs, sizeof(double) * N, hipMemcpyDeviceToHost)); ops_end = clock(); *from_device_clock_cyl = ops_end - ops_start; for (i = 0; i < sampleLength; i++) { sample[i] = h_doubleOutputs[i]; } gpuErrchk(hipFree(d_doubleInputs)); gpuErrchk(hipFree(d_doubleOutputs)); free(h_doubleInputs); free(h_doubleOutputs); } //int main(int argc, char **argv) //{ // int i; // double meanFloatToDeviceTime, meanFloatKernelTime, meanFloatFromDeviceTime; // double meanDoubleToDeviceTime, meanDoubleKernelTime, // meanDoubleFromDeviceTime; // struct hipDeviceProp_t deviceProperties; // size_t totalMem, freeMem; // float *floatSample; // double *doubleSample; // int sampleLength = 10; // int nRuns = 5; // int nKernelIters = 20; // // meanFloatToDeviceTime = meanFloatKernelTime = meanFloatFromDeviceTime = 0.0; // meanDoubleToDeviceTime = meanDoubleKernelTime = // meanDoubleFromDeviceTime = 0.0; // // gpuErrchk(hipMemGetInfo(&freeMem, &totalMem)); // gpuErrchk(hipGetDeviceProperties(&deviceProperties, 0)); // // size_t N = (freeMem * 0.9 / 2) / sizeof(double); // int threadsPerBlock = 256; // int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock; // // if (blocksPerGrid > deviceProperties.maxGridSize[0]) // { // blocksPerGrid = deviceProperties.maxGridSize[0]; // } // // printf("Running %d blocks with %d threads/block over %lu elements\n", // blocksPerGrid, threadsPerBlock, N); // // floatSample = (float *)malloc(sizeof(float) * sampleLength); // doubleSample = (double *)malloc(sizeof(double) * sampleLength); // // for (i = 0; i < nRuns; i++) // { // long toDeviceTime, kernelTime, fromDeviceTime; // // run_float_test(N, nKernelIters, blocksPerGrid, threadsPerBlock, // &toDeviceTime, &kernelTime, &fromDeviceTime, // floatSample, sampleLength); // meanFloatToDeviceTime += toDeviceTime; // meanFloatKernelTime += kernelTime; // meanFloatFromDeviceTime += fromDeviceTime; // // run_double_test(N, nKernelIters, blocksPerGrid, threadsPerBlock, // &toDeviceTime, &kernelTime, &fromDeviceTime, // doubleSample, sampleLength); // meanDoubleToDeviceTime += toDeviceTime; // meanDoubleKernelTime += kernelTime; // meanDoubleFromDeviceTime += fromDeviceTime; // } // // meanFloatToDeviceTime /= nRuns; // meanFloatKernelTime /= nRuns; // meanFloatFromDeviceTime /= nRuns; // meanDoubleToDeviceTime /= nRuns; // meanDoubleKernelTime /= nRuns; // meanDoubleFromDeviceTime /= nRuns; // // meanFloatToDeviceTime /= CLOCKS_PER_SEC; // meanFloatKernelTime /= CLOCKS_PER_SEC; // meanFloatFromDeviceTime /= CLOCKS_PER_SEC; // meanDoubleToDeviceTime /= CLOCKS_PER_SEC; // meanDoubleKernelTime /= CLOCKS_PER_SEC; // meanDoubleFromDeviceTime /= CLOCKS_PER_SEC; // // printf("For single-precision floating point, mean times for:\n"); // printf(" Copy to device: %f s\n", meanFloatToDeviceTime); // printf(" Kernel execution: %f s\n", meanFloatKernelTime); // printf(" Copy from device: %f s\n", meanFloatFromDeviceTime); // printf("For double-precision floating point, mean times for:\n"); // printf(" Copy to device: %f s (%.2fx slower than single-precision)\n", // meanDoubleToDeviceTime, // meanDoubleToDeviceTime / meanFloatToDeviceTime); // printf(" Kernel execution: %f s (%.2fx slower than single-precision)\n", // meanDoubleKernelTime, // meanDoubleKernelTime / meanFloatKernelTime); // printf(" Copy from device: %f s (%.2fx slower than single-precision)\n", // meanDoubleFromDeviceTime, // 
meanDoubleFromDeviceTime / meanFloatFromDeviceTime); // // return 0; //}
d8772222b274989c9e4ad8c5d91e6f5969e01de5.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuda_common.cuh" __global__ void lots_of_float_compute(float *inputs, int N, size_t niters, float *outputs) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; size_t nthreads = gridDim.x * blockDim.x; for (; tid < N; tid += nthreads) { size_t iter; float val = inputs[tid]; for (iter = 0; iter < niters; iter++) { val = (val + 5.0f) - 101.0f; val = (val / 3.0f) + 102.0f; val = (val + 1.07f) - 103.0f; val = (val / 1.037f) + 104.0f; val = (val + 3.00f) - 105.0f; val = (val / 0.22f) + 106.0f; } outputs[tid] = val; } } __global__ void lots_of_double_compute(double *inputs, int N, size_t niters, double *outputs) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; size_t nthreads = gridDim.x * blockDim.x; for (; tid < N; tid += nthreads) { size_t iter; double val = inputs[tid]; for (iter = 0; iter < niters; iter++) { val = (val + 5.0) - 101.0; val = (val / 3.0) + 102.0; val = (val + 1.07) - 103.0; val = (val / 1.037) + 104.0; val = (val + 3.00) - 105.0; val = (val / 0.22) + 106.0; } outputs[tid] = val; } } static void run_float_test(size_t N, int niters, int blocksPerGrid,int threadsPerBlock, long* to_device_clock_cyl,long * kernel_clock_cyl, long* from_device_clock_cyl, float *sample, int sampleLength) { int i; float *h_floatInputs, *h_floatOutputs; float *d_floatInputs, *d_floatOutputs; h_floatInputs = (float *)malloc(sizeof(float) * N); h_floatOutputs = (float *)malloc(sizeof(float) * N); gpuErrchk(cudaMalloc((void **)&d_floatInputs, sizeof(float) * N)); gpuErrchk(cudaMalloc((void **)&d_floatOutputs, sizeof(float) * N)); for (i = 0; i < N; i++) { h_floatInputs[i] = (float)i; } clock_t ops_start, ops_end; ops_start = clock(); gpuErrchk(cudaMemcpy(d_floatInputs, h_floatInputs, sizeof(float) * N,cudaMemcpyHostToDevice)); ops_end = clock(); *to_device_clock_cyl = ops_end - ops_start; ops_start = clock(); lots_of_float_compute << <blocksPerGrid, threadsPerBlock >> >(d_floatInputs,N, niters, d_floatOutputs); gpuErrchk(cudaDeviceSynchronize()); ops_end = clock(); *kernel_clock_cyl = ops_end - ops_start; ops_start = clock(); gpuErrchk(cudaMemcpy(h_floatOutputs, d_floatOutputs, sizeof(float) * N,cudaMemcpyDeviceToHost)); ops_end = clock(); *from_device_clock_cyl = ops_end - ops_start; for (i = 0; i < sampleLength; i++) { sample[i] = h_floatOutputs[i]; } gpuErrchk(cudaFree(d_floatInputs)); gpuErrchk(cudaFree(d_floatOutputs)); free(h_floatInputs); free(h_floatOutputs); } static void run_double_test(size_t N, int niters, int blocksPerGrid,int threadsPerBlock, long* to_device_clock_cyl,long * kernel_clock_cyl, long* from_device_clock_cyl, double *sample, int sampleLength) { int i; double *h_doubleInputs, *h_doubleOutputs; double *d_doubleInputs, *d_doubleOutputs; h_doubleInputs = (double *)malloc(sizeof(double) * N); h_doubleOutputs = (double *)malloc(sizeof(double) * N); gpuErrchk(cudaMalloc((void **)&d_doubleInputs, sizeof(double) * N)); gpuErrchk(cudaMalloc((void **)&d_doubleOutputs, sizeof(double) * N)); for (i = 0; i < N; i++) { h_doubleInputs[i] = (double)i; } clock_t ops_start, ops_end; ops_start = clock(); gpuErrchk(cudaMemcpy(d_doubleInputs, h_doubleInputs, sizeof(double) * N, cudaMemcpyHostToDevice)); ops_end = clock(); *to_device_clock_cyl = ops_end - ops_start; ops_start = clock(); lots_of_double_compute << <blocksPerGrid, threadsPerBlock >> >(d_doubleInputs, N, niters, d_doubleOutputs); gpuErrchk(cudaDeviceSynchronize()); ops_end = clock(); *kernel_clock_cyl = 
ops_end - ops_start; ops_start = clock(); gpuErrchk(cudaMemcpy(h_doubleOutputs, d_doubleOutputs, sizeof(double) * N, cudaMemcpyDeviceToHost)); ops_end = clock(); *from_device_clock_cyl = ops_end - ops_start; for (i = 0; i < sampleLength; i++) { sample[i] = h_doubleOutputs[i]; } gpuErrchk(cudaFree(d_doubleInputs)); gpuErrchk(cudaFree(d_doubleOutputs)); free(h_doubleInputs); free(h_doubleOutputs); } //int main(int argc, char **argv) //{ // int i; // double meanFloatToDeviceTime, meanFloatKernelTime, meanFloatFromDeviceTime; // double meanDoubleToDeviceTime, meanDoubleKernelTime, // meanDoubleFromDeviceTime; // struct cudaDeviceProp deviceProperties; // size_t totalMem, freeMem; // float *floatSample; // double *doubleSample; // int sampleLength = 10; // int nRuns = 5; // int nKernelIters = 20; // // meanFloatToDeviceTime = meanFloatKernelTime = meanFloatFromDeviceTime = 0.0; // meanDoubleToDeviceTime = meanDoubleKernelTime = // meanDoubleFromDeviceTime = 0.0; // // gpuErrchk(cudaMemGetInfo(&freeMem, &totalMem)); // gpuErrchk(cudaGetDeviceProperties(&deviceProperties, 0)); // // size_t N = (freeMem * 0.9 / 2) / sizeof(double); // int threadsPerBlock = 256; // int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock; // // if (blocksPerGrid > deviceProperties.maxGridSize[0]) // { // blocksPerGrid = deviceProperties.maxGridSize[0]; // } // // printf("Running %d blocks with %d threads/block over %lu elements\n", // blocksPerGrid, threadsPerBlock, N); // // floatSample = (float *)malloc(sizeof(float) * sampleLength); // doubleSample = (double *)malloc(sizeof(double) * sampleLength); // // for (i = 0; i < nRuns; i++) // { // long toDeviceTime, kernelTime, fromDeviceTime; // // run_float_test(N, nKernelIters, blocksPerGrid, threadsPerBlock, // &toDeviceTime, &kernelTime, &fromDeviceTime, // floatSample, sampleLength); // meanFloatToDeviceTime += toDeviceTime; // meanFloatKernelTime += kernelTime; // meanFloatFromDeviceTime += fromDeviceTime; // // run_double_test(N, nKernelIters, blocksPerGrid, threadsPerBlock, // &toDeviceTime, &kernelTime, &fromDeviceTime, // doubleSample, sampleLength); // meanDoubleToDeviceTime += toDeviceTime; // meanDoubleKernelTime += kernelTime; // meanDoubleFromDeviceTime += fromDeviceTime; // } // // meanFloatToDeviceTime /= nRuns; // meanFloatKernelTime /= nRuns; // meanFloatFromDeviceTime /= nRuns; // meanDoubleToDeviceTime /= nRuns; // meanDoubleKernelTime /= nRuns; // meanDoubleFromDeviceTime /= nRuns; // // meanFloatToDeviceTime /= CLOCKS_PER_SEC; // meanFloatKernelTime /= CLOCKS_PER_SEC; // meanFloatFromDeviceTime /= CLOCKS_PER_SEC; // meanDoubleToDeviceTime /= CLOCKS_PER_SEC; // meanDoubleKernelTime /= CLOCKS_PER_SEC; // meanDoubleFromDeviceTime /= CLOCKS_PER_SEC; // // printf("For single-precision floating point, mean times for:\n"); // printf(" Copy to device: %f s\n", meanFloatToDeviceTime); // printf(" Kernel execution: %f s\n", meanFloatKernelTime); // printf(" Copy from device: %f s\n", meanFloatFromDeviceTime); // printf("For double-precision floating point, mean times for:\n"); // printf(" Copy to device: %f s (%.2fx slower than single-precision)\n", // meanDoubleToDeviceTime, // meanDoubleToDeviceTime / meanFloatToDeviceTime); // printf(" Kernel execution: %f s (%.2fx slower than single-precision)\n", // meanDoubleKernelTime, // meanDoubleKernelTime / meanFloatKernelTime); // printf(" Copy from device: %f s (%.2fx slower than single-precision)\n", // meanDoubleFromDeviceTime, // meanDoubleFromDeviceTime / meanFloatFromDeviceTime); // // return 0; //}
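Both run_*_test functions above time the kernel with clock() wrapped around a blocking cudaDeviceSynchronize(), i.e. they measure host-side time. CUDA events are a common alternative that time the GPU work itself; the sketch below shows the pattern for the float kernel, assuming it sits inside run_float_test where blocksPerGrid, threadsPerBlock, d_floatInputs, N, niters and d_floatOutputs are in scope (the event variables are illustrative names, not part of the benchmark).

// Sketch: timing lots_of_float_compute with CUDA events instead of clock().
cudaEvent_t start, stop;
float elapsedMs = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
lots_of_float_compute<<<blocksPerGrid, threadsPerBlock>>>(d_floatInputs, N, niters, d_floatOutputs);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);                    // wait until the kernel has finished
cudaEventElapsedTime(&elapsedMs, start, stop); // elapsed GPU time in milliseconds
cudaEventDestroy(start);
cudaEventDestroy(stop);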
31612c8277598ef97d18680416bb8bcb2a687c6a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * set_value.cu
 *
 *  Created on: Sep 20, 2015
 *      Author: lyx
 */

#include "set_value.h"

using namespace global;

namespace utils {

__global__ void setValue(float* x, int n, float val) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        x[i] = val;
}

__global__ void dropout(float* x, float* t, int n, float threshold) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        if (t[i] < threshold)
            x[i] = 0;
}

__global__ void scale(float* x, int n, float epsilon) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        x[i] = x[i] * epsilon * 2 - epsilon;
}

void setGpuValue(float* x, int n, float val) {
    int threadsPerBlock = 256;
    int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
    hipLaunchKernelGGL(( setValue), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, x, n, val);
}

void setGpuUniformValue(float* x, int n, float epsilon) {
    int threadsPerBlock = 256;
    int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
    hiprandGenerator_t generator;
    hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_MTGP32);
    hiprandSetPseudoRandomGeneratorSeed(generator, time(NULL));
    hiprandGenerateUniform(generator, x, n);
    hipLaunchKernelGGL(( scale), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, x, n, epsilon);
    hiprandDestroyGenerator(generator);
}

void setGpuNormalValue(float* x, int n, float mean, float stddev) {
    hiprandGenerator_t generator;
    hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_MTGP32);
    hiprandSetPseudoRandomGeneratorSeed(generator, time(NULL));
    hiprandGenerateNormal(generator, x, n, mean, stddev);
    hiprandDestroyGenerator(generator);
}

void dropGpuValue(float *x, int n, float dropout_rate) {
    int threadsPerBlock = 256;
    int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
    hiprandGenerator_t generator;
    hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_MTGP32);
    hiprandSetPseudoRandomGeneratorSeed(generator, time(NULL));
    float* t;
    hipMalloc((void**)&t, sizeof(float) * n);
    hiprandGenerateUniform(generator, t, n);
    hipLaunchKernelGGL(( dropout), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, x, t, n, dropout_rate);
    hipFree(t);
    hiprandDestroyGenerator(generator);
}

void scaleGpuValue(float *x, int n, float scale) {
    hipblasSscal(cublasHandle, n, &scale, x, 1);
}

}
31612c8277598ef97d18680416bb8bcb2a687c6a.cu
/*
 * set_value.cu
 *
 *  Created on: Sep 20, 2015
 *      Author: lyx
 */

#include "set_value.h"

using namespace global;

namespace utils {

__global__ void setValue(float* x, int n, float val) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        x[i] = val;
}

__global__ void dropout(float* x, float* t, int n, float threshold) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        if (t[i] < threshold)
            x[i] = 0;
}

__global__ void scale(float* x, int n, float epsilon) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        x[i] = x[i] * epsilon * 2 - epsilon;
}

void setGpuValue(float* x, int n, float val) {
    int threadsPerBlock = 256;
    int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
    setValue<<<blocksPerGrid, threadsPerBlock>>>(x, n, val);
}

void setGpuUniformValue(float* x, int n, float epsilon) {
    int threadsPerBlock = 256;
    int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
    curandGenerator_t generator;
    curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_MTGP32);
    curandSetPseudoRandomGeneratorSeed(generator, time(NULL));
    curandGenerateUniform(generator, x, n);
    scale<<<blocksPerGrid, threadsPerBlock>>>(x, n, epsilon);
    curandDestroyGenerator(generator);
}

void setGpuNormalValue(float* x, int n, float mean, float stddev) {
    curandGenerator_t generator;
    curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_MTGP32);
    curandSetPseudoRandomGeneratorSeed(generator, time(NULL));
    curandGenerateNormal(generator, x, n, mean, stddev);
    curandDestroyGenerator(generator);
}

void dropGpuValue(float *x, int n, float dropout_rate) {
    int threadsPerBlock = 256;
    int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
    curandGenerator_t generator;
    curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_MTGP32);
    curandSetPseudoRandomGeneratorSeed(generator, time(NULL));
    float* t;
    cudaMalloc((void**)&t, sizeof(float) * n);
    curandGenerateUniform(generator, t, n);
    dropout<<<blocksPerGrid, threadsPerBlock>>>(x, t, n, dropout_rate);
    cudaFree(t);
    curandDestroyGenerator(generator);
}

void scaleGpuValue(float *x, int n, float scale) {
    cublasSscal(cublasHandle, n, &scale, x, 1);
}

}
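The dropout kernel above zeroes activations whose uniform sample falls below the threshold but leaves the survivors unscaled. Many frameworks instead use "inverted" dropout, where survivors are divided by (1 - rate) so no rescaling is needed at inference time. The kernel below is a sketch of that variant under the same calling convention; it is hypothetical, not a change to the library above.

// Sketch of an inverted-dropout variant (hypothetical, not part of set_value.cu).
__global__ void dropout_inverted(float* x, const float* t, int n, float rate) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        x[i] = (t[i] < rate) ? 0.0f : x[i] / (1.0f - rate);
}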
4c2c42d508276b8646f0b72547ffa7d30322a10c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2014-2015 Isis Innovation Limited and the authors of gSLICr #include "gSLICr_seg_engine_GPU.h" #include "gSLICr_seg_engine_shared.h" using namespace std; using namespace gSLICr; using namespace gSLICr::objects; using namespace gSLICr::engines; // ---------------------------------------------------- // // kernel function defines // // ---------------------------------------------------- __global__ void Cvt_Img_Space_device(const Vector4u* inimg, Vector4f* outimg, Vector2i img_size, COLOR_SPACE color_space); __global__ void Enforce_Connectivity_device(const int* in_idx_img, int* out_idx_img, Vector2i img_size); __global__ void Init_Cluster_Centers_device(const Vector4f* inimg, spixel_info* out_spixel, Vector2i map_size, Vector2i img_size, int spixel_size); __global__ void Find_Center_Association_device(const Vector4f* inimg, const spixel_info* in_spixel_map, int* out_idx_img, Vector2i map_size, Vector2i img_size, int spixel_size, float weight, float max_xy_dist, float max_color_dist); __global__ void Update_Cluster_Center_device(const Vector4f* inimg, const int* in_idx_img, spixel_info* accum_map, Vector2i map_size, Vector2i img_size, int spixel_size, int no_blocks_per_line); __global__ void Finalize_Reduction_Result_device(const spixel_info* accum_map, spixel_info* spixel_list, Vector2i map_size, int no_blocks_per_spixel); __global__ void Draw_Segmentation_Result_device(const int* idx_img, Vector4u* sourceimg, Vector4u* outimg, Vector2i img_size); // ---------------------------------------------------- // // host function implementations // // ---------------------------------------------------- seg_engine_GPU::seg_engine_GPU(const settings& in_settings) : seg_engine(in_settings) { source_img = new UChar4Image(in_settings.img_size,true,true); cvt_img = new Float4Image(in_settings.img_size, true, true); idx_img = new IntImage(in_settings.img_size, true, true); tmp_idx_img = new IntImage(in_settings.img_size, true, true); if (in_settings.seg_method == GIVEN_NUM) { float cluster_size = (float)(in_settings.img_size.x * in_settings.img_size.y) / (float)in_settings.no_segs; spixel_size = (int)ceil(sqrtf(cluster_size)); } else { spixel_size = in_settings.spixel_size; } int spixel_per_col = (int)ceil(in_settings.img_size.x / spixel_size); int spixel_per_row = (int)ceil(in_settings.img_size.y / spixel_size); Vector2i map_size = Vector2i(spixel_per_col, spixel_per_row); spixel_map = new SpixelMap(map_size, true, true); float total_pixel_to_search = (float)(spixel_size * spixel_size * 9); no_grid_per_center = (int)ceil(total_pixel_to_search / (float)(BLOCK_DIM * BLOCK_DIM)); map_size.x *= no_grid_per_center; accum_map = new ORUtils::Image<spixel_info>(map_size, true, true); // normalizing factors max_xy_dist = 1.0f / (1.4242f * spixel_size); // sqrt(2) * spixel_size switch (in_settings.color_space) { case RGB: max_color_dist = 5.0f / (1.7321f * 255); break; case XYZ: max_color_dist = 5.0f / 1.7321f; break; case CIELAB: max_color_dist = 15.0f / (1.7321f * 128); break; } max_color_dist *= max_color_dist; max_xy_dist *= max_xy_dist; } gSLICr::engines::seg_engine_GPU::~seg_engine_GPU() { delete accum_map; } void gSLICr::engines::seg_engine_GPU::Cvt_Img_Space(UChar4Image* inimg, Float4Image* outimg, COLOR_SPACE color_space) { Vector4u* inimg_ptr = inimg->GetData(MEMORYDEVICE_CUDA); Vector4f* outimg_ptr = outimg->GetData(MEMORYDEVICE_CUDA); Vector2i img_size = inimg->noDims; dim3 blockSize(BLOCK_DIM, 
BLOCK_DIM); dim3 gridSize((int)ceil((float)img_size.x / (float)blockSize.x), (int)ceil((float)img_size.y / (float)blockSize.y)); Cvt_Img_Space_device << <gridSize, blockSize >> >(inimg_ptr, outimg_ptr, img_size, color_space); } void gSLICr::engines::seg_engine_GPU::Init_Cluster_Centers() { spixel_info* spixel_list = spixel_map->GetData(MEMORYDEVICE_CUDA); Vector4f* img_ptr = cvt_img->GetData(MEMORYDEVICE_CUDA); Vector2i map_size = spixel_map->noDims; Vector2i img_size = cvt_img->noDims; dim3 blockSize(BLOCK_DIM, BLOCK_DIM); dim3 gridSize((int)ceil((float)map_size.x / (float)blockSize.x), (int)ceil((float)map_size.y / (float)blockSize.y)); Init_Cluster_Centers_device << <gridSize, blockSize >> >(img_ptr, spixel_list, map_size, img_size, spixel_size); } void gSLICr::engines::seg_engine_GPU::Find_Center_Association() { spixel_info* spixel_list = spixel_map->GetData(MEMORYDEVICE_CUDA); Vector4f* img_ptr = cvt_img->GetData(MEMORYDEVICE_CUDA); int* idx_ptr = idx_img->GetData(MEMORYDEVICE_CUDA); Vector2i map_size = spixel_map->noDims; Vector2i img_size = cvt_img->noDims; dim3 blockSize(BLOCK_DIM, BLOCK_DIM); dim3 gridSize((int)ceil((float)img_size.x / (float)blockSize.x), (int)ceil((float)img_size.y / (float)blockSize.y)); Find_Center_Association_device << <gridSize, blockSize >> >(img_ptr, spixel_list, idx_ptr, map_size, img_size, spixel_size, gSLICr_settings.coh_weight,max_xy_dist,max_color_dist); } void gSLICr::engines::seg_engine_GPU::Update_Cluster_Center() { spixel_info* accum_map_ptr = accum_map->GetData(MEMORYDEVICE_CUDA); spixel_info* spixel_list_ptr = spixel_map->GetData(MEMORYDEVICE_CUDA); Vector4f* img_ptr = cvt_img->GetData(MEMORYDEVICE_CUDA); int* idx_ptr = idx_img->GetData(MEMORYDEVICE_CUDA); Vector2i map_size = spixel_map->noDims; Vector2i img_size = cvt_img->noDims; int no_blocks_per_line = spixel_size * 3 / BLOCK_DIM; dim3 blockSize(BLOCK_DIM, BLOCK_DIM); dim3 gridSize(map_size.x, map_size.y, no_grid_per_center); hipLaunchKernelGGL(( Update_Cluster_Center_device), dim3(gridSize),dim3(blockSize), 0, 0, img_ptr, idx_ptr, accum_map_ptr, map_size, img_size, spixel_size, no_blocks_per_line); dim3 gridSize2(map_size.x, map_size.y); hipLaunchKernelGGL(( Finalize_Reduction_Result_device), dim3(gridSize2),dim3(blockSize), 0, 0, accum_map_ptr, spixel_list_ptr, map_size, no_grid_per_center); } void gSLICr::engines::seg_engine_GPU::Enforce_Connectivity() { int* idx_ptr = idx_img->GetData(MEMORYDEVICE_CUDA); int* tmp_idx_ptr = tmp_idx_img->GetData(MEMORYDEVICE_CUDA); Vector2i img_size = idx_img->noDims; dim3 blockSize(BLOCK_DIM, BLOCK_DIM); dim3 gridSize((int)ceil((float)img_size.x / (float)blockSize.x), (int)ceil((float)img_size.y / (float)blockSize.y)); Enforce_Connectivity_device << <gridSize, blockSize >> >(idx_ptr, tmp_idx_ptr, img_size); Enforce_Connectivity_device << <gridSize, blockSize >> >(tmp_idx_ptr, idx_ptr, img_size); } void gSLICr::engines::seg_engine_GPU::Draw_Segmentation_Result(UChar4Image* out_img) { Vector4u* inimg_ptr = source_img->GetData(MEMORYDEVICE_CUDA); Vector4u* outimg_ptr = out_img->GetData(MEMORYDEVICE_CUDA); int* idx_img_ptr = idx_img->GetData(MEMORYDEVICE_CUDA); Vector2i img_size = idx_img->noDims; dim3 blockSize(BLOCK_DIM, BLOCK_DIM); dim3 gridSize((int)ceil((float)img_size.x / (float)blockSize.x), (int)ceil((float)img_size.y / (float)blockSize.y)); hipLaunchKernelGGL(( Draw_Segmentation_Result_device), dim3(gridSize),dim3(blockSize), 0, 0, idx_img_ptr, inimg_ptr, outimg_ptr, img_size); out_img->UpdateHostFromDevice(); } // 
---------------------------------------------------- // // device function implementations // // ---------------------------------------------------- __global__ void Cvt_Img_Space_device(const Vector4u* inimg, Vector4f* outimg, Vector2i img_size, COLOR_SPACE color_space) { int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y; if (x > img_size.x - 1 || y > img_size.y - 1) return; //Image space convertion cvt_img_space_shared(inimg, outimg, img_size, x, y, color_space); } __global__ void Draw_Segmentation_Result_device(const int* idx_img, Vector4u* sourceimg, Vector4u* outimg, Vector2i img_size) { int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y; if (x == 0 || y == 0 || x > img_size.x - 2 || y > img_size.y - 2) return; //Lines creation/drawing draw_superpixel_boundry_shared(idx_img, sourceimg, outimg, img_size, x, y); } __global__ void Init_Cluster_Centers_device(const Vector4f* inimg, spixel_info* out_spixel, Vector2i map_size, Vector2i img_size, int spixel_size) { int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y; if (x > map_size.x - 1 || y > map_size.y - 1) return; //Cluster center creation init_cluster_centers_shared(inimg, out_spixel, map_size, img_size, spixel_size, x, y); } __global__ void Find_Center_Association_device(const Vector4f* inimg, const spixel_info* in_spixel_map, int* out_idx_img, Vector2i map_size, Vector2i img_size, int spixel_size, float weight, float max_xy_dist, float max_color_dist) { int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y; if (x > img_size.x - 1 || y > img_size.y - 1) return; find_center_association_shared(inimg, in_spixel_map, out_idx_img, map_size, img_size, spixel_size, weight, x, y,max_xy_dist,max_color_dist); } __global__ void Update_Cluster_Center_device(const Vector4f* inimg, const int* in_idx_img, spixel_info* accum_map, Vector2i map_size, Vector2i img_size, int spixel_size, int no_blocks_per_line) { int local_id = threadIdx.y * blockDim.x + threadIdx.x; __shared__ Vector4f color_shared[BLOCK_DIM*BLOCK_DIM]; __shared__ Vector2f xy_shared[BLOCK_DIM*BLOCK_DIM]; __shared__ int count_shared[BLOCK_DIM*BLOCK_DIM]; __shared__ bool should_add; color_shared[local_id] = Vector4f(0, 0, 0, 0); xy_shared[local_id] = Vector2f(0, 0); count_shared[local_id] = 0; should_add = false; __syncthreads(); int no_blocks_per_spixel = gridDim.z; int spixel_id = blockIdx.y * map_size.x + blockIdx.x; // compute the relative position in the search window int block_x = blockIdx.z % no_blocks_per_line; int block_y = blockIdx.z / no_blocks_per_line; int x_offset = block_x * BLOCK_DIM + threadIdx.x; int y_offset = block_y * BLOCK_DIM + threadIdx.y; if (x_offset < spixel_size * 3 && y_offset < spixel_size * 3) { // compute the start of the search window int x_start = blockIdx.x * spixel_size - spixel_size; int y_start = blockIdx.y * spixel_size - spixel_size; int x_img = x_start + x_offset; int y_img = y_start + y_offset; if (x_img >= 0 && x_img < img_size.x && y_img >= 0 && y_img < img_size.y) { int img_idx = y_img * img_size.x + x_img; if (in_idx_img[img_idx] == spixel_id) { color_shared[local_id] = inimg[img_idx]; xy_shared[local_id] = Vector2f(x_img, y_img); count_shared[local_id] = 1; should_add = true; } } } __syncthreads(); if (should_add) { if (local_id < 128) { color_shared[local_id] += color_shared[local_id + 128]; xy_shared[local_id] += xy_shared[local_id + 128]; count_shared[local_id] += count_shared[local_id + 
128]; } __syncthreads(); if (local_id < 64) { color_shared[local_id] += color_shared[local_id + 64]; xy_shared[local_id] += xy_shared[local_id + 64]; count_shared[local_id] += count_shared[local_id + 64]; } __syncthreads(); if (local_id < 32) { color_shared[local_id] += color_shared[local_id + 32]; color_shared[local_id] += color_shared[local_id + 16]; color_shared[local_id] += color_shared[local_id + 8]; color_shared[local_id] += color_shared[local_id + 4]; color_shared[local_id] += color_shared[local_id + 2]; color_shared[local_id] += color_shared[local_id + 1]; xy_shared[local_id] += xy_shared[local_id + 32]; xy_shared[local_id] += xy_shared[local_id + 16]; xy_shared[local_id] += xy_shared[local_id + 8]; xy_shared[local_id] += xy_shared[local_id + 4]; xy_shared[local_id] += xy_shared[local_id + 2]; xy_shared[local_id] += xy_shared[local_id + 1]; count_shared[local_id] += count_shared[local_id + 32]; count_shared[local_id] += count_shared[local_id + 16]; count_shared[local_id] += count_shared[local_id + 8]; count_shared[local_id] += count_shared[local_id + 4]; count_shared[local_id] += count_shared[local_id + 2]; count_shared[local_id] += count_shared[local_id + 1]; } } __syncthreads(); if (local_id == 0) { int accum_map_idx = spixel_id * no_blocks_per_spixel + blockIdx.z; accum_map[accum_map_idx].center = xy_shared[0]; accum_map[accum_map_idx].color_info = color_shared[0]; accum_map[accum_map_idx].no_pixels = count_shared[0]; } } __global__ void Finalize_Reduction_Result_device(const spixel_info* accum_map, spixel_info* spixel_list, Vector2i map_size, int no_blocks_per_spixel) { int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y; if (x > map_size.x - 1 || y > map_size.y - 1) return; finalize_reduction_result_shared(accum_map, spixel_list, map_size, no_blocks_per_spixel, x, y); } __global__ void Enforce_Connectivity_device(const int* in_idx_img, int* out_idx_img, Vector2i img_size) { int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y; if (x > img_size.x - 1 || y > img_size.y - 1) return; supress_local_lable(in_idx_img, out_idx_img, img_size, x, y); }
4c2c42d508276b8646f0b72547ffa7d30322a10c.cu
// Copyright 2014-2015 Isis Innovation Limited and the authors of gSLICr #include "gSLICr_seg_engine_GPU.h" #include "gSLICr_seg_engine_shared.h" using namespace std; using namespace gSLICr; using namespace gSLICr::objects; using namespace gSLICr::engines; // ---------------------------------------------------- // // kernel function defines // // ---------------------------------------------------- __global__ void Cvt_Img_Space_device(const Vector4u* inimg, Vector4f* outimg, Vector2i img_size, COLOR_SPACE color_space); __global__ void Enforce_Connectivity_device(const int* in_idx_img, int* out_idx_img, Vector2i img_size); __global__ void Init_Cluster_Centers_device(const Vector4f* inimg, spixel_info* out_spixel, Vector2i map_size, Vector2i img_size, int spixel_size); __global__ void Find_Center_Association_device(const Vector4f* inimg, const spixel_info* in_spixel_map, int* out_idx_img, Vector2i map_size, Vector2i img_size, int spixel_size, float weight, float max_xy_dist, float max_color_dist); __global__ void Update_Cluster_Center_device(const Vector4f* inimg, const int* in_idx_img, spixel_info* accum_map, Vector2i map_size, Vector2i img_size, int spixel_size, int no_blocks_per_line); __global__ void Finalize_Reduction_Result_device(const spixel_info* accum_map, spixel_info* spixel_list, Vector2i map_size, int no_blocks_per_spixel); __global__ void Draw_Segmentation_Result_device(const int* idx_img, Vector4u* sourceimg, Vector4u* outimg, Vector2i img_size); // ---------------------------------------------------- // // host function implementations // // ---------------------------------------------------- seg_engine_GPU::seg_engine_GPU(const settings& in_settings) : seg_engine(in_settings) { source_img = new UChar4Image(in_settings.img_size,true,true); cvt_img = new Float4Image(in_settings.img_size, true, true); idx_img = new IntImage(in_settings.img_size, true, true); tmp_idx_img = new IntImage(in_settings.img_size, true, true); if (in_settings.seg_method == GIVEN_NUM) { float cluster_size = (float)(in_settings.img_size.x * in_settings.img_size.y) / (float)in_settings.no_segs; spixel_size = (int)ceil(sqrtf(cluster_size)); } else { spixel_size = in_settings.spixel_size; } int spixel_per_col = (int)ceil(in_settings.img_size.x / spixel_size); int spixel_per_row = (int)ceil(in_settings.img_size.y / spixel_size); Vector2i map_size = Vector2i(spixel_per_col, spixel_per_row); spixel_map = new SpixelMap(map_size, true, true); float total_pixel_to_search = (float)(spixel_size * spixel_size * 9); no_grid_per_center = (int)ceil(total_pixel_to_search / (float)(BLOCK_DIM * BLOCK_DIM)); map_size.x *= no_grid_per_center; accum_map = new ORUtils::Image<spixel_info>(map_size, true, true); // normalizing factors max_xy_dist = 1.0f / (1.4242f * spixel_size); // sqrt(2) * spixel_size switch (in_settings.color_space) { case RGB: max_color_dist = 5.0f / (1.7321f * 255); break; case XYZ: max_color_dist = 5.0f / 1.7321f; break; case CIELAB: max_color_dist = 15.0f / (1.7321f * 128); break; } max_color_dist *= max_color_dist; max_xy_dist *= max_xy_dist; } gSLICr::engines::seg_engine_GPU::~seg_engine_GPU() { delete accum_map; } void gSLICr::engines::seg_engine_GPU::Cvt_Img_Space(UChar4Image* inimg, Float4Image* outimg, COLOR_SPACE color_space) { Vector4u* inimg_ptr = inimg->GetData(MEMORYDEVICE_CUDA); Vector4f* outimg_ptr = outimg->GetData(MEMORYDEVICE_CUDA); Vector2i img_size = inimg->noDims; dim3 blockSize(BLOCK_DIM, BLOCK_DIM); dim3 gridSize((int)ceil((float)img_size.x / (float)blockSize.x), 
(int)ceil((float)img_size.y / (float)blockSize.y)); Cvt_Img_Space_device << <gridSize, blockSize >> >(inimg_ptr, outimg_ptr, img_size, color_space); } void gSLICr::engines::seg_engine_GPU::Init_Cluster_Centers() { spixel_info* spixel_list = spixel_map->GetData(MEMORYDEVICE_CUDA); Vector4f* img_ptr = cvt_img->GetData(MEMORYDEVICE_CUDA); Vector2i map_size = spixel_map->noDims; Vector2i img_size = cvt_img->noDims; dim3 blockSize(BLOCK_DIM, BLOCK_DIM); dim3 gridSize((int)ceil((float)map_size.x / (float)blockSize.x), (int)ceil((float)map_size.y / (float)blockSize.y)); Init_Cluster_Centers_device << <gridSize, blockSize >> >(img_ptr, spixel_list, map_size, img_size, spixel_size); } void gSLICr::engines::seg_engine_GPU::Find_Center_Association() { spixel_info* spixel_list = spixel_map->GetData(MEMORYDEVICE_CUDA); Vector4f* img_ptr = cvt_img->GetData(MEMORYDEVICE_CUDA); int* idx_ptr = idx_img->GetData(MEMORYDEVICE_CUDA); Vector2i map_size = spixel_map->noDims; Vector2i img_size = cvt_img->noDims; dim3 blockSize(BLOCK_DIM, BLOCK_DIM); dim3 gridSize((int)ceil((float)img_size.x / (float)blockSize.x), (int)ceil((float)img_size.y / (float)blockSize.y)); Find_Center_Association_device << <gridSize, blockSize >> >(img_ptr, spixel_list, idx_ptr, map_size, img_size, spixel_size, gSLICr_settings.coh_weight,max_xy_dist,max_color_dist); } void gSLICr::engines::seg_engine_GPU::Update_Cluster_Center() { spixel_info* accum_map_ptr = accum_map->GetData(MEMORYDEVICE_CUDA); spixel_info* spixel_list_ptr = spixel_map->GetData(MEMORYDEVICE_CUDA); Vector4f* img_ptr = cvt_img->GetData(MEMORYDEVICE_CUDA); int* idx_ptr = idx_img->GetData(MEMORYDEVICE_CUDA); Vector2i map_size = spixel_map->noDims; Vector2i img_size = cvt_img->noDims; int no_blocks_per_line = spixel_size * 3 / BLOCK_DIM; dim3 blockSize(BLOCK_DIM, BLOCK_DIM); dim3 gridSize(map_size.x, map_size.y, no_grid_per_center); Update_Cluster_Center_device<<<gridSize,blockSize>>>(img_ptr, idx_ptr, accum_map_ptr, map_size, img_size, spixel_size, no_blocks_per_line); dim3 gridSize2(map_size.x, map_size.y); Finalize_Reduction_Result_device<<<gridSize2,blockSize>>>(accum_map_ptr, spixel_list_ptr, map_size, no_grid_per_center); } void gSLICr::engines::seg_engine_GPU::Enforce_Connectivity() { int* idx_ptr = idx_img->GetData(MEMORYDEVICE_CUDA); int* tmp_idx_ptr = tmp_idx_img->GetData(MEMORYDEVICE_CUDA); Vector2i img_size = idx_img->noDims; dim3 blockSize(BLOCK_DIM, BLOCK_DIM); dim3 gridSize((int)ceil((float)img_size.x / (float)blockSize.x), (int)ceil((float)img_size.y / (float)blockSize.y)); Enforce_Connectivity_device << <gridSize, blockSize >> >(idx_ptr, tmp_idx_ptr, img_size); Enforce_Connectivity_device << <gridSize, blockSize >> >(tmp_idx_ptr, idx_ptr, img_size); } void gSLICr::engines::seg_engine_GPU::Draw_Segmentation_Result(UChar4Image* out_img) { Vector4u* inimg_ptr = source_img->GetData(MEMORYDEVICE_CUDA); Vector4u* outimg_ptr = out_img->GetData(MEMORYDEVICE_CUDA); int* idx_img_ptr = idx_img->GetData(MEMORYDEVICE_CUDA); Vector2i img_size = idx_img->noDims; dim3 blockSize(BLOCK_DIM, BLOCK_DIM); dim3 gridSize((int)ceil((float)img_size.x / (float)blockSize.x), (int)ceil((float)img_size.y / (float)blockSize.y)); Draw_Segmentation_Result_device<<<gridSize,blockSize>>>(idx_img_ptr, inimg_ptr, outimg_ptr, img_size); out_img->UpdateHostFromDevice(); } // ---------------------------------------------------- // // device function implementations // // ---------------------------------------------------- __global__ void Cvt_Img_Space_device(const Vector4u* inimg, Vector4f* 
outimg, Vector2i img_size, COLOR_SPACE color_space) { int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y; if (x > img_size.x - 1 || y > img_size.y - 1) return; //Image space convertion cvt_img_space_shared(inimg, outimg, img_size, x, y, color_space); } __global__ void Draw_Segmentation_Result_device(const int* idx_img, Vector4u* sourceimg, Vector4u* outimg, Vector2i img_size) { int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y; if (x == 0 || y == 0 || x > img_size.x - 2 || y > img_size.y - 2) return; //Lines creation/drawing draw_superpixel_boundry_shared(idx_img, sourceimg, outimg, img_size, x, y); } __global__ void Init_Cluster_Centers_device(const Vector4f* inimg, spixel_info* out_spixel, Vector2i map_size, Vector2i img_size, int spixel_size) { int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y; if (x > map_size.x - 1 || y > map_size.y - 1) return; //Cluster center creation init_cluster_centers_shared(inimg, out_spixel, map_size, img_size, spixel_size, x, y); } __global__ void Find_Center_Association_device(const Vector4f* inimg, const spixel_info* in_spixel_map, int* out_idx_img, Vector2i map_size, Vector2i img_size, int spixel_size, float weight, float max_xy_dist, float max_color_dist) { int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y; if (x > img_size.x - 1 || y > img_size.y - 1) return; find_center_association_shared(inimg, in_spixel_map, out_idx_img, map_size, img_size, spixel_size, weight, x, y,max_xy_dist,max_color_dist); } __global__ void Update_Cluster_Center_device(const Vector4f* inimg, const int* in_idx_img, spixel_info* accum_map, Vector2i map_size, Vector2i img_size, int spixel_size, int no_blocks_per_line) { int local_id = threadIdx.y * blockDim.x + threadIdx.x; __shared__ Vector4f color_shared[BLOCK_DIM*BLOCK_DIM]; __shared__ Vector2f xy_shared[BLOCK_DIM*BLOCK_DIM]; __shared__ int count_shared[BLOCK_DIM*BLOCK_DIM]; __shared__ bool should_add; color_shared[local_id] = Vector4f(0, 0, 0, 0); xy_shared[local_id] = Vector2f(0, 0); count_shared[local_id] = 0; should_add = false; __syncthreads(); int no_blocks_per_spixel = gridDim.z; int spixel_id = blockIdx.y * map_size.x + blockIdx.x; // compute the relative position in the search window int block_x = blockIdx.z % no_blocks_per_line; int block_y = blockIdx.z / no_blocks_per_line; int x_offset = block_x * BLOCK_DIM + threadIdx.x; int y_offset = block_y * BLOCK_DIM + threadIdx.y; if (x_offset < spixel_size * 3 && y_offset < spixel_size * 3) { // compute the start of the search window int x_start = blockIdx.x * spixel_size - spixel_size; int y_start = blockIdx.y * spixel_size - spixel_size; int x_img = x_start + x_offset; int y_img = y_start + y_offset; if (x_img >= 0 && x_img < img_size.x && y_img >= 0 && y_img < img_size.y) { int img_idx = y_img * img_size.x + x_img; if (in_idx_img[img_idx] == spixel_id) { color_shared[local_id] = inimg[img_idx]; xy_shared[local_id] = Vector2f(x_img, y_img); count_shared[local_id] = 1; should_add = true; } } } __syncthreads(); if (should_add) { if (local_id < 128) { color_shared[local_id] += color_shared[local_id + 128]; xy_shared[local_id] += xy_shared[local_id + 128]; count_shared[local_id] += count_shared[local_id + 128]; } __syncthreads(); if (local_id < 64) { color_shared[local_id] += color_shared[local_id + 64]; xy_shared[local_id] += xy_shared[local_id + 64]; count_shared[local_id] += count_shared[local_id + 64]; } __syncthreads(); 
if (local_id < 32) { color_shared[local_id] += color_shared[local_id + 32]; color_shared[local_id] += color_shared[local_id + 16]; color_shared[local_id] += color_shared[local_id + 8]; color_shared[local_id] += color_shared[local_id + 4]; color_shared[local_id] += color_shared[local_id + 2]; color_shared[local_id] += color_shared[local_id + 1]; xy_shared[local_id] += xy_shared[local_id + 32]; xy_shared[local_id] += xy_shared[local_id + 16]; xy_shared[local_id] += xy_shared[local_id + 8]; xy_shared[local_id] += xy_shared[local_id + 4]; xy_shared[local_id] += xy_shared[local_id + 2]; xy_shared[local_id] += xy_shared[local_id + 1]; count_shared[local_id] += count_shared[local_id + 32]; count_shared[local_id] += count_shared[local_id + 16]; count_shared[local_id] += count_shared[local_id + 8]; count_shared[local_id] += count_shared[local_id + 4]; count_shared[local_id] += count_shared[local_id + 2]; count_shared[local_id] += count_shared[local_id + 1]; } } __syncthreads(); if (local_id == 0) { int accum_map_idx = spixel_id * no_blocks_per_spixel + blockIdx.z; accum_map[accum_map_idx].center = xy_shared[0]; accum_map[accum_map_idx].color_info = color_shared[0]; accum_map[accum_map_idx].no_pixels = count_shared[0]; } } __global__ void Finalize_Reduction_Result_device(const spixel_info* accum_map, spixel_info* spixel_list, Vector2i map_size, int no_blocks_per_spixel) { int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y; if (x > map_size.x - 1 || y > map_size.y - 1) return; finalize_reduction_result_shared(accum_map, spixel_list, map_size, no_blocks_per_spixel, x, y); } __global__ void Enforce_Connectivity_device(const int* in_idx_img, int* out_idx_img, Vector2i img_size) { int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y; if (x > img_size.x - 1 || y > img_size.y - 1) return; supress_local_lable(in_idx_img, out_idx_img, img_size, x, y); }
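Update_Cluster_Center_device above accumulates the per-superpixel color, position, and pixel-count sums with a block-wide shared-memory reduction: explicit halving steps down to 64 threads, then an unrolled tail for the last 32 threads that relies on implicit warp synchronization. The standalone sketch below shows the same 256-thread pattern for a single float per thread; the kernel name, BLOCK_SIZE, and buffers are illustrative and not part of the original file, and the fully synchronized loop sidesteps the warp-synchronous assumption (which on recent GPU architectures would otherwise need volatile shared memory or __syncwarp() in the tail).

#define BLOCK_SIZE 256

// Minimal block-sum sketch: each block reduces BLOCK_SIZE inputs to one value.
__global__ void blockSumSketch(const float* in, float* blockSums, int n)
{
    __shared__ float s[BLOCK_SIZE];
    int tid = threadIdx.x;
    int gid = blockIdx.x * blockDim.x + tid;

    // load one element per thread, padding out-of-range threads with 0
    s[tid] = (gid < n) ? in[gid] : 0.0f;
    __syncthreads();

    // tree reduction; synchronizing at every step keeps the code correct
    // without warp-synchronous assumptions
    for (int stride = BLOCK_SIZE / 2; stride > 0; stride >>= 1) {
        if (tid < stride) s[tid] += s[tid + stride];
        __syncthreads();
    }

    // thread 0 holds the block's partial sum, analogous to the local_id == 0
    // write into accum_map above
    if (tid == 0) blockSums[blockIdx.x] = s[0];
}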
0e83c91f484b1e1c84f902c9a692530be822364c.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include "caffe/layers/crowd_data_layer.hpp" namespace caffe { template <typename Dtype> void CrowdDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Batch<Dtype>* batch = this->prefetch_full_.pop("Data layer prefetch queue empty"); // 1. Reshape to loaded crowd image and copy it to top[0]. top[0]->ReshapeLike(batch->data_); caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); // 2. Reshape to loaded density map and copy it to top[1]. top[1]->ReshapeLike(batch->label_); caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); DLOG(INFO) << "Prefetch copied"; // 3. Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(hipStreamSynchronize(hipStreamDefault)); this->prefetch_free_.push(batch); } INSTANTIATE_LAYER_GPU_FORWARD(CrowdDataLayer); } // namespace caffe
0e83c91f484b1e1c84f902c9a692530be822364c.cu
#include <vector> #include "caffe/layers/crowd_data_layer.hpp" namespace caffe { template <typename Dtype> void CrowdDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Batch<Dtype>* batch = this->prefetch_full_.pop("Data layer prefetch queue empty"); // 1. Reshape to loaded crowd image and copy it to top[0]. top[0]->ReshapeLike(batch->data_); caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); // 2. Reshape to loaded density map and copy it to top[1]. top[1]->ReshapeLike(batch->label_); caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); DLOG(INFO) << "Prefetch copied"; // 3. Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); this->prefetch_free_.push(batch); } INSTANTIATE_LAYER_GPU_FORWARD(CrowdDataLayer); } // namespace caffe
9866d319523a0a280eb4d0ae132adf5a3694b060.hip
// !!! This is a file automatically generated by hipify!!! /* Fractal code for CS 4380 / CS 5351 Copyright (c) 2019 Texas State University. All rights reserved. Redistribution in source or binary form, with or without modification, is *not* permitted. Use in source and binary forms, with or without modification, is only permitted for academic use in CS 4380 or CS 5351 at Texas State University. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Author: Martin Burtscher */ #include <cstdlib> #include <cstdio> #include <cmath> #include <hip/hip_runtime.h> static const int ThreadsPerBlock = 512; static __global__ void FractalKernel(const int width, const int start_frame, const int gpu_frames, unsigned char* const pic) { const double Delta = 0.006; const double xMid = 0.232977; const double yMid = 0.550325; // todo: use the GPU to compute the requested frames (base the code on the previous project) const int idx = threadIdx.x + blockIdx.x * blockDim.x + (start_frame * width * width); if (idx < ((gpu_frames + start_frame) * width * width)) { const int frame = idx / (width * width); const int row = (idx / width) % width; const int col = idx % width; const double delta = Delta * pow(0.98, frame); const double xMin = xMid - delta; const double yMin = yMid - delta; const double dw = 2.0 * delta / width; const double cy = yMin + row * dw; const double cx = xMin + col * dw; double x = cx; double y = cy; double x2, y2; int depth = 256; do { x2 = x * x; y2 = y * y; y = 2 * x * y + cy; x = x2 - y2 + cx; depth--; } while ((depth > 0) && ((x2 + y2) < 5.0)); pic[(frame - start_frame) * width * width + row * width + col] = (unsigned char)depth; } } unsigned char* GPU_Init(const int gpu_frames, const int width) { unsigned char* pic_d; if (hipSuccess != hipMalloc((void **)&pic_d, gpu_frames * width * width * sizeof(unsigned char))) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);} return pic_d; } void GPU_Exec(const int start_frame, const int gpu_frames, const int width, unsigned char* pic_d) { // todo: launch the kernel with ThreadsPerBlock and the appropriate number of blocks (do not wait for the kernel to finish) const int size = gpu_frames * width * width * sizeof(unsigned char); hipLaunchKernelGGL(( FractalKernel), dim3((size + ThreadsPerBlock - 1) / ThreadsPerBlock), dim3(ThreadsPerBlock), 0, 0, start_frame, gpu_frames, width, pic_d); } void GPU_Fini(const int gpu_frames, const int width, unsigned char* pic, unsigned char* pic_d) { // todo: copy the result from the device to the host and free the device memory if (hipSuccess != hipMemcpy(pic, pic_d, gpu_frames * width * width * sizeof(unsigned char), hipMemcpyDeviceToHost)) {fprintf(stderr, "copying from device failed\n"); exit(-1);} hipFree(pic_d); }
9866d319523a0a280eb4d0ae132adf5a3694b060.cu
/* Fractal code for CS 4380 / CS 5351 Copyright (c) 2019 Texas State University. All rights reserved. Redistribution in source or binary form, with or without modification, is *not* permitted. Use in source and binary forms, with or without modification, is only permitted for academic use in CS 4380 or CS 5351 at Texas State University. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Author: Martin Burtscher */ #include <cstdlib> #include <cstdio> #include <cmath> #include <cuda.h> static const int ThreadsPerBlock = 512; static __global__ void FractalKernel(const int width, const int start_frame, const int gpu_frames, unsigned char* const pic) { const double Delta = 0.006; const double xMid = 0.232977; const double yMid = 0.550325; // todo: use the GPU to compute the requested frames (base the code on the previous project) const int idx = threadIdx.x + blockIdx.x * blockDim.x + (start_frame * width * width); if (idx < ((gpu_frames + start_frame) * width * width)) { const int frame = idx / (width * width); const int row = (idx / width) % width; const int col = idx % width; const double delta = Delta * pow(0.98, frame); const double xMin = xMid - delta; const double yMin = yMid - delta; const double dw = 2.0 * delta / width; const double cy = yMin + row * dw; const double cx = xMin + col * dw; double x = cx; double y = cy; double x2, y2; int depth = 256; do { x2 = x * x; y2 = y * y; y = 2 * x * y + cy; x = x2 - y2 + cx; depth--; } while ((depth > 0) && ((x2 + y2) < 5.0)); pic[(frame - start_frame) * width * width + row * width + col] = (unsigned char)depth; } } unsigned char* GPU_Init(const int gpu_frames, const int width) { unsigned char* pic_d; if (cudaSuccess != cudaMalloc((void **)&pic_d, gpu_frames * width * width * sizeof(unsigned char))) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);} return pic_d; } void GPU_Exec(const int start_frame, const int gpu_frames, const int width, unsigned char* pic_d) { // todo: launch the kernel with ThreadsPerBlock and the appropriate number of blocks (do not wait for the kernel to finish) const int size = gpu_frames * width * width * sizeof(unsigned char); FractalKernel<<<(size + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>>(start_frame, gpu_frames, width, pic_d); } void GPU_Fini(const int gpu_frames, const int width, unsigned char* pic, unsigned char* pic_d) { // todo: copy the result from the device to the host and free the device memory if (cudaSuccess != cudaMemcpy(pic, pic_d, gpu_frames * width * width * sizeof(unsigned char), cudaMemcpyDeviceToHost)) {fprintf(stderr, "copying from device failed\n"); exit(-1);} cudaFree(pic_d); }
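The fractal assignment above exposes a three-call host API: GPU_Init allocates the device frame buffer, GPU_Exec launches FractalKernel without waiting, and GPU_Fini copies the frames back and frees device memory. A hypothetical driver (not part of the assignment files) could wire them together as below; the width, frame count, and absence of error checking are illustrative only. Note that GPU_Exec forwards (start_frame, gpu_frames, width) to a kernel declared as (width, start_frame, gpu_frames, pic), so the todo-marked launch would need its arguments reordered for this driver to produce correct frames.

#include <cstdlib>

// Prototypes of the helpers defined in the file above.
unsigned char* GPU_Init(const int gpu_frames, const int width);
void GPU_Exec(const int start_frame, const int gpu_frames, const int width, unsigned char* pic_d);
void GPU_Fini(const int gpu_frames, const int width, unsigned char* pic, unsigned char* pic_d);

int main()
{
    const int width = 512;   // assumed frame width/height
    const int frames = 64;   // assumed number of frames computed on the GPU

    unsigned char* pic = (unsigned char*)malloc((size_t)frames * width * width);
    unsigned char* pic_d = GPU_Init(frames, width);   // device buffer for all frames
    GPU_Exec(0, frames, width, pic_d);                // asynchronous kernel launch
    GPU_Fini(frames, width, pic, pic_d);              // blocking copy back + device free
    // ... write frames out, e.g. as BMP files ...
    free(pic);
    return 0;
}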
a991850f4b3ce2e6d7a9efff0bc60c5f095be157.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/detail/concatenate.cuh> #include <cudf/detail/gather.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/search.hpp> #include <cudf/detail/stream_compaction.hpp> #include <cudf/detail/unary.hpp> #include <cudf/dictionary/detail/encode.hpp> #include <cudf/dictionary/dictionary_factories.hpp> #include <cudf/dictionary/update_keys.hpp> #include <cudf/stream_compaction.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_view.hpp> #include <rmm/thrust_rmm_allocator.h> namespace cudf { namespace dictionary { namespace detail { /** * @brief Create a new dictionary column by adding the new keys elements * to the existing dictionary_column. * * ``` * Example: * d1 = {[a, b, c, d, f], {4, 0, 3, 1, 2, 2, 2, 4, 0}} * d2 = add_keys( d1, [d, b, e] ) * d2 is now {[a, b, c, d, e, f], [5, 0, 3, 1, 2, 2, 2, 5, 0]} * ``` * */ std::unique_ptr<column> add_keys( dictionary_column_view const& dictionary_column, column_view const& new_keys, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(), hipStream_t stream = 0) { CUDF_EXPECTS(!new_keys.has_nulls(), "Keys must not have nulls"); auto old_keys = dictionary_column.keys(); // [a,b,c,d,f] CUDF_EXPECTS(new_keys.type() == old_keys.type(), "Keys must be the same type"); // first, concatenate the keys together // [a,b,c,d,f] + [d,b,e] = [a,b,c,d,f,d,b,e] auto combined_keys = cudf::detail::concatenate( std::vector<column_view>{old_keys, new_keys}, rmm::mr::get_current_device_resource(), stream); // sort and remove any duplicates from the combined keys // drop_duplicates([a,b,c,d,f,d,b,e]) = [a,b,c,d,e,f] auto table_keys = cudf::detail::drop_duplicates(table_view{{*combined_keys}}, std::vector<size_type>{0}, // only one key column duplicate_keep_option::KEEP_FIRST, null_equality::EQUAL, mr, stream) ->release(); std::unique_ptr<column> keys_column(std::move(table_keys.front())); // create a map for the indices // lower_bound([a,b,c,d,e,f],[a,b,c,d,f]) = [0,1,2,3,5] auto map_indices = cudf::detail::lower_bound( table_view{{keys_column->view()}}, table_view{{old_keys}}, std::vector<order>{order::ASCENDING}, std::vector<null_order>{null_order::AFTER}, // should be no nulls here mr, stream); // now create the indices column -- map old values to the new ones // gather([4,0,3,1,2,2,2,4,0],[0,1,2,3,5]) = [5,0,3,1,2,2,2,5,0] column_view indices_view(dictionary_column.indices().type(), dictionary_column.size(), dictionary_column.indices().head(), nullptr, 0, dictionary_column.offset()); // the result may contain nulls if the input contains nulls // and the corresponding index is therefore invalid/undefined auto table_indices = cudf::detail::gather(table_view{{map_indices->view()}}, indices_view, cudf::detail::out_of_bounds_policy::IGNORE, cudf::detail::negative_index_policy::NOT_ALLOWED, mr, stream) ->release(); // The output of lower_bound is INT32 but we need to convert to 
unsigned indices. auto const indices_type = get_indices_type_for_size(keys_column->size()); auto indices_column = [&] { column_view gather_result = table_indices.front()->view(); auto const indices_size = gather_result.size(); // we can just use the lower-bound/gather data directly for UINT32 case if (indices_type.id() == type_id::UINT32) { auto contents = table_indices.front()->release(); return std::make_unique<column>(data_type{type_id::UINT32}, indices_size, std::move(*(contents.data.release())), rmm::device_buffer{0, stream, mr}, 0); } // otherwise we need to convert the gather result column_view cast_view(gather_result.type(), indices_size, gather_result.head(), nullptr, 0); return cudf::detail::cast(cast_view, indices_type, mr, stream); }(); // create new dictionary column with keys_column and indices_column // null mask has not changed return make_dictionary_column(std::move(keys_column), std::move(indices_column), copy_bitmask(dictionary_column.parent(), stream, mr), dictionary_column.null_count()); } } // namespace detail std::unique_ptr<column> add_keys(dictionary_column_view const& dictionary_column, column_view const& keys, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::add_keys(dictionary_column, keys, mr); } } // namespace dictionary } // namespace cudf
a991850f4b3ce2e6d7a9efff0bc60c5f095be157.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/detail/concatenate.cuh> #include <cudf/detail/gather.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/search.hpp> #include <cudf/detail/stream_compaction.hpp> #include <cudf/detail/unary.hpp> #include <cudf/dictionary/detail/encode.hpp> #include <cudf/dictionary/dictionary_factories.hpp> #include <cudf/dictionary/update_keys.hpp> #include <cudf/stream_compaction.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_view.hpp> #include <rmm/thrust_rmm_allocator.h> namespace cudf { namespace dictionary { namespace detail { /** * @brief Create a new dictionary column by adding the new keys elements * to the existing dictionary_column. * * ``` * Example: * d1 = {[a, b, c, d, f], {4, 0, 3, 1, 2, 2, 2, 4, 0}} * d2 = add_keys( d1, [d, b, e] ) * d2 is now {[a, b, c, d, e, f], [5, 0, 3, 1, 2, 2, 2, 5, 0]} * ``` * */ std::unique_ptr<column> add_keys( dictionary_column_view const& dictionary_column, column_view const& new_keys, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(), cudaStream_t stream = 0) { CUDF_EXPECTS(!new_keys.has_nulls(), "Keys must not have nulls"); auto old_keys = dictionary_column.keys(); // [a,b,c,d,f] CUDF_EXPECTS(new_keys.type() == old_keys.type(), "Keys must be the same type"); // first, concatenate the keys together // [a,b,c,d,f] + [d,b,e] = [a,b,c,d,f,d,b,e] auto combined_keys = cudf::detail::concatenate( std::vector<column_view>{old_keys, new_keys}, rmm::mr::get_current_device_resource(), stream); // sort and remove any duplicates from the combined keys // drop_duplicates([a,b,c,d,f,d,b,e]) = [a,b,c,d,e,f] auto table_keys = cudf::detail::drop_duplicates(table_view{{*combined_keys}}, std::vector<size_type>{0}, // only one key column duplicate_keep_option::KEEP_FIRST, null_equality::EQUAL, mr, stream) ->release(); std::unique_ptr<column> keys_column(std::move(table_keys.front())); // create a map for the indices // lower_bound([a,b,c,d,e,f],[a,b,c,d,f]) = [0,1,2,3,5] auto map_indices = cudf::detail::lower_bound( table_view{{keys_column->view()}}, table_view{{old_keys}}, std::vector<order>{order::ASCENDING}, std::vector<null_order>{null_order::AFTER}, // should be no nulls here mr, stream); // now create the indices column -- map old values to the new ones // gather([4,0,3,1,2,2,2,4,0],[0,1,2,3,5]) = [5,0,3,1,2,2,2,5,0] column_view indices_view(dictionary_column.indices().type(), dictionary_column.size(), dictionary_column.indices().head(), nullptr, 0, dictionary_column.offset()); // the result may contain nulls if the input contains nulls // and the corresponding index is therefore invalid/undefined auto table_indices = cudf::detail::gather(table_view{{map_indices->view()}}, indices_view, cudf::detail::out_of_bounds_policy::IGNORE, cudf::detail::negative_index_policy::NOT_ALLOWED, mr, stream) ->release(); // The output of lower_bound is INT32 but we need to convert to unsigned indices. 
auto const indices_type = get_indices_type_for_size(keys_column->size()); auto indices_column = [&] { column_view gather_result = table_indices.front()->view(); auto const indices_size = gather_result.size(); // we can just use the lower-bound/gather data directly for UINT32 case if (indices_type.id() == type_id::UINT32) { auto contents = table_indices.front()->release(); return std::make_unique<column>(data_type{type_id::UINT32}, indices_size, std::move(*(contents.data.release())), rmm::device_buffer{0, stream, mr}, 0); } // otherwise we need to convert the gather result column_view cast_view(gather_result.type(), indices_size, gather_result.head(), nullptr, 0); return cudf::detail::cast(cast_view, indices_type, mr, stream); }(); // create new dictionary column with keys_column and indices_column // null mask has not changed return make_dictionary_column(std::move(keys_column), std::move(indices_column), copy_bitmask(dictionary_column.parent(), stream, mr), dictionary_column.null_count()); } } // namespace detail std::unique_ptr<column> add_keys(dictionary_column_view const& dictionary_column, column_view const& keys, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::add_keys(dictionary_column, keys, mr); } } // namespace dictionary } // namespace cudf
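The comment block in add_keys walks through a small example: keys [a, b, c, d, f] extended with [d, b, e] become [a, b, c, d, e, f], and indices [4, 0, 3, 1, 2, 2, 2, 4, 0] are remapped to [5, 0, 3, 1, 2, 2, 2, 5, 0]. The host-side sketch below replays that example with plain STL calls, mirroring the concatenate -> drop_duplicates -> lower_bound -> gather sequence; it illustrates the index arithmetic only and is not cudf code.

#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<char> old_keys = {'a', 'b', 'c', 'd', 'f'};
    std::vector<char> new_keys = {'d', 'b', 'e'};

    // concatenate, sort, drop duplicates -> [a, b, c, d, e, f]
    std::vector<char> keys = old_keys;
    keys.insert(keys.end(), new_keys.begin(), new_keys.end());
    std::sort(keys.begin(), keys.end());
    keys.erase(std::unique(keys.begin(), keys.end()), keys.end());

    // lower_bound: position of each old key in the new key column -> [0, 1, 2, 3, 5]
    std::vector<int> map_indices;
    for (char k : old_keys)
        map_indices.push_back(
            (int)(std::lower_bound(keys.begin(), keys.end(), k) - keys.begin()));

    // gather: remap the dictionary's indices -> prints 5 0 3 1 2 2 2 5 0
    std::vector<int> old_indices = {4, 0, 3, 1, 2, 2, 2, 4, 0};
    for (int idx : old_indices) printf("%d ", map_indices[idx]);
    printf("\n");
    return 0;
}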
0dd0a0bef15d194b3c77b9960232044de3e84c20.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from dgemm_tesla_T_T_special.cu normal d -> s, Wed Sep 17 15:08:23 2014 */ #include "common_magma.h" #include "commonblas_s.h" /* * saxpy computes c += alpha*b, where b and c are 16-element vectors. */ static __device__ void saxpy( float alpha, const float* __restrict__ b, float* __restrict__ c ) { c[0] += alpha * b[0]; c[1] += alpha * b[1]; c[2] += alpha * b[2]; c[3] += alpha * b[3]; c[4] += alpha * b[4]; c[5] += alpha * b[5]; c[6] += alpha * b[6]; c[7] += alpha * b[7]; c[8] += alpha * b[8]; c[9] += alpha * b[9]; c[10] += alpha * b[10]; c[11] += alpha * b[11]; c[12] += alpha * b[12]; c[13] += alpha * b[13]; c[14] += alpha * b[14]; c[15] += alpha * b[15]; } /** Purpose: -------- This routine computes C = alpha * A^T*B^T + beta * C B is put into shared memory Parameters Used: blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4 This kernel is for matrices divisible by the corresponding blocking sizes. @ingroup magma_sblas3 ********************************************************************/ __global__ void sgemm_kernel_T_T_64_16_16_16_4_special( float* __restrict__ C, const float* __restrict__ A, const float* __restrict__ B, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta ) { __shared__ float Bb[16][17]; const int tx = threadIdx.x; const int ty = threadIdx.y; int iby = ((blockIdx.y + blockIdx.x) % (n/16))*16; const int idt = ty * 16 + tx; int ibx = blockIdx.x * 64+idt; //int iby = blockIdx.y * 16; A += ibx; B += tx + __mul24(iby+ty, ldb); C += __mul24(ibx, ldc) + iby; const float *Bend = B + k; float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { float Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]}; Bb[tx][ty+0 ] = B[0*ldb]; Bb[tx][ty+4 ] = B[4*ldb]; Bb[tx][ty+8 ] = B[8*ldb]; Bb[tx][ty+12] = B[12*ldb]; __syncthreads(); A += 4 * lda; saxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda]; saxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda]; saxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda]; saxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; saxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda]; saxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda]; saxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda]; saxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; saxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda]; saxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda]; saxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda]; saxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; saxpy( Ab[0], &Bb[12][0], Cb ); saxpy( Ab[1], &Bb[13][0], Cb ); saxpy( Ab[2], &Bb[14][0], Cb ); saxpy( Ab[3], &Bb[15][0], Cb ); B += 16; __syncthreads(); } while (B < Bend); #pragma unroll 16 for(int i = 0; i < 16; i++) { C[i] = alpha * Cb[i] + beta * C[i]; } } extern "C" void magmablas_sgemm_T_T_64_16_16_16_4_special( float *C, const float *A, const float *B, magma_int_t m, magma_int_t n, magma_int_t k, magma_int_t lda, magma_int_t ldb, magma_int_t ldc, float alpha, float beta ) { dim3 threads( 16, 4 ); dim3 grid( m/64, n/16 ); hipLaunchKernelGGL(( sgemm_kernel_T_T_64_16_16_16_4_special), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb, ldc, alpha, beta ); }
0dd0a0bef15d194b3c77b9960232044de3e84c20.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from dgemm_tesla_T_T_special.cu normal d -> s, Wed Sep 17 15:08:23 2014 */ #include "common_magma.h" #include "commonblas_s.h" /* * saxpy computes c += alpha*b, where b and c are 16-element vectors. */ static __device__ void saxpy( float alpha, const float* __restrict__ b, float* __restrict__ c ) { c[0] += alpha * b[0]; c[1] += alpha * b[1]; c[2] += alpha * b[2]; c[3] += alpha * b[3]; c[4] += alpha * b[4]; c[5] += alpha * b[5]; c[6] += alpha * b[6]; c[7] += alpha * b[7]; c[8] += alpha * b[8]; c[9] += alpha * b[9]; c[10] += alpha * b[10]; c[11] += alpha * b[11]; c[12] += alpha * b[12]; c[13] += alpha * b[13]; c[14] += alpha * b[14]; c[15] += alpha * b[15]; } /** Purpose: -------- This routine computes C = alpha * A^T*B^T + beta * C B is put into shared memory Parameters Used: blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4 This kernel is for matrices divisible by the corresponding blocking sizes. @ingroup magma_sblas3 ********************************************************************/ __global__ void sgemm_kernel_T_T_64_16_16_16_4_special( float* __restrict__ C, const float* __restrict__ A, const float* __restrict__ B, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta ) { __shared__ float Bb[16][17]; const int tx = threadIdx.x; const int ty = threadIdx.y; int iby = ((blockIdx.y + blockIdx.x) % (n/16))*16; const int idt = ty * 16 + tx; int ibx = blockIdx.x * 64+idt; //int iby = blockIdx.y * 16; A += ibx; B += tx + __mul24(iby+ty, ldb); C += __mul24(ibx, ldc) + iby; const float *Bend = B + k; float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { float Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]}; Bb[tx][ty+0 ] = B[0*ldb]; Bb[tx][ty+4 ] = B[4*ldb]; Bb[tx][ty+8 ] = B[8*ldb]; Bb[tx][ty+12] = B[12*ldb]; __syncthreads(); A += 4 * lda; saxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda]; saxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda]; saxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda]; saxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; saxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda]; saxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda]; saxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda]; saxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; saxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda]; saxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda]; saxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda]; saxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; saxpy( Ab[0], &Bb[12][0], Cb ); saxpy( Ab[1], &Bb[13][0], Cb ); saxpy( Ab[2], &Bb[14][0], Cb ); saxpy( Ab[3], &Bb[15][0], Cb ); B += 16; __syncthreads(); } while (B < Bend); #pragma unroll 16 for(int i = 0; i < 16; i++) { C[i] = alpha * Cb[i] + beta * C[i]; } } extern "C" void magmablas_sgemm_T_T_64_16_16_16_4_special( float *C, const float *A, const float *B, magma_int_t m, magma_int_t n, magma_int_t k, magma_int_t lda, magma_int_t ldb, magma_int_t ldc, float alpha, float beta ) { dim3 threads( 16, 4 ); dim3 grid( m/64, n/16 ); sgemm_kernel_T_T_64_16_16_16_4_special<<< grid, threads, 0, magma_stream >>> ( C, A, B, m, n, k, lda, ldb, ldc, alpha, beta ); }
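Per its header comment, sgemm_kernel_T_T_64_16_16_16_4_special computes C = alpha * A^T * B^T + beta * C and assumes m, n, and k are multiples of the 64/16/16 blocking (the wrapper launches an m/64 x n/16 grid). The routine below is a plain CPU reference for that operation under the usual column-major BLAS convention (A is k x m with lda >= k, B is n x k with ldb >= n, C is m x n with ldc >= m); it is an independent sketch for checking results, not MAGMA code, and the tiled kernel's own memory traversal is organized differently.

// CPU reference for C = alpha * A^T * B^T + beta * C (column-major storage).
static void sgemm_tt_reference(int m, int n, int k,
                               float alpha, const float* A, int lda,
                               const float* B, int ldb,
                               float beta, float* C, int ldc)
{
    for (int j = 0; j < n; ++j) {
        for (int i = 0; i < m; ++i) {
            float acc = 0.0f;
            for (int p = 0; p < k; ++p)
                acc += A[p + i * lda]     // A^T(i,p) = A(p,i)
                     * B[j + p * ldb];    // B^T(p,j) = B(j,p)
            C[i + j * ldc] = alpha * acc + beta * C[i + j * ldc];
        }
    }
}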
29f4860ba99b555ac908a03e5af0944ab9ff3579.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <device_launch_parameters.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <iostream> #include <math.h> #include <ctype.h> #include <hip/hip_runtime.h> #include <time.h> #define DATAMB(bytes) (bytes/1024/1024) #define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0)) #define CEIL(a,b) ((a+b-1)/b) typedef unsigned char uch; typedef unsigned long ul; typedef unsigned int ui; struct ImgProp{ int Hpixels; int Vpixels; uch HeaderInfo[54]; ul Hbytes; } ip; typedef struct{ ui i; ui j; }pixelCoords; // buffers for images uch *TheImg, *CopyImg; uch *GPUImg, *GPUCopyImg, *GPUptr, *GPUResult, *NoiseMap, *KernelIndices; double *GPU_PREV_BW, *GPU_CURR_BW; // noisy pixel locations pixelCoords *NoisyPixelCoords; // mutex variables for tracking noisy pixels ui *GlobalMax, *GlobalMin, *NumNoisyPixelsGPU, *GPUmutexes, *GPU_SAD; #define IPHB ip.Hbytes #define IPH ip.Hpixels #define IPV ip.Vpixels #define IMAGESIZE (IPHB*IPV) #define IMAGEPIX (IPH*IPV) // Kernel that locates potentially noisy pixels in an image by using impulse noise detection __global__ void findNoisyPixels(pixelCoords *locations, uch *ImgSrc, uch *noiseMap, ui*globalMax, ui*globalMin, ui*ListLength, ui Hpixels, ui Vpixels) { // 3x3 matrix of pixels around current pixel //uch mat3x3[8]; // 3 x 3 - 1 = 8 // threads/blocks info and IDs ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; //ui NumBlocks = gridDim.x; ui BlkPerRow = CEIL(Hpixels, ThrPerBlk); ui MYrow = MYbid / BlkPerRow; ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk; // leave buffer frame around image to avoid 8 edge cases for convolutions if (MYcol > Hpixels-4 || MYcol < 3 || MYrow > Vpixels-4 || MYrow < 3) return; ui MYpixIndex = MYrow * Hpixels + MYcol; // pixel index in B&W image uch pIJ = ImgSrc[MYpixIndex]; uch max = 0; uch min = 255; uch curr; uch nMax; uch nMin; uch oldMax; uch oldMin; int row; int col; int indx; // find min and max pixel intensities in current window for (int i = -1; i <= 1; i++){ for (int j = -1; j <= 1; j++){ if(!(j==0 && i==0)){ row = MYrow + i; col = MYcol + j; indx = row*Hpixels + col; curr = ImgSrc[indx]; if(curr > max) max = curr; if(curr < min) min = curr; } } } // atomically update global max and min pixel intensities oldMax = atomicMax(globalMax, (ui)max); oldMin = atomicMin(globalMin, (ui)min); // if the old max wasn't updated, then max is "salt" noise // otherwise, we must assume that 255 is "salt" noise if(oldMax == max) nMax = max; else nMax = 255; // if the old min wasn't updated, then min is "pepper" noise // otherwise, we must assume that 0 is "pepper" noise if(oldMin == min) nMin = min; else nMin = 0; // if the current pixel intensity is equal to min or max, // then it is likely s&p noise. Mark as such. 
if(pIJ == nMin || pIJ == nMax){ int listIndex = atomicAdd(ListLength, (ui)1); locations[listIndex].i = MYrow; locations[listIndex].j = MYcol; noiseMap[MYpixIndex] = 0; } // if(pIJ == 255 || pIJ == 0){ // ui listIndex = atomicAdd(ListLength, (ui)1); // locations[listIndex].i = MYrow; // locations[listIndex].j = MYcol; // noiseMap[MYpixIndex] = 0; // } } // __device__ // uch Horz[5][5] = { { 0, 0, 0, 0, 0 }, // { 1, 1, 1, 1, 1 }, // { 1, 1, 0, 1, 1 }, // { 1, 1, 1, 1, 1 }, // { 0, 0, 0, 0, 0 } }; // __device__ // uch Vert[5][5] = { { 0, 1, 1, 1, 0 }, // { 0, 1, 1, 1, 0 }, // { 0, 1, 0, 1, 0 }, // { 0, 1, 1, 1, 0 }, // { 0, 1, 1, 1, 0 } }; // __device__ // uch mask45[7][7]={ {0, 0, 0, 0, 1, 0, 0}, // {0, 0, 0, 1, 1, 1, 0}, // {0, 0, 1, 1, 1, 1, 1}, // {0, 1, 1, 0, 1, 1, 0}, // {1, 1, 1, 1, 1, 0, 0}, // {0, 1, 1, 1, 0, 0, 0}, // {0, 0, 1, 0, 0, 0, 0}}; // __device__ // uch mask135[7][7]={ {0, 0, 1, 0, 0, 0, 0}, // {0, 1, 1, 1, 0, 0, 0}, // {1, 1, 1, 1, 1, 0, 0}, // {0, 1, 1, 0, 1, 1, 0}, // {0, 0, 1, 1, 1, 1, 1}, // {0, 0, 0, 1, 1, 1, 0}, // {0, 0, 0, 0, 1, 0, 0}}; //3x3 standard mask __constant__ double mask0[3][3] = { {0.1036, 0.1464, 0.1036}, {0.1464, 0, 0.1464}, {0.1036, 0.1464, 0.1036}}; // horizontal 5x5 mask __constant__ double mask1[5][5] = { {0, 0, 0, 0, 0 }, {0.0465, 0.0735, 0.1040, 0.0735, 0.0465 }, {0.0520, 0.1040, 0, 0.1040, 0.0520 }, {0.0465, 0.0735, 0.1040, 0.0735, 0.0465 }, {0, 0, 0, 0, 0 }}; //vertical 5x5 mask __constant__ double mask2[5][5] = { {0, 0.0465, 0.0520, 0.0465, 0}, {0, 0.0735, 0.1040, 0.0735, 0}, {0, 0.1040, 0, 0.1040, 0}, {0, 0.0735, 0.1040, 0.0735, 0}, {0, 0.0465, 0.0520, 0.0465, 0}}; //45 degree 7x7 mask __constant__ double mask3[7][7] = { {0, 0, 0, 0, 0.0251, 0, 0 }, {0, 0, 0, 0.0397, 0.0355, 0.0281, 0 }, {0, 0, 0.0562, 0.0794, 0.0562, 0.0355, 0.0251 }, {0, 0.0397, 0.0794, 0, 0.0794, 0.0397, 0 }, {0.0251, 0.0355, 0.0562, 0.0794, 0.0562, 0, 0 }, {0, 0.0281, 0.0355, 0.0397, 0, 0, 0 }, {0, 0, 0.0251, 0, 0, 0, 0 }}; //135 degree 7x7 mask __constant__ double mask4[7][7] = { {0, 0, 0.0251, 0, 0, 0, 0 }, {0, 0.0281, 0.0355, 0.0397, 0, 0, 0 }, {0.0251, 0.0355, 0.0562, 0.0794, 0.0562, 0, 0 }, {0, 0.0397, 0.0794, 0, 0.0794, 0.0397, 0 }, {0, 0, 0.0562, 0.0794, 0.0562, 0.0355, 0.0251 }, {0, 0, 0, 0.0397, 0.0355, 0.0281, 0 }, {0, 0, 0, 0, 0.0251, 0, 0 }}; // Kernel that determines appropriate inpainting mask to use based on surrounding noiseless pixels __global__ void determineMasks(pixelCoords *locations, uch *ImgSrc, uch *noiseMap, uch *kernelIndices, ui ListLength, ui Hpixels, ui R) { // threads/blocks info and IDs ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; // ensure not out-of-bounds if (MYgtid > ListLength) return; // masked arrays of those pixels denoted as noise-free uch noiseFreeLists[60]; uch *maskA = noiseFreeLists; uch *maskB = maskA+14; uch *maskC = maskB+14; uch *maskD = maskC+14; uch *listLengths = maskD+14; uch *currMask; uch currListLength; // control and tracking variables int i, j, row, col, indx, maskAIndx=0, maskBIndx=0, maskCIndx=0, maskDIndx=0, chosenMask; float minStdDev=1000000.0, currStdDev, sum = 0.0, mean, standardDeviation = 0.0; // obtain current noisy pixel indices pixelCoords currCoord = locations[MYgtid]; ui MYrow = currCoord.i; ui MYcol = currCoord.j; // iterate through both 5x5 masks to find values of noise-free pixels for (i = -2; i <= 2; i++){ for (j = -2; j <= 2; j++){ // find current absolute index row = MYrow + i; col = MYcol + j; indx = row*Hpixels + col; // if the 
current pixel is noise-free AND if(noiseMap[indx]){ // if the current 5x5 horizontal mask cell is set to TRUE if(mask1[i+2][j+2]) { // obtain noise free pixel and add to list maskA[maskAIndx] = ImgSrc[indx]; maskAIndx++; } // if the current 5x5 vertical mask cell is set to TRUE if(mask2[i+2][j+2]) { // obtain noise free pixel and add to list maskB[maskBIndx] = ImgSrc[indx]; maskBIndx++; } } } } // iterate through both 7x7 masks to find values of noise-free pixels for (i = -3; i <= 3; i++){ for ( j = -3; j <= 3; j++){ // find current absolute index row = MYrow + i; col = MYcol + j; indx = row*Hpixels + col; // if the current pixel is noise-free AND if(noiseMap[indx]){ // if the current 7x7 45 degree mask cell is set to TRUE if(mask3[i+3][j+3]) { // obtain noise free pixel and add to list maskC[maskCIndx] = ImgSrc[indx]; maskCIndx++; } // if the current 7x7 135 degree mask cell is set to TRUE if(mask4[i+3][j+3]) { // obtain noise free pixel and add to list maskD[maskDIndx] = ImgSrc[indx]; maskDIndx++; } } } } // if the amounts of noise free pixels in any of the directional masks is // below threshold R, then we use 3x3 convolution // this helps to mitigate promoting false edges if(maskAIndx < R || maskBIndx < R || maskCIndx < R || maskDIndx < R) chosenMask = 0; else { // assign list lengths for smoother access listLengths[0] = maskAIndx; listLengths[1] = maskBIndx; listLengths[2] = maskCIndx; listLengths[3] = maskDIndx; // find the mask index (from 1 to 4) of minimum standard deviation for(i=0; i < 4; i++) { currListLength = listLengths[i]; currMask = maskA+(i*14); // first find mean of array for(j = 0; j < currListLength; j++) { sum += (float)currMask[j]; } mean = sum/currListLength; // then find sum of individual deviations for(j = 0; j < currListLength; j++) standardDeviation += pow((float)currMask[j] - mean, 2); // final StdDev is normalized by list length currStdDev = standardDeviation / currListLength; if(currStdDev < minStdDev) { chosenMask = i+1; minStdDev = currStdDev; } } } // assign the mask index that was chosen kernelIndices[MYgtid] = chosenMask; } // convolutions based on kernel indices __global__ void Convolute(double *ImgCurr, double *ImgBW, pixelCoords *pc, uch *kernalI, ui numNoisy, ui Hpixels) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; if (MYgtid >= numNoisy) return; // index out of range // current noisy pixel coordinates ui i=pc[MYgtid].i,j=pc[MYgtid].j,m=kernalI[MYgtid]; // absolute pixel index ui MYpixIndex = i * Hpixels + j; int a,b,row,col,index; double C = 0.0; switch(m) { case 0: for (a = -1; a <= 1; a++){ for (b = -1; b <= 1; b++){ row = i + a; col = j + b; index = row*Hpixels + col; C += (ImgBW[index] * mask0[a + 1][b + 1]); } } ImgCurr[MYpixIndex] = C; break; case 1: for (a = -2; a <= 2; a++){ for (b = -2; b <= 2; b++){ row = i + a; col = j + b; index = row*Hpixels + col; C += (ImgBW[index] * mask1[a + 2][b + 2]); } } ImgCurr[MYpixIndex] = C; break; case 2: for (a = -2; a <= 2; a++){ for (b = -2; b <= 2; b++){ row = i + a; col = j + b; index = row*Hpixels + col; C += (ImgBW[index] * mask2[a + 2][b + 2]); } } ImgCurr[MYpixIndex] = C; break; case 3: for (a = -3; a <= 3; a++){ for (b = -3; b <= 3; b++){ row = i + a; col = j + b; index = row*Hpixels + col; C += (ImgBW[index] * mask3[a + 3][b + 3]); } } ImgCurr[MYpixIndex] = C; break; default: for (a = -3; a <= 3; a++){ for (b = -3; b <= 3; b++){ row = i + a; col = j + b; index = row*Hpixels + col; C += (ImgBW[index] * mask4[a + 3][b + 3]); } } 
// assign convolution sum to current noisy pixel index ImgCurr[MYpixIndex] = C; break; } } // sum of absolute differences, reconstruction progress tracking mechanism __global__ void SAD(ui *sad, double *prev, double *current, pixelCoords *pc, ui numNoisy, ui Hpixels, ui Vpixels) { // thread IDs ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; if (MYgtid >= numNoisy) return; // index out of range ui i=pc[MYgtid].i, j=pc[MYgtid].j; // current noisy pixel coordinates ui MYpixIndex = i * Hpixels + j; // absolute index // difference of old and updated pixel values, round to nearest integer int absDiff=(int)(prev[MYpixIndex]-current[MYpixIndex]+0.5); // absolute difference if(absDiff<0) absDiff = absDiff*(-1); atomicAdd(sad, (ui)absDiff); // update global sum } // Kernel that calculates a B&W image from an RGB image // resulting image has a double type for each pixel position __global__ void BWKernel(uch *ImgBW, uch *ImgGPU, double *ImgfpBW, ui Hpixels) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; double R, G, B; //ui NumBlocks = gridDim.x; ui BlkPerRow = CEIL(Hpixels, ThrPerBlk); ui RowBytes = (Hpixels * 3 + 3) & (~3); ui MYrow = MYbid / BlkPerRow; ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk; if (MYcol >= Hpixels) return; // col out of range ui MYsrcIndex = MYrow * RowBytes + 3 * MYcol; ui MYpixIndex = MYrow * Hpixels + MYcol; B = (double)ImgGPU[MYsrcIndex]; G = (double)ImgGPU[MYsrcIndex + 1]; R = (double)ImgGPU[MYsrcIndex + 2]; ImgBW[MYpixIndex] = (uch)((R+G+B)/3.0); ImgfpBW[MYpixIndex] = (R+G+B)/3.0; } // Kernel that calculates a RGB (grayscale) version of B&W image for filing as Windows BMP __global__ void RGBKernel(uch *ImgRGB, double *ImgBW, ui Hpixels) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; //ui NumBlocks = gridDim.x; ui BlkPerRow = CEIL(Hpixels, ThrPerBlk); ui RowBytes = (Hpixels * 3 + 3) & (~3); ui MYrow = MYbid / BlkPerRow; ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk; if (MYcol >= Hpixels) return; // col out of range ui MYdstIndex = MYrow * RowBytes + 3 * MYcol; ui MYpixIndex = MYrow * Hpixels + MYcol; uch pixInt = ImgBW[MYpixIndex]; ImgRGB[MYdstIndex] = pixInt; ImgRGB[MYdstIndex+1] = pixInt; ImgRGB[MYdstIndex+2] = pixInt; } // Kernel that copies an image from one part of the // GPU memory (ImgSrc) to another (ImgDst) __global__ void NoisyPixCopy(double *NPDst, double *ImgSrc, pixelCoords *pc, ui NoisyPixelListLength, ui Hpixels) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; if (MYgtid >= NoisyPixelListLength) return;// outside the allocated memory pixelCoords currCoord = pc[MYgtid]; ui srcIndex = currCoord.i * Hpixels + currCoord.j; NPDst[srcIndex] = ImgSrc[srcIndex]; } // Kernel that copies an image from one part of the // GPU memory (ImgSrc) to another (ImgDst) __global__ void PixCopy(double *ImgDst, double *ImgSrc, ui FS) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; if (MYgtid > FS) return; // outside the allocated memory ImgDst[MYgtid] = ImgSrc[MYgtid]; } /* // helper function that wraps CUDA API calls, reports any error and exits void chkCUDAErr(hipError_t error_id) { if (error_id != hipSuccess) { printf("CUDA ERROR :::%\n", hipGetErrorString(error_id)); exit(EXIT_FAILURE); } } */ // Read a 24-bit/pixel BMP file into a 1D linear 
array. // Allocate memory to store the 1D image and return its pointer. uch *ReadBMPlin(char* fn) { static uch *Img; FILE* f = fopen(fn, "rb"); if (f == NULL){ printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); } uch HeaderInfo[54]; fread(HeaderInfo, sizeof(uch), 54, f); // read the 54-byte header // extract image height and width from header int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width; int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height; int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes; //save header for re-use memcpy(ip.HeaderInfo, HeaderInfo,54); //printf("\n Input File name: %17s (%u x %u) File Size=%u", fn, // ip.Hpixels, ip.Vpixels, IMAGESIZE); // allocate memory to store the main image (1 Dimensional array) Img = (uch *)malloc(IMAGESIZE); if (Img == NULL) return Img; // Cannot allocate memory // read the image from disk fread(Img, sizeof(uch), IMAGESIZE, f); fclose(f); return Img; } // Write the 1D linear-memory stored image into file. void WriteBMPlin(uch *Img, char* fn) { FILE* f = fopen(fn, "wb"); if (f == NULL){ printf("\n\nFILE CREATION ERROR: %s\n\n", fn); exit(1); } //write header fwrite(ip.HeaderInfo, sizeof(uch), 54, f); //write data fwrite(Img, sizeof(uch), IMAGESIZE, f); printf("\nOutput File name: %17s (%u x %u) File Size=%u", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE); fclose(f); } int main(int argc, char **argv) { float /*totalTime, tfrCPUtoGPU, tfrGPUtoCPU,*/ kernelExecutionTime; // GPU code run times hipError_t cudaStatus; hipEvent_t time1, time2;//, time3, time4; char InputFileName[255], OutputFileName[255], ProgName[255]; ui BlkPerRow, ThrPerBlk=256, NumBlocks, /* GPUDataTransfer,*/ NumBlocksNP; hipDeviceProp_t GPUprop; ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk; char SupportedBlocks[100]; ui GPUtotalBufferSize, R = 5, T = 5, NumNoisyPixelsCPU, mutexInit[4] = {0, 255, 0, 0}; ui CPU_SAD; strcpy(ProgName, "randNoiseRemoval"); switch (argc){ case 6: ThrPerBlk = atoi(argv[5]); case 5: R = atoi(argv[4]); case 4: T = atoi(argv[3]); case 3: strcpy(InputFileName, argv[1]); strcpy(OutputFileName, argv[2]); break; default: printf("\n\nUsage: %s InputFilename OutputFilename [T] [R] [ThrPerBlk]", ProgName); printf("\n\nExample: %s Astronaut.bmp Output.bmp", ProgName); printf("\n\nExample: %s Astronaut.bmp Output.bmp 5", ProgName); printf("\n\nExample: %s Astronaut.bmp Output.bmp 5 5",ProgName); printf("\n\nExample: %s Astronaut.bmp Output.bmp 5 5 128",ProgName); printf("\n\nT = reconstruction threshold, R = mask selection threshold\n\n"); exit(EXIT_FAILURE); } if ((ThrPerBlk < 32) || (ThrPerBlk > 1024)) { printf("Invalid ThrPerBlk option '%u'. Must be between 32 and 1024. \n", ThrPerBlk); exit(EXIT_FAILURE); } // Create CPU memory to store the input and output images TheImg = ReadBMPlin(InputFileName); // Read the input image if memory can be allocated if (TheImg == NULL){ printf("Cannot allocate memory for the input image...\n"); exit(EXIT_FAILURE); } CopyImg = (uch *)malloc(IMAGESIZE); if (CopyImg == NULL){ free(TheImg); printf("Cannot allocate memory for the input image...\n"); exit(EXIT_FAILURE); } // Choose which GPU to run on, change this on a multi-GPU system. int NumGPUs = 0; hipGetDeviceCount(&NumGPUs); if (NumGPUs == 0){ printf("\nNo CUDA Device is available\n"); exit(EXIT_FAILURE); } cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! 
Do you have a CUDA-capable GPU installed?"); exit(EXIT_FAILURE); } hipGetDeviceProperties(&GPUprop, 0); SupportedKBlocks = (ui)GPUprop.maxGridSize[0] * (ui)GPUprop.maxGridSize[1] * (ui)GPUprop.maxGridSize[2] / 1024; SupportedMBlocks = SupportedKBlocks / 1024; sprintf(SupportedBlocks, "%u %c", (SupportedMBlocks >= 5) ? SupportedMBlocks : SupportedKBlocks, (SupportedMBlocks >= 5) ? 'M' : 'K'); MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock; hipEventCreate(&time1); hipEventCreate(&time2); // hipEventCreate(&time3); // hipEventCreate(&time4); /* >>> GPU STORAGE DETAILS >>> GPUImage: IMAGESIZE GPUCopyImage(BW) : IMAGEPIX NoisyPixelCoords: IMAGEPIX*sizeof(pixelCoords) NoiseMap : IMAGEPIX KernelIndices : IMAGEPIX GlobalMax : sizeof(ui) GlobalMin : sizeof(ui) NumNoisyPixelsGPU : sizeof(ui) GPU_PREV_BW : sizeof(double) * IMAGEPIX GPU_CURR_BW : sizeof(double) * IMAGEPIX GPU_SAD : sizeof(ui) *********************** */ // allocate sufficient memory on the GPU to hold all above items GPUtotalBufferSize = IMAGESIZE+(IMAGEPIX*sizeof(pixelCoords))+IMAGEPIX*3+sizeof(ui)*4+2*(sizeof(double)*IMAGEPIX); cudaStatus = hipMalloc((void**)&GPUptr, GPUtotalBufferSize); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed! Can't allocate GPU memory for buffers"); exit(EXIT_FAILURE); } // setup buffer pointers for functions GPUImg = (uch *)GPUptr; GPUCopyImg = GPUImg + IMAGESIZE; NoiseMap = GPUCopyImg + IMAGEPIX; // add the previous image/array of noisy pixel intensities KernelIndices = NoiseMap + IMAGEPIX; NoisyPixelCoords = (pixelCoords*)(KernelIndices + IMAGEPIX); GPU_PREV_BW = (double*)(NoisyPixelCoords+IMAGEPIX); GPU_CURR_BW = GPU_PREV_BW + IMAGEPIX; GlobalMax = (ui*)(GPU_CURR_BW + IMAGEPIX); GlobalMin = GlobalMax+1; NumNoisyPixelsGPU = GlobalMin+1; GPU_SAD = NumNoisyPixelsGPU+1; // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(GPUImg, TheImg, IMAGESIZE, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy for input image CPU to GPU failed!"); exit(EXIT_FAILURE); } // Copy mutex initializations from CPU to GPU cudaStatus = hipMemcpy(GlobalMax, mutexInit, 4*sizeof(ui), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy for mutex initializations CPU to GPU failed!"); exit(EXIT_FAILURE); } // assume pixels are not noisy by default cudaStatus = hipMemset (NoiseMap, 1, IMAGEPIX ); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset for Noise Map failed!"); exit(EXIT_FAILURE); } hipEventRecord(time1, 0); // Time stamp at the start of the GPU transfer BlkPerRow = CEIL(ip.Hpixels, ThrPerBlk); NumBlocks = IPV*BlkPerRow; hipLaunchKernelGGL(( BWKernel) , dim3(NumBlocks), dim3(ThrPerBlk) , 0, 0, GPUCopyImg, GPUImg, GPU_CURR_BW, IPH); cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "\n\n hipDeviceSynchronize for B&WKernel returned error code %d after launching the kernel!\n", cudaStatus); exit(EXIT_FAILURE); } hipLaunchKernelGGL(( findNoisyPixels) , dim3(NumBlocks), dim3(ThrPerBlk) , 0, 0, NoisyPixelCoords, GPUCopyImg, NoiseMap, GlobalMax, GlobalMin, NumNoisyPixelsGPU, IPH, IPV); // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "\n\ncudaDeviceSynchronize for findNoisyPixels returned error code %d after launching the kernel!\n", cudaStatus); exit(EXIT_FAILURE); } //hipEventRecord(time3, 0); cudaStatus = hipMemcpy(&NumNoisyPixelsCPU, NumNoisyPixelsGPU, sizeof(ui), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy of NumNoisyPixels, GPU to CPU failed!"); exit(EXIT_FAILURE); } // only schedule as many threads are needed for NoisyPixelListLength NumBlocksNP = CEIL(NumNoisyPixelsCPU, ThrPerBlk); hipLaunchKernelGGL(( determineMasks) , dim3(NumBlocksNP), dim3(ThrPerBlk) , 0, 0, NoisyPixelCoords, GPUCopyImg, NoiseMap, KernelIndices, NumNoisyPixelsCPU, IPH, R); cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "\n\ncudaDeviceSynchronize for determineMasks returned error code %d after launching the kernel!\n", cudaStatus); exit(EXIT_FAILURE); } hipLaunchKernelGGL(( PixCopy) , dim3(NumBlocks), dim3(ThrPerBlk) , 0, 0, GPU_PREV_BW, GPU_CURR_BW, IMAGEPIX); cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "\n\ncudaDeviceSynchronize for PixCopy returned error code %d after launching the kernel!\n", cudaStatus); exit(EXIT_FAILURE); } int t=0; // progress tracking do{ // reset SAD (sum of absolute pixel differences) cudaStatus = hipMemset (GPU_SAD, 0, sizeof(ui) ); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset for GPU_SAD failed!"); exit(EXIT_FAILURE); } hipLaunchKernelGGL(( Convolute) , dim3(NumBlocksNP), dim3(ThrPerBlk) , 0, 0, GPU_CURR_BW, GPU_PREV_BW, NoisyPixelCoords, KernelIndices, NumNoisyPixelsCPU, IPH); cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "\n\n hipDeviceSynchronize for Convolute returned error code %d after launching the kernel!\n", cudaStatus); exit(EXIT_FAILURE); } hipLaunchKernelGGL(( SAD) , dim3(NumBlocksNP), dim3(ThrPerBlk) , 0, 0, GPU_SAD, GPU_PREV_BW, GPU_CURR_BW, NoisyPixelCoords, NumNoisyPixelsCPU, IPH, IPV); cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "\n\n hipDeviceSynchronize for SAD returned error code %d after launching the kernel!\n", cudaStatus); exit(EXIT_FAILURE); } hipLaunchKernelGGL(( NoisyPixCopy) , dim3(NumBlocksNP), dim3(ThrPerBlk) , 0, 0, GPU_PREV_BW, GPU_CURR_BW, NoisyPixelCoords, NumNoisyPixelsCPU, IPH); cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "\n\n hipDeviceSynchronize for NoisyPixCopy returned error code %d after launching the kernel!\n", cudaStatus); exit(EXIT_FAILURE); } // CudaMemcpy the SAD from GPU to CPU here cudaStatus = hipMemcpy(&CPU_SAD, GPU_SAD, sizeof(ui), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy of SAD from GPU to CPU failed!"); exit(EXIT_FAILURE); } t++; } while(CPU_SAD > T); // must convert floating point B&W back to unsigned char formats hipLaunchKernelGGL(( RGBKernel) , dim3(NumBlocks), dim3(ThrPerBlk) , 0, 0, GPUImg, GPU_CURR_BW, IPH); GPUResult = GPUImg; hipEventRecord(time2, 0); // Time stamp after the CPU --> GPU tfr is done //GPUDataTransfer = GPUtotalBufferSize; //Copy output (results) from GPU buffer to host (CPU) memory. 
// cudaStatus = hipMemcpy(CopyImg, GPUResult, IMAGESIZE, hipMemcpyDeviceToHost); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipMemcpy GPU to CPU failed!"); // exit(EXIT_FAILURE); // } //hipEventRecord(time4, 0); hipEventSynchronize(time1); hipEventSynchronize(time2); //hipEventSynchronize(time3); //hipEventSynchronize(time4); //hipEventElapsedTime(&totalTime, time1, time4); //hipEventElapsedTime(&tfrCPUtoGPU, time1, time2); hipEventElapsedTime(&kernelExecutionTime, time1, time2); //hipEventElapsedTime(&tfrGPUtoCPU, time3, time4); cudaStatus = hipDeviceSynchronize(); //checkError(hipGetLastError()); // screen for errors in kernel launches if (cudaStatus != hipSuccess) { fprintf(stderr, "\n Program failed after hipDeviceSynchronize()!"); free(TheImg); free(CopyImg); exit(EXIT_FAILURE); } //WriteBMPlin(CopyImg, OutputFileName); // Write the flipped image back to disk // printf("\n\n--------------------------------------------------------------------------\n"); // printf("%s ComputeCapab=%d.%d [max %s blocks; %d thr/blk] \n", // GPUprop.name, GPUprop.major, GPUprop.minor, SupportedBlocks, MaxThrPerBlk); // printf("--------------------------------------------------------------------------\n"); // printf("%s %s %s %d %d %u [%u BLOCKS, %u BLOCKS/ROW]\n", ProgName, InputFileName, OutputFileName, // T, R, ThrPerBlk, NumBlocks, BlkPerRow); // printf("--------------------------------------------------------------------------\n"); // printf("CPU->GPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrCPUtoGPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrCPUtoGPU)); //printf("Kernel Execution =%7.2f ms\n", kernelExecutionTime);//, DATAMB(GPUDataTransfer), DATABW(GPUDataTransfer, kernelExecutionTime)); ... %4d MB ... %6.2f GB/s // printf("GPU->CPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrGPUtoCPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrGPUtoCPU)); // printf("--------------------------------------------------------------------------\n"); // printf("Total time elapsed =%7.2f ms %4d MB ... %6.2f GB/s\n", totalTime, DATAMB((2 * IMAGESIZE + GPUDataTransfer)), DATABW((2 * IMAGESIZE + GPUDataTransfer), totalTime)); //printf("--------------------------------------------------------------------------\n\n"); printf("%d\n", t); // Deallocate CPU, GPU memory and destroy events. hipFree(GPUptr); hipEventDestroy(time1); hipEventDestroy(time2); // hipEventDestroy(time3); // hipEventDestroy(time4); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Parallel Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); free(TheImg); free(CopyImg); exit(EXIT_FAILURE); } free(TheImg); free(CopyImg); return(EXIT_SUCCESS); }
29f4860ba99b555ac908a03e5af0944ab9ff3579.cu
#include <cuda_runtime.h> #include <curand_kernel.h> #include <device_launch_parameters.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <iostream> #include <math.h> #include <ctype.h> #include <cuda.h> #include <time.h> #define DATAMB(bytes) (bytes/1024/1024) #define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0)) #define CEIL(a,b) ((a+b-1)/b) typedef unsigned char uch; typedef unsigned long ul; typedef unsigned int ui; struct ImgProp{ int Hpixels; int Vpixels; uch HeaderInfo[54]; ul Hbytes; } ip; typedef struct{ ui i; ui j; }pixelCoords; // buffers for images uch *TheImg, *CopyImg; uch *GPUImg, *GPUCopyImg, *GPUptr, *GPUResult, *NoiseMap, *KernelIndices; double *GPU_PREV_BW, *GPU_CURR_BW; // noisy pixel locations pixelCoords *NoisyPixelCoords; // mutex variables for tracking noisy pixels ui *GlobalMax, *GlobalMin, *NumNoisyPixelsGPU, *GPUmutexes, *GPU_SAD; #define IPHB ip.Hbytes #define IPH ip.Hpixels #define IPV ip.Vpixels #define IMAGESIZE (IPHB*IPV) #define IMAGEPIX (IPH*IPV) // Kernel that locates potentially noisy pixels in an image by using impulse noise detection __global__ void findNoisyPixels(pixelCoords *locations, uch *ImgSrc, uch *noiseMap, ui*globalMax, ui*globalMin, ui*ListLength, ui Hpixels, ui Vpixels) { // 3x3 matrix of pixels around current pixel //uch mat3x3[8]; // 3 x 3 - 1 = 8 // threads/blocks info and IDs ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; //ui NumBlocks = gridDim.x; ui BlkPerRow = CEIL(Hpixels, ThrPerBlk); ui MYrow = MYbid / BlkPerRow; ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk; // leave buffer frame around image to avoid 8 edge cases for convolutions if (MYcol > Hpixels-4 || MYcol < 3 || MYrow > Vpixels-4 || MYrow < 3) return; ui MYpixIndex = MYrow * Hpixels + MYcol; // pixel index in B&W image uch pIJ = ImgSrc[MYpixIndex]; uch max = 0; uch min = 255; uch curr; uch nMax; uch nMin; uch oldMax; uch oldMin; int row; int col; int indx; // find min and max pixel intensities in current window for (int i = -1; i <= 1; i++){ for (int j = -1; j <= 1; j++){ if(!(j==0 && i==0)){ row = MYrow + i; col = MYcol + j; indx = row*Hpixels + col; curr = ImgSrc[indx]; if(curr > max) max = curr; if(curr < min) min = curr; } } } // atomically update global max and min pixel intensities oldMax = atomicMax(globalMax, (ui)max); oldMin = atomicMin(globalMin, (ui)min); // if the old max wasn't updated, then max is "salt" noise // otherwise, we must assume that 255 is "salt" noise if(oldMax == max) nMax = max; else nMax = 255; // if the old min wasn't updated, then min is "pepper" noise // otherwise, we must assume that 0 is "pepper" noise if(oldMin == min) nMin = min; else nMin = 0; // if the current pixel intensity is equal to min or max, // then it is likely s&p noise. Mark as such. 
if(pIJ == nMin || pIJ == nMax){ int listIndex = atomicAdd(ListLength, (ui)1); locations[listIndex].i = MYrow; locations[listIndex].j = MYcol; noiseMap[MYpixIndex] = 0; } // if(pIJ == 255 || pIJ == 0){ // ui listIndex = atomicAdd(ListLength, (ui)1); // locations[listIndex].i = MYrow; // locations[listIndex].j = MYcol; // noiseMap[MYpixIndex] = 0; // } } // __device__ // uch Horz[5][5] = { { 0, 0, 0, 0, 0 }, // { 1, 1, 1, 1, 1 }, // { 1, 1, 0, 1, 1 }, // { 1, 1, 1, 1, 1 }, // { 0, 0, 0, 0, 0 } }; // __device__ // uch Vert[5][5] = { { 0, 1, 1, 1, 0 }, // { 0, 1, 1, 1, 0 }, // { 0, 1, 0, 1, 0 }, // { 0, 1, 1, 1, 0 }, // { 0, 1, 1, 1, 0 } }; // __device__ // uch mask45[7][7]={ {0, 0, 0, 0, 1, 0, 0}, // {0, 0, 0, 1, 1, 1, 0}, // {0, 0, 1, 1, 1, 1, 1}, // {0, 1, 1, 0, 1, 1, 0}, // {1, 1, 1, 1, 1, 0, 0}, // {0, 1, 1, 1, 0, 0, 0}, // {0, 0, 1, 0, 0, 0, 0}}; // __device__ // uch mask135[7][7]={ {0, 0, 1, 0, 0, 0, 0}, // {0, 1, 1, 1, 0, 0, 0}, // {1, 1, 1, 1, 1, 0, 0}, // {0, 1, 1, 0, 1, 1, 0}, // {0, 0, 1, 1, 1, 1, 1}, // {0, 0, 0, 1, 1, 1, 0}, // {0, 0, 0, 0, 1, 0, 0}}; //3x3 standard mask __constant__ double mask0[3][3] = { {0.1036, 0.1464, 0.1036}, {0.1464, 0, 0.1464}, {0.1036, 0.1464, 0.1036}}; // horizontal 5x5 mask __constant__ double mask1[5][5] = { {0, 0, 0, 0, 0 }, {0.0465, 0.0735, 0.1040, 0.0735, 0.0465 }, {0.0520, 0.1040, 0, 0.1040, 0.0520 }, {0.0465, 0.0735, 0.1040, 0.0735, 0.0465 }, {0, 0, 0, 0, 0 }}; //vertical 5x5 mask __constant__ double mask2[5][5] = { {0, 0.0465, 0.0520, 0.0465, 0}, {0, 0.0735, 0.1040, 0.0735, 0}, {0, 0.1040, 0, 0.1040, 0}, {0, 0.0735, 0.1040, 0.0735, 0}, {0, 0.0465, 0.0520, 0.0465, 0}}; //45 degree 7x7 mask __constant__ double mask3[7][7] = { {0, 0, 0, 0, 0.0251, 0, 0 }, {0, 0, 0, 0.0397, 0.0355, 0.0281, 0 }, {0, 0, 0.0562, 0.0794, 0.0562, 0.0355, 0.0251 }, {0, 0.0397, 0.0794, 0, 0.0794, 0.0397, 0 }, {0.0251, 0.0355, 0.0562, 0.0794, 0.0562, 0, 0 }, {0, 0.0281, 0.0355, 0.0397, 0, 0, 0 }, {0, 0, 0.0251, 0, 0, 0, 0 }}; //135 degree 7x7 mask __constant__ double mask4[7][7] = { {0, 0, 0.0251, 0, 0, 0, 0 }, {0, 0.0281, 0.0355, 0.0397, 0, 0, 0 }, {0.0251, 0.0355, 0.0562, 0.0794, 0.0562, 0, 0 }, {0, 0.0397, 0.0794, 0, 0.0794, 0.0397, 0 }, {0, 0, 0.0562, 0.0794, 0.0562, 0.0355, 0.0251 }, {0, 0, 0, 0.0397, 0.0355, 0.0281, 0 }, {0, 0, 0, 0, 0.0251, 0, 0 }}; // Kernel that determines appropriate inpainting mask to use based on surrounding noiseless pixels __global__ void determineMasks(pixelCoords *locations, uch *ImgSrc, uch *noiseMap, uch *kernelIndices, ui ListLength, ui Hpixels, ui R) { // threads/blocks info and IDs ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; // ensure not out-of-bounds if (MYgtid > ListLength) return; // masked arrays of those pixels denoted as noise-free uch noiseFreeLists[60]; uch *maskA = noiseFreeLists; uch *maskB = maskA+14; uch *maskC = maskB+14; uch *maskD = maskC+14; uch *listLengths = maskD+14; uch *currMask; uch currListLength; // control and tracking variables int i, j, row, col, indx, maskAIndx=0, maskBIndx=0, maskCIndx=0, maskDIndx=0, chosenMask; float minStdDev=1000000.0, currStdDev, sum = 0.0, mean, standardDeviation = 0.0; // obtain current noisy pixel indices pixelCoords currCoord = locations[MYgtid]; ui MYrow = currCoord.i; ui MYcol = currCoord.j; // iterate through both 5x5 masks to find values of noise-free pixels for (i = -2; i <= 2; i++){ for (j = -2; j <= 2; j++){ // find current absolute index row = MYrow + i; col = MYcol + j; indx = row*Hpixels + col; // if the 
current pixel is noise-free AND if(noiseMap[indx]){ // if the current 5x5 horizontal mask cell is set to TRUE if(mask1[i+2][j+2]) { // obtain noise free pixel and add to list maskA[maskAIndx] = ImgSrc[indx]; maskAIndx++; } // if the current 5x5 vertical mask cell is set to TRUE if(mask2[i+2][j+2]) { // obtain noise free pixel and add to list maskB[maskBIndx] = ImgSrc[indx]; maskBIndx++; } } } } // iterate through both 7x7 masks to find values of noise-free pixels for (i = -3; i <= 3; i++){ for ( j = -3; j <= 3; j++){ // find current absolute index row = MYrow + i; col = MYcol + j; indx = row*Hpixels + col; // if the current pixel is noise-free AND if(noiseMap[indx]){ // if the current 7x7 45 degree mask cell is set to TRUE if(mask3[i+3][j+3]) { // obtain noise free pixel and add to list maskC[maskCIndx] = ImgSrc[indx]; maskCIndx++; } // if the current 7x7 135 degree mask cell is set to TRUE if(mask4[i+3][j+3]) { // obtain noise free pixel and add to list maskD[maskDIndx] = ImgSrc[indx]; maskDIndx++; } } } } // if the amounts of noise free pixels in any of the directional masks is // below threshold R, then we use 3x3 convolution // this helps to mitigate promoting false edges if(maskAIndx < R || maskBIndx < R || maskCIndx < R || maskDIndx < R) chosenMask = 0; else { // assign list lengths for smoother access listLengths[0] = maskAIndx; listLengths[1] = maskBIndx; listLengths[2] = maskCIndx; listLengths[3] = maskDIndx; // find the mask index (from 1 to 4) of minimum standard deviation for(i=0; i < 4; i++) { currListLength = listLengths[i]; currMask = maskA+(i*14); // first find mean of array for(j = 0; j < currListLength; j++) { sum += (float)currMask[j]; } mean = sum/currListLength; // then find sum of individual deviations for(j = 0; j < currListLength; j++) standardDeviation += pow((float)currMask[j] - mean, 2); // final StdDev is normalized by list length currStdDev = standardDeviation / currListLength; if(currStdDev < minStdDev) { chosenMask = i+1; minStdDev = currStdDev; } } } // assign the mask index that was chosen kernelIndices[MYgtid] = chosenMask; } // convolutions based on kernel indices __global__ void Convolute(double *ImgCurr, double *ImgBW, pixelCoords *pc, uch *kernalI, ui numNoisy, ui Hpixels) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; if (MYgtid >= numNoisy) return; // index out of range // current noisy pixel coordinates ui i=pc[MYgtid].i,j=pc[MYgtid].j,m=kernalI[MYgtid]; // absolute pixel index ui MYpixIndex = i * Hpixels + j; int a,b,row,col,index; double C = 0.0; switch(m) { case 0: for (a = -1; a <= 1; a++){ for (b = -1; b <= 1; b++){ row = i + a; col = j + b; index = row*Hpixels + col; C += (ImgBW[index] * mask0[a + 1][b + 1]); } } ImgCurr[MYpixIndex] = C; break; case 1: for (a = -2; a <= 2; a++){ for (b = -2; b <= 2; b++){ row = i + a; col = j + b; index = row*Hpixels + col; C += (ImgBW[index] * mask1[a + 2][b + 2]); } } ImgCurr[MYpixIndex] = C; break; case 2: for (a = -2; a <= 2; a++){ for (b = -2; b <= 2; b++){ row = i + a; col = j + b; index = row*Hpixels + col; C += (ImgBW[index] * mask2[a + 2][b + 2]); } } ImgCurr[MYpixIndex] = C; break; case 3: for (a = -3; a <= 3; a++){ for (b = -3; b <= 3; b++){ row = i + a; col = j + b; index = row*Hpixels + col; C += (ImgBW[index] * mask3[a + 3][b + 3]); } } ImgCurr[MYpixIndex] = C; break; default: for (a = -3; a <= 3; a++){ for (b = -3; b <= 3; b++){ row = i + a; col = j + b; index = row*Hpixels + col; C += (ImgBW[index] * mask4[a + 3][b + 3]); } } 
// assign convolution sum to current noisy pixel index ImgCurr[MYpixIndex] = C; break; } } // sum of absolute differences, reconstruction progress tracking mechanism __global__ void SAD(ui *sad, double *prev, double *current, pixelCoords *pc, ui numNoisy, ui Hpixels, ui Vpixels) { // thread IDs ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; if (MYgtid >= numNoisy) return; // index out of range ui i=pc[MYgtid].i, j=pc[MYgtid].j; // current noisy pixel coordinates ui MYpixIndex = i * Hpixels + j; // absolute index // difference of old and updated pixel values, round to nearest integer int absDiff=(int)(prev[MYpixIndex]-current[MYpixIndex]+0.5); // absolute difference if(absDiff<0) absDiff = absDiff*(-1); atomicAdd(sad, (ui)absDiff); // update global sum } // Kernel that calculates a B&W image from an RGB image // resulting image has a double type for each pixel position __global__ void BWKernel(uch *ImgBW, uch *ImgGPU, double *ImgfpBW, ui Hpixels) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; double R, G, B; //ui NumBlocks = gridDim.x; ui BlkPerRow = CEIL(Hpixels, ThrPerBlk); ui RowBytes = (Hpixels * 3 + 3) & (~3); ui MYrow = MYbid / BlkPerRow; ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk; if (MYcol >= Hpixels) return; // col out of range ui MYsrcIndex = MYrow * RowBytes + 3 * MYcol; ui MYpixIndex = MYrow * Hpixels + MYcol; B = (double)ImgGPU[MYsrcIndex]; G = (double)ImgGPU[MYsrcIndex + 1]; R = (double)ImgGPU[MYsrcIndex + 2]; ImgBW[MYpixIndex] = (uch)((R+G+B)/3.0); ImgfpBW[MYpixIndex] = (R+G+B)/3.0; } // Kernel that calculates a RGB (grayscale) version of B&W image for filing as Windows BMP __global__ void RGBKernel(uch *ImgRGB, double *ImgBW, ui Hpixels) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; //ui NumBlocks = gridDim.x; ui BlkPerRow = CEIL(Hpixels, ThrPerBlk); ui RowBytes = (Hpixels * 3 + 3) & (~3); ui MYrow = MYbid / BlkPerRow; ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk; if (MYcol >= Hpixels) return; // col out of range ui MYdstIndex = MYrow * RowBytes + 3 * MYcol; ui MYpixIndex = MYrow * Hpixels + MYcol; uch pixInt = ImgBW[MYpixIndex]; ImgRGB[MYdstIndex] = pixInt; ImgRGB[MYdstIndex+1] = pixInt; ImgRGB[MYdstIndex+2] = pixInt; } // Kernel that copies an image from one part of the // GPU memory (ImgSrc) to another (ImgDst) __global__ void NoisyPixCopy(double *NPDst, double *ImgSrc, pixelCoords *pc, ui NoisyPixelListLength, ui Hpixels) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; if (MYgtid >= NoisyPixelListLength) return;// outside the allocated memory pixelCoords currCoord = pc[MYgtid]; ui srcIndex = currCoord.i * Hpixels + currCoord.j; NPDst[srcIndex] = ImgSrc[srcIndex]; } // Kernel that copies an image from one part of the // GPU memory (ImgSrc) to another (ImgDst) __global__ void PixCopy(double *ImgDst, double *ImgSrc, ui FS) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; if (MYgtid > FS) return; // outside the allocated memory ImgDst[MYgtid] = ImgSrc[MYgtid]; } /* // helper function that wraps CUDA API calls, reports any error and exits void chkCUDAErr(cudaError_t error_id) { if (error_id != CUDA_SUCCESS) { printf("CUDA ERROR :::%\n", cudaGetErrorString(error_id)); exit(EXIT_FAILURE); } } */ // Read a 24-bit/pixel BMP file into a 1D linear 
array. // Allocate memory to store the 1D image and return its pointer. uch *ReadBMPlin(char* fn) { static uch *Img; FILE* f = fopen(fn, "rb"); if (f == NULL){ printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); } uch HeaderInfo[54]; fread(HeaderInfo, sizeof(uch), 54, f); // read the 54-byte header // extract image height and width from header int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width; int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height; int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes; //save header for re-use memcpy(ip.HeaderInfo, HeaderInfo,54); //printf("\n Input File name: %17s (%u x %u) File Size=%u", fn, // ip.Hpixels, ip.Vpixels, IMAGESIZE); // allocate memory to store the main image (1 Dimensional array) Img = (uch *)malloc(IMAGESIZE); if (Img == NULL) return Img; // Cannot allocate memory // read the image from disk fread(Img, sizeof(uch), IMAGESIZE, f); fclose(f); return Img; } // Write the 1D linear-memory stored image into file. void WriteBMPlin(uch *Img, char* fn) { FILE* f = fopen(fn, "wb"); if (f == NULL){ printf("\n\nFILE CREATION ERROR: %s\n\n", fn); exit(1); } //write header fwrite(ip.HeaderInfo, sizeof(uch), 54, f); //write data fwrite(Img, sizeof(uch), IMAGESIZE, f); printf("\nOutput File name: %17s (%u x %u) File Size=%u", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE); fclose(f); } int main(int argc, char **argv) { float /*totalTime, tfrCPUtoGPU, tfrGPUtoCPU,*/ kernelExecutionTime; // GPU code run times cudaError_t cudaStatus; cudaEvent_t time1, time2;//, time3, time4; char InputFileName[255], OutputFileName[255], ProgName[255]; ui BlkPerRow, ThrPerBlk=256, NumBlocks, /* GPUDataTransfer,*/ NumBlocksNP; cudaDeviceProp GPUprop; ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk; char SupportedBlocks[100]; ui GPUtotalBufferSize, R = 5, T = 5, NumNoisyPixelsCPU, mutexInit[4] = {0, 255, 0, 0}; ui CPU_SAD; strcpy(ProgName, "randNoiseRemoval"); switch (argc){ case 6: ThrPerBlk = atoi(argv[5]); case 5: R = atoi(argv[4]); case 4: T = atoi(argv[3]); case 3: strcpy(InputFileName, argv[1]); strcpy(OutputFileName, argv[2]); break; default: printf("\n\nUsage: %s InputFilename OutputFilename [T] [R] [ThrPerBlk]", ProgName); printf("\n\nExample: %s Astronaut.bmp Output.bmp", ProgName); printf("\n\nExample: %s Astronaut.bmp Output.bmp 5", ProgName); printf("\n\nExample: %s Astronaut.bmp Output.bmp 5 5",ProgName); printf("\n\nExample: %s Astronaut.bmp Output.bmp 5 5 128",ProgName); printf("\n\nT = reconstruction threshold, R = mask selection threshold\n\n"); exit(EXIT_FAILURE); } if ((ThrPerBlk < 32) || (ThrPerBlk > 1024)) { printf("Invalid ThrPerBlk option '%u'. Must be between 32 and 1024. \n", ThrPerBlk); exit(EXIT_FAILURE); } // Create CPU memory to store the input and output images TheImg = ReadBMPlin(InputFileName); // Read the input image if memory can be allocated if (TheImg == NULL){ printf("Cannot allocate memory for the input image...\n"); exit(EXIT_FAILURE); } CopyImg = (uch *)malloc(IMAGESIZE); if (CopyImg == NULL){ free(TheImg); printf("Cannot allocate memory for the input image...\n"); exit(EXIT_FAILURE); } // Choose which GPU to run on, change this on a multi-GPU system. int NumGPUs = 0; cudaGetDeviceCount(&NumGPUs); if (NumGPUs == 0){ printf("\nNo CUDA Device is available\n"); exit(EXIT_FAILURE); } cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! 
Do you have a CUDA-capable GPU installed?"); exit(EXIT_FAILURE); } cudaGetDeviceProperties(&GPUprop, 0); SupportedKBlocks = (ui)GPUprop.maxGridSize[0] * (ui)GPUprop.maxGridSize[1] * (ui)GPUprop.maxGridSize[2] / 1024; SupportedMBlocks = SupportedKBlocks / 1024; sprintf(SupportedBlocks, "%u %c", (SupportedMBlocks >= 5) ? SupportedMBlocks : SupportedKBlocks, (SupportedMBlocks >= 5) ? 'M' : 'K'); MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock; cudaEventCreate(&time1); cudaEventCreate(&time2); // cudaEventCreate(&time3); // cudaEventCreate(&time4); /* >>> GPU STORAGE DETAILS >>> GPUImage: IMAGESIZE GPUCopyImage(BW) : IMAGEPIX NoisyPixelCoords: IMAGEPIX*sizeof(pixelCoords) NoiseMap : IMAGEPIX KernelIndices : IMAGEPIX GlobalMax : sizeof(ui) GlobalMin : sizeof(ui) NumNoisyPixelsGPU : sizeof(ui) GPU_PREV_BW : sizeof(double) * IMAGEPIX GPU_CURR_BW : sizeof(double) * IMAGEPIX GPU_SAD : sizeof(ui) *********************** */ // allocate sufficient memory on the GPU to hold all above items GPUtotalBufferSize = IMAGESIZE+(IMAGEPIX*sizeof(pixelCoords))+IMAGEPIX*3+sizeof(ui)*4+2*(sizeof(double)*IMAGEPIX); cudaStatus = cudaMalloc((void**)&GPUptr, GPUtotalBufferSize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed! Can't allocate GPU memory for buffers"); exit(EXIT_FAILURE); } // setup buffer pointers for functions GPUImg = (uch *)GPUptr; GPUCopyImg = GPUImg + IMAGESIZE; NoiseMap = GPUCopyImg + IMAGEPIX; // add the previous image/array of noisy pixel intensities KernelIndices = NoiseMap + IMAGEPIX; NoisyPixelCoords = (pixelCoords*)(KernelIndices + IMAGEPIX); GPU_PREV_BW = (double*)(NoisyPixelCoords+IMAGEPIX); GPU_CURR_BW = GPU_PREV_BW + IMAGEPIX; GlobalMax = (ui*)(GPU_CURR_BW + IMAGEPIX); GlobalMin = GlobalMax+1; NumNoisyPixelsGPU = GlobalMin+1; GPU_SAD = NumNoisyPixelsGPU+1; // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(GPUImg, TheImg, IMAGESIZE, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy for input image CPU to GPU failed!"); exit(EXIT_FAILURE); } // Copy mutex initializations from CPU to GPU cudaStatus = cudaMemcpy(GlobalMax, mutexInit, 4*sizeof(ui), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy for mutex initializations CPU to GPU failed!"); exit(EXIT_FAILURE); } // assume pixels are not noisy by default cudaStatus = cudaMemset (NoiseMap, 1, IMAGEPIX ); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemset for Noise Map failed!"); exit(EXIT_FAILURE); } cudaEventRecord(time1, 0); // Time stamp at the start of the GPU transfer BlkPerRow = CEIL(ip.Hpixels, ThrPerBlk); NumBlocks = IPV*BlkPerRow; BWKernel <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, GPU_CURR_BW, IPH); cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "\n\n cudaDeviceSynchronize for B&WKernel returned error code %d after launching the kernel!\n", cudaStatus); exit(EXIT_FAILURE); } findNoisyPixels <<< NumBlocks, ThrPerBlk >>> (NoisyPixelCoords, GPUCopyImg, NoiseMap, GlobalMax, GlobalMin, NumNoisyPixelsGPU, IPH, IPV); // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "\n\ncudaDeviceSynchronize for findNoisyPixels returned error code %d after launching the kernel!\n", cudaStatus); exit(EXIT_FAILURE); } //cudaEventRecord(time3, 0); cudaStatus = cudaMemcpy(&NumNoisyPixelsCPU, NumNoisyPixelsGPU, sizeof(ui), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy of NumNoisyPixels, GPU to CPU failed!"); exit(EXIT_FAILURE); } // only schedule as many threads are needed for NoisyPixelListLength NumBlocksNP = CEIL(NumNoisyPixelsCPU, ThrPerBlk); determineMasks <<< NumBlocksNP, ThrPerBlk >>> (NoisyPixelCoords, GPUCopyImg, NoiseMap, KernelIndices, NumNoisyPixelsCPU, IPH, R); cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "\n\ncudaDeviceSynchronize for determineMasks returned error code %d after launching the kernel!\n", cudaStatus); exit(EXIT_FAILURE); } PixCopy <<< NumBlocks, ThrPerBlk >>> (GPU_PREV_BW, GPU_CURR_BW, IMAGEPIX); cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "\n\ncudaDeviceSynchronize for PixCopy returned error code %d after launching the kernel!\n", cudaStatus); exit(EXIT_FAILURE); } int t=0; // progress tracking do{ // reset SAD (sum of absolute pixel differences) cudaStatus = cudaMemset (GPU_SAD, 0, sizeof(ui) ); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemset for GPU_SAD failed!"); exit(EXIT_FAILURE); } Convolute <<< NumBlocksNP, ThrPerBlk >>> (GPU_CURR_BW, GPU_PREV_BW, NoisyPixelCoords, KernelIndices, NumNoisyPixelsCPU, IPH); cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "\n\n cudaDeviceSynchronize for Convolute returned error code %d after launching the kernel!\n", cudaStatus); exit(EXIT_FAILURE); } SAD <<< NumBlocksNP, ThrPerBlk >>> (GPU_SAD, GPU_PREV_BW, GPU_CURR_BW, NoisyPixelCoords, NumNoisyPixelsCPU, IPH, IPV); cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "\n\n cudaDeviceSynchronize for SAD returned error code %d after launching the kernel!\n", cudaStatus); exit(EXIT_FAILURE); } NoisyPixCopy <<< NumBlocksNP, ThrPerBlk >>> (GPU_PREV_BW, GPU_CURR_BW, NoisyPixelCoords, NumNoisyPixelsCPU, IPH); cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "\n\n cudaDeviceSynchronize for NoisyPixCopy returned error code %d after launching the kernel!\n", cudaStatus); exit(EXIT_FAILURE); } // CudaMemcpy the SAD from GPU to CPU here cudaStatus = cudaMemcpy(&CPU_SAD, GPU_SAD, sizeof(ui), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy of SAD from GPU to CPU failed!"); exit(EXIT_FAILURE); } t++; } while(CPU_SAD > T); // must convert floating point B&W back to unsigned char formats RGBKernel <<< NumBlocks, ThrPerBlk >>> (GPUImg, GPU_CURR_BW, IPH); GPUResult = GPUImg; cudaEventRecord(time2, 0); // Time stamp after the CPU --> GPU tfr is done //GPUDataTransfer = GPUtotalBufferSize; //Copy output (results) from GPU buffer to host (CPU) memory. 
// cudaStatus = cudaMemcpy(CopyImg, GPUResult, IMAGESIZE, cudaMemcpyDeviceToHost); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaMemcpy GPU to CPU failed!"); // exit(EXIT_FAILURE); // } //cudaEventRecord(time4, 0); cudaEventSynchronize(time1); cudaEventSynchronize(time2); //cudaEventSynchronize(time3); //cudaEventSynchronize(time4); //cudaEventElapsedTime(&totalTime, time1, time4); //cudaEventElapsedTime(&tfrCPUtoGPU, time1, time2); cudaEventElapsedTime(&kernelExecutionTime, time1, time2); //cudaEventElapsedTime(&tfrGPUtoCPU, time3, time4); cudaStatus = cudaDeviceSynchronize(); //checkError(cudaGetLastError()); // screen for errors in kernel launches if (cudaStatus != cudaSuccess) { fprintf(stderr, "\n Program failed after cudaDeviceSynchronize()!"); free(TheImg); free(CopyImg); exit(EXIT_FAILURE); } //WriteBMPlin(CopyImg, OutputFileName); // Write the flipped image back to disk // printf("\n\n--------------------------------------------------------------------------\n"); // printf("%s ComputeCapab=%d.%d [max %s blocks; %d thr/blk] \n", // GPUprop.name, GPUprop.major, GPUprop.minor, SupportedBlocks, MaxThrPerBlk); // printf("--------------------------------------------------------------------------\n"); // printf("%s %s %s %d %d %u [%u BLOCKS, %u BLOCKS/ROW]\n", ProgName, InputFileName, OutputFileName, // T, R, ThrPerBlk, NumBlocks, BlkPerRow); // printf("--------------------------------------------------------------------------\n"); // printf("CPU->GPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrCPUtoGPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrCPUtoGPU)); //printf("Kernel Execution =%7.2f ms\n", kernelExecutionTime);//, DATAMB(GPUDataTransfer), DATABW(GPUDataTransfer, kernelExecutionTime)); ... %4d MB ... %6.2f GB/s // printf("GPU->CPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrGPUtoCPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrGPUtoCPU)); // printf("--------------------------------------------------------------------------\n"); // printf("Total time elapsed =%7.2f ms %4d MB ... %6.2f GB/s\n", totalTime, DATAMB((2 * IMAGESIZE + GPUDataTransfer)), DATABW((2 * IMAGESIZE + GPUDataTransfer), totalTime)); //printf("--------------------------------------------------------------------------\n\n"); printf("%d\n", t); // Deallocate CPU, GPU memory and destroy events. cudaFree(GPUptr); cudaEventDestroy(time1); cudaEventDestroy(time2); // cudaEventDestroy(time3); // cudaEventDestroy(time4); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Parallel Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); free(TheImg); free(CopyImg); exit(EXIT_FAILURE); } free(TheImg); free(CopyImg); return(EXIT_SUCCESS); }
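The denoising program above builds its worklist by having findNoisyPixels atomically append the coordinates of pixels that match the global extremes (the commented-out variant tests 0/255 directly), then loops Convolute/SAD over only that list until the summed absolute change CPU_SAD drops to the threshold T. The fragment below is a minimal, self-contained sketch of just that atomic list-building (stream compaction) pattern; the kernel name, buffer names, and the 0/255 test here are illustrative stand-ins, not the program's own API.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

__global__ void collect_extremes(const unsigned char *img, int n,
                                 int *out_idx, unsigned int *out_count)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    unsigned char v = img[i];
    if (v == 0 || v == 255) {                         // candidate salt-and-pepper pixel
        unsigned int slot = atomicAdd(out_count, 1u); // reserve a unique slot in the list
        out_idx[slot] = i;                            // record the flat pixel index
    }
}

int main()
{
    const int n = 1 << 20;
    unsigned char *h_img = (unsigned char *)malloc(n);
    for (int i = 0; i < n; ++i) h_img[i] = (unsigned char)(i % 256);

    unsigned char *d_img; int *d_idx; unsigned int *d_cnt;
    cudaMalloc(&d_img, n);
    cudaMalloc(&d_idx, n * sizeof(int));
    cudaMalloc(&d_cnt, sizeof(unsigned int));
    cudaMemcpy(d_img, h_img, n, cudaMemcpyHostToDevice);
    cudaMemset(d_cnt, 0, sizeof(unsigned int));

    collect_extremes<<<(n + 255) / 256, 256>>>(d_img, n, d_idx, d_cnt);

    unsigned int count = 0;
    cudaMemcpy(&count, d_cnt, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    printf("flagged %u candidate pixels\n", count);

    cudaFree(d_img); cudaFree(d_idx); cudaFree(d_cnt);
    free(h_img);
    return 0;
}

Because slot order depends on thread scheduling, the list comes out unordered; that is harmless for the pipeline above, which only ever indexes the list element-wise.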
d5032c989028aac5ee9c24a8115fae9da508505a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/device_ptr.h> #include <thrust/scan.h> #include "utils.h" #include <dls.h> namespace SCAMP { // This kernel computes a sliding mean with specified window size and a // corresponding prefix sum array (A) __global__ void sliding_mean(double *pref_sum, size_t window, size_t size, double *means) { const double coeff = 1.0 / (double)window; size_t a = blockIdx.x * blockDim.x + threadIdx.x; size_t b = blockIdx.x * blockDim.x + threadIdx.x + window; if (a == 0) { means[a] = pref_sum[window - 1] * coeff; } if (a < size - 1) { means[a + 1] = (pref_sum[b] - pref_sum[a]) * coeff; } } __global__ void sliding_norm(double *cumsumsqr, unsigned int window, unsigned int size, double *norms) { int a = blockIdx.x * blockDim.x + threadIdx.x; int b = blockIdx.x * blockDim.x + threadIdx.x + window; if (a == 0) { norms[a] = 1 / sqrt(cumsumsqr[window - 1]); } else if (b < size + window) { norms[a] = 1 / sqrt(cumsumsqr[b - 1] - cumsumsqr[a - 1]); } } __global__ void sliding_dfdg(const double *T, const double *means, double *df, double *dg, const int m, const int n) { const double half = 1.0 / (double)2.0; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n - 1) { df[tid] = (T[tid + m] - T[tid]) * half; dg[tid] = (T[tid + m] - means[tid + 1]) + (T[tid] - means[tid]); } } __global__ void __launch_bounds__(512, 4) fastinvnorm(double *norm, const double *mean, const double *T, int m, int n) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int jump = ceil(n / (double)(blockDim.x * gridDim.x)); int start = jump * tid; int end = jump * (tid + 1); end = min(end, n); if (start >= n) { return; } double sum = 0; for (int i = 0; i < m; ++i) { double val = T[i + start] - mean[start]; sum += val * val; } norm[start] = sum; for (int i = start + 1; i < end; ++i) { norm[i] = norm[i - 1] + ((T[i - 1] - mean[i - 1]) + (T[i + m - 1] - mean[i])) * (T[i + m - 1] - T[i - 1]); } for (int i = start; i < end; ++i) { norm[i] = 1.0 / sqrt(norm[i]); } } __global__ void cross_correlation_to_ed(float *profile, unsigned int n, unsigned int m) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { profile[tid] = sqrt(max(2 * (1 - profile[tid]), 0.0)) * sqrt((double)m); } } __global__ void merge_mp_idx(float *mp, uint32_t *mpi, uint32_t n, uint64_t *merged) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { mp_entry item; item.floats[0] = (float)mp[tid]; item.ints[1] = mpi[tid]; merged[tid] = item.ulong; } } void elementwise_max_with_index(std::vector<float> &mp_full, std::vector<uint32_t> &mpi_full, int64_t merge_start, int64_t tile_sz, std::vector<uint64_t> *to_merge) { for (int i = 0; i < tile_sz; ++i) { mp_entry curr; curr.ulong = to_merge->at(i); if (mp_full[i + merge_start] < curr.floats[0]) { mp_full[i + merge_start] = curr.floats[0]; mpi_full[i + merge_start] = curr.ints[1]; } } } __global__ void elementwise_max_kernel(uint64_t *mp_full, uint64_t merge_start, uint64_t tile_sz, uint64_t *to_merge, uint64_t index_offset){ int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < tile_sz) { mp_entry e1, e2; e1.ulong = mp_full[tid + merge_start]; e2.ulong = to_merge[tid]; if (e1.floats[0] < e2.floats[0]) { e2.ints[1] += index_offset; mp_full[tid + merge_start] = e2.ulong; } } } void elementwise_max_device( uint64_t *mp_full, uint64_t merge_start, uint64_t tile_sz, uint64_t *to_merge, uint64_t index_offset, hipStream_t s){ dim3 grid(ceil(tile_sz / (double)512), 1, 1); dim3 block(512, 1, 1); 
hipLaunchKernelGGL(( elementwise_max_kernel), dim3(grid), dim3(block), 0, s, mp_full, merge_start, tile_sz, to_merge, index_offset); gpuErrchk(hipPeekAtLastError()); } extern "C" { // struct parameters_dls { // size_t n; // size_t m; // hipStream_t s; // }; dls_decdef(compute_statistics_HALADAPT,void, const double *T, double *input, double *scratch, size_t n, size_t m, size_t array_size); void compute_statistics_HALADAPT_GPU_CUDA(const double *T, double *input, double *scratch, size_t n, size_t m, size_t array_size) { dls_get_addr((void**) &T, "r"); dls_get_addr((void**) &input, "b"); dls_get_addr((void**) &scratch, "b"); dls_get_arg(&n, sizeof(size_t)); dls_get_arg(&m, sizeof(size_t)); dls_get_arg(&array_size, sizeof(size_t)); // dls_get_arg(&s, sizeof(hipStream_t)); //dls_get_arg(&parameters, sizeof(parameters_dls)); double *norms = input; double *df = &input[array_size]; double *dg = &input[2*array_size]; double *means = &input[3*array_size]; dim3 grid(ceil(n / (double)512), 1, 1); dim3 block(512, 1, 1); // gpuErrchk(hipPeekAtLastError()); // Use prefix sum to compute sliding mean hipLaunchKernelGGL(( sliding_mean), dim3(grid), dim3(block), 0, 0, scratch, m, n, means); // gpuErrchk(hipPeekAtLastError()); // Compute differential values hipLaunchKernelGGL(( sliding_dfdg), dim3(grid), dim3(block), 0, 0, T, means, df, dg, m, n); // gpuErrchk(hipPeekAtLastError()); // This will be kind of slow on the GPU, may cause latency between tiles int workers = n / m + 1; hipLaunchKernelGGL(( fastinvnorm), dim3(dim3(ceil(workers / (double)512), 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, norms, means, T, m, n); // gpuErrchk(hipPeekAtLastError()); } } void compute_statistics(const double *T, double *norms, double *df, double *dg, double *means, size_t n, size_t m, hipStream_t s, double *scratch, size_t array_size, size_t tile_size) { // std::inclusive_scan(T, T + n + m -1, scratch); //TODO make that work, dont know why he cant find the inclusive scan in the headerfile numeric scratch[0] = T[0]; int i; for (i=1 ; i< n + m - 1 ; i++) { scratch[i] = scratch[i-1] + T[i]; } char *dls_modules = dls_get_module_info(); if (!dls_is_in_list(dls_modules, "DLS_AUTOADD")) { printf("Manually register implementations...\n"); dls_add_impl(compute_statistics_HALADAPT, "spm", "compute_statistics_HALADAPT_GPU_CUDA", &compute_statistics_HALADAPT_GPU_CUDA, PM_GPU | PM_CUDA); } double *input = (double*) malloc(sizeof(double) * array_size * 4); dls_register_marea((void*) T, tile_size*sizeof(double), DLS_VT_D); dls_register_marea(input, 4*array_size*sizeof(double), DLS_VT_D); dls_register_marea(scratch, tile_size*sizeof(double), DLS_VT_D); dls_predict_call(compute_statistics_HALADAPT, "rbbvvvp", T, input, scratch, &n, &m, &array_size, 1); dls_start_tgraph(); dls_validate_marea((void*)T); dls_validate_marea((void*)scratch); dls_validate_marea((void*)input); dls_unregister_marea((void*)T); dls_unregister_marea((void*)scratch); dls_unregister_marea((void*)input); memcpy(norms, input, array_size*sizeof(double)); memcpy(df, &input[array_size], array_size*sizeof(double)); memcpy(dg, &input[2 * array_size], array_size*sizeof(double)); memcpy(means, &input[3 * array_size], array_size*sizeof(double)); free(input); } void launch_merge_mp_idx(float *mp, uint32_t *mpi, uint32_t n, uint64_t *merged, hipStream_t s) { hipLaunchKernelGGL(( merge_mp_idx), dim3(dim3(::ceil(n / 1024.0), 1, 1)), dim3(dim3(1024, 1, 1)), 0, s, mp, mpi, n, merged); } } // namespace SCAMP
d5032c989028aac5ee9c24a8115fae9da508505a.cu
#include <thrust/device_ptr.h> #include <thrust/scan.h> #include "utils.h" #include <dls.h> namespace SCAMP { // This kernel computes a sliding mean with specified window size and a // corresponding prefix sum array (A) __global__ void sliding_mean(double *pref_sum, size_t window, size_t size, double *means) { const double coeff = 1.0 / (double)window; size_t a = blockIdx.x * blockDim.x + threadIdx.x; size_t b = blockIdx.x * blockDim.x + threadIdx.x + window; if (a == 0) { means[a] = pref_sum[window - 1] * coeff; } if (a < size - 1) { means[a + 1] = (pref_sum[b] - pref_sum[a]) * coeff; } } __global__ void sliding_norm(double *cumsumsqr, unsigned int window, unsigned int size, double *norms) { int a = blockIdx.x * blockDim.x + threadIdx.x; int b = blockIdx.x * blockDim.x + threadIdx.x + window; if (a == 0) { norms[a] = 1 / sqrt(cumsumsqr[window - 1]); } else if (b < size + window) { norms[a] = 1 / sqrt(cumsumsqr[b - 1] - cumsumsqr[a - 1]); } } __global__ void sliding_dfdg(const double *T, const double *means, double *df, double *dg, const int m, const int n) { const double half = 1.0 / (double)2.0; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n - 1) { df[tid] = (T[tid + m] - T[tid]) * half; dg[tid] = (T[tid + m] - means[tid + 1]) + (T[tid] - means[tid]); } } __global__ void __launch_bounds__(512, 4) fastinvnorm(double *norm, const double *mean, const double *T, int m, int n) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int jump = ceil(n / (double)(blockDim.x * gridDim.x)); int start = jump * tid; int end = jump * (tid + 1); end = min(end, n); if (start >= n) { return; } double sum = 0; for (int i = 0; i < m; ++i) { double val = T[i + start] - mean[start]; sum += val * val; } norm[start] = sum; for (int i = start + 1; i < end; ++i) { norm[i] = norm[i - 1] + ((T[i - 1] - mean[i - 1]) + (T[i + m - 1] - mean[i])) * (T[i + m - 1] - T[i - 1]); } for (int i = start; i < end; ++i) { norm[i] = 1.0 / sqrt(norm[i]); } } __global__ void cross_correlation_to_ed(float *profile, unsigned int n, unsigned int m) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { profile[tid] = sqrt(max(2 * (1 - profile[tid]), 0.0)) * sqrt((double)m); } } __global__ void merge_mp_idx(float *mp, uint32_t *mpi, uint32_t n, uint64_t *merged) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { mp_entry item; item.floats[0] = (float)mp[tid]; item.ints[1] = mpi[tid]; merged[tid] = item.ulong; } } void elementwise_max_with_index(std::vector<float> &mp_full, std::vector<uint32_t> &mpi_full, int64_t merge_start, int64_t tile_sz, std::vector<uint64_t> *to_merge) { for (int i = 0; i < tile_sz; ++i) { mp_entry curr; curr.ulong = to_merge->at(i); if (mp_full[i + merge_start] < curr.floats[0]) { mp_full[i + merge_start] = curr.floats[0]; mpi_full[i + merge_start] = curr.ints[1]; } } } __global__ void elementwise_max_kernel(uint64_t *mp_full, uint64_t merge_start, uint64_t tile_sz, uint64_t *to_merge, uint64_t index_offset){ int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < tile_sz) { mp_entry e1, e2; e1.ulong = mp_full[tid + merge_start]; e2.ulong = to_merge[tid]; if (e1.floats[0] < e2.floats[0]) { e2.ints[1] += index_offset; mp_full[tid + merge_start] = e2.ulong; } } } void elementwise_max_device( uint64_t *mp_full, uint64_t merge_start, uint64_t tile_sz, uint64_t *to_merge, uint64_t index_offset, cudaStream_t s){ dim3 grid(ceil(tile_sz / (double)512), 1, 1); dim3 block(512, 1, 1); elementwise_max_kernel<<<grid, block, 0, s>>>(mp_full, merge_start, tile_sz, to_merge, 
index_offset); gpuErrchk(cudaPeekAtLastError()); } extern "C" { // struct parameters_dls { // size_t n; // size_t m; // cudaStream_t s; // }; dls_decdef(compute_statistics_HALADAPT,void, const double *T, double *input, double *scratch, size_t n, size_t m, size_t array_size); void compute_statistics_HALADAPT_GPU_CUDA(const double *T, double *input, double *scratch, size_t n, size_t m, size_t array_size) { dls_get_addr((void**) &T, "r"); dls_get_addr((void**) &input, "b"); dls_get_addr((void**) &scratch, "b"); dls_get_arg(&n, sizeof(size_t)); dls_get_arg(&m, sizeof(size_t)); dls_get_arg(&array_size, sizeof(size_t)); // dls_get_arg(&s, sizeof(cudaStream_t)); //dls_get_arg(&parameters, sizeof(parameters_dls)); double *norms = input; double *df = &input[array_size]; double *dg = &input[2*array_size]; double *means = &input[3*array_size]; dim3 grid(ceil(n / (double)512), 1, 1); dim3 block(512, 1, 1); // gpuErrchk(cudaPeekAtLastError()); // Use prefix sum to compute sliding mean sliding_mean<<<grid, block, 0, 0>>>(scratch, m, n, means); // gpuErrchk(cudaPeekAtLastError()); // Compute differential values sliding_dfdg<<<grid, block, 0, 0>>>(T, means, df, dg, m, n); // gpuErrchk(cudaPeekAtLastError()); // This will be kind of slow on the GPU, may cause latency between tiles int workers = n / m + 1; fastinvnorm<<<dim3(ceil(workers / (double)512), 1, 1), dim3(512, 1, 1), 0, 0>>>(norms, means, T, m, n); // gpuErrchk(cudaPeekAtLastError()); } } void compute_statistics(const double *T, double *norms, double *df, double *dg, double *means, size_t n, size_t m, cudaStream_t s, double *scratch, size_t array_size, size_t tile_size) { // std::inclusive_scan(T, T + n + m -1, scratch); //TODO make that work, dont know why he cant find the inclusive scan in the headerfile numeric scratch[0] = T[0]; int i; for (i=1 ; i< n + m - 1 ; i++) { scratch[i] = scratch[i-1] + T[i]; } char *dls_modules = dls_get_module_info(); if (!dls_is_in_list(dls_modules, "DLS_AUTOADD")) { printf("Manually register implementations...\n"); dls_add_impl(compute_statistics_HALADAPT, "spm", "compute_statistics_HALADAPT_GPU_CUDA", &compute_statistics_HALADAPT_GPU_CUDA, PM_GPU | PM_CUDA); } double *input = (double*) malloc(sizeof(double) * array_size * 4); dls_register_marea((void*) T, tile_size*sizeof(double), DLS_VT_D); dls_register_marea(input, 4*array_size*sizeof(double), DLS_VT_D); dls_register_marea(scratch, tile_size*sizeof(double), DLS_VT_D); dls_predict_call(compute_statistics_HALADAPT, "rbbvvvp", T, input, scratch, &n, &m, &array_size, 1); dls_start_tgraph(); dls_validate_marea((void*)T); dls_validate_marea((void*)scratch); dls_validate_marea((void*)input); dls_unregister_marea((void*)T); dls_unregister_marea((void*)scratch); dls_unregister_marea((void*)input); memcpy(norms, input, array_size*sizeof(double)); memcpy(df, &input[array_size], array_size*sizeof(double)); memcpy(dg, &input[2 * array_size], array_size*sizeof(double)); memcpy(means, &input[3 * array_size], array_size*sizeof(double)); free(input); } void launch_merge_mp_idx(float *mp, uint32_t *mpi, uint32_t n, uint64_t *merged, cudaStream_t s) { merge_mp_idx<<<dim3(std::ceil(n / 1024.0), 1, 1), dim3(1024, 1, 1), 0, s>>>( mp, mpi, n, merged); } } // namespace SCAMP
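compute_statistics above computes the inclusive prefix sum of T with a serial host loop (the commented-out std::inclusive_scan call apparently did not resolve), even though <thrust/scan.h> is already included. The sketch below shows the same prefix-sum contract done with thrust on the device; it assumes the scratch buffer could live in device memory, so it is an illustration rather than a drop-in replacement for the DLS-registered host buffers used above.

#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include <cstdio>

int main()
{
    const int len = 8;
    double T[len] = {1, 2, 3, 4, 5, 6, 7, 8};

    thrust::device_vector<double> d_T(T, T + len);
    thrust::device_vector<double> d_scratch(len);

    // scratch[i] = T[0] + ... + T[i], the same contract as the serial loop above
    thrust::inclusive_scan(d_T.begin(), d_T.end(), d_scratch.begin());

    thrust::host_vector<double> h_scratch = d_scratch;
    for (int i = 0; i < len; ++i)
        printf("%g ", (double)h_scratch[i]);
    printf("\n");   // prints: 1 3 6 10 15 21 28 36
    return 0;
}

Keeping the scan on the device would also let sliding_mean consume scratch without a host round trip, though the HALADAPT/DLS registration above expects host-resident arrays, which is presumably why the host loop was kept.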
f4419eddfa05194200dca08b797663dead672cd4.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This sample implements a separable convolution filter * of a 2D image with an arbitrary kernel. */ // CUDA runtime #include <hip/hip_runtime.h> #include <thrust/block_2d.h> #include <thrust/window_transform.h> // Utilities and system includes #include <helper_functions.h> #include <helper_cuda.h> #include "convolutionSeparable_common.h" #include <thrust/window_2d.h> #include <thrust/window_transform.h> //////////////////////////////////////////////////////////////////////////////// // Reference CPU convolution //////////////////////////////////////////////////////////////////////////////// extern "C" void convolutionRowCPU( float *h_Result, float *h_Data, float *h_Kernel, int imageW, int imageH, int kernelR ); extern "C" void convolutionColumnCPU( float *h_Result, float *h_Data, float *h_Kernel, int imageW, int imageH, int kernelR ); int imageW , imageH; //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // start logs printf("[%s] - Starting...\n", argv[0]); float *h_Kernel, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU; if(argc==2) { printf("Size = "); printf("%d",atoi(argv[1])); imageW = atoi(argv[1]); imageH = atoi(argv[1]); } else { imageW = 3072; imageH = 3072; } const int iterations = 16; StopWatchInterface *hTimer = NULL; //Use command-line specified CUDA device, otherwise use device with highest Gflops/s findCudaDevice(argc, (const char **)argv); sdkCreateTimer(&hTimer); printf("Image Width x Height = %i x %i\n\n", imageW, imageH); printf("Allocating and initializing host arrays...\n"); h_Kernel = (float *)malloc(KERNEL_LENGTH * sizeof(float)); h_Input = (float *)malloc(imageW * imageH * sizeof(float)); h_Buffer = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float)); srand(200); for (unsigned int i = 0; i < KERNEL_LENGTH; i++) { h_Kernel[i] = (float)(rand() % 16); } for (unsigned i = 0; i < imageW * imageH; i++) { h_Input[i] = (float)(rand() % 16); } printf("Allocating and initializing CUDA arrays...\n"); thrust::block_2d<float> input_block (imageW,imageH); thrust::block_2d<float> output_block (imageW,imageH); input_block.upload(h_Input); printf("Running GPU convolution (%u identical iterations)...\n\n", iterations); for (int i = -1; i < iterations; i++) { //i == -1 -- warmup iteration if (i == 0) { checkCudaErrors(hipDeviceSynchronize()); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); } thrust::convolve(thrust::hip::shared,&input_block,h_Kernel,h_Kernel,KERNEL_LENGTH,&output_block); } checkCudaErrors(hipDeviceSynchronize()); sdkStopTimer(&hTimer); double gpuTime = 0.001 * sdkGetTimerValue(&hTimer) / (double)iterations; printf("convolutionSeparable, Throughput = %.4f MPixels/sec, Time = %.5f s, Size = %u Pixels, NumDevsUsed = %i, Workgroup = %u\n", (1.0e-6 * (double)(imageW * imageH)/ gpuTime), gpuTime, (imageW * imageH), 1, 0); printf("\nReading back GPU 
results...\n\n"); output_block.download(&h_OutputGPU); printf("Checking the results...\n"); printf(" ...running convolutionRowCPU()\n"); convolutionRowCPU( h_Buffer, h_Input, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...running convolutionColumnCPU()\n"); convolutionColumnCPU( h_OutputCPU, h_Buffer, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...comparing the results`\n"); double sum = 0, delta = 0; for (unsigned i = 0; i < imageW * imageH; i++) { delta += (h_OutputGPU[i] - h_OutputCPU[i]) * (h_OutputGPU[i] - h_OutputCPU[i]); sum += h_OutputCPU[i] * h_OutputCPU[i]; } double L2norm = sqrt(delta / sum); printf(" ...Relative L2 norm: %E\n\n", L2norm); printf("Shutting down...\n"); free(h_OutputGPU); free(h_OutputCPU); free(h_Buffer); free(h_Input); free(h_Kernel); sdkDeleteTimer(&hTimer); if (L2norm > 1e-6) { printf("Test failed!\n"); exit(EXIT_FAILURE); } printf("Test passed\n"); exit(EXIT_SUCCESS); }
f4419eddfa05194200dca08b797663dead672cd4.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This sample implements a separable convolution filter * of a 2D image with an arbitrary kernel. */ // CUDA runtime #include <cuda_runtime.h> #include <thrust/block_2d.h> #include <thrust/window_transform.h> // Utilities and system includes #include <helper_functions.h> #include <helper_cuda.h> #include "convolutionSeparable_common.h" #include <thrust/window_2d.h> #include <thrust/window_transform.h> //////////////////////////////////////////////////////////////////////////////// // Reference CPU convolution //////////////////////////////////////////////////////////////////////////////// extern "C" void convolutionRowCPU( float *h_Result, float *h_Data, float *h_Kernel, int imageW, int imageH, int kernelR ); extern "C" void convolutionColumnCPU( float *h_Result, float *h_Data, float *h_Kernel, int imageW, int imageH, int kernelR ); int imageW , imageH; //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // start logs printf("[%s] - Starting...\n", argv[0]); float *h_Kernel, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU; if(argc==2) { printf("Size = "); printf("%d",atoi(argv[1])); imageW = atoi(argv[1]); imageH = atoi(argv[1]); } else { imageW = 3072; imageH = 3072; } const int iterations = 16; StopWatchInterface *hTimer = NULL; //Use command-line specified CUDA device, otherwise use device with highest Gflops/s findCudaDevice(argc, (const char **)argv); sdkCreateTimer(&hTimer); printf("Image Width x Height = %i x %i\n\n", imageW, imageH); printf("Allocating and initializing host arrays...\n"); h_Kernel = (float *)malloc(KERNEL_LENGTH * sizeof(float)); h_Input = (float *)malloc(imageW * imageH * sizeof(float)); h_Buffer = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float)); srand(200); for (unsigned int i = 0; i < KERNEL_LENGTH; i++) { h_Kernel[i] = (float)(rand() % 16); } for (unsigned i = 0; i < imageW * imageH; i++) { h_Input[i] = (float)(rand() % 16); } printf("Allocating and initializing CUDA arrays...\n"); thrust::block_2d<float> input_block (imageW,imageH); thrust::block_2d<float> output_block (imageW,imageH); input_block.upload(h_Input); printf("Running GPU convolution (%u identical iterations)...\n\n", iterations); for (int i = -1; i < iterations; i++) { //i == -1 -- warmup iteration if (i == 0) { checkCudaErrors(cudaDeviceSynchronize()); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); } thrust::convolve(thrust::cuda::shared,&input_block,h_Kernel,h_Kernel,KERNEL_LENGTH,&output_block); } checkCudaErrors(cudaDeviceSynchronize()); sdkStopTimer(&hTimer); double gpuTime = 0.001 * sdkGetTimerValue(&hTimer) / (double)iterations; printf("convolutionSeparable, Throughput = %.4f MPixels/sec, Time = %.5f s, Size = %u Pixels, NumDevsUsed = %i, Workgroup = %u\n", (1.0e-6 * (double)(imageW * imageH)/ gpuTime), gpuTime, (imageW * imageH), 1, 0); printf("\nReading back GPU results...\n\n"); output_block.download(&h_OutputGPU); 
printf("Checking the results...\n"); printf(" ...running convolutionRowCPU()\n"); convolutionRowCPU( h_Buffer, h_Input, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...running convolutionColumnCPU()\n"); convolutionColumnCPU( h_OutputCPU, h_Buffer, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...comparing the results`\n"); double sum = 0, delta = 0; for (unsigned i = 0; i < imageW * imageH; i++) { delta += (h_OutputGPU[i] - h_OutputCPU[i]) * (h_OutputGPU[i] - h_OutputCPU[i]); sum += h_OutputCPU[i] * h_OutputCPU[i]; } double L2norm = sqrt(delta / sum); printf(" ...Relative L2 norm: %E\n\n", L2norm); printf("Shutting down...\n"); free(h_OutputGPU); free(h_OutputCPU); free(h_Buffer); free(h_Input); free(h_Kernel); sdkDeleteTimer(&hTimer); if (L2norm > 1e-6) { printf("Test failed!\n"); exit(EXIT_FAILURE); } printf("Test passed\n"); exit(EXIT_SUCCESS); }
a58a427dd5d8ed0c0541e66c205719e98591b1d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "GpuHoloKernels.cuh" #include <stdio.h> namespace gpu_holo { __host__ void getKernelDims(dim3 &blocks, dim3 &threads, int w, int h) { const int THREADS_X = 32; const int THREADS_Y = 32; const int BLOCKS_X = static_cast<int>(::ceil(w) / static_cast<double>(THREADS_X)); const int BLOCKS_Y = static_cast<int>(::ceil(h) / static_cast<double>(THREADS_Y)); threads = dim3(THREADS_X, THREADS_Y); blocks = dim3(BLOCKS_X, BLOCKS_Y); } __global__ void absSqKernel( const cv::cuda::PtrStepSz<double> re, const cv::cuda::PtrStepSz<double> im, cv::cuda::PtrStepSz<double> output) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x <= re.cols - 1 && y <= re.rows - 1 && y >= 0 && x >= 0) { double val_re = re(x, y); double val_im = im(x, y); output(x, y) = val_re * val_re + val_im * val_im; } } __host__ void callAbsSqKernel( const cv::cuda::PtrStepSz<double> re, const cv::cuda::PtrStepSz<double> im, cv::cuda::PtrStepSz<double> output) { dim3 blocks, threads; getKernelDims(blocks, threads, re.cols, re.rows); hipLaunchKernelGGL(( absSqKernel) , dim3(blocks), dim3(threads) , 0, 0, re, im, output); } __global__ void argKernel(const cv::cuda::PtrStepSz<double> re, const cv::cuda::PtrStepSz<double> im, cv::cuda::PtrStepSz<double> output) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x <= re.cols - 1 && y <= re.rows - 1 && y >= 0 && x >= 0) { double val_re = re(x, y); double val_im = im(x, y); output(x, y) = atan2(val_im, val_re); } } __host__ void callArgKernel(const cv::cuda::PtrStepSz<double> re, const cv::cuda::PtrStepSz<double> im, cv::cuda::PtrStepSz<double> output) { dim3 blocks, threads; getKernelDims(blocks, threads, re.cols, re.rows); hipLaunchKernelGGL(( argKernel) , dim3(blocks), dim3(threads) , 0, 0, re, im, output); } __global__ void logAbsKernel(const cv::cuda::PtrStepSz<double> re, const cv::cuda::PtrStepSz<double> im, cv::cuda::PtrStepSz<double> output) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x <= re.cols - 1 && y <= re.rows - 1 && y >= 0 && x >= 0) { double val_re = re(x, y); double val_im = im(x, y); output(x, y) = log(1.0 + val_re * val_re + val_im * val_im); } } __host__ void calllogAbsKernel(const cv::cuda::PtrStepSz<double> re, const cv::cuda::PtrStepSz<double> im, cv::cuda::PtrStepSz<double> output) { dim3 blocks, threads; getKernelDims(blocks, threads, re.cols, re.rows); hipLaunchKernelGGL(( logAbsKernel) , dim3(blocks), dim3(threads) , 0, 0, re, im, output); } }
a58a427dd5d8ed0c0541e66c205719e98591b1d5.cu
#include "GpuHoloKernels.cuh" #include <stdio.h> namespace gpu_holo { __host__ void getKernelDims(dim3 &blocks, dim3 &threads, int w, int h) { const int THREADS_X = 32; const int THREADS_Y = 32; const int BLOCKS_X = static_cast<int>(std::ceil(w) / static_cast<double>(THREADS_X)); const int BLOCKS_Y = static_cast<int>(std::ceil(h) / static_cast<double>(THREADS_Y)); threads = dim3(THREADS_X, THREADS_Y); blocks = dim3(BLOCKS_X, BLOCKS_Y); } __global__ void absSqKernel( const cv::cuda::PtrStepSz<double> re, const cv::cuda::PtrStepSz<double> im, cv::cuda::PtrStepSz<double> output) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x <= re.cols - 1 && y <= re.rows - 1 && y >= 0 && x >= 0) { double val_re = re(x, y); double val_im = im(x, y); output(x, y) = val_re * val_re + val_im * val_im; } } __host__ void callAbsSqKernel( const cv::cuda::PtrStepSz<double> re, const cv::cuda::PtrStepSz<double> im, cv::cuda::PtrStepSz<double> output) { dim3 blocks, threads; getKernelDims(blocks, threads, re.cols, re.rows); absSqKernel <<< blocks, threads >>> (re, im, output); } __global__ void argKernel(const cv::cuda::PtrStepSz<double> re, const cv::cuda::PtrStepSz<double> im, cv::cuda::PtrStepSz<double> output) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x <= re.cols - 1 && y <= re.rows - 1 && y >= 0 && x >= 0) { double val_re = re(x, y); double val_im = im(x, y); output(x, y) = atan2(val_im, val_re); } } __host__ void callArgKernel(const cv::cuda::PtrStepSz<double> re, const cv::cuda::PtrStepSz<double> im, cv::cuda::PtrStepSz<double> output) { dim3 blocks, threads; getKernelDims(blocks, threads, re.cols, re.rows); argKernel <<< blocks, threads >>> (re, im, output); } __global__ void logAbsKernel(const cv::cuda::PtrStepSz<double> re, const cv::cuda::PtrStepSz<double> im, cv::cuda::PtrStepSz<double> output) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x <= re.cols - 1 && y <= re.rows - 1 && y >= 0 && x >= 0) { double val_re = re(x, y); double val_im = im(x, y); output(x, y) = log(1.0 + val_re * val_re + val_im * val_im); } } __host__ void calllogAbsKernel(const cv::cuda::PtrStepSz<double> re, const cv::cuda::PtrStepSz<double> im, cv::cuda::PtrStepSz<double> output) { dim3 blocks, threads; getKernelDims(blocks, threads, re.cols, re.rows); logAbsKernel <<< blocks, threads >>> (re, im, output); } }
7d81c7fa6b9faef117bdebe96a87864d3c8a6b7c.hip
// !!! This is a file automatically generated by hipify!!!
#include "common_hip.cuh"

// The vertex buffer and index buffer of the geometry to test against.
rtBuffer<VertexData> vertex_buffer;
rtBuffer<uint3> index_buffer;

// The current ray.
rtDeclareVariable(optix::Ray, ray, rtCurrentRay, );

// Output attributes that we will define/write upon an intersection.
rtDeclareVariable(optix::float3, attr_geo_normal, attribute GEO_NORMAL, );
rtDeclareVariable(optix::float3, attr_tangent, attribute TANGENT , );
rtDeclareVariable(optix::float3, attr_normal, attribute NORMAL , );
rtDeclareVariable(optix::float3, attr_uv, attribute TEX_UV , );

// Checks for intersection against geometry made of indexed triangle data.
RT_PROGRAM void intersection_triangle_indexed(int primitive_index) {
  // Triangle vertices.
  const uint3 indices = index_buffer[primitive_index];
  VertexData const& v0 = vertex_buffer[indices.x];
  VertexData const& v1 = vertex_buffer[indices.y];
  VertexData const& v2 = vertex_buffer[indices.z];

  // Ray-triangle intersection test.
  float3 n;
  float t;
  float beta;
  float gamma;
  if (intersect_triangle(ray, v0.position, v1.position, v2.position, n, t, beta, gamma)) {
    // NOTE: intersect_triangle() is defined in optixu_math_namespace.h. See: https://docs.nvidia.com/gameworks/content/gameworkslibrary/optix/optixapireference/optixu__math__namespace_8h.html
    if (rtPotentialIntersection(t)) {
      // Barycentric interpolation
      const float alpha = 1.0f - beta - gamma;

      // NOTE: We will normalize the results in the hit shaders.
      attr_geo_normal = n;
      attr_tangent = v0.tangent * alpha + v1.tangent * beta + v2.tangent * gamma;
      attr_normal = v0.normal * alpha + v1.normal * beta + v2.normal * gamma;
      attr_uv = v0.uv * alpha + v1.uv * beta + v2.uv * gamma;

      // Report intersection for material 0.
      rtReportIntersection(0);
    }
  }
}
7d81c7fa6b9faef117bdebe96a87864d3c8a6b7c.cu
#include "common.cuh" // The vertex buffer and index buffer of the geometry to test against. rtBuffer<VertexData> vertex_buffer; rtBuffer<uint3> index_buffer; // The current ray. rtDeclareVariable(optix::Ray, ray, rtCurrentRay, ); // Output attributes that we will define/write upon an intersection. rtDeclareVariable(optix::float3, attr_geo_normal, attribute GEO_NORMAL, ); rtDeclareVariable(optix::float3, attr_tangent, attribute TANGENT , ); rtDeclareVariable(optix::float3, attr_normal, attribute NORMAL , ); rtDeclareVariable(optix::float3, attr_uv, attribute TEX_UV , ); // Checks for intersection against geometry made of indexed triangle data. RT_PROGRAM void intersection_triangle_indexed(int primitive_index) { // Triangle vertices. const uint3 indices = index_buffer[primitive_index]; VertexData const& v0 = vertex_buffer[indices.x]; VertexData const& v1 = vertex_buffer[indices.y]; VertexData const& v2 = vertex_buffer[indices.z]; // Ray-triangle intersection test. float3 n; float t; float beta; float gamma; if (intersect_triangle(ray, v0.position, v1.position, v2.position, n, t, beta, gamma)) { // NOTE: intersect_triangle() is defined in optixu_math_namespace.h. See: https://docs.nvidia.com/gameworks/content/gameworkslibrary/optix/optixapireference/optixu__math__namespace_8h.html if (rtPotentialIntersection(t)) { // Barycentric interpolation const float alpha = 1.0f - beta - gamma; // NOTE: We will normalize the results in the hit shaders. attr_geo_normal = n; attr_tangent = v0.tangent * alpha + v1.tangent * beta + v2.tangent * gamma; attr_normal = v0.normal * alpha + v1.normal * beta + v2.normal * gamma; attr_uv = v0.uv * alpha + v1.uv * beta + v2.uv * gamma; // Report intersection for material 0. rtReportIntersection(0); } } }
3aa8118b7b30987129f1141bb44aaa2dcf21cebe.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { /// @brief refer to CPU forward -- the BLAS implementation is the same. template <typename Dtype> void ConvolutionSparseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) { for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = (*top)[i]->mutable_gpu_data(); Dtype* col_data = col_buffer_.mutable_gpu_data(); Dtype* c0_data = c0_buffer_.mutable_gpu_data(); Dtype* c1_data = c1_buffer_.mutable_gpu_data(); const Dtype* w0_data = this->blobs_[0]->gpu_data(); const Dtype* w1_data = this->blobs_[1]->gpu_data(); const Dtype* w2_data = this->blobs_[2]->gpu_data(); int bottom_offset = height_ * width_ * channels_ / group_; int c0_offset = height_ * width_ * channels_ / group_; int w0_offset = channels_ * channels_ / group_ / group_; // int col_offset = K_ * N_; int c1_offset = K_ * N_; int w2_offset = M_ * K_; int top_offset = M_ * N_; UpdatePtrs(); for (int n = 0; n < num_; ++n) { // PCA on channels for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ / group_, height_ * width_, channels_ / group_, (Dtype)1., w0_data + g * w0_offset, bottom_data + bottom[i]->offset(n) + g * bottom_offset, (Dtype)0., c0_data + g * c0_offset); } // im2col im2col_gpu(c0_data, channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_data); // multiply with w1, save to c1 caffe_gpu_gemm_batch<Dtype>(CblasNoTrans, CblasNoTrans, kernel_h_ * kernel_w_, N_, kernel_h_ * kernel_w_, (Dtype)1., (const Dtype**)w1_data_ptrs_gpu, (const Dtype**)col_data_ptrs_gpu, (Dtype)0., c1_data_ptrs_gpu, channels_); for (int g = 0; g < group_; ++g) { // multiply with w2, save to top caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype)1., w2_data + w2_offset * g, c1_data + c1_offset * g, (Dtype)0., top_data + (*top)[i]->offset(n) + top_offset * g); } // add bias bias_multiplier_.gpu_data(); if (bias_term_) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_output_, N_, 1, (Dtype)1., this->blobs_[3]->gpu_data(), bias_multiplier_.gpu_data(), (Dtype)1., top_data + (*top)[i]->offset(n)); } } } } /// @brief refer to CPU backward -- the BLAS implementation is the same. 
template <typename Dtype> void ConvolutionSparseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) { Dtype* w0_data = this->blobs_[0]->mutable_gpu_data(); Dtype* w0_diff = this->blobs_[0]->mutable_gpu_diff(); Dtype* w1_data = this->blobs_[1]->mutable_gpu_data(); Dtype* w1_diff = this->blobs_[1]->mutable_gpu_diff(); Dtype* w2_data = this->blobs_[2]->mutable_gpu_data(); Dtype* w2_diff = this->blobs_[2]->mutable_gpu_diff(); Dtype* bias_diff = this->blobs_[3]->mutable_gpu_diff(); // zero accumulated gradients caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), w0_diff); caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), w1_diff); caffe_gpu_set(this->blobs_[2]->count(), Dtype(0), w2_diff); if (bias_term_) { caffe_gpu_set(this->blobs_[3]->count(), Dtype(0), bias_diff); } int bottom_offset = height_ * width_ * channels_ / group_; int c0_offset = height_ * width_ * channels_ / group_; int w0_offset = channels_ * channels_ / group_ / group_; int c1_offset = K_ * N_; int w2_offset = M_ * K_; int top_offset = M_ * N_; for (int i = 0; i < top.size(); ++i) { Dtype* bottom_data = (*bottom)[i]->mutable_gpu_data(); Dtype* bottom_diff = (*bottom)[i]->mutable_gpu_diff(); Dtype* top_data = top[i]->mutable_gpu_data(); Dtype* top_diff = top[i]->mutable_gpu_diff(); Dtype* col_data = col_buffer_.mutable_gpu_data(); Dtype* col_diff = col_buffer_.mutable_gpu_diff(); Dtype* c0_data = c0_buffer_.mutable_gpu_data(); Dtype* c0_diff = c0_buffer_.mutable_gpu_diff(); Dtype* c1_data = c1_buffer_.mutable_gpu_data(); Dtype* c1_diff = c1_buffer_.mutable_gpu_diff(); UpdatePtrs(); for (int n = 0; n < num_; ++n) { // Since we saved memory in the forward pass by not storing all col // data, we will need to recompute them. 
/****************** Forward Pass *******************/ // PCA on channels for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ / group_, height_ * width_, channels_ / group_, (Dtype)1., w0_data + g * w0_offset, bottom_data + (*bottom)[i]->offset(n) + g * bottom_offset, (Dtype)0., c0_data + g * c0_offset); } // im2col im2col_gpu(c0_data, channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_data); // multiply with w1, save to c1 caffe_gpu_gemm_batch<Dtype>(CblasNoTrans, CblasNoTrans, kernel_h_ * kernel_w_, N_, kernel_h_ * kernel_w_, (Dtype)1., (const Dtype**)w1_data_ptrs_gpu, (const Dtype**)col_data_ptrs_gpu, (Dtype)0., c1_data_ptrs_gpu, channels_); for (int g = 0; g < group_; ++g) { // multiply with w2, save to top caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype)1., w2_data + w2_offset * g, c1_data + c1_offset * g, (Dtype)0., top_data + top[i]->offset(n) + top_offset * g); } // add bias if (bias_term_) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_output_, N_, 1, (Dtype)1., this->blobs_[3]->gpu_data(), bias_multiplier_.gpu_data(), (Dtype)1., top_data + top[i]->offset(n)); } /****************** Backward Pass *******************/ // bias if (bias_term_) { caffe_gpu_gemv<Dtype>(CblasNoTrans, num_output_, N_, (Dtype)1., top_diff + top[i]->offset(n), bias_multiplier_.gpu_data(), (Dtype)1., bias_diff); } for (int g = 0; g < group_; ++g) { // w2_diff caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype)1., top_diff + top[i]->offset(n) + top_offset * g, c1_data + c1_offset * g, (Dtype)1., w2_diff + w2_offset * g); // c1_diff caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype)1., w2_data + w2_offset * g, top_diff + top[i]->offset(n) + top_offset * g, (Dtype)0., c1_diff + c1_offset * g); } // w1_diff caffe_gpu_gemm_batch<Dtype>(CblasNoTrans, CblasTrans, kernel_h_ * kernel_w_, kernel_h_ * kernel_w_, N_, (Dtype)1., (const Dtype**)c1_diff_ptrs_gpu, (const Dtype**)col_data_ptrs_gpu, (Dtype)1., w1_diff_ptrs_gpu, channels_); // col_diff caffe_gpu_gemm_batch<Dtype>(CblasTrans, CblasNoTrans, kernel_h_ * kernel_w_, N_, kernel_h_ * kernel_w_, (Dtype)1., (const Dtype**)w1_data_ptrs_gpu, (const Dtype**)c1_diff_ptrs_gpu, (Dtype)0., col_diff_ptrs_gpu, channels_); // c0_diff col2im_gpu(col_diff, channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, c0_diff); // w0 for (int g = 0; g < group_; ++g) { // w0_diff caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, channels_ / group_, channels_ / group_, height_ * width_, (Dtype)1., c0_diff + g * c0_offset, bottom_data + (*bottom)[i]->offset(n) + g * bottom_offset, (Dtype)1., w0_diff + g * w0_offset); // bottom_diff caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, channels_ / group_, height_ * width_, channels_ / group_, (Dtype)1., w0_data + g * w0_offset, c0_diff + g * c0_offset, (Dtype)0., bottom_diff + (*bottom)[i]->offset(n) + g * bottom_offset); } } } } template <typename Dtype> void ConvolutionSparseLayer<Dtype>::UpdatePtrs() { Dtype* col_data = col_buffer_.mutable_gpu_data(); Dtype* col_diff = col_buffer_.mutable_gpu_diff(); Dtype* w1_data = this->blobs_[1]->mutable_gpu_data(); Dtype* w1_diff = this->blobs_[1]->mutable_gpu_diff(); Dtype* c1_data = c1_buffer_.mutable_gpu_data(); Dtype* c1_diff = c1_buffer_.mutable_gpu_diff(); // Setup gpu pointers for gemm batch mode Dtype** col_data_ptrs_cpu = new Dtype*[channels_]; Dtype** col_diff_ptrs_cpu = new Dtype*[channels_]; Dtype** w1_data_ptrs_cpu = new 
Dtype*[channels_]; Dtype** w1_diff_ptrs_cpu = new Dtype*[channels_]; Dtype** c1_data_ptrs_cpu = new Dtype*[channels_]; Dtype** c1_diff_ptrs_cpu = new Dtype*[channels_]; for(int i=0; i<channels_; i++) { col_data_ptrs_cpu[i] = col_data + kernel_h_ * kernel_w_ * N_ * i; col_diff_ptrs_cpu[i] = col_diff + kernel_h_ * kernel_w_ * N_*i; w1_data_ptrs_cpu[i] = w1_data + i * kernel_h_ * kernel_w_ * kernel_h_ * kernel_w_; w1_diff_ptrs_cpu[i] = w1_diff + i * kernel_h_ * kernel_w_ * kernel_h_ * kernel_w_; c1_data_ptrs_cpu[i] = c1_data + kernel_h_ * kernel_w_ * N_ * i; c1_diff_ptrs_cpu[i] = c1_diff + kernel_h_ * kernel_w_ * N_ * i; } hipMemcpy(col_data_ptrs_gpu, col_data_ptrs_cpu, channels_ * sizeof(Dtype*), hipMemcpyHostToDevice); hipMemcpy(col_diff_ptrs_gpu, col_diff_ptrs_cpu, channels_ * sizeof(Dtype*), hipMemcpyHostToDevice); hipMemcpy(w1_data_ptrs_gpu, w1_data_ptrs_cpu, channels_ * sizeof(Dtype*), hipMemcpyHostToDevice); hipMemcpy(w1_diff_ptrs_gpu, w1_diff_ptrs_cpu, channels_ * sizeof(Dtype*), hipMemcpyHostToDevice); hipMemcpy(c1_data_ptrs_gpu, c1_data_ptrs_cpu, channels_ * sizeof(Dtype*), hipMemcpyHostToDevice); hipMemcpy(c1_diff_ptrs_gpu, c1_diff_ptrs_cpu, channels_ * sizeof(Dtype*), hipMemcpyHostToDevice); delete[] col_data_ptrs_cpu; delete[] col_diff_ptrs_cpu; delete[] w1_data_ptrs_cpu; delete[] w1_diff_ptrs_cpu; delete[] c1_data_ptrs_cpu; delete[] c1_diff_ptrs_cpu; } INSTANTIATE_CLASS(ConvolutionSparseLayer); } // namespace caffe /* template <typename Dtype> void ConvolutionSparseLayer<Dtype>::Backward_gpu_org(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); } Dtype* bias_diff = NULL; if (bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); } const int weight_offset = M_ * K_; const int col_offset = K_ * N_; const int top_offset = M_ * N_; for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = NULL; // Bias gradient, if necessary. if (bias_term_ && this->param_propagate_down_[1]) { top_diff = top[i]->gpu_diff(); for (int n = 0; n < num_; ++n) { caffe_gpu_gemv<Dtype>(CblasNoTrans, num_output_, N_, 1., top_diff + top[0]->offset(n), bias_multiplier_.gpu_data(), 1., bias_diff); } } if (this->param_propagate_down_[0] || propagate_down[i]) { if (!top_diff) { top_diff = top[i]->gpu_diff(); } Dtype* col_data = col_buffer_.mutable_gpu_data(); Dtype* col_diff = col_buffer_.mutable_gpu_diff(); const Dtype* bottom_data = (*bottom)[i]->gpu_data(); Dtype* bottom_diff = (*bottom)[i]->mutable_gpu_diff(); for (int n = 0; n < num_; ++n) { // Since we saved memory in the forward pass by not storing all col // data, we will need to recompute them. im2col_gpu(bottom_data + (*bottom)[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_data); // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype)1., top_diff + top[i]->offset(n) + top_offset * g, col_data + col_offset * g, (Dtype)1., weight_diff + weight_offset * g); } } // gradient w.r.t. 
bottom data, if necessary if (propagate_down[i]) { if (weight == NULL) { weight = this->blobs_[0]->gpu_data(); } for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype)1., weight + weight_offset * g, top_diff + top[i]->offset(n) + top_offset * g, (Dtype)0., col_diff + col_offset * g); } // col2im back to the data col2im_gpu(col_diff, channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, bottom_diff + (*bottom)[i]->offset(n)); } } } } } */
3aa8118b7b30987129f1141bb44aaa2dcf21cebe.cu
#include <vector> #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { /// @brief refer to CPU forward -- the BLAS implementation is the same. template <typename Dtype> void ConvolutionSparseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) { for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = (*top)[i]->mutable_gpu_data(); Dtype* col_data = col_buffer_.mutable_gpu_data(); Dtype* c0_data = c0_buffer_.mutable_gpu_data(); Dtype* c1_data = c1_buffer_.mutable_gpu_data(); const Dtype* w0_data = this->blobs_[0]->gpu_data(); const Dtype* w1_data = this->blobs_[1]->gpu_data(); const Dtype* w2_data = this->blobs_[2]->gpu_data(); int bottom_offset = height_ * width_ * channels_ / group_; int c0_offset = height_ * width_ * channels_ / group_; int w0_offset = channels_ * channels_ / group_ / group_; // int col_offset = K_ * N_; int c1_offset = K_ * N_; int w2_offset = M_ * K_; int top_offset = M_ * N_; UpdatePtrs(); for (int n = 0; n < num_; ++n) { // PCA on channels for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ / group_, height_ * width_, channels_ / group_, (Dtype)1., w0_data + g * w0_offset, bottom_data + bottom[i]->offset(n) + g * bottom_offset, (Dtype)0., c0_data + g * c0_offset); } // im2col im2col_gpu(c0_data, channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_data); // multiply with w1, save to c1 caffe_gpu_gemm_batch<Dtype>(CblasNoTrans, CblasNoTrans, kernel_h_ * kernel_w_, N_, kernel_h_ * kernel_w_, (Dtype)1., (const Dtype**)w1_data_ptrs_gpu, (const Dtype**)col_data_ptrs_gpu, (Dtype)0., c1_data_ptrs_gpu, channels_); for (int g = 0; g < group_; ++g) { // multiply with w2, save to top caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype)1., w2_data + w2_offset * g, c1_data + c1_offset * g, (Dtype)0., top_data + (*top)[i]->offset(n) + top_offset * g); } // add bias bias_multiplier_.gpu_data(); if (bias_term_) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_output_, N_, 1, (Dtype)1., this->blobs_[3]->gpu_data(), bias_multiplier_.gpu_data(), (Dtype)1., top_data + (*top)[i]->offset(n)); } } } } /// @brief refer to CPU backward -- the BLAS implementation is the same. 
template <typename Dtype> void ConvolutionSparseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) { Dtype* w0_data = this->blobs_[0]->mutable_gpu_data(); Dtype* w0_diff = this->blobs_[0]->mutable_gpu_diff(); Dtype* w1_data = this->blobs_[1]->mutable_gpu_data(); Dtype* w1_diff = this->blobs_[1]->mutable_gpu_diff(); Dtype* w2_data = this->blobs_[2]->mutable_gpu_data(); Dtype* w2_diff = this->blobs_[2]->mutable_gpu_diff(); Dtype* bias_diff = this->blobs_[3]->mutable_gpu_diff(); // zero accumulated gradients caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), w0_diff); caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), w1_diff); caffe_gpu_set(this->blobs_[2]->count(), Dtype(0), w2_diff); if (bias_term_) { caffe_gpu_set(this->blobs_[3]->count(), Dtype(0), bias_diff); } int bottom_offset = height_ * width_ * channels_ / group_; int c0_offset = height_ * width_ * channels_ / group_; int w0_offset = channels_ * channels_ / group_ / group_; int c1_offset = K_ * N_; int w2_offset = M_ * K_; int top_offset = M_ * N_; for (int i = 0; i < top.size(); ++i) { Dtype* bottom_data = (*bottom)[i]->mutable_gpu_data(); Dtype* bottom_diff = (*bottom)[i]->mutable_gpu_diff(); Dtype* top_data = top[i]->mutable_gpu_data(); Dtype* top_diff = top[i]->mutable_gpu_diff(); Dtype* col_data = col_buffer_.mutable_gpu_data(); Dtype* col_diff = col_buffer_.mutable_gpu_diff(); Dtype* c0_data = c0_buffer_.mutable_gpu_data(); Dtype* c0_diff = c0_buffer_.mutable_gpu_diff(); Dtype* c1_data = c1_buffer_.mutable_gpu_data(); Dtype* c1_diff = c1_buffer_.mutable_gpu_diff(); UpdatePtrs(); for (int n = 0; n < num_; ++n) { // Since we saved memory in the forward pass by not storing all col // data, we will need to recompute them. 
/****************** Forward Pass *******************/ // PCA on channels for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ / group_, height_ * width_, channels_ / group_, (Dtype)1., w0_data + g * w0_offset, bottom_data + (*bottom)[i]->offset(n) + g * bottom_offset, (Dtype)0., c0_data + g * c0_offset); } // im2col im2col_gpu(c0_data, channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_data); // multiply with w1, save to c1 caffe_gpu_gemm_batch<Dtype>(CblasNoTrans, CblasNoTrans, kernel_h_ * kernel_w_, N_, kernel_h_ * kernel_w_, (Dtype)1., (const Dtype**)w1_data_ptrs_gpu, (const Dtype**)col_data_ptrs_gpu, (Dtype)0., c1_data_ptrs_gpu, channels_); for (int g = 0; g < group_; ++g) { // multiply with w2, save to top caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype)1., w2_data + w2_offset * g, c1_data + c1_offset * g, (Dtype)0., top_data + top[i]->offset(n) + top_offset * g); } // add bias if (bias_term_) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_output_, N_, 1, (Dtype)1., this->blobs_[3]->gpu_data(), bias_multiplier_.gpu_data(), (Dtype)1., top_data + top[i]->offset(n)); } /****************** Backward Pass *******************/ // bias if (bias_term_) { caffe_gpu_gemv<Dtype>(CblasNoTrans, num_output_, N_, (Dtype)1., top_diff + top[i]->offset(n), bias_multiplier_.gpu_data(), (Dtype)1., bias_diff); } for (int g = 0; g < group_; ++g) { // w2_diff caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype)1., top_diff + top[i]->offset(n) + top_offset * g, c1_data + c1_offset * g, (Dtype)1., w2_diff + w2_offset * g); // c1_diff caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype)1., w2_data + w2_offset * g, top_diff + top[i]->offset(n) + top_offset * g, (Dtype)0., c1_diff + c1_offset * g); } // w1_diff caffe_gpu_gemm_batch<Dtype>(CblasNoTrans, CblasTrans, kernel_h_ * kernel_w_, kernel_h_ * kernel_w_, N_, (Dtype)1., (const Dtype**)c1_diff_ptrs_gpu, (const Dtype**)col_data_ptrs_gpu, (Dtype)1., w1_diff_ptrs_gpu, channels_); // col_diff caffe_gpu_gemm_batch<Dtype>(CblasTrans, CblasNoTrans, kernel_h_ * kernel_w_, N_, kernel_h_ * kernel_w_, (Dtype)1., (const Dtype**)w1_data_ptrs_gpu, (const Dtype**)c1_diff_ptrs_gpu, (Dtype)0., col_diff_ptrs_gpu, channels_); // c0_diff col2im_gpu(col_diff, channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, c0_diff); // w0 for (int g = 0; g < group_; ++g) { // w0_diff caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, channels_ / group_, channels_ / group_, height_ * width_, (Dtype)1., c0_diff + g * c0_offset, bottom_data + (*bottom)[i]->offset(n) + g * bottom_offset, (Dtype)1., w0_diff + g * w0_offset); // bottom_diff caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, channels_ / group_, height_ * width_, channels_ / group_, (Dtype)1., w0_data + g * w0_offset, c0_diff + g * c0_offset, (Dtype)0., bottom_diff + (*bottom)[i]->offset(n) + g * bottom_offset); } } } } template <typename Dtype> void ConvolutionSparseLayer<Dtype>::UpdatePtrs() { Dtype* col_data = col_buffer_.mutable_gpu_data(); Dtype* col_diff = col_buffer_.mutable_gpu_diff(); Dtype* w1_data = this->blobs_[1]->mutable_gpu_data(); Dtype* w1_diff = this->blobs_[1]->mutable_gpu_diff(); Dtype* c1_data = c1_buffer_.mutable_gpu_data(); Dtype* c1_diff = c1_buffer_.mutable_gpu_diff(); // Setup gpu pointers for gemm batch mode Dtype** col_data_ptrs_cpu = new Dtype*[channels_]; Dtype** col_diff_ptrs_cpu = new Dtype*[channels_]; Dtype** w1_data_ptrs_cpu = new 
Dtype*[channels_]; Dtype** w1_diff_ptrs_cpu = new Dtype*[channels_]; Dtype** c1_data_ptrs_cpu = new Dtype*[channels_]; Dtype** c1_diff_ptrs_cpu = new Dtype*[channels_]; for(int i=0; i<channels_; i++) { col_data_ptrs_cpu[i] = col_data + kernel_h_ * kernel_w_ * N_ * i; col_diff_ptrs_cpu[i] = col_diff + kernel_h_ * kernel_w_ * N_*i; w1_data_ptrs_cpu[i] = w1_data + i * kernel_h_ * kernel_w_ * kernel_h_ * kernel_w_; w1_diff_ptrs_cpu[i] = w1_diff + i * kernel_h_ * kernel_w_ * kernel_h_ * kernel_w_; c1_data_ptrs_cpu[i] = c1_data + kernel_h_ * kernel_w_ * N_ * i; c1_diff_ptrs_cpu[i] = c1_diff + kernel_h_ * kernel_w_ * N_ * i; } cudaMemcpy(col_data_ptrs_gpu, col_data_ptrs_cpu, channels_ * sizeof(Dtype*), cudaMemcpyHostToDevice); cudaMemcpy(col_diff_ptrs_gpu, col_diff_ptrs_cpu, channels_ * sizeof(Dtype*), cudaMemcpyHostToDevice); cudaMemcpy(w1_data_ptrs_gpu, w1_data_ptrs_cpu, channels_ * sizeof(Dtype*), cudaMemcpyHostToDevice); cudaMemcpy(w1_diff_ptrs_gpu, w1_diff_ptrs_cpu, channels_ * sizeof(Dtype*), cudaMemcpyHostToDevice); cudaMemcpy(c1_data_ptrs_gpu, c1_data_ptrs_cpu, channels_ * sizeof(Dtype*), cudaMemcpyHostToDevice); cudaMemcpy(c1_diff_ptrs_gpu, c1_diff_ptrs_cpu, channels_ * sizeof(Dtype*), cudaMemcpyHostToDevice); delete[] col_data_ptrs_cpu; delete[] col_diff_ptrs_cpu; delete[] w1_data_ptrs_cpu; delete[] w1_diff_ptrs_cpu; delete[] c1_data_ptrs_cpu; delete[] c1_diff_ptrs_cpu; } INSTANTIATE_CLASS(ConvolutionSparseLayer); } // namespace caffe /* template <typename Dtype> void ConvolutionSparseLayer<Dtype>::Backward_gpu_org(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); } Dtype* bias_diff = NULL; if (bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); } const int weight_offset = M_ * K_; const int col_offset = K_ * N_; const int top_offset = M_ * N_; for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = NULL; // Bias gradient, if necessary. if (bias_term_ && this->param_propagate_down_[1]) { top_diff = top[i]->gpu_diff(); for (int n = 0; n < num_; ++n) { caffe_gpu_gemv<Dtype>(CblasNoTrans, num_output_, N_, 1., top_diff + top[0]->offset(n), bias_multiplier_.gpu_data(), 1., bias_diff); } } if (this->param_propagate_down_[0] || propagate_down[i]) { if (!top_diff) { top_diff = top[i]->gpu_diff(); } Dtype* col_data = col_buffer_.mutable_gpu_data(); Dtype* col_diff = col_buffer_.mutable_gpu_diff(); const Dtype* bottom_data = (*bottom)[i]->gpu_data(); Dtype* bottom_diff = (*bottom)[i]->mutable_gpu_diff(); for (int n = 0; n < num_; ++n) { // Since we saved memory in the forward pass by not storing all col // data, we will need to recompute them. im2col_gpu(bottom_data + (*bottom)[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_data); // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype)1., top_diff + top[i]->offset(n) + top_offset * g, col_data + col_offset * g, (Dtype)1., weight_diff + weight_offset * g); } } // gradient w.r.t. 
bottom data, if necessary if (propagate_down[i]) { if (weight == NULL) { weight = this->blobs_[0]->gpu_data(); } for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype)1., weight + weight_offset * g, top_diff + top[i]->offset(n) + top_offset * g, (Dtype)0., col_diff + col_offset * g); } // col2im back to the data col2im_gpu(col_diff, channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, bottom_diff + (*bottom)[i]->offset(n)); } } } } } */
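Note: the ConvolutionSparseLayer pair above is dominated by one mechanical hipify rewrite — the cudaMemcpy(..., cudaMemcpyHostToDevice) calls in UpdatePtrs() become hipMemcpy(..., hipMemcpyHostToDevice). Below is a minimal, self-contained sketch of the pointer-table upload that UpdatePtrs() performs for the batched GEMMs; the function and variable names (upload_batch_pointers, d_base, d_ptrs, batch, stride) are illustrative only and do not come from the files.

// Sketch only: build a host array of device pointers, one per batch slice,
// and copy the table to the GPU so a batched GEMM can dereference it.
#include <cuda_runtime.h>

void upload_batch_pointers(float* d_base, float** d_ptrs, int batch, size_t stride) {
  float** h_ptrs = new float*[batch];     // host-side pointer table
  for (int i = 0; i < batch; ++i) {
    h_ptrs[i] = d_base + i * stride;      // each entry points at one slice of d_base
  }
  // hipify rewrites this call as hipMemcpy(..., hipMemcpyHostToDevice)
  cudaMemcpy(d_ptrs, h_ptrs, batch * sizeof(float*), cudaMemcpyHostToDevice);
  delete[] h_ptrs;
}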
b5b76837b95ac199b60dcab3eb0dc32e1296d6e3.hip
// !!! This is a file automatically generated by hipify!!! // // cuxTimer_host.cu // // Host-side functions for timing control. It offers two functions: // cuxTimerReset() - Resets the timer internal data structures. // This is only safe when there are no timer-using kernels // running on the machine. // // Resetting everything from the host enables a moderate // performance increase on the device side, so is worth // doing if possible. // // cuxTimerDisplay() - Outputs the timing data to the specified // FILE pointer (or screen if none). Display can be in CSV // format or pre-formatted for the screen. You may also // optionally disable headers and column names, in the event // that you call cuxTimerDisplay() repeatedly to accumulate // stats during the program run. // // Note that cuxTimerDisplay does NOT reset the timing data // automatically - call cuxTimerReset for that. // // This file automatically includes cuxTimer.h // #ifndef CUXTIMER_HOST_CU #define CUXTIMER_HOST_CU #ifndef CUXTIMER_DISABLE #include "cuxTimer.h" #include "cuxTimer.cu" #include "cuxTimer_private.h" // Initialises things and clears all counters from the host side static __host__ void cudaxTimerReset() { #ifdef TIMERDEBUG // If debugging, we need to init cuPrintf cudaPrintfInit(); #endif // Clear the data buffer on the device void *globalptr; hipGetSymbolAddress(&globalptr, _cuxtimer_globals); hipMemset(globalptr, 0, sizeof(_cuxtimer_globals)); // Set up the constant info for the timer _timerConfig pc; pc.init = __CUXTIMERINIT_INITIALISED; hipMemcpyToSymbol(_cuxtimer_constants, &pc, sizeof(pc)); } // // cudaxTimerDisplay // // Dumps out the timing data. It's pretty simple - we just dump it for all IDs // which were actually called. // // We output to "fp". If "csv" is true, we output in CSV format. // If "show_headings" is true, we add column headings and other formatting. // static __host__ void cudaxTimerDisplay(FILE *fp, int csv, int show_headings) { #ifdef TIMERDEBUG // Dump printfs if we're debugging cudaPrintfDisplay(stdout, true); #endif static _timerGlobals timerHostGlobals; // Static so it doesn't live on the stack _timerData &timerLocalData = timerHostGlobals.funcdata; _timerStrings &timerHostStrings = timerHostGlobals.strings; memset(&timerHostGlobals, 0, sizeof(timerHostGlobals)); // Clear our local buffer unsigned int tot_count=0; unsigned long long tot_cycles=0, tot_max=0, tot_avg=0; // Create local copy of the timing info and config hipMemcpyFromSymbol(&timerHostGlobals, _cuxtimer_globals, sizeof(timerHostGlobals)); // The config comes from either the constant bank (if host-side reset // was called) or from device memory. Determine which. _timerConfig pc; hipMemcpyFromSymbol(&pc, _cuxtimer_constants, sizeof(pc)); if(pc.init == __CUXTIMERINIT_NOTINITIALISED) hipMemcpyFromSymbol(&pc, _cuxtimer_config, sizeof(pc)); // Verify that things were initialised if(pc.init != __CUXTIMERINIT_INITIALISED) { fprintf(stderr, "cuxTimer data: %s\n", (pc.init == __CUXTIMERINIT_NOTINITIALISED) ? 
"NOT INITIALISED" : "DEVICE-INIT FAILED"); return; } // Write out the headings if(show_headings) { if(!csv) { fprintf(fp, "Timing stats:\n"); fprintf(fp, "%20s: %8s %12s %12s %12s", "Name", "Count", "Tot Cycles", "Max Cycles", "Avg Cycles", "Description"); } else { fprintf(fp, "Name,Count,Tot Cycles,Max Cycles,Avg Cycles,Description"); } fprintf(fp, "\n"); } // Loop through all IDs, printing only those which have data for(int id=0; id<__CUXTIMER_IDS; id++) { if(timerLocalData.count[id] > 0) { // Print name - if it exists - or ID number if not. if(_timer_isNameSet(timerHostStrings.nameMask, id)) { if(timerHostStrings.idNames[id][0] == '\0') fprintf(fp, "Bad name for id %d", id); else fprintf(fp, (!csv) ? "%20s" : "\"%s\"", timerHostStrings.idNames[id]); } else fprintf(fp, (!csv) ? "%20d" : "%d", id); // Print timing data if(!csv) fprintf(fp, ": %8d %12llu %12llu %12llu", timerLocalData.count[id], timerLocalData.idtime[id], timerLocalData.maximum[id], timerLocalData.idtime[id]/timerLocalData.count[id]); else fprintf(fp, ",%d,%llu,%llu,%llu", timerLocalData.count[id], timerLocalData.idtime[id], timerLocalData.maximum[id], timerLocalData.idtime[id]/timerLocalData.count[id]); // Print the description, if any if(_timer_isNameSet(timerHostStrings.descrMask, id)) { if(timerHostStrings.idDescrs[id][0] == '\0') fprintf(fp, "Bad description for id %d", id); else fprintf(fp, (!csv) ? " %s" : ",\"%s\"", timerHostStrings.idDescrs[id]); } else if(csv) fprintf(fp, ",none"); fprintf(fp, "\n"); tot_count += timerLocalData.count[id]; tot_cycles += timerLocalData.idtime[id]; tot_max += timerLocalData.maximum[id]; tot_avg += timerLocalData.idtime[id]/timerLocalData.count[id]; } } // The "total" footer only comes out with headings turned on as well. if(show_headings) { if(!csv) { fprintf(fp, "---------------------------------------------------------------------\n"); fprintf(fp, "%20s: %8d %12llu %12llu %12llu\n", "TOTAL", tot_count, tot_cycles, tot_max, tot_avg); } else { fprintf(fp, "%s,%d,%llu,%llu,%llu,n/a", "TOTAL", tot_count, tot_cycles, tot_max, tot_avg); fprintf(fp, "\n"); } } } #endif // CUXTIMER_DISABLE #endif // CUXTIMER_HOST_CU
b5b76837b95ac199b60dcab3eb0dc32e1296d6e3.cu
// // cuxTimer_host.cu // // Host-side functions for timing control. It offers two functions: // cuxTimerReset() - Resets the timer internal data structures. // This is only safe when there are no timer-using kernels // running on the machine. // // Resetting everything from the host enables a moderate // performance increase on the device side, so is worth // doing if possible. // // cuxTimerDisplay() - Outputs the timing data to the specified // FILE pointer (or screen if none). Display can be in CSV // format or pre-formatted for the screen. You may also // optionally disable headers and column names, in the event // that you call cuxTimerDisplay() repeatedly to accumulate // stats during the program run. // // Note that cuxTimerDisplay does NOT reset the timing data // automatically - call cuxTimerReset for that. // // This file automatically includes cuxTimer.h // #ifndef CUXTIMER_HOST_CU #define CUXTIMER_HOST_CU #ifndef CUXTIMER_DISABLE #include "cuxTimer.h" #include "cuxTimer.cu" #include "cuxTimer_private.h" // Initialises things and clears all counters from the host side static __host__ void cudaxTimerReset() { #ifdef TIMERDEBUG // If debugging, we need to init cuPrintf cudaPrintfInit(); #endif // Clear the data buffer on the device void *globalptr; cudaGetSymbolAddress(&globalptr, _cuxtimer_globals); cudaMemset(globalptr, 0, sizeof(_cuxtimer_globals)); // Set up the constant info for the timer _timerConfig pc; pc.init = __CUXTIMERINIT_INITIALISED; cudaMemcpyToSymbol(_cuxtimer_constants, &pc, sizeof(pc)); } // // cudaxTimerDisplay // // Dumps out the timing data. It's pretty simple - we just dump it for all IDs // which were actually called. // // We output to "fp". If "csv" is true, we output in CSV format. // If "show_headings" is true, we add column headings and other formatting. // static __host__ void cudaxTimerDisplay(FILE *fp, int csv, int show_headings) { #ifdef TIMERDEBUG // Dump printfs if we're debugging cudaPrintfDisplay(stdout, true); #endif static _timerGlobals timerHostGlobals; // Static so it doesn't live on the stack _timerData &timerLocalData = timerHostGlobals.funcdata; _timerStrings &timerHostStrings = timerHostGlobals.strings; memset(&timerHostGlobals, 0, sizeof(timerHostGlobals)); // Clear our local buffer unsigned int tot_count=0; unsigned long long tot_cycles=0, tot_max=0, tot_avg=0; // Create local copy of the timing info and config cudaMemcpyFromSymbol(&timerHostGlobals, _cuxtimer_globals, sizeof(timerHostGlobals)); // The config comes from either the constant bank (if host-side reset // was called) or from device memory. Determine which. _timerConfig pc; cudaMemcpyFromSymbol(&pc, _cuxtimer_constants, sizeof(pc)); if(pc.init == __CUXTIMERINIT_NOTINITIALISED) cudaMemcpyFromSymbol(&pc, _cuxtimer_config, sizeof(pc)); // Verify that things were initialised if(pc.init != __CUXTIMERINIT_INITIALISED) { fprintf(stderr, "cuxTimer data: %s\n", (pc.init == __CUXTIMERINIT_NOTINITIALISED) ? "NOT INITIALISED" : "DEVICE-INIT FAILED"); return; } // Write out the headings if(show_headings) { if(!csv) { fprintf(fp, "Timing stats:\n"); fprintf(fp, "%20s: %8s %12s %12s %12s", "Name", "Count", "Tot Cycles", "Max Cycles", "Avg Cycles", "Description"); } else { fprintf(fp, "Name,Count,Tot Cycles,Max Cycles,Avg Cycles,Description"); } fprintf(fp, "\n"); } // Loop through all IDs, printing only those which have data for(int id=0; id<__CUXTIMER_IDS; id++) { if(timerLocalData.count[id] > 0) { // Print name - if it exists - or ID number if not. 
if(_timer_isNameSet(timerHostStrings.nameMask, id)) { if(timerHostStrings.idNames[id][0] == '\0') fprintf(fp, "Bad name for id %d", id); else fprintf(fp, (!csv) ? "%20s" : "\"%s\"", timerHostStrings.idNames[id]); } else fprintf(fp, (!csv) ? "%20d" : "%d", id); // Print timing data if(!csv) fprintf(fp, ": %8d %12llu %12llu %12llu", timerLocalData.count[id], timerLocalData.idtime[id], timerLocalData.maximum[id], timerLocalData.idtime[id]/timerLocalData.count[id]); else fprintf(fp, ",%d,%llu,%llu,%llu", timerLocalData.count[id], timerLocalData.idtime[id], timerLocalData.maximum[id], timerLocalData.idtime[id]/timerLocalData.count[id]); // Print the description, if any if(_timer_isNameSet(timerHostStrings.descrMask, id)) { if(timerHostStrings.idDescrs[id][0] == '\0') fprintf(fp, "Bad description for id %d", id); else fprintf(fp, (!csv) ? " %s" : ",\"%s\"", timerHostStrings.idDescrs[id]); } else if(csv) fprintf(fp, ",none"); fprintf(fp, "\n"); tot_count += timerLocalData.count[id]; tot_cycles += timerLocalData.idtime[id]; tot_max += timerLocalData.maximum[id]; tot_avg += timerLocalData.idtime[id]/timerLocalData.count[id]; } } // The "total" footer only comes out with headings turned on as well. if(show_headings) { if(!csv) { fprintf(fp, "---------------------------------------------------------------------\n"); fprintf(fp, "%20s: %8d %12llu %12llu %12llu\n", "TOTAL", tot_count, tot_cycles, tot_max, tot_avg); } else { fprintf(fp, "%s,%d,%llu,%llu,%llu,n/a", "TOTAL", tot_count, tot_cycles, tot_max, tot_avg); fprintf(fp, "\n"); } } } #endif // CUXTIMER_DISABLE #endif // CUXTIMER_HOST_CU
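Note: the cuxTimer pair above exercises the symbol-oriented runtime calls — cudaGetSymbolAddress, cudaMemset, cudaMemcpyToSymbol and cudaMemcpyFromSymbol map one-for-one to hipGetSymbolAddress, hipMemset, hipMemcpyToSymbol and hipMemcpyFromSymbol. A minimal sketch of that reset/read-back pattern follows; the device symbol g_counters and the function reset_and_read are hypothetical and used only for illustration.

// Sketch only: clear a __device__ symbol from the host, then copy it back,
// mirroring the cudaxTimerReset()/cudaxTimerDisplay() pattern above.
#include <cuda_runtime.h>
#include <cstdio>

__device__ int g_counters[16];

void reset_and_read() {
  void* dev_ptr = nullptr;
  cudaGetSymbolAddress(&dev_ptr, g_counters);   // hip: hipGetSymbolAddress
  cudaMemset(dev_ptr, 0, sizeof(g_counters));   // hip: hipMemset
  int host_copy[16];
  cudaMemcpyFromSymbol(host_copy, g_counters, sizeof(host_copy)); // hip: hipMemcpyFromSymbol
  printf("first counter after reset: %d\n", host_copy[0]);
}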
560d8e70c0a0fc8017e13eeb27327ebafa7fc8e4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

int main() {
    hipDeviceProp_t deviceProp;
    hipGetDeviceProperties(&deviceProp, 0);
    printf("Device name : %s\n", deviceProp.name);
    printf("Total global memory : %zu MB\n", deviceProp.totalGlobalMem / 1024 / 1024);
    printf("Shared memory per block : %zu\n", deviceProp.sharedMemPerBlock);
    printf("Registers per block : %d\n", deviceProp.regsPerBlock);
    printf("Warp size : %d\n", deviceProp.warpSize);
    printf("Memory pitch : %zu\n", deviceProp.memPitch);
    printf("Max threads per block : %d\n", deviceProp.maxThreadsPerBlock);
    printf("Max threads dimensions : x = %d, y = %d, z = %d\n",
           deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
    printf("Max grid size: x = %d, y = %d, z = %d\n",
           deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
    printf("Clock rate: %d\n", deviceProp.clockRate);
    printf("Total constant memory: %zu\n", deviceProp.totalConstMem);
    printf("Compute capability: %d.%d\n", deviceProp.major, deviceProp.minor);
    printf("Texture alignment: %zu\n", deviceProp.textureAlignment);
    printf("Device overlap: %d\n", deviceProp.deviceOverlap);
    printf("Multiprocessor count: %d\n", deviceProp.multiProcessorCount);
    printf("Kernel execution timeout enabled: %s\n", deviceProp.kernelExecTimeoutEnabled ? "true" : "false");
    return 0;
}
560d8e70c0a0fc8017e13eeb27327ebafa7fc8e4.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> int main() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, 0); printf("Device name : %s\n", deviceProp.name); printf("Total global memory : %d MB\n",deviceProp.totalGlobalMem / 1024 / 1024); printf("Shared memory per block : %d\n",deviceProp.sharedMemPerBlock); printf("Registers per block : %d\n",deviceProp.regsPerBlock); printf("Warp size : %d\n", deviceProp.warpSize); printf("Memory pitch : %d\n", deviceProp.memPitch); printf("Max threads per block : %d\n",deviceProp.maxThreadsPerBlock); printf("Max threads dimensions : x = %d, y = %d, z = % d\n", deviceProp.maxThreadsDim[0],deviceProp.maxThreadsDim[1],deviceProp.maxThreadsDim[2]); printf("Max grid size: x = %d, y = %d, z = %d\n",deviceProp.maxGridSize[0], deviceProp.maxGridSize[1],deviceProp.maxGridSize[2]); printf("Clock rate: %d\n", deviceProp.clockRate); printf("Total constant memory: %d\n",deviceProp.totalConstMem); printf("Compute capability: %d.%d\n",deviceProp.major, deviceProp.minor); printf("Texture alignment: %d\n",deviceProp.textureAlignment); printf("Device overlap: %d\n",deviceProp.deviceOverlap); printf("Multiprocessor count: %d\n",deviceProp.multiProcessorCount); printf("Kernel execution timeout enabled: %s\n", deviceProp.kernelExecTimeoutEnabled ? "true" :"false"); return 0; }
132f7c1ccb600bcf134eded65ae51e0e3b4c7597.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <string> #include <vector> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/prelu.h" #include "paddle/fluid/operators/prelu_op.h" #include "paddle/fluid/operators/reduce_ops/cub_reduce.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; #define CUDA_NUM_THREADS 1024 inline static int PADDLE_GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } template <typename DeviceContext, typename T> class CUDAPReluKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input<Tensor>("X"); auto* alpha = context.Input<Tensor>("Alpha"); auto* out = context.Output<Tensor>("Out"); const T* x_ptr = x->data<T>(); T* o_ptr = out->mutable_data<T>(context.GetPlace()); const T* alpha_ptr = alpha->data<T>(); auto& mode = context.Attr<std::string>("mode"); int numel = x->numel(); auto dim = x->dims(); VLOG(4) << "dim[0]:" << dim[0] << ", dim[1]:" << dim[1] << ", numel:" << numel; if (mode == "channel") { math::PreluChannelWiseDirectCUDAFunctor<T> prelu_channel_wise; prelu_channel_wise(context.cuda_device_context().stream(), x_ptr, alpha_ptr, o_ptr, dim[0], dim[1], numel); } else if (mode == "element") { math::PreluElementWiseDirectCUDAFunctor<T> prelu_element_wise; prelu_element_wise(context.cuda_device_context().stream(), x_ptr, alpha_ptr, o_ptr, dim[0], numel); } else { math::PreluScalarDirectCUDAFunctor<T> prelu_scalar; prelu_scalar(context.cuda_device_context().stream(), x_ptr, alpha_ptr, o_ptr, numel); } } }; enum PRELU_MODE { Element, Channel, Scalar }; template <typename T> __global__ void PReluOpGradKernel(const T* x_ptr, const T* alpha_ptr, const T* dy_ptr, T* dx_ptr, T* dalpha_ptr, size_t channel_num, size_t plane_size, size_t spatial_size, size_t numel, PRELU_MODE mode) { CUDA_KERNEL_LOOP(index, numel) { T scale; if (mode == Element) { size_t element_index = index % spatial_size; scale = alpha_ptr[element_index]; } else if (mode == Channel) { size_t temp = index / plane_size; size_t channel_index = temp % channel_num; scale = alpha_ptr[channel_index]; } else { scale = alpha_ptr[0]; } T x = x_ptr[index]; T dy = dy_ptr[index]; T zero = static_cast<T>(0); if (dx_ptr != nullptr) dx_ptr[index] = (x > zero) ? dy : scale * dy; if (dalpha_ptr != nullptr) dalpha_ptr[index] = (x > zero) ? 
zero : x * dy; } } template <typename T> class PreluOpGradFunctor { public: void operator()(gpuStream_t stream, const T* x, const T* alpha, const T* dy, T* dx, T* dalpha, const framework::DDim& input_dims, PRELU_MODE mode) { size_t numel = 1; for (size_t i = 0; i < input_dims.size(); ++i) { numel *= input_dims[i]; } size_t plane_size = numel / input_dims[0] / input_dims[1]; size_t spatial_size = numel / input_dims[0]; hipLaunchKernelGGL(( PReluOpGradKernel< T>), dim3(PADDLE_GET_BLOCKS(numel)), dim3(CUDA_NUM_THREADS), 0, stream, x, alpha, dy, dx, dalpha, input_dims[1], plane_size, spatial_size, numel, mode); } }; struct IdentityFunctor { template <typename T> HOSTDEVICE inline T operator()(const T& x) const { return x; } }; template <typename DeviceContext, typename T> class CUDAPReluGradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input<Tensor>("X"); auto* alpha = context.Input<Tensor>("Alpha"); auto* dx = context.Output<Tensor>(framework::GradVarName("X")); auto* dy = context.Input<Tensor>(framework::GradVarName("Out")); auto* dalpha = context.Output<Tensor>(framework::GradVarName("Alpha")); const T* x_ptr = x->data<T>(); const T* alpha_ptr = alpha->data<T>(); const T* dy_ptr = dy->data<T>(); T* dx_ptr = dx ? dx->mutable_data<T>(context.GetPlace()) : nullptr; T* dalpha_ptr = dalpha ? dalpha->mutable_data<T>(context.GetPlace()) : nullptr; if (!dx && !dalpha) return; auto& mode = context.Attr<std::string>("mode"); int numel = x->numel(); auto dim = x->dims(); std::vector<int> input_shape = framework::vectorize<int>(dim); auto stream = context.cuda_device_context().stream(); T* dalpha_tmp_ptr; Tensor dalpha_tmp; if (dalpha_ptr == nullptr) { dalpha_tmp_ptr = dalpha_ptr; } else { auto& dev_ctx = context.template device_context<DeviceContext>(); dalpha_tmp = context.AllocateTmpTensor<T, DeviceContext>(dim, dev_ctx); dalpha_tmp_ptr = dalpha_tmp.mutable_data<T>(context.GetPlace()); } PRELU_MODE m; if (mode == "element") { m = Element; } else if (mode == "channel") { m = Channel; } else { m = Scalar; } PreluOpGradFunctor<T> prelu_grad; prelu_grad(stream, x_ptr, alpha_ptr, dy_ptr, dx_ptr, dalpha_tmp_ptr, dim, m); if (dalpha_tmp_ptr == nullptr) return; std::vector<int> reduce_dims; for (size_t i = 0; i < dim.size(); i++) { if (mode == "channel" && i == 1) continue; if (mode == "element" && i != 0) continue; reduce_dims.push_back(i); } TensorReduce<T, T, hipcub::Sum, IdentityFunctor>( dalpha_tmp, dalpha, reduce_dims, static_cast<T>(0), hipcub::Sum(), IdentityFunctor(), stream); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( prelu, ops::CUDAPReluKernel<paddle::platform::CUDADeviceContext, float>, ops::CUDAPReluKernel<paddle::platform::CUDADeviceContext, plat::float16>, ops::CUDAPReluKernel<paddle::platform::CUDADeviceContext, double>); REGISTER_OP_CUDA_KERNEL( prelu_grad, ops::CUDAPReluGradKernel<paddle::platform::CUDADeviceContext, float>, ops::CUDAPReluGradKernel<paddle::platform::CUDADeviceContext, plat::float16>, ops::CUDAPReluGradKernel<paddle::platform::CUDADeviceContext, double>);
132f7c1ccb600bcf134eded65ae51e0e3b4c7597.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <string> #include <vector> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/prelu.h" #include "paddle/fluid/operators/prelu_op.h" #include "paddle/fluid/operators/reduce_ops/cub_reduce.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; #define CUDA_NUM_THREADS 1024 inline static int PADDLE_GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } template <typename DeviceContext, typename T> class CUDAPReluKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input<Tensor>("X"); auto* alpha = context.Input<Tensor>("Alpha"); auto* out = context.Output<Tensor>("Out"); const T* x_ptr = x->data<T>(); T* o_ptr = out->mutable_data<T>(context.GetPlace()); const T* alpha_ptr = alpha->data<T>(); auto& mode = context.Attr<std::string>("mode"); int numel = x->numel(); auto dim = x->dims(); VLOG(4) << "dim[0]:" << dim[0] << ", dim[1]:" << dim[1] << ", numel:" << numel; if (mode == "channel") { math::PreluChannelWiseDirectCUDAFunctor<T> prelu_channel_wise; prelu_channel_wise(context.cuda_device_context().stream(), x_ptr, alpha_ptr, o_ptr, dim[0], dim[1], numel); } else if (mode == "element") { math::PreluElementWiseDirectCUDAFunctor<T> prelu_element_wise; prelu_element_wise(context.cuda_device_context().stream(), x_ptr, alpha_ptr, o_ptr, dim[0], numel); } else { math::PreluScalarDirectCUDAFunctor<T> prelu_scalar; prelu_scalar(context.cuda_device_context().stream(), x_ptr, alpha_ptr, o_ptr, numel); } } }; enum PRELU_MODE { Element, Channel, Scalar }; template <typename T> __global__ void PReluOpGradKernel(const T* x_ptr, const T* alpha_ptr, const T* dy_ptr, T* dx_ptr, T* dalpha_ptr, size_t channel_num, size_t plane_size, size_t spatial_size, size_t numel, PRELU_MODE mode) { CUDA_KERNEL_LOOP(index, numel) { T scale; if (mode == Element) { size_t element_index = index % spatial_size; scale = alpha_ptr[element_index]; } else if (mode == Channel) { size_t temp = index / plane_size; size_t channel_index = temp % channel_num; scale = alpha_ptr[channel_index]; } else { scale = alpha_ptr[0]; } T x = x_ptr[index]; T dy = dy_ptr[index]; T zero = static_cast<T>(0); if (dx_ptr != nullptr) dx_ptr[index] = (x > zero) ? dy : scale * dy; if (dalpha_ptr != nullptr) dalpha_ptr[index] = (x > zero) ? 
zero : x * dy; } } template <typename T> class PreluOpGradFunctor { public: void operator()(gpuStream_t stream, const T* x, const T* alpha, const T* dy, T* dx, T* dalpha, const framework::DDim& input_dims, PRELU_MODE mode) { size_t numel = 1; for (size_t i = 0; i < input_dims.size(); ++i) { numel *= input_dims[i]; } size_t plane_size = numel / input_dims[0] / input_dims[1]; size_t spatial_size = numel / input_dims[0]; PReluOpGradKernel< T><<<PADDLE_GET_BLOCKS(numel), CUDA_NUM_THREADS, 0, stream>>>( x, alpha, dy, dx, dalpha, input_dims[1], plane_size, spatial_size, numel, mode); } }; struct IdentityFunctor { template <typename T> HOSTDEVICE inline T operator()(const T& x) const { return x; } }; template <typename DeviceContext, typename T> class CUDAPReluGradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input<Tensor>("X"); auto* alpha = context.Input<Tensor>("Alpha"); auto* dx = context.Output<Tensor>(framework::GradVarName("X")); auto* dy = context.Input<Tensor>(framework::GradVarName("Out")); auto* dalpha = context.Output<Tensor>(framework::GradVarName("Alpha")); const T* x_ptr = x->data<T>(); const T* alpha_ptr = alpha->data<T>(); const T* dy_ptr = dy->data<T>(); T* dx_ptr = dx ? dx->mutable_data<T>(context.GetPlace()) : nullptr; T* dalpha_ptr = dalpha ? dalpha->mutable_data<T>(context.GetPlace()) : nullptr; if (!dx && !dalpha) return; auto& mode = context.Attr<std::string>("mode"); int numel = x->numel(); auto dim = x->dims(); std::vector<int> input_shape = framework::vectorize<int>(dim); auto stream = context.cuda_device_context().stream(); T* dalpha_tmp_ptr; Tensor dalpha_tmp; if (dalpha_ptr == nullptr) { dalpha_tmp_ptr = dalpha_ptr; } else { auto& dev_ctx = context.template device_context<DeviceContext>(); dalpha_tmp = context.AllocateTmpTensor<T, DeviceContext>(dim, dev_ctx); dalpha_tmp_ptr = dalpha_tmp.mutable_data<T>(context.GetPlace()); } PRELU_MODE m; if (mode == "element") { m = Element; } else if (mode == "channel") { m = Channel; } else { m = Scalar; } PreluOpGradFunctor<T> prelu_grad; prelu_grad(stream, x_ptr, alpha_ptr, dy_ptr, dx_ptr, dalpha_tmp_ptr, dim, m); if (dalpha_tmp_ptr == nullptr) return; std::vector<int> reduce_dims; for (size_t i = 0; i < dim.size(); i++) { if (mode == "channel" && i == 1) continue; if (mode == "element" && i != 0) continue; reduce_dims.push_back(i); } TensorReduce<T, T, cub::Sum, IdentityFunctor>( dalpha_tmp, dalpha, reduce_dims, static_cast<T>(0), cub::Sum(), IdentityFunctor(), stream); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( prelu, ops::CUDAPReluKernel<paddle::platform::CUDADeviceContext, float>, ops::CUDAPReluKernel<paddle::platform::CUDADeviceContext, plat::float16>, ops::CUDAPReluKernel<paddle::platform::CUDADeviceContext, double>); REGISTER_OP_CUDA_KERNEL( prelu_grad, ops::CUDAPReluGradKernel<paddle::platform::CUDADeviceContext, float>, ops::CUDAPReluGradKernel<paddle::platform::CUDADeviceContext, plat::float16>, ops::CUDAPReluGradKernel<paddle::platform::CUDADeviceContext, double>);
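Note: the prelu pair above shows the launch-syntax rewrite: the triple-chevron launch in PreluOpGradFunctor becomes hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), sharedMem, stream, args...), and the cub:: reduction types become hipcub::. The sketch below illustrates just the launch mapping with a hypothetical scale_kernel; it is not code from the operator.

// Sketch only: one kernel plus the two launch spellings.
#include <cuda_runtime.h>

__global__ void scale_kernel(float* data, float alpha, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= alpha;
}

void launch_scale(float* d_data, float alpha, int n, cudaStream_t stream) {
  const int threads = 256;
  const int blocks = (n + threads - 1) / threads;
  // CUDA form (below); the hipify output would be:
  //   hipLaunchKernelGGL(scale_kernel, dim3(blocks), dim3(threads), 0, stream,
  //                      d_data, alpha, n);
  scale_kernel<<<blocks, threads, 0, stream>>>(d_data, alpha, n);
}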
cbfea3f56b2d3edf05c918f37db8f46aeb3d9d76.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <ctime> #include <vector> #include <algorithm> #include <stdlib.h> // utilities #include <helper_cuda.h> #include <time.h> ///////////per request timing. L1 enabled. Pascal L2 has misses even when data size is less than 1024 * 1024 (4m). So the eviction policy seems not to be LRU. //typedef unsigned char byte; void init_cpu_data(int* A, int size, int stride, int mod){ for (int i = 0; i < size; ++i){ A[i]=(i + stride) % mod; } } __device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){ int j = starting_index;/////make them in the same page, and miss near in cache lines for (int it = 0; it < iterations; it++){ j = A[j]; } B[0] = j; } //////////min page size 4kb = 4096b = 32 * 128. __device__ void P_chasing1(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){ int j = starting_index;/////make them in the same page, and miss near in cache lines //long long int start_time = 0;//////clock //long long int end_time = 0;//////clock //start_time = clock64();//////clock for (int it = 0; it < iterations; it++){ j = A[j]; } //end_time=clock64();//////clock //long long int total_time = end_time - start_time;//////clock //printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! ( B[0] = j; //B[1] = (int) total_time; } //////////min page size 4kb = 4096b = 32 * 128. __device__ void P_chasing2(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside? //////shared memory: 0xc000 max (49152 Bytes = 48KB) __shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations. __shared__ int s_index[1024 * 4]; //__shared__ int s_index[1]; int j = starting_index;/////make them in the same page, and miss near in cache lines //int j = B[0]; long long int start_time = 0;//////clock long long int end_time = 0;//////clock long long int time_interval = 0;//////clock //long long int total_time = end_time - start_time;//////clock /* for (int it = 0; it < iterations; it++){ start_time = clock64();//////clock j = A[j]; //s_index[it] = j; end_time=clock64();//////clock s_tvalue[it] = end_time - start_time; } */ asm(".reg .u32 t1;\n\t" ".reg .u64 t2;\n\t" ".reg .u32 t3;\n\t" ".reg .u32 t4;\n\t" ".reg .u64 t5;\n\t" ".reg .u32 t6;\n\t" ".reg .u64 t7;\n\t" "cvta.to.shared.u64 t5, %0;\n\t" "cvt.u32.u64 t6, t5;\n\t" :: "l"(s_index));////////////////////////////////////cvta.to.global.u64 %rd4, %rd25; needed?? 
for (int it = 0; it < iterations; it++){//////////it here is limited by the size of the shared memory asm("shl.b32 t1, %3, 2;\n\t" "cvt.u64.u32 t7, t1;\n\t" "add.s64 t2, t7, %4;\n\t" "shl.b32 t3, %6, 2;\n\t" "add.s32 t4, t3, t6;\n\t" "mov.u64 %0, %clock64;\n\t" "ld.global.u32 %2, [t2];\n\t" "st.shared.u32 [t4], %2;\n\t" "mov.u64 %1, %clock64;" : "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "l"(s_index), "r"(it)); time_interval = end_time - start_time; //if(it >= 4 * 1024){ s_tvalue[it] = time_interval; //} } //printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency B[0] = j; for (int it = 0; it < iterations; it++){ C[it] = s_index[it]; D[it] = s_tvalue[it]; } } __global__ void tlb_latency_test(int *A, int iterations, int *B, int *C, long long int *D, float clock_rate, int mod, int data_stride){ ///////////psacal L2 has 128 * 1024 = 131072 128B cache lines. But we only have 1024 * 4 slots in shared memory. P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2 P_chasing2(0, A, 512, B, C, D, B[0], clock_rate, data_stride);////////partially print the data __syncthreads(); } int main(int argc, char **argv) { printf("\n"); // set device hipDeviceProp_t device_prop; //int dev_id = findCudaDevice(argc, (const char **) argv); int dev_id = 0; checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id)); int peak_clk = 1;//kHz checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id)); float clock_rate = (float) peak_clk; //printf("clock_rate_out_kernel:%f\n", clock_rate); if (!device_prop.managedMemory) { // This samples requires being run on a device that supports Unified Memory fprintf(stderr, "Unified Memory not supported on this device\n"); exit(EXIT_WAIVED); } if (device_prop.computeMode == hipComputeModeProhibited) { // This sample requires being run with a default or process exclusive mode fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n"); exit(EXIT_WAIVED); } ///////////////////////////////////////////////////////////////////GPU data out int *GPU_data_out; checkCudaErrors(hipMalloc(&GPU_data_out, sizeof(int) * 2)); FILE * pFile; pFile = fopen ("output.txt","w"); for(int data_stride = 8; data_stride <= 8; data_stride = data_stride + 1){/////////stride shall be L1 cache line size. //printf("###################data_stride%d#########################\n", data_stride); //for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m for(int mod = 1024 * 1024; mod <= 1024 * 1024 + 8 * 64; mod = mod + 8){/////pascal L2 4m /////pascal L1 24KB ////////saturate the L2 ///////////////////////////////////////////////////////////////////CPU data begin int data_size = 512 * 1024 * 30;/////size = iteration * stride = 30 2mb pages. 
//int iterations = data_size / data_stride; //int iterations = 1024 * 256 * 8; int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 int *CPU_data_in; CPU_data_in = (int*)malloc(sizeof(int) * data_size); init_cpu_data(CPU_data_in, data_size, data_stride, mod); int *CPU_data_out_index; CPU_data_out_index = (int*)malloc(sizeof(int) * iterations); long long int *CPU_data_out_time; CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations); ///////////////////////////////////////////////////////////////////CPU data end ///////////////////////////////////////////////////////////////////GPU data in int *GPU_data_in; checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size)); hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice); ///////////////////////////////////////////////////////////////////GPU data out int *GPU_data_out_index; checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * iterations)); long long int *GPU_data_out_time; checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * iterations)); hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here hipDeviceSynchronize(); hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, hipMemcpyDeviceToHost); hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, hipMemcpyDeviceToHost); fprintf(pFile, "###################data_stride%d#########################\n", data_stride); fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32); for (int it = 0; it < 512; it++){ fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]); //fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate); //printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate); } checkCudaErrors(hipFree(GPU_data_out_index)); checkCudaErrors(hipFree(GPU_data_out_time)); checkCudaErrors(hipFree(GPU_data_in)); free(CPU_data_in); free(CPU_data_out_index); free(CPU_data_out_time); } //printf("############################################\n\n"); } checkCudaErrors(hipFree(GPU_data_out)); //free(CPU_data_out); fclose (pFile); exit(EXIT_SUCCESS); }
cbfea3f56b2d3edf05c918f37db8f46aeb3d9d76.cu
#include <cstdio> #include <ctime> #include <vector> #include <algorithm> #include <stdlib.h> // utilities #include <helper_cuda.h> #include <time.h> ///////////per request timing. L1 enabled. Pascal L2 has misses even when data size is less than 1024 * 1024 (4m). So the eviction policy seems not to be LRU. //typedef unsigned char byte; void init_cpu_data(int* A, int size, int stride, int mod){ for (int i = 0; i < size; ++i){ A[i]=(i + stride) % mod; } } __device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){ int j = starting_index;/////make them in the same page, and miss near in cache lines for (int it = 0; it < iterations; it++){ j = A[j]; } B[0] = j; } //////////min page size 4kb = 4096b = 32 * 128. __device__ void P_chasing1(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){ int j = starting_index;/////make them in the same page, and miss near in cache lines //long long int start_time = 0;//////clock //long long int end_time = 0;//////clock //start_time = clock64();//////clock for (int it = 0; it < iterations; it++){ j = A[j]; } //end_time=clock64();//////clock //long long int total_time = end_time - start_time;//////clock //printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! ( B[0] = j; //B[1] = (int) total_time; } //////////min page size 4kb = 4096b = 32 * 128. __device__ void P_chasing2(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside? //////shared memory: 0xc000 max (49152 Bytes = 48KB) __shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations. __shared__ int s_index[1024 * 4]; //__shared__ int s_index[1]; int j = starting_index;/////make them in the same page, and miss near in cache lines //int j = B[0]; long long int start_time = 0;//////clock long long int end_time = 0;//////clock long long int time_interval = 0;//////clock //long long int total_time = end_time - start_time;//////clock /* for (int it = 0; it < iterations; it++){ start_time = clock64();//////clock j = A[j]; //s_index[it] = j; end_time=clock64();//////clock s_tvalue[it] = end_time - start_time; } */ asm(".reg .u32 t1;\n\t" ".reg .u64 t2;\n\t" ".reg .u32 t3;\n\t" ".reg .u32 t4;\n\t" ".reg .u64 t5;\n\t" ".reg .u32 t6;\n\t" ".reg .u64 t7;\n\t" "cvta.to.shared.u64 t5, %0;\n\t" "cvt.u32.u64 t6, t5;\n\t" :: "l"(s_index));////////////////////////////////////cvta.to.global.u64 %rd4, %rd25; needed?? 
for (int it = 0; it < iterations; it++){//////////it here is limited by the size of the shared memory asm("shl.b32 t1, %3, 2;\n\t" "cvt.u64.u32 t7, t1;\n\t" "add.s64 t2, t7, %4;\n\t" "shl.b32 t3, %6, 2;\n\t" "add.s32 t4, t3, t6;\n\t" "mov.u64 %0, %clock64;\n\t" "ld.global.u32 %2, [t2];\n\t" "st.shared.u32 [t4], %2;\n\t" "mov.u64 %1, %clock64;" : "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "l"(s_index), "r"(it)); time_interval = end_time - start_time; //if(it >= 4 * 1024){ s_tvalue[it] = time_interval; //} } //printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency B[0] = j; for (int it = 0; it < iterations; it++){ C[it] = s_index[it]; D[it] = s_tvalue[it]; } } __global__ void tlb_latency_test(int *A, int iterations, int *B, int *C, long long int *D, float clock_rate, int mod, int data_stride){ ///////////psacal L2 has 128 * 1024 = 131072 128B cache lines. But we only have 1024 * 4 slots in shared memory. P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2 P_chasing2(0, A, 512, B, C, D, B[0], clock_rate, data_stride);////////partially print the data __syncthreads(); } int main(int argc, char **argv) { printf("\n"); // set device cudaDeviceProp device_prop; //int dev_id = findCudaDevice(argc, (const char **) argv); int dev_id = 0; checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id)); int peak_clk = 1;//kHz checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id)); float clock_rate = (float) peak_clk; //printf("clock_rate_out_kernel:%f\n", clock_rate); if (!device_prop.managedMemory) { // This samples requires being run on a device that supports Unified Memory fprintf(stderr, "Unified Memory not supported on this device\n"); exit(EXIT_WAIVED); } if (device_prop.computeMode == cudaComputeModeProhibited) { // This sample requires being run with a default or process exclusive mode fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n"); exit(EXIT_WAIVED); } ///////////////////////////////////////////////////////////////////GPU data out int *GPU_data_out; checkCudaErrors(cudaMalloc(&GPU_data_out, sizeof(int) * 2)); FILE * pFile; pFile = fopen ("output.txt","w"); for(int data_stride = 8; data_stride <= 8; data_stride = data_stride + 1){/////////stride shall be L1 cache line size. //printf("###################data_stride%d#########################\n", data_stride); //for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m for(int mod = 1024 * 1024; mod <= 1024 * 1024 + 8 * 64; mod = mod + 8){/////pascal L2 4m /////pascal L1 24KB ////////saturate the L2 ///////////////////////////////////////////////////////////////////CPU data begin int data_size = 512 * 1024 * 30;/////size = iteration * stride = 30 2mb pages. 
//int iterations = data_size / data_stride; //int iterations = 1024 * 256 * 8; int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 int *CPU_data_in; CPU_data_in = (int*)malloc(sizeof(int) * data_size); init_cpu_data(CPU_data_in, data_size, data_stride, mod); int *CPU_data_out_index; CPU_data_out_index = (int*)malloc(sizeof(int) * iterations); long long int *CPU_data_out_time; CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations); ///////////////////////////////////////////////////////////////////CPU data end ///////////////////////////////////////////////////////////////////GPU data in int *GPU_data_in; checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size)); cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice); ///////////////////////////////////////////////////////////////////GPU data out int *GPU_data_out_index; checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * iterations)); long long int *GPU_data_out_time; checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * iterations)); tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here cudaDeviceSynchronize(); cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, cudaMemcpyDeviceToHost); cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, cudaMemcpyDeviceToHost); fprintf(pFile, "###################data_stride%d#########################\n", data_stride); fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32); for (int it = 0; it < 512; it++){ fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]); //fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate); //printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate); } checkCudaErrors(cudaFree(GPU_data_out_index)); checkCudaErrors(cudaFree(GPU_data_out_time)); checkCudaErrors(cudaFree(GPU_data_in)); free(CPU_data_in); free(CPU_data_out_index); free(CPU_data_out_time); } //printf("############################################\n\n"); } checkCudaErrors(cudaFree(GPU_data_out)); //free(CPU_data_out); fclose (pFile); exit(EXIT_SUCCESS); }
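Note: besides the usual renames, the latency-benchmark pair above maps the device-attribute query (cudaDeviceGetAttribute with cudaDevAttrClockRate -> hipDeviceGetAttribute with hipDeviceAttributeClockRate) and cudaDeviceSynchronize -> hipDeviceSynchronize. The sketch below shows how the benchmark's cycle counts convert to milliseconds using that attribute; the cycle value is a made-up stand-in for end_time - start_time.

// Sketch only: clock-rate attribute query and cycles-to-milliseconds conversion.
#include <cuda_runtime.h>
#include <cstdio>

int main() {
  int khz = 1;
  cudaDeviceGetAttribute(&khz, cudaDevAttrClockRate, 0);  // hip: hipDeviceAttributeClockRate
  long long cycles = 123456;          // placeholder for end_time - start_time from clock64()
  // the attribute is in kHz, i.e. SM cycles per millisecond
  printf("%lld cycles = %f ms at %d kHz\n", cycles, cycles / (float)khz, khz);
  return 0;
}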
af6fdbfd9107bcc4b6c44f1d8833553ad77f72b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" #include "caffe2/operators/resize_op.h" namespace caffe2 { namespace { __global__ void NearestNeighborKernel( const int size, const int num_channels, const int input_height, const int input_width, const int output_height, const int output_width, const float height_scale, const float width_scale, const float* X, float* Y) { CUDA_1D_KERNEL_LOOP(index, size) { int indexTemp = index; const int w = indexTemp % output_width; indexTemp /= output_width; const int h = indexTemp % output_height; indexTemp /= output_height; const int c = indexTemp % num_channels; indexTemp /= num_channels; const int n = indexTemp; const int in_y = fminf(h / height_scale, input_height - 1); const int in_x = fminf(w / width_scale, input_width - 1); Y[index] = X[((n * num_channels + c) * input_height + in_y) * input_width + in_x]; } } __global__ void NearestNeighborGradientKernel( const int size, const int num_channels, const int input_height, const int input_width, const int output_height, const int output_width, const float height_scale, const float width_scale, const float* dY, float* dX) { CUDA_1D_KERNEL_LOOP(index, size) { int indexTemp = index; const int x = indexTemp % input_width; indexTemp /= input_width; const int y = indexTemp % input_height; indexTemp /= input_height; const int c = indexTemp % num_channels; indexTemp /= num_channels; const int n = indexTemp; const int out_y = fminf(y / height_scale, output_height - 1); const int out_x = fminf(x / width_scale, output_width - 1); const int out_index = ((n * num_channels + c) * output_height + out_y) * output_width + out_x; #if __CUDA_ARCH__ >= 350 atomicAdd(dX + out_index, __ldg(dY + index)); #else atomicAdd(dX + out_index, *(dY + index)); #endif } } } // namespace template <> bool ResizeNearestOp<float, CUDAContext>::RunOnDevice() { const auto& X = Input(0); const auto inputDims = X.sizes(); CAFFE_ENFORCE_EQ(4, inputDims.size()); const int batch_size = X.dim32(0), num_channels = X.dim32(1), input_height = X.dim32(2), input_width = X.dim32(3); if (InputSize() == 2) { const auto& scales = Input(1); CAFFE_ENFORCE_EQ(scales.dim(), 1); CAFFE_ENFORCE_EQ(scales.size(), 2); float scales_data[2]; context_.CopyToCPU<float>(2, scales.data<float>(), scales_data); height_scale_ = scales_data[0]; width_scale_ = scales_data[1]; } int output_width = input_width * width_scale_; int output_height = input_height * height_scale_; auto* Y = Output(0, {batch_size, num_channels, output_height, output_width}, at::dtype<float>()); const auto size = Y->size(); hipLaunchKernelGGL(( NearestNeighborKernel), dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), size, num_channels, input_height, input_width, output_height, output_width, height_scale_, width_scale_, X.data<float>(), Y->template mutable_data<float>()); return true; } template <> bool ResizeNearestGradientOp<float, CUDAContext>::RunOnDevice() { const auto& dY = Input(0); const auto& X = Input(1); const auto inputDims = dY.sizes(); CAFFE_ENFORCE_EQ(4, inputDims.size()); const int batch_size = dY.dim32(0), num_channels = dY.dim32(1), input_height = dY.dim32(2), input_width = dY.dim32(3); int output_height = X.dim32(2); int output_width = X.dim32(3); if (InputSize() == 3) { const auto& scales = Input(2); CAFFE_ENFORCE_EQ(scales.dim(), 1); CAFFE_ENFORCE_EQ(scales.size(), 2); float scales_data[2]; context_.CopyToCPU<float>(2, 
scales.data<float>(), scales_data); height_scale_ = scales_data[0]; width_scale_ = scales_data[1]; } auto* dX = Output(0, {batch_size, num_channels, output_height, output_width}, at::dtype<float>()); math::Set<float, CUDAContext>( dX->size(), 0.0f, dX->template mutable_data<float>(), &context_); const auto size = dY.size(); hipLaunchKernelGGL(( NearestNeighborGradientKernel), dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), size, num_channels, input_height, input_width, output_height, output_width, height_scale_, width_scale_, dY.data<float>(), dX->template mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(ResizeNearest, ResizeNearestOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( ResizeNearestGradient, ResizeNearestGradientOp<float, CUDAContext>); } // namespace caffe2
af6fdbfd9107bcc4b6c44f1d8833553ad77f72b7.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" #include "caffe2/operators/resize_op.h" namespace caffe2 { namespace { __global__ void NearestNeighborKernel( const int size, const int num_channels, const int input_height, const int input_width, const int output_height, const int output_width, const float height_scale, const float width_scale, const float* X, float* Y) { CUDA_1D_KERNEL_LOOP(index, size) { int indexTemp = index; const int w = indexTemp % output_width; indexTemp /= output_width; const int h = indexTemp % output_height; indexTemp /= output_height; const int c = indexTemp % num_channels; indexTemp /= num_channels; const int n = indexTemp; const int in_y = fminf(h / height_scale, input_height - 1); const int in_x = fminf(w / width_scale, input_width - 1); Y[index] = X[((n * num_channels + c) * input_height + in_y) * input_width + in_x]; } } __global__ void NearestNeighborGradientKernel( const int size, const int num_channels, const int input_height, const int input_width, const int output_height, const int output_width, const float height_scale, const float width_scale, const float* dY, float* dX) { CUDA_1D_KERNEL_LOOP(index, size) { int indexTemp = index; const int x = indexTemp % input_width; indexTemp /= input_width; const int y = indexTemp % input_height; indexTemp /= input_height; const int c = indexTemp % num_channels; indexTemp /= num_channels; const int n = indexTemp; const int out_y = fminf(y / height_scale, output_height - 1); const int out_x = fminf(x / width_scale, output_width - 1); const int out_index = ((n * num_channels + c) * output_height + out_y) * output_width + out_x; #if __CUDA_ARCH__ >= 350 atomicAdd(dX + out_index, __ldg(dY + index)); #else atomicAdd(dX + out_index, *(dY + index)); #endif } } } // namespace template <> bool ResizeNearestOp<float, CUDAContext>::RunOnDevice() { const auto& X = Input(0); const auto inputDims = X.sizes(); CAFFE_ENFORCE_EQ(4, inputDims.size()); const int batch_size = X.dim32(0), num_channels = X.dim32(1), input_height = X.dim32(2), input_width = X.dim32(3); if (InputSize() == 2) { const auto& scales = Input(1); CAFFE_ENFORCE_EQ(scales.dim(), 1); CAFFE_ENFORCE_EQ(scales.size(), 2); float scales_data[2]; context_.CopyToCPU<float>(2, scales.data<float>(), scales_data); height_scale_ = scales_data[0]; width_scale_ = scales_data[1]; } int output_width = input_width * width_scale_; int output_height = input_height * height_scale_; auto* Y = Output(0, {batch_size, num_channels, output_height, output_width}, at::dtype<float>()); const auto size = Y->size(); NearestNeighborKernel<<< CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( size, num_channels, input_height, input_width, output_height, output_width, height_scale_, width_scale_, X.data<float>(), Y->template mutable_data<float>()); return true; } template <> bool ResizeNearestGradientOp<float, CUDAContext>::RunOnDevice() { const auto& dY = Input(0); const auto& X = Input(1); const auto inputDims = dY.sizes(); CAFFE_ENFORCE_EQ(4, inputDims.size()); const int batch_size = dY.dim32(0), num_channels = dY.dim32(1), input_height = dY.dim32(2), input_width = dY.dim32(3); int output_height = X.dim32(2); int output_width = X.dim32(3); if (InputSize() == 3) { const auto& scales = Input(2); CAFFE_ENFORCE_EQ(scales.dim(), 1); CAFFE_ENFORCE_EQ(scales.size(), 2); float scales_data[2]; context_.CopyToCPU<float>(2, scales.data<float>(), scales_data); height_scale_ = scales_data[0]; width_scale_ = scales_data[1]; } auto* dX = Output(0, {batch_size, 
num_channels, output_height, output_width}, at::dtype<float>()); math::Set<float, CUDAContext>( dX->size(), 0.0f, dX->template mutable_data<float>(), &context_); const auto size = dY.size(); NearestNeighborGradientKernel<<< CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( size, num_channels, input_height, input_width, output_height, output_width, height_scale_, width_scale_, dY.data<float>(), dX->template mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(ResizeNearest, ResizeNearestOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( ResizeNearestGradient, ResizeNearestGradientOp<float, CUDAContext>); } // namespace caffe2
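// A minimal CPU reference for the same nearest-neighbor index math (NCHW layout,
// truncating division and clamping as in the kernel above). The function name is
// an assumption; this is a checking aid, not part of the operator.
static void resize_nearest_cpu_sketch(const float* X, float* Y,
                                      int N, int C, int in_h, int in_w,
                                      int out_h, int out_w,
                                      float height_scale, float width_scale) {
  for (int n = 0; n < N; ++n)
    for (int c = 0; c < C; ++c)
      for (int h = 0; h < out_h; ++h)
        for (int w = 0; w < out_w; ++w) {
          int in_y = (int)(h / height_scale);            // nearest source row
          if (in_y > in_h - 1) in_y = in_h - 1;
          int in_x = (int)(w / width_scale);             // nearest source column
          if (in_x > in_w - 1) in_x = in_w - 1;
          Y[((n * C + c) * out_h + h) * out_w + w] =
              X[((n * C + c) * in_h + in_y) * in_w + in_x];
        }
}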
3e90182789da41c001ed7ad2e795f4410f5b41b9.hip
// !!! This is a file automatically generated by hipify!!! #include "RecognitionOnGPU.cuh" #include <hip/hip_runtime.h> #include <stdio.h> #include <hip/hip_runtime_api.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <assert.h> #include <stdlib.h> #include <math.h> void ParallelizationElements(TXMLCascade* cascade, TStage* stages, TFeature* features, TRect* rects, unsigned short width_original, unsigned short height_original, unsigned char* char_picture_original, unsigned short byte_pixel_original, unsigned short bit_width_original, float step_factor, int* pixel_intensity, int* pixel_intensity_sqr) { float factor = 1.0; float *dev_factor; hipMalloc((void**)&dev_factor, sizeof(float)); hipMemcpy(dev_factor, &factor, sizeof(float), hipMemcpyHostToDevice); TXMLCascade* dev_cascade; hipMalloc((void**)&dev_cascade, sizeof(TXMLCascade)); hipMemcpy(dev_cascade, cascade, sizeof(TXMLCascade), hipMemcpyHostToDevice); TStage* dev_stages; hipMalloc((void**)&dev_stages, sizeof(TStage) * 30); hipMemcpy(dev_stages, stages, sizeof(TStage) * 30, hipMemcpyHostToDevice); TFeature* dev_features; hipMalloc((void**)&dev_features, sizeof(TFeature) * 7000); hipMemcpy(dev_features, features, sizeof(TFeature) * 7000, hipMemcpyHostToDevice); TRect* dev_rects; hipMalloc((void**)&dev_rects, sizeof(TRect) * 11000); hipMemcpy(dev_rects, rects, sizeof(TRect) * 11000, hipMemcpyHostToDevice); unsigned short* dev_width_original; hipMalloc((void**)&dev_width_original, sizeof(unsigned short)); hipMemcpy(dev_width_original, &width_original, sizeof(unsigned short), hipMemcpyHostToDevice); unsigned short* dev_height_original; hipMalloc((void**)&dev_height_original, sizeof(unsigned short)); hipMemcpy(dev_height_original, &height_original, sizeof(unsigned short), hipMemcpyHostToDevice); unsigned char* dev_char_picture_original; hipMalloc((void**)&dev_char_picture_original, sizeof(unsigned char) * 3 * height_original*width_original); hipMemcpy(dev_char_picture_original, char_picture_original, sizeof(unsigned char) * 3 * height_original*width_original, hipMemcpyHostToDevice); unsigned short* dev_byte_pixel_original; hipMalloc((void**)&dev_byte_pixel_original, sizeof(unsigned short)); hipMemcpy(dev_byte_pixel_original, &byte_pixel_original, sizeof(unsigned short), hipMemcpyHostToDevice); unsigned short* dev_bit_width_original; hipMalloc((void**)&dev_bit_width_original, sizeof(unsigned short)); hipMemcpy(dev_bit_width_original, &bit_width_original, sizeof(unsigned short), hipMemcpyHostToDevice); float* dev_step_factor; hipMalloc((void**)&dev_step_factor, sizeof(float)); hipMemcpy(dev_step_factor, &step_factor, sizeof(float), hipMemcpyHostToDevice); int* dev_pixel_intensity; hipMalloc((void**)&dev_pixel_intensity, width_original*height_original * 4); hipMemcpy(dev_pixel_intensity, pixel_intensity, width_original*height_original * 4, hipMemcpyHostToDevice); int* dev_pixel_intensity_sqr; hipMalloc((void**)&dev_pixel_intensity_sqr, width_original*height_original * 4); hipMemcpy(dev_pixel_intensity_sqr, pixel_intensity_sqr, width_original*height_original * 4, hipMemcpyHostToDevice); int window_w = cascade->window_w_mini; int window_h = cascade->window_h_mini; do { hipLaunchKernelGGL(( ParallelizationElementsKernel) , dim3(512), dim3(512) , 0, 0, dev_cascade, dev_stages, dev_features, dev_rects, dev_width_original, dev_height_original, dev_char_picture_original, dev_byte_pixel_original, dev_bit_width_original, dev_step_factor, dev_pixel_intensity, dev_pixel_intensity_sqr, dev_factor); factor *= step_factor; 
hipMemcpy(dev_factor, &factor, sizeof(float), hipMemcpyHostToDevice); window_w = floor(cascade->window_w_mini*factor); window_h = floor(cascade->window_h_mini*factor); } while (min(width_original, height_original) >= min(window_w, window_h)); printf("Error: %s\n", hipGetErrorString(hipGetLastError())); hipMemcpy(char_picture_original, dev_char_picture_original, sizeof(unsigned char) * 3 * height_original*width_original, hipMemcpyDeviceToHost); } __global__ void ParallelizationElementsKernel(TXMLCascade* cascade, TStage* stages, TFeature* features, TRect* rects, unsigned short* width_original, unsigned short* height_original, unsigned char* char_picture_original, unsigned short* byte_pixel_original, unsigned short* bit_width_original, float* step_factor, int* pixel_intensity, int* pixel_intensity_sqr, float* factor) { int window_w = floor(cascade->window_w_mini**factor); int window_h = floor(cascade->window_h_mini**factor); unsigned short x1, y1; int x_step = ((1) >= (((4)<((window_w) / 10) ? (4) : ((window_w) / 10))) ? (1) : (((4)<((window_w) / 10) ? (4) : ((window_w) / 10)))); int y_step = ((1) >= (((4)<((window_h) / 10) ? (4) : ((window_h) / 10))) ? (1) : (((4)<((window_h) / 10) ? (4) : ((window_h) / 10)))); y1 = y_step * blockIdx.x; if (y1 < *height_original - window_h) { int x_thread_step = 0; for (x1 = 0; x1 < *width_original - window_w; x_thread_step += 512) { x1 = x_thread_step + x_step * threadIdx.x; if (x1 < 512 + x_thread_step) { float mean = intensity_window_device(x1, y1, window_w, window_h, pixel_intensity, *width_original) * 1 / float(window_w*window_h); float variance = sqr_intensity_window_device(x1, y1, window_w, window_h, pixel_intensity_sqr, *width_original) * 1 / float(window_w*window_h) - (mean*mean); float stddev = 1.0; stddev = sqrt(variance); if (stddev > 10.0) { bool f_failed = false; for (int i_stage = 0; i_stage < cascade->n_stages; i_stage = i_stage + 1) { float sum_stage = 0.0; for (int i_feature = stages[i_stage].i_feature_start; i_feature <= stages[i_stage].i_feature_finish; i_feature = i_feature + 1) { int sum_feature = 0.0; for (int i_rect = features[i_feature].i_rect_start; i_rect <= features[i_feature].i_rect_finish; i_rect = i_rect + 1) { sum_feature += (intensity_window_device(x1 + rects[i_rect].x**factor, y1 + rects[i_rect].y**factor, rects[i_rect].w**factor, rects[i_rect].h**factor, pixel_intensity, *width_original)*rects[i_rect].weight); } if (sum_feature * 1 / float(window_w*window_h) < features[i_feature].feature_threshold * stddev) sum_stage += features[i_feature].left_val; else sum_stage += features[i_feature].right_val; } if (sum_stage < stages[i_stage].stage_threshold) { f_failed = true; break; } } if (f_failed == false) { printf("%d %d %d %d \n", x1, y1, x1 + window_w, y1 + window_h); unsigned short x2 = x1 + window_w; unsigned short y2 = y1 + window_h; for (int x = x1; x <= x2; x++) { char_picture_original[*bit_width_original*(*height_original - y1) + *byte_pixel_original*x + 0] = 0x00; char_picture_original[*bit_width_original*(*height_original - y1) + *byte_pixel_original*x + 1] = 0x00; char_picture_original[*bit_width_original*(*height_original - y1) + *byte_pixel_original*x + 2] = 0x00; } for (int x = x1; x <= x2; x++) { char_picture_original[*bit_width_original*(*height_original - y2) + *byte_pixel_original*x + 0] = 0x00; char_picture_original[*bit_width_original*(*height_original - y2) + *byte_pixel_original*x + 1] = 0x00; char_picture_original[*bit_width_original*(*height_original - y2) + *byte_pixel_original*x + 2] = 0x00; } for (int 
y = y1; y <= y2; y++) { char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x1 + 0] = 0x00; char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x1 + 1] = 0x00; char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x1 + 2] = 0x00; } for (int y = y1; y <= y2; y++) { char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x2 + 0] = 0x00; char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x2 + 1] = 0x00; char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x2 + 2] = 0x00; } } } } } } } void ParallelizationScale(TXMLCascade* cascade, TStage* stages, TFeature* features, TRect* rects, unsigned short width_original, unsigned short height_original, unsigned char* char_picture_original, unsigned short byte_pixel_original, unsigned short bit_width_original, float step_factor, int* pixel_intensity, int* pixel_intensity_sqr, int* mas_pointer, int** mas) { unsigned char* dev_char_picture_original; hipMalloc((void**)&dev_char_picture_original, sizeof(unsigned char) * 3 * height_original*width_original); hipMemcpy(dev_char_picture_original, char_picture_original, sizeof(unsigned char) * 3 * height_original*width_original, hipMemcpyHostToDevice); int* dev_mas_pointer; hipMalloc((void**)&dev_mas_pointer, sizeof(int)); int* dev_mas; hipMalloc((void**)&dev_mas, sizeof(int) * 512); float factor = 1.0; float *dev_factor; hipMalloc((void**)&dev_factor, sizeof(float)); hipMemcpy(dev_factor, &factor, sizeof(float), hipMemcpyHostToDevice); TXMLCascade* dev_cascade; hipMalloc((void**)&dev_cascade, sizeof(TXMLCascade)); hipMemcpy(dev_cascade, cascade, sizeof(TXMLCascade), hipMemcpyHostToDevice); TStage* dev_stages; hipMalloc((void**)&dev_stages, sizeof(TStage) * 30); hipMemcpy(dev_stages, stages, sizeof(TStage) * 30, hipMemcpyHostToDevice); TFeature* dev_features; hipMalloc((void**)&dev_features, sizeof(TFeature) * 7000); hipMemcpy(dev_features, features, sizeof(TFeature) * 7000, hipMemcpyHostToDevice); TRect* dev_rects; hipMalloc((void**)&dev_rects, sizeof(TRect) * 11000); hipMemcpy(dev_rects, rects, sizeof(TRect) * 11000, hipMemcpyHostToDevice); unsigned short* dev_width_original; hipMalloc((void**)&dev_width_original, sizeof(unsigned short)); hipMemcpy(dev_width_original, &width_original, sizeof(unsigned short), hipMemcpyHostToDevice); unsigned short* dev_height_original; hipMalloc((void**)&dev_height_original, sizeof(unsigned short)); hipMemcpy(dev_height_original, &height_original, sizeof(unsigned short), hipMemcpyHostToDevice); unsigned short* dev_byte_pixel_original; hipMalloc((void**)&dev_byte_pixel_original, sizeof(unsigned short)); hipMemcpy(dev_byte_pixel_original, &byte_pixel_original, sizeof(unsigned short), hipMemcpyHostToDevice); unsigned short* dev_bit_width_original; hipMalloc((void**)&dev_bit_width_original, sizeof(unsigned short)); hipMemcpy(dev_bit_width_original, &bit_width_original, sizeof(unsigned short), hipMemcpyHostToDevice); float* dev_step_factor; hipMalloc((void**)&dev_step_factor, sizeof(float)); hipMemcpy(dev_step_factor, &step_factor, sizeof(float), hipMemcpyHostToDevice); int* dev_pixel_intensity; hipMalloc((void**)&dev_pixel_intensity, width_original*height_original * 4); hipMemcpy(dev_pixel_intensity, pixel_intensity, width_original*height_original * 4, hipMemcpyHostToDevice); int* dev_pixel_intensity_sqr; hipMalloc((void**)&dev_pixel_intensity_sqr, 
width_original*height_original * 4); hipMemcpy(dev_pixel_intensity_sqr, pixel_intensity_sqr, width_original*height_original * 4, hipMemcpyHostToDevice); int window_w, window_h; float scale[100]; int k = 0; do { window_w = (int)floor(cascade->window_w_mini*factor); window_h = (int)floor(cascade->window_h_mini*factor); scale[k] = factor; factor *= step_factor; k++; } while (min(width_original, height_original) >= min(window_w, window_h)); printf("block = %d; \n", k); float* dev_scale; hipMalloc((void**)&dev_scale, sizeof(float) * 100); hipMemcpy(dev_scale, scale, sizeof(float) * 100, hipMemcpyHostToDevice); hipLaunchKernelGGL(( ParallelizationScaleKernel) , dim3(k), dim3(512) , 0, 0, dev_cascade, dev_stages, dev_features, dev_rects, dev_width_original, dev_height_original, dev_char_picture_original, dev_byte_pixel_original, dev_bit_width_original, dev_step_factor, dev_pixel_intensity, dev_pixel_intensity_sqr, dev_factor, dev_scale, dev_mas_pointer, dev_mas); hipDeviceSynchronize(); printf("Error 1: %s\n", hipGetErrorString(hipGetLastError())); hipMemcpy(char_picture_original, dev_char_picture_original, sizeof(unsigned char) * 3 * height_original*width_original, hipMemcpyDeviceToHost); /*printf("Error 2: %s\n", hipGetErrorString(hipGetLastError())); hipMemcpy( &mas, dev_mas, sizeof(int)*512*k,hipMemcpyDeviceToHost); printf("Error 3: %s\n", hipGetErrorString(hipGetLastError())); hipMemcpy(mas_pointer, dev_mas_pointer, sizeof(int), hipMemcpyDeviceToHost); printf("Error 4: %s\n", hipGetErrorString(hipGetLastError()));*/ } __global__ void ParallelizationScaleKernel(TXMLCascade* cascade, TStage* stages, TFeature* features, TRect* rects, unsigned short* width_original, unsigned short* height_original, unsigned char* char_picture_original, unsigned short* byte_pixel_original, unsigned short* bit_width_original, float* step_factor, int* pixel_intensity, int* pixel_intensity_sqr, float* factor, float* scale, int* mas_pointer, int* mas) { int window_w = floor(cascade->window_w_mini*scale[blockIdx.x]); int window_h = floor(cascade->window_h_mini*scale[blockIdx.x]); unsigned short x1, y1; int x_step = ((1) >= (((4)<((window_w) / 10) ? (4) : ((window_w) / 10))) ? (1) : (((4)<((window_w) / 10) ? (4) : ((window_w) / 10)))); int y_step = ((1) >= (((4)<((window_h) / 10) ? (4) : ((window_h) / 10))) ? (1) : (((4)<((window_h) / 10) ? 
(4) : ((window_h) / 10)))); for (y1 = 0; y1 <= *height_original - 1 - window_h; y1 += y_step) { x1 = x_step * threadIdx.x; if (x1 < *width_original - 1 - window_w) { float mean = intensity_window_device(x1, y1, window_w, window_h, pixel_intensity, *width_original) * 1 / float(window_w*window_h); float variance = sqr_intensity_window_device(x1, y1, window_w, window_h, pixel_intensity_sqr, *width_original) * 1 / float(window_w*window_h) - (mean*mean); float stddev = 1.0; stddev = sqrt(variance); if (stddev < 10.0) continue; int f_failed = 0; for (int i_stage = 0; i_stage < cascade->n_stages; i_stage = i_stage + 1) { float sum_stage = 0.0; for (int i_feature = stages[i_stage].i_feature_start; i_feature <= stages[i_stage].i_feature_finish; i_feature = i_feature + 1) { int sum_feature = 0.0; for (int i_rect = features[i_feature].i_rect_start; i_rect <= features[i_feature].i_rect_finish; i_rect = i_rect + 1) { sum_feature += (intensity_window_device(x1 + rects[i_rect].x*scale[blockIdx.x], y1 + rects[i_rect].y*scale[blockIdx.x], rects[i_rect].w*scale[blockIdx.x], rects[i_rect].h*scale[blockIdx.x], pixel_intensity, *width_original)*rects[i_rect].weight); } float leafth = features[i_feature].feature_threshold * stddev; if (sum_feature * 1 / float(window_w*window_h) < leafth) sum_stage += features[i_feature].left_val; else sum_stage += features[i_feature].right_val; } if (sum_stage < stages[i_stage].stage_threshold) { f_failed = 1; break; } } if (f_failed == false) { printf("%d %d %d %d \n", x1, y1, x1 + window_w, y1 + window_h); unsigned short x2 = x1 + window_w; unsigned short y2 = y1 + window_h; /* atomicAdd( &mas[*mas_pointer], x1); atomicAdd( &mas[*mas_pointer+1] , y1 ); atomicAdd( &mas[*mas_pointer+2] , x2 ); atomicAdd( &mas[*mas_pointer+3] , y2 ); atomicAdd(mas_pointer, 4); printf("dev_mas_pointer=%d \n", *mas_pointer);*/ for (int x = x1; x <= x2; x++) { char_picture_original[*bit_width_original*(*height_original - y1) + *byte_pixel_original*x + 0] = 0x00; char_picture_original[*bit_width_original*(*height_original - y1) + *byte_pixel_original*x + 1] = 0x00; char_picture_original[*bit_width_original*(*height_original - y1) + *byte_pixel_original*x + 2] = 0x00; } for (int x = x1; x <= x2; x++) { char_picture_original[*bit_width_original*(*height_original - y2) + *byte_pixel_original*x + 0] = 0x00; char_picture_original[*bit_width_original*(*height_original - y2) + *byte_pixel_original*x + 1] = 0x00; char_picture_original[*bit_width_original*(*height_original - y2) + *byte_pixel_original*x + 2] = 0x00; } for (int y = y1; y <= y2; y++) { char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x1 + 0] = 0x00; char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x1 + 1] = 0x00; char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x1 + 2] = 0x00; } for (int y = y1; y <= y2; y++) { char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x2 + 0] = 0x00; char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x2 + 1] = 0x00; char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x2 + 2] = 0x00; } } } } } __device__ int intensity_window_device(int x, int y, int w_window, int h_window, int* pixel_intensity, unsigned short width_original) { int pixel_4 = pixel_intensity[width_original*(y + h_window) + (x + w_window)]; int pixel_1 = pixel_intensity[width_original*(y + 0) + (x + 0)]; int pixel_2 = 
pixel_intensity[width_original*(y + 0) + (x + w_window)]; int pixel_3 = pixel_intensity[width_original*(y + h_window) + (x + 0)]; return (pixel_4 + pixel_1 - pixel_2 - pixel_3); } __device__ int sqr_intensity_window_device(int x, int y, int w_window, int h_window, int* pixel_intensity_sqr, unsigned short width_original) { int pixel_4 = pixel_intensity_sqr[width_original*(y + h_window) + (x + w_window)]; int pixel_1 = pixel_intensity_sqr[width_original*(y + 0) + (x + 0)]; int pixel_2 = pixel_intensity_sqr[width_original*(y + 0) + (x + w_window)]; int pixel_3 = pixel_intensity_sqr[width_original*(y + h_window) + (x + 0)]; return (pixel_4 + pixel_1 - pixel_2 - pixel_3); }
3e90182789da41c001ed7ad2e795f4410f5b41b9.cu
#include "RecognitionOnGPU.cuh" #include <cuda.h> #include <stdio.h> #include <cuda_runtime_api.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <assert.h> #include <stdlib.h> #include <math.h> void ParallelizationElements(TXMLCascade* cascade, TStage* stages, TFeature* features, TRect* rects, unsigned short width_original, unsigned short height_original, unsigned char* char_picture_original, unsigned short byte_pixel_original, unsigned short bit_width_original, float step_factor, int* pixel_intensity, int* pixel_intensity_sqr) { float factor = 1.0; float *dev_factor; cudaMalloc((void**)&dev_factor, sizeof(float)); cudaMemcpy(dev_factor, &factor, sizeof(float), cudaMemcpyHostToDevice); TXMLCascade* dev_cascade; cudaMalloc((void**)&dev_cascade, sizeof(TXMLCascade)); cudaMemcpy(dev_cascade, cascade, sizeof(TXMLCascade), cudaMemcpyHostToDevice); TStage* dev_stages; cudaMalloc((void**)&dev_stages, sizeof(TStage) * 30); cudaMemcpy(dev_stages, stages, sizeof(TStage) * 30, cudaMemcpyHostToDevice); TFeature* dev_features; cudaMalloc((void**)&dev_features, sizeof(TFeature) * 7000); cudaMemcpy(dev_features, features, sizeof(TFeature) * 7000, cudaMemcpyHostToDevice); TRect* dev_rects; cudaMalloc((void**)&dev_rects, sizeof(TRect) * 11000); cudaMemcpy(dev_rects, rects, sizeof(TRect) * 11000, cudaMemcpyHostToDevice); unsigned short* dev_width_original; cudaMalloc((void**)&dev_width_original, sizeof(unsigned short)); cudaMemcpy(dev_width_original, &width_original, sizeof(unsigned short), cudaMemcpyHostToDevice); unsigned short* dev_height_original; cudaMalloc((void**)&dev_height_original, sizeof(unsigned short)); cudaMemcpy(dev_height_original, &height_original, sizeof(unsigned short), cudaMemcpyHostToDevice); unsigned char* dev_char_picture_original; cudaMalloc((void**)&dev_char_picture_original, sizeof(unsigned char) * 3 * height_original*width_original); cudaMemcpy(dev_char_picture_original, char_picture_original, sizeof(unsigned char) * 3 * height_original*width_original, cudaMemcpyHostToDevice); unsigned short* dev_byte_pixel_original; cudaMalloc((void**)&dev_byte_pixel_original, sizeof(unsigned short)); cudaMemcpy(dev_byte_pixel_original, &byte_pixel_original, sizeof(unsigned short), cudaMemcpyHostToDevice); unsigned short* dev_bit_width_original; cudaMalloc((void**)&dev_bit_width_original, sizeof(unsigned short)); cudaMemcpy(dev_bit_width_original, &bit_width_original, sizeof(unsigned short), cudaMemcpyHostToDevice); float* dev_step_factor; cudaMalloc((void**)&dev_step_factor, sizeof(float)); cudaMemcpy(dev_step_factor, &step_factor, sizeof(float), cudaMemcpyHostToDevice); int* dev_pixel_intensity; cudaMalloc((void**)&dev_pixel_intensity, width_original*height_original * 4); cudaMemcpy(dev_pixel_intensity, pixel_intensity, width_original*height_original * 4, cudaMemcpyHostToDevice); int* dev_pixel_intensity_sqr; cudaMalloc((void**)&dev_pixel_intensity_sqr, width_original*height_original * 4); cudaMemcpy(dev_pixel_intensity_sqr, pixel_intensity_sqr, width_original*height_original * 4, cudaMemcpyHostToDevice); int window_w = cascade->window_w_mini; int window_h = cascade->window_h_mini; do { ParallelizationElementsKernel <<< 512, 512 >>>(dev_cascade, dev_stages, dev_features, dev_rects, dev_width_original, dev_height_original, dev_char_picture_original, dev_byte_pixel_original, dev_bit_width_original, dev_step_factor, dev_pixel_intensity, dev_pixel_intensity_sqr, dev_factor); factor *= step_factor; cudaMemcpy(dev_factor, &factor, sizeof(float), cudaMemcpyHostToDevice); 
window_w = floor(cascade->window_w_mini*factor); window_h = floor(cascade->window_h_mini*factor); } while (min(width_original, height_original) >= min(window_w, window_h)); printf("Error: %s\n", cudaGetErrorString(cudaGetLastError())); cudaMemcpy(char_picture_original, dev_char_picture_original, sizeof(unsigned char) * 3 * height_original*width_original, cudaMemcpyDeviceToHost); } __global__ void ParallelizationElementsKernel(TXMLCascade* cascade, TStage* stages, TFeature* features, TRect* rects, unsigned short* width_original, unsigned short* height_original, unsigned char* char_picture_original, unsigned short* byte_pixel_original, unsigned short* bit_width_original, float* step_factor, int* pixel_intensity, int* pixel_intensity_sqr, float* factor) { int window_w = floor(cascade->window_w_mini**factor); int window_h = floor(cascade->window_h_mini**factor); unsigned short x1, y1; int x_step = ((1) >= (((4)<((window_w) / 10) ? (4) : ((window_w) / 10))) ? (1) : (((4)<((window_w) / 10) ? (4) : ((window_w) / 10)))); int y_step = ((1) >= (((4)<((window_h) / 10) ? (4) : ((window_h) / 10))) ? (1) : (((4)<((window_h) / 10) ? (4) : ((window_h) / 10)))); y1 = y_step * blockIdx.x; if (y1 < *height_original - window_h) { int x_thread_step = 0; for (x1 = 0; x1 < *width_original - window_w; x_thread_step += 512) { x1 = x_thread_step + x_step * threadIdx.x; if (x1 < 512 + x_thread_step) { float mean = intensity_window_device(x1, y1, window_w, window_h, pixel_intensity, *width_original) * 1 / float(window_w*window_h); float variance = sqr_intensity_window_device(x1, y1, window_w, window_h, pixel_intensity_sqr, *width_original) * 1 / float(window_w*window_h) - (mean*mean); float stddev = 1.0; stddev = sqrt(variance); if (stddev > 10.0) { bool f_failed = false; for (int i_stage = 0; i_stage < cascade->n_stages; i_stage = i_stage + 1) { float sum_stage = 0.0; for (int i_feature = stages[i_stage].i_feature_start; i_feature <= stages[i_stage].i_feature_finish; i_feature = i_feature + 1) { int sum_feature = 0.0; for (int i_rect = features[i_feature].i_rect_start; i_rect <= features[i_feature].i_rect_finish; i_rect = i_rect + 1) { sum_feature += (intensity_window_device(x1 + rects[i_rect].x**factor, y1 + rects[i_rect].y**factor, rects[i_rect].w**factor, rects[i_rect].h**factor, pixel_intensity, *width_original)*rects[i_rect].weight); } if (sum_feature * 1 / float(window_w*window_h) < features[i_feature].feature_threshold * stddev) sum_stage += features[i_feature].left_val; else sum_stage += features[i_feature].right_val; } if (sum_stage < stages[i_stage].stage_threshold) { f_failed = true; break; } } if (f_failed == false) { printf("%d %d %d %d \n", x1, y1, x1 + window_w, y1 + window_h); unsigned short x2 = x1 + window_w; unsigned short y2 = y1 + window_h; for (int x = x1; x <= x2; x++) { char_picture_original[*bit_width_original*(*height_original - y1) + *byte_pixel_original*x + 0] = 0x00; char_picture_original[*bit_width_original*(*height_original - y1) + *byte_pixel_original*x + 1] = 0x00; char_picture_original[*bit_width_original*(*height_original - y1) + *byte_pixel_original*x + 2] = 0x00; } for (int x = x1; x <= x2; x++) { char_picture_original[*bit_width_original*(*height_original - y2) + *byte_pixel_original*x + 0] = 0x00; char_picture_original[*bit_width_original*(*height_original - y2) + *byte_pixel_original*x + 1] = 0x00; char_picture_original[*bit_width_original*(*height_original - y2) + *byte_pixel_original*x + 2] = 0x00; } for (int y = y1; y <= y2; y++) { 
char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x1 + 0] = 0x00; char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x1 + 1] = 0x00; char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x1 + 2] = 0x00; } for (int y = y1; y <= y2; y++) { char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x2 + 0] = 0x00; char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x2 + 1] = 0x00; char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x2 + 2] = 0x00; } } } } } } } void ParallelizationScale(TXMLCascade* cascade, TStage* stages, TFeature* features, TRect* rects, unsigned short width_original, unsigned short height_original, unsigned char* char_picture_original, unsigned short byte_pixel_original, unsigned short bit_width_original, float step_factor, int* pixel_intensity, int* pixel_intensity_sqr, int* mas_pointer, int** mas) { unsigned char* dev_char_picture_original; cudaMalloc((void**)&dev_char_picture_original, sizeof(unsigned char) * 3 * height_original*width_original); cudaMemcpy(dev_char_picture_original, char_picture_original, sizeof(unsigned char) * 3 * height_original*width_original, cudaMemcpyHostToDevice); int* dev_mas_pointer; cudaMalloc((void**)&dev_mas_pointer, sizeof(int)); int* dev_mas; cudaMalloc((void**)&dev_mas, sizeof(int) * 512); float factor = 1.0; float *dev_factor; cudaMalloc((void**)&dev_factor, sizeof(float)); cudaMemcpy(dev_factor, &factor, sizeof(float), cudaMemcpyHostToDevice); TXMLCascade* dev_cascade; cudaMalloc((void**)&dev_cascade, sizeof(TXMLCascade)); cudaMemcpy(dev_cascade, cascade, sizeof(TXMLCascade), cudaMemcpyHostToDevice); TStage* dev_stages; cudaMalloc((void**)&dev_stages, sizeof(TStage) * 30); cudaMemcpy(dev_stages, stages, sizeof(TStage) * 30, cudaMemcpyHostToDevice); TFeature* dev_features; cudaMalloc((void**)&dev_features, sizeof(TFeature) * 7000); cudaMemcpy(dev_features, features, sizeof(TFeature) * 7000, cudaMemcpyHostToDevice); TRect* dev_rects; cudaMalloc((void**)&dev_rects, sizeof(TRect) * 11000); cudaMemcpy(dev_rects, rects, sizeof(TRect) * 11000, cudaMemcpyHostToDevice); unsigned short* dev_width_original; cudaMalloc((void**)&dev_width_original, sizeof(unsigned short)); cudaMemcpy(dev_width_original, &width_original, sizeof(unsigned short), cudaMemcpyHostToDevice); unsigned short* dev_height_original; cudaMalloc((void**)&dev_height_original, sizeof(unsigned short)); cudaMemcpy(dev_height_original, &height_original, sizeof(unsigned short), cudaMemcpyHostToDevice); unsigned short* dev_byte_pixel_original; cudaMalloc((void**)&dev_byte_pixel_original, sizeof(unsigned short)); cudaMemcpy(dev_byte_pixel_original, &byte_pixel_original, sizeof(unsigned short), cudaMemcpyHostToDevice); unsigned short* dev_bit_width_original; cudaMalloc((void**)&dev_bit_width_original, sizeof(unsigned short)); cudaMemcpy(dev_bit_width_original, &bit_width_original, sizeof(unsigned short), cudaMemcpyHostToDevice); float* dev_step_factor; cudaMalloc((void**)&dev_step_factor, sizeof(float)); cudaMemcpy(dev_step_factor, &step_factor, sizeof(float), cudaMemcpyHostToDevice); int* dev_pixel_intensity; cudaMalloc((void**)&dev_pixel_intensity, width_original*height_original * 4); cudaMemcpy(dev_pixel_intensity, pixel_intensity, width_original*height_original * 4, cudaMemcpyHostToDevice); int* dev_pixel_intensity_sqr; cudaMalloc((void**)&dev_pixel_intensity_sqr, 
width_original*height_original * 4); cudaMemcpy(dev_pixel_intensity_sqr, pixel_intensity_sqr, width_original*height_original * 4, cudaMemcpyHostToDevice); int window_w, window_h; float scale[100]; int k = 0; do { window_w = (int)floor(cascade->window_w_mini*factor); window_h = (int)floor(cascade->window_h_mini*factor); scale[k] = factor; factor *= step_factor; k++; } while (min(width_original, height_original) >= min(window_w, window_h)); printf("block = %d; \n", k); float* dev_scale; cudaMalloc((void**)&dev_scale, sizeof(float) * 100); cudaMemcpy(dev_scale, scale, sizeof(float) * 100, cudaMemcpyHostToDevice); ParallelizationScaleKernel <<< k, 512 >>>(dev_cascade, dev_stages, dev_features, dev_rects, dev_width_original, dev_height_original, dev_char_picture_original, dev_byte_pixel_original, dev_bit_width_original, dev_step_factor, dev_pixel_intensity, dev_pixel_intensity_sqr, dev_factor, dev_scale, dev_mas_pointer, dev_mas); cudaThreadSynchronize(); printf("Error 1: %s\n", cudaGetErrorString(cudaGetLastError())); cudaMemcpy(char_picture_original, dev_char_picture_original, sizeof(unsigned char) * 3 * height_original*width_original, cudaMemcpyDeviceToHost); /*printf("Error 2: %s\n", cudaGetErrorString(cudaGetLastError())); cudaMemcpy( &mas, dev_mas, sizeof(int)*512*k,cudaMemcpyDeviceToHost); printf("Error 3: %s\n", cudaGetErrorString(cudaGetLastError())); cudaMemcpy(mas_pointer, dev_mas_pointer, sizeof(int), cudaMemcpyDeviceToHost); printf("Error 4: %s\n", cudaGetErrorString(cudaGetLastError()));*/ } __global__ void ParallelizationScaleKernel(TXMLCascade* cascade, TStage* stages, TFeature* features, TRect* rects, unsigned short* width_original, unsigned short* height_original, unsigned char* char_picture_original, unsigned short* byte_pixel_original, unsigned short* bit_width_original, float* step_factor, int* pixel_intensity, int* pixel_intensity_sqr, float* factor, float* scale, int* mas_pointer, int* mas) { int window_w = floor(cascade->window_w_mini*scale[blockIdx.x]); int window_h = floor(cascade->window_h_mini*scale[blockIdx.x]); unsigned short x1, y1; int x_step = ((1) >= (((4)<((window_w) / 10) ? (4) : ((window_w) / 10))) ? (1) : (((4)<((window_w) / 10) ? (4) : ((window_w) / 10)))); int y_step = ((1) >= (((4)<((window_h) / 10) ? (4) : ((window_h) / 10))) ? (1) : (((4)<((window_h) / 10) ? 
(4) : ((window_h) / 10)))); for (y1 = 0; y1 <= *height_original - 1 - window_h; y1 += y_step) { x1 = x_step * threadIdx.x; if (x1 < *width_original - 1 - window_w) { float mean = intensity_window_device(x1, y1, window_w, window_h, pixel_intensity, *width_original) * 1 / float(window_w*window_h); float variance = sqr_intensity_window_device(x1, y1, window_w, window_h, pixel_intensity_sqr, *width_original) * 1 / float(window_w*window_h) - (mean*mean); float stddev = 1.0; stddev = sqrt(variance); if (stddev < 10.0) continue; int f_failed = 0; for (int i_stage = 0; i_stage < cascade->n_stages; i_stage = i_stage + 1) { float sum_stage = 0.0; for (int i_feature = stages[i_stage].i_feature_start; i_feature <= stages[i_stage].i_feature_finish; i_feature = i_feature + 1) { int sum_feature = 0.0; for (int i_rect = features[i_feature].i_rect_start; i_rect <= features[i_feature].i_rect_finish; i_rect = i_rect + 1) { sum_feature += (intensity_window_device(x1 + rects[i_rect].x*scale[blockIdx.x], y1 + rects[i_rect].y*scale[blockIdx.x], rects[i_rect].w*scale[blockIdx.x], rects[i_rect].h*scale[blockIdx.x], pixel_intensity, *width_original)*rects[i_rect].weight); } float leafth = features[i_feature].feature_threshold * stddev; if (sum_feature * 1 / float(window_w*window_h) < leafth) sum_stage += features[i_feature].left_val; else sum_stage += features[i_feature].right_val; } if (sum_stage < stages[i_stage].stage_threshold) { f_failed = 1; break; } } if (f_failed == false) { printf("%d %d %d %d \n", x1, y1, x1 + window_w, y1 + window_h); unsigned short x2 = x1 + window_w; unsigned short y2 = y1 + window_h; /* atomicAdd( &mas[*mas_pointer], x1); atomicAdd( &mas[*mas_pointer+1] , y1 ); atomicAdd( &mas[*mas_pointer+2] , x2 ); atomicAdd( &mas[*mas_pointer+3] , y2 ); atomicAdd(mas_pointer, 4); printf("dev_mas_pointer=%d \n", *mas_pointer);*/ for (int x = x1; x <= x2; x++) { char_picture_original[*bit_width_original*(*height_original - y1) + *byte_pixel_original*x + 0] = 0x00; char_picture_original[*bit_width_original*(*height_original - y1) + *byte_pixel_original*x + 1] = 0x00; char_picture_original[*bit_width_original*(*height_original - y1) + *byte_pixel_original*x + 2] = 0x00; } for (int x = x1; x <= x2; x++) { char_picture_original[*bit_width_original*(*height_original - y2) + *byte_pixel_original*x + 0] = 0x00; char_picture_original[*bit_width_original*(*height_original - y2) + *byte_pixel_original*x + 1] = 0x00; char_picture_original[*bit_width_original*(*height_original - y2) + *byte_pixel_original*x + 2] = 0x00; } for (int y = y1; y <= y2; y++) { char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x1 + 0] = 0x00; char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x1 + 1] = 0x00; char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x1 + 2] = 0x00; } for (int y = y1; y <= y2; y++) { char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x2 + 0] = 0x00; char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x2 + 1] = 0x00; char_picture_original[*bit_width_original*(*height_original - y) + *byte_pixel_original*x2 + 2] = 0x00; } } } } } __device__ int intensity_window_device(int x, int y, int w_window, int h_window, int* pixel_intensity, unsigned short width_original) { int pixel_4 = pixel_intensity[width_original*(y + h_window) + (x + w_window)]; int pixel_1 = pixel_intensity[width_original*(y + 0) + (x + 0)]; int pixel_2 = 
pixel_intensity[width_original*(y + 0) + (x + w_window)]; int pixel_3 = pixel_intensity[width_original*(y + h_window) + (x + 0)]; return (pixel_4 + pixel_1 - pixel_2 - pixel_3); } __device__ int sqr_intensity_window_device(int x, int y, int w_window, int h_window, int* pixel_intensity_sqr, unsigned short width_original) { int pixel_4 = pixel_intensity_sqr[width_original*(y + h_window) + (x + w_window)]; int pixel_1 = pixel_intensity_sqr[width_original*(y + 0) + (x + 0)]; int pixel_2 = pixel_intensity_sqr[width_original*(y + 0) + (x + w_window)]; int pixel_3 = pixel_intensity_sqr[width_original*(y + h_window) + (x + 0)]; return (pixel_4 + pixel_1 - pixel_2 - pixel_3); }
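// intensity_window_device() and sqr_intensity_window_device() use the classic
// four-corner lookup, i.e. they expect pixel_intensity and pixel_intensity_sqr to
// be summed-area tables (integral images) of the grayscale image and of its
// squared values. The sketch below shows a host-side builder; the project's own
// builder lives elsewhere, and the exact inclusive/exclusive corner convention
// has to match the (x + w_window, y + h_window) offsets used above.
static void build_integral_image_sketch(const int* gray, int* integral, int w, int h) {
    for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x) {
            int above = (y > 0) ? integral[(y - 1) * w + x] : 0;
            int left  = (x > 0) ? integral[y * w + (x - 1)] : 0;
            int diag  = (y > 0 && x > 0) ? integral[(y - 1) * w + (x - 1)] : 0;
            integral[y * w + x] = gray[y * w + x] + above + left - diag;   // inclusive prefix sum
        }
    }
}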
a44a82035d72e7f1c1944c15444ed9d3483d42e4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "check.h"

#define N 8      // Matrix dimension
#define TILES 4  // How many tiles along one coordinate

__global__ void transpose(int *M, int *T)
{
  //__shared__ int cache[N/TILES*N/TILES]; // 8x8

  // Global position
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  int idy = (blockIdx.y * blockDim.y) + threadIdx.y;
  int tid1 = ((gridDim.x * blockDim.x) * idy) + idx; // Value we want to copy

  // Transposed within tile position
  int idx2 = (blockIdx.x * blockDim.x) + threadIdx.y;
  int idy2 = (blockIdx.y * blockDim.y) + threadIdx.x;
  int tid2 = ((gridDim.x * blockDim.x) * idy2) + idx2;

  if (tid1 < N*N) {
    T[tid1] = M[tid2];
  }
}

int main(int argc, char **argv)
{
  int M[N][N], T[N][N];
  int *dev_M, *dev_T;

  // Allocate arrays on the GPU
  CHECK( hipMalloc( (void**)&dev_M, N*N*sizeof(int)) );
  CHECK( hipMalloc( (void**)&dev_T, N*N*sizeof(int)) );

  // Initialise the values of the matrix on the CPU
  int k = 0;
  for (int i = 0; i < N; i++) {
    for(int j = 0; j < N; j++) {
      M[i][j] = k++;
    }
  }

  // Copy the input arrays from host to device
  CHECK( hipMemcpy( dev_M, M, N*N*sizeof(int), hipMemcpyHostToDevice) );
  CHECK( hipMemcpy( dev_T, T, N*N*sizeof(int), hipMemcpyHostToDevice) );

  dim3 grid(N/TILES, N/TILES);
  int threads_dim = N / TILES; // Threads per tile across one coordinate.
  dim3 threads(TILES, TILES);

  printf( "Matrix of (%d by %d) with (%d by %d) thread blocks.\n", N, N, threads_dim, threads_dim);

  hipLaunchKernelGGL(( transpose), dim3(grid),dim3(threads), 0, 0,  dev_M, dev_T );

  // Copy the results back from device to host array
  CHECK( hipMemcpy( T, dev_T, N*N*sizeof(int), hipMemcpyDeviceToHost) );

  // Output result
  for (int i = 0; i < N; i++) {
    for(int j = 0; j < N; j++) {
      printf( "%d ", T[i][j]);
    }
    printf("\n");
  }

  // Clean up device.
  hipFree( dev_M );
  hipFree( dev_T );

  return 0;
}
a44a82035d72e7f1c1944c15444ed9d3483d42e4.cu
#include <stdio.h>
#include "check.h"

#define N 8      // Matrix dimension
#define TILES 4  // How many tiles along one coordinate

__global__ void transpose(int *M, int *T)
{
  //__shared__ int cache[N/TILES*N/TILES]; // 8x8

  // Global position
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  int idy = (blockIdx.y * blockDim.y) + threadIdx.y;
  int tid1 = ((gridDim.x * blockDim.x) * idy) + idx; // Value we want to copy

  // Transposed within tile position
  int idx2 = (blockIdx.x * blockDim.x) + threadIdx.y;
  int idy2 = (blockIdx.y * blockDim.y) + threadIdx.x;
  int tid2 = ((gridDim.x * blockDim.x) * idy2) + idx2;

  if (tid1 < N*N) {
    T[tid1] = M[tid2];
  }
}

int main(int argc, char **argv)
{
  int M[N][N], T[N][N];
  int *dev_M, *dev_T;

  // Allocate arrays on the GPU
  CHECK( cudaMalloc( (void**)&dev_M, N*N*sizeof(int)) );
  CHECK( cudaMalloc( (void**)&dev_T, N*N*sizeof(int)) );

  // Initialise the values of the matrix on the CPU
  int k = 0;
  for (int i = 0; i < N; i++) {
    for(int j = 0; j < N; j++) {
      M[i][j] = k++;
    }
  }

  // Copy the input arrays from host to device
  CHECK( cudaMemcpy( dev_M, M, N*N*sizeof(int), cudaMemcpyHostToDevice) );
  CHECK( cudaMemcpy( dev_T, T, N*N*sizeof(int), cudaMemcpyHostToDevice) );

  dim3 grid(N/TILES, N/TILES);
  int threads_dim = N / TILES; // Threads per tile across one coordinate.
  dim3 threads(TILES, TILES);

  printf( "Matrix of (%d by %d) with (%d by %d) thread blocks.\n", N, N, threads_dim, threads_dim);

  transpose<<<grid,threads>>>( dev_M, dev_T );

  // Copy the results back from device to host array
  CHECK( cudaMemcpy( T, dev_T, N*N*sizeof(int), cudaMemcpyDeviceToHost) );

  // Output result
  for (int i = 0; i < N; i++) {
    for(int j = 0; j < N; j++) {
      printf( "%d ", T[i][j]);
    }
    printf("\n");
  }

  // Clean up device.
  cudaFree( dev_M );
  cudaFree( dev_T );

  return 0;
}
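// The commented-out __shared__ cache above hints at the usual staged variant:
// load a tile into shared memory with coalesced reads, then store it back
// transposed. The kernel below is an illustrative sketch reusing the N/TILES
// macros from this file, not the author's implementation.
__global__ void transpose_shared_sketch(const int *M, int *T)
{
  __shared__ int tile[TILES][TILES + 1];            // +1 column avoids bank conflicts

  int x = blockIdx.x * TILES + threadIdx.x;
  int y = blockIdx.y * TILES + threadIdx.y;
  tile[threadIdx.y][threadIdx.x] = M[y * N + x];    // coalesced load of the tile
  __syncthreads();

  x = blockIdx.y * TILES + threadIdx.x;             // swap the block coordinates
  y = blockIdx.x * TILES + threadIdx.y;
  T[y * N + x] = tile[threadIdx.x][threadIdx.y];    // coalesced store, tile transposed
}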
5763ddcbf81a97966d08137723246179bcb02062.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "accumulateColsKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *input = NULL; hipMalloc(&input, XSIZE*YSIZE); float *output = NULL; hipMalloc(&output, XSIZE*YSIZE); int channels = 1; int h = YSIZE; int w = XSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( accumulateColsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,channels,h,w); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( accumulateColsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,channels,h,w); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( accumulateColsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,channels,h,w); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
5763ddcbf81a97966d08137723246179bcb02062.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "accumulateColsKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *input = NULL; cudaMalloc(&input, XSIZE*YSIZE); float *output = NULL; cudaMalloc(&output, XSIZE*YSIZE); int channels = 1; int h = YSIZE; int w = XSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); accumulateColsKernel<<<gridBlock,threadBlock>>>(input,output,channels,h,w); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { accumulateColsKernel<<<gridBlock,threadBlock>>>(input,output,channels,h,w); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { accumulateColsKernel<<<gridBlock,threadBlock>>>(input,output,channels,h,w); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
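// Two observations about the timing harness above: cudaMalloc(&input, XSIZE*YSIZE)
// allocates bytes, so for float data the intended size is presumably
// XSIZE*YSIZE*sizeof(float); and the steady_clock interval is taken without a final
// synchronization, so work still queued on the device is not counted. Below is an
// illustrative device-side timing sketch using CUDA events; the helper name is an
// assumption and the kernel signature is taken from the calls above.
static float time_1000_launches_sketch(float *input, float *output, int channels, int h, int w,
                                       dim3 gridBlock, dim3 threadBlock) {
  cudaEvent_t t0, t1;
  cudaEventCreate(&t0);
  cudaEventCreate(&t1);
  cudaEventRecord(t0);
  for (int i = 0; i < 1000; ++i) {
    accumulateColsKernel<<<gridBlock, threadBlock>>>(input, output, channels, h, w);
  }
  cudaEventRecord(t1);
  cudaEventSynchronize(t1);           // wait until every queued launch has finished
  float ms = 0.f;
  cudaEventElapsedTime(&ms, t0, t1);  // elapsed device time for the 1000 launches
  cudaEventDestroy(t0);
  cudaEventDestroy(t1);
  return ms;
}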
666896a3a2c4ed475a53f1c48329c7855a72fe0d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "getPoses.h"
#include <thrust/functional.h>
#include <thrust/extrema.h>
#include <thrust/count.h>
#include <thrust/remove.h>
#include <thrust/device_vector.h>
#include "device_common.h"
#include <iostream>
#include <climits>   // INT_MAX

using namespace std;

struct isLessTest {
  __host__ __device__ bool operator()(const thrust::tuple<float4, float2, bool>& a ) {
    return (thrust::get<2>(a) == false);
  };
};

__global__ void isLess_kernel(bool* isEasLess, float* Eas, const float threshold, const int numPoses) {
  const int tIdx = threadIdx.x;
  const int Idx = blockIdx.x * BLOCK_SIZE + tIdx;
  if (Idx >= numPoses) return;
  isEasLess[Idx] = (Eas[Idx] < threshold) ? true : false;
}

thrust::device_vector<float>::iterator findMin(thrust::device_vector<float>* Eas) {
  return thrust::min_element(Eas->begin(), Eas->end());
}

bool getPoses(thrust::device_vector<float4>* Poses4, thrust::device_vector<float2>* Poses2,
              thrust::device_vector<float>* Eas, float minEa, const float& delta, int* numPoses) {
  // get initial threshold
  const float thresh = 0.1869 * delta + 0.0161 - 0.002;
  minEa += thresh;

  // count reductions
  bool tooHighPercentage = false;
  bool first = true;
  int count = INT_MAX;
  thrust::device_vector<bool> isEasLess(*numPoses, false);
  const int BLOCK_NUM = (*numPoses - 1) / BLOCK_SIZE + 1;

  while (true) {
    hipLaunchKernelGGL(( isLess_kernel), dim3(BLOCK_NUM), dim3(BLOCK_SIZE), 0, 0,
        thrust::raw_pointer_cast(isEasLess.data()), thrust::raw_pointer_cast(Eas->data()), minEa, Eas->size());
    count = thrust::count(isEasLess.begin(), isEasLess.end(), true);

    if (first) {
      // float division: the integer quotient would always be 0 whenever count < *numPoses
      float percentage = (float)count / (float)(*numPoses);
      tooHighPercentage = (percentage > 0.1);
      first = false;   // only the first pass decides tooHighPercentage
    }

    if (count < 27000) {
      // cut poses4 and poses2
      typedef thrust::tuple< thrust::device_vector< float4 >::iterator,
                             thrust::device_vector< float2 >::iterator,
                             thrust::device_vector< bool >::iterator > TupleIt;
      typedef thrust::zip_iterator< TupleIt > ZipIt;
      ZipIt Zend = thrust::remove_if(
          thrust::make_zip_iterator(thrust::make_tuple(Poses4->begin(), Poses2->begin(), isEasLess.begin())),
          thrust::make_zip_iterator(thrust::make_tuple(Poses4->end(), Poses2->end(), isEasLess.end())),
          isLessTest() );
      Poses4->erase(thrust::get<0>(Zend.get_iterator_tuple()), Poses4->end());
      Poses2->erase(thrust::get<1>(Zend.get_iterator_tuple()), Poses2->end());
      *numPoses = count;
      break;
    }
    minEa *= 0.99;
  }
  return tooHighPercentage;
}
666896a3a2c4ed475a53f1c48329c7855a72fe0d.cu
#include "getPoses.h"
#include <thrust/functional.h>
#include <thrust/extrema.h>
#include <thrust/count.h>
#include <thrust/remove.h>
#include <thrust/device_vector.h>
#include "device_common.h"
#include <iostream>
#include <climits>   // INT_MAX

using namespace std;

struct isLessTest {
  __host__ __device__ bool operator()(const thrust::tuple<float4, float2, bool>& a ) {
    return (thrust::get<2>(a) == false);
  };
};

__global__ void isLess_kernel(bool* isEasLess, float* Eas, const float threshold, const int numPoses) {
  const int tIdx = threadIdx.x;
  const int Idx = blockIdx.x * BLOCK_SIZE + tIdx;
  if (Idx >= numPoses) return;
  isEasLess[Idx] = (Eas[Idx] < threshold) ? true : false;
}

thrust::device_vector<float>::iterator findMin(thrust::device_vector<float>* Eas) {
  return thrust::min_element(Eas->begin(), Eas->end());
}

bool getPoses(thrust::device_vector<float4>* Poses4, thrust::device_vector<float2>* Poses2,
              thrust::device_vector<float>* Eas, float minEa, const float& delta, int* numPoses) {
  // get initial threshold
  const float thresh = 0.1869 * delta + 0.0161 - 0.002;
  minEa += thresh;

  // count reductions
  bool tooHighPercentage = false;
  bool first = true;
  int count = INT_MAX;
  thrust::device_vector<bool> isEasLess(*numPoses, false);
  const int BLOCK_NUM = (*numPoses - 1) / BLOCK_SIZE + 1;

  while (true) {
    isLess_kernel <<< BLOCK_NUM, BLOCK_SIZE >>> (thrust::raw_pointer_cast(isEasLess.data()),
        thrust::raw_pointer_cast(Eas->data()), minEa, Eas->size());
    count = thrust::count(isEasLess.begin(), isEasLess.end(), true);

    if (first) {
      // float division: the integer quotient would always be 0 whenever count < *numPoses
      float percentage = (float)count / (float)(*numPoses);
      tooHighPercentage = (percentage > 0.1);
      first = false;   // only the first pass decides tooHighPercentage
    }

    if (count < 27000) {
      // cut poses4 and poses2
      typedef thrust::tuple< thrust::device_vector< float4 >::iterator,
                             thrust::device_vector< float2 >::iterator,
                             thrust::device_vector< bool >::iterator > TupleIt;
      typedef thrust::zip_iterator< TupleIt > ZipIt;
      ZipIt Zend = thrust::remove_if(
          thrust::make_zip_iterator(thrust::make_tuple(Poses4->begin(), Poses2->begin(), isEasLess.begin())),
          thrust::make_zip_iterator(thrust::make_tuple(Poses4->end(), Poses2->end(), isEasLess.end())),
          isLessTest() );
      Poses4->erase(thrust::get<0>(Zend.get_iterator_tuple()), Poses4->end());
      Poses2->erase(thrust::get<1>(Zend.get_iterator_tuple()), Poses2->end());
      *numPoses = count;
      break;
    }
    minEa *= 0.99;
  }
  return tooHighPercentage;
}
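// A hedged sketch of how findMin() and getPoses() fit together on the host side;
// the wrapper name and calling pattern are assumptions, not code from the
// original project.
static bool filter_poses_sketch(thrust::device_vector<float4> &poses4,
                                thrust::device_vector<float2> &poses2,
                                thrust::device_vector<float>  &eas,
                                float delta, int &numPoses) {
  float minEa = *findMin(&eas);   // dereferencing the device iterator copies the minimum Ea to the host
  // getPoses() shrinks poses4/poses2 in place to the surviving candidates and updates
  // numPoses; the return value flags that the first cut kept more than 10% of the poses.
  return getPoses(&poses4, &poses2, &eas, minEa, delta, &numPoses);
}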
d01d5c3ebcb9c57e507bfc123d1999605ec080a4.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

__global__ void mandelKernel(int* d_data, int width, float stepX, float stepY, float lowerX, float lowerY, int count) {
  // To avoid errors caused by floating-point arithmetic, use the following pseudo code
  //
  // float x = lowerX + thisX * stepX;
  // float y = lowerY + thisY * stepY;
  int thisX = blockIdx.x * blockDim.x + threadIdx.x;
  int thisY = blockIdx.y * blockDim.y + threadIdx.y;

  float c_x = lowerX + thisX * stepX;
  float c_y = lowerY + thisY * stepY;
  float z_x = c_x;
  float z_y = c_y;

  int iter;
  for (iter = 0; iter < count; ++iter) {
    if (z_x * z_x + z_y * z_y > 4.f) break;
    float new_x = z_x * z_x - z_y * z_y;
    float new_y = 2.f * z_x * z_y;
    z_x = c_x + new_x;
    z_y = c_y + new_y;
  }

  int idx = thisX + thisY * width;
  d_data[idx] = iter;
}

// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
  float stepX = (upperX - lowerX) / resX;
  float stepY = (upperY - lowerY) / resY;

  int N = resX * resY;
  int size = N * sizeof(int);

  int *d_data;
  hipMalloc(&d_data, size);

  dim3 threadsPerBlock(25, 25);
  dim3 numBlocks(resX / threadsPerBlock.x, resY / threadsPerBlock.y);

  hipLaunchKernelGGL(( mandelKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_data, resX, stepX, stepY, lowerX, lowerY, maxIterations);

  hipMemcpy(img, d_data, size, hipMemcpyDeviceToHost);
  hipFree(d_data);
}
d01d5c3ebcb9c57e507bfc123d1999605ec080a4.cu
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

__global__ void mandelKernel(int* d_data, int width, float stepX, float stepY, float lowerX, float lowerY, int count) {
    // To avoid error caused by the floating number, use the following pseudo code
    //
    // float x = lowerX + thisX * stepX;
    // float y = lowerY + thisY * stepY;
    int thisX = blockIdx.x * blockDim.x + threadIdx.x;
    int thisY = blockIdx.y * blockDim.y + threadIdx.y;

    float c_x = lowerX + thisX * stepX;
    float c_y = lowerY + thisY * stepY;
    float z_x = c_x;
    float z_y = c_y;

    int iter;
    for (iter = 0; iter < count; ++iter){
        if (z_x * z_x + z_y * z_y > 4.f) break;
        float new_x = z_x * z_x - z_y * z_y;
        float new_y = 2.f * z_x * z_y;
        z_x = c_x + new_x;
        z_y = c_y + new_y;
    }

    int idx = thisX + thisY * width;
    d_data[idx] = iter;
}

// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;

    int N = resX * resY;
    int size = N * sizeof(int);
    int *d_data;
    cudaMalloc(&d_data, size);

    dim3 threadsPerBlock(25, 25);
    dim3 numBlocks(resX / threadsPerBlock.x, resY / threadsPerBlock.y);
    mandelKernel<<<numBlocks, threadsPerBlock>>>(d_data, resX, stepX, stepY, lowerX, lowerY, maxIterations);

    cudaMemcpy(img, d_data, size, cudaMemcpyDeviceToHost);
    cudaFree(d_data);
}
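Both versions of hostFE above only cover the full image when resX and resY are exact multiples of the 25x25 block, because numBlocks uses truncating integer division. The sketch below shows the usual alternative of rounding the grid up and bounds-checking inside the kernel; mandelKernelBounded and hostFE2 are hypothetical names added for illustration and are not part of the original assignment code.

#include <cuda_runtime.h>

__global__ void mandelKernelBounded(int* d_data, int resX, int resY,
                                    float stepX, float stepY,
                                    float lowerX, float lowerY, int count)
{
    int thisX = blockIdx.x * blockDim.x + threadIdx.x;
    int thisY = blockIdx.y * blockDim.y + threadIdx.y;
    if (thisX >= resX || thisY >= resY) return;   // guard the padded threads

    float c_x = lowerX + thisX * stepX;
    float c_y = lowerY + thisY * stepY;
    float z_x = c_x, z_y = c_y;

    int iter;
    for (iter = 0; iter < count; ++iter) {
        if (z_x * z_x + z_y * z_y > 4.f) break;
        float new_x = z_x * z_x - z_y * z_y;
        float new_y = 2.f * z_x * z_y;
        z_x = c_x + new_x;
        z_y = c_y + new_y;
    }
    d_data[thisX + thisY * resX] = iter;
}

// Hypothetical variant of hostFE: round the grid up so every pixel is covered.
void hostFE2(float upperX, float upperY, float lowerX, float lowerY,
             int* img, int resX, int resY, int maxIterations)
{
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;

    int* d_data;
    size_t size = (size_t)resX * resY * sizeof(int);
    cudaMalloc(&d_data, size);

    dim3 block(16, 16);
    dim3 grid((resX + block.x - 1) / block.x, (resY + block.y - 1) / block.y);
    mandelKernelBounded<<<grid, block>>>(d_data, resX, resY, stepX, stepY,
                                         lowerX, lowerY, maxIterations);

    cudaMemcpy(img, d_data, size, cudaMemcpyDeviceToHost);
    cudaFree(d_data);
}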
1befc46948acf817bd9f6543fb00870e555c26b0.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <stdio.h> #include <MatKernel.hpp> static const unsigned int c1 = 0xcc9e2d51; static const unsigned int c2 = 0x1b873593; static const unsigned int r1 = 15; static const unsigned int r2 = 13; static const unsigned int m = 5; static const unsigned int n = 0xe6546b64; __device__ inline unsigned int h1(unsigned int k, unsigned int hash) { k *= c1; k = (k << r1) | (k >> (32-r1)); k *= c2; hash ^= k; hash = ((hash << r2) | (hash >> (32-r2)) * m) + n; return hash; } __device__ inline unsigned int mmhash(unsigned int v1, unsigned int v2, unsigned int v3, unsigned int mod, unsigned int seed) { unsigned int hash = seed; hash = h1(v1, hash); hash = h1(v2, hash); hash = h1(v3, hash); hash ^= (hash >> 16); hash *= 0x85ebca6b; hash ^= (hash >> 13); hash *= 0xc2b2ae35; hash ^= (hash >> 16); return (hash % mod); } #define DBSIZE (8*1024) __global__ void __treePack(int *idata, int *treenodes, int *icats, int *jc, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps) { __shared__ int dbuff[DBSIZE]; __shared__ int fl[32]; int j, k, ic, ival; int seed = 45123421; int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid < 6) { fl[tid] = fieldlens[tid]; } __syncthreads(); int vshift = fl[5]; int ishift = fl[4] + vshift; int jshift = fl[3] + ishift; int nshift = fl[2] + jshift; int tshift = fl[1] + nshift; int cmask = (1 << fl[5]) - 1; int vmask = (1 << fl[4]) - 1; int imask = (1 << fl[3]) - 1; int jmask = (1 << fl[2]) - 1; int nmask = (1 << fl[1]) - 1; int tmask = (1 << fl[0]) - 1; int nc = (DBSIZE / nrows); int itree = threadIdx.y; int jfeat = threadIdx.x; for (int i = nc * blockIdx.x; i < ncols; i += nc * gridDim.x) { int ctodo = min(nc, ncols - i); for (j = tid; j < nrows * ctodo; j += blockDim.x*blockDim.y) { dbuff[j] = idata[j + i * nrows]; } __syncthreads(); for (j = i; j < i + ctodo; j++) { for (itree = threadIdx.y; itree < ntrees; itree += blockDim.y) { int inode = treenodes[itree + j * ntrees]; int ifeat = mmhash(itree, inode, jfeat, nrows, seed); long long hdr = (((long long)(tmask & itree)) << tshift) | (((long long)(nmask & inode)) << nshift) | (((long long)(jmask & jfeat)) << jshift) | (((long long)(imask & ifeat)) << ishift) ; for (k = jc[j]; k < jc[j+1]; k++) { ic = icats[k]; if (jfeat < nsamps) { ival = dbuff[ifeat + (j - i) * nrows]; out[jfeat + nsamps * (itree + ntrees * k)] = hdr | (((long long)(vmask & ival)) << vshift) | ((long long)(ic & cmask)); } } } } __syncthreads(); } } int treePack(int *fdata, int *treenodes, int *icats, int *jc, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps) { int ntx = 32 * (1 + (nsamps - 1)/32); int nty = min(1024 / ntx, ntrees); dim3 bdim(ntx, nty, 1); int nb = min(32, 1 + (ncols-1)/32); hipLaunchKernelGGL(( __treePack), dim3(nb),dim3(bdim), 0, 0, fdata, treenodes, icats, jc, out, fieldlens, nrows, ncols, ntrees, nsamps); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } class entImpty { public: static __device__ inline float fupdate(int v) { return (float)v * logf((float)max(1, v)); } static __device__ inline float fresult(float vacc, int vsum) { float vs = (float)max(1, vsum); return logf(vs) - vacc / vs; } }; class giniImpty { public: static __device__ inline float fupdate(int v) { return (float)v * (float)v; } static __device__ inline float fresult(float vacc, int vsum) { float vs = (float)max(1, vsum); return 1.0f - vacc / (vs*vs); } }; #if 
__CUDA_ARCH__ >= 300 __device__ inline void accumup2(int &cnt, float &update) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_up(update, h); int tmp = __shfl_up(cnt, h); if (threadIdx.x >=h) { update += tmpx; cnt += tmp; } } } __device__ inline void accumup3(int &cnt, float &update, float &updatet) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_up(update, h); float tmpy = __shfl_up(updatet, h); int tmp = __shfl_up(cnt, h); if (threadIdx.x >=h) { update += tmpx; updatet += tmpy; cnt += tmp; } } } __device__ inline void accumdown3(int &cnt, float &update, float &updatet, int bound) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_down(update, h); float tmpy = __shfl_down(updatet, h); int tmp = __shfl_down(cnt, h); if (threadIdx.x + h <= bound) { update += tmpx; updatet += tmpy; cnt += tmp; } } } __device__ inline void minup2(float &impty, int &ival) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_up(impty, h); int tmp = __shfl_up(ival, h); if (threadIdx.x >= h && tmpx < impty) { impty = tmpx; ival = tmp; } } } __device__ inline void maxup2(int &v, int &indx) { #pragma unroll for (int h = 1; h < 32; h = h + h) { int tmpv = __shfl_up(v, h); int tmpi = __shfl_up(indx, h); if (threadIdx.x >= h && tmpv > v) { v = tmpv; indx = tmpi; } } } template<typename T> __global__ void __minImpuritya(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) { __shared__ int catcnt[DBSIZE/2]; __shared__ int cattot[DBSIZE/2]; int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid < 6) { catcnt[tid] = fieldlens[tid]; } __syncthreads(); int vshift = catcnt[5]; int ishift = catcnt[4] + vshift; int cmask = (1 << catcnt[5]) - 1; int vmask = (1 << catcnt[4]) - 1; int imask = (1 << catcnt[3]) - 1; __syncthreads(); int i, j, k, jc0, jc1, jlast; long long key; int cold, ctot, ctt, ctotall, cnew, cnt, ival, icat, lastival, bestival, tmp, maxcnt, imaxcnt; float update, updatet, cacc, cact, caccall, impty, minimpty, lastimpty, tmpx; for (i = threadIdx.y + blockDim.y * blockIdx.x; i < nnodes*nsamps; i += blockDim.y * gridDim.x) { // Process a group with fixed itree, inode, and ifeat jc0 = jc[i]; // The range of indices for this group jc1 = jc[i+1]; __syncthreads(); // Clear the cat counts for this group for (j = tid; j < DBSIZE/2; j += blockDim.x * blockDim.y) { catcnt[j] = 0; cattot[j] = 0; } __syncthreads(); // First pass gets counts for each category and the (ci)log(ci) sum for this block ctot = 0; cacc = 0.0f; maxcnt = -1; imaxcnt = -1; for (j = jc0; j < jc1; j += blockDim.x) { if (j + threadIdx.x < jc1) { // Read a block of (32) keys and counts key = keys[j + threadIdx.x]; // Each (x) thread handles a different input cnt = counts[j + threadIdx.x]; icat = ((int)key) & cmask; // Extract the cat id and integer value } jlast = min(31, jc1 - j - 1); for (k = 0; k <= jlast; k++) { // Sequentially update counts so that each thread if (threadIdx.x == k) { // in this warp gets the old and new counts cold = cattot[icat + ncats * threadIdx.y]; // i.e. 
data for item k is in thread k cnew = cold + cnt; cattot[icat + ncats * threadIdx.y] = cnew; } } update = T::fupdate(cnew) - T::fupdate(cold); accumup2(cnt,update); ctot += cnt; // Now update the total c and total ci log ci sums cacc += update; ctot = __shfl(ctot, jlast); cacc = __shfl(cacc, jlast); if (cnew > maxcnt) { // Compute and distribute the max cnt maxcnt = cnew; imaxcnt = icat; } maxup2(maxcnt, imaxcnt); maxcnt = __shfl(maxcnt, jlast); imaxcnt = __shfl(imaxcnt, jlast); } __syncthreads(); // if (threadIdx.x == 0 && i < 32) printf("cuda %d %d %f\n", i, ctot, cacc); // Second pass to compute impurity at every input point caccall = cacc; // Save the total count and (ci)log(ci) sum cact = cacc; ctotall = ctot; ctot = 0; cacc = 0.0f; lastival = -1; lastimpty = 1e7f; minimpty = 1e7f; for (j = jc0; j < jc1; j += blockDim.x) { if (j + threadIdx.x < jc1) { // Read a block of (32) keys and counts key = keys[j + threadIdx.x]; // Each (x) thread handles a different input cnt = counts[j + threadIdx.x]; icat = ((int)key) & cmask; // Extract the cat id and integer value ival = ((int)(key >> vshift)) & vmask; } jlast = min(31, jc1 - j - 1); for (k = 0; k <= jlast; k++) { // Sequentially update counts so that each thread if (threadIdx.x == k) { // in this warp gets the old and new counts cold = catcnt[icat + ncats * threadIdx.y]; // i.e. data for item k is in thread k ctt = cattot[icat + ncats * threadIdx.y]; cnew = cold + cnt; catcnt[icat + ncats * threadIdx.y] = cnew; } } update = T::fupdate(cnew) - T::fupdate(cold); // Compute the impurity updates for this input updatet = T::fupdate(ctt-cnew) - T::fupdate(ctt-cold); accumup3(cnt, update, updatet); ctot += cnt; // Now update the total c and total ci log ci sums cacc += update; cact += updatet; impty = T::fresult(cacc, ctot) + T::fresult(cact, ctotall-ctot); // And the impurity for this input // if (i == 0) printf("cuda pos %d impty %f icat %d cnts %d %d cacc %f %d\n", j + threadIdx.x, impty, icat, cold, cnew, cacc, ctot); tmp = __shfl_up(ival, 1); // Need the last impurity and ival in order tmpx = __shfl_up(impty, 1); // to restrict the partition feature to a value boundary if (threadIdx.x > 0) { lastival = tmp; lastimpty = tmpx; } if (ival == lastival) lastimpty = 1e7f; // Eliminate values which are not at value boundaries if (lastimpty < minimpty) { minimpty = lastimpty; bestival = ival; } minup2(minimpty,bestival); minimpty = __shfl(minimpty, jlast); // Carefully copy the last active thread to all threads, needed outside this loop bestival = __shfl(bestival, jlast); ctot = __shfl(ctot, jlast); cacc = __shfl(cacc, jlast); cact = __shfl(cact, jlast); lastival = __shfl(ival, jlast); lastimpty = __shfl(impty, jlast); } if (threadIdx.x == 0) { outv[i] = bestival; // Output the best split feature value outf[i] = ((int)(key >> ishift)) & imask; // Save the feature index outg[i] = T::fresult(caccall, ctotall) - minimpty; // And the impurity gain outc[i] = imaxcnt; } } } template<typename T> __global__ void __minImpurityb(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) { __shared__ int catcnt[DBSIZE]; __shared__ int cattot[DBSIZE/4]; __shared__ int stott[32]; __shared__ float sacct[32]; __shared__ int slastival[64]; __shared__ int sbestival[32]; __shared__ float sminimpty[32]; int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid < 6) { catcnt[tid] = fieldlens[tid]; } __syncthreads(); int vshift = catcnt[5]; int ishift = catcnt[4] + vshift; int cmask = (1 << 
catcnt[5]) - 1; int vmask = (1 << catcnt[4]) - 1; int imask = (1 << catcnt[3]) - 1; __syncthreads(); int i, j, k, h, jc0, jc1, ilast, jlast; long long key; int cold, tot, ctt, tott, cnew, cnt, ncnt, tcnt, ival, icat, lastival, bestival, tmp; float update, updatet, acc, acct, impty, minimpty; for (i = blockIdx.x; i < nnodes*nsamps; i += gridDim.x) { // Process a group with fixed itree, inode, and ifeat jc0 = jc[i]; // The range of indices for this group jc1 = jc[i+1]; __syncthreads(); // Clear the cat counts and totals for (j = threadIdx.x; j < ncats; j += blockDim.x) { catcnt[j + threadIdx.y * blockDim.x] = 0; if (threadIdx.y == 0) cattot[j] = 0; } if (threadIdx.y == 0) { sminimpty[threadIdx.x] = 1e7f; sbestival[threadIdx.x] = -1; } __syncthreads(); // First pass gets counts for each category and the (ci)log(ci) sum for this entire ifeat group for (j = jc0; j < jc1; j += blockDim.x * blockDim.x) { if (j + tid < jc1) { // Read a block of keys and counts key = keys[j + tid]; cnt = counts[j + tid]; icat = ((int)key) & cmask; // Extract the cat id atomicAdd(&cattot[icat + threadIdx.y * ncats], cnt); // Update count totals } } __syncthreads(); tott = 0; // Compute total count and (c)log(c) for the entire ifeat group acct = 0; if (threadIdx.y == 0) { for (k = 0; k < ncats; k += blockDim.x) { if (k + threadIdx.x < ncats) { tcnt = cattot[k + threadIdx.x]; update = T::fupdate(tcnt); } else { tcnt = 0; update = 0; } accumup2(tcnt,update); ilast = min(31, ncats - k - 1); tcnt = __shfl(tcnt, ilast); update = __shfl(update, ilast); tott += tcnt; acct += update; } stott[threadIdx.x] = tott; sacct[threadIdx.x] = acct; } tott = stott[threadIdx.x]; // if (tid == 0 && i < 32) printf("cuda %d %d %f\n", i, tott, acct); // Main loop, work on blocks of 1024 (ideally) for (j = jc0; j < jc1; j += blockDim.x * blockDim.x) { for (k = 0; k < ncats; k += blockDim.x) { // copy cumcounts from last row of last iteration to the first row tmp = catcnt[k + threadIdx.x + (blockDim.y -1) * ncats]; __syncthreads(); if (threadIdx.y == 0) { catcnt[k + threadIdx.x] = tmp; } else { catcnt[k + threadIdx.x + threadIdx.y * ncats] = 0; } __syncthreads(); } if (j + tid < jc1) { // Read a block of keys and counts key = keys[j + tid]; cnt = counts[j + tid]; icat = ((int)key) & cmask; // Extract the cat id and integer value; ival = ((int)(key >> vshift)) & vmask; atomicAdd(&catcnt[icat + threadIdx.y * ncats], cnt); // Update count totals } jlast = min(31, jc1 - j - threadIdx.y * 32 - 1); // Save the last value in this group if (threadIdx.x == jlast) { slastival[threadIdx.y + 1] = ival; } __syncthreads(); for (k = 0; k < ncats; k += blockDim.x) { // Form the cumsum along columns of catcnts for (h = 1; h < blockDim.y; h = h + h) { if (k + threadIdx.x < ncats && blockIdx.y + h < blockDim.y) { tmp = catcnt[k + threadIdx.x + ncats * threadIdx.y]; } __syncthreads(); if (k + threadIdx.x < ncats && blockIdx.y + h < blockDim.y) { catcnt[k + threadIdx.x + ncats * (threadIdx.y + h)] += tmp; } __syncthreads(); } } tot = 0; // Local to a yblock (row) of catcnts acc = 0.0f; acct = 0.0f; for (k = 0; k < ncats; k += blockDim.x) { // Now sum within a row (yblock) if (k + threadIdx.x < ncats) { cnt = catcnt[k + threadIdx.x + threadIdx.y * ncats]; update = T::fupdate(cnt); updatet = T::fupdate(cattot[k + threadIdx.x] - cnt); } else { cnt = 0; update = 0; updatet = 0; } accumup3(cnt,update,updatet); ilast = min(31, ncats - k - 1); update = __shfl(update, ilast); updatet = __shfl(updatet, ilast); cnt = __shfl(cnt, ilast); tot += cnt; acc += update; acct += 
updatet; } __syncthreads(); // OK, we have everything needed now to compute impurity for the rows in this yblock: // tot, acc, acct at the end of the block lastival = -1; minimpty = 1e7f; ncnt = -cnt; for (k = jlast; k >= 0; k--) { // Sequentially update counts so that each thread if (threadIdx.x == k) { // in this warp gets the old and new counts cold = catcnt[icat + ncats * threadIdx.y]; // i.e. data for item k is in thread k ctt = cattot[icat + ncats * threadIdx.y]; cnew = cold + ncnt; catcnt[icat + ncats * threadIdx.y] = cnew; } } update = T::fupdate(cnew) - T::fupdate(cold); updatet = T::fupdate(ctt - cnew) - T::fupdate(ctt - cold); accumdown3(ncnt,update,updatet,jlast); tot += cnt; // Now update the total c and total ci log ci sums acc += update; acct += updatet; impty = T::fresult(acc, tot) + T::fresult(acct, tott - tot); // And the impurity for this input tmp = __shfl_up(ival, 1); if (threadIdx.x > 0) { // Get the last ival to check for a boundary lastival = tmp; } else { lastival = slastival[threadIdx.y]; } __syncthreads(); if (tid == 0) { tmp = slastival[33]; slastival[0] = tmp; } __syncthreads(); if (ival == lastival) impty = 1e7f; // Eliminate values which are not at value boundaries if (impty < minimpty) { minimpty = impty; bestival = ival; } minup2(minimpty,bestival); minimpty = __shfl(minimpty, jlast); bestival = __shfl(bestival, jlast); if (threadIdx.x == 0) { sminimpty[threadIdx.y] = minimpty; sbestival[threadIdx.y] = bestival; } __syncthreads(); if (threadIdx.y == 0) { minimpty = sminimpty[threadIdx.x]; bestival = sbestival[threadIdx.x]; minup2(minimpty,bestival); minimpty = __shfl(minimpty, blockDim.y - 1); bestival = __shfl(bestival, blockDim.y - 1); sminimpty[threadIdx.x] = minimpty; sbestival[threadIdx.x] = bestival; } __syncthreads(); } if (tid == 0) { outv[i] = bestival; // Output the best split feature value outf[i] = (int)((key >> ishift) & imask); // Save the feature index // outg[i] = T::fresult(sacct[0], tott) - minimpty; // And the impurity gain outg[i] = T::fresult(sacct[0], tott); // And the impurity gain } __syncthreads(); } } #else template<class T> __global__ void __minImpuritya(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) {} template<class T> __global__ void __minImpurityb(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) {} #endif int minImpurity(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps, int impType) { // Note: its safe to round ncats up to a multiple of 32, since its only used to split shmem int ny = min(32, DBSIZE/ncats/2); dim3 tdim(32, ny, 1); int ng = min(64, nnodes*nsamps); if ((impType & 2) == 0) { if ((impType & 1) == 0) { hipLaunchKernelGGL(( __minImpuritya<entImpty>), dim3(ng),dim3(tdim), 0, 0, keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } else { hipLaunchKernelGGL(( __minImpuritya<giniImpty>), dim3(ng),dim3(tdim), 0, 0, keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } } else { if ((impType & 1) == 0) { hipLaunchKernelGGL(( __minImpurityb<entImpty>), dim3(ng),dim3(tdim), 0, 0, keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } else { hipLaunchKernelGGL(( __minImpurityb<giniImpty>), dim3(ng),dim3(tdim), 0, 0, keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } } 
fflush(stdout); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __findBoundaries(long long *keys, int *jc, int n, int njc, int shift) { __shared__ int dbuff[1024]; int i, j, iv, lasti; int imin = ((int)(32 * ((((long long)n) * blockIdx.x) / (gridDim.x * 32)))); int imax = min(n, ((int)(32 * ((((long long)n) * (blockIdx.x + 1)) / (gridDim.x * 32) + 1)))); int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid == 0 && blockIdx.x == 0) { jc[0] = 0; } __syncthreads(); lasti = 0x7fffffff; for (i = imin; i <= imax; i += blockDim.x * blockDim.y) { iv = njc; if (i + tid < imax) { iv = (int)(keys[i + tid] >> shift); dbuff[tid] = iv; } __syncthreads(); if (i + tid < imax || i + tid == n) { if (tid > 0) lasti = dbuff[tid - 1]; if (iv > lasti) { for (j = lasti+1; j <= iv; j++) { jc[j] = i + tid; } } if (tid == 0) { lasti = dbuff[blockDim.x * blockDim.y - 1]; } } __syncthreads(); } } int findBoundaries(long long *keys, int *jc, int n, int njc, int shift) { int ny = min(32, 1 + (n-1)/32); dim3 tdim(32, ny, 1); int ng = min(64, 1+n/32/ny); hipLaunchKernelGGL(( __findBoundaries), dim3(ng),dim3(tdim), 0, 0, keys, jc, n, njc, shift); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } template<typename T> __global__ void __mergeIndsP1(T *keys, int *cspine, T *ispine, T *vspine, int n) { __shared__ T dbuff[1024]; int i, j, itodo, doit, total; T thisval, lastval, endval, tmp; int tid = threadIdx.x + threadIdx.y * blockDim.x; int imin = (int)(((long long)n) * blockIdx.x / gridDim.x); int imax = (int)(((long long)n) * (blockIdx.x + 1) / gridDim.x); total = 0; if (tid == 0) { lastval = keys[imin]; ispine[blockIdx.x] = lastval; } for (i = imin; i < imax; i += blockDim.x * blockDim.y) { itodo = min(blockDim.x * blockDim.y, imax - i); __syncthreads(); if (i + tid < imax) { thisval = keys[i + tid]; dbuff[tid] = thisval; } __syncthreads(); if (tid > 0 && i + tid < imax) lastval = dbuff[tid - 1]; if (tid == 0) endval = dbuff[itodo-1]; __syncthreads(); if (i + tid < imax) { dbuff[tid] = (thisval == lastval) ? 
0 : 1; } __syncthreads(); for (j = 1; j < itodo; j = j + j) { doit = tid + j < itodo && (tid & ((j + j)-1)) == 0; if (doit) { tmp = dbuff[tid] + dbuff[tid + j]; } __syncthreads(); if (doit) { dbuff[tid] = tmp; } __syncthreads(); } if (tid == 0) { total += dbuff[0]; lastval = endval; } __syncthreads(); } if (tid == 0) { cspine[blockIdx.x] = total; vspine[blockIdx.x] = endval; } } template<typename T> __global__ void __fixSpine(int *cspine, T *ispine, T *vspine, int n) { __shared__ int counts[1024]; int tid = threadIdx.x + threadIdx.y * blockDim.x; int i, tmp; if (tid < n) { counts[tid] = cspine[tid]; } __syncthreads(); if (tid < n - 1) { if (ispine[tid + 1] != vspine[tid]) { counts[tid] += 1; } } __syncthreads(); for (i = 1; i < n; i = i << 1) { if (tid >= i) { tmp = counts[tid - i]; } __syncthreads(); if (tid >= i) { counts[tid] += tmp; } __syncthreads(); } if (tid == 0) { counts[n-1] += 1; } __syncthreads(); if (tid < n) { cspine[tid] = counts[tid]; } } template<typename T> __global__ void __mergeIndsP2(T *keys, T *okeys, int *counts, int *cspine, int n) { __shared__ T dbuff[1024]; __shared__ T obuff[2048]; __shared__ int ocnts[2048]; __shared__ int icnts[1024]; int i, j, itodo, doit, lastcnt, lastocnt, obase, odone, total, coff; T thisval, lastval, tmp; int tid = threadIdx.x + threadIdx.y * blockDim.x; int imin = (int)(((long long)n) * blockIdx.x / gridDim.x); int imax = (int)(((long long)n) * (blockIdx.x + 1) / gridDim.x); int nbthreads = blockDim.x * blockDim.y; if (blockIdx.x == 0) { odone = 0; } else { odone = cspine[blockIdx.x - 1]; } obase = 0; lastocnt = imin; if (tid == 0) { lastval = keys[imin]; } for (i = imin; i < imax; i += nbthreads) { itodo = min(nbthreads, imax - i); __syncthreads(); if (i + tid < imax) { // Copy a block of input data into dbuff thisval = keys[i + tid]; dbuff[tid] = thisval; } __syncthreads(); if (tid > 0 && i + tid < imax) lastval = dbuff[tid - 1]; __syncthreads(); if (i + tid < imax) { icnts[tid] = (thisval == lastval) ? 0 : 1; // Bit that indicates a change of index } __syncthreads(); for (j = 1; j < itodo; j = j << 1) { // Cumsum of these bits = where to put key doit = tid + j < itodo; if (doit) { tmp = icnts[tid] + icnts[tid + j]; } __syncthreads(); if (doit) { icnts[tid + j] = tmp; } __syncthreads(); } total = icnts[itodo-1]; __syncthreads(); if (i + tid < imax && thisval != lastval) { // and save the key/counts there in buffer memory if (tid > 0) { lastcnt = icnts[tid-1]; } else { lastcnt = 0; } obuff[obase + lastcnt] = lastval; ocnts[obase + lastcnt] = i + tid; } __syncthreads(); obase += total; if (obase >= nbthreads) { // Buffer full so flush it okeys[odone+tid] = obuff[tid]; if (tid > 0) lastocnt = ocnts[tid-1]; coff = ocnts[tid] - lastocnt; atomicAdd(&counts[odone+tid], coff); lastocnt = ocnts[nbthreads-1]; odone += nbthreads; } __syncthreads(); if (obase >= nbthreads) { // Copy top to bottom of buffer obuff[tid] = obuff[tid+nbthreads]; ocnts[tid] = ocnts[tid+nbthreads]; obase -= nbthreads; } __syncthreads(); } if (tid == itodo-1) { obuff[obase] = thisval; ocnts[obase] = i - nbthreads + tid + 1; } __syncthreads(); if (tid <= obase) { // Flush out anything that's left okeys[odone+tid] = obuff[tid]; if (tid > 0) lastocnt = ocnts[tid-1]; coff = ocnts[tid] - lastocnt; atomicAdd(&counts[odone+tid], coff); } } // // Accepts an array of int64 keys which should be sorted. Outputs an array okeys with unique copies of each key, // with corresponding counts in the *counts* array. cspine is a working storage array in GPUmem which should be // passed in. 
The size of cspine should be at least nb32 * 32 bytes with nb32 as below (maximum 2048 bytes). // Returns the length of the output in cspine[0]. // int mergeInds(long long *keys, long long *okeys, int *counts, int n, int *cspine) { hipError_t err; int nthreads = min(n, 1024); int nt32 = 32*(1 + (nthreads-1)/32); int nblocks = min(1 + (n-1)/nthreads, 64); int nb32 = 32*(1+(nblocks - 1)/32); long long *ispine = (long long *)&cspine[2*nb32]; long long *vspine = (long long *)&cspine[4*nb32]; hipLaunchKernelGGL(( __mergeIndsP1<long long>), dim3(nblocks),dim3(nt32), 0, 0, keys, cspine, ispine, vspine, n); hipDeviceSynchronize(); err = hipGetLastError(); if (err == 0) { hipLaunchKernelGGL(( __fixSpine<long long>), dim3(1),dim3(nblocks), 0, 0, cspine, ispine, vspine, nblocks); hipDeviceSynchronize(); err = hipGetLastError(); } if (err == 0) { hipLaunchKernelGGL(( __mergeIndsP2<long long>), dim3(nblocks),dim3(nt32), 0, 0, keys, okeys, counts, cspine, n); hipDeviceSynchronize(); err = hipGetLastError(); } if (err == 0) { hipMemcpy(cspine, &cspine[nblocks-1], 4, hipMemcpyDeviceToDevice); hipDeviceSynchronize(); err = hipGetLastError(); } return err; } // // Support function for mergeInds. Returns the length of the output arrays in cspine[0]. // cspine is a working storage array in GPUmem which should be passed in. // The size of cspine should be at least nb32 * 32 bytes with nb32 as below (maximum 2048 bytes). // int getMergeIndsLen(long long *keys, int n, int *cspine) { hipError_t err; int nthreads = min(n, 1024); int nt32 = 32*(1 + (nthreads-1)/32); int nblocks = min(1 + (n-1)/nthreads, 64); int nb32 = 32*(1+(nblocks - 1)/32); long long *ispine = (long long *)&cspine[2*nb32]; long long *vspine = (long long *)&cspine[4*nb32]; hipLaunchKernelGGL(( __mergeIndsP1<long long>), dim3(nblocks),dim3(nt32), 0, 0, keys, cspine, ispine, vspine, n); hipDeviceSynchronize(); err = hipGetLastError(); if (err == 0) { hipLaunchKernelGGL(( __fixSpine<long long>), dim3(1),dim3(nblocks), 0, 0, cspine, ispine, vspine, nblocks); hipDeviceSynchronize(); err = hipGetLastError(); } if (err == 0) { hipMemcpy(cspine, &cspine[nblocks-1], 4, hipMemcpyDeviceToDevice); hipDeviceSynchronize(); err = hipGetLastError(); } return err; }
1befc46948acf817bd9f6543fb00870e555c26b0.cu
#include <cuda_runtime.h> #include <curand_kernel.h> #include <stdio.h> #include <MatKernel.hpp> static const unsigned int c1 = 0xcc9e2d51; static const unsigned int c2 = 0x1b873593; static const unsigned int r1 = 15; static const unsigned int r2 = 13; static const unsigned int m = 5; static const unsigned int n = 0xe6546b64; __device__ inline unsigned int h1(unsigned int k, unsigned int hash) { k *= c1; k = (k << r1) | (k >> (32-r1)); k *= c2; hash ^= k; hash = ((hash << r2) | (hash >> (32-r2)) * m) + n; return hash; } __device__ inline unsigned int mmhash(unsigned int v1, unsigned int v2, unsigned int v3, unsigned int mod, unsigned int seed) { unsigned int hash = seed; hash = h1(v1, hash); hash = h1(v2, hash); hash = h1(v3, hash); hash ^= (hash >> 16); hash *= 0x85ebca6b; hash ^= (hash >> 13); hash *= 0xc2b2ae35; hash ^= (hash >> 16); return (hash % mod); } #define DBSIZE (8*1024) __global__ void __treePack(int *idata, int *treenodes, int *icats, int *jc, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps) { __shared__ int dbuff[DBSIZE]; __shared__ int fl[32]; int j, k, ic, ival; int seed = 45123421; int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid < 6) { fl[tid] = fieldlens[tid]; } __syncthreads(); int vshift = fl[5]; int ishift = fl[4] + vshift; int jshift = fl[3] + ishift; int nshift = fl[2] + jshift; int tshift = fl[1] + nshift; int cmask = (1 << fl[5]) - 1; int vmask = (1 << fl[4]) - 1; int imask = (1 << fl[3]) - 1; int jmask = (1 << fl[2]) - 1; int nmask = (1 << fl[1]) - 1; int tmask = (1 << fl[0]) - 1; int nc = (DBSIZE / nrows); int itree = threadIdx.y; int jfeat = threadIdx.x; for (int i = nc * blockIdx.x; i < ncols; i += nc * gridDim.x) { int ctodo = min(nc, ncols - i); for (j = tid; j < nrows * ctodo; j += blockDim.x*blockDim.y) { dbuff[j] = idata[j + i * nrows]; } __syncthreads(); for (j = i; j < i + ctodo; j++) { for (itree = threadIdx.y; itree < ntrees; itree += blockDim.y) { int inode = treenodes[itree + j * ntrees]; int ifeat = mmhash(itree, inode, jfeat, nrows, seed); long long hdr = (((long long)(tmask & itree)) << tshift) | (((long long)(nmask & inode)) << nshift) | (((long long)(jmask & jfeat)) << jshift) | (((long long)(imask & ifeat)) << ishift) ; for (k = jc[j]; k < jc[j+1]; k++) { ic = icats[k]; if (jfeat < nsamps) { ival = dbuff[ifeat + (j - i) * nrows]; out[jfeat + nsamps * (itree + ntrees * k)] = hdr | (((long long)(vmask & ival)) << vshift) | ((long long)(ic & cmask)); } } } } __syncthreads(); } } int treePack(int *fdata, int *treenodes, int *icats, int *jc, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps) { int ntx = 32 * (1 + (nsamps - 1)/32); int nty = min(1024 / ntx, ntrees); dim3 bdim(ntx, nty, 1); int nb = min(32, 1 + (ncols-1)/32); __treePack<<<nb,bdim>>>(fdata, treenodes, icats, jc, out, fieldlens, nrows, ncols, ntrees, nsamps); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } class entImpty { public: static __device__ inline float fupdate(int v) { return (float)v * logf((float)max(1, v)); } static __device__ inline float fresult(float vacc, int vsum) { float vs = (float)max(1, vsum); return logf(vs) - vacc / vs; } }; class giniImpty { public: static __device__ inline float fupdate(int v) { return (float)v * (float)v; } static __device__ inline float fresult(float vacc, int vsum) { float vs = (float)max(1, vsum); return 1.0f - vacc / (vs*vs); } }; #if __CUDA_ARCH__ >= 300 __device__ inline void accumup2(int &cnt, float &update) { #pragma unroll for (int h = 1; h 
< 32; h = h + h) { float tmpx = __shfl_up(update, h); int tmp = __shfl_up(cnt, h); if (threadIdx.x >=h) { update += tmpx; cnt += tmp; } } } __device__ inline void accumup3(int &cnt, float &update, float &updatet) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_up(update, h); float tmpy = __shfl_up(updatet, h); int tmp = __shfl_up(cnt, h); if (threadIdx.x >=h) { update += tmpx; updatet += tmpy; cnt += tmp; } } } __device__ inline void accumdown3(int &cnt, float &update, float &updatet, int bound) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_down(update, h); float tmpy = __shfl_down(updatet, h); int tmp = __shfl_down(cnt, h); if (threadIdx.x + h <= bound) { update += tmpx; updatet += tmpy; cnt += tmp; } } } __device__ inline void minup2(float &impty, int &ival) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_up(impty, h); int tmp = __shfl_up(ival, h); if (threadIdx.x >= h && tmpx < impty) { impty = tmpx; ival = tmp; } } } __device__ inline void maxup2(int &v, int &indx) { #pragma unroll for (int h = 1; h < 32; h = h + h) { int tmpv = __shfl_up(v, h); int tmpi = __shfl_up(indx, h); if (threadIdx.x >= h && tmpv > v) { v = tmpv; indx = tmpi; } } } template<typename T> __global__ void __minImpuritya(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) { __shared__ int catcnt[DBSIZE/2]; __shared__ int cattot[DBSIZE/2]; int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid < 6) { catcnt[tid] = fieldlens[tid]; } __syncthreads(); int vshift = catcnt[5]; int ishift = catcnt[4] + vshift; int cmask = (1 << catcnt[5]) - 1; int vmask = (1 << catcnt[4]) - 1; int imask = (1 << catcnt[3]) - 1; __syncthreads(); int i, j, k, jc0, jc1, jlast; long long key; int cold, ctot, ctt, ctotall, cnew, cnt, ival, icat, lastival, bestival, tmp, maxcnt, imaxcnt; float update, updatet, cacc, cact, caccall, impty, minimpty, lastimpty, tmpx; for (i = threadIdx.y + blockDim.y * blockIdx.x; i < nnodes*nsamps; i += blockDim.y * gridDim.x) { // Process a group with fixed itree, inode, and ifeat jc0 = jc[i]; // The range of indices for this group jc1 = jc[i+1]; __syncthreads(); // Clear the cat counts for this group for (j = tid; j < DBSIZE/2; j += blockDim.x * blockDim.y) { catcnt[j] = 0; cattot[j] = 0; } __syncthreads(); // First pass gets counts for each category and the (ci)log(ci) sum for this block ctot = 0; cacc = 0.0f; maxcnt = -1; imaxcnt = -1; for (j = jc0; j < jc1; j += blockDim.x) { if (j + threadIdx.x < jc1) { // Read a block of (32) keys and counts key = keys[j + threadIdx.x]; // Each (x) thread handles a different input cnt = counts[j + threadIdx.x]; icat = ((int)key) & cmask; // Extract the cat id and integer value } jlast = min(31, jc1 - j - 1); for (k = 0; k <= jlast; k++) { // Sequentially update counts so that each thread if (threadIdx.x == k) { // in this warp gets the old and new counts cold = cattot[icat + ncats * threadIdx.y]; // i.e. 
data for item k is in thread k cnew = cold + cnt; cattot[icat + ncats * threadIdx.y] = cnew; } } update = T::fupdate(cnew) - T::fupdate(cold); accumup2(cnt,update); ctot += cnt; // Now update the total c and total ci log ci sums cacc += update; ctot = __shfl(ctot, jlast); cacc = __shfl(cacc, jlast); if (cnew > maxcnt) { // Compute and distribute the max cnt maxcnt = cnew; imaxcnt = icat; } maxup2(maxcnt, imaxcnt); maxcnt = __shfl(maxcnt, jlast); imaxcnt = __shfl(imaxcnt, jlast); } __syncthreads(); // if (threadIdx.x == 0 && i < 32) printf("cuda %d %d %f\n", i, ctot, cacc); // Second pass to compute impurity at every input point caccall = cacc; // Save the total count and (ci)log(ci) sum cact = cacc; ctotall = ctot; ctot = 0; cacc = 0.0f; lastival = -1; lastimpty = 1e7f; minimpty = 1e7f; for (j = jc0; j < jc1; j += blockDim.x) { if (j + threadIdx.x < jc1) { // Read a block of (32) keys and counts key = keys[j + threadIdx.x]; // Each (x) thread handles a different input cnt = counts[j + threadIdx.x]; icat = ((int)key) & cmask; // Extract the cat id and integer value ival = ((int)(key >> vshift)) & vmask; } jlast = min(31, jc1 - j - 1); for (k = 0; k <= jlast; k++) { // Sequentially update counts so that each thread if (threadIdx.x == k) { // in this warp gets the old and new counts cold = catcnt[icat + ncats * threadIdx.y]; // i.e. data for item k is in thread k ctt = cattot[icat + ncats * threadIdx.y]; cnew = cold + cnt; catcnt[icat + ncats * threadIdx.y] = cnew; } } update = T::fupdate(cnew) - T::fupdate(cold); // Compute the impurity updates for this input updatet = T::fupdate(ctt-cnew) - T::fupdate(ctt-cold); accumup3(cnt, update, updatet); ctot += cnt; // Now update the total c and total ci log ci sums cacc += update; cact += updatet; impty = T::fresult(cacc, ctot) + T::fresult(cact, ctotall-ctot); // And the impurity for this input // if (i == 0) printf("cuda pos %d impty %f icat %d cnts %d %d cacc %f %d\n", j + threadIdx.x, impty, icat, cold, cnew, cacc, ctot); tmp = __shfl_up(ival, 1); // Need the last impurity and ival in order tmpx = __shfl_up(impty, 1); // to restrict the partition feature to a value boundary if (threadIdx.x > 0) { lastival = tmp; lastimpty = tmpx; } if (ival == lastival) lastimpty = 1e7f; // Eliminate values which are not at value boundaries if (lastimpty < minimpty) { minimpty = lastimpty; bestival = ival; } minup2(minimpty,bestival); minimpty = __shfl(minimpty, jlast); // Carefully copy the last active thread to all threads, needed outside this loop bestival = __shfl(bestival, jlast); ctot = __shfl(ctot, jlast); cacc = __shfl(cacc, jlast); cact = __shfl(cact, jlast); lastival = __shfl(ival, jlast); lastimpty = __shfl(impty, jlast); } if (threadIdx.x == 0) { outv[i] = bestival; // Output the best split feature value outf[i] = ((int)(key >> ishift)) & imask; // Save the feature index outg[i] = T::fresult(caccall, ctotall) - minimpty; // And the impurity gain outc[i] = imaxcnt; } } } template<typename T> __global__ void __minImpurityb(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) { __shared__ int catcnt[DBSIZE]; __shared__ int cattot[DBSIZE/4]; __shared__ int stott[32]; __shared__ float sacct[32]; __shared__ int slastival[64]; __shared__ int sbestival[32]; __shared__ float sminimpty[32]; int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid < 6) { catcnt[tid] = fieldlens[tid]; } __syncthreads(); int vshift = catcnt[5]; int ishift = catcnt[4] + vshift; int cmask = (1 << 
catcnt[5]) - 1; int vmask = (1 << catcnt[4]) - 1; int imask = (1 << catcnt[3]) - 1; __syncthreads(); int i, j, k, h, jc0, jc1, ilast, jlast; long long key; int cold, tot, ctt, tott, cnew, cnt, ncnt, tcnt, ival, icat, lastival, bestival, tmp; float update, updatet, acc, acct, impty, minimpty; for (i = blockIdx.x; i < nnodes*nsamps; i += gridDim.x) { // Process a group with fixed itree, inode, and ifeat jc0 = jc[i]; // The range of indices for this group jc1 = jc[i+1]; __syncthreads(); // Clear the cat counts and totals for (j = threadIdx.x; j < ncats; j += blockDim.x) { catcnt[j + threadIdx.y * blockDim.x] = 0; if (threadIdx.y == 0) cattot[j] = 0; } if (threadIdx.y == 0) { sminimpty[threadIdx.x] = 1e7f; sbestival[threadIdx.x] = -1; } __syncthreads(); // First pass gets counts for each category and the (ci)log(ci) sum for this entire ifeat group for (j = jc0; j < jc1; j += blockDim.x * blockDim.x) { if (j + tid < jc1) { // Read a block of keys and counts key = keys[j + tid]; cnt = counts[j + tid]; icat = ((int)key) & cmask; // Extract the cat id atomicAdd(&cattot[icat + threadIdx.y * ncats], cnt); // Update count totals } } __syncthreads(); tott = 0; // Compute total count and (c)log(c) for the entire ifeat group acct = 0; if (threadIdx.y == 0) { for (k = 0; k < ncats; k += blockDim.x) { if (k + threadIdx.x < ncats) { tcnt = cattot[k + threadIdx.x]; update = T::fupdate(tcnt); } else { tcnt = 0; update = 0; } accumup2(tcnt,update); ilast = min(31, ncats - k - 1); tcnt = __shfl(tcnt, ilast); update = __shfl(update, ilast); tott += tcnt; acct += update; } stott[threadIdx.x] = tott; sacct[threadIdx.x] = acct; } tott = stott[threadIdx.x]; // if (tid == 0 && i < 32) printf("cuda %d %d %f\n", i, tott, acct); // Main loop, work on blocks of 1024 (ideally) for (j = jc0; j < jc1; j += blockDim.x * blockDim.x) { for (k = 0; k < ncats; k += blockDim.x) { // copy cumcounts from last row of last iteration to the first row tmp = catcnt[k + threadIdx.x + (blockDim.y -1) * ncats]; __syncthreads(); if (threadIdx.y == 0) { catcnt[k + threadIdx.x] = tmp; } else { catcnt[k + threadIdx.x + threadIdx.y * ncats] = 0; } __syncthreads(); } if (j + tid < jc1) { // Read a block of keys and counts key = keys[j + tid]; cnt = counts[j + tid]; icat = ((int)key) & cmask; // Extract the cat id and integer value; ival = ((int)(key >> vshift)) & vmask; atomicAdd(&catcnt[icat + threadIdx.y * ncats], cnt); // Update count totals } jlast = min(31, jc1 - j - threadIdx.y * 32 - 1); // Save the last value in this group if (threadIdx.x == jlast) { slastival[threadIdx.y + 1] = ival; } __syncthreads(); for (k = 0; k < ncats; k += blockDim.x) { // Form the cumsum along columns of catcnts for (h = 1; h < blockDim.y; h = h + h) { if (k + threadIdx.x < ncats && blockIdx.y + h < blockDim.y) { tmp = catcnt[k + threadIdx.x + ncats * threadIdx.y]; } __syncthreads(); if (k + threadIdx.x < ncats && blockIdx.y + h < blockDim.y) { catcnt[k + threadIdx.x + ncats * (threadIdx.y + h)] += tmp; } __syncthreads(); } } tot = 0; // Local to a yblock (row) of catcnts acc = 0.0f; acct = 0.0f; for (k = 0; k < ncats; k += blockDim.x) { // Now sum within a row (yblock) if (k + threadIdx.x < ncats) { cnt = catcnt[k + threadIdx.x + threadIdx.y * ncats]; update = T::fupdate(cnt); updatet = T::fupdate(cattot[k + threadIdx.x] - cnt); } else { cnt = 0; update = 0; updatet = 0; } accumup3(cnt,update,updatet); ilast = min(31, ncats - k - 1); update = __shfl(update, ilast); updatet = __shfl(updatet, ilast); cnt = __shfl(cnt, ilast); tot += cnt; acc += update; acct += 
updatet; } __syncthreads(); // OK, we have everything needed now to compute impurity for the rows in this yblock: // tot, acc, acct at the end of the block lastival = -1; minimpty = 1e7f; ncnt = -cnt; for (k = jlast; k >= 0; k--) { // Sequentially update counts so that each thread if (threadIdx.x == k) { // in this warp gets the old and new counts cold = catcnt[icat + ncats * threadIdx.y]; // i.e. data for item k is in thread k ctt = cattot[icat + ncats * threadIdx.y]; cnew = cold + ncnt; catcnt[icat + ncats * threadIdx.y] = cnew; } } update = T::fupdate(cnew) - T::fupdate(cold); updatet = T::fupdate(ctt - cnew) - T::fupdate(ctt - cold); accumdown3(ncnt,update,updatet,jlast); tot += cnt; // Now update the total c and total ci log ci sums acc += update; acct += updatet; impty = T::fresult(acc, tot) + T::fresult(acct, tott - tot); // And the impurity for this input tmp = __shfl_up(ival, 1); if (threadIdx.x > 0) { // Get the last ival to check for a boundary lastival = tmp; } else { lastival = slastival[threadIdx.y]; } __syncthreads(); if (tid == 0) { tmp = slastival[33]; slastival[0] = tmp; } __syncthreads(); if (ival == lastival) impty = 1e7f; // Eliminate values which are not at value boundaries if (impty < minimpty) { minimpty = impty; bestival = ival; } minup2(minimpty,bestival); minimpty = __shfl(minimpty, jlast); bestival = __shfl(bestival, jlast); if (threadIdx.x == 0) { sminimpty[threadIdx.y] = minimpty; sbestival[threadIdx.y] = bestival; } __syncthreads(); if (threadIdx.y == 0) { minimpty = sminimpty[threadIdx.x]; bestival = sbestival[threadIdx.x]; minup2(minimpty,bestival); minimpty = __shfl(minimpty, blockDim.y - 1); bestival = __shfl(bestival, blockDim.y - 1); sminimpty[threadIdx.x] = minimpty; sbestival[threadIdx.x] = bestival; } __syncthreads(); } if (tid == 0) { outv[i] = bestival; // Output the best split feature value outf[i] = (int)((key >> ishift) & imask); // Save the feature index // outg[i] = T::fresult(sacct[0], tott) - minimpty; // And the impurity gain outg[i] = T::fresult(sacct[0], tott); // And the impurity gain } __syncthreads(); } } #else template<class T> __global__ void __minImpuritya(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) {} template<class T> __global__ void __minImpurityb(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) {} #endif int minImpurity(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps, int impType) { // Note: its safe to round ncats up to a multiple of 32, since its only used to split shmem int ny = min(32, DBSIZE/ncats/2); dim3 tdim(32, ny, 1); int ng = min(64, nnodes*nsamps); if ((impType & 2) == 0) { if ((impType & 1) == 0) { __minImpuritya<entImpty><<<ng,tdim>>>(keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } else { __minImpuritya<giniImpty><<<ng,tdim>>>(keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } } else { if ((impType & 1) == 0) { __minImpurityb<entImpty><<<ng,tdim>>>(keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } else { __minImpurityb<giniImpty><<<ng,tdim>>>(keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } } fflush(stdout); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __findBoundaries(long long *keys, int 
*jc, int n, int njc, int shift) { __shared__ int dbuff[1024]; int i, j, iv, lasti; int imin = ((int)(32 * ((((long long)n) * blockIdx.x) / (gridDim.x * 32)))); int imax = min(n, ((int)(32 * ((((long long)n) * (blockIdx.x + 1)) / (gridDim.x * 32) + 1)))); int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid == 0 && blockIdx.x == 0) { jc[0] = 0; } __syncthreads(); lasti = 0x7fffffff; for (i = imin; i <= imax; i += blockDim.x * blockDim.y) { iv = njc; if (i + tid < imax) { iv = (int)(keys[i + tid] >> shift); dbuff[tid] = iv; } __syncthreads(); if (i + tid < imax || i + tid == n) { if (tid > 0) lasti = dbuff[tid - 1]; if (iv > lasti) { for (j = lasti+1; j <= iv; j++) { jc[j] = i + tid; } } if (tid == 0) { lasti = dbuff[blockDim.x * blockDim.y - 1]; } } __syncthreads(); } } int findBoundaries(long long *keys, int *jc, int n, int njc, int shift) { int ny = min(32, 1 + (n-1)/32); dim3 tdim(32, ny, 1); int ng = min(64, 1+n/32/ny); __findBoundaries<<<ng,tdim>>>(keys, jc, n, njc, shift); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } template<typename T> __global__ void __mergeIndsP1(T *keys, int *cspine, T *ispine, T *vspine, int n) { __shared__ T dbuff[1024]; int i, j, itodo, doit, total; T thisval, lastval, endval, tmp; int tid = threadIdx.x + threadIdx.y * blockDim.x; int imin = (int)(((long long)n) * blockIdx.x / gridDim.x); int imax = (int)(((long long)n) * (blockIdx.x + 1) / gridDim.x); total = 0; if (tid == 0) { lastval = keys[imin]; ispine[blockIdx.x] = lastval; } for (i = imin; i < imax; i += blockDim.x * blockDim.y) { itodo = min(blockDim.x * blockDim.y, imax - i); __syncthreads(); if (i + tid < imax) { thisval = keys[i + tid]; dbuff[tid] = thisval; } __syncthreads(); if (tid > 0 && i + tid < imax) lastval = dbuff[tid - 1]; if (tid == 0) endval = dbuff[itodo-1]; __syncthreads(); if (i + tid < imax) { dbuff[tid] = (thisval == lastval) ? 
0 : 1; } __syncthreads(); for (j = 1; j < itodo; j = j + j) { doit = tid + j < itodo && (tid & ((j + j)-1)) == 0; if (doit) { tmp = dbuff[tid] + dbuff[tid + j]; } __syncthreads(); if (doit) { dbuff[tid] = tmp; } __syncthreads(); } if (tid == 0) { total += dbuff[0]; lastval = endval; } __syncthreads(); } if (tid == 0) { cspine[blockIdx.x] = total; vspine[blockIdx.x] = endval; } } template<typename T> __global__ void __fixSpine(int *cspine, T *ispine, T *vspine, int n) { __shared__ int counts[1024]; int tid = threadIdx.x + threadIdx.y * blockDim.x; int i, tmp; if (tid < n) { counts[tid] = cspine[tid]; } __syncthreads(); if (tid < n - 1) { if (ispine[tid + 1] != vspine[tid]) { counts[tid] += 1; } } __syncthreads(); for (i = 1; i < n; i = i << 1) { if (tid >= i) { tmp = counts[tid - i]; } __syncthreads(); if (tid >= i) { counts[tid] += tmp; } __syncthreads(); } if (tid == 0) { counts[n-1] += 1; } __syncthreads(); if (tid < n) { cspine[tid] = counts[tid]; } } template<typename T> __global__ void __mergeIndsP2(T *keys, T *okeys, int *counts, int *cspine, int n) { __shared__ T dbuff[1024]; __shared__ T obuff[2048]; __shared__ int ocnts[2048]; __shared__ int icnts[1024]; int i, j, itodo, doit, lastcnt, lastocnt, obase, odone, total, coff; T thisval, lastval, tmp; int tid = threadIdx.x + threadIdx.y * blockDim.x; int imin = (int)(((long long)n) * blockIdx.x / gridDim.x); int imax = (int)(((long long)n) * (blockIdx.x + 1) / gridDim.x); int nbthreads = blockDim.x * blockDim.y; if (blockIdx.x == 0) { odone = 0; } else { odone = cspine[blockIdx.x - 1]; } obase = 0; lastocnt = imin; if (tid == 0) { lastval = keys[imin]; } for (i = imin; i < imax; i += nbthreads) { itodo = min(nbthreads, imax - i); __syncthreads(); if (i + tid < imax) { // Copy a block of input data into dbuff thisval = keys[i + tid]; dbuff[tid] = thisval; } __syncthreads(); if (tid > 0 && i + tid < imax) lastval = dbuff[tid - 1]; __syncthreads(); if (i + tid < imax) { icnts[tid] = (thisval == lastval) ? 0 : 1; // Bit that indicates a change of index } __syncthreads(); for (j = 1; j < itodo; j = j << 1) { // Cumsum of these bits = where to put key doit = tid + j < itodo; if (doit) { tmp = icnts[tid] + icnts[tid + j]; } __syncthreads(); if (doit) { icnts[tid + j] = tmp; } __syncthreads(); } total = icnts[itodo-1]; __syncthreads(); if (i + tid < imax && thisval != lastval) { // and save the key/counts there in buffer memory if (tid > 0) { lastcnt = icnts[tid-1]; } else { lastcnt = 0; } obuff[obase + lastcnt] = lastval; ocnts[obase + lastcnt] = i + tid; } __syncthreads(); obase += total; if (obase >= nbthreads) { // Buffer full so flush it okeys[odone+tid] = obuff[tid]; if (tid > 0) lastocnt = ocnts[tid-1]; coff = ocnts[tid] - lastocnt; atomicAdd(&counts[odone+tid], coff); lastocnt = ocnts[nbthreads-1]; odone += nbthreads; } __syncthreads(); if (obase >= nbthreads) { // Copy top to bottom of buffer obuff[tid] = obuff[tid+nbthreads]; ocnts[tid] = ocnts[tid+nbthreads]; obase -= nbthreads; } __syncthreads(); } if (tid == itodo-1) { obuff[obase] = thisval; ocnts[obase] = i - nbthreads + tid + 1; } __syncthreads(); if (tid <= obase) { // Flush out anything that's left okeys[odone+tid] = obuff[tid]; if (tid > 0) lastocnt = ocnts[tid-1]; coff = ocnts[tid] - lastocnt; atomicAdd(&counts[odone+tid], coff); } } // // Accepts an array of int64 keys which should be sorted. Outputs an array okeys with unique copies of each key, // with corresponding counts in the *counts* array. cspine is a working storage array in GPUmem which should be // passed in. 
The size of cspine should be at least nb32 * 32 bytes with nb32 as below (maximum 2048 bytes). // Returns the length of the output in cspine[0]. // int mergeInds(long long *keys, long long *okeys, int *counts, int n, int *cspine) { cudaError_t err; int nthreads = min(n, 1024); int nt32 = 32*(1 + (nthreads-1)/32); int nblocks = min(1 + (n-1)/nthreads, 64); int nb32 = 32*(1+(nblocks - 1)/32); long long *ispine = (long long *)&cspine[2*nb32]; long long *vspine = (long long *)&cspine[4*nb32]; __mergeIndsP1<long long><<<nblocks,nt32>>>(keys, cspine, ispine, vspine, n); cudaDeviceSynchronize(); err = cudaGetLastError(); if (err == 0) { __fixSpine<long long><<<1,nblocks>>>(cspine, ispine, vspine, nblocks); cudaDeviceSynchronize(); err = cudaGetLastError(); } if (err == 0) { __mergeIndsP2<long long><<<nblocks,nt32>>>(keys, okeys, counts, cspine, n); cudaDeviceSynchronize(); err = cudaGetLastError(); } if (err == 0) { cudaMemcpy(cspine, &cspine[nblocks-1], 4, cudaMemcpyDeviceToDevice); cudaDeviceSynchronize(); err = cudaGetLastError(); } return err; } // // Support function for mergeInds. Returns the length of the output arrays in cspine[0]. // cspine is a working storage array in GPUmem which should be passed in. // The size of cspine should be at least nb32 * 32 bytes with nb32 as below (maximum 2048 bytes). // int getMergeIndsLen(long long *keys, int n, int *cspine) { cudaError_t err; int nthreads = min(n, 1024); int nt32 = 32*(1 + (nthreads-1)/32); int nblocks = min(1 + (n-1)/nthreads, 64); int nb32 = 32*(1+(nblocks - 1)/32); long long *ispine = (long long *)&cspine[2*nb32]; long long *vspine = (long long *)&cspine[4*nb32]; __mergeIndsP1<long long><<<nblocks,nt32>>>(keys, cspine, ispine, vspine, n); cudaDeviceSynchronize(); err = cudaGetLastError(); if (err == 0) { __fixSpine<long long><<<1,nblocks>>>(cspine, ispine, vspine, nblocks); cudaDeviceSynchronize(); err = cudaGetLastError(); } if (err == 0) { cudaMemcpy(cspine, &cspine[nblocks-1], 4, cudaMemcpyDeviceToDevice); cudaDeviceSynchronize(); err = cudaGetLastError(); } return err; }
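The comment block above already documents the mergeInds contract: sorted int64 keys in, unique keys and per-key counts out, result length left in cspine[0], with cspine sized at 32*nb32 bytes (at most 2048). A hedged host-side driver is sketched below; runMergeInds, the thrust sort, and the cudaMemset of counts are illustrative additions based on that comment and on the atomicAdd in __mergeIndsP2, not code from the original library.

#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <cuda_runtime.h>
#include <cstdio>

// Hypothetical driver for the mergeInds entry point above (assumed visible here).
// keys:   device array of n int64 keys (sorted in place below)
// okeys:  device array with room for up to n unique keys
// counts: device array with room for up to n ints
void runMergeInds(long long* keys, long long* okeys, int* counts, int n)
{
    // mergeInds expects its keys pre-sorted
    thrust::device_ptr<long long> dkeys(keys);
    thrust::sort(dkeys, dkeys + n);

    // working storage: the source comment asks for at least 32*nb32 bytes (max 2048)
    int* cspine;
    cudaMalloc(&cspine, 2048);

    // __mergeIndsP2 accumulates counts with atomicAdd, so clear them first
    cudaMemset(counts, 0, n * sizeof(int));

    int err = mergeInds(keys, okeys, counts, n, cspine);

    // the number of unique keys is returned in cspine[0] (device memory)
    int nunique = 0;
    cudaMemcpy(&nunique, cspine, sizeof(int), cudaMemcpyDeviceToHost);
    printf("mergeInds: err=%d, unique keys=%d\n", err, nunique);

    cudaFree(cspine);
}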
641f040ef9e08890500f88037d8ca007eac7dfe7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.2) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date May 2015 @precisions normal z -> c d s */ #include "common_magma.h" #define BLOCK_SIZE 256 // CSR-SpMV kernel __global__ void zgecsrmv_kernel( int num_rows, int num_cols, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ magmaDoubleComplex dot = MAGMA_Z_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * dx[ dcolind[j] ]; dy[ row ] = dot *alpha + beta * dy[ row ]; } } // shifted CSR-SpMV kernel __global__ void zgecsrmv_kernel_shift( int num_rows, int num_cols, magmaDoubleComplex alpha, magmaDoubleComplex lambda, magmaDoubleComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, int offset, int blocksize, magma_index_t * addrows, magmaDoubleComplex * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ magmaDoubleComplex dot = MAGMA_Z_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * dx[ dcolind[j] ]; if( row<blocksize ) dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ]; else dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ]; } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. The input format is CSR (val, row, col). Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgecsrmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( zgecsrmv_kernel), dim3(grid), dim3(threads), 0, queue , m, n, alpha, dval, drowptr, dcolind, dx, beta, dy); return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha * ( A -lambda I ) * x + beta * y on the GPU. It is a shifted version of the CSR-SpMV. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] lambda magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[in] offset magma_int_t in case not the main diagonal is scaled @param[in] blocksize magma_int_t in case of processing multiple vectors @param[in] addrows magmaIndex_ptr in case the matrixpowerskernel is used @param[out] dy magmaDoubleComplex_ptr output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgecsrmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex lambda, magmaDoubleComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, int offset, int blocksize, magma_index_t * addrows, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( zgecsrmv_kernel_shift), dim3(grid), dim3(threads), 0, queue , m, n, alpha, lambda, dval, drowptr, dcolind, dx, beta, offset, blocksize, addrows, dy); return MAGMA_SUCCESS; }
641f040ef9e08890500f88037d8ca007eac7dfe7.cu
/* -- MAGMA (version 1.6.2) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date May 2015 @precisions normal z -> c d s */ #include "common_magma.h" #define BLOCK_SIZE 256 // CSR-SpMV kernel __global__ void zgecsrmv_kernel( int num_rows, int num_cols, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ magmaDoubleComplex dot = MAGMA_Z_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * dx[ dcolind[j] ]; dy[ row ] = dot *alpha + beta * dy[ row ]; } } // shifted CSR-SpMV kernel __global__ void zgecsrmv_kernel_shift( int num_rows, int num_cols, magmaDoubleComplex alpha, magmaDoubleComplex lambda, magmaDoubleComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, int offset, int blocksize, magma_index_t * addrows, magmaDoubleComplex * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ magmaDoubleComplex dot = MAGMA_Z_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * dx[ dcolind[j] ]; if( row<blocksize ) dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ]; else dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ]; } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. The input format is CSR (val, row, col). Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgecsrmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; zgecsrmv_kernel<<< grid, threads, 0, queue >>> (m, n, alpha, dval, drowptr, dcolind, dx, beta, dy); return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha * ( A -lambda I ) * x + beta * y on the GPU. It is a shifted version of the CSR-SpMV. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] lambda magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[in] offset magma_int_t in case not the main diagonal is scaled @param[in] blocksize magma_int_t in case of processing multiple vectors @param[in] addrows magmaIndex_ptr in case the matrixpowerskernel is used @param[out] dy magmaDoubleComplex_ptr output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgecsrmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex lambda, magmaDoubleComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, int offset, int blocksize, magma_index_t * addrows, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; zgecsrmv_kernel_shift<<< grid, threads, 0, queue >>> (m, n, alpha, lambda, dval, drowptr, dcolind, dx, beta, offset, blocksize, addrows, dy); return MAGMA_SUCCESS; }
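/*
 * Hedged usage sketch -- not part of the MAGMA file above. Both variants of
 * magma_zgecsrmv expect the matrix in CSR layout (dval/drowptr/dcolind) and
 * launch one thread per row with grid = magma_ceildiv(m, BLOCK_SIZE). The
 * standalone program below reproduces that pattern with plain doubles instead
 * of magmaDoubleComplex; all names (csr_spmv, h_val, ...) are illustrative,
 * not MAGMA API.
 */
#include <cstdio>
#include <cuda_runtime.h>

#define BLOCK_SIZE 256

__global__ void csr_spmv(int m, double alpha, const double *val,
                         const int *rowptr, const int *colind,
                         const double *x, double beta, double *y)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < m) {
        double dot = 0.0;
        for (int j = rowptr[row]; j < rowptr[row + 1]; j++)
            dot += val[j] * x[colind[j]];
        y[row] = alpha * dot + beta * y[row];
    }
}

int main()
{
    /* 3x3 matrix [[4,1,0],[0,2,0],[1,0,3]] stored in CSR form */
    const int m = 3, nnz = 5;
    double h_val[nnz]   = {4, 1, 2, 1, 3};
    int    h_rowptr[m+1] = {0, 2, 3, 5};
    int    h_colind[nnz] = {0, 1, 1, 0, 2};
    double h_x[m] = {1, 1, 1}, h_y[m] = {0, 0, 0};

    double *d_val, *d_x, *d_y;
    int *d_rowptr, *d_colind;
    cudaMalloc(&d_val,    nnz * sizeof(double));
    cudaMalloc(&d_rowptr, (m + 1) * sizeof(int));
    cudaMalloc(&d_colind, nnz * sizeof(int));
    cudaMalloc(&d_x,      m * sizeof(double));
    cudaMalloc(&d_y,      m * sizeof(double));
    cudaMemcpy(d_val,    h_val,    nnz * sizeof(double),  cudaMemcpyHostToDevice);
    cudaMemcpy(d_rowptr, h_rowptr, (m + 1) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_colind, h_colind, nnz * sizeof(int),     cudaMemcpyHostToDevice);
    cudaMemcpy(d_x,      h_x,      m * sizeof(double),    cudaMemcpyHostToDevice);
    cudaMemcpy(d_y,      h_y,      m * sizeof(double),    cudaMemcpyHostToDevice);

    /* same grid sizing as the MAGMA routine: ceil(m / BLOCK_SIZE) blocks */
    dim3 grid((m + BLOCK_SIZE - 1) / BLOCK_SIZE);
    csr_spmv<<<grid, BLOCK_SIZE>>>(m, 1.0, d_val, d_rowptr, d_colind, d_x, 0.0, d_y);

    cudaMemcpy(h_y, d_y, m * sizeof(double), cudaMemcpyDeviceToHost);
    printf("y = [%g %g %g]  (expected [5 2 4])\n", h_y[0], h_y[1], h_y[2]);

    cudaFree(d_val); cudaFree(d_rowptr); cudaFree(d_colind);
    cudaFree(d_x);   cudaFree(d_y);
    return 0;
}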
b4ad40bf53383afb00005e6b1258e9445bc702b9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2021 Stanford University, NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "realm_saxpy.h" __global__ void gpu_saxpy(const float alpha, //const int num_elements, Rect<1> bounds, AffineAccessor<float, 1> ra_x, AffineAccessor<float, 1> ra_y, AffineAccessor<float, 1> ra_z) // const float *x, const float *y, float *z) { int p = bounds.lo + (blockIdx.x * blockDim.x) + threadIdx.x; if (p <= bounds.hi) ra_z[p] += alpha * ra_x[p] + ra_y[p]; } __host__ void gpu_saxpy_task(const void *args, size_t arglen, const void *userdata, size_t userlen, Processor p) { assert(arglen == sizeof(SaxpyArgs)); const SaxpyArgs *saxpy_args = (const SaxpyArgs*)args; printf("Running GPU Saxpy Task\n\n"); // get affine accessors for each of our three instances AffineAccessor<float, 1> ra_x = AffineAccessor<float, 1>(saxpy_args->x_inst, FID_X); AffineAccessor<float, 1> ra_y = AffineAccessor<float, 1>(saxpy_args->y_inst, FID_Y); AffineAccessor<float, 1> ra_z = AffineAccessor<float, 1>(saxpy_args->z_inst, FID_Z); size_t num_elements = saxpy_args->bounds.volume(); size_t cta_threads = 256; size_t total_ctas = (num_elements + (cta_threads-1))/cta_threads; hipLaunchKernelGGL(( gpu_saxpy), dim3(total_ctas), dim3(cta_threads), 0, 0, saxpy_args->alpha, saxpy_args->bounds, ra_x, ra_y, ra_z); // LOOK: NO WAIT! :) }
b4ad40bf53383afb00005e6b1258e9445bc702b9.cu
/* Copyright 2021 Stanford University, NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "realm_saxpy.h" __global__ void gpu_saxpy(const float alpha, //const int num_elements, Rect<1> bounds, AffineAccessor<float, 1> ra_x, AffineAccessor<float, 1> ra_y, AffineAccessor<float, 1> ra_z) // const float *x, const float *y, float *z) { int p = bounds.lo + (blockIdx.x * blockDim.x) + threadIdx.x; if (p <= bounds.hi) ra_z[p] += alpha * ra_x[p] + ra_y[p]; } __host__ void gpu_saxpy_task(const void *args, size_t arglen, const void *userdata, size_t userlen, Processor p) { assert(arglen == sizeof(SaxpyArgs)); const SaxpyArgs *saxpy_args = (const SaxpyArgs*)args; printf("Running GPU Saxpy Task\n\n"); // get affine accessors for each of our three instances AffineAccessor<float, 1> ra_x = AffineAccessor<float, 1>(saxpy_args->x_inst, FID_X); AffineAccessor<float, 1> ra_y = AffineAccessor<float, 1>(saxpy_args->y_inst, FID_Y); AffineAccessor<float, 1> ra_z = AffineAccessor<float, 1>(saxpy_args->z_inst, FID_Z); size_t num_elements = saxpy_args->bounds.volume(); size_t cta_threads = 256; size_t total_ctas = (num_elements + (cta_threads-1))/cta_threads; gpu_saxpy<<<total_ctas, cta_threads>>>(saxpy_args->alpha, saxpy_args->bounds, ra_x, ra_y, ra_z); // LOOK: NO WAIT! :) }
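/*
 * Hedged sketch -- not part of the Realm example above. The only change
 * hipify makes to this pair is the launch syntax: the triple-chevron
 * gpu_saxpy<<<total_ctas, cta_threads>>>(...) becomes
 * hipLaunchKernelGGL(gpu_saxpy, dim3(total_ctas), dim3(cta_threads), 0, 0, ...).
 * The standalone program below performs the same z += alpha*x + y update on
 * raw device pointers, without Realm's Rect<1>/AffineAccessor types, so it
 * compiles on its own; all names are illustrative.
 */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void saxpy_raw(float alpha, const float *x, const float *y,
                          float *z, size_t n)
{
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        z[i] += alpha * x[i] + y[i];
}

int main()
{
    const size_t n = 1 << 20;
    float *x, *y, *z;                        // unified memory keeps the sketch short
    cudaMallocManaged(&x, n * sizeof(float));
    cudaMallocManaged(&y, n * sizeof(float));
    cudaMallocManaged(&z, n * sizeof(float));
    for (size_t i = 0; i < n; i++) { x[i] = 1.0f; y[i] = 2.0f; z[i] = 0.0f; }

    /* same grid sizing as the Realm task */
    size_t cta_threads = 256;
    size_t total_ctas  = (n + cta_threads - 1) / cta_threads;
    saxpy_raw<<<total_ctas, cta_threads>>>(2.0f, x, y, z, n);
    cudaDeviceSynchronize();                 // unlike the Realm task, we wait here

    printf("z[0] = %f (expected 4.0)\n", z[0]);
    cudaFree(x); cudaFree(y); cudaFree(z);
    return 0;
}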
d077c7b3624f49c8cbf6e5726c398549bbdc2c90.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "poli2.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *poli = NULL; hipMalloc(&poli, XSIZE*YSIZE); const int N = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( poli2), dim3(gridBlock),dim3(threadBlock), 0, 0, poli,N); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( poli2), dim3(gridBlock),dim3(threadBlock), 0, 0, poli,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( poli2), dim3(gridBlock),dim3(threadBlock), 0, 0, poli,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
d077c7b3624f49c8cbf6e5726c398549bbdc2c90.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "poli2.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *poli = NULL; cudaMalloc(&poli, XSIZE*YSIZE); const int N = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); poli2<<<gridBlock,threadBlock>>>(poli,N); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { poli2<<<gridBlock,threadBlock>>>(poli,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { poli2<<<gridBlock,threadBlock>>>(poli,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
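/*
 * Hedged note and sketch -- not part of the generated benchmark above. Its
 * timed loop ends without a cudaDeviceSynchronize(), and kernel launches are
 * asynchronous, so the steady_clock interval largely measures enqueue
 * overhead once the launch queue backs up. A common alternative is to bracket
 * the loop with cudaEvent_t markers, which are timestamped when the GPU
 * reaches them. dummy_kernel below is a stand-in for poli2, whose definition
 * lives in the included poli2.cu; all names are illustrative.
 */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel(float *data, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] = data[i] * 2.0f + 1.0f;
}

int main()
{
    const int n = 1 << 20;
    float *d = nullptr;
    cudaMalloc(&d, n * sizeof(float));
    cudaMemset(d, 0, n * sizeof(float));

    dim3 block(256), grid((n + 255) / 256);
    for (int i = 0; i < 10; i++)                  // warm-up, as in the harness
        dummy_kernel<<<grid, block>>>(d, n);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < 1000; i++)
        dummy_kernel<<<grid, block>>>(d, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                   // wait until the GPU reaches 'stop'

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    /* ms covers 1000 launches, so the per-launch average in microseconds
       equals ms numerically: (ms / 1000 launches) * 1000 us/ms */
    printf("1000 launches: %.3f ms total, %.3f us each\n", ms, ms);

    cudaEventDestroy(start); cudaEventDestroy(stop);
    cudaFree(d);
    return 0;
}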
b2306e7825a771b6664c892975f58dd75d0b9b3f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <ctime> //Function that verify cuda calls and return cuda error if any #define gpuCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } //Initialise ascendant array with random values in void init_array(int* array, int size, int const adder=10) { array[0] = rand()%adder; for(int i = 0; i < size;i++) { array[i] = array[i-1] + rand()%adder; } } //Function that initialise array with random values void init_array_no_order(int* array, int size, int const adder=10) { array[0] = rand()%adder; for(int i = 0; i < size;i++) { array[i] = rand()%adder; } } //Function that copy array in another void copy_array(int* a, int* a_copy, int n){ for(int i = 0;i < n;i++){ a_copy[i] = a[i]; } } //Function that print an array of size size void print_array(int* a, int size) { printf("["); for(int i = 0; i < size;i++) { //printf("i = %d | v = %d " ,i, a[i]); printf("%d " ,a[i]); } printf("]\n"); } //Device version of parallel merge of a and b in m with |m|<1024 __global__ void mergeSmall_k(int* m, int n, int size) { int gbx = blockIdx.x; int tidx = threadIdx.x; int i = gbx * blockDim.x + tidx; if(i < n) { int L1, R1, L2, R2; L1 = gbx*blockDim.x; R1 = gbx*blockDim.x + size-1; L2 = gbx*blockDim.x + size; R2 = gbx*blockDim.x + 2*size-1; if(L2 < n) { // printf("L1 : %d, R1 : %d, L2 : %d, R2 : %d\n", L1, R1, L2, R2); if(R2 >= n){ R2 = n-1; } __shared__ int *d_a, *d_b; int n_a = R1-L1+1; int n_b = R2-L2+1; int n_m = n_a+n_b; d_a = (int*)malloc(n_a*sizeof(int)); d_b = (int*)malloc(n_b*sizeof(int)); __syncthreads(); // printf("tidx : %d, n_a : %d\n", tidx, n_a); if (tidx < n_a) { // printf("m[%d] : %d\n", i, m[i]); d_a[tidx] = m[i]; // printf("d_a_%d[%d] = %d\n", gbx, tidx, d_a[tidx]); } else if (tidx < n_m) { d_b[tidx - n_a] = m[i]; // printf("d_b_%d[%d] = %d\n", gbx, tidx - n_a, d_b[tidx - n_a]); } __syncthreads(); int2 K; int2 P; int2 Q; // printf("n_a : %d, n_b : %d\n", n_a, n_b); if(tidx > n_a) { K.x = tidx - n_a; K.y = n_a; P.x = n_a; P.y = tidx - n_a; } else { K.x = 0; K.y = tidx; P.x = tidx; P.y = 0; } int offset = 0; while(1) { offset = abs(K.y - P.y)/2; Q.x = K.x + offset; Q.y = K.y - offset; if(Q.y >= 0 && Q.x <= n_b && (Q.y == n_a || Q.x == 0 || d_a[Q.y] > d_b[Q.x - 1])) { if(Q.x == n_b || Q.y == 0 || d_a[Q.y - 1] <= d_b[Q.x]) { if(Q.y < n_a && (Q.x == n_b || d_a[Q.y] <= d_b[Q.x])) { m[i] = d_a[Q.y]; // printf("## m[%d] : %d, d_a_%d[%d] : %d\n",i, m[i], gbx, Q.y, d_a[Q.y]); } else { m[i] = d_b[Q.x]; // printf("## m[%d] : %d, d_b_%d[%d] : %d\n",i, m[i], gbx, Q.x, d_b[Q.x]); } break; } else { K.x = Q.x + 1; K.y = Q.y - 1; } } else { P.x = Q.x - 1; P.y = Q.y + 1; } } __syncthreads(); } } } /** Parallel version of merge of A and B with |A| + |B| <= 1024 @param d_a, d_b : device versions of arrays to merge d_m : device version of merge of a and b n_a, n_b, n_b : respective sizes of d_a, d_b, d_m */ __device__ void mergeSmall_k(int* d_a, int* d_b, int* d_m, int n_a, int n_b, int n_m){ int i = threadIdx.x; if(i < n_m) { int2 K; int2 P; int2 Q; if(i > n_a) { K.x = i - n_a; K.y = n_a; P.x = n_a; P.y = i - n_a; } else { K.x = 0; K.y = i; P.x = i; P.y = 0; } int offset = 0; while(1) { offset = abs(K.y - P.y)/2; Q.x = K.x + offset; Q.y = K.y - offset; if(Q.y 
>= 0 && Q.x <= n_b && (Q.y == n_a || Q.x == 0 || d_a[Q.y] > d_b[Q.x - 1])) { if(Q.x == n_b || Q.y == 0 || d_a[Q.y - 1] <= d_b[Q.x]) { if(Q.y < n_a && (Q.x == n_b || d_a[Q.y] <= d_b[Q.x])) { d_m[i] = d_a[Q.y]; } else { d_m[i] = d_b[Q.x]; } break; } else { K.x = Q.x + 1; K.y = Q.y - 1; } } else { P.x = Q.x - 1; P.y = Q.y + 1; } } } } //Giving a path ( from pathBig_k ) each block merge (with mergeParallel) each piece a_k and b_k in m_k of a and b. Then it replace elements in m __global__ void mergeBig_k(int *m, int n_m, int *a, int n_a, int *b, int n_b, int2 *path, int n_path, int nbPartitions, int size) { int blockId = blockIdx.x; int threadId = threadIdx.x; int i = blockId * blockDim.x + threadId; if (blockId <= nbPartitions)//On utilise un block pour chaque partition { int x0, y0, x1, y1; x0 = path[blockId].x; y0 = path[blockId].y; x1 = path[blockId+1].x; y1 = path[blockId+1].y; const int dimx=x1-x0; const int dimy = y1-y0; //A modifier par dimx dimy dimx+dimy __shared__ int a_k[1024]; __shared__ int b_k[1024]; __shared__ int m_k[1024]; if (threadId < dimx) //On rempli a_k[i] : 0 <= i < dimx { a_k[threadId] = a[x0+threadId]; } else if (threadId < dimy+dimx)//On rempli b_k[i] : indice dimx <= i < dimx+dimy+1 { b_k[threadId-dimx] = b[y0+threadId-dimx]; } __syncthreads(); // mergeParallel(m_k, dimx+dimy, size); m[i] = m_k[threadId]; } } //Function that generate a path to break down m into pieces that could be merge without conflict //On appelle |m|/TPB blocks avec chacun un seul thread. Chaque thread s'occupe de la diagonale thread __global__ void pathBig_k(int pas, int2* path, int n_path , int* d_a, int n_a ,int* d_b, int n_b) { int thread_i = blockIdx.x * blockDim.x + threadIdx.x; if(thread_i <= (n_a + n_b)/pas) //<------------//On vrifie que l'indice du thread est infrieur la taille du tableau de retour et qu'il est un multiple du pas { int i = thread_i*pas; int2 K; int2 P; int2 Q; if(i > n_a) { K.x = i - n_a; K.y = n_a; P.x = n_a; P.y = i - n_a; } else { K.x = 0; K.y = i; P.x = i; P.y = 0; } int offset = 0; while(1) { //Calcul des coordonnes du milieu de P et K offset = abs(K.y - P.y)/2; Q.x = K.x + offset; Q.y = K.y - offset; // if(Q.y >= 0 && Q.x <= n_b && (Q.y == n_a || Q.x == 0 || d_a[Q.y] > d_b[Q.x - 1])) { // if(Q.x == n_b || Q.y == 0 || d_a[Q.y - 1] <= d_b[Q.x]) { break; } else { K.x = Q.x + 1; K.y = Q.y - 1; } } else { P.x = Q.x - 1; P.y = Q.y + 1; } } //printf("thread : %d => (%d, %d)\n", thread_i, Q.y, Q.x); //!\\ Problme ordre x et y path[thread_i].x=Q.y; path[thread_i].y=Q.x; } //Si |m| n'est pas un mutliple de pas, le thread 0 ajoute (n_a, n_b) la fin du tableau if (thread_i==0 && (n_a+n_b)%pas!=0) { //printf("thread : %d => (%d, %d)\n", thread_i, n_a, n_b); path[n_path-1].x=n_a; path[n_path-1].y=n_b; } } //Function that sort any array void fusionSort(int *mGPU, int n_m) { //L1 : indice du premier lment de m_part1 //R1 : indice du dernier lment de m_part1 //L2 : indice du premier lment de m_part2 //R2 : indice du dernier lment de m_part2 int size = 1; int i; int *m = (int*)malloc(n_m*sizeof(int)); while (size < n_m) { i = 0; if (size < 1024) { printf("Size : %d\n", size); hipLaunchKernelGGL(( mergeSmall_k), dim3(n_m/(2*size) + 1), dim3(2*size), 0, 0, mGPU, n_m, size); gpuCheck(hipMemcpy(m, mGPU, n_m*sizeof(int), hipMemcpyDeviceToHost)); hipDeviceSynchronize(); print_array(m, n_m); } size *= 2; } } void fusionMergeSeq(int* A, int* tmp, int L1, int R1, int L2, int R2){ int i = 0; while(L1 <= R1 && L2 <= R2){ if(A[L1] <= A[L2]){ tmp[i] = A[L1]; i++; L1++; } else{ tmp[i] = A[L2]; 
i++; L2++; } } while(L1 <= R1){ tmp[i] = A[L1]; i++; L1++; } while(L2 <= R2){ tmp[i] = A[L2]; i++; L2++; } } void fusionSortSeq(int* A, int n){ int len = 1; int i; int L1, R1, L2, R2; int* tmp = (int*)malloc(n*sizeof(int)); while(len < n){ i = 0; while(i < n){ L1 = i; R1 = i + len - 1; L2 = i + len; R2 = i + 2*len - 1; tmp = (int*)realloc(tmp, (R2-L1+1)*sizeof(int)); if(L2 >= n){ break; } if(R2 >= n){ R2 = n - 1; } fusionMergeSeq(A, tmp, L1, R1, L2, R2); for(int j = 0;j < R2-L1+1;j++){ A[i+j] = tmp[j]; } i = i + 2*len; } len *= 2; } free(tmp); } //Fonction qui trie un tableau M en parallle par tri fusion itratif (question 3) //Fonctions de vrification //Fonction qui vrifie qu'un tableau est bien tri (tous ses lments rangs dans l'ordre croissant) int assertOrder(int *tab, int size){ for (int i=0; i<size-1; i++){ if (tab[i] > tab[i+1]){ printf("WARNING : Unsuccessful merge or sort ... : unordered array on indice %d ...\n", i); printf("tab[i]= %d > tab[i+1] = %d\n", tab[i], tab[i+1]); return 0; } } return 1; } //Fonction qui vrifie qu'on retrouve bien dans le nouveau tableau tous les lments des deux tableaux qu'on veut fusionner int assertMergeAllValuesPresent(int *tab, int n1, int *tab2, int n2, int* m, int size) { int verif[size]; //tableau avec des 1 l o l'on a dj vrifi qu'il correspond dj un lment de a ou de b for(int i = 0;i<size;i++){ verif[i] = 0; } for (int i=0; i<size; i++){ for(int j = 0;j < n1;j++){ if(tab[j] == m[i] && verif[i] == 0){ //si il y a une valeur identique et que celle-ci n'a pas t vrifie verif[i] = 1; } } } for (int i=0; i<size; i++){ for(int j = 0;j < n2;j++){ if(tab2[j] == m[i] && verif[i] == 0){ verif[i] = 1; } } } for(int i = 0;i<size;i++){ if(verif[i] != 1){ printf("\nWARNING : Unsuccessful merge : incorrect elements...\n"); return 0; } } return 1; } //Fonction qui vrifie qu'on retrouve bien dans le nouveau tableau tous les lments du tableau qu'on veut trier int assertSortAllValuesPresent(int* m, int* m_sorted, int size){ int verif[size]; //tableau avec des 1 l o l'on a dj vrifi qu'il correspond dj un lment de a ou de b for(int i = 0;i<size;i++){ verif[i] = 0; } for (int i=0; i<size; i++){ for(int j = 0;j < size;j++){ if(m_sorted[j] == m[i]){ //si il y a une valeur identique verif[i] = 1; } } } for(int i = 0;i<size;i++){ if(verif[i] != 1){ printf("i : %d\n", i); printf("\nWARNING : Unsuccessful sort : incorrect elements...\n"); return 0; } } return 1; } //Fonction qui vrifie qu'un tableau est bien tri et la fusion de deux tableaux //tab et tab2 : les deux tableaux qu'on veut fusionner //m : le tableau qui est la fusion trie de tab et tab2 int assertMerge(int *tab, int n1, int *tab2, int n2, int* m, int size){ int successfulOrder = assertOrder(m, size); int successfulElements = assertMergeAllValuesPresent(tab, n1, tab2, n2, m, size); //assertMergeAllValuesPresent(int *tab, int n1, int *tab2, int n2, int* m, int size) if(successfulOrder && successfulElements){ printf("\nSuccessful merge !\n"); return 1; } else{ printf("\nUnsuccessful merge !\n"); return 0; } } //Fonction qui vrifie qu'un tableau est bien tri //m : le tableau non tri qu'on veut trier //m_sorted : le tableau m soi-disant tri (on veut vrifier si c'est bien le cas) //size : la taille du tableau int assertSorted(int* m, int* m_sorted, int size) { int successfulOrder = assertOrder(m_sorted, size); // les lments du tableau sont ils bien dans le bon ordre ? int successfulElements = assertSortAllValuesPresent(m, m_sorted, size); //retrouve t-on bien toutes les valeurs ? 
if(successfulOrder && successfulElements){ printf("\nSuccessful sort !\n"); return 1; } else{ printf("\nUnsuccessful sort !\n"); return 0; } } int main(int argc, char *argv[]) { std::clock_t startS, endS; float seqMergeTime, parMergeTime; srand(time(NULL)); int n_m = 200; int *m, *mseq, *mref, *mGPU; if(argc==2) { n_m = atoi(argv[1]); } printf("========== Path Sort : =========\n"); printf("* Size of array : %d\n\n", n_m); //int* mseq; m = (int*)malloc(n_m*sizeof(int)); init_array_no_order(m, n_m, n_m*10); gpuCheck(hipMalloc(&mGPU, n_m*sizeof(int))); gpuCheck(hipMemcpy(mGPU, m, n_m*sizeof(int), hipMemcpyHostToDevice)); print_array(m, n_m); mseq = (int*)malloc(n_m*sizeof(int)); //copie de m copy_array(m, mseq, n_m); mref = (int*)malloc(n_m*sizeof(int)); //copie de m copy_array(m, mref, n_m); //Partie des calculs1024 //================ Paral1024lel : =======================\\ //Etape de prtraitement : startS = std::clock(); fusionSort(mGPU, n_m); hipDeviceSynchronize(); endS = std::clock(); parMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC; gpuCheck(hipMemcpy(m, mGPU, n_m*sizeof(int), hipMemcpyDeviceToHost)); //Etape du tri fusion : startS = std::clock(); fusionSortSeq(mseq, n_m); endS = std::clock(); seqMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC; printf("========= Parallel sort : =============\n"); printf("Total time elapsed : %f s\n", parMergeTime); assertSorted(mref, m, n_m); printf("Parrallel algorithm is %f times faster than sequential merge !\n", seqMergeTime/parMergeTime); printf("Parrallel merge is %f times faster than sequential merge !\n", seqMergeTime/parMergeTime); printf("========= Sequential sort : =============\n"); printf("Total time elapsed : %f s\n", seqMergeTime); // assertSorted(mref, mseq, n_m); return 0; }
b2306e7825a771b6664c892975f58dd75d0b9b3f.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <ctime> //Function that verify cuda calls and return cuda error if any #define gpuCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } //Initialise ascendant array with random values in void init_array(int* array, int size, int const adder=10) { array[0] = rand()%adder; for(int i = 0; i < size;i++) { array[i] = array[i-1] + rand()%adder; } } //Function that initialise array with random values void init_array_no_order(int* array, int size, int const adder=10) { array[0] = rand()%adder; for(int i = 0; i < size;i++) { array[i] = rand()%adder; } } //Function that copy array in another void copy_array(int* a, int* a_copy, int n){ for(int i = 0;i < n;i++){ a_copy[i] = a[i]; } } //Function that print an array of size size void print_array(int* a, int size) { printf("["); for(int i = 0; i < size;i++) { //printf("i = %d | v = %d " ,i, a[i]); printf("%d " ,a[i]); } printf("]\n"); } //Device version of parallel merge of a and b in m with |m|<1024 __global__ void mergeSmall_k(int* m, int n, int size) { int gbx = blockIdx.x; int tidx = threadIdx.x; int i = gbx * blockDim.x + tidx; if(i < n) { int L1, R1, L2, R2; L1 = gbx*blockDim.x; R1 = gbx*blockDim.x + size-1; L2 = gbx*blockDim.x + size; R2 = gbx*blockDim.x + 2*size-1; if(L2 < n) { // printf("L1 : %d, R1 : %d, L2 : %d, R2 : %d\n", L1, R1, L2, R2); if(R2 >= n){ R2 = n-1; } __shared__ int *d_a, *d_b; int n_a = R1-L1+1; int n_b = R2-L2+1; int n_m = n_a+n_b; d_a = (int*)malloc(n_a*sizeof(int)); d_b = (int*)malloc(n_b*sizeof(int)); __syncthreads(); // printf("tidx : %d, n_a : %d\n", tidx, n_a); if (tidx < n_a) { // printf("m[%d] : %d\n", i, m[i]); d_a[tidx] = m[i]; // printf("d_a_%d[%d] = %d\n", gbx, tidx, d_a[tidx]); } else if (tidx < n_m) { d_b[tidx - n_a] = m[i]; // printf("d_b_%d[%d] = %d\n", gbx, tidx - n_a, d_b[tidx - n_a]); } __syncthreads(); int2 K; int2 P; int2 Q; // printf("n_a : %d, n_b : %d\n", n_a, n_b); if(tidx > n_a) { K.x = tidx - n_a; K.y = n_a; P.x = n_a; P.y = tidx - n_a; } else { K.x = 0; K.y = tidx; P.x = tidx; P.y = 0; } int offset = 0; while(1) { offset = abs(K.y - P.y)/2; Q.x = K.x + offset; Q.y = K.y - offset; if(Q.y >= 0 && Q.x <= n_b && (Q.y == n_a || Q.x == 0 || d_a[Q.y] > d_b[Q.x - 1])) { if(Q.x == n_b || Q.y == 0 || d_a[Q.y - 1] <= d_b[Q.x]) { if(Q.y < n_a && (Q.x == n_b || d_a[Q.y] <= d_b[Q.x])) { m[i] = d_a[Q.y]; // printf("## m[%d] : %d, d_a_%d[%d] : %d\n",i, m[i], gbx, Q.y, d_a[Q.y]); } else { m[i] = d_b[Q.x]; // printf("## m[%d] : %d, d_b_%d[%d] : %d\n",i, m[i], gbx, Q.x, d_b[Q.x]); } break; } else { K.x = Q.x + 1; K.y = Q.y - 1; } } else { P.x = Q.x - 1; P.y = Q.y + 1; } } __syncthreads(); } } } /** Parallel version of merge of A and B with |A| + |B| <= 1024 @param d_a, d_b : device versions of arrays to merge d_m : device version of merge of a and b n_a, n_b, n_b : respective sizes of d_a, d_b, d_m */ __device__ void mergeSmall_k(int* d_a, int* d_b, int* d_m, int n_a, int n_b, int n_m){ int i = threadIdx.x; if(i < n_m) { int2 K; int2 P; int2 Q; if(i > n_a) { K.x = i - n_a; K.y = n_a; P.x = n_a; P.y = i - n_a; } else { K.x = 0; K.y = i; P.x = i; P.y = 0; } int offset = 0; while(1) { offset = abs(K.y - P.y)/2; Q.x = K.x + offset; Q.y = K.y - offset; if(Q.y >= 0 && Q.x <= n_b && (Q.y == n_a || Q.x == 0 || d_a[Q.y] > d_b[Q.x - 1])) { if(Q.x 
== n_b || Q.y == 0 || d_a[Q.y - 1] <= d_b[Q.x]) { if(Q.y < n_a && (Q.x == n_b || d_a[Q.y] <= d_b[Q.x])) { d_m[i] = d_a[Q.y]; } else { d_m[i] = d_b[Q.x]; } break; } else { K.x = Q.x + 1; K.y = Q.y - 1; } } else { P.x = Q.x - 1; P.y = Q.y + 1; } } } } //Giving a path ( from pathBig_k ) each block merge (with mergeParallel) each piece a_k and b_k in m_k of a and b. Then it replace elements in m __global__ void mergeBig_k(int *m, int n_m, int *a, int n_a, int *b, int n_b, int2 *path, int n_path, int nbPartitions, int size) { int blockId = blockIdx.x; int threadId = threadIdx.x; int i = blockId * blockDim.x + threadId; if (blockId <= nbPartitions)//On utilise un block pour chaque partition { int x0, y0, x1, y1; x0 = path[blockId].x; y0 = path[blockId].y; x1 = path[blockId+1].x; y1 = path[blockId+1].y; const int dimx=x1-x0; const int dimy = y1-y0; //A modifier par dimx dimy dimx+dimy __shared__ int a_k[1024]; __shared__ int b_k[1024]; __shared__ int m_k[1024]; if (threadId < dimx) //On rempli a_k[i] : 0 <= i < dimx { a_k[threadId] = a[x0+threadId]; } else if (threadId < dimy+dimx)//On rempli b_k[i] : indice dimx <= i < dimx+dimy+1 { b_k[threadId-dimx] = b[y0+threadId-dimx]; } __syncthreads(); // mergeParallel(m_k, dimx+dimy, size); m[i] = m_k[threadId]; } } //Function that generate a path to break down m into pieces that could be merge without conflict //On appelle |m|/TPB blocks avec chacun un seul thread. Chaque thread s'occupe de la diagonale thread __global__ void pathBig_k(int pas, int2* path, int n_path , int* d_a, int n_a ,int* d_b, int n_b) { int thread_i = blockIdx.x * blockDim.x + threadIdx.x; if(thread_i <= (n_a + n_b)/pas) //<------------//On vérifie que l'indice du thread est inférieur à la taille du tableau de retour et qu'il est un multiple du pas { int i = thread_i*pas; int2 K; int2 P; int2 Q; if(i > n_a) { K.x = i - n_a; K.y = n_a; P.x = n_a; P.y = i - n_a; } else { K.x = 0; K.y = i; P.x = i; P.y = 0; } int offset = 0; while(1) { //Calcul des coordonnées du milieu de P et K offset = abs(K.y - P.y)/2; Q.x = K.x + offset; Q.y = K.y - offset; // if(Q.y >= 0 && Q.x <= n_b && (Q.y == n_a || Q.x == 0 || d_a[Q.y] > d_b[Q.x - 1])) { // if(Q.x == n_b || Q.y == 0 || d_a[Q.y - 1] <= d_b[Q.x]) { break; } else { K.x = Q.x + 1; K.y = Q.y - 1; } } else { P.x = Q.x - 1; P.y = Q.y + 1; } } //printf("thread : %d => (%d, %d)\n", thread_i, Q.y, Q.x); //!\\ Problème ordre x et y path[thread_i].x=Q.y; path[thread_i].y=Q.x; } //Si |m| n'est pas un mutliple de pas, le thread 0 ajoute (n_a, n_b) à la fin du tableau if (thread_i==0 && (n_a+n_b)%pas!=0) { //printf("thread : %d => (%d, %d)\n", thread_i, n_a, n_b); path[n_path-1].x=n_a; path[n_path-1].y=n_b; } } //Function that sort any array void fusionSort(int *mGPU, int n_m) { //L1 : indice du premier élément de m_part1 //R1 : indice du dernier élément de m_part1 //L2 : indice du premier élément de m_part2 //R2 : indice du dernier élément de m_part2 int size = 1; int i; int *m = (int*)malloc(n_m*sizeof(int)); while (size < n_m) { i = 0; if (size < 1024) { printf("Size : %d\n", size); mergeSmall_k<<<n_m/(2*size) + 1, 2*size>>>(mGPU, n_m, size); gpuCheck(cudaMemcpy(m, mGPU, n_m*sizeof(int), cudaMemcpyDeviceToHost)); cudaDeviceSynchronize(); print_array(m, n_m); } size *= 2; } } void fusionMergeSeq(int* A, int* tmp, int L1, int R1, int L2, int R2){ int i = 0; while(L1 <= R1 && L2 <= R2){ if(A[L1] <= A[L2]){ tmp[i] = A[L1]; i++; L1++; } else{ tmp[i] = A[L2]; i++; L2++; } } while(L1 <= R1){ tmp[i] = A[L1]; i++; L1++; } while(L2 <= R2){ tmp[i] = A[L2]; i++; 
L2++; } } void fusionSortSeq(int* A, int n){ int len = 1; int i; int L1, R1, L2, R2; int* tmp = (int*)malloc(n*sizeof(int)); while(len < n){ i = 0; while(i < n){ L1 = i; R1 = i + len - 1; L2 = i + len; R2 = i + 2*len - 1; tmp = (int*)realloc(tmp, (R2-L1+1)*sizeof(int)); if(L2 >= n){ break; } if(R2 >= n){ R2 = n - 1; } fusionMergeSeq(A, tmp, L1, R1, L2, R2); for(int j = 0;j < R2-L1+1;j++){ A[i+j] = tmp[j]; } i = i + 2*len; } len *= 2; } free(tmp); } //Fonction qui trie un tableau M en parallèle par tri fusion itératif (question 3) //Fonctions de vérification //Fonction qui vérifie qu'un tableau est bien trié (tous ses éléments rangés dans l'ordre croissant) int assertOrder(int *tab, int size){ for (int i=0; i<size-1; i++){ if (tab[i] > tab[i+1]){ printf("WARNING : Unsuccessful merge or sort ... : unordered array on indice %d ...\n", i); printf("tab[i]= %d > tab[i+1] = %d\n", tab[i], tab[i+1]); return 0; } } return 1; } //Fonction qui vérifie qu'on retrouve bien dans le nouveau tableau tous les éléments des deux tableaux qu'on veut fusionner int assertMergeAllValuesPresent(int *tab, int n1, int *tab2, int n2, int* m, int size) { int verif[size]; //tableau avec des 1 là où l'on a déjà vérifié qu'il correspond déjà à un élément de a ou de b for(int i = 0;i<size;i++){ verif[i] = 0; } for (int i=0; i<size; i++){ for(int j = 0;j < n1;j++){ if(tab[j] == m[i] && verif[i] == 0){ //si il y a une valeur identique et que celle-ci n'a pas été vérifiée verif[i] = 1; } } } for (int i=0; i<size; i++){ for(int j = 0;j < n2;j++){ if(tab2[j] == m[i] && verif[i] == 0){ verif[i] = 1; } } } for(int i = 0;i<size;i++){ if(verif[i] != 1){ printf("\nWARNING : Unsuccessful merge : incorrect elements...\n"); return 0; } } return 1; } //Fonction qui vérifie qu'on retrouve bien dans le nouveau tableau tous les éléments du tableau qu'on veut trier int assertSortAllValuesPresent(int* m, int* m_sorted, int size){ int verif[size]; //tableau avec des 1 là où l'on a déjà vérifié qu'il correspond déjà à un élément de a ou de b for(int i = 0;i<size;i++){ verif[i] = 0; } for (int i=0; i<size; i++){ for(int j = 0;j < size;j++){ if(m_sorted[j] == m[i]){ //si il y a une valeur identique verif[i] = 1; } } } for(int i = 0;i<size;i++){ if(verif[i] != 1){ printf("i : %d\n", i); printf("\nWARNING : Unsuccessful sort : incorrect elements...\n"); return 0; } } return 1; } //Fonction qui vérifie qu'un tableau est bien trié et la fusion de deux tableaux //tab et tab2 : les deux tableaux qu'on veut fusionner //m : le tableau qui est la fusion triée de tab et tab2 int assertMerge(int *tab, int n1, int *tab2, int n2, int* m, int size){ int successfulOrder = assertOrder(m, size); int successfulElements = assertMergeAllValuesPresent(tab, n1, tab2, n2, m, size); //assertMergeAllValuesPresent(int *tab, int n1, int *tab2, int n2, int* m, int size) if(successfulOrder && successfulElements){ printf("\nSuccessful merge !\n"); return 1; } else{ printf("\nUnsuccessful merge !\n"); return 0; } } //Fonction qui vérifie qu'un tableau est bien trié //m : le tableau non trié qu'on veut trier //m_sorted : le tableau m soi-disant trié (on veut vérifier si c'est bien le cas) //size : la taille du tableau int assertSorted(int* m, int* m_sorted, int size) { int successfulOrder = assertOrder(m_sorted, size); // les éléments du tableau sont ils bien dans le bon ordre ? int successfulElements = assertSortAllValuesPresent(m, m_sorted, size); //retrouve t-on bien toutes les valeurs ? 
if(successfulOrder && successfulElements){ printf("\nSuccessful sort !\n"); return 1; } else{ printf("\nUnsuccessful sort !\n"); return 0; } } int main(int argc, char *argv[]) { std::clock_t startS, endS; float seqMergeTime, parMergeTime; srand(time(NULL)); int n_m = 200; int *m, *mseq, *mref, *mGPU; if(argc==2) { n_m = atoi(argv[1]); } printf("========== Path Sort : =========\n"); printf("* Size of array : %d\n\n", n_m); //int* mseq; m = (int*)malloc(n_m*sizeof(int)); init_array_no_order(m, n_m, n_m*10); gpuCheck(cudaMalloc(&mGPU, n_m*sizeof(int))); gpuCheck(cudaMemcpy(mGPU, m, n_m*sizeof(int), cudaMemcpyHostToDevice)); print_array(m, n_m); mseq = (int*)malloc(n_m*sizeof(int)); //copie de m copy_array(m, mseq, n_m); mref = (int*)malloc(n_m*sizeof(int)); //copie de m copy_array(m, mref, n_m); //Partie des calculs1024 //================ Paral1024lel : =======================\\ //Etape de prétraitement : startS = std::clock(); fusionSort(mGPU, n_m); cudaDeviceSynchronize(); endS = std::clock(); parMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC; gpuCheck(cudaMemcpy(m, mGPU, n_m*sizeof(int), cudaMemcpyDeviceToHost)); //Etape du tri fusion : startS = std::clock(); fusionSortSeq(mseq, n_m); endS = std::clock(); seqMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC; printf("========= Parallel sort : =============\n"); printf("Total time elapsed : %f s\n", parMergeTime); assertSorted(mref, m, n_m); printf("Parrallel algorithm is %f times faster than sequential merge !\n", seqMergeTime/parMergeTime); printf("Parrallel merge is %f times faster than sequential merge !\n", seqMergeTime/parMergeTime); printf("========= Sequential sort : =============\n"); printf("Total time elapsed : %f s\n", seqMergeTime); // assertSorted(mref, mseq, n_m); return 0; }
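/*
 * Hedged reference -- not part of the file above. The K/P/Q loop in
 * mergeSmall_k and pathBig_k is a binary search along an anti-diagonal of the
 * "merge path" grid: for a merged output index i it finds how many elements
 * of a precede position i (the rest come from b). The host function below
 * computes the same co-rank with an ordinary lo/hi binary search, which is
 * easier to check against small inputs; all names are illustrative.
 */
#include <stdio.h>

/* smallest j such that taking j items from a and i-j items from b is a valid
   stable split of the first i merged elements (ties taken from a first) */
static int corank(int i, const int *a, int n_a, const int *b, int n_b)
{
    int lo = (i > n_b) ? i - n_b : 0;
    int hi = (i < n_a) ? i : n_a;
    while (lo < hi) {
        int j = (lo + hi) / 2;           /* candidate: j from a, i-j from b */
        if (a[j] <= b[i - j - 1])
            lo = j + 1;                  /* a[j] still belongs before the split */
        else
            hi = j;                      /* split is at j or earlier */
    }
    return lo;
}

int main(void)
{
    int a[] = {1, 3, 5, 7};
    int b[] = {2, 4, 6, 8};
    int n_a = 4, n_b = 4;
    /* merged result is 1..8, so index i should take ceil(i/2) from a */
    for (int i = 0; i <= n_a + n_b; i++) {
        int j = corank(i, a, n_a, b, n_b);
        printf("output index %d -> %d from a, %d from b\n", i, j, i - j);
    }
    return 0;
}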
c4ad77bccf3ed13dd74e01e19aa107a5646a884a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * globalCopy.cu * * Microbenchmark for copy bandwidth of global memory. * * Build with: nvcc -I ../chLib <options> globalCopy.cu * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> #include <chError.h> #include <chCommandLine.h> template<class T, const int n> __global__ void GlobalCopy( T *out, const T *in, size_t N ) { T temp[n]; size_t i; for ( i = n*blockIdx.x*blockDim.x+threadIdx.x; i < N-n*blockDim.x*gridDim.x; i += n*blockDim.x*gridDim.x ) { for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; temp[j] = in[index]; } for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; out[index] = temp[j]; } } // to avoid the (index<N) conditional in the inner loop, // we left off some work at the end for ( int j = 0; j < n; j++ ) { for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; if ( index<N ) temp[j] = in[index]; } for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; if ( index<N ) out[index] = temp[j]; } } } template<class T, const int n, bool bOffsetDst, bool bOffsetSrc> double BandwidthCopy( T *deviceOut, T *deviceIn, T *hostOut, T *hostIn, size_t N, hipEvent_t evStart, hipEvent_t evStop, int cBlocks, int cThreads ) { double ret = 0.0; double elapsedTime; float ms; int cIterations; hipError_t status; for ( int i = 0; i < N; i++ ) { int r = rand(); hostIn[i] = *(T *)(&r); // for small ints, LSBs; for int2 and int4, some stack cruft } memset( hostOut, 0, N*sizeof(T) ); cuda(Memcpy( deviceIn, hostIn, N*sizeof(T), hipMemcpyHostToDevice ) ); { // confirm that kernel launch with this configuration writes correct result hipLaunchKernelGGL(( GlobalCopy<T,n>), dim3(cBlocks),dim3(cThreads), 0, 0, deviceOut+bOffsetDst, deviceIn+bOffsetSrc, N-bOffsetDst-bOffsetSrc ); cuda(Memcpy( hostOut, deviceOut, N*sizeof(T), hipMemcpyDeviceToHost ) ); cuda(GetLastError() ); if ( memcmp( hostOut+bOffsetDst, hostIn+bOffsetSrc, (N-bOffsetDst-bOffsetSrc)*sizeof(T) ) ) { printf( "Incorrect copy performed!\n" ); goto Error; } } cIterations = 10; hipEventRecord( evStart ); for ( int i = 0; i < cIterations; i++ ) { hipLaunchKernelGGL(( GlobalCopy<T,n>), 
dim3(cBlocks),dim3(cThreads), 0, 0, deviceOut+bOffsetDst, deviceIn+bOffsetSrc, N-bOffsetDst-bOffsetSrc ); } hipEventRecord( evStop ); cuda(DeviceSynchronize() ); // make configurations that cannot launch error-out with 0 bandwidth cuda(GetLastError() ); cuda(EventElapsedTime( &ms, evStart, evStop ) ); elapsedTime = ms/1000.0f; // bytes per second ret = ((double)2*N*cIterations*sizeof(T)) / elapsedTime; // gigabytes per second ret /= 1024.0*1048576.0; Error: return ret; } template<class T, const int n, bool bOffsetDst, bool bOffsetSrc> double ReportRow( size_t N, size_t threadStart, size_t threadStop, size_t cBlocks ) { T *deviceIn = 0; T *deviceOut = 0; T *hostIn = 0; T *hostOut = 0; hipEvent_t evStart = 0; hipEvent_t evStop = 0; hipError_t status; int maxThreads = 0; double maxBW = 0.0; cuda(Malloc( &deviceIn, N*sizeof(T) ) ); cuda(Malloc( &deviceOut, N*sizeof(T) ) ); cuda(Memset( deviceOut, 0, N*sizeof(T) ) ); hostIn = new T[N]; if ( ! hostIn ) goto Error; hostOut = new T[N]; if ( ! hostOut ) goto Error; cuda(EventCreate( &evStart ) ); cuda(EventCreate( &evStop ) ); printf( "%d\t", n ); for ( int cThreads = threadStart; cThreads <= threadStop; cThreads *= 2 ) { double bw = BandwidthCopy<T,n,bOffsetDst,bOffsetSrc>( deviceOut, deviceIn, hostOut, hostIn, N, evStart, evStop, cBlocks, cThreads ); if ( bw > maxBW ) { maxBW = bw; maxThreads = cThreads; } printf( "%.2f\t", bw ); } printf( "%.2f\t%d\n", maxBW, maxThreads ); Error: if ( hostIn ) delete[] hostIn; if ( hostOut ) delete[] hostOut; hipEventDestroy( evStart ); hipEventDestroy( evStop ); hipFree( deviceIn ); hipFree( deviceOut ); return maxBW; } template<class T, bool bOffsetDst, bool bOffsetSrc> void Shmoo( size_t N, size_t threadStart, size_t threadStop, size_t cBlocks ) { printf( "Operand size: %d byte%c\n", (int) sizeof(T), sizeof(T)==1 ? 
'\0' : 's' ); printf( "Input size: %dM operands\n", (int) (N>>20) ); printf( " Block Size\n" ); printf( "Unroll\t" ); for ( int cThreads = threadStart; cThreads <= threadStop; cThreads *= 2 ) { printf( "%d\t", cThreads ); } printf( "maxBW\tmaxThreads\n" ); ReportRow<T, 1, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 2, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 3, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 4, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 5, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 6, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 7, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 8, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 9, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,10, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,11, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,12, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,13, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,14, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,15, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,16, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); } int main( int argc, char *argv[] ) { int device = 0; int size = 16; if ( chCommandLineGet( &device, "device", argc, argv ) ) { printf( "Using device %d...\n", device ); } hipSetDevice(device); if ( chCommandLineGet( &size, "size", argc, argv ) ) { printf( "Using %dM operands ...\n", size ); } if ( chCommandLineGetBool( "uncoalesced_read", argc, argv ) ) { if ( chCommandLineGetBool( "uncoalesced_write", argc, argv ) ) { printf( "Using uncoalesced reads and writes\n" ); Shmoo< char, true, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo<short, true, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int, true, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int2, true, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int4, true, true>( (size_t) size*1048576, 32, 512, 150 ); } else { printf( "Using coalesced writes and uncoalesced reads\n" ); Shmoo< char,false, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo<short,false, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int,false, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int2,false, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int4,false, true>( (size_t) size*1048576, 32, 512, 150 ); } } else { if ( chCommandLineGetBool( "uncoalesced_write", argc, argv ) ) { printf( "Using uncoalesced writes and coalesced reads\n" ); Shmoo< char, true,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo<short, true,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int, true,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int2, true,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int4, true,false>( (size_t) size*1048576, 32, 512, 150 ); } else { printf( "Using coalesced reads and writes\n" ); Shmoo< char,false,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo<short,false,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int,false,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int2,false,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int4,false,false>( 
(size_t) size*1048576, 32, 512, 150 ); } } return 0; }
c4ad77bccf3ed13dd74e01e19aa107a5646a884a.cu
/* * * globalCopy.cu * * Microbenchmark for copy bandwidth of global memory. * * Build with: nvcc -I ../chLib <options> globalCopy.cu * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> #include <chError.h> #include <chCommandLine.h> template<class T, const int n> __global__ void GlobalCopy( T *out, const T *in, size_t N ) { T temp[n]; size_t i; for ( i = n*blockIdx.x*blockDim.x+threadIdx.x; i < N-n*blockDim.x*gridDim.x; i += n*blockDim.x*gridDim.x ) { for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; temp[j] = in[index]; } for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; out[index] = temp[j]; } } // to avoid the (index<N) conditional in the inner loop, // we left off some work at the end for ( int j = 0; j < n; j++ ) { for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; if ( index<N ) temp[j] = in[index]; } for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; if ( index<N ) out[index] = temp[j]; } } } template<class T, const int n, bool bOffsetDst, bool bOffsetSrc> double BandwidthCopy( T *deviceOut, T *deviceIn, T *hostOut, T *hostIn, size_t N, cudaEvent_t evStart, cudaEvent_t evStop, int cBlocks, int cThreads ) { double ret = 0.0; double elapsedTime; float ms; int cIterations; cudaError_t status; for ( int i = 0; i < N; i++ ) { int r = rand(); hostIn[i] = *(T *)(&r); // for small ints, LSBs; for int2 and int4, some stack cruft } memset( hostOut, 0, N*sizeof(T) ); cuda(Memcpy( deviceIn, hostIn, N*sizeof(T), cudaMemcpyHostToDevice ) ); { // confirm that kernel launch with this configuration writes correct result GlobalCopy<T,n><<<cBlocks,cThreads>>>( deviceOut+bOffsetDst, deviceIn+bOffsetSrc, N-bOffsetDst-bOffsetSrc ); cuda(Memcpy( hostOut, deviceOut, N*sizeof(T), cudaMemcpyDeviceToHost ) ); cuda(GetLastError() ); if ( memcmp( hostOut+bOffsetDst, hostIn+bOffsetSrc, (N-bOffsetDst-bOffsetSrc)*sizeof(T) ) ) { printf( "Incorrect copy performed!\n" ); goto Error; } } cIterations = 10; cudaEventRecord( evStart ); for ( int i = 0; i < cIterations; i++ ) { GlobalCopy<T,n><<<cBlocks,cThreads>>>( deviceOut+bOffsetDst, deviceIn+bOffsetSrc, N-bOffsetDst-bOffsetSrc ); } cudaEventRecord( evStop ); 
cuda(DeviceSynchronize() ); // make configurations that cannot launch error-out with 0 bandwidth cuda(GetLastError() ); cuda(EventElapsedTime( &ms, evStart, evStop ) ); elapsedTime = ms/1000.0f; // bytes per second ret = ((double)2*N*cIterations*sizeof(T)) / elapsedTime; // gigabytes per second ret /= 1024.0*1048576.0; Error: return ret; } template<class T, const int n, bool bOffsetDst, bool bOffsetSrc> double ReportRow( size_t N, size_t threadStart, size_t threadStop, size_t cBlocks ) { T *deviceIn = 0; T *deviceOut = 0; T *hostIn = 0; T *hostOut = 0; cudaEvent_t evStart = 0; cudaEvent_t evStop = 0; cudaError_t status; int maxThreads = 0; double maxBW = 0.0; cuda(Malloc( &deviceIn, N*sizeof(T) ) ); cuda(Malloc( &deviceOut, N*sizeof(T) ) ); cuda(Memset( deviceOut, 0, N*sizeof(T) ) ); hostIn = new T[N]; if ( ! hostIn ) goto Error; hostOut = new T[N]; if ( ! hostOut ) goto Error; cuda(EventCreate( &evStart ) ); cuda(EventCreate( &evStop ) ); printf( "%d\t", n ); for ( int cThreads = threadStart; cThreads <= threadStop; cThreads *= 2 ) { double bw = BandwidthCopy<T,n,bOffsetDst,bOffsetSrc>( deviceOut, deviceIn, hostOut, hostIn, N, evStart, evStop, cBlocks, cThreads ); if ( bw > maxBW ) { maxBW = bw; maxThreads = cThreads; } printf( "%.2f\t", bw ); } printf( "%.2f\t%d\n", maxBW, maxThreads ); Error: if ( hostIn ) delete[] hostIn; if ( hostOut ) delete[] hostOut; cudaEventDestroy( evStart ); cudaEventDestroy( evStop ); cudaFree( deviceIn ); cudaFree( deviceOut ); return maxBW; } template<class T, bool bOffsetDst, bool bOffsetSrc> void Shmoo( size_t N, size_t threadStart, size_t threadStop, size_t cBlocks ) { printf( "Operand size: %d byte%c\n", (int) sizeof(T), sizeof(T)==1 ? '\0' : 's' ); printf( "Input size: %dM operands\n", (int) (N>>20) ); printf( " Block Size\n" ); printf( "Unroll\t" ); for ( int cThreads = threadStart; cThreads <= threadStop; cThreads *= 2 ) { printf( "%d\t", cThreads ); } printf( "maxBW\tmaxThreads\n" ); ReportRow<T, 1, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 2, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 3, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 4, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 5, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 6, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 7, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 8, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 9, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,10, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,11, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,12, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,13, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,14, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,15, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,16, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); } int main( int argc, char *argv[] ) { int device = 0; int size = 16; if ( chCommandLineGet( &device, "device", argc, argv ) ) { printf( "Using device %d...\n", device ); } cudaSetDevice(device); if ( chCommandLineGet( &size, "size", argc, argv ) ) { printf( "Using %dM operands ...\n", size ); } if ( 
chCommandLineGetBool( "uncoalesced_read", argc, argv ) ) { if ( chCommandLineGetBool( "uncoalesced_write", argc, argv ) ) { printf( "Using uncoalesced reads and writes\n" ); Shmoo< char, true, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo<short, true, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int, true, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int2, true, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int4, true, true>( (size_t) size*1048576, 32, 512, 150 ); } else { printf( "Using coalesced writes and uncoalesced reads\n" ); Shmoo< char,false, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo<short,false, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int,false, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int2,false, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int4,false, true>( (size_t) size*1048576, 32, 512, 150 ); } } else { if ( chCommandLineGetBool( "uncoalesced_write", argc, argv ) ) { printf( "Using uncoalesced writes and coalesced reads\n" ); Shmoo< char, true,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo<short, true,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int, true,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int2, true,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int4, true,false>( (size_t) size*1048576, 32, 512, 150 ); } else { printf( "Using coalesced reads and writes\n" ); Shmoo< char,false,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo<short,false,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int,false,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int2,false,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int4,false,false>( (size_t) size*1048576, 32, 512, 150 ); } } return 0; }
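/*
 * Hedged sketch -- not part of globalCopy.cu above. The shmoo converts the
 * event-timed interval into bandwidth as (2 * bytes * iterations) / seconds,
 * counting both the read and the write, then divides by 1024*1048576 to
 * report GiB/s. The standalone program below applies the same formula to a
 * plain device-to-device cudaMemcpy, which gives a convenient reference point
 * for the copy-kernel numbers; all names are illustrative.
 */
#include <cstdio>
#include <cuda_runtime.h>

int main()
{
    const size_t N = 16u << 20;               // 16M ints, the shmoo's default operand count
    const size_t bytes = N * sizeof(int);
    int *src, *dst;
    cudaMalloc(&src, bytes);
    cudaMalloc(&dst, bytes);
    cudaMemset(src, 0x5a, bytes);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    const int iters = 10;                      // same iteration count as BandwidthCopy
    cudaMemcpy(dst, src, bytes, cudaMemcpyDeviceToDevice);   // warm-up
    cudaEventRecord(start);
    for (int i = 0; i < iters; i++)
        cudaMemcpy(dst, src, bytes, cudaMemcpyDeviceToDevice);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    double seconds   = ms / 1000.0;
    double gib_per_s = (2.0 * bytes * iters) / seconds / (1024.0 * 1048576.0);
    printf("device-to-device copy: %.2f GiB/s\n", gib_per_s);

    cudaEventDestroy(start); cudaEventDestroy(stop);
    cudaFree(src); cudaFree(dst);
    return 0;
}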
d18e14490607709e52f4423ac7ffd02f5c86de52.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define PROMOTER_LEN 51594 #define SCORE_LEN 1963 #define PNVEC_MAX 18000 #define Nchunk 5000 #define MAX_LINE_LEN 24000 #define DELIM "\t" #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void tfbs_kernel(int N, int pvec_max, char *pvec, int *pvec_len, float *pwm, int pwm_len, float tf_threshold, float *bg, float *output) { int id = blockIdx.x * blockDim.x + threadIdx.x; int ind1, ind2; float wt_score1, wt_score2; float wt_score31, wt_score41; float wt_score32, wt_score42; float wt_score33, wt_score43; float wt_score34, wt_score44; int ioffset = 29; if (id < N ) { int ncol = 4; /* loop through positions */ for ( int ipos = ioffset; ipos < pvec_len[id] - ioffset; ipos++ ){ float fs1 = -99999.; float fs2 = -99999.; float fs31 = -99999.; float fs41 = -99999.; float fs32 = -99999.; float fs42 = -99999.; float fs33 = -99999.; float fs43 = -99999.; float fs34 = -99999.; float fs44 = -99999.; /* loop through all k-mers for that position */ for ( int ik = 0; ik < pwm_len; ik ++ ) { wt_score1 = 0; wt_score2 = 0; wt_score31 = 0; wt_score41=0; wt_score32 = 0; wt_score42=0; wt_score33 = 0; wt_score43=0; wt_score34 = 0; wt_score44=0; /* loop through the k-mer */ for ( int i = 0; i < pwm_len; i++){ ind1 = pvec[id * pvec_max + ipos + ik - pwm_len + 1 + i]; if ( ind1 < 1 ) { /* skip kmere if not A,C,T,G */ wt_score1 = 0; wt_score2 = 0; wt_score31 = 0; wt_score41=0; wt_score32 = 0; wt_score42=0; wt_score33 = 0; wt_score43=0; wt_score34 = 0; wt_score44=0; break; } ind2 = 5 - pvec[id * pvec_max + ipos + ik - i ]; if ( ind2 > 4 ) { /* skip kmere if not A,C,T,G */ wt_score1 = 0; wt_score2 = 0; wt_score31 = 0; wt_score41=0; wt_score32 = 0; wt_score42=0; wt_score33 = 0; wt_score43=0; wt_score34 = 0; wt_score44=0; break; } wt_score1 = wt_score1 + log(pwm[ (i * ncol ) + (ind1 -1) ] / bg[ ind1 - 1 ] ); wt_score2 = wt_score2 + log(pwm[ (i * ncol ) + (ind2 -1) ] / bg[ ind2 - 1 ] ); if ( i == (pwm_len -ik -1)) ind1 = 1; if ( i == ik) ind2 = 5 - 1; wt_score31 = wt_score31 + log(pwm[ (i * ncol ) + (ind1 -1) ] / bg[ ind1 - 1 ] ); wt_score41 = wt_score41 + log(pwm[ (i * ncol ) + (ind2 -1) ] / bg[ ind2 - 1 ] ); if ( i == (pwm_len -ik -1)) ind1 = 2; if ( i == ik) ind2 = 5 - 2; wt_score32 = wt_score32 + log(pwm[ (i * ncol ) + (ind1 -1) ] / bg[ ind1 - 1 ] ); wt_score42 = wt_score42 + log(pwm[ (i * ncol ) + (ind2 -1) ] / bg[ ind2 - 1 ] ); if ( i == (pwm_len -ik -1)) ind1 = 3; if ( i == ik) ind2 = 5 - 3; wt_score33 = wt_score33 + log(pwm[ (i * ncol ) + (ind1 -1) ] / bg[ ind1 - 1 ] ); wt_score43 = wt_score43 + log(pwm[ (i * ncol ) + (ind2 -1) ] / bg[ ind2 - 1 ] ); if ( i == (pwm_len -ik -1)) ind1 = 4; if ( i == ik) ind2 = 5 - 4; wt_score34 = wt_score34 + log(pwm[ (i * ncol ) + (ind1 -1) ] / bg[ ind1 - 1 ] ); wt_score44 = wt_score44 + log(pwm[ (i * ncol ) + (ind2 -1) ] / bg[ ind2 - 1 ] ); } /* for all kmers for this position calculate maximum scores */ fs1 = ( fs1 < wt_score1 ) ? wt_score1 : fs1; fs2 = ( fs2 < wt_score2 ) ? wt_score2 : fs2; fs31 = ( fs31 < wt_score31 ) ? wt_score31 : fs31; fs41 = ( fs41 < wt_score41 ) ? wt_score41 : fs41; fs32 = ( fs32 < wt_score32 ) ? wt_score32 : fs32; fs42 = ( fs42 < wt_score42 ) ? 
wt_score42 : fs42; fs33 = ( fs33 < wt_score33 ) ? wt_score33 : fs33; fs43 = ( fs43 < wt_score43 ) ? wt_score43 : fs43; fs34 = ( fs34 < wt_score34 ) ? wt_score34 : fs34; fs44 = ( fs44 < wt_score44 ) ? wt_score44 : fs44; } /* if all the scores are less than threshold do not store them */ int icol = id * (pvec_max - ioffset * 2 ) + (ipos - ioffset ); int ichunk = N * (pvec_max - ioffset * 2 ); output[ 1 * ichunk + icol ] = ipos + 1; output[ 2 * ichunk + icol ] = fs1; output[ 3 * ichunk + icol ] = fs2; if ( (fs31 >= tf_threshold | fs41 >= tf_threshold ) & pvec[ id * pvec_max + ipos] != 1 ){ output[ icol ] = 1; output[ 4 * ichunk + icol ] = fs31; output[ 5 * ichunk + icol ] = fs41; } else if( ( fs1 > tf_threshold | fs2 > tf_threshold) & pvec[ id * pvec_max + ipos] != 1 ){ output[ icol ] =0; output[ 4 * ichunk + icol ] = fs31; output[ 5 * ichunk + icol ] = fs41; } else { output[ icol ] = 0; } if ( ( fs32 >= tf_threshold | fs42 >= tf_threshold) & pvec[ id * pvec_max + ipos] != 2 ){ output[ icol ] = output[ icol ] + 10; output[ 6 * ichunk + icol ] = fs32; output[ 7 * ichunk + icol ] = fs42; } else if( ( fs1 > tf_threshold | fs2 > tf_threshold) & pvec[ id * pvec_max + ipos] != 2 ){ output[ 6 * ichunk + icol ] = fs32; output[ 7 * ichunk + icol ] = fs42; } if ( ( fs33 >= tf_threshold | fs43 >= tf_threshold) & pvec[ id * pvec_max + ipos] != 3 ){ output[ icol ] = output[ icol ] + 100; output[ 8 * ichunk + icol ] = fs33; output[ 9 * ichunk + icol ] = fs43; } else if( ( fs1 > tf_threshold | fs2 > tf_threshold) & pvec[ id * pvec_max + ipos] != 3 ){ output[ 8 * ichunk + icol ] = fs33; output[ 9 * ichunk + icol ] = fs43; } if ( ( fs34 >= tf_threshold | fs44 >= tf_threshold) & pvec[ id * pvec_max + ipos] != 4 ){ output[ icol ] = output[ icol ] + 1000; output[ 10 * ichunk + icol ] = fs34; output[ 11 * ichunk + icol ] = fs44; } else if( ( fs1 > tf_threshold | fs2 > tf_threshold) & pvec[ id * pvec_max + ipos] != 4 ){ output[ 10 * ichunk + icol ] = fs34; output[ 11 * ichunk + icol ] = fs44; } } } } void tfbs_cuda( char *pvec, // promoter int *pvec_len, int pvec_max, float *pwm, int pwm_len, float tf_threshold, float *bg, int N, float *output) {// output matrix char *d_pvec; int *d_pvec_len; float *d_output; float *d_pwm; float *d_bg; //printf("Allocating GPU memory for pvec_len\n"); gpuErrchk( hipMalloc( (void**)&d_pvec_len, N * sizeof( int)) ); //printf("Copying GPU memory for pvec_len\n"); gpuErrchk( hipMemcpy( d_pvec_len, pvec_len, N * sizeof( int), hipMemcpyHostToDevice ) ); //printf("Allocating GPU memory for pvec\n"); gpuErrchk( hipMalloc( (void**) &d_pvec, N * pvec_max * sizeof( char)) ); //printf("Copying GPU memory for pvec\n"); gpuErrchk( hipMemcpy( d_pvec, pvec, N * pvec_max * sizeof( char), hipMemcpyHostToDevice ) ); //printf("Allocating GPU memory for pwm\n"); gpuErrchk( hipMalloc( (void**)&d_pwm, 4 * pwm_len * sizeof(float)) ); //printf("Copying GPU memory for pwm\n"); gpuErrchk( hipMemcpy( d_pwm, pwm, 4 * pwm_len * sizeof(float), hipMemcpyHostToDevice ) ); //printf("Allocating GPU memory for bg\n"); gpuErrchk( hipMalloc( (void**)&d_bg, 4 * sizeof(float)) ); //printf("Copying GPU memory for bg\n"); gpuErrchk( hipMemcpy( d_bg, bg, 4 * sizeof(float), hipMemcpyHostToDevice ) ); //printf("Allocating GPU memory for result\n"); gpuErrchk( hipMalloc( (void**)&d_output, N * 12 * (pvec_max -58 ) * sizeof(float) ) ); //printf("Before kernel\n"); dim3 dimBlock( 32, 1 ); dim3 dimGrid( N/32 + 1, 1 ); //printf("Calling CUDA kernel\n"); hipLaunchKernelGGL(( tfbs_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, N, 
pvec_max, d_pvec, d_pvec_len, d_pwm, pwm_len, tf_threshold, d_bg, d_output); gpuErrchk( hipPeekAtLastError() ); //hipDeviceSynchronize(); //printf("After CUDA kernel\n"); gpuErrchk( hipMemcpy( output, d_output, N * 12 * (pvec_max -58 ) * sizeof(float), hipMemcpyDeviceToHost ) ); //printf("Free GPU memory \n"); hipFree( d_pvec ); hipFree( d_pvec_len ); hipFree( d_output ); hipFree( d_pwm ); hipFree( d_bg ); return; } int main( int argc, char *argv[] ){ char filename[1024]; int taskID; float bg[] = {0.25, 0.25, 0.25, 0.25}; FILE *ifp, *ofp; char in_line[MAX_LINE_LEN]; char *token; int chrom[PROMOTER_LEN]; int istart[PROMOTER_LEN]; int i, j, jj, i1, i2, j1, j2, icurr; float score_threshold[SCORE_LEN]; char **score_files; float pwm[30 * 4]; int pwm_length; int any0; float sum; char *pnvec; int *pnvec_len; float *result; int ires; /* command line processing */ taskID = atoi(argv[1]); printf("Input file name: %s\n", argv[2]); printf("tf.info file name: %s\n", argv[3]); printf("pwm directory: %s\n", argv[4]); sprintf(filename,"./%s/%s_%03d.txt", argv[5], argv[6], taskID); printf("Output file name: %s\n", filename); /* allocate memory to hold char arrays */ //sequence = (char **) malloc( PROMOTER_LEN * sizeof(char *) ); score_files = (char **) malloc( SCORE_LEN * sizeof(char *) ); result = (float *) malloc( Nchunk * 12 * ( PNVEC_MAX - 58 ) * sizeof(float) ); pnvec_len = ( int * ) malloc ( PROMOTER_LEN * sizeof( int ) ); if ( pnvec_len == 0 ) { fprintf( stderr, "ERROR allocating pnvec_len: Out of memory\n"); exit(1); } //printf("Allocating memory for pnvec\n"); pnvec = ( char * ) malloc ( PROMOTER_LEN * PNVEC_MAX * sizeof( char) ); if ( pnvec == 0 ) { fprintf( stderr, "ERROR allocating pnvec: Out of memory\n"); exit(2); } printf("Reading Input files\n"); /* read input file line by line (only 1st, 2nd and 8th columns) */ ifp = fopen(argv[2], "r"); fgets(in_line, MAX_LINE_LEN, ifp); //skip header line i=0; while( fgets(in_line, MAX_LINE_LEN, ifp )!= NULL ){ token = strtok( in_line, DELIM); sscanf( token,"chr%d", &(chrom[i])); token = strtok( NULL, DELIM); sscanf( token,"%d", istart + i ); token = strtok( NULL, DELIM); token = strtok( NULL, DELIM); token = strtok( NULL, DELIM); token = strtok( NULL, DELIM); token = strtok( NULL, DELIM); token = strtok( NULL, DELIM); //if(i > 14655) printf(" start processing letter\n"); pnvec_len[i] = strlen(token); for (j = 0; j < pnvec_len[i] ; j++) { //if (i == 14660)printf("%c",token[j]); switch( token[j] ){ case 'A': pnvec[ i * PNVEC_MAX + j ]=1; break; case 'a': pnvec[ i * PNVEC_MAX + j ]=1; break; case 'C': pnvec[ i * PNVEC_MAX + j ]=2; break; case 'c': pnvec[ i * PNVEC_MAX + j ]=2; break; case 'G': pnvec[ i * PNVEC_MAX + j ]=3; break; case 'g': pnvec[ i * PNVEC_MAX + j ]=3; break; case 'T': pnvec[ i * PNVEC_MAX + j ]=4; break; case 't': pnvec[ i * PNVEC_MAX + j ]=4; break; default: pnvec[ i * PNVEC_MAX + j ]=0; break; } } i++; //if ( i > 14600) { printf("i=%d\n",i);} } fclose(ifp); printf(" Read %d lines from the input file\n", i); /* Read tf.info file */ ifp = fopen(argv[3], "r"); i=0; while( fgets(in_line, MAX_LINE_LEN, ifp ) ){ token = strtok( in_line, DELIM); score_files[i] = (char *) malloc ( (strlen(token) + 1 ) * sizeof(char )); strcpy( score_files[i], token ); token = strtok( NULL, DELIM); score_threshold[i] = atof(token); i++; } fclose(ifp); printf(" Read %d lines from %s file\n", i, argv[3]); /* process chunks */ i1 = (taskID - 1) * 100 + 1; // was 10 originally i2 = taskID * 100; if ( i2 > SCORE_LEN ) i2 = SCORE_LEN; /* open output file */ ofp = 
fopen(filename,"w"); if (ofp == NULL) { fprintf( stderr, " Can't open output file\n"); exit(3); } for ( icurr = i1; icurr <= i2; icurr++){ printf(" icurr =%d\n", icurr); sprintf( filename, "./%s/%s\0", argv[4], score_files[icurr-1] ); ifp = fopen( filename , "r"); fgets(in_line, MAX_LINE_LEN, ifp ); // skip first line i = 0; any0 = 0; while( fgets(in_line, MAX_LINE_LEN, ifp ) ){ token = strtok( in_line, DELIM); //skip first value token = strtok( NULL, DELIM); pwm[i*4 + 0] = atof(token); if ( !strcmp(token, "0.0") ) any0=1; token = strtok( NULL, DELIM); pwm[i*4 + 1] = atof(token); if ( !strcmp(token, "0.0") ) any0=1; token = strtok( NULL, DELIM); pwm[i*4 + 2] = atof(token); if ( !strcmp(token, "0.0") ) any0=1; token = strtok( NULL, DELIM); pwm[i*4 + 3] = atof(token); if ( !strcmp(token, "0.0\n") ) any0=1; i++; } fclose(ifp); pwm_length = i; printf(" Read %d lines from %s file\n", i, score_files[icurr-1]); /* part of create_pwm function */ if ( any0 ) { for ( j = 0; j < i; j++ ){ sum = pwm[ j*4 + 0] + pwm[ j*4 + 1] + pwm[ j*4 + 2] + pwm[ j*4 + 3] + 0.001 * 4; pwm[ j*4 + 0] = (pwm[ j*4 + 0] + 0.001)/sum; pwm[ j*4 + 1] = (pwm[ j*4 + 1] + 0.001)/sum; pwm[ j*4 + 2] = (pwm[ j*4 + 2] + 0.001)/sum; pwm[ j*4 + 3] = (pwm[ j*4 + 3] + 0.001)/sum; } } /* inner loop in R*/ for ( j = 1; j < 12; j++ ){ j1 = (j - 1) * Nchunk + 1; j2 = j * Nchunk; if ( j2 > PROMOTER_LEN )j2 = PROMOTER_LEN; int n = j2 - j1 + 1; printf(" j = %d through %d; threshold = %f\n", j1, j2, score_threshold[icurr - 1]); tfbs_cuda (pnvec + (j1 -1) * PNVEC_MAX, pnvec_len + j1 -1, PNVEC_MAX, pwm, pwm_length, score_threshold[icurr - 1], bg, n, result); fflush(stdout); /* save result in the output file */ for (i = 0; i < n * ( PNVEC_MAX - 58 ); i++){ ires = (int) result [ i ]; int in = i/( PNVEC_MAX - 58 ); //printf("%d ",i); if (ires > 0 || (score_threshold[ icurr - 1] < result[ i + ( PNVEC_MAX - 58 ) * 2 * n] )|| (score_threshold[icurr - 1] < result[i + (PNVEC_MAX - 58 ) * 3 * n])) { fprintf(ofp,"%d ", chrom [ in ]); //unsigned int ipos = result [ i + ( PNVEC_MAX - 58 ) * n * 1] + istart[ j1 - 1 + i/( PNVEC_MAX - 58 ) ]; unsigned int ipos = 30 + i%( PNVEC_MAX - 58 ) + istart[ j1 - 1 + in ]; fprintf(ofp,"%d %d ", ipos, ipos + 1); fprintf(ofp,"%d %d ", j1 + in, icurr); fprintf(ofp,"%d ", ires ); // use %f.3 for printing results with 3 digits for ( jj = 2; jj < 11; jj++ )fprintf(ofp,"%.3f ", result [ i + ( PNVEC_MAX - 58 ) * jj * n] ); fprintf(ofp,"%.3f\n", result [ i + ( PNVEC_MAX - 58 ) * 11 * n] ); } } } // end of j loop } // end of icurr loop fclose(ofp); exit(0); }
d18e14490607709e52f4423ac7ffd02f5c86de52.cu
#include <stdio.h> #include <stdlib.h> #define PROMOTER_LEN 51594 #define SCORE_LEN 1963 #define PNVEC_MAX 18000 #define Nchunk 5000 #define MAX_LINE_LEN 24000 #define DELIM "\t" #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void tfbs_kernel(int N, int pvec_max, char *pvec, int *pvec_len, float *pwm, int pwm_len, float tf_threshold, float *bg, float *output) { int id = blockIdx.x * blockDim.x + threadIdx.x; int ind1, ind2; float wt_score1, wt_score2; float wt_score31, wt_score41; float wt_score32, wt_score42; float wt_score33, wt_score43; float wt_score34, wt_score44; int ioffset = 29; if (id < N ) { int ncol = 4; /* loop through positions */ for ( int ipos = ioffset; ipos < pvec_len[id] - ioffset; ipos++ ){ float fs1 = -99999.; float fs2 = -99999.; float fs31 = -99999.; float fs41 = -99999.; float fs32 = -99999.; float fs42 = -99999.; float fs33 = -99999.; float fs43 = -99999.; float fs34 = -99999.; float fs44 = -99999.; /* loop through all k-mers for that position */ for ( int ik = 0; ik < pwm_len; ik ++ ) { wt_score1 = 0; wt_score2 = 0; wt_score31 = 0; wt_score41=0; wt_score32 = 0; wt_score42=0; wt_score33 = 0; wt_score43=0; wt_score34 = 0; wt_score44=0; /* loop through the k-mer */ for ( int i = 0; i < pwm_len; i++){ ind1 = pvec[id * pvec_max + ipos + ik - pwm_len + 1 + i]; if ( ind1 < 1 ) { /* skip kmere if not A,C,T,G */ wt_score1 = 0; wt_score2 = 0; wt_score31 = 0; wt_score41=0; wt_score32 = 0; wt_score42=0; wt_score33 = 0; wt_score43=0; wt_score34 = 0; wt_score44=0; break; } ind2 = 5 - pvec[id * pvec_max + ipos + ik - i ]; if ( ind2 > 4 ) { /* skip kmere if not A,C,T,G */ wt_score1 = 0; wt_score2 = 0; wt_score31 = 0; wt_score41=0; wt_score32 = 0; wt_score42=0; wt_score33 = 0; wt_score43=0; wt_score34 = 0; wt_score44=0; break; } wt_score1 = wt_score1 + log(pwm[ (i * ncol ) + (ind1 -1) ] / bg[ ind1 - 1 ] ); wt_score2 = wt_score2 + log(pwm[ (i * ncol ) + (ind2 -1) ] / bg[ ind2 - 1 ] ); if ( i == (pwm_len -ik -1)) ind1 = 1; if ( i == ik) ind2 = 5 - 1; wt_score31 = wt_score31 + log(pwm[ (i * ncol ) + (ind1 -1) ] / bg[ ind1 - 1 ] ); wt_score41 = wt_score41 + log(pwm[ (i * ncol ) + (ind2 -1) ] / bg[ ind2 - 1 ] ); if ( i == (pwm_len -ik -1)) ind1 = 2; if ( i == ik) ind2 = 5 - 2; wt_score32 = wt_score32 + log(pwm[ (i * ncol ) + (ind1 -1) ] / bg[ ind1 - 1 ] ); wt_score42 = wt_score42 + log(pwm[ (i * ncol ) + (ind2 -1) ] / bg[ ind2 - 1 ] ); if ( i == (pwm_len -ik -1)) ind1 = 3; if ( i == ik) ind2 = 5 - 3; wt_score33 = wt_score33 + log(pwm[ (i * ncol ) + (ind1 -1) ] / bg[ ind1 - 1 ] ); wt_score43 = wt_score43 + log(pwm[ (i * ncol ) + (ind2 -1) ] / bg[ ind2 - 1 ] ); if ( i == (pwm_len -ik -1)) ind1 = 4; if ( i == ik) ind2 = 5 - 4; wt_score34 = wt_score34 + log(pwm[ (i * ncol ) + (ind1 -1) ] / bg[ ind1 - 1 ] ); wt_score44 = wt_score44 + log(pwm[ (i * ncol ) + (ind2 -1) ] / bg[ ind2 - 1 ] ); } /* for all kmers for this position calculate maximum scores */ fs1 = ( fs1 < wt_score1 ) ? wt_score1 : fs1; fs2 = ( fs2 < wt_score2 ) ? wt_score2 : fs2; fs31 = ( fs31 < wt_score31 ) ? wt_score31 : fs31; fs41 = ( fs41 < wt_score41 ) ? wt_score41 : fs41; fs32 = ( fs32 < wt_score32 ) ? wt_score32 : fs32; fs42 = ( fs42 < wt_score42 ) ? wt_score42 : fs42; fs33 = ( fs33 < wt_score33 ) ? wt_score33 : fs33; fs43 = ( fs43 < wt_score43 ) ? 
wt_score43 : fs43; fs34 = ( fs34 < wt_score34 ) ? wt_score34 : fs34; fs44 = ( fs44 < wt_score44 ) ? wt_score44 : fs44; } /* if all the scores are less than threshold do not store them */ int icol = id * (pvec_max - ioffset * 2 ) + (ipos - ioffset ); int ichunk = N * (pvec_max - ioffset * 2 ); output[ 1 * ichunk + icol ] = ipos + 1; output[ 2 * ichunk + icol ] = fs1; output[ 3 * ichunk + icol ] = fs2; if ( (fs31 >= tf_threshold | fs41 >= tf_threshold ) & pvec[ id * pvec_max + ipos] != 1 ){ output[ icol ] = 1; output[ 4 * ichunk + icol ] = fs31; output[ 5 * ichunk + icol ] = fs41; } else if( ( fs1 > tf_threshold | fs2 > tf_threshold) & pvec[ id * pvec_max + ipos] != 1 ){ output[ icol ] =0; output[ 4 * ichunk + icol ] = fs31; output[ 5 * ichunk + icol ] = fs41; } else { output[ icol ] = 0; } if ( ( fs32 >= tf_threshold | fs42 >= tf_threshold) & pvec[ id * pvec_max + ipos] != 2 ){ output[ icol ] = output[ icol ] + 10; output[ 6 * ichunk + icol ] = fs32; output[ 7 * ichunk + icol ] = fs42; } else if( ( fs1 > tf_threshold | fs2 > tf_threshold) & pvec[ id * pvec_max + ipos] != 2 ){ output[ 6 * ichunk + icol ] = fs32; output[ 7 * ichunk + icol ] = fs42; } if ( ( fs33 >= tf_threshold | fs43 >= tf_threshold) & pvec[ id * pvec_max + ipos] != 3 ){ output[ icol ] = output[ icol ] + 100; output[ 8 * ichunk + icol ] = fs33; output[ 9 * ichunk + icol ] = fs43; } else if( ( fs1 > tf_threshold | fs2 > tf_threshold) & pvec[ id * pvec_max + ipos] != 3 ){ output[ 8 * ichunk + icol ] = fs33; output[ 9 * ichunk + icol ] = fs43; } if ( ( fs34 >= tf_threshold | fs44 >= tf_threshold) & pvec[ id * pvec_max + ipos] != 4 ){ output[ icol ] = output[ icol ] + 1000; output[ 10 * ichunk + icol ] = fs34; output[ 11 * ichunk + icol ] = fs44; } else if( ( fs1 > tf_threshold | fs2 > tf_threshold) & pvec[ id * pvec_max + ipos] != 4 ){ output[ 10 * ichunk + icol ] = fs34; output[ 11 * ichunk + icol ] = fs44; } } } } void tfbs_cuda( char *pvec, // promoter int *pvec_len, int pvec_max, float *pwm, int pwm_len, float tf_threshold, float *bg, int N, float *output) {// output matrix char *d_pvec; int *d_pvec_len; float *d_output; float *d_pwm; float *d_bg; //printf("Allocating GPU memory for pvec_len\n"); gpuErrchk( cudaMalloc( (void**)&d_pvec_len, N * sizeof( int)) ); //printf("Copying GPU memory for pvec_len\n"); gpuErrchk( cudaMemcpy( d_pvec_len, pvec_len, N * sizeof( int), cudaMemcpyHostToDevice ) ); //printf("Allocating GPU memory for pvec\n"); gpuErrchk( cudaMalloc( (void**) &d_pvec, N * pvec_max * sizeof( char)) ); //printf("Copying GPU memory for pvec\n"); gpuErrchk( cudaMemcpy( d_pvec, pvec, N * pvec_max * sizeof( char), cudaMemcpyHostToDevice ) ); //printf("Allocating GPU memory for pwm\n"); gpuErrchk( cudaMalloc( (void**)&d_pwm, 4 * pwm_len * sizeof(float)) ); //printf("Copying GPU memory for pwm\n"); gpuErrchk( cudaMemcpy( d_pwm, pwm, 4 * pwm_len * sizeof(float), cudaMemcpyHostToDevice ) ); //printf("Allocating GPU memory for bg\n"); gpuErrchk( cudaMalloc( (void**)&d_bg, 4 * sizeof(float)) ); //printf("Copying GPU memory for bg\n"); gpuErrchk( cudaMemcpy( d_bg, bg, 4 * sizeof(float), cudaMemcpyHostToDevice ) ); //printf("Allocating GPU memory for result\n"); gpuErrchk( cudaMalloc( (void**)&d_output, N * 12 * (pvec_max -58 ) * sizeof(float) ) ); //printf("Before kernel\n"); dim3 dimBlock( 32, 1 ); dim3 dimGrid( N/32 + 1, 1 ); //printf("Calling CUDA kernel\n"); tfbs_kernel<<<dimGrid,dimBlock>>>(N, pvec_max, d_pvec, d_pvec_len, d_pwm, pwm_len, tf_threshold, d_bg, d_output); gpuErrchk( cudaPeekAtLastError() ); 
//cudaDeviceSynchronize(); //printf("After CUDA kernel\n"); gpuErrchk( cudaMemcpy( output, d_output, N * 12 * (pvec_max -58 ) * sizeof(float), cudaMemcpyDeviceToHost ) ); //printf("Free GPU memory \n"); cudaFree( d_pvec ); cudaFree( d_pvec_len ); cudaFree( d_output ); cudaFree( d_pwm ); cudaFree( d_bg ); return; } int main( int argc, char *argv[] ){ char filename[1024]; int taskID; float bg[] = {0.25, 0.25, 0.25, 0.25}; FILE *ifp, *ofp; char in_line[MAX_LINE_LEN]; char *token; int chrom[PROMOTER_LEN]; int istart[PROMOTER_LEN]; int i, j, jj, i1, i2, j1, j2, icurr; float score_threshold[SCORE_LEN]; char **score_files; float pwm[30 * 4]; int pwm_length; int any0; float sum; char *pnvec; int *pnvec_len; float *result; int ires; /* command line processing */ taskID = atoi(argv[1]); printf("Input file name: %s\n", argv[2]); printf("tf.info file name: %s\n", argv[3]); printf("pwm directory: %s\n", argv[4]); sprintf(filename,"./%s/%s_%03d.txt", argv[5], argv[6], taskID); printf("Output file name: %s\n", filename); /* allocate memory to hold char arrays */ //sequence = (char **) malloc( PROMOTER_LEN * sizeof(char *) ); score_files = (char **) malloc( SCORE_LEN * sizeof(char *) ); result = (float *) malloc( Nchunk * 12 * ( PNVEC_MAX - 58 ) * sizeof(float) ); pnvec_len = ( int * ) malloc ( PROMOTER_LEN * sizeof( int ) ); if ( pnvec_len == 0 ) { fprintf( stderr, "ERROR allocating pnvec_len: Out of memory\n"); exit(1); } //printf("Allocating memory for pnvec\n"); pnvec = ( char * ) malloc ( PROMOTER_LEN * PNVEC_MAX * sizeof( char) ); if ( pnvec == 0 ) { fprintf( stderr, "ERROR allocating pnvec: Out of memory\n"); exit(2); } printf("Reading Input files\n"); /* read input file line by line (only 1st, 2nd and 8th columns) */ ifp = fopen(argv[2], "r"); fgets(in_line, MAX_LINE_LEN, ifp); //skip header line i=0; while( fgets(in_line, MAX_LINE_LEN, ifp )!= NULL ){ token = strtok( in_line, DELIM); sscanf( token,"chr%d", &(chrom[i])); token = strtok( NULL, DELIM); sscanf( token,"%d", istart + i ); token = strtok( NULL, DELIM); token = strtok( NULL, DELIM); token = strtok( NULL, DELIM); token = strtok( NULL, DELIM); token = strtok( NULL, DELIM); token = strtok( NULL, DELIM); //if(i > 14655) printf(" start processing letter\n"); pnvec_len[i] = strlen(token); for (j = 0; j < pnvec_len[i] ; j++) { //if (i == 14660)printf("%c",token[j]); switch( token[j] ){ case 'A': pnvec[ i * PNVEC_MAX + j ]=1; break; case 'a': pnvec[ i * PNVEC_MAX + j ]=1; break; case 'C': pnvec[ i * PNVEC_MAX + j ]=2; break; case 'c': pnvec[ i * PNVEC_MAX + j ]=2; break; case 'G': pnvec[ i * PNVEC_MAX + j ]=3; break; case 'g': pnvec[ i * PNVEC_MAX + j ]=3; break; case 'T': pnvec[ i * PNVEC_MAX + j ]=4; break; case 't': pnvec[ i * PNVEC_MAX + j ]=4; break; default: pnvec[ i * PNVEC_MAX + j ]=0; break; } } i++; //if ( i > 14600) { printf("i=%d\n",i);} } fclose(ifp); printf(" Read %d lines from the input file\n", i); /* Read tf.info file */ ifp = fopen(argv[3], "r"); i=0; while( fgets(in_line, MAX_LINE_LEN, ifp ) ){ token = strtok( in_line, DELIM); score_files[i] = (char *) malloc ( (strlen(token) + 1 ) * sizeof(char )); strcpy( score_files[i], token ); token = strtok( NULL, DELIM); score_threshold[i] = atof(token); i++; } fclose(ifp); printf(" Read %d lines from %s file\n", i, argv[3]); /* process chunks */ i1 = (taskID - 1) * 100 + 1; // was 10 originally i2 = taskID * 100; if ( i2 > SCORE_LEN ) i2 = SCORE_LEN; /* open output file */ ofp = fopen(filename,"w"); if (ofp == NULL) { fprintf( stderr, " Can't open output file\n"); exit(3); } for ( 
icurr = i1; icurr <= i2; icurr++){ printf(" icurr =%d\n", icurr); sprintf( filename, "./%s/%s\0", argv[4], score_files[icurr-1] ); ifp = fopen( filename , "r"); fgets(in_line, MAX_LINE_LEN, ifp ); // skip first line i = 0; any0 = 0; while( fgets(in_line, MAX_LINE_LEN, ifp ) ){ token = strtok( in_line, DELIM); //skip first value token = strtok( NULL, DELIM); pwm[i*4 + 0] = atof(token); if ( !strcmp(token, "0.0") ) any0=1; token = strtok( NULL, DELIM); pwm[i*4 + 1] = atof(token); if ( !strcmp(token, "0.0") ) any0=1; token = strtok( NULL, DELIM); pwm[i*4 + 2] = atof(token); if ( !strcmp(token, "0.0") ) any0=1; token = strtok( NULL, DELIM); pwm[i*4 + 3] = atof(token); if ( !strcmp(token, "0.0\n") ) any0=1; i++; } fclose(ifp); pwm_length = i; printf(" Read %d lines from %s file\n", i, score_files[icurr-1]); /* part of create_pwm function */ if ( any0 ) { for ( j = 0; j < i; j++ ){ sum = pwm[ j*4 + 0] + pwm[ j*4 + 1] + pwm[ j*4 + 2] + pwm[ j*4 + 3] + 0.001 * 4; pwm[ j*4 + 0] = (pwm[ j*4 + 0] + 0.001)/sum; pwm[ j*4 + 1] = (pwm[ j*4 + 1] + 0.001)/sum; pwm[ j*4 + 2] = (pwm[ j*4 + 2] + 0.001)/sum; pwm[ j*4 + 3] = (pwm[ j*4 + 3] + 0.001)/sum; } } /* inner loop in R*/ for ( j = 1; j < 12; j++ ){ j1 = (j - 1) * Nchunk + 1; j2 = j * Nchunk; if ( j2 > PROMOTER_LEN )j2 = PROMOTER_LEN; int n = j2 - j1 + 1; printf(" j = %d through %d; threshold = %f\n", j1, j2, score_threshold[icurr - 1]); tfbs_cuda (pnvec + (j1 -1) * PNVEC_MAX, pnvec_len + j1 -1, PNVEC_MAX, pwm, pwm_length, score_threshold[icurr - 1], bg, n, result); fflush(stdout); /* save result in the output file */ for (i = 0; i < n * ( PNVEC_MAX - 58 ); i++){ ires = (int) result [ i ]; int in = i/( PNVEC_MAX - 58 ); //printf("%d ",i); if (ires > 0 || (score_threshold[ icurr - 1] < result[ i + ( PNVEC_MAX - 58 ) * 2 * n] )|| (score_threshold[icurr - 1] < result[i + (PNVEC_MAX - 58 ) * 3 * n])) { fprintf(ofp,"%d ", chrom [ in ]); //unsigned int ipos = result [ i + ( PNVEC_MAX - 58 ) * n * 1] + istart[ j1 - 1 + i/( PNVEC_MAX - 58 ) ]; unsigned int ipos = 30 + i%( PNVEC_MAX - 58 ) + istart[ j1 - 1 + in ]; fprintf(ofp,"%d %d ", ipos, ipos + 1); fprintf(ofp,"%d %d ", j1 + in, icurr); fprintf(ofp,"%d ", ires ); // use %f.3 for printing results with 3 digits for ( jj = 2; jj < 11; jj++ )fprintf(ofp,"%.3f ", result [ i + ( PNVEC_MAX - 58 ) * jj * n] ); fprintf(ofp,"%.3f\n", result [ i + ( PNVEC_MAX - 58 ) * 11 * n] ); } } } // end of j loop } // end of icurr loop fclose(ofp); exit(0); }
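#include <math.h>

// Minimal CPU sketch (an assumption, not part of either file above) of the
// forward-strand log-odds score that tfbs_kernel accumulates per window.
// Bases are encoded 1..4 for A,C,G,T and 0 otherwise, matching the encoding
// used when the promoter sequences are read in main(); score_window and its
// arguments are illustrative names only.
static float score_window(const char *bases, int window_start,
                          const float *pwm, int pwm_len, const float *bg)
{
    float score = 0.0f;
    for (int i = 0; i < pwm_len; i++) {
        int b = bases[window_start + i];
        if (b < 1 || b > 4)
            return 0.0f;        /* the kernel skips windows that contain a non-ACGT base */
        score += logf(pwm[i * 4 + (b - 1)] / bg[b - 1]);
    }
    return score;
}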
26724dd8dc28844403365e2f2ebcd56ff106943c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <stdlib.h> #include <stdio.h> #include "unistd.h" #include "time.h" #include "string.h" // Stores output value computed in inner loop for each thread __shared__ float localvalue[4008]; // Stores temporary shift values __constant__ float dm_shifts[1024]; // -------------------------- The Dedispersion Loop ----------------------------------- __global__ void dedisperse_loop(float *outuff, float *buff, int nsamp, int nchans, float tsamp, int chanfactor, float startdm, float dmstep, int inshift, int outshift) { int samp, s, c, indx, soffset; float shift_temp; /* dedispersing loop over all samples in this buffer */ s = threadIdx.x + blockIdx.x * blockDim.x; shift_temp = (startdm + blockIdx.y * dmstep) / tsamp; for (samp = 0; s + samp < nsamp; samp += blockDim.x * gridDim.x) { soffset = (s + samp); /* clear array element for storing dedispersed subband */ localvalue[threadIdx.x] = 0.0; /* loop over the channels */ for (c = 0; c < nchans; c ++) { indx = (soffset + (int)(dm_shifts[c * chanfactor] * shift_temp)) * nchans + c; localvalue[threadIdx.x] += buff[inshift + indx]; } outuff[outshift + blockIdx.y * nsamp + soffset] = localvalue[threadIdx.x]; } } // -------------------------- Main Program ----------------------------------- float fch1 = 126, foff = -6, tsamp = 5e-6, dmstep = 0.065, startdm = 0; int nchans = 128, nsamp = 1024, tdms = 128; int gridsize = 128, blocksize = 128; // Process command-line parameters void process_arguments(int argc, char *argv[]) { int i = 1; while((fopen(argv[i], "r")) != NULL) i++; while(i < argc) { if (!strcmp(argv[i], "-nchans")) nchans = atoi(argv[++i]); else if (!strcmp(argv[i], "-nsamp")) nsamp = atoi(argv[++i]); else if (!strcmp(argv[i], "-dmstep")) dmstep = atof(argv[++i]); else if (!strcmp(argv[i], "-startdm")) startdm = atof(argv[++i]); else if (!strcmp(argv[i], "-tdms")) tdms = atoi(argv[++i]); else if (!strcmp(argv[i], "-gridsize")) gridsize = atoi(argv[++i]); else if (!strcmp(argv[i], "-blocksize")) blocksize = atoi(argv[++i]); i++; } foff = foff / (float) nchans; tsamp = tsamp * nchans; } // Fill buffer with data (blocking call) void generate_data(float* buffer, int nsamp, int nchans) { for(int i = 0; i < nsamp * nchans; i++) buffer[i] = 01; } // DM delay calculation float dmdelay(float f1, float f2) { return(4148.741601 * ((1.0 / f1 / f1) - (1.0 / f2 / f2))); } int main(int argc, char *argv[]) { float *input, *output, *d_input, *d_output; int maxshift; process_arguments(argc, argv); printf("nsamp: %d, nchans: %d, tsamp: %f, startdm: %f, dmstep: %f, tdms: %d, fch1: %f, foff: %f\n", nsamp, nchans, tsamp, startdm, dmstep, tdms, fch1, foff); // Calculate temporary DM-shifts float *dmshifts = (float *) malloc(nchans * sizeof(float)); for (unsigned i = 0; i < nchans; i++) dmshifts[i] = dmdelay(fch1 + (foff * i), fch1); // Calculate maxshift maxshift = dmshifts[nchans - 1] * (startdm + dmstep * tdms) / tsamp; // Initialise input buffer input = (float *) malloc( (nsamp + maxshift) * nchans * sizeof(float)); output = (float *) malloc( nsamp * tdms * sizeof(float)); // Allocate arrays input = (float *) malloc( (nsamp + maxshift) * nchans * sizeof(float)); output = (float *) malloc( nsamp * tdms * sizeof(float)); // Initialise CUDA stuff ( hipSetDevice(0)); hipEvent_t event_start, event_stop; float timestamp, kernelTime; dim3 gridDim(gridsize, tdms); hipEventCreate(&event_start); hipEventCreate(&event_stop); // Allocate CUDA memory and copy dmshifts 
    ( hipMalloc((void **) &d_input, (nsamp + maxshift) * nchans * sizeof(float)));
    ( hipMalloc((void **) &d_output, nsamp * tdms * sizeof(float)));
    ( hipMemset(d_output, 0, nsamp * tdms * sizeof(float)));

    // dm_shifts holds nchans float delays, so copy nchans floats into constant memory
    ( hipMemcpyToSymbol(dm_shifts, dmshifts, nchans * sizeof(float)) );

    time_t start = time(NULL);

    // Copy input to GPU
    hipEventRecord(event_start, 0);
    ( hipMemcpy(d_input, input, (nsamp + maxshift) * nchans * sizeof(float), hipMemcpyHostToDevice) );
    hipEventRecord(event_stop, 0);
    hipEventSynchronize(event_stop);
    hipEventElapsedTime(&timestamp, event_start, event_stop);
    printf("Copied to GPU in: %lf\n", timestamp);

    // Dedisperse
    hipEventRecord(event_start, 0);
    hipLaunchKernelGGL(( dedisperse_loop), dim3(gridDim), dim3(blocksize) , 0, 0, d_output, d_input, nsamp, nchans, tsamp, 1, startdm, dmstep, 0, 0);
    hipEventRecord(event_stop, 0);
    hipEventSynchronize(event_stop);
    hipEventElapsedTime(&timestamp, event_start, event_stop);
    printf("Processed in: %lf\n", timestamp);
    kernelTime = timestamp;

    // Copy output from GPU
    hipEventRecord(event_start, 0);
    ( hipMemcpy(output, d_output, nsamp * tdms * sizeof(float), hipMemcpyDeviceToHost) );
    hipEventRecord(event_stop, 0);
    hipEventSynchronize(event_stop);
    hipEventElapsedTime(&timestamp, event_start, event_stop);
    printf("Copied from GPU in: %lf\n", timestamp);

    printf("Total time: %d\n", (int) (time(NULL) - start));
    printf("Performance: %lf Gflops\n", (nchans * tdms) * (nsamp * 1.0 / kernelTime / 1.0e6));
}
26724dd8dc28844403365e2f2ebcd56ff106943c.cu
#include <math.h> #include <stdlib.h> #include <stdio.h> #include "unistd.h" #include "time.h" #include "string.h" // Stores output value computed in inner loop for each thread __shared__ float localvalue[4008]; // Stores temporary shift values __constant__ float dm_shifts[1024]; // -------------------------- The Dedispersion Loop ----------------------------------- __global__ void dedisperse_loop(float *outuff, float *buff, int nsamp, int nchans, float tsamp, int chanfactor, float startdm, float dmstep, int inshift, int outshift) { int samp, s, c, indx, soffset; float shift_temp; /* dedispersing loop over all samples in this buffer */ s = threadIdx.x + blockIdx.x * blockDim.x; shift_temp = (startdm + blockIdx.y * dmstep) / tsamp; for (samp = 0; s + samp < nsamp; samp += blockDim.x * gridDim.x) { soffset = (s + samp); /* clear array element for storing dedispersed subband */ localvalue[threadIdx.x] = 0.0; /* loop over the channels */ for (c = 0; c < nchans; c ++) { indx = (soffset + (int)(dm_shifts[c * chanfactor] * shift_temp)) * nchans + c; localvalue[threadIdx.x] += buff[inshift + indx]; } outuff[outshift + blockIdx.y * nsamp + soffset] = localvalue[threadIdx.x]; } } // -------------------------- Main Program ----------------------------------- float fch1 = 126, foff = -6, tsamp = 5e-6, dmstep = 0.065, startdm = 0; int nchans = 128, nsamp = 1024, tdms = 128; int gridsize = 128, blocksize = 128; // Process command-line parameters void process_arguments(int argc, char *argv[]) { int i = 1; while((fopen(argv[i], "r")) != NULL) i++; while(i < argc) { if (!strcmp(argv[i], "-nchans")) nchans = atoi(argv[++i]); else if (!strcmp(argv[i], "-nsamp")) nsamp = atoi(argv[++i]); else if (!strcmp(argv[i], "-dmstep")) dmstep = atof(argv[++i]); else if (!strcmp(argv[i], "-startdm")) startdm = atof(argv[++i]); else if (!strcmp(argv[i], "-tdms")) tdms = atoi(argv[++i]); else if (!strcmp(argv[i], "-gridsize")) gridsize = atoi(argv[++i]); else if (!strcmp(argv[i], "-blocksize")) blocksize = atoi(argv[++i]); i++; } foff = foff / (float) nchans; tsamp = tsamp * nchans; } // Fill buffer with data (blocking call) void generate_data(float* buffer, int nsamp, int nchans) { for(int i = 0; i < nsamp * nchans; i++) buffer[i] = 01; } // DM delay calculation float dmdelay(float f1, float f2) { return(4148.741601 * ((1.0 / f1 / f1) - (1.0 / f2 / f2))); } int main(int argc, char *argv[]) { float *input, *output, *d_input, *d_output; int maxshift; process_arguments(argc, argv); printf("nsamp: %d, nchans: %d, tsamp: %f, startdm: %f, dmstep: %f, tdms: %d, fch1: %f, foff: %f\n", nsamp, nchans, tsamp, startdm, dmstep, tdms, fch1, foff); // Calculate temporary DM-shifts float *dmshifts = (float *) malloc(nchans * sizeof(float)); for (unsigned i = 0; i < nchans; i++) dmshifts[i] = dmdelay(fch1 + (foff * i), fch1); // Calculate maxshift maxshift = dmshifts[nchans - 1] * (startdm + dmstep * tdms) / tsamp; // Initialise input buffer input = (float *) malloc( (nsamp + maxshift) * nchans * sizeof(float)); output = (float *) malloc( nsamp * tdms * sizeof(float)); // Allocate arrays input = (float *) malloc( (nsamp + maxshift) * nchans * sizeof(float)); output = (float *) malloc( nsamp * tdms * sizeof(float)); // Initialise CUDA stuff ( cudaSetDevice(0)); cudaEvent_t event_start, event_stop; float timestamp, kernelTime; dim3 gridDim(gridsize, tdms); cudaEventCreate(&event_start); cudaEventCreate(&event_stop); // Allocate CUDA memory and copy dmshifts ( cudaMalloc((void **) &d_input, (nsamp + maxshift) * nchans * sizeof(float))); ( 
cudaMalloc((void **) &d_output, nsamp * tdms * sizeof(float)));
    ( cudaMemset(d_output, 0, nsamp * tdms * sizeof(float)));

    // dm_shifts holds nchans float delays, so copy nchans floats into constant memory
    ( cudaMemcpyToSymbol(dm_shifts, dmshifts, nchans * sizeof(float)) );

    time_t start = time(NULL);

    // Copy input to GPU
    cudaEventRecord(event_start, 0);
    ( cudaMemcpy(d_input, input, (nsamp + maxshift) * nchans * sizeof(float), cudaMemcpyHostToDevice) );
    cudaEventRecord(event_stop, 0);
    cudaEventSynchronize(event_stop);
    cudaEventElapsedTime(&timestamp, event_start, event_stop);
    printf("Copied to GPU in: %lf\n", timestamp);

    // Dedisperse
    cudaEventRecord(event_start, 0);
    dedisperse_loop<<<gridDim, blocksize >>>(d_output, d_input, nsamp, nchans, tsamp, 1, startdm, dmstep, 0, 0);
    cudaEventRecord(event_stop, 0);
    cudaEventSynchronize(event_stop);
    cudaEventElapsedTime(&timestamp, event_start, event_stop);
    printf("Processed in: %lf\n", timestamp);
    kernelTime = timestamp;

    // Copy output from GPU
    cudaEventRecord(event_start, 0);
    ( cudaMemcpy(output, d_output, nsamp * tdms * sizeof(float), cudaMemcpyDeviceToHost) );
    cudaEventRecord(event_stop, 0);
    cudaEventSynchronize(event_stop);
    cudaEventElapsedTime(&timestamp, event_start, event_stop);
    printf("Copied from GPU in: %lf\n", timestamp);

    printf("Total time: %d\n", (int) (time(NULL) - start));
    printf("Performance: %lf Gflops\n", (nchans * tdms) * (nsamp * 1.0 / kernelTime / 1.0e6));
}
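// Hypothetical CPU reference for a single DM trial (names are illustrative,
// not from the files above), mirroring the kernel's indexing: the input is
// stored sample-major with nchans values per sample and nsamp + maxshift
// samples in total, and shifts[] holds the per-channel delay in seconds per
// unit DM, as computed by dmdelay().
static void dedisperse_reference(const float *in, float *out, const float *shifts,
                                 int nsamp, int nchans, float dm, float tsamp)
{
    for (int s = 0; s < nsamp; s++) {
        float sum = 0.0f;
        for (int c = 0; c < nchans; c++) {
            int shift = (int)(shifts[c] * dm / tsamp);   /* delay of channel c in samples */
            sum += in[(s + shift) * nchans + c];
        }
        out[s] = sum;
    }
}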
fa4388b7bd8818ac1256ad9101c0a5d4f9376a64.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * CuDNNBatchNorm Layer * * Created on: March 1, 2019 * Author: hujie */ #ifdef USE_CUDNN #include "caffe/layers/cudnn_batch_norm_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> void CuDNNBatchNormLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* scale_data = this->blobs_[0]->gpu_data(); const Dtype* bias_data = this->blobs_[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); double epsilon = max(this->eps_, CUDNN_BN_MIN_EPSILON); if (this->phase_ == TEST || this->frozen_) { CUDNN_CHECK(cudnnBatchNormalizationForwardInference( this->handle_, CUDNN_BATCHNORM_SPATIAL, cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero, bottom_desc_, bottom_data, bottom_desc_, top_data, scale_bias_mean_var_desc_, scale_data, bias_data, this->blobs_[2]->gpu_data(), this->blobs_[3]->gpu_data(), epsilon)); } else { Dtype* save_mean = save_mean_.mutable_gpu_data(); Dtype* save_inv_var = save_inv_var_.mutable_gpu_data(); if (inplace_) { caffe_copy(bottom[0]->count(), bottom_data, input_dup_.mutable_gpu_data()); } CUDNN_CHECK(cudnnBatchNormalizationForwardTraining( this->handle_, mode_, cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero, bottom_desc_, bottom_data, bottom_desc_, top_data, scale_bias_mean_var_desc_, scale_data, bias_data, 1 - this->momentum_, this->blobs_[2]->mutable_gpu_data(), this->blobs_[3]->mutable_gpu_data(), epsilon, save_mean, save_inv_var)); } } template <typename Dtype> __global__ void combine_scale(const int count, const double eps, const Dtype* scale, const Dtype* var, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { out[index] = scale[index] / sqrt(var[index] + eps); } } template <typename Dtype> __global__ void cudnnBatchNormalizationBackwardFrozen(const int count, const int c, const int dim, const Dtype* scale, const Dtype* top_diff, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, count) { const int id = index / dim % c; bottom_diff[index] = scale[id] * top_diff[index]; } } template <typename Dtype> void CuDNNBatchNormLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = inplace_? 
input_dup_.gpu_data() : bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* scale_data = this->blobs_[0]->gpu_data(); double epsilon = max(this->eps_, CUDNN_BN_MIN_EPSILON); if (frozen_) { if (!combined_) { const int count = this->blobs_[0]->count(); hipLaunchKernelGGL(( combine_scale<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, epsilon, scale_data, this->blobs_[3]->gpu_data(), combined_scale_.mutable_gpu_data()); combined_ = true; } const int count = bottom[0]->count(); hipLaunchKernelGGL(( cudnnBatchNormalizationBackwardFrozen<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom[0]->shape(1), bottom[0]->count(2), combined_scale_.gpu_data(), top_diff, bottom_diff); } else { const Dtype* save_mean = save_mean_.gpu_data(); const Dtype* save_inv_var = save_inv_var_.gpu_data(); Dtype* scale_diff = this->blobs_[0]->mutable_gpu_diff(); Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); CUDNN_CHECK(cudnnBatchNormalizationBackward( this->handle_, mode_, cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero, #if CUDNN_VERSION >= 4005 cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::one, #endif bottom_desc_, bottom_data, bottom_desc_, top_diff, bottom_desc_, bottom_diff, scale_bias_mean_var_desc_, scale_data, scale_diff, bias_diff, epsilon, save_mean, save_inv_var)); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNBatchNormLayer); } // namespace caffe #endif
fa4388b7bd8818ac1256ad9101c0a5d4f9376a64.cu
/* * CuDNNBatchNorm Layer * * Created on: March 1, 2019 * Author: hujie */ #ifdef USE_CUDNN #include "caffe/layers/cudnn_batch_norm_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> void CuDNNBatchNormLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* scale_data = this->blobs_[0]->gpu_data(); const Dtype* bias_data = this->blobs_[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); double epsilon = max(this->eps_, CUDNN_BN_MIN_EPSILON); if (this->phase_ == TEST || this->frozen_) { CUDNN_CHECK(cudnnBatchNormalizationForwardInference( this->handle_, CUDNN_BATCHNORM_SPATIAL, cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero, bottom_desc_, bottom_data, bottom_desc_, top_data, scale_bias_mean_var_desc_, scale_data, bias_data, this->blobs_[2]->gpu_data(), this->blobs_[3]->gpu_data(), epsilon)); } else { Dtype* save_mean = save_mean_.mutable_gpu_data(); Dtype* save_inv_var = save_inv_var_.mutable_gpu_data(); if (inplace_) { caffe_copy(bottom[0]->count(), bottom_data, input_dup_.mutable_gpu_data()); } CUDNN_CHECK(cudnnBatchNormalizationForwardTraining( this->handle_, mode_, cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero, bottom_desc_, bottom_data, bottom_desc_, top_data, scale_bias_mean_var_desc_, scale_data, bias_data, 1 - this->momentum_, this->blobs_[2]->mutable_gpu_data(), this->blobs_[3]->mutable_gpu_data(), epsilon, save_mean, save_inv_var)); } } template <typename Dtype> __global__ void combine_scale(const int count, const double eps, const Dtype* scale, const Dtype* var, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { out[index] = scale[index] / sqrt(var[index] + eps); } } template <typename Dtype> __global__ void cudnnBatchNormalizationBackwardFrozen(const int count, const int c, const int dim, const Dtype* scale, const Dtype* top_diff, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, count) { const int id = index / dim % c; bottom_diff[index] = scale[id] * top_diff[index]; } } template <typename Dtype> void CuDNNBatchNormLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = inplace_? 
input_dup_.gpu_data() : bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* scale_data = this->blobs_[0]->gpu_data(); double epsilon = max(this->eps_, CUDNN_BN_MIN_EPSILON); if (frozen_) { if (!combined_) { const int count = this->blobs_[0]->count(); combine_scale<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, epsilon, scale_data, this->blobs_[3]->gpu_data(), combined_scale_.mutable_gpu_data()); combined_ = true; } const int count = bottom[0]->count(); cudnnBatchNormalizationBackwardFrozen<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, bottom[0]->shape(1), bottom[0]->count(2), combined_scale_.gpu_data(), top_diff, bottom_diff); } else { const Dtype* save_mean = save_mean_.gpu_data(); const Dtype* save_inv_var = save_inv_var_.gpu_data(); Dtype* scale_diff = this->blobs_[0]->mutable_gpu_diff(); Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); CUDNN_CHECK(cudnnBatchNormalizationBackward( this->handle_, mode_, cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero, #if CUDNN_VERSION >= 4005 cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::one, #endif bottom_desc_, bottom_data, bottom_desc_, top_diff, bottom_desc_, bottom_diff, scale_bias_mean_var_desc_, scale_data, scale_diff, bias_diff, epsilon, save_mean, save_inv_var)); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNBatchNormLayer); } // namespace caffe #endif
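#include <cmath>

// Illustrative CPU version of the frozen-path math above, for one image in
// NCHW layout: gamma is folded with the running variance once, and the
// backward pass is then an elementwise per-channel rescale of the incoming
// gradient. The function name and signature are assumptions, not part of the
// layer's API.
void frozen_bn_backward_reference(const float* gamma, const float* running_var,
                                  const float* top_diff, float* bottom_diff,
                                  int channels, int spatial, double eps)
{
    for (int c = 0; c < channels; ++c) {
        float combined = gamma[c] / std::sqrt(running_var[c] + static_cast<float>(eps));
        for (int i = 0; i < spatial; ++i)
            bottom_diff[c * spatial + i] = combined * top_diff[c * spatial + i];
    }
}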
565f7e27f13b46530d5ce062e81025acf61f6cf1.hip
// !!! This is a file automatically generated by hipify!!!
#include "..\Prerequisites.h"

void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
    char const * const errId = "GTOM:Transform:Warp2D:InvalidInput";
    mxInitGPU();

    if (nrhs != 3)
        mexErrMsgIdAndTxt(errId, "Wrong parameter count (3 expected).");

    mxArrayAdapter image(prhs[0]);
    int3 dimsimage = MWDimsToInt3(mxGetNumberOfDimensions(image.underlyingarray), mxGetDimensions(image.underlyingarray));
    uint nimages = dimsimage.z;
    dimsimage.z = 1;
    tfloat* d_image = image.GetAsManagedDeviceTFloat();

    mxArrayAdapter gridx(prhs[1]);
    int3 dimsgridx = MWDimsToInt3(mxGetNumberOfDimensions(gridx.underlyingarray), mxGetDimensions(gridx.underlyingarray));
    if (dimsgridx.z != nimages)
        mexErrMsgIdAndTxt(errId, "Number of warp grids should match number of images.");
    tfloat* h_gridx = gridx.GetAsManagedTFloat();

    mxArrayAdapter gridy(prhs[2]);
    int3 dimsgridy = MWDimsToInt3(mxGetNumberOfDimensions(gridy.underlyingarray), mxGetDimensions(gridy.underlyingarray));
    if (dimsgridy.z != nimages)
        mexErrMsgIdAndTxt(errId, "Number of warp grids should match number of images.");
    tfloat* h_gridy = gridy.GetAsManagedTFloat();

    int2 dimsgrid = toInt2(dimsgridx);
    tfloat2* h_grid = (tfloat2*)malloc(Elements2(dimsgrid) * nimages * sizeof(tfloat2));
    for (uint i = 0; i < Elements2(dimsgrid) * nimages; i++)
        h_grid[i] = tfloat2(h_gridx[i], h_gridy[i]);
    tfloat2* d_grid = (tfloat2*)CudaMallocFromHostArray(h_grid, Elements2(dimsgrid) * nimages * sizeof(tfloat2));
    free(h_grid);

    tfloat* d_output;
    hipMalloc((void**)&d_output, Elements2(dimsimage) * nimages * sizeof(tfloat));

    d_Warp2D(d_image, toInt2(dimsimage), d_grid, dimsgrid, d_output, nimages);

    mwSize outputdims[3];
    outputdims[0] = dimsimage.x;
    outputdims[1] = dimsimage.y;
    outputdims[2] = nimages;
    mxArrayAdapter A(mxCreateNumericArray(3, outputdims, mxSINGLE_CLASS, mxREAL));
    A.SetFromDeviceTFloat(d_output);
    plhs[0] = A.underlyingarray;

    hipFree(d_output);
    hipFree(d_grid);
}
565f7e27f13b46530d5ce062e81025acf61f6cf1.cu
#include "..\Prerequisites.h" void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { char const * const errId = "GTOM:Transform:Warp2D:InvalidInput"; mxInitGPU(); if (nrhs != 3) mexErrMsgIdAndTxt(errId, "Wrong parameter count (3 expected)."); mxArrayAdapter image(prhs[0]); int3 dimsimage = MWDimsToInt3(mxGetNumberOfDimensions(image.underlyingarray), mxGetDimensions(image.underlyingarray)); uint nimages = dimsimage.z; dimsimage.z = 1; tfloat* d_image = image.GetAsManagedDeviceTFloat(); mxArrayAdapter gridx(prhs[1]); int3 dimsgridx = MWDimsToInt3(mxGetNumberOfDimensions(gridx.underlyingarray), mxGetDimensions(gridx.underlyingarray)); if (dimsgridx.z != nimages) mexErrMsgIdAndTxt(errId, "Number of warp grids should match number of images."); tfloat* h_gridx = gridx.GetAsManagedTFloat(); mxArrayAdapter gridy(prhs[2]); int3 dimsgridy = MWDimsToInt3(mxGetNumberOfDimensions(gridy.underlyingarray), mxGetDimensions(gridy.underlyingarray)); if (dimsgridy.z != nimages) mexErrMsgIdAndTxt(errId, "Number of warp grids should match number of images."); tfloat* h_gridy = gridy.GetAsManagedTFloat(); int2 dimsgrid = toInt2(dimsgridx); tfloat2* h_grid = (tfloat2*)malloc(Elements2(dimsgrid) * nimages * sizeof(tfloat2)); for (uint i = 0; i < Elements2(dimsgrid) * nimages; i++) h_grid[i] = tfloat2(h_gridx[i], h_gridy[i]); tfloat2* d_grid = (tfloat2*)CudaMallocFromHostArray(h_grid, Elements2(dimsgrid) * nimages * sizeof(tfloat2)); free(h_grid); tfloat* d_output; cudaMalloc((void**)&d_output, Elements2(dimsimage) * nimages * sizeof(tfloat)); d_Warp2D(d_image, toInt2(dimsimage), d_grid, dimsgrid, d_output, nimages); mwSize outputdims[3]; outputdims[0] = dimsimage.x; outputdims[1] = dimsimage.y; outputdims[2] = nimages; mxArrayAdapter A(mxCreateNumericArray(3, outputdims, mxSINGLE_CLASS, mxREAL)); A.SetFromDeviceTFloat(d_output); plhs[0] = A.underlyingarray; cudaFree(d_output); cudaFree(d_grid); }
e2516b4d7fe68d77a91ea75433e3a488529fad84.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include <stdio.h> #include <math.h> #include <float.h> #include <vector> #include "corr_proj.h" #include <stdio.h> #define ROUND_OFF 50000 #define CUDA_NUM_THREADS 1024 #define WARPS_PER_BLOCK 1 #define THREADS_PER_WARP 32 #define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) #define GET_BLOCKS(n, t) (n+t-1) / t // == Dimension rearrangement Kernel // == Correlation Kernel template <typename scalar_t> __global__ void CorrelateData(torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input1, //query t: N, H, W, C1 torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> input2, // scene : N, L, H, W, C1 torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> query_coords,// query coords: N, 3, H, W torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> scene_coords, // scene coords: N, L, 3, H, W torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> scene_P, //projection matrix: N, L, 3, 4 torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> corr1, // corr1 : N, M, H, W torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> return_coords, // coords : N, M, 3, H, W torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> mask, // corr1 : N, M, H, W const int neighborhood_grid_radius, const int neighborhood_grid_width, int stride){ extern __shared__ char patch_data_char[]; scalar_t *feat1_data = (scalar_t *)patch_data_char; int x1 = blockIdx.x; int y1 = blockIdx.y; int item = blockIdx.z; int ch_off = threadIdx.x; for(int ch = ch_off; ch < input1.size(3); ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS feat1_data[ch] = input1[item][y1][x1][ch]; } __syncthreads(); __shared__ scalar_t sum[WARPS_PER_BLOCK*THREADS_PER_WARP]; scalar_t x_3d=query_coords[item][0][y1][x1]; scalar_t y_3d=query_coords[item][1][y1][x1]; scalar_t z_3d=query_coords[item][2][y1][x1]; for(int l=0;l<input2.size(1);l++){ scalar_t center_x3d=scene_P[item][l][0][0]*x_3d+scene_P[item][l][0][1]*y_3d+scene_P[item][l][0][2]*z_3d+scene_P[item][l][0][3]; scalar_t center_y3d=scene_P[item][l][1][0]*x_3d+scene_P[item][l][1][1]*y_3d+scene_P[item][l][1][2]*z_3d+scene_P[item][l][1][3]; scalar_t center_z3d=scene_P[item][l][2][0]*x_3d+scene_P[item][l][2][1]*y_3d+scene_P[item][l][2][2]*z_3d+scene_P[item][l][2][3]; int center_x=round(center_x3d/(center_z3d+1e-5)); int center_y=round(center_y3d/(center_z3d+1e-5)); int min_x = center_x -neighborhood_grid_radius* stride; int max_x = center_x + neighborhood_grid_radius * stride; int min_y = center_y - neighborhood_grid_radius * stride; int max_y = center_y + neighborhood_grid_radius * stride; min_x = min_x>=0?min_x:min_x+((-min_x-1)/stride+1)*stride; max_x = max_x<input2.size(3)?max_x:(input2.size(3)-1); min_y = min_y>=0?min_y:min_y+((-min_y-1)/stride+1)*stride; max_y = max_y<input2.size(2)?max_y:(input2.size(2)-1); for(int y2 = min_y; y2 <= max_y; y2+=stride){ for(int x2 = min_x; x2 <= max_x; x2+=stride){ sum[ch_off]=0; for(int ch = ch_off; ch < input1.size(3); ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { sum[ch_off]+=feat1_data[ch]*input2[item][l][y2][x2][ch]; } if(ch_off==0){ scalar_t total_sum = 0; for(int idx = 0; idx < WARPS_PER_BLOCK*THREADS_PER_WARP; idx++) { total_sum += sum[idx]; } int m = 
(((y2-center_y)/stride+neighborhood_grid_radius)*neighborhood_grid_width+(x2-center_x)/stride+neighborhood_grid_radius)*input2.size(1)+l; corr1[item][m][y1][x1]=total_sum; mask[item][m][y1][x1]=1; for(int i=0;i<3;i++){ return_coords[item][m][i][y1][x1]=scene_coords[item][l][i][y2][x2]; } } } } } // Aggregate } std::vector<torch::Tensor> CorrelateData_ongpu(torch::Tensor input1, //query t: N, H, W, C1 torch::Tensor input2, //scene : N, L, H, W, C1 torch::Tensor query_coords, // scene coords: N, 3, H, W torch::Tensor scene_coords, // scene coords: N, L, 3, H, W torch::Tensor scene_P, //sceneTcw*K: N, L, 3, 4 int max_displacement, int stride){ const auto N = input1.size(0); const auto H = input1.size(1); const auto W = input1.size(2); const auto C1 = input1.size(3); const auto L = input2.size(1); const int neighborhood_grid_radius_ = max_displacement/stride; const int neighborhood_grid_width_ = neighborhood_grid_radius_ * 2 + 1; auto corr1 = torch::zeros({N, L*neighborhood_grid_width_*neighborhood_grid_width_, H, W},torch::device(torch::kCUDA)); auto return_coords = torch::zeros({N, L*neighborhood_grid_width_*neighborhood_grid_width_, 3, H, W},torch::device(torch::kCUDA)); auto mask = torch::zeros({N, L*neighborhood_grid_width_*neighborhood_grid_width_, H, W},torch::device(torch::kCUDA)); int shared_memory_per_block = C1; dim3 totalBlocksCorr(W, H, N); dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK); AT_DISPATCH_FLOATING_TYPES(input1.type(), "CorrelateData_ongpu", ([&] { hipLaunchKernelGGL(( CorrelateData<scalar_t>), dim3(totalBlocksCorr), dim3(threadsPerBlock), shared_memory_per_block * sizeof(scalar_t), 0, input1.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(), input2.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(), query_coords.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(), scene_coords.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(), scene_P.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(), corr1.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(), return_coords.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(), mask.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(), neighborhood_grid_radius_, neighborhood_grid_width_, stride); })); return {corr1, return_coords, mask}; } template <typename scalar_t> __global__ void CorrelateDataBackward( torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> corr_grad, //corr grad: N, M, H, W torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input1, //query t: N, H, W, C1 torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> input2, // scene : N, L, H, W, C1 torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> query_coords,// query coords: N, 3, H, W torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> scene_P, //projection matrix: N, L, 3, 4 torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input1_grad, //N, H, W, C1 torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> input2_grad, //N, L, H, W, C1 const int neighborhood_grid_radius, const int neighborhood_grid_width, int stride, int item){ extern __shared__ char patch_data_char[]; int x1 = blockIdx.x; int y1 = blockIdx.y; int c = blockIdx.z; int ch_off = threadIdx.x; __shared__ scalar_t sum[THREADS_PER_WARP]; sum[ch_off]=0; scalar_t x_3d=query_coords[item][0][y1][x1]; scalar_t y_3d=query_coords[item][1][y1][x1]; scalar_t z_3d=query_coords[item][2][y1][x1]; for(int m=ch_off;m<corr_grad.size(1);m+=THREADS_PER_WARP){ int 
l=m%input2.size(1); scalar_t center_x3d=scene_P[item][l][0][0]*x_3d+scene_P[item][l][0][1]*y_3d+scene_P[item][l][0][2]*z_3d+scene_P[item][l][0][3]; scalar_t center_y3d=scene_P[item][l][1][0]*x_3d+scene_P[item][l][1][1]*y_3d+scene_P[item][l][1][2]*z_3d+scene_P[item][l][1][3]; scalar_t center_z3d=scene_P[item][l][2][0]*x_3d+scene_P[item][l][2][1]*y_3d+scene_P[item][l][2][2]*z_3d+scene_P[item][l][2][3]; int center_x=round(center_x3d/(center_z3d+1e-5)); int center_y=round(center_y3d/(center_z3d+1e-5)); int x2=(m/input2.size(1)%neighborhood_grid_width-neighborhood_grid_radius)*stride+center_x; int y2=(m/input2.size(1)/neighborhood_grid_width-neighborhood_grid_radius)*stride+center_y; if(y2>=0&&y2<input2.size(2)&&x2>=0&&x2<input2.size(3)){ sum[ch_off] += input2[item][l][y2][x2][c]*corr_grad[item][m][y1][x1]; atomicAdd(&input2_grad[item][l][y2][x2][c], corr_grad[item][m][y1][x1]*input1[item][y1][x1][c]); } } if(ch_off==0){ scalar_t total_sum = 0; for(int idx = 0; idx < THREADS_PER_WARP; idx++) { total_sum += sum[idx]; } input1_grad[item][y1][x1][c]=total_sum; } // Aggregate } std::vector<torch::Tensor> CorrelateData_backward_ongpu(torch::Tensor grad_output1, //query t: N, H, W, C1 torch::Tensor input1, //query : N, H, W, C1 torch::Tensor input2, // scene : N, L, H, W, C1 torch::Tensor query_coords, // query coords: N, L, 3, H, W torch::Tensor scene_P, //sceneTcw*K: N, L, 3, 4 int max_displacement, int stride){ const auto N = input1.size(0); const auto H = input1.size(1); const auto W = input1.size(2); const auto C1 = input1.size(3); const auto L = input2.size(1); const auto SH = input2.size(2); const auto SW = input2.size(3); const int neighborhood_grid_radius_ = max_displacement; const int neighborhood_grid_width_ = neighborhood_grid_radius_ * 2 + 1; auto input1_grad = torch::zeros({N, H, W, C1},torch::device(torch::kCUDA)); auto input2_grad = torch::zeros({N, L, SH, SW, C1},torch::device(torch::kCUDA)); int shared_memory_per_block = C1; dim3 totalBlocksCorr(W, H, C1); dim3 threadsPerBlock(THREADS_PER_WARP); for(int n=0;n<N;n++){ AT_DISPATCH_FLOATING_TYPES(input1.type(), "CorrelateDatabackward_ongpu", ([&] { hipLaunchKernelGGL(( CorrelateDataBackward<scalar_t>), dim3(totalBlocksCorr), dim3(threadsPerBlock), shared_memory_per_block * sizeof(scalar_t), 0, grad_output1.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(), input1.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(), input2.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(), query_coords.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(), scene_P.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(), input1_grad.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(), input2_grad.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(), neighborhood_grid_radius_, neighborhood_grid_width_, stride, n); })); }; // return {input1_grad, input2_grad}; }
e2516b4d7fe68d77a91ea75433e3a488529fad84.cu
#include <vector>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "corr_proj.h"

#define ROUND_OFF 50000

#define CUDA_NUM_THREADS 1024
#define WARPS_PER_BLOCK 1
#define THREADS_PER_WARP 32

#define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)

#define GET_BLOCKS(n, t) (n+t-1) / t

// == Dimension rearrangement Kernel
// == Correlation Kernel
template <typename scalar_t>
__global__ void CorrelateData(torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input1,        // query features: N, H, W, C1
                              torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> input2,        // scene features: N, L, H, W, C1
                              torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> query_coords,  // query coords: N, 3, H, W
                              torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> scene_coords,  // scene coords: N, L, 3, H, W
                              torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> scene_P,       // projection matrix: N, L, 3, 4
                              torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> corr1,         // correlation: N, M, H, W
                              torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> return_coords, // coords: N, M, 3, H, W
                              torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> mask,          // mask: N, M, H, W
                              const int neighborhood_grid_radius,
                              const int neighborhood_grid_width,
                              int stride) {
  extern __shared__ char patch_data_char[];
  scalar_t* feat1_data = (scalar_t*)patch_data_char;

  int x1 = blockIdx.x;
  int y1 = blockIdx.y;
  int item = blockIdx.z;
  int ch_off = threadIdx.x;

  // Stage the query feature vector at (y1, x1) in shared memory.
  for (int ch = ch_off; ch < input1.size(3); ch += (WARPS_PER_BLOCK * THREADS_PER_WARP)) { // CHANNELS
    feat1_data[ch] = input1[item][y1][x1][ch];
  }
  __syncthreads();

  __shared__ scalar_t sum[WARPS_PER_BLOCK * THREADS_PER_WARP];

  scalar_t x_3d = query_coords[item][0][y1][x1];
  scalar_t y_3d = query_coords[item][1][y1][x1];
  scalar_t z_3d = query_coords[item][2][y1][x1];

  for (int l = 0; l < input2.size(1); l++) {
    // Project the query 3-D point into scene view l.
    scalar_t center_x3d = scene_P[item][l][0][0]*x_3d + scene_P[item][l][0][1]*y_3d + scene_P[item][l][0][2]*z_3d + scene_P[item][l][0][3];
    scalar_t center_y3d = scene_P[item][l][1][0]*x_3d + scene_P[item][l][1][1]*y_3d + scene_P[item][l][1][2]*z_3d + scene_P[item][l][1][3];
    scalar_t center_z3d = scene_P[item][l][2][0]*x_3d + scene_P[item][l][2][1]*y_3d + scene_P[item][l][2][2]*z_3d + scene_P[item][l][2][3];
    int center_x = round(center_x3d / (center_z3d + 1e-5));
    int center_y = round(center_y3d / (center_z3d + 1e-5));

    // Clamp the search window to the image while staying on the stride grid.
    int min_x = center_x - neighborhood_grid_radius * stride;
    int max_x = center_x + neighborhood_grid_radius * stride;
    int min_y = center_y - neighborhood_grid_radius * stride;
    int max_y = center_y + neighborhood_grid_radius * stride;
    min_x = min_x >= 0 ? min_x : min_x + ((-min_x - 1) / stride + 1) * stride;
    max_x = max_x < input2.size(3) ? max_x : (input2.size(3) - 1);
    min_y = min_y >= 0 ? min_y : min_y + ((-min_y - 1) / stride + 1) * stride;
    max_y = max_y < input2.size(2) ? max_y : (input2.size(2) - 1);

    for (int y2 = min_y; y2 <= max_y; y2 += stride) {
      for (int x2 = min_x; x2 <= max_x; x2 += stride) {
        sum[ch_off] = 0;
        for (int ch = ch_off; ch < input1.size(3); ch += (WARPS_PER_BLOCK * THREADS_PER_WARP)) {
          sum[ch_off] += feat1_data[ch] * input2[item][l][y2][x2][ch];
        }

        if (ch_off == 0) {
          scalar_t total_sum = 0;
          for (int idx = 0; idx < WARPS_PER_BLOCK * THREADS_PER_WARP; idx++) {
            total_sum += sum[idx];
          }
          int m = (((y2 - center_y) / stride + neighborhood_grid_radius) * neighborhood_grid_width
                   + (x2 - center_x) / stride + neighborhood_grid_radius) * input2.size(1) + l;
          corr1[item][m][y1][x1] = total_sum;
          mask[item][m][y1][x1] = 1;
          for (int i = 0; i < 3; i++) {
            return_coords[item][m][i][y1][x1] = scene_coords[item][l][i][y2][x2];
          }
        }
      }
    }
  }
  // Aggregate
}

std::vector<torch::Tensor> CorrelateData_ongpu(torch::Tensor input1,       // query features: N, H, W, C1
                                               torch::Tensor input2,       // scene features: N, L, H, W, C1
                                               torch::Tensor query_coords, // query coords: N, 3, H, W
                                               torch::Tensor scene_coords, // scene coords: N, L, 3, H, W
                                               torch::Tensor scene_P,      // sceneTcw*K: N, L, 3, 4
                                               int max_displacement,
                                               int stride) {
  const auto N = input1.size(0);
  const auto H = input1.size(1);
  const auto W = input1.size(2);
  const auto C1 = input1.size(3);
  const auto L = input2.size(1);

  const int neighborhood_grid_radius_ = max_displacement / stride;
  const int neighborhood_grid_width_ = neighborhood_grid_radius_ * 2 + 1;

  auto corr1 = torch::zeros({N, L * neighborhood_grid_width_ * neighborhood_grid_width_, H, W}, torch::device(torch::kCUDA));
  auto return_coords = torch::zeros({N, L * neighborhood_grid_width_ * neighborhood_grid_width_, 3, H, W}, torch::device(torch::kCUDA));
  auto mask = torch::zeros({N, L * neighborhood_grid_width_ * neighborhood_grid_width_, H, W}, torch::device(torch::kCUDA));

  int shared_memory_per_block = C1;
  dim3 totalBlocksCorr(W, H, N);
  dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK);

  AT_DISPATCH_FLOATING_TYPES(input1.type(), "CorrelateData_ongpu", ([&] {
    CorrelateData<scalar_t><<<totalBlocksCorr, threadsPerBlock, shared_memory_per_block * sizeof(scalar_t)>>>(
        input1.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
        input2.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
        query_coords.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
        scene_coords.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
        scene_P.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
        corr1.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
        return_coords.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
        mask.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
        neighborhood_grid_radius_,
        neighborhood_grid_width_,
        stride);
  }));

  return {corr1, return_coords, mask};
}

template <typename scalar_t>
__global__ void CorrelateDataBackward(
    torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> corr_grad,    // correlation grad: N, M, H, W
    torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input1,       // query features: N, H, W, C1
    torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> input2,       // scene features: N, L, H, W, C1
    torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> query_coords, // query coords: N, 3, H, W
    torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> scene_P,      // projection matrix: N, L, 3, 4
    torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input1_grad,  // N, H, W, C1
    torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> input2_grad,  // N, L, H, W, C1
    const int neighborhood_grid_radius,
    const int neighborhood_grid_width,
    int stride,
    int item) {
  extern __shared__ char patch_data_char[];

  int x1 = blockIdx.x;
  int y1 = blockIdx.y;
  int c = blockIdx.z;
  int ch_off = threadIdx.x;

  __shared__ scalar_t sum[THREADS_PER_WARP];
  sum[ch_off] = 0;

  scalar_t x_3d = query_coords[item][0][y1][x1];
  scalar_t y_3d = query_coords[item][1][y1][x1];
  scalar_t z_3d = query_coords[item][2][y1][x1];

  // Each thread walks a strided subset of the M correlation channels.
  for (int m = ch_off; m < corr_grad.size(1); m += THREADS_PER_WARP) {
    int l = m % input2.size(1);
    scalar_t center_x3d = scene_P[item][l][0][0]*x_3d + scene_P[item][l][0][1]*y_3d + scene_P[item][l][0][2]*z_3d + scene_P[item][l][0][3];
    scalar_t center_y3d = scene_P[item][l][1][0]*x_3d + scene_P[item][l][1][1]*y_3d + scene_P[item][l][1][2]*z_3d + scene_P[item][l][1][3];
    scalar_t center_z3d = scene_P[item][l][2][0]*x_3d + scene_P[item][l][2][1]*y_3d + scene_P[item][l][2][2]*z_3d + scene_P[item][l][2][3];
    int center_x = round(center_x3d / (center_z3d + 1e-5));
    int center_y = round(center_y3d / (center_z3d + 1e-5));

    // Decode the (x2, y2) scene location that produced correlation channel m.
    int x2 = (m / input2.size(1) % neighborhood_grid_width - neighborhood_grid_radius) * stride + center_x;
    int y2 = (m / input2.size(1) / neighborhood_grid_width - neighborhood_grid_radius) * stride + center_y;

    if (y2 >= 0 && y2 < input2.size(2) && x2 >= 0 && x2 < input2.size(3)) {
      sum[ch_off] += input2[item][l][y2][x2][c] * corr_grad[item][m][y1][x1];
      atomicAdd(&input2_grad[item][l][y2][x2][c], corr_grad[item][m][y1][x1] * input1[item][y1][x1][c]);
    }
  }

  if (ch_off == 0) {
    scalar_t total_sum = 0;
    for (int idx = 0; idx < THREADS_PER_WARP; idx++) {
      total_sum += sum[idx];
    }
    input1_grad[item][y1][x1][c] = total_sum;
  }
  // Aggregate
}

std::vector<torch::Tensor> CorrelateData_backward_ongpu(torch::Tensor grad_output1, // correlation grad: N, M, H, W
                                                        torch::Tensor input1,       // query features: N, H, W, C1
                                                        torch::Tensor input2,       // scene features: N, L, H, W, C1
                                                        torch::Tensor query_coords, // query coords: N, 3, H, W
                                                        torch::Tensor scene_P,      // sceneTcw*K: N, L, 3, 4
                                                        int max_displacement,
                                                        int stride) {
  const auto N = input1.size(0);
  const auto H = input1.size(1);
  const auto W = input1.size(2);
  const auto C1 = input1.size(3);
  const auto L = input2.size(1);
  const auto SH = input2.size(2);
  const auto SW = input2.size(3);

  // Radius is measured in units of stride, matching the forward pass.
  const int neighborhood_grid_radius_ = max_displacement / stride;
  const int neighborhood_grid_width_ = neighborhood_grid_radius_ * 2 + 1;

  auto input1_grad = torch::zeros({N, H, W, C1}, torch::device(torch::kCUDA));
  auto input2_grad = torch::zeros({N, L, SH, SW, C1}, torch::device(torch::kCUDA));

  int shared_memory_per_block = C1;
  dim3 totalBlocksCorr(W, H, C1);
  dim3 threadsPerBlock(THREADS_PER_WARP);

  for (int n = 0; n < N; n++) {
    AT_DISPATCH_FLOATING_TYPES(input1.type(), "CorrelateDatabackward_ongpu", ([&] {
      CorrelateDataBackward<scalar_t><<<totalBlocksCorr, threadsPerBlock, shared_memory_per_block * sizeof(scalar_t)>>>(
          grad_output1.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
          input1.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
          input2.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
          query_coords.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
          scene_P.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
          input1_grad.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
          input2_grad.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
          neighborhood_grid_radius_,
          neighborhood_grid_width_,
          stride,
          n);
    }));
  }

  return {input1_grad, input2_grad};
}
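// ---------------------------------------------------------------------------
// Illustrative sketch only: how the two launchers above would typically be
// exposed to Python as a PyTorch C++/CUDA extension. The module and exported
// function names below are hypothetical and not part of the original source;
// only the CorrelateData_ongpu / CorrelateData_backward_ongpu signatures
// defined in this file are assumed.
// ---------------------------------------------------------------------------
#include <torch/extension.h>

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("corr_proj_forward", &CorrelateData_ongpu,
        "Projection-guided correlation, forward pass (CUDA)");
  m.def("corr_proj_backward", &CorrelateData_backward_ongpu,
        "Projection-guided correlation, backward pass (CUDA)");
}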
7feaad21e5602fb8d14711147da52b7123f8b2aa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <iostream> #include <assert.h> #include <nvmatrix_kernels.cuh> #include <nvmatrix.cuh> #include <conv_util.cuh> using namespace std; __device__ inline float square(const float a) { return a * a; } /* * blockIdx.y determines module in batches of B_Y * blockIdx.x determines filter in batches of B_X * filtersPerThread * * weights: (numModules, numColors, filterPixels, numFilters) * Not fully coalesced if B_X < 32, so use cache. */ template <int B_Y, int B_X, int filtersPerThread> __global__ void kNormalizeLCWeights(float* weights, const uint numFilters, const int numModules, const uint weightsPerFilter, const float norm) { const uint moduleIdx = B_Y * blockIdx.y + threadIdx.y; const uint filterIdx = B_X * blockIdx.x + threadIdx.x; float prod[filtersPerThread]; #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { prod[i] = 0; } if (moduleIdx < numModules) { weights += moduleIdx * weightsPerFilter * numFilters + filterIdx; for (uint p = 0; p < weightsPerFilter; ++p) { #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { prod[i] += square(weights[p * numFilters + i * B_X]); } } #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { prod[i] = sqrtf(prod[i]); prod[i] = prod[i] > norm ? __fdividef(norm, prod[i]) : 1.0f; } for (uint p = 0; p < weightsPerFilter; ++p) { #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { weights[p * numFilters + i * B_X] *= prod[i]; } } } } /* * weights: (numModules, numColors, filterPixels, numFilters) */ void normalizeLocalWeights(NVMatrix& weights, int numModules, float norm) { int numFilters = weights.getNumCols(); int weightsPerFilter = weights.getNumRows() / numModules; assert(numModules * weightsPerFilter == weights.getNumRows()); assert(!weights.isTrans()); assert(weights.isContiguous()); assert(numFilters % 16 == 0); int bx = numFilters % 32 == 0 ? 32 : 16; int by = bx == 32 ? 4 : 8; int filtersPerThread = numFilters % 128 == 0 ? 4 : numFilters % 64 == 0 ? 
2 : 1; dim3 blocks(numFilters / (bx * filtersPerThread), DIVUP(numModules, by)); dim3 threads(bx, by); if (filtersPerThread == 4) { hipFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 4>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kNormalizeLCWeights<4, 32, 4>), dim3(blocks), dim3(threads), 0, 0, weights.getDevData(), numFilters, numModules, weightsPerFilter, norm); } else if (filtersPerThread == 2) { hipFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 2>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kNormalizeLCWeights<4, 32, 2>), dim3(blocks), dim3(threads), 0, 0, weights.getDevData(), numFilters, numModules, weightsPerFilter, norm); } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 1>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kNormalizeLCWeights<4, 32, 1>), dim3(blocks), dim3(threads), 0, 0, weights.getDevData(), numFilters, numModules, weightsPerFilter, norm); } else { hipFuncSetCacheConfig(kNormalizeLCWeights<8, 16, 1>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kNormalizeLCWeights<8, 16, 1>), dim3(blocks), dim3(threads), 0, 0, weights.getDevData(), numFilters, numModules, weightsPerFilter, norm); } } } /* * Block size 4x32 * blockIdx.x determines img idx in batches of 32*imgsPerThread * blockIdx.y determines channel idx, pixel idx in batches of 4 * * threadIdx.x determins case idx * threadIdx.y determines pixel idx * * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) */ template <int imgsPerThread, bool checkCaseBounds> __global__ void kCrop(float* imgs, float* target, const uint numImages, const int imgStride, const uint imgSize, const uint tgtSize, const uint startY, const uint startX) { const uint imgPixels = imgSize * imgSize; const uint tgtPixels = tgtSize * tgtSize; const uint caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const uint blockChanIdx = blockIdx.y / DIVUP(tgtPixels, 4); const uint tgtPixelIdx = 4*(blockIdx.y % DIVUP(tgtPixels, 4)) + threadIdx.y; const uint tgtPxY = tgtPixelIdx / tgtSize; const uint tgtPxX = tgtPixelIdx % tgtSize; const uint srcPixelIdx = (startY + tgtPxY) * imgSize + startX + tgtPxX; if (tgtPixelIdx < tgtPixels) { imgs += (blockChanIdx * imgPixels + srcPixelIdx) * imgStride + caseIdx; target += (blockChanIdx * tgtPixels + tgtPixelIdx) * numImages + caseIdx; #pragma unroll for (uint i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || (caseIdx + 32 * i < numImages)) { target[i * 32] = imgs[i * 32]; } } } } /* * Block size 4x32 * blockIdx.y determines pixel idx in batches of 4 * blockIdx.x determines case idx in batches of 32*imgsPerThread * threadIdx.y determines pixel idx * threadIdx.x determines case idx * * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) * * Each thread produces (y,u,v) values for a particular (r,g,b) pixel * * The RGB --> YUV transform is (http://en.wikipedia.org/wiki/YUV): * * [Y] [0.2126 0.7152 0.0722 ][R] * [U] = [-0.09991 -0.33609 0.436 ][G] * [V] [0.615 -0.55861 -0.05639][B] */ template <int imgsPerThread, bool checkCaseBounds> __global__ void kRGBToYUV(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) { const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int pxIdx = blockIdx.y * 4 + threadIdx.y; if (pxIdx < imgPixels) { const int imgChannelStride = imgPixels * imgStride; const int tgtChannelStride = imgPixels * numImages; imgs += pxIdx * imgStride + caseIdx; target += pxIdx * numImages + caseIdx; #pragma unroll for 
(int i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || caseIdx + i * 32 < numImages) { const float R = imgs[0 * imgChannelStride + i * 32]; const float G = imgs[1 * imgChannelStride + i * 32]; const float B = imgs[2 * imgChannelStride + i * 32]; target[0 * tgtChannelStride + i * 32] = 0.2126f * R + 0.7152f * G + 0.0722f * B; // Y target[1 * tgtChannelStride + i * 32] = -0.09991f * R + -0.33609f * G + 0.436f * B; // U target[2 * tgtChannelStride + i * 32] = 0.615f * R + -0.55861f * G + -0.05639f * B; // V } } } } __device__ inline float labf(const float x) { if (x > 0.0088564517f) { return __powf(x, 0.3333f); } return 7.787037f * x + 0.13793103f; } /* * Block size 4x32 * blockIdx.y determines pixel idx in batches of 4 * blockIdx.x determines case idx in batches of 32*imgsPerThread * threadIdx.y determines pixel idx * threadIdx.x determines case idx * * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) * * This proceeds in two steps. * * - First, RGB values are linearly transformed to XYZ as per * http://en.wikipedia.org/wiki/CIE_XYZ_color_space * - Second, XYZ values are nonlinearly transformed to L*a*b* as per * http://en.wikipedia.org/wiki/Lab_color_space#The_forward_transformation * * Each thread produces (L*,a*,b*) values for a particular (r,g,b) pixel * * The RGB --> XYZ transform is: * * [X] [0.49 0.31 0.2 ][R] * [Y] = 5.6506753 * [0.17697 0.8124 0.01063 ][G] * [Z] [0 0.01 0.99 ][B] * * NOTE: The input should be in the range 0-1. Don't do mean-subtraction beforehand. * * Then X_max, Y_max, Z_max = 5.6506753. * * The range of the L* values is [0, 100]. * If the center flag is given, the range will be [-50, 50]. * */ template <int imgsPerThread, bool checkCaseBounds, bool center> __global__ void kRGBToLAB(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) { const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int pxIdx = blockIdx.y * 4 + threadIdx.y; if (pxIdx < imgPixels) { const int imgChannelStride = imgPixels * imgStride; const int tgtChannelStride = imgPixels * numImages; imgs += pxIdx * imgStride + caseIdx; target += pxIdx * numImages + caseIdx; #pragma unroll for (int i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || caseIdx + i * 32 < numImages) { const float R = imgs[0 * imgChannelStride + i * 32]; const float G = imgs[1 * imgChannelStride + i * 32]; const float B = imgs[2 * imgChannelStride + i * 32]; const float X = (0.49f * R + 0.31f * G + 0.2f * B); const float Y = (0.17697f * R + 0.8124f * G + 0.01063f * B); const float Z = (0.01f * G + 0.99f * B); const float labX = labf(X); const float labY = labf(Y); const float labZ = labf(Z); target[0 * tgtChannelStride + i * 32] = 116.0f * labY - 16.0f - (center ? 50.0f : 0); // L* target[1 * tgtChannelStride + i * 32] = 500.0f * (labX - labY); // a* target[2 * tgtChannelStride + i * 32] = 200.0f * (labY - labZ); // b* } } } } /* * Block size 16x32. * Each block produces a 4x4 chunk of the output image. * threadIdx.y determines pixel idx in 4x4 chunk. * threadIdx.x determines case idx. * blockIdx.x determines case idx in batches of 32*imgsPerThread. * blockIdx.y determines 4x4 chunk idx, channel idx. * * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) * * imgSize = scale * tgtSize (roughly) * * This is a rather naive kernel that relies on cache for speed. But all it's doing * is basic texture manipulation, which is very local in nature, so it should be ok. 
* Also, it will in practice be a tiny fraction of the runtime of a large convnet. * * So that is my justification for being lazy here. */ template <int imgsPerThread, bool checkCaseBounds> __global__ void kResizeBilinear(float* imgs, float* target, const int imgSize, const int tgtSize, const int numImages, const int imgStride, const float scale, const float centerScale) { const int numChunksX = DIVUP(tgtSize, 4); const int numChunks = numChunksX * numChunksX; const int channelIdx = blockIdx.y / numChunks; const int chunkIdx = blockIdx.y % numChunks; const int chunkIdxX = chunkIdx % numChunksX; const int chunkIdxY = chunkIdx / numChunksX; const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int imgPixels = imgSize * imgSize; const int tgtPixels = tgtSize * tgtSize; const int pxX = 4 * chunkIdxX + threadIdx.y % 4; const int pxY = 4 * chunkIdxY + threadIdx.y / 4; if (pxY < tgtSize && pxX < tgtSize) { const int pxIdx = pxY * tgtSize + pxX; imgs += channelIdx * imgPixels * imgStride + caseIdx; target += channelIdx * tgtPixels * numImages + pxIdx * numImages + caseIdx; // This will cause slight distortions at the edges when upsampling in some cases. // But I think that's not a big deal. const float srcPxX = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxX) * scale + centerScale)); const float srcPxY = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxY) * scale + centerScale)); const float u = floorf(srcPxX + 1) - srcPxX; const float w = srcPxY - floorf(srcPxY); // Consider doing max(0, min(imgSize, x)) here const int srcPx0 = (__float2int_rd(srcPxY) * imgSize + __float2int_rd(srcPxX)); // top-left const int srcPx1 = srcPx0 + 1; // top-right const int srcPx2 = srcPx0 + imgSize; // bottom-left const int srcPx3 = srcPx2 + 1; // bottom-right #pragma unroll for (int c = 0; c < imgsPerThread; ++c) { if (!checkCaseBounds || caseIdx + c * 32 < numImages) { const float val0 = imgs[srcPx0 * imgStride + c * 32]; const float val1 = imgs[srcPx1 * imgStride + c * 32]; const float val2 = imgs[srcPx2 * imgStride + c * 32]; const float val3 = imgs[srcPx3 * imgStride + c * 32]; const float c0 = u * (val0 - val1) + val1; const float c1 = u * (val2 - val3) + val3; target[32 * c] = w * (c1 - c0) + c0; } } } } /* * Block size B_YxB_X. * B_X*imgsPerThread*blockIdx.x + threadIdx.x determines img idx * B_Y*blockIdx.y + threadIdx.y determines img row (col if !horiz), channel idx * * imgs: (numChannels, imgPixels, numImages) with given imgStride * filter: (1, 2*radius + 1) * target: (numChannels, imgPixels, numImages) * * target can be the same matrix as imgs. * radius must be one of 3, 5, 7, 9. * * Tried imgsPerThread, slower. 
*/ template<int B_Y, int B_X, int radius> __global__ void kGaussianBlur(float* imgs, float* filter, float* target, const int imgSize, const int numImages, const int imgStride, const bool horiz, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilter[radius]; const int imgPixels = imgSize * imgSize; const int ty = B_Y * blockIdx.y + threadIdx.y; const int channelIdx = ty / imgSize; const int rowIdx = ty % imgSize; const int imgIdx = B_X*blockIdx.x + threadIdx.x; const int filterWidth = 2*radius+1; // const int tidx = B_Y * threadIdx.y + threadIdx.x; if (horiz) { imgs += channelIdx * imgPixels * imgStride + rowIdx * imgSize * imgStride + imgIdx; target += channelIdx * imgPixels * numImages + rowIdx * imgSize * numImages + imgIdx; } else { imgs += channelIdx * imgPixels * imgStride + rowIdx * imgStride + imgIdx; target += channelIdx * imgPixels * numImages + rowIdx * numImages + imgIdx; } float outputs[filterWidth-1]; #pragma unroll for (int r = 0; r < filterWidth-1; r++) { outputs[r] = 0; } if (threadIdx.x < filterWidth-1) { shFilter[threadIdx.x] = filter[threadIdx.x]; } __syncthreads(); if (imgIdx < numImages) { // This writes radius*2 = filterWidth - 1 values to outputs #pragma unroll for (int col = 0; col < radius; col++) { float px = imgs[0]; #pragma unroll for (int r = 0; r < radius + 1 + col; r++) { outputs[r] += px * shFilter[radius + col - r]; } imgs += horiz ? imgStride : imgStride * imgSize; } // Unfortunately this has to be at this level of granularity if (scaleTargets != 0) { for (int col = radius; col < imgSize ; col++) { // loop over img columns float px = imgs[0]; target[0] = scaleTargets * target[0] + scaleOutputs * (outputs[0] + px * shFilter[0]); #pragma unroll for (int r = 1; r < radius*2; r++) { outputs[r-1] = outputs[r] + px * shFilter[r]; } outputs[filterWidth - 2] = px * shFilter[0]; imgs += horiz ? imgStride : imgStride * imgSize; target += horiz ? numImages : numImages * imgSize; } #pragma unroll for (int r = 0; r < radius; r++) { float* t = &target[0]; t[0] = scaleTargets * t[0] + scaleOutputs * outputs[r]; target += horiz ? numImages : numImages * imgSize; } } else { for (int col = radius; col < imgSize ; col++) { // loop over img columns float px = imgs[0]; target[0] = scaleOutputs * (outputs[0] + px * shFilter[0]); #pragma unroll for (int r = 1; r < radius*2; r++) { outputs[r-1] = outputs[r] + px * shFilter[r]; } outputs[filterWidth - 2] = px * shFilter[0]; imgs += horiz ? imgStride : imgStride * imgSize; target += horiz ? numImages : numImages * imgSize; } #pragma unroll for (int r = 0; r < radius; r++) { target[0] = scaleOutputs * outputs[r]; target += horiz ? numImages : numImages * imgSize; } } } } /* * Block size B_YxB_X * blockIdx.x determines output.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines output.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output for some number of images/filters. 
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numChannels, imgPixels, numImages) * target: (numChannels, numOutputs, numImages) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int chansPerThread, bool checkCaseBounds> __global__ void kBedOfNails(float* imgs, float* target, const int imgSize, const int numChannels, const int numImages, const int startX, const int strideX, const int outputsX, const bool reverse, const float scaleTargets, const float scaleOutput) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numChanBlocks = DIVUP(numChannels, B_Y*chansPerThread); const int outputIdxX = blockIdx.x / numImgBlocks; const int outputIdxY = blockIdx.y / numChanBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockChanIdx = (blockIdx.y % numChanBlocks) * B_Y * chansPerThread; const int myChanIdx = (blockChanIdx + threadIdx.y*chansPerThread); if (myChanIdx >= numChannels) { return; } // if (blockIdx.x != 0 || blockIdx.y != 0) { // return; // } const int outputIdx = outputIdxY * outputsX + outputIdxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startImgPxX = startX + outputIdxX * strideX; const int startImgPxY = startX + outputIdxY * strideX; const int imgIdx = blockImgIdx + threadIdx.x; const int imgPx = startImgPxY * imgSize + startImgPxX; imgs += myChanIdx * imgPixels * numImages + imgPx * numImages + imgIdx; target += (myChanIdx * numOutputs + outputIdx) * numImages + imgIdx; if (scaleTargets != 0) { if (!reverse) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { target[c * numOutputs * numImages + i * B_X] = scaleTargets * target[c * numOutputs * numImages + i * B_X] + scaleOutput * imgs[c * imgPixels * numImages + i * B_X]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { imgs[c * imgPixels * numImages + i * B_X] = scaleTargets * imgs[c * imgPixels * numImages + i * B_X] + scaleOutput * target[c * numOutputs * numImages + i * B_X]; } } } } } else { if (!reverse) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { target[c * numOutputs * numImages + i * B_X] = scaleOutput * imgs[c * imgPixels * numImages + i * B_X]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { imgs[c * imgPixels * numImages + i * B_X] = scaleOutput * target[c * numOutputs * numImages + i * B_X]; } } } } } } /* * imgs: (numChannels, imgPixels, numImages) * target: (numChannels, outputs, numImages) */ void _convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX, bool reverse, float scaleTargets, float scaleOutput) { int numImages = reverse ? 
target.getNumCols() : images.getNumCols(); int imgPixels = imgSize * imgSize; assert(!images.isTrans()); assert(!target.isTrans()); assert(images.isContiguous()); assert(target.isContiguous()); assert(strideX > 1); int outputsX = DIVUP(imgSize, strideX); int outputs = outputsX * outputsX; if (reverse) { assert(target.getNumRows() == numChannels * outputs); } else { assert(images.getNumRows() == numChannels * imgPixels); } if (scaleTargets == 0) { if (reverse) { images.resize(numChannels * imgPixels, numImages); images.apply(NVMatrixOps::Zero()); } else { target.resize(numChannels*outputs, numImages); } } else { if (reverse) { assert(images.getNumRows() == numChannels * outputs); assert(images.getNumCols() == numImages); } else { assert(target.getNumRows() == numChannels * outputs); assert(target.getNumCols() == numImages); } } int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; int chansPerThread = numChannels % 8 == 0 ? 2 : 1; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * outputsX, DIVUP(numChannels, 4 * chansPerThread) * outputsX); if (imgsPerThread == 4) { if (chansPerThread == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (chansPerThread == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, 
scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } } else { if (chansPerThread == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } } } void convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX, float scaleTargets, float scaleOutput) { _convBedOfNails(images, target, numChannels, imgSize, startX, strideX, false, scaleTargets, scaleOutput); } void convBedOfNailsUndo(NVMatrix& actsGrad, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX, float scaleTargets, float scaleOutput) { _convBedOfNails(target, actsGrad, numChannels, imgSize, startX, strideX, true, scaleTargets, scaleOutput); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * filter: (1, 2*radius + 1) * target: (numChannels, imgPixels, numImages) */ void convGaussianBlur(NVMatrix& images, NVMatrix& filter, NVMatrix& target, bool horiz, int numChannels, float scaleTargets, float scaleOutputs) { int numImages = images.getNumCols(); int radius = filter.getNumCols() / 2; int imgPixels = images.getNumRows() / numChannels; int imgSize = int(sqrt(imgPixels)); assert(imgPixels == imgSize * imgSize); assert(radius >= 1 && radius <= 4); assert(imgSize >= 2 * radius + 1); assert(filter.getNumRows() == 1); assert(images.getNumRows() == numChannels * imgPixels); assert(!images.isTrans()); assert(!filter.isTrans()); assert(!target.isTrans()); assert(target.isContiguous()); if (scaleTargets == 0) { target.resize(images); } else { assert(target.isSameDims(images)); } dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, threads.x), DIVUP(numChannels*imgSize, threads.y)); if (radius == 1) { hipFuncSetCacheConfig(kGaussianBlur<4, 32, 1>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kGaussianBlur<4, 32, 1>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } else if (radius == 2) { hipFuncSetCacheConfig(kGaussianBlur<4, 32, 2>, 
hipFuncCachePreferL1); hipLaunchKernelGGL(( kGaussianBlur<4, 32, 2>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } else if (radius == 3) { hipFuncSetCacheConfig(kGaussianBlur<4, 32, 3>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kGaussianBlur<4, 32, 3>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } else if (radius == 4) { hipFuncSetCacheConfig(kGaussianBlur<4, 32, 4>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kGaussianBlur<4, 32, 4>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } } /* * Block size 1x128 * blockIdx.x determines pixel.x, image idx in batches of 128*imgsPerThread * blockIdx.y determines pixel.y * * So each block does one output for some number of images and all the fliters. * * threadIdx.x determines img idx * * imgs: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread */ template<int imgsPerThread, int numFilters, bool checkCaseBounds> __global__ void kCNorm_fewfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numImages, const int sizeX, const float addScale, const float powScale) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, 128*imgsPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y; const int blockImgIdx = (blockIdx.x % numImgBlocks) * 128 * imgsPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += pxIdx * numImages + imgIdx; denoms += pxIdx * numImages + imgIdx; meanDiffs += imgIdx; target += pxIdx * numImages + imgIdx; float prod[numFilters][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] += square(meanDiffs[(f * imgPixels + imgPx) * numImages + i * 128]); } } } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * imgPixels * numImages + i * 128] = prod[f][i]; target[f * imgPixels * numImages + i * 128] = imgs[f * imgPixels * numImages + i * 128] * __powf(prod[f][i], -powScale); } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches 
of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * means: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kCNorm_manyfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; meanDiffs += (blockFilterIdx + threadIdx.y) * imgPixels * numImages + imgIdx; denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(meanDiffs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]); } } } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; target[f * B_Y * imgPixels * numImages + i * B_X] = imgs[f * B_Y * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale); } } } } /* * Block size 16xB_X * blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread * blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread * * So each block does 4x4 region of pixels for some number of images/filters. 
* * threadIdx.x determines img idx * threadIdx.y determines pixel idx * * imgs: (numFilters, imgPixels, numImages) * means: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * B_X one of 8, 16, 32 * imgsPerThread one of 1, 2, 4, 8, 16 * * B_XximgsPerThread MUST be divisible by 32. * Number of filters MUST be divisible by filtersPerThread. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread * * Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more * reading than writing here, and the reading is all coalesced, so it should be OK. */ template<int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kCNorm2(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale) { __shared__ float shDiffs[filtersPerThread][B_X*imgsPerThread]; const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(filtersPerThread); const int blockPxX = 4*(blockIdx.x / numImgBlocks); const int blockPxY = 4*(blockIdx.y / numFilterBlocks); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread; const int tidx = threadIdx.y * B_X + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; const int startPxX = MAX(0, -sizeX/2 + blockPxX); const int startPxY = MAX(0, -sizeX/2 + blockPxY); const int endPxX = MIN(imgSize, blockPxX + DIVUP(sizeX, 2) + 3); const int endPxY = MIN(imgSize, blockPxY + DIVUP(sizeX, 2) + 3); const int myPxX = blockPxX + threadIdx.y % 4; const int myPxY = blockPxY + threadIdx.y / 4; const int myPxIdx = myPxY * imgSize + myPxX; // const bool doWork = myPxX < imgSize && myPxY < imgSize; const int myStartPxY = -sizeX/2 + myPxY; const int myStartPxX = -sizeX/2 + myPxX; const int myEndPxY = myPxY + DIVUP(sizeX, 2); const int myEndPxX = myPxX + DIVUP(sizeX, 2); const int imgIdx = blockImgIdx + threadIdx.x; imgs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; meanDiffs += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX; denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } for (int y = startPxY; y < endPxY; y++) { const bool isInY = y >= myStartPxY && y < myEndPxY; for (int x = startPxX; x < endPxX; x++) { const int px = y * imgSize + x; // All the threads load a pixel from memory #pragma unroll for (int ly = 0; ly < filtersPerThread; ly += B_X/2) { if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) { #pragma unroll for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) { if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) { shDiffs[ly + loadY][lx + loadX] = meanDiffs[(ly * imgPixels + px) * numImages + lx]; } } } } __syncthreads(); // Each row of threads decides if it's interested in this pixel if (isInY && x >= myStartPxX && x < myEndPxX) { #pragma unroll for (int i = 0; i < imgsPerThread; 
i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(shDiffs[f][threadIdx.x + i * B_X]); } } } } __syncthreads(); } } // imgs -= (loadY * imgPixels - myPxIdx) * numImages + loadX; // imgs += threadIdx.x; if (myPxX < imgSize && myPxY < imgSize) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * imgPixels * numImages + i * B_X] = prod[f][i]; target[f * imgPixels * numImages + i * B_X] = imgs[f * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale); } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y * * So each block does one pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y */ template<int B_Y, int B_X, int imgsPerThread, bool checkCaseBounds, bool blocked> __global__ void kFCNorm(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeF, const float addScale, const float powScale) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/B_Y; const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; meanDiffs += pxIdx * numImages + imgIdx; denoms += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; float prod[imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] = 0; } } const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF/2 + filterIdx; const int loopStartF = blocked ? startF : MAX(0, startF); const int loopEndF = MIN(numFilters, startF + sizeF); for (int f = loopStartF; f < loopEndF; ++f) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] += square(meanDiffs[f * imgPixels * numImages + i * B_X]); } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] = 1 + addScale * prod[i]; denoms[i * B_X] = prod[i]; target[i * B_X] = imgs[i * B_X] * __powf(prod[i], -powScale); } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y * * So each block does one output pixel for some number of images/filters. 
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y * * TODO: this isn't really ideal */ template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds, bool blocked> __global__ void kFRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeF, const float powScale, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numFilterBlocks = numFilters/B_Y; const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y; const int imgPixels = imgSize * imgSize; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int imgIdx = blockImgIdx + threadIdx.x; acts += pxIdx * numImages + imgIdx; inputs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; denoms += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; outGrads += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; float prod[imgsPerThread]; // if (imgIdx != 0 || pxIdx != 0 || filterIdx != 0) { // return; // } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[i] = 0; } const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF + sizeF/2 + 1 + filterIdx; const int loopStartF = blocked ? startF : MAX(0, startF); const int loopEndF = MIN(numFilters, startF + sizeF); for (int f = loopStartF; f < loopEndF; ++f) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] += acts[f * imgPixels * numImages + i * B_X]; } } } // printf("gpu f start: %d, end: %d\n", loopStartF, loopEndF); if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { const float inp = inputs[i * B_X]; const float out = outGrads[i * B_X]; const float den = denoms[i * B_X]; prod[i] = inp * prod[i] + out * __powf(den, -powScale); target[i * B_X] = prod[i]; } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { const float inp = inputs[i * B_X]; const float out = outGrads[i * B_X]; const float den = denoms[i * B_X]; prod[i] = inp * prod[i] + out * __powf(den, -powScale); target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i]; } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread * * sizeX should be something like 3 or 5 for this function. Not much more. 
* TODO: write variant where each block does 4x4 region or so (this'll be based on kCNorm2). */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kTICA_manyfilter(float* imgs, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float scaleTarget, const float scaleOutput) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(imgs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]); } } } } } imgs += pxIdx * numImages; if (scaleTarget == 0) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleOutput * __fdividef(1.0f, 0.001 + sqrtf(prod[f][i])); } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTarget * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutput * __fdividef(1.0f, 0.001 + sqrtf(prod[f][i])); } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * ticas: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread * * sizeX should be something like 3 or 5 for this function. Not much more. * TODO: write variant where each block does 4x4 region or so (this'll be based on kCNorm2). 
*/ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kTICAGrad_manyfilter(float* imgs, float* ticas, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float scaleTarget, const float scaleOutput) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; ticas += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { // adding 1/S values prod[f][i] += ticas[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]; } } } } } if (scaleTarget == 0) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleOutput * -imgs[f * B_Y * imgPixels * numImages + i * B_X] * prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTarget * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutput * -imgs[f * B_Y * imgPixels * numImages + i * B_X] * sqrtf(prod[f][i]); } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. 
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * rMaxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kLocalAvgUndo(float* avgGrads, float* target, const int imgSize, const int numFilters, const int numImages, const int subsX, const int startX, const int strideX, const int outputsX, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread)); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX; const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX); const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX; const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX); const int imgIdx = blockImgIdx + threadIdx.x; avgGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX && blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) { for (int my = startOutputY; my < endOutputY; my++) { const float regionStartY = fmaxf(0, startX + my * strideX); const float regionEndY = fminf(imgSize, startX + my * strideX + subsX); const float regionSizeY = regionEndY - regionStartY; for (int mx = startOutputX; mx < endOutputX; mx++) { const int outputIdx = my * outputsX + mx; const float regionStartX = fmaxf(0, startX + mx * strideX); const float regionEndX = fminf(imgSize, startX + mx * strideX + subsX); const float regionSizeX = regionEndX - regionStartX; // It's important to do the division here, because pushing division into the below // loops makes the code 4x slower. 
const float regionSizeInv = 1.0f / (regionSizeX * regionSizeY); #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += avgGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X] * regionSizeInv; } } } } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * maxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kLocalMaxUndo(float* imgs, float* maxGrads, float* maxActs, float* target, const int imgSize, const int numFilters, const int numImages, const int subsX, const int startX, const int strideX, const int outputsX, const float scaleTargets, const float scaleOutputs) { __shared__ float shImgs[B_Y*filtersPerThread][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread)); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX; const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX); const int startOutputX = blockPxX - startX < subsX ? 
0 : 1 + (blockPxX - startX - subsX) / strideX; const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX); const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; maxGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; maxActs += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX && blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i] = imgs[f * B_Y * imgPixels * numImages + i * B_X]; } } } for (int my = startOutputY; my < endOutputY; my++) { for (int mx = startOutputX; mx < endOutputX; mx++) { const int outputIdx = my * outputsX + mx; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float ma = maxActs[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X]; const float mg = maxGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X]; const float img = shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i]; prod[f][i] += (img == ma) * mg; } } } } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * acts := -2 x scale x acts x outGrads / denoms */ template<int B_X, int eltsPerThread> __global__ void kRNormUndoPrelims(float* acts, float* denoms, float* outGrads, const uint numElements, const float scale) { const uint e = B_X * blockIdx.x * eltsPerThread + threadIdx.x; const uint numThreads = B_X * gridDim.x; for (uint i = e; i < numElements; i += numThreads*eltsPerThread) { #pragma unroll for (uint k = 0; k < eltsPerThread; k++) { if (i + k * B_X < numElements) { acts[i + k * B_X] = __fdividef(scale*outGrads[i + k * B_X] * acts[i + k * B_X], denoms[i + k * B_X]); } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. 
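 * (This is the gather form of the response-normalization backward pass: the -2*addScale*powScale
 * factor has already been folded into acts by kRNormUndoPrelims, so each input pixel only sums the
 * acts terms of the outputs whose sizeX window covers it, multiplies that sum by the input value,
 * and adds outGrads * denoms^(-powScale).)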
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread * * TODO: this isn't really ideal */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int imgPixels = imgSize * imgSize; const int startY = MAX(0, blockPxY + sizeX/2 - sizeX + 1); const int startX = MAX(0, blockPxX + sizeX/2 - sizeX + 1); const int endY = MIN(imgSize, blockPxY + sizeX/2 + 1); const int endX = MIN(imgSize, blockPxX + sizeX/2 + 1); const int imgIdx = blockImgIdx + threadIdx.x; acts += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx; inputs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; outGrads += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } for (int sy = startY; sy < endY; sy++) { for (int sx = startX; sx < endX; sx++) { const int outPx = sy * imgSize + sx; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += acts[(f * B_Y * imgPixels + outPx) * numImages + i * B_X]; } } } } } // outGrads += blockPx * numImages; if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X]; const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X]; const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X]; const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X]; const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, 
-powScale); target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * Block size 16xB_X * blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread * blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread * * So each block does 4x4 region for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines pixel idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * B_X one of 8, 16, 32 * imgsPerThread one of 1, 2, 4, 8, 16 * * B_XximgsPerThread MUST be divisible by 32. * Number of filters MUST be divisible by filtersPerThread. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread * * Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more * reading than writing here, and the reading is all coalesced, so it should be OK. */ template<int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kRNormUndo2(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) { __shared__ float shActs[filtersPerThread][B_X*imgsPerThread]; const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(filtersPerThread); const int blockPxX = 4*(blockIdx.x / numImgBlocks); const int blockPxY = 4*(blockIdx.y / numFilterBlocks); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread; const int tidx = threadIdx.y * B_X + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; const int startPxX = MAX(0, -DIVUP(sizeX,2) + blockPxX + 1); const int startPxY = MAX(0, -DIVUP(sizeX,2) + blockPxY + 1); const int endPxX = MIN(imgSize, blockPxX + sizeX/2 + 4); const int endPxY = MIN(imgSize, blockPxY + sizeX/2 + 4); const int myPxX = blockPxX + threadIdx.y % 4; const int myPxY = blockPxY + threadIdx.y / 4; const int myPxIdx = myPxY * imgSize + myPxX; // const bool doWork = myPxX < imgSize && myPxY < imgSize; const int myStartPxY = -DIVUP(sizeX,2) + myPxY + 1; const int myStartPxX = -DIVUP(sizeX,2) + myPxX + 1; const int myEndPxY = myPxY + sizeX/2 + 1; const int myEndPxX = myPxX + sizeX/2 + 1; const int imgIdx = blockImgIdx + threadIdx.x; acts += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX; denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; inputs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; outGrads += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } for (int y = startPxY; y < endPxY; y++) { const bool isInY = y >= myStartPxY && y < myEndPxY; for (int x = startPxX; x < endPxX; x++) { const int px = y * imgSize + x; // All the threads load a pixel from 
memory #pragma unroll for (int ly = 0; ly < filtersPerThread; ly += B_X/2) { if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) { #pragma unroll for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) { if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) { shActs[ly + loadY][lx + loadX] = acts[(ly * imgPixels + px) * numImages + lx]; } } } } __syncthreads(); // Each row of threads decides if it's interested in this pixel if (isInY && x >= myStartPxX && x < myEndPxX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += shActs[f][threadIdx.x + i * B_X]; } } } } __syncthreads(); } } acts -= (loadY * imgPixels - myPxIdx) * numImages + loadX; acts += threadIdx.x; if (myPxX < imgSize && myPxY < imgSize) { if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float out = outGrads[f * imgPixels * numImages + i * B_X]; const float den = denoms[f * imgPixels * numImages + i * B_X]; const float inp = inputs[f * imgPixels * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float out = outGrads[f * imgPixels * numImages + i * B_X]; const float den = denoms[f * imgPixels * numImages + i * B_X]; const float inp = inputs[f * imgPixels * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * imgPixels * numImages + i * B_X] = scaleTargets * target[f * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } } void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target, int subsX, int startX, int strideX, int outputsX) { convLocalMaxUndo(images, maxGrads, maxActs, target, subsX, startX, strideX, outputsX, 0, 1); } /* * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * rMaxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, float scaleTargets, float scaleOutput) { int outputs = outputsX * outputsX; int numImages = images.getNumCols(); int numFilters = maxGrads.getNumRows() / outputs; int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(maxGrads.getNumRows() == numFilters * outputs); assert(maxGrads.getNumCols() == numImages); assert(!images.isTrans()); assert(!target.isTrans()); assert(!maxGrads.isTrans()); assert(!maxActs.isTrans()); assert(images.isContiguous()); assert(maxGrads.isContiguous()); assert(maxActs.isContiguous()); assert(maxGrads.isSameDims(maxActs)); assert(numFilters % 16 == 0); // assert(numImages % 128 == 0); assert(strideX <= subsX); target.resize(images); assert(target.isContiguous()); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; int checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize); if (imgsPerThread == 4) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, false, false>), dim3(blocks), dim3(threads), 
0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } cutilCheckMsg("convLocalMaxUndo: kernel execution failed"); } void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, int imgSize) { convLocalAvgUndo(avgGrads, target, subsX, startX, strideX, outputsX, imgSize, 0, 1); } /* * avgGrads: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, int imgSize, float scaleTargets, float scaleOutput) { int numImages = avgGrads.getNumCols(); int outputs = outputsX * outputsX; int imgPixels = imgSize * imgSize; int numFilters = avgGrads.getNumRows() / outputs; assert(avgGrads.getNumRows() == numFilters * outputs); assert(!target.isTrans()); assert(!avgGrads.isTrans()); assert(avgGrads.isContiguous()); assert(numFilters % 16 == 0); // assert(numImages % 128 == 0); assert(strideX <= subsX); target.resize(numFilters * imgPixels, numImages); assert(target.isContiguous()); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; int checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 4)) * imgSize); if (imgsPerThread == 4) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, false, 
false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } cutilCheckMsg("convLocalAvgUndo: kernel execution failed"); } void convResponseNorm(NVMatrix& images, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) { convContrastNorm(images, images, denoms, target, numFilters, sizeX, addScale, powScale); } /* * images: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) */ void convContrastNorm(NVMatrix& images, NVMatrix& meanDiffs, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(meanDiffs.isSameDims(images)); assert(!meanDiffs.isTrans()); assert(!images.isTrans()); assert(images.isContiguous()); assert(meanDiffs.isContiguous()); assert(numFilters % 16 == 0 || numFilters <= 8); target.resize(images); denoms.resize(images); assert(target.isContiguous()); if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) 
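        // Large-region path: kCNorm2 with a fixed 8-images-per-thread, 4-filters-per-thread tiling.
        // Smaller regions fall through to the else-branch below, which picks kCNorm_fewfilter for
        // numFilters <= 8 and kCNorm_manyfilter otherwise.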
int imgsPerThread = 8; int filtersPerThread = 4; int bx = 8; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); assert(numFilters % filtersPerThread == 0); dim3 threads(bx, 16); dim3 blocks(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm2<8, 8, 4, true>, hipFuncCachePreferL1); // L1 faster here hipLaunchKernelGGL(( kCNorm2<8, 8, 4, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm2<8, 8, 4, false>, hipFuncCachePreferL1); // L1 faster here hipLaunchKernelGGL(( kCNorm2<8, 8, 4, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } } else { bool checkCaseBounds = numImages % 128 != 0; if (numFilters <= 8) { dim3 threads(128); dim3 blocks(DIVUP(numImages,128) * imgSize, imgSize); if (numFilters == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 2) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 3) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 3, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 3, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 3, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 3, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 4) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 4, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 4, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 4, false>, 
hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 4, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 5) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 5, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 5, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 5, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 5, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 6) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 6, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 6, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 6, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 6, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 7) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 7, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 7, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 7, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 7, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 8) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 8, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 8, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 8, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 8, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } } else { dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_manyfilter<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_manyfilter<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, 
images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } } } cutilCheckMsg("convResponseNorm: kernel execution failed"); } void convContrastNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& meanDiffs, NVMatrix& acts, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { convResponseNormUndo(outGrads, denoms, meanDiffs, acts, target, numFilters, sizeX, addScale, powScale, scaleTargets, scaleOutput); } /* * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * THIS WILL OVERWRITE THE ACTS MATRIX. */ void convResponseNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& inputs, NVMatrix& acts, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { int numImages = outGrads.getNumCols(); int imgPixels = outGrads.getNumRows() / numFilters; int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(outGrads.getNumRows() == numFilters * imgPixels); assert(denoms.isSameDims(outGrads)); assert(acts.isSameDims(denoms)); assert(!denoms.isTrans()); assert(!outGrads.isTrans()); assert(!acts.isTrans()); assert(!target.isTrans()); assert(outGrads.isContiguous()); assert(numFilters % 16 == 0); target.resize(outGrads); assert(target.isContiguous()); // First do acts := -2 x scale x acts x outGrads / denoms // so that the main routine only has to do an addition in its inner loop. int prelimEltsPerThread = 4; dim3 threads(128); dim3 blocks(MIN(512, DIVUP(outGrads.getNumElements(),(threads.x * prelimEltsPerThread)))); hipLaunchKernelGGL(( kRNormUndoPrelims<128, 4>), dim3(blocks), dim3(threads), 0, 0, acts.getDevData(), denoms.getDevData(), outGrads.getDevData(), outGrads.getNumElements(), -2*addScale*powScale); // Now the main routine if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 
4 : 2; int filtersPerThread = 4; int bx = 16; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); threads = dim3(bx, 16); blocks = dim3(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (imgsPerThread == 8) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && 
scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } } else { int imgsPerThread = numImages % 64 == 0 ? 2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; threads = dim3(32, 4); blocks = dim3(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize); if (imgsPerThread == 2) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( 
kRNormUndo<4, 32, 1, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 1, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 1, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 1, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } } cutilCheckMsg("kRNormUndo: kernel execution failed"); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) * * imgSize = scale * tgtSize */ void convResizeBilinear(NVMatrix& images, NVMatrix& target, int imgSize, int tgtSize, float scale) { assert(!images.isTrans()); assert(!target.isTrans()); int imgPixels = imgSize * imgSize; int tgtPixels = tgtSize * tgtSize; int numChannels = images.getNumRows() / imgPixels; int numImages = images.getNumCols(); assert(images.getNumRows() == numChannels * imgPixels); target.resize(numChannels * tgtPixels, numImages); assert(target.isContiguous()); int numChunksX = DIVUP(tgtSize, 4); int numChunks = numChunksX * numChunksX; double imgCenter = imgSize * 0.5; double tgtCenter = tgtSize * 0.5; double centerScale = imgCenter - tgtCenter * scale; int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 16); dim3 blocks(DIVUP(numImages, imgsPerThread * 32), numChannels * numChunks); if (imgsPerThread == 4) { if (checkCaseBounds) { hipFuncSetCacheConfig(kResizeBilinear<4, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kResizeBilinear<4, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } else { hipFuncSetCacheConfig(kResizeBilinear<4, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kResizeBilinear<4, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { hipFuncSetCacheConfig(kResizeBilinear<2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kResizeBilinear<2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } else { hipFuncSetCacheConfig(kResizeBilinear<2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kResizeBilinear<2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kResizeBilinear<1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kResizeBilinear<1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } else { hipFuncSetCacheConfig(kResizeBilinear<1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kResizeBilinear<1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } } cutilCheckMsg("convResizeBilinear: kernel execution failed"); } /* * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) */ void convRGBToYUV(NVMatrix& images, NVMatrix& target) { assert(!images.isTrans()); assert(!target.isTrans()); int imgPixels = images.getNumRows() / 3; int numImages = images.getNumCols(); assert(images.getNumRows() == 3 * imgPixels); target.resize(3 * imgPixels, numImages); assert(target.isContiguous()); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4)); if (imgsPerThread == 4) { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToYUV<4, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToYUV<4, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { hipFuncSetCacheConfig(kRGBToYUV<4, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToYUV<4, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToYUV<2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToYUV<2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { hipFuncSetCacheConfig(kRGBToYUV<2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToYUV<2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToYUV<1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToYUV<1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { hipFuncSetCacheConfig(kRGBToYUV<1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToYUV<1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } cutilCheckMsg("convRGBToYUV: kernel execution failed"); } /* * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) */ void convRGBToLAB(NVMatrix& images, NVMatrix& target, bool center) { assert(!images.isTrans()); assert(!target.isTrans()); int imgPixels = images.getNumRows() / 3; int numImages = images.getNumCols(); assert(images.getNumRows() == 3 * imgPixels); target.resize(3 * imgPixels, numImages); assert(target.isContiguous()); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4)); if (imgsPerThread == 4) { if (center) { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToLAB<4, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<4, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { hipFuncSetCacheConfig(kRGBToLAB<4, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<4, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToLAB<4, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<4, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { hipFuncSetCacheConfig(kRGBToLAB<4, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<4, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } } else if (imgsPerThread == 2) { if (center) { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToLAB<2, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<2, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { hipFuncSetCacheConfig(kRGBToLAB<2, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<2, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToLAB<2, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<2, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { hipFuncSetCacheConfig(kRGBToLAB<2, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<2, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } } else { if (center) { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToLAB<1, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<1, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { hipFuncSetCacheConfig(kRGBToLAB<1, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<1, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToLAB<1, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<1, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { hipFuncSetCacheConfig(kRGBToLAB<1, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<1, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } } cutilCheckMsg("convRGBToLAB: kernel execution failed"); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, 
tgtPixels, numImages) */ void convCrop(NVMatrix& imgs, NVMatrix& target, int imgSize, int tgtSize, int startY, int startX) { int numImages = imgs.getNumCols(); int imgPixels = imgSize * imgSize; int tgtPixels = tgtSize * tgtSize; int numChannels = imgs.getNumRows() / imgPixels; assert(imgs.getNumRows() == imgPixels * numChannels); assert(imgPixels == imgSize * imgSize); assert(imgSize - startY >= tgtSize); assert(imgSize - startX >= tgtSize); assert(startY >= 0); assert(startX >= 0); target.resize(numChannels * tgtPixels, numImages); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 blocks(DIVUP(numImages, 32 * imgsPerThread), numChannels * DIVUP(tgtPixels, 4)); dim3 threads(32, 4); if (imgsPerThread == 4) { if (checkCaseBounds) { hipLaunchKernelGGL(( kCrop<4, true>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } else { hipLaunchKernelGGL(( kCrop<4, false>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { hipLaunchKernelGGL(( kCrop<2, true>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } else { hipLaunchKernelGGL(( kCrop<2, false>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } } else { if (checkCaseBounds) { hipLaunchKernelGGL(( kCrop<1, true>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } else { hipLaunchKernelGGL(( kCrop<1, false>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } } cutilCheckMsg("convCrop: kernel execution failed"); } /* * images: (numFilters, imgPixels, numImages) * ticas: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) (out) * * Computes TICA-style gradient for given feature maps * f(x) = exp(-(sum_i{x_i^2}^(1/2))) * dlogf(x)/df(x) = -x_i / (sum_i{x_i^2}^(1/2) + eps) * * eps added for numerical stability */ void convTICAGrad(NVMatrix& images, NVMatrix& ticas, NVMatrix& target, int numFilters, int sizeX, float scaleTarget, float scaleOutput) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(!images.isTrans()); assert(images.isContiguous()); assert(numFilters % 16 == 0 || numFilters <= 8); assert(ticas.isSameDims(images)); assert(ticas.isContiguous()); if (scaleTarget == 0) { target.resize(images); } else { assert(target.isSameDims(images)); } assert(target.isContiguous()); // TEMPORARY assert(numFilters > 8); assert(sizeX < 6); dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, 32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); bool checkCaseBounds = (numImages % 128) != 0; if (checkCaseBounds) { hipFuncSetCacheConfig(kTICAGrad_manyfilter<4, 32, 4, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kTICAGrad_manyfilter<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), ticas.getDevData(), target.getDevData(), imgSize, numFilters, 
numImages, sizeX, scaleTarget, scaleOutput); } else { hipFuncSetCacheConfig(kTICAGrad_manyfilter<4, 32, 4, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kTICAGrad_manyfilter<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), ticas.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput); } cutilCheckMsg("convTICAGrad: kernel execution failed"); } /* * images: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) (out) * * Computes TICA-style gradient for given feature maps * f(x) = exp(-(sum_i{x_i^2}^(1/2))) * dlogf(x)/df(x) = -x_i / (sum_i{x_i^2}^(1/2) + eps) * * eps added for numerical stability */ void convTICA(NVMatrix& images, NVMatrix& target, int numFilters, int sizeX, float scaleTarget, float scaleOutput) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(!images.isTrans()); assert(images.isContiguous()); assert(numFilters % 16 == 0 || numFilters <= 8); if (scaleTarget == 0) { target.resize(images); } else { assert(target.isSameDims(images)); } assert(target.isContiguous()); // TEMPORARY assert(numFilters > 8); assert(sizeX < 6); dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, 32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); bool checkCaseBounds = (numImages % 128) != 0; if (checkCaseBounds) { hipFuncSetCacheConfig(kTICA_manyfilter<4, 32, 4, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kTICA_manyfilter<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput); } else { hipFuncSetCacheConfig(kTICA_manyfilter<4, 32, 4, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kTICA_manyfilter<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput); } cutilCheckMsg("convTICA: kernel execution failed"); } /* * images: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * Note: at present, I have no code to compute the meanDiffs. So it should be set * to be equal to images. In other words, this isn't really doing contrast normalization, * just response normalization. 
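 * sizeF is the number of consecutive feature maps summed over for each output map; the denoms
 * written here are the ones later consumed by convResponseNormCrossMapUndo.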
*/ void convContrastNormCrossMap(NVMatrix& images, NVMatrix& meanDiffs, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeF, float addScale, float powScale, bool blocked) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(meanDiffs.isSameDims(images)); assert(sizeF > 0 && sizeF <= numFilters); assert(!meanDiffs.isTrans()); assert(!images.isTrans()); assert(images.isContiguous()); assert(meanDiffs.isContiguous()); assert(numFilters % 16 == 0); target.resize(images); denoms.resize(images); assert(target.isContiguous()); bool checkCaseBounds = numImages % 128 != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize); if (blocked) { if (checkCaseBounds) { hipFuncSetCacheConfig(kFCNorm<4, 32, 4, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFCNorm<4, 32, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, addScale, powScale); } else { hipFuncSetCacheConfig(kFCNorm<4, 32, 4, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFCNorm<4, 32, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, addScale, powScale); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kFCNorm<4, 32, 4, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFCNorm<4, 32, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, addScale, powScale); } else { hipFuncSetCacheConfig(kFCNorm<4, 32, 4, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFCNorm<4, 32, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, addScale, powScale); } } cutilCheckMsg("convContrastNormCrossMap: kernel execution failed"); } /* * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * THIS WILL OVERWRITE THE ACTS MATRIX. */ void convResponseNormCrossMapUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& inputs, NVMatrix& acts, NVMatrix& target, int numFilters, int sizeF, float addScale, float powScale, bool blocked, float scaleTargets, float scaleOutput) { int numImages = outGrads.getNumCols(); int imgPixels = outGrads.getNumRows() / numFilters; int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(sizeF > 0 && sizeF <= numFilters); assert(outGrads.getNumRows() == numFilters * imgPixels); assert(denoms.isSameDims(outGrads)); assert(acts.isSameDims(denoms)); assert(!denoms.isTrans()); assert(!outGrads.isTrans()); assert(!acts.isTrans()); assert(!target.isTrans()); assert(outGrads.isContiguous()); assert(numFilters % 16 == 0); target.resize(outGrads); assert(target.isContiguous()); // First do acts := -2 x scale x acts x outGrads / denoms // so that the main routine only has to do an addition in its inner loop. 
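    // In detail: with acts y = x * d^(-powScale) and a denominator d that depends on the inputs only
    // through addScale * sum(x^2) over its window, the chain rule gives
    //   dE/dx_j = g_j * d_j^(-powScale) + x_j * sum_i (-2*addScale*powScale) * g_i * y_i / d_i,
    // where the sum runs over the outputs i whose window contains j. kRNormUndoPrelims computes the
    // per-element factor (overwriting acts); the gather kernel launched below adds x_j times the
    // window sum to the first term.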
int prelimEltsPerThread = 4; dim3 threads(128); dim3 blocks(MIN(512, DIVUP(outGrads.getNumElements(),(threads.x * prelimEltsPerThread)))); hipLaunchKernelGGL(( kRNormUndoPrelims<128, 4>), dim3(blocks), dim3(threads), 0, 0, acts.getDevData(), denoms.getDevData(), outGrads.getDevData(), outGrads.getNumElements(), -2*addScale*powScale); // Now the main routine dim3 threads2 = dim3(32, 4); dim3 blocks2 = dim3(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize); bool checkCaseBounds = (numImages % 128) != 0; if (blocked) { if (scaleTargets == 0 && scaleOutput == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, false, true, true>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, false, false, true>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, true, true, true>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, true, false, true>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } } } else { if (scaleTargets == 0 && scaleOutput == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, false, true, false>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, false, false, false>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, true, true, false>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, true, false, false>), dim3(blocks2), 
dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } } } cutilCheckMsg("convResponseNormCrossMapUndo: kernel execution failed"); } void convResponseNormCrossMap(NVMatrix& images, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeF, float addScale, float powScale, bool blocked) { convContrastNormCrossMap(images, images, denoms, target, numFilters, sizeF, addScale, powScale, blocked); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * filter: (filterSize, filterSize, filterSize*filterSize) * target: (numChannels, imgPixels, numImages) */ void convShift(NVMatrix& images, NVMatrix& filter, NVMatrix& target, int numChannels) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numChannels; int imgSize = int(sqrt(imgPixels)); int numFilters = filter.getNumCols(); int filterLength = filter.getNumRows(); int outputsX = int(sqrt(filter.getNumRows())); assert(imgPixels == imgSize * imgSize); assert(images.getNumRows() == numChannels * imgPixels); assert(!images.isTrans()); assert(!filter.isTrans()); assert(!target.isTrans()); assert(target.isContiguous()); int targetPixels = outputsX * outputsX; target.resize(targetPixels * numChannels * numFilters, numImages); Matrix targetM(target.getNumRows(), target.getNumCols()); Matrix imageM(images.getNumRows(), images.getNumCols()); Matrix filterM(filter.getNumRows(), filter.getNumCols()); target.copyToHost(targetM); images.copyToHost(imageM); filter.copyToHost(filterM); float* targetData = targetM.getDataWrite(); float* imageData = imageM.getDataWrite(); float* filterData = filterM.getDataWrite(); //FILE* fid = fopen("temp.txt","wb"); ////fwrite(targetData, sizeof(float), target.getNumRows()*target.getNumCols(), fid); //fwrite(filterData, sizeof(float), filterM.getNumRows()*filterM.getNumCols(), fid); //fclose(fid); int i,n,c,f; int x,y,dx,dy; int width = numImages; // the nth image for(n = 0; n < target.getNumCols(); n++) { // the cth channels for(c = 0; c < numChannels; c++) { // the fth filter for(f = 0; f < numFilters; f++) { for(i = 0; i < targetPixels; i++) { x = n; y = f * numChannels * targetPixels + c * targetPixels + i; dx = n; dy = c * imgPixels + int(filterData[i * numFilters + f]); targetData[y * width + x] = imageData[dy * width + dx]; //int ind1 = n * target.getNumRows() + f * numChannels * targetPixels + c * targetPixels + i; //int ind2 = n * images.getNumRows() + c * imgPixels + int(filterData[f * filterLength + i]); //float pp = targetData[n * target.getNumRows() + f * numChannels * targetPixels + c * targetPixels + i]; //if(pp > 0) // int aaa = 1; } } } } target.copyFromHost(targetM); //FILE* fid = fopen("temp.txt","wb"); //fwrite(targetData, sizeof(float), target.getNumRows()*target.getNumCols(), fid); //fclose(fid); }
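/*
 * Note on convShift above: the gather runs entirely on the host. For image n, channel c,
 * filter f and output pixel i, the output row f*numChannels*targetPixels + c*targetPixels + i
 * is copied from the input pixel whose within-channel index is filter[i*numFilters + f],
 * i.e. 'filter' here is a precomputed table of source-pixel indices rather than a set of
 * convolution weights. The copyToHost/copyFromHost round trip makes this more of a
 * reference implementation than a fast path.
 */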
7feaad21e5602fb8d14711147da52b7123f8b2aa.cu
/* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <iostream> #include <assert.h> #include <nvmatrix_kernels.cuh> #include <nvmatrix.cuh> #include <conv_util.cuh> using namespace std; __device__ inline float square(const float a) { return a * a; } /* * blockIdx.y determines module in batches of B_Y * blockIdx.x determines filter in batches of B_X * filtersPerThread * * weights: (numModules, numColors, filterPixels, numFilters) * Not fully coalesced if B_X < 32, so use cache. */ template <int B_Y, int B_X, int filtersPerThread> __global__ void kNormalizeLCWeights(float* weights, const uint numFilters, const int numModules, const uint weightsPerFilter, const float norm) { const uint moduleIdx = B_Y * blockIdx.y + threadIdx.y; const uint filterIdx = B_X * blockIdx.x + threadIdx.x; float prod[filtersPerThread]; #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { prod[i] = 0; } if (moduleIdx < numModules) { weights += moduleIdx * weightsPerFilter * numFilters + filterIdx; for (uint p = 0; p < weightsPerFilter; ++p) { #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { prod[i] += square(weights[p * numFilters + i * B_X]); } } #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { prod[i] = sqrtf(prod[i]); prod[i] = prod[i] > norm ? __fdividef(norm, prod[i]) : 1.0f; } for (uint p = 0; p < weightsPerFilter; ++p) { #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { weights[p * numFilters + i * B_X] *= prod[i]; } } } } /* * weights: (numModules, numColors, filterPixels, numFilters) */ void normalizeLocalWeights(NVMatrix& weights, int numModules, float norm) { int numFilters = weights.getNumCols(); int weightsPerFilter = weights.getNumRows() / numModules; assert(numModules * weightsPerFilter == weights.getNumRows()); assert(!weights.isTrans()); assert(weights.isContiguous()); assert(numFilters % 16 == 0); int bx = numFilters % 32 == 0 ? 32 : 16; int by = bx == 32 ? 4 : 8; int filtersPerThread = numFilters % 128 == 0 ? 4 : numFilters % 64 == 0 ? 
2 : 1; dim3 blocks(numFilters / (bx * filtersPerThread), DIVUP(numModules, by)); dim3 threads(bx, by); if (filtersPerThread == 4) { cudaFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 4>, cudaFuncCachePreferL1); kNormalizeLCWeights<4, 32, 4><<<blocks, threads>>>(weights.getDevData(), numFilters, numModules, weightsPerFilter, norm); } else if (filtersPerThread == 2) { cudaFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 2>, cudaFuncCachePreferL1); kNormalizeLCWeights<4, 32, 2><<<blocks, threads>>>(weights.getDevData(), numFilters, numModules, weightsPerFilter, norm); } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 1>, cudaFuncCachePreferL1); kNormalizeLCWeights<4, 32, 1><<<blocks, threads>>>(weights.getDevData(), numFilters, numModules, weightsPerFilter, norm); } else { cudaFuncSetCacheConfig(kNormalizeLCWeights<8, 16, 1>, cudaFuncCachePreferL1); kNormalizeLCWeights<8, 16, 1><<<blocks, threads>>>(weights.getDevData(), numFilters, numModules, weightsPerFilter, norm); } } } /* * Block size 4x32 * blockIdx.x determines img idx in batches of 32*imgsPerThread * blockIdx.y determines channel idx, pixel idx in batches of 4 * * threadIdx.x determins case idx * threadIdx.y determines pixel idx * * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) */ template <int imgsPerThread, bool checkCaseBounds> __global__ void kCrop(float* imgs, float* target, const uint numImages, const int imgStride, const uint imgSize, const uint tgtSize, const uint startY, const uint startX) { const uint imgPixels = imgSize * imgSize; const uint tgtPixels = tgtSize * tgtSize; const uint caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const uint blockChanIdx = blockIdx.y / DIVUP(tgtPixels, 4); const uint tgtPixelIdx = 4*(blockIdx.y % DIVUP(tgtPixels, 4)) + threadIdx.y; const uint tgtPxY = tgtPixelIdx / tgtSize; const uint tgtPxX = tgtPixelIdx % tgtSize; const uint srcPixelIdx = (startY + tgtPxY) * imgSize + startX + tgtPxX; if (tgtPixelIdx < tgtPixels) { imgs += (blockChanIdx * imgPixels + srcPixelIdx) * imgStride + caseIdx; target += (blockChanIdx * tgtPixels + tgtPixelIdx) * numImages + caseIdx; #pragma unroll for (uint i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || (caseIdx + 32 * i < numImages)) { target[i * 32] = imgs[i * 32]; } } } } /* * Block size 4x32 * blockIdx.y determines pixel idx in batches of 4 * blockIdx.x determines case idx in batches of 32*imgsPerThread * threadIdx.y determines pixel idx * threadIdx.x determines case idx * * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) * * Each thread produces (y,u,v) values for a particular (r,g,b) pixel * * The RGB --> YUV transform is (http://en.wikipedia.org/wiki/YUV): * * [Y] [0.2126 0.7152 0.0722 ][R] * [U] = [-0.09991 -0.33609 0.436 ][G] * [V] [0.615 -0.55861 -0.05639][B] */ template <int imgsPerThread, bool checkCaseBounds> __global__ void kRGBToYUV(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) { const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int pxIdx = blockIdx.y * 4 + threadIdx.y; if (pxIdx < imgPixels) { const int imgChannelStride = imgPixels * imgStride; const int tgtChannelStride = imgPixels * numImages; imgs += pxIdx * imgStride + caseIdx; target += pxIdx * numImages + caseIdx; #pragma unroll for (int i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || caseIdx + i * 32 < numImages) { const float R = imgs[0 * imgChannelStride + i * 
32]; const float G = imgs[1 * imgChannelStride + i * 32]; const float B = imgs[2 * imgChannelStride + i * 32]; target[0 * tgtChannelStride + i * 32] = 0.2126f * R + 0.7152f * G + 0.0722f * B; // Y target[1 * tgtChannelStride + i * 32] = -0.09991f * R + -0.33609f * G + 0.436f * B; // U target[2 * tgtChannelStride + i * 32] = 0.615f * R + -0.55861f * G + -0.05639f * B; // V } } } } __device__ inline float labf(const float x) { if (x > 0.0088564517f) { return __powf(x, 0.3333f); } return 7.787037f * x + 0.13793103f; } /* * Block size 4x32 * blockIdx.y determines pixel idx in batches of 4 * blockIdx.x determines case idx in batches of 32*imgsPerThread * threadIdx.y determines pixel idx * threadIdx.x determines case idx * * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) * * This proceeds in two steps. * * - First, RGB values are linearly transformed to XYZ as per * http://en.wikipedia.org/wiki/CIE_XYZ_color_space * - Second, XYZ values are nonlinearly transformed to L*a*b* as per * http://en.wikipedia.org/wiki/Lab_color_space#The_forward_transformation * * Each thread produces (L*,a*,b*) values for a particular (r,g,b) pixel * * The RGB --> XYZ transform is: * * [X] [0.49 0.31 0.2 ][R] * [Y] = 5.6506753 * [0.17697 0.8124 0.01063 ][G] * [Z] [0 0.01 0.99 ][B] * * NOTE: The input should be in the range 0-1. Don't do mean-subtraction beforehand. * * Then X_max, Y_max, Z_max = 5.6506753. * * The range of the L* values is [0, 100]. * If the center flag is given, the range will be [-50, 50]. * */ template <int imgsPerThread, bool checkCaseBounds, bool center> __global__ void kRGBToLAB(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) { const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int pxIdx = blockIdx.y * 4 + threadIdx.y; if (pxIdx < imgPixels) { const int imgChannelStride = imgPixels * imgStride; const int tgtChannelStride = imgPixels * numImages; imgs += pxIdx * imgStride + caseIdx; target += pxIdx * numImages + caseIdx; #pragma unroll for (int i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || caseIdx + i * 32 < numImages) { const float R = imgs[0 * imgChannelStride + i * 32]; const float G = imgs[1 * imgChannelStride + i * 32]; const float B = imgs[2 * imgChannelStride + i * 32]; const float X = (0.49f * R + 0.31f * G + 0.2f * B); const float Y = (0.17697f * R + 0.8124f * G + 0.01063f * B); const float Z = (0.01f * G + 0.99f * B); const float labX = labf(X); const float labY = labf(Y); const float labZ = labf(Z); target[0 * tgtChannelStride + i * 32] = 116.0f * labY - 16.0f - (center ? 50.0f : 0); // L* target[1 * tgtChannelStride + i * 32] = 500.0f * (labX - labY); // a* target[2 * tgtChannelStride + i * 32] = 200.0f * (labY - labZ); // b* } } } } /* * Block size 16x32. * Each block produces a 4x4 chunk of the output image. * threadIdx.y determines pixel idx in 4x4 chunk. * threadIdx.x determines case idx. * blockIdx.x determines case idx in batches of 32*imgsPerThread. * blockIdx.y determines 4x4 chunk idx, channel idx. * * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) * * imgSize = scale * tgtSize (roughly) * * This is a rather naive kernel that relies on cache for speed. But all it's doing * is basic texture manipulation, which is very local in nature, so it should be ok. * Also, it will in practice be a tiny fraction of the runtime of a large convnet. * * So that is my justification for being lazy here. 
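 *
 * For a target pixel (pxX, pxY) the kernel samples the source image at
 *     srcX = clamp(pxX * scale + centerScale, 0, imgSize - 1.01)
 * (and likewise for srcY) and bilinearly blends the four surrounding source pixels,
 * so scale < 1 upsamples and scale > 1 downsamples.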
*/ template <int imgsPerThread, bool checkCaseBounds> __global__ void kResizeBilinear(float* imgs, float* target, const int imgSize, const int tgtSize, const int numImages, const int imgStride, const float scale, const float centerScale) { const int numChunksX = DIVUP(tgtSize, 4); const int numChunks = numChunksX * numChunksX; const int channelIdx = blockIdx.y / numChunks; const int chunkIdx = blockIdx.y % numChunks; const int chunkIdxX = chunkIdx % numChunksX; const int chunkIdxY = chunkIdx / numChunksX; const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int imgPixels = imgSize * imgSize; const int tgtPixels = tgtSize * tgtSize; const int pxX = 4 * chunkIdxX + threadIdx.y % 4; const int pxY = 4 * chunkIdxY + threadIdx.y / 4; if (pxY < tgtSize && pxX < tgtSize) { const int pxIdx = pxY * tgtSize + pxX; imgs += channelIdx * imgPixels * imgStride + caseIdx; target += channelIdx * tgtPixels * numImages + pxIdx * numImages + caseIdx; // This will cause slight distortions at the edges when upsampling in some cases. // But I think that's not a big deal. const float srcPxX = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxX) * scale + centerScale)); const float srcPxY = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxY) * scale + centerScale)); const float u = floorf(srcPxX + 1) - srcPxX; const float w = srcPxY - floorf(srcPxY); // Consider doing max(0, min(imgSize, x)) here const int srcPx0 = (__float2int_rd(srcPxY) * imgSize + __float2int_rd(srcPxX)); // top-left const int srcPx1 = srcPx0 + 1; // top-right const int srcPx2 = srcPx0 + imgSize; // bottom-left const int srcPx3 = srcPx2 + 1; // bottom-right #pragma unroll for (int c = 0; c < imgsPerThread; ++c) { if (!checkCaseBounds || caseIdx + c * 32 < numImages) { const float val0 = imgs[srcPx0 * imgStride + c * 32]; const float val1 = imgs[srcPx1 * imgStride + c * 32]; const float val2 = imgs[srcPx2 * imgStride + c * 32]; const float val3 = imgs[srcPx3 * imgStride + c * 32]; const float c0 = u * (val0 - val1) + val1; const float c1 = u * (val2 - val3) + val3; target[32 * c] = w * (c1 - c0) + c0; } } } } /* * Block size B_YxB_X. * B_X*imgsPerThread*blockIdx.x + threadIdx.x determines img idx * B_Y*blockIdx.y + threadIdx.y determines img row (col if !horiz), channel idx * * imgs: (numChannels, imgPixels, numImages) with given imgStride * filter: (1, 2*radius + 1) * target: (numChannels, imgPixels, numImages) * * target can be the same matrix as imgs. * radius must be one of 3, 5, 7, 9. * * Tried imgsPerThread, slower. 
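 *
 * Note: "radius ... 3, 5, 7, 9" above really describes the filter width 2*radius + 1;
 * the host wrapper convGaussianBlur below asserts 1 <= radius <= 4.
 *
 * The filter is 1-D, so a full 2-D blur is presumably meant to be done as two passes,
 * e.g. (sketch only; 'tmp' is a hypothetical scratch matrix):
 *     NVMatrix tmp;
 *     convGaussianBlur(images, filter, tmp,    true,  numChannels, 0, 1); // horizontal pass
 *     convGaussianBlur(tmp,    filter, target, false, numChannels, 0, 1); // vertical pass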
*/ template<int B_Y, int B_X, int radius> __global__ void kGaussianBlur(float* imgs, float* filter, float* target, const int imgSize, const int numImages, const int imgStride, const bool horiz, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilter[radius]; const int imgPixels = imgSize * imgSize; const int ty = B_Y * blockIdx.y + threadIdx.y; const int channelIdx = ty / imgSize; const int rowIdx = ty % imgSize; const int imgIdx = B_X*blockIdx.x + threadIdx.x; const int filterWidth = 2*radius+1; // const int tidx = B_Y * threadIdx.y + threadIdx.x; if (horiz) { imgs += channelIdx * imgPixels * imgStride + rowIdx * imgSize * imgStride + imgIdx; target += channelIdx * imgPixels * numImages + rowIdx * imgSize * numImages + imgIdx; } else { imgs += channelIdx * imgPixels * imgStride + rowIdx * imgStride + imgIdx; target += channelIdx * imgPixels * numImages + rowIdx * numImages + imgIdx; } float outputs[filterWidth-1]; #pragma unroll for (int r = 0; r < filterWidth-1; r++) { outputs[r] = 0; } if (threadIdx.x < filterWidth-1) { shFilter[threadIdx.x] = filter[threadIdx.x]; } __syncthreads(); if (imgIdx < numImages) { // This writes radius*2 = filterWidth - 1 values to outputs #pragma unroll for (int col = 0; col < radius; col++) { float px = imgs[0]; #pragma unroll for (int r = 0; r < radius + 1 + col; r++) { outputs[r] += px * shFilter[radius + col - r]; } imgs += horiz ? imgStride : imgStride * imgSize; } // Unfortunately this has to be at this level of granularity if (scaleTargets != 0) { for (int col = radius; col < imgSize ; col++) { // loop over img columns float px = imgs[0]; target[0] = scaleTargets * target[0] + scaleOutputs * (outputs[0] + px * shFilter[0]); #pragma unroll for (int r = 1; r < radius*2; r++) { outputs[r-1] = outputs[r] + px * shFilter[r]; } outputs[filterWidth - 2] = px * shFilter[0]; imgs += horiz ? imgStride : imgStride * imgSize; target += horiz ? numImages : numImages * imgSize; } #pragma unroll for (int r = 0; r < radius; r++) { float* t = &target[0]; t[0] = scaleTargets * t[0] + scaleOutputs * outputs[r]; target += horiz ? numImages : numImages * imgSize; } } else { for (int col = radius; col < imgSize ; col++) { // loop over img columns float px = imgs[0]; target[0] = scaleOutputs * (outputs[0] + px * shFilter[0]); #pragma unroll for (int r = 1; r < radius*2; r++) { outputs[r-1] = outputs[r] + px * shFilter[r]; } outputs[filterWidth - 2] = px * shFilter[0]; imgs += horiz ? imgStride : imgStride * imgSize; target += horiz ? numImages : numImages * imgSize; } #pragma unroll for (int r = 0; r < radius; r++) { target[0] = scaleOutputs * outputs[r]; target += horiz ? numImages : numImages * imgSize; } } } } /* * Block size B_YxB_X * blockIdx.x determines output.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines output.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output for some number of images/filters. 
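 * ("Bed of nails" here means keeping only the pixels at startX + k*strideX in each
 * dimension, giving outputsX x outputsX outputs per channel; with reverse=true the same
 * pattern is used to scatter values back into an image, which the host wrapper zeroes
 * first when scaleTargets is 0.)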
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numChannels, imgPixels, numImages) * target: (numChannels, numOutputs, numImages) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int chansPerThread, bool checkCaseBounds> __global__ void kBedOfNails(float* imgs, float* target, const int imgSize, const int numChannels, const int numImages, const int startX, const int strideX, const int outputsX, const bool reverse, const float scaleTargets, const float scaleOutput) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numChanBlocks = DIVUP(numChannels, B_Y*chansPerThread); const int outputIdxX = blockIdx.x / numImgBlocks; const int outputIdxY = blockIdx.y / numChanBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockChanIdx = (blockIdx.y % numChanBlocks) * B_Y * chansPerThread; const int myChanIdx = (blockChanIdx + threadIdx.y*chansPerThread); if (myChanIdx >= numChannels) { return; } // if (blockIdx.x != 0 || blockIdx.y != 0) { // return; // } const int outputIdx = outputIdxY * outputsX + outputIdxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startImgPxX = startX + outputIdxX * strideX; const int startImgPxY = startX + outputIdxY * strideX; const int imgIdx = blockImgIdx + threadIdx.x; const int imgPx = startImgPxY * imgSize + startImgPxX; imgs += myChanIdx * imgPixels * numImages + imgPx * numImages + imgIdx; target += (myChanIdx * numOutputs + outputIdx) * numImages + imgIdx; if (scaleTargets != 0) { if (!reverse) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { target[c * numOutputs * numImages + i * B_X] = scaleTargets * target[c * numOutputs * numImages + i * B_X] + scaleOutput * imgs[c * imgPixels * numImages + i * B_X]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { imgs[c * imgPixels * numImages + i * B_X] = scaleTargets * imgs[c * imgPixels * numImages + i * B_X] + scaleOutput * target[c * numOutputs * numImages + i * B_X]; } } } } } else { if (!reverse) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { target[c * numOutputs * numImages + i * B_X] = scaleOutput * imgs[c * imgPixels * numImages + i * B_X]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { imgs[c * imgPixels * numImages + i * B_X] = scaleOutput * target[c * numOutputs * numImages + i * B_X]; } } } } } } /* * imgs: (numChannels, imgPixels, numImages) * target: (numChannels, outputs, numImages) */ void _convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX, bool reverse, float scaleTargets, float scaleOutput) { int numImages = reverse ? 
target.getNumCols() : images.getNumCols(); int imgPixels = imgSize * imgSize; assert(!images.isTrans()); assert(!target.isTrans()); assert(images.isContiguous()); assert(target.isContiguous()); assert(strideX > 1); int outputsX = DIVUP(imgSize, strideX); int outputs = outputsX * outputsX; if (reverse) { assert(target.getNumRows() == numChannels * outputs); } else { assert(images.getNumRows() == numChannels * imgPixels); } if (scaleTargets == 0) { if (reverse) { images.resize(numChannels * imgPixels, numImages); images.apply(NVMatrixOps::Zero()); } else { target.resize(numChannels*outputs, numImages); } } else { if (reverse) { assert(images.getNumRows() == numChannels * outputs); assert(images.getNumCols() == numImages); } else { assert(target.getNumRows() == numChannels * outputs); assert(target.getNumCols() == numImages); } } int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; int chansPerThread = numChannels % 8 == 0 ? 2 : 1; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * outputsX, DIVUP(numChannels, 4 * chansPerThread) * outputsX); if (imgsPerThread == 4) { if (chansPerThread == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 4, 1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 4, 1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 4, 2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 4, 2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (chansPerThread == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 2, 1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 2, 1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 2, 2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 2, 2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, 
reverse, scaleTargets, scaleOutput); } } } else { if (chansPerThread == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 1, 1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 1, 1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 1, 2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 1, 2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } } } void convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX, float scaleTargets, float scaleOutput) { _convBedOfNails(images, target, numChannels, imgSize, startX, strideX, false, scaleTargets, scaleOutput); } void convBedOfNailsUndo(NVMatrix& actsGrad, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX, float scaleTargets, float scaleOutput) { _convBedOfNails(target, actsGrad, numChannels, imgSize, startX, strideX, true, scaleTargets, scaleOutput); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * filter: (1, 2*radius + 1) * target: (numChannels, imgPixels, numImages) */ void convGaussianBlur(NVMatrix& images, NVMatrix& filter, NVMatrix& target, bool horiz, int numChannels, float scaleTargets, float scaleOutputs) { int numImages = images.getNumCols(); int radius = filter.getNumCols() / 2; int imgPixels = images.getNumRows() / numChannels; int imgSize = int(sqrt(imgPixels)); assert(imgPixels == imgSize * imgSize); assert(radius >= 1 && radius <= 4); assert(imgSize >= 2 * radius + 1); assert(filter.getNumRows() == 1); assert(images.getNumRows() == numChannels * imgPixels); assert(!images.isTrans()); assert(!filter.isTrans()); assert(!target.isTrans()); assert(target.isContiguous()); if (scaleTargets == 0) { target.resize(images); } else { assert(target.isSameDims(images)); } dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, threads.x), DIVUP(numChannels*imgSize, threads.y)); if (radius == 1) { cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 1>, cudaFuncCachePreferL1); kGaussianBlur<4, 32, 1><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } else if (radius == 2) { cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 2>, cudaFuncCachePreferL1); kGaussianBlur<4, 32, 2><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } else if (radius == 3) { cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 3>, cudaFuncCachePreferL1); kGaussianBlur<4, 32, 3><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, 
scaleTargets, scaleOutputs); } else if (radius == 4) { cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 4>, cudaFuncCachePreferL1); kGaussianBlur<4, 32, 4><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } } /* * Block size 1x128 * blockIdx.x determines pixel.x, image idx in batches of 128*imgsPerThread * blockIdx.y determines pixel.y * * So each block does one output for some number of images and all the fliters. * * threadIdx.x determines img idx * * imgs: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread */ template<int imgsPerThread, int numFilters, bool checkCaseBounds> __global__ void kCNorm_fewfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numImages, const int sizeX, const float addScale, const float powScale) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, 128*imgsPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y; const int blockImgIdx = (blockIdx.x % numImgBlocks) * 128 * imgsPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += pxIdx * numImages + imgIdx; denoms += pxIdx * numImages + imgIdx; meanDiffs += imgIdx; target += pxIdx * numImages + imgIdx; float prod[numFilters][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] += square(meanDiffs[(f * imgPixels + imgPx) * numImages + i * 128]); } } } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * imgPixels * numImages + i * 128] = prod[f][i]; target[f * imgPixels * numImages + i * 128] = imgs[f * imgPixels * numImages + i * 128] * __powf(prod[f][i], -powScale); } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one pixel for some number of images/filters. 
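 * (Spatial response normalization: for every filter and pixel,
 *     denoms = 1 + addScale * sum of meanDiffs^2 over a sizeX x sizeX window around the pixel,
 *     target = imgs * denoms^(-powScale).)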
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * means: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kCNorm_manyfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; meanDiffs += (blockFilterIdx + threadIdx.y) * imgPixels * numImages + imgIdx; denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(meanDiffs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]); } } } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; target[f * B_Y * imgPixels * numImages + i * B_X] = imgs[f * B_Y * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale); } } } } /* * Block size 16xB_X * blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread * blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread * * So each block does 4x4 region of pixels for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines pixel idx * * imgs: (numFilters, imgPixels, numImages) * means: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * B_X one of 8, 16, 32 * imgsPerThread one of 1, 2, 4, 8, 16 * * B_XximgsPerThread MUST be divisible by 32. 
* Number of filters MUST be divisible by filtersPerThread. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread * * Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more * reading than writing here, and the reading is all coalesced, so it should be OK. */ template<int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kCNorm2(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale) { __shared__ float shDiffs[filtersPerThread][B_X*imgsPerThread]; const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(filtersPerThread); const int blockPxX = 4*(blockIdx.x / numImgBlocks); const int blockPxY = 4*(blockIdx.y / numFilterBlocks); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread; const int tidx = threadIdx.y * B_X + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; const int startPxX = MAX(0, -sizeX/2 + blockPxX); const int startPxY = MAX(0, -sizeX/2 + blockPxY); const int endPxX = MIN(imgSize, blockPxX + DIVUP(sizeX, 2) + 3); const int endPxY = MIN(imgSize, blockPxY + DIVUP(sizeX, 2) + 3); const int myPxX = blockPxX + threadIdx.y % 4; const int myPxY = blockPxY + threadIdx.y / 4; const int myPxIdx = myPxY * imgSize + myPxX; // const bool doWork = myPxX < imgSize && myPxY < imgSize; const int myStartPxY = -sizeX/2 + myPxY; const int myStartPxX = -sizeX/2 + myPxX; const int myEndPxY = myPxY + DIVUP(sizeX, 2); const int myEndPxX = myPxX + DIVUP(sizeX, 2); const int imgIdx = blockImgIdx + threadIdx.x; imgs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; meanDiffs += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX; denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } for (int y = startPxY; y < endPxY; y++) { const bool isInY = y >= myStartPxY && y < myEndPxY; for (int x = startPxX; x < endPxX; x++) { const int px = y * imgSize + x; // All the threads load a pixel from memory #pragma unroll for (int ly = 0; ly < filtersPerThread; ly += B_X/2) { if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) { #pragma unroll for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) { if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) { shDiffs[ly + loadY][lx + loadX] = meanDiffs[(ly * imgPixels + px) * numImages + lx]; } } } } __syncthreads(); // Each row of threads decides if it's interested in this pixel if (isInY && x >= myStartPxX && x < myEndPxX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(shDiffs[f][threadIdx.x + i * B_X]); } } } } __syncthreads(); } } // imgs -= (loadY * imgPixels - myPxIdx) * numImages + loadX; // imgs += threadIdx.x; if (myPxX < imgSize && myPxY < imgSize) { #pragma unroll for (int i = 0; i < 
imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * imgPixels * numImages + i * B_X] = prod[f][i]; target[f * imgPixels * numImages + i * B_X] = imgs[f * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale); } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y * * So each block does one pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y */ template<int B_Y, int B_X, int imgsPerThread, bool checkCaseBounds, bool blocked> __global__ void kFCNorm(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeF, const float addScale, const float powScale) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/B_Y; const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; meanDiffs += pxIdx * numImages + imgIdx; denoms += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; float prod[imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] = 0; } } const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF/2 + filterIdx; const int loopStartF = blocked ? startF : MAX(0, startF); const int loopEndF = MIN(numFilters, startF + sizeF); for (int f = loopStartF; f < loopEndF; ++f) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] += square(meanDiffs[f * imgPixels * numImages + i * B_X]); } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] = 1 + addScale * prod[i]; denoms[i * B_X] = prod[i]; target[i * B_X] = imgs[i * B_X] * __powf(prod[i], -powScale); } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y * * So each block does one output pixel for some number of images/filters. 
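 * (Per element this computes
 *     target = inputs * sum_{window} acts + outGrads * denoms^(-powScale),
 * where 'acts' has already been rescaled to -2*addScale*powScale * outGrads * acts / denoms
 * by the kRNormUndoPrelims pass that the host code runs first.)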
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y * * TODO: this isn't really ideal */ template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds, bool blocked> __global__ void kFRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeF, const float powScale, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numFilterBlocks = numFilters/B_Y; const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y; const int imgPixels = imgSize * imgSize; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int imgIdx = blockImgIdx + threadIdx.x; acts += pxIdx * numImages + imgIdx; inputs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; denoms += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; outGrads += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; float prod[imgsPerThread]; // if (imgIdx != 0 || pxIdx != 0 || filterIdx != 0) { // return; // } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[i] = 0; } const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF + sizeF/2 + 1 + filterIdx; const int loopStartF = blocked ? startF : MAX(0, startF); const int loopEndF = MIN(numFilters, startF + sizeF); for (int f = loopStartF; f < loopEndF; ++f) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] += acts[f * imgPixels * numImages + i * B_X]; } } } // printf("gpu f start: %d, end: %d\n", loopStartF, loopEndF); if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { const float inp = inputs[i * B_X]; const float out = outGrads[i * B_X]; const float den = denoms[i * B_X]; prod[i] = inp * prod[i] + out * __powf(den, -powScale); target[i * B_X] = prod[i]; } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { const float inp = inputs[i * B_X]; const float out = outGrads[i * B_X]; const float den = denoms[i * B_X]; prod[i] = inp * prod[i] + out * __powf(den, -powScale); target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i]; } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread * * sizeX should be something like 3 or 5 for this function. Not much more. 
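 *
 * Each output is scaleOutput / (0.001 + sqrt(sum of imgs^2 over a sizeX x sizeX window)),
 * i.e. the 1/(sqrt(sum_i x_i^2) + eps) factor of the TICA gradient with eps hard-coded
 * to 0.001.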
* TODO: write variant where each block does 4x4 region or so (this'll be based on kCNorm2). */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kTICA_manyfilter(float* imgs, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float scaleTarget, const float scaleOutput) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(imgs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]); } } } } } imgs += pxIdx * numImages; if (scaleTarget == 0) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleOutput * __fdividef(1.0f, 0.001 + sqrtf(prod[f][i])); } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTarget * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutput * __fdividef(1.0f, 0.001 + sqrtf(prod[f][i])); } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * ticas: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread * * sizeX should be something like 3 or 5 for this function. Not much more. * TODO: write variant where each block does 4x4 region or so (this'll be based on kCNorm2). 
*/ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kTICAGrad_manyfilter(float* imgs, float* ticas, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float scaleTarget, const float scaleOutput) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; ticas += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { // adding 1/S values prod[f][i] += ticas[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]; } } } } } if (scaleTarget == 0) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleOutput * -imgs[f * B_Y * imgPixels * numImages + i * B_X] * prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTarget * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutput * -imgs[f * B_Y * imgPixels * numImages + i * B_X] * sqrtf(prod[f][i]); } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. 
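 * (Average-pooling backward pass: each image pixel accumulates avgGrads from every pooling
 * window that covers it, with each contribution weighted by 1/(regionSizeX * regionSizeY).
 * Note that the imgs/maxGrads/rMaxActs names listed below appear to be left over from the
 * max-pooling variant; this kernel only takes avgGrads.)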
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * rMaxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kLocalAvgUndo(float* avgGrads, float* target, const int imgSize, const int numFilters, const int numImages, const int subsX, const int startX, const int strideX, const int outputsX, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread)); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX; const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX); const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX; const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX); const int imgIdx = blockImgIdx + threadIdx.x; avgGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX && blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) { for (int my = startOutputY; my < endOutputY; my++) { const float regionStartY = fmaxf(0, startX + my * strideX); const float regionEndY = fminf(imgSize, startX + my * strideX + subsX); const float regionSizeY = regionEndY - regionStartY; for (int mx = startOutputX; mx < endOutputX; mx++) { const int outputIdx = my * outputsX + mx; const float regionStartX = fmaxf(0, startX + mx * strideX); const float regionEndX = fminf(imgSize, startX + mx * strideX + subsX); const float regionSizeX = regionEndX - regionStartX; // It's important to do the division here, because pushing division into the below // loops makes the code 4x slower. 
const float regionSizeInv = 1.0f / (regionSizeX * regionSizeY); #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += avgGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X] * regionSizeInv; } } } } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * maxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kLocalMaxUndo(float* imgs, float* maxGrads, float* maxActs, float* target, const int imgSize, const int numFilters, const int numImages, const int subsX, const int startX, const int strideX, const int outputsX, const float scaleTargets, const float scaleOutputs) { __shared__ float shImgs[B_Y*filtersPerThread][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread)); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX; const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX); const int startOutputX = blockPxX - startX < subsX ? 
0 : 1 + (blockPxX - startX - subsX) / strideX; const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX); const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; maxGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; maxActs += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX && blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i] = imgs[f * B_Y * imgPixels * numImages + i * B_X]; } } } for (int my = startOutputY; my < endOutputY; my++) { for (int mx = startOutputX; mx < endOutputX; mx++) { const int outputIdx = my * outputsX + mx; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float ma = maxActs[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X]; const float mg = maxGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X]; const float img = shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i]; prod[f][i] += (img == ma) * mg; } } } } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * acts := -2 x scale x acts x outGrads / denoms */ template<int B_X, int eltsPerThread> __global__ void kRNormUndoPrelims(float* acts, float* denoms, float* outGrads, const uint numElements, const float scale) { const uint e = B_X * blockIdx.x * eltsPerThread + threadIdx.x; const uint numThreads = B_X * gridDim.x; for (uint i = e; i < numElements; i += numThreads*eltsPerThread) { #pragma unroll for (uint k = 0; k < eltsPerThread; k++) { if (i + k * B_X < numElements) { acts[i + k * B_X] = __fdividef(scale*outGrads[i + k * B_X] * acts[i + k * B_X], denoms[i + k * B_X]); } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. 
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread * * TODO: this isn't really ideal */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int imgPixels = imgSize * imgSize; const int startY = MAX(0, blockPxY + sizeX/2 - sizeX + 1); const int startX = MAX(0, blockPxX + sizeX/2 - sizeX + 1); const int endY = MIN(imgSize, blockPxY + sizeX/2 + 1); const int endX = MIN(imgSize, blockPxX + sizeX/2 + 1); const int imgIdx = blockImgIdx + threadIdx.x; acts += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx; inputs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; outGrads += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } for (int sy = startY; sy < endY; sy++) { for (int sx = startX; sx < endX; sx++) { const int outPx = sy * imgSize + sx; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += acts[(f * B_Y * imgPixels + outPx) * numImages + i * B_X]; } } } } } // outGrads += blockPx * numImages; if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X]; const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X]; const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X]; const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X]; const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, 
                                                                   -powScale);
                    target[f * B_Y * imgPixels * numImages + i * B_X] =
                        scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
                }
            }
        }
    }
}

/*
 * Block size 16xB_X
 * blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread
 * blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread
 *
 * So each block does 4x4 region for some number of images/filters.
 *
 * threadIdx.x determines img idx
 * threadIdx.y determines pixel idx
 *
 * outGrads: (numFilters, imgPixels, numImages)
 * denoms:   (numFilters, imgPixels, numImages)
 * inputs:   (numFilters, imgPixels, numImages)
 * acts:     (numFilters, imgPixels, numImages)
 * target:   (numFilters, imgPixels, numImages)
 *
 * B_X one of 8, 16, 32
 * imgsPerThread one of 1, 2, 4, 8, 16
 *
 * B_XximgsPerThread MUST be divisible by 32.
 * Number of filters MUST be divisible by filtersPerThread.
 *
 * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
 * numFilters must be divisible by filtersPerThread
 *
 * Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more
 * reading than writing here, and the reading is all coalesced, so it should be OK.
 */
template<int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kRNormUndo2(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters,
                            const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) {
    __shared__ float shActs[filtersPerThread][B_X*imgsPerThread];
    const int imgPixels = imgSize * imgSize;
    const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
    const int numFilterBlocks = numFilters/(filtersPerThread);
    const int blockPxX = 4*(blockIdx.x / numImgBlocks);
    const int blockPxY = 4*(blockIdx.y / numFilterBlocks);
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread;

    const int tidx = threadIdx.y * B_X + threadIdx.x;
    const int loadY = tidx / 32, loadX = tidx % 32;

    const int startPxX = MAX(0, -DIVUP(sizeX,2) + blockPxX + 1);
    const int startPxY = MAX(0, -DIVUP(sizeX,2) + blockPxY + 1);
    const int endPxX = MIN(imgSize, blockPxX + sizeX/2 + 4);
    const int endPxY = MIN(imgSize, blockPxY + sizeX/2 + 4);

    const int myPxX = blockPxX + threadIdx.y % 4;
    const int myPxY = blockPxY + threadIdx.y / 4;
    const int myPxIdx = myPxY * imgSize + myPxX;
//    const bool doWork = myPxX < imgSize && myPxY < imgSize;
    const int myStartPxY = -DIVUP(sizeX,2) + myPxY + 1;
    const int myStartPxX = -DIVUP(sizeX,2) + myPxX + 1;
    const int myEndPxY = myPxY + sizeX/2 + 1;
    const int myEndPxX = myPxX + sizeX/2 + 1;

    const int imgIdx = blockImgIdx + threadIdx.x;

    acts     += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX;
    denoms   += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
    inputs   += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
    outGrads += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
    target   += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;

    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for (int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[f][i] = 0;
        }
    }

    for (int y = startPxY; y < endPxY; y++) {
        const bool isInY = y >= myStartPxY && y < myEndPxY;
        for (int x = startPxX; x < endPxX; x++) {
            const int px = y * imgSize + x;
            // All the threads load a pixel from memory
            #pragma unroll
            for (int ly = 0; ly < filtersPerThread; ly += B_X/2) {
                if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) {
                    #pragma unroll
                    for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) {
                        if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) {
                            shActs[ly + loadY][lx + loadX] = acts[(ly * imgPixels + px) * numImages + lx];
                        }
                    }
                }
            }
            __syncthreads();

            // Each row of threads decides if it's interested in this pixel
            if (isInY && x >= myStartPxX && x < myEndPxX) {
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                        #pragma unroll
                        for (int f = 0; f < filtersPerThread; f++) {
                            prod[f][i] += shActs[f][threadIdx.x + i * B_X];
                        }
                    }
                }
            }
            __syncthreads();
        }
    }
    acts -= (loadY * imgPixels - myPxIdx) * numImages + loadX;
    acts += threadIdx.x;
    if (myPxX < imgSize && myPxY < imgSize) {
        if (!add) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int f = 0; f < filtersPerThread; f++) {
                        const float out = outGrads[f * imgPixels * numImages + i * B_X];
                        const float den = denoms[f * imgPixels * numImages + i * B_X];
                        const float inp = inputs[f * imgPixels * numImages + i * B_X];
                        prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
                        target[f * imgPixels * numImages + i * B_X] = prod[f][i];
                    }
                }
            }
        } else {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int f = 0; f < filtersPerThread; f++) {
                        const float out = outGrads[f * imgPixels * numImages + i * B_X];
                        const float den = denoms[f * imgPixels * numImages + i * B_X];
                        const float inp = inputs[f * imgPixels * numImages + i * B_X];
                        prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
                        target[f * imgPixels * numImages + i * B_X] =
                            scaleTargets * target[f * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
                    }
                }
            }
        }
    }
}

void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target,
                      int subsX, int startX, int strideX, int outputsX) {
    convLocalMaxUndo(images, maxGrads, maxActs, target, subsX, startX, strideX, outputsX, 0, 1);
}

/*
 * imgs:      (numFilters, imgPixels, numImages)
 * maxGrads:  (numFilters, numOutputs, numImages)
 * rMaxActs:  (numFilters, numOutputs, numImages)
 * target:    (numFilters, imgPixels, numImages)
 */
void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target,
                      int subsX, int startX, int strideX, int outputsX, float scaleTargets, float scaleOutput) {
    int outputs = outputsX * outputsX;
    int numImages = images.getNumCols();
    int numFilters = maxGrads.getNumRows() / outputs;
    int imgPixels = images.getNumRows() / numFilters;
    assert(images.getNumRows() == numFilters * imgPixels);
    int imgSize = int(sqrt(imgPixels));

    assert(imgSize * imgSize == imgPixels);
    assert(maxGrads.getNumRows() == numFilters * outputs);
    assert(maxGrads.getNumCols() == numImages);
    assert(!images.isTrans());
    assert(!target.isTrans());
    assert(!maxGrads.isTrans());
    assert(!maxActs.isTrans());
    assert(images.isContiguous());
    assert(maxGrads.isContiguous());
    assert(maxActs.isContiguous());
    assert(maxGrads.isSameDims(maxActs));
    assert(numFilters % 16 == 0);
//    assert(numImages % 128 == 0);

    assert(strideX <= subsX);

    target.resize(images);
    assert(target.isContiguous());
    int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ?
2 : 1; int checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize); if (imgsPerThread == 4) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 4, 2, false, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 4, 2, true, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 4, 2, false, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 4, 2, true, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 2, 2, false, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 2, 2, true, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 2, 2, false, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 2, 2, true, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 1, 2, false, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 1, 2, true, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 1, 2, false, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 1, 2, true, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, 
outputsX, scaleTargets, scaleOutput); } } } cutilCheckMsg("convLocalMaxUndo: kernel execution failed"); } void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, int imgSize) { convLocalAvgUndo(avgGrads, target, subsX, startX, strideX, outputsX, imgSize, 0, 1); } /* * avgGrads: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, int imgSize, float scaleTargets, float scaleOutput) { int numImages = avgGrads.getNumCols(); int outputs = outputsX * outputsX; int imgPixels = imgSize * imgSize; int numFilters = avgGrads.getNumRows() / outputs; assert(avgGrads.getNumRows() == numFilters * outputs); assert(!target.isTrans()); assert(!avgGrads.isTrans()); assert(avgGrads.isContiguous()); assert(numFilters % 16 == 0); // assert(numImages % 128 == 0); assert(strideX <= subsX); target.resize(numFilters * imgPixels, numImages); assert(target.isContiguous()); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; int checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 4)) * imgSize); if (imgsPerThread == 4) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 4, 4, false, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 4, 4, true, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 4, 4, false, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 4, 4, true, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 2, 4, false, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 2, 4, true, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 2, 4, false, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 2, 4, true, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 1, 4, false, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 1, 
4, true, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 1, 4, false, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 1, 4, true, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } cutilCheckMsg("convLocalAvgUndo: kernel execution failed"); } void convResponseNorm(NVMatrix& images, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) { convContrastNorm(images, images, denoms, target, numFilters, sizeX, addScale, powScale); } /* * images: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) */ void convContrastNorm(NVMatrix& images, NVMatrix& meanDiffs, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(meanDiffs.isSameDims(images)); assert(!meanDiffs.isTrans()); assert(!images.isTrans()); assert(images.isContiguous()); assert(meanDiffs.isContiguous()); assert(numFilters % 16 == 0 || numFilters <= 8); target.resize(images); denoms.resize(images); assert(target.isContiguous()); if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) 
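        // Worked example (illustrative numbers only) of the launch geometry chosen just below:
        // with bx = 8, imgsPerThread = 8, filtersPerThread = 4, each block uses threads(8, 16)
        // and appears to cover a 4x4 pixel tile for 8*8 = 64 images and 4 filters, so for, say,
        // imgSize = 32, numImages = 128, numFilters = 64 the grid comes out as
        //     dim3(DIVUP(32,4) * DIVUP(128, 64), DIVUP(32,4) * 64/4) = dim3(16, 128).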
int imgsPerThread = 8; int filtersPerThread = 4; int bx = 8; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); assert(numFilters % filtersPerThread == 0); dim3 threads(bx, 16); dim3 blocks(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm2<8, 8, 4, true>, cudaFuncCachePreferL1); // L1 faster here kCNorm2<8, 8, 4, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm2<8, 8, 4, false>, cudaFuncCachePreferL1); // L1 faster here kCNorm2<8, 8, 4, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } } else { bool checkCaseBounds = numImages % 128 != 0; if (numFilters <= 8) { dim3 threads(128); dim3 blocks(DIVUP(numImages,128) * imgSize, imgSize); if (numFilters == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 1, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 1, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 1, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 1, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 2) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 2, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 2, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 2, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 2, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 3) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 3, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 3, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 3, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 3, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 4) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 4, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 4, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 4, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 4, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 5) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 5, true>, 
cudaFuncCachePreferL1); kCNorm_fewfilter<1, 5, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 5, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 5, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 6) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 6, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 6, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 6, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 6, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 7) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 7, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 7, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 7, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 7, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 8) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 8, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 8, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 8, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 8, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } } else { dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, true>, cudaFuncCachePreferL1); kCNorm_manyfilter<4, 32, 4, 2, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, false>, cudaFuncCachePreferL1); kCNorm_manyfilter<4, 32, 4, 2, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } } } cutilCheckMsg("convResponseNorm: kernel execution failed"); } void convContrastNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& meanDiffs, NVMatrix& acts, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { convResponseNormUndo(outGrads, denoms, meanDiffs, acts, target, numFilters, sizeX, addScale, powScale, scaleTargets, scaleOutput); } /* * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: 
(numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * THIS WILL OVERWRITE THE ACTS MATRIX. */ void convResponseNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& inputs, NVMatrix& acts, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { int numImages = outGrads.getNumCols(); int imgPixels = outGrads.getNumRows() / numFilters; int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(outGrads.getNumRows() == numFilters * imgPixels); assert(denoms.isSameDims(outGrads)); assert(acts.isSameDims(denoms)); assert(!denoms.isTrans()); assert(!outGrads.isTrans()); assert(!acts.isTrans()); assert(!target.isTrans()); assert(outGrads.isContiguous()); assert(numFilters % 16 == 0); target.resize(outGrads); assert(target.isContiguous()); // First do acts := -2 x scale x acts x outGrads / denoms // so that the main routine only has to do an addition in its inner loop. int prelimEltsPerThread = 4; dim3 threads(128); dim3 blocks(MIN(512, DIVUP(outGrads.getNumElements(),(threads.x * prelimEltsPerThread)))); kRNormUndoPrelims<128, 4><<<blocks, threads>>>(acts.getDevData(), denoms.getDevData(), outGrads.getDevData(), outGrads.getNumElements(), -2*addScale*powScale); // Now the main routine if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2; int filtersPerThread = 4; int bx = 16; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); threads = dim3(bx, 16); blocks = dim3(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (imgsPerThread == 8) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 4, 4, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), 
acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 4, 4, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 4, 4, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 4, 4, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 2, 4, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 2, 4, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 2, 4, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 2, 4, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } } else { int imgsPerThread = numImages % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; threads = dim3(32, 4); blocks = dim3(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize); if (imgsPerThread == 2) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, true>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 2, 2, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, true>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 2, 2, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, false>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 2, 2, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, false>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 2, 2, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false, true>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 1, 2, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true, true>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 1, 2, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false, false>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 1, 2, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true, false>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 1, 2, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } } cutilCheckMsg("kRNormUndo: kernel execution failed"); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) * * imgSize = scale * tgtSize */ void convResizeBilinear(NVMatrix& images, NVMatrix& target, int imgSize, int tgtSize, float scale) { assert(!images.isTrans()); assert(!target.isTrans()); int imgPixels = imgSize * imgSize; int tgtPixels = tgtSize * tgtSize; int 
numChannels = images.getNumRows() / imgPixels; int numImages = images.getNumCols(); assert(images.getNumRows() == numChannels * imgPixels); target.resize(numChannels * tgtPixels, numImages); assert(target.isContiguous()); int numChunksX = DIVUP(tgtSize, 4); int numChunks = numChunksX * numChunksX; double imgCenter = imgSize * 0.5; double tgtCenter = tgtSize * 0.5; double centerScale = imgCenter - tgtCenter * scale; int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 16); dim3 blocks(DIVUP(numImages, imgsPerThread * 32), numChannels * numChunks); if (imgsPerThread == 4) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kResizeBilinear<4, true>, cudaFuncCachePreferL1); kResizeBilinear<4, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } else { cudaFuncSetCacheConfig(kResizeBilinear<4, false>, cudaFuncCachePreferL1); kResizeBilinear<4, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kResizeBilinear<2, true>, cudaFuncCachePreferL1); kResizeBilinear<2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } else { cudaFuncSetCacheConfig(kResizeBilinear<2, false>, cudaFuncCachePreferL1); kResizeBilinear<2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kResizeBilinear<1, true>, cudaFuncCachePreferL1); kResizeBilinear<1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } else { cudaFuncSetCacheConfig(kResizeBilinear<1, false>, cudaFuncCachePreferL1); kResizeBilinear<1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } } cutilCheckMsg("convResizeBilinear: kernel execution failed"); } /* * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) */ void convRGBToYUV(NVMatrix& images, NVMatrix& target) { assert(!images.isTrans()); assert(!target.isTrans()); int imgPixels = images.getNumRows() / 3; int numImages = images.getNumCols(); assert(images.getNumRows() == 3 * imgPixels); target.resize(3 * imgPixels, numImages); assert(target.isContiguous()); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4)); if (imgsPerThread == 4) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToYUV<4, true>, cudaFuncCachePreferL1); kRGBToYUV<4, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { cudaFuncSetCacheConfig(kRGBToYUV<4, false>, cudaFuncCachePreferL1); kRGBToYUV<4, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToYUV<2, true>, cudaFuncCachePreferL1); kRGBToYUV<2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { cudaFuncSetCacheConfig(kRGBToYUV<2, false>, cudaFuncCachePreferL1); kRGBToYUV<2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToYUV<1, true>, cudaFuncCachePreferL1); kRGBToYUV<1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { cudaFuncSetCacheConfig(kRGBToYUV<1, false>, cudaFuncCachePreferL1); kRGBToYUV<1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } cutilCheckMsg("convRGBToYUV: kernel execution failed"); } /* * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) */ void convRGBToLAB(NVMatrix& images, NVMatrix& target, bool center) { assert(!images.isTrans()); assert(!target.isTrans()); int imgPixels = images.getNumRows() / 3; int numImages = images.getNumCols(); assert(images.getNumRows() == 3 * imgPixels); target.resize(3 * imgPixels, numImages); assert(target.isContiguous()); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4)); if (imgsPerThread == 4) { if (center) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToLAB<4, true, true>, cudaFuncCachePreferL1); kRGBToLAB<4, true, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { cudaFuncSetCacheConfig(kRGBToLAB<4, false, true>, cudaFuncCachePreferL1); kRGBToLAB<4, false, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToLAB<4, true, false>, cudaFuncCachePreferL1); kRGBToLAB<4, true, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { cudaFuncSetCacheConfig(kRGBToLAB<4, false, false>, cudaFuncCachePreferL1); kRGBToLAB<4, false, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } } else if (imgsPerThread == 2) { if (center) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToLAB<2, true, true>, cudaFuncCachePreferL1); kRGBToLAB<2, true, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { cudaFuncSetCacheConfig(kRGBToLAB<2, false, true>, cudaFuncCachePreferL1); kRGBToLAB<2, false, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToLAB<2, true, false>, cudaFuncCachePreferL1); kRGBToLAB<2, true, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { cudaFuncSetCacheConfig(kRGBToLAB<2, false, false>, cudaFuncCachePreferL1); kRGBToLAB<2, false, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } } else { if (center) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToLAB<1, true, true>, cudaFuncCachePreferL1); kRGBToLAB<1, true, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { cudaFuncSetCacheConfig(kRGBToLAB<1, false, true>, cudaFuncCachePreferL1); kRGBToLAB<1, false, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToLAB<1, true, false>, cudaFuncCachePreferL1); kRGBToLAB<1, true, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { cudaFuncSetCacheConfig(kRGBToLAB<1, false, false>, cudaFuncCachePreferL1); kRGBToLAB<1, false, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } } cutilCheckMsg("convRGBToLAB: kernel execution failed"); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) */ void convCrop(NVMatrix& imgs, NVMatrix& target, int imgSize, int tgtSize, int startY, int startX) { int numImages = imgs.getNumCols(); int imgPixels = imgSize * imgSize; int tgtPixels = tgtSize * tgtSize; int numChannels = imgs.getNumRows() / imgPixels; assert(imgs.getNumRows() == imgPixels * numChannels); assert(imgPixels == imgSize * imgSize); assert(imgSize - startY >= tgtSize); 
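    // Hypothetical usage sketch (not from the original source): a centered 256 -> 224 crop
    // would pass startY = startX = (256 - 224) / 2 = 16, e.g.
    //     convCrop(imgs, target, 256, 224, 16, 16);
    // which satisfies the startY check above and the startX / non-negativity checks below.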
assert(imgSize - startX >= tgtSize); assert(startY >= 0); assert(startX >= 0); target.resize(numChannels * tgtPixels, numImages); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 blocks(DIVUP(numImages, 32 * imgsPerThread), numChannels * DIVUP(tgtPixels, 4)); dim3 threads(32, 4); if (imgsPerThread == 4) { if (checkCaseBounds) { kCrop<4, true><<<blocks, threads>>>(imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } else { kCrop<4, false><<<blocks, threads>>>(imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { kCrop<2, true><<<blocks, threads>>>(imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } else { kCrop<2, false><<<blocks, threads>>>(imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } } else { if (checkCaseBounds) { kCrop<1, true><<<blocks, threads>>>(imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } else { kCrop<1, false><<<blocks, threads>>>(imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } } cutilCheckMsg("convCrop: kernel execution failed"); } /* * images: (numFilters, imgPixels, numImages) * ticas: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) (out) * * Computes TICA-style gradient for given feature maps * f(x) = exp(-(sum_i{x_i^2}^(1/2))) * dlogf(x)/df(x) = -x_i / (sum_i{x_i^2}^(1/2) + eps) * * eps added for numerical stability */ void convTICAGrad(NVMatrix& images, NVMatrix& ticas, NVMatrix& target, int numFilters, int sizeX, float scaleTarget, float scaleOutput) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(!images.isTrans()); assert(images.isContiguous()); assert(numFilters % 16 == 0 || numFilters <= 8); assert(ticas.isSameDims(images)); assert(ticas.isContiguous()); if (scaleTarget == 0) { target.resize(images); } else { assert(target.isSameDims(images)); } assert(target.isContiguous()); // TEMPORARY assert(numFilters > 8); assert(sizeX < 6); dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, 32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); bool checkCaseBounds = (numImages % 128) != 0; if (checkCaseBounds) { cudaFuncSetCacheConfig(kTICAGrad_manyfilter<4, 32, 4, 2, true>, cudaFuncCachePreferL1); kTICAGrad_manyfilter<4, 32, 4, 2, true><<<blocks, threads>>>(images.getDevData(), ticas.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput); } else { cudaFuncSetCacheConfig(kTICAGrad_manyfilter<4, 32, 4, 2, false>, cudaFuncCachePreferL1); kTICAGrad_manyfilter<4, 32, 4, 2, false><<<blocks, threads>>>(images.getDevData(), ticas.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput); } cutilCheckMsg("convTICAGrad: kernel execution failed"); } /* * images: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) (out) * * Computes TICA-style gradient for given feature maps * f(x) = exp(-(sum_i{x_i^2}^(1/2))) * dlogf(x)/df(x) = -x_i / (sum_i{x_i^2}^(1/2) + eps) * * eps added for numerical 
stability */ void convTICA(NVMatrix& images, NVMatrix& target, int numFilters, int sizeX, float scaleTarget, float scaleOutput) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(!images.isTrans()); assert(images.isContiguous()); assert(numFilters % 16 == 0 || numFilters <= 8); if (scaleTarget == 0) { target.resize(images); } else { assert(target.isSameDims(images)); } assert(target.isContiguous()); // TEMPORARY assert(numFilters > 8); assert(sizeX < 6); dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, 32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); bool checkCaseBounds = (numImages % 128) != 0; if (checkCaseBounds) { cudaFuncSetCacheConfig(kTICA_manyfilter<4, 32, 4, 2, true>, cudaFuncCachePreferL1); kTICA_manyfilter<4, 32, 4, 2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput); } else { cudaFuncSetCacheConfig(kTICA_manyfilter<4, 32, 4, 2, false>, cudaFuncCachePreferL1); kTICA_manyfilter<4, 32, 4, 2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput); } cutilCheckMsg("convTICA: kernel execution failed"); } /* * images: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * Note: at present, I have no code to compute the meanDiffs. So it should be set * to be equal to images. In other words, this isn't really doing contrast normalization, * just response normalization. */ void convContrastNormCrossMap(NVMatrix& images, NVMatrix& meanDiffs, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeF, float addScale, float powScale, bool blocked) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(meanDiffs.isSameDims(images)); assert(sizeF > 0 && sizeF <= numFilters); assert(!meanDiffs.isTrans()); assert(!images.isTrans()); assert(images.isContiguous()); assert(meanDiffs.isContiguous()); assert(numFilters % 16 == 0); target.resize(images); denoms.resize(images); assert(target.isContiguous()); bool checkCaseBounds = numImages % 128 != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize); if (blocked) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, true, true>, cudaFuncCachePreferL1); kFCNorm<4, 32, 4, true, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, addScale, powScale); } else { cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, false, true>, cudaFuncCachePreferL1); kFCNorm<4, 32, 4, false, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, addScale, powScale); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, true, false>, cudaFuncCachePreferL1); kFCNorm<4, 32, 4, true, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, addScale, powScale); } else { 
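            // Sketch of my reading of the kFCNorm template flags used in this dispatch; kFCNorm
            // itself (defined elsewhere) is authoritative, so treat this index math as an
            // assumption rather than a specification:
            //   blocked   : filter f is normalized over the fixed block [f - f % sizeF, f - f % sizeF + sizeF)
            //   unblocked : filter f is normalized over a sliding window of sizeF filters around f,
            //               clamped to [0, numFilters)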
cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, false, false>, cudaFuncCachePreferL1); kFCNorm<4, 32, 4, false, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, addScale, powScale); } } cutilCheckMsg("convContrastNormCrossMap: kernel execution failed"); } /* * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * THIS WILL OVERWRITE THE ACTS MATRIX. */ void convResponseNormCrossMapUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& inputs, NVMatrix& acts, NVMatrix& target, int numFilters, int sizeF, float addScale, float powScale, bool blocked, float scaleTargets, float scaleOutput) { int numImages = outGrads.getNumCols(); int imgPixels = outGrads.getNumRows() / numFilters; int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(sizeF > 0 && sizeF <= numFilters); assert(outGrads.getNumRows() == numFilters * imgPixels); assert(denoms.isSameDims(outGrads)); assert(acts.isSameDims(denoms)); assert(!denoms.isTrans()); assert(!outGrads.isTrans()); assert(!acts.isTrans()); assert(!target.isTrans()); assert(outGrads.isContiguous()); assert(numFilters % 16 == 0); target.resize(outGrads); assert(target.isContiguous()); // First do acts := -2 x scale x acts x outGrads / denoms // so that the main routine only has to do an addition in its inner loop. int prelimEltsPerThread = 4; dim3 threads(128); dim3 blocks(MIN(512, DIVUP(outGrads.getNumElements(),(threads.x * prelimEltsPerThread)))); kRNormUndoPrelims<128, 4><<<blocks, threads>>>(acts.getDevData(), denoms.getDevData(), outGrads.getDevData(), outGrads.getNumElements(), -2*addScale*powScale); // Now the main routine dim3 threads2 = dim3(32, 4); dim3 blocks2 = dim3(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize); bool checkCaseBounds = (numImages % 128) != 0; if (blocked) { if (scaleTargets == 0 && scaleOutput == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, true, true>, cudaFuncCachePreferL1); kFRNormUndo<4, 32, 4, false, true, true><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, false, true>, cudaFuncCachePreferL1); kFRNormUndo<4, 32, 4, false, false, true><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, true, true>, cudaFuncCachePreferL1); kFRNormUndo<4, 32, 4, true, true, true><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, false, true>, cudaFuncCachePreferL1); kFRNormUndo<4, 32, 4, true, false, true><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } } } else { if (scaleTargets == 0 && 
scaleOutput == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, true, false>, cudaFuncCachePreferL1); kFRNormUndo<4, 32, 4, false, true, false><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, false, false>, cudaFuncCachePreferL1); kFRNormUndo<4, 32, 4, false, false, false><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, true, false>, cudaFuncCachePreferL1); kFRNormUndo<4, 32, 4, true, true, false><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, false, false>, cudaFuncCachePreferL1); kFRNormUndo<4, 32, 4, true, false, false><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } } } cutilCheckMsg("convResponseNormCrossMapUndo: kernel execution failed"); } void convResponseNormCrossMap(NVMatrix& images, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeF, float addScale, float powScale, bool blocked) { convContrastNormCrossMap(images, images, denoms, target, numFilters, sizeF, addScale, powScale, blocked); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * filter: (filterSize, filterSize, filterSize*filterSize) * target: (numChannels, imgPixels, numImages) */ void convShift(NVMatrix& images, NVMatrix& filter, NVMatrix& target, int numChannels) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numChannels; int imgSize = int(sqrt(imgPixels)); int numFilters = filter.getNumCols(); int filterLength = filter.getNumRows(); int outputsX = int(sqrt(filter.getNumRows())); assert(imgPixels == imgSize * imgSize); assert(images.getNumRows() == numChannels * imgPixels); assert(!images.isTrans()); assert(!filter.isTrans()); assert(!target.isTrans()); assert(target.isContiguous()); int targetPixels = outputsX * outputsX; target.resize(targetPixels * numChannels * numFilters, numImages); Matrix targetM(target.getNumRows(), target.getNumCols()); Matrix imageM(images.getNumRows(), images.getNumCols()); Matrix filterM(filter.getNumRows(), filter.getNumCols()); target.copyToHost(targetM); images.copyToHost(imageM); filter.copyToHost(filterM); float* targetData = targetM.getDataWrite(); float* imageData = imageM.getDataWrite(); float* filterData = filterM.getDataWrite(); //FILE* fid = fopen("temp.txt","wb"); ////fwrite(targetData, sizeof(float), target.getNumRows()*target.getNumCols(), fid); //fwrite(filterData, sizeof(float), filterM.getNumRows()*filterM.getNumCols(), fid); //fclose(fid); int i,n,c,f; int x,y,dx,dy; int width = numImages; // the nth image for(n = 0; n < target.getNumCols(); n++) { // the cth channels for(c = 0; c < numChannels; c++) { // the fth filter for(f = 0; f < numFilters; f++) { for(i = 0; i < targetPixels; i++) { x = n; y = f * numChannels * targetPixels + c * targetPixels + i; dx = n; 
                    dy = c * imgPixels + int(filterData[i * numFilters + f]);
                    targetData[y * width + x] = imageData[dy * width + dx];
                    //int ind1 = n * target.getNumRows() + f * numChannels * targetPixels + c * targetPixels + i;
                    //int ind2 = n * images.getNumRows() + c * imgPixels + int(filterData[f * filterLength + i]);
                    //float pp = targetData[n * target.getNumRows() + f * numChannels * targetPixels + c * targetPixels + i];
                    //if(pp > 0)
                    //    int aaa = 1;
                }
            }
        }
    }
    target.copyFromHost(targetM);
    //FILE* fid = fopen("temp.txt","wb");
    //fwrite(targetData, sizeof(float), target.getNumRows()*target.getNumCols(), fid);
    //fclose(fid);
}
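The TICA comments in the file above give the penalty and its gradient in closed form: f(x) = exp(-(sum_i x_i^2)^(1/2)) and dlogf/dx_i = -x_i / ((sum_i x_i^2)^(1/2) + eps). The kernel below is only a minimal, unpooled reference for that formula over a plain vector (one block, n <= blockDim.x, blockDim.x a power of two); the name kTICAGradReference and the flat-vector simplification are mine and not part of the library, whose actual implementation is the kTICAGrad_manyfilter kernel launched by convTICAGrad above.

__global__ void kTICAGradReference(const float* x, float* grad, int n, float eps) {
    extern __shared__ float sq[];
    int i = threadIdx.x;
    sq[i] = (i < n) ? x[i] * x[i] : 0.0f;
    __syncthreads();
    // block-wide tree reduction: sq[0] ends up holding sum_i x_i^2
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (i < stride) sq[i] += sq[i + stride];
        __syncthreads();
    }
    if (i < n) grad[i] = -x[i] / (sqrtf(sq[0]) + eps);
}
// Example launch (hypothetical device buffers d_x, d_grad of length n <= 256):
// kTICAGradReference<<<1, 256, 256 * sizeof(float)>>>(d_x, d_grad, n, 1e-6f);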
5ea192e58664569c98c295812354df8b51dbfadd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef D_CA_CROSS_KERNEL #define D_CA_CROSS_KERNEL #include "d_ca_cross.h" #include "cuda_utils.h" #include <math.h> #define CROSS_ARM_COUNT 4 typedef enum { CROSS_ARM_UP = 0, CROSS_ARM_DOWN, CROSS_ARM_LEFT, CROSS_ARM_RIGHT } cross_arm_e; __global__ void ca_cross_construction_kernel(unsigned char* img, unsigned char** cross, float ucd, float lcd, int usd, int lsd, int num_rows, int num_cols, int elem_sz) { int tx = threadIdx.x + blockIdx.x * blockDim.x; int ty = threadIdx.y + blockIdx.y * blockDim.y; if ((tx > num_cols - 1) || (ty > num_rows - 1)) return; int a_color_b = (int) img[(tx + ty * num_cols) * elem_sz]; int a_color_g = (int) img[(tx + ty * num_cols) * elem_sz + 1]; int a_color_r = (int) img[(tx + ty * num_cols) * elem_sz + 2]; cross[CROSS_ARM_UP][tx + ty * num_cols] = 0; cross[CROSS_ARM_DOWN][tx + ty * num_cols] = 0; cross[CROSS_ARM_LEFT][tx + ty * num_cols] = 0; cross[CROSS_ARM_RIGHT][tx + ty * num_cols] = 0; int p_color_b = a_color_b; int p_color_g = a_color_g; int p_color_r = a_color_r; // Upper arm for (int y = 1; y <= usd; ++y) { if (ty - y < 0) break; cross[CROSS_ARM_UP][tx + ty * num_cols] = (unsigned char) y; int c_color_b = (int) img[(tx + (ty - y) * num_cols) * elem_sz]; int c_color_g = (int) img[(tx + (ty - y) * num_cols) * elem_sz + 1]; int c_color_r = (int) img[(tx + (ty - y) * num_cols) * elem_sz + 2]; int ac_mad = max(max(abs(c_color_b - a_color_b), abs(c_color_g - a_color_g)), abs(c_color_r - a_color_r)); int cp_mad = max(max(abs(c_color_b - p_color_b), abs(c_color_g - p_color_g)), abs(c_color_r - p_color_r)); if (y > lsd) { if ((float) ac_mad > ucd) break; } else { if ((float) ac_mad > lcd || (float) cp_mad > lcd) break; } p_color_b = c_color_b; p_color_g = c_color_g; p_color_r = c_color_r; } p_color_b = a_color_b; p_color_g = a_color_g; p_color_r = a_color_r; // Down arm for (int y = 1; y <= usd; ++y) { if (ty + y > num_rows - 1) break; cross[CROSS_ARM_DOWN][tx + ty * num_cols] = (unsigned char) y; int c_color_b = (int) img[(tx + (ty + y) * num_cols) * elem_sz]; int c_color_g = (int) img[(tx + (ty + y) * num_cols) * elem_sz + 1]; int c_color_r = (int) img[(tx + (ty + y) * num_cols) * elem_sz + 2]; int ac_mad = max(max(abs(c_color_b - a_color_b), abs(c_color_g - a_color_g)), abs(c_color_r - a_color_r)); int cp_mad = max(max(abs(c_color_b - p_color_b), abs(c_color_g - p_color_g)), abs(c_color_r - p_color_r)); if (y > lsd) { if ((float) ac_mad > ucd) break; } else { if ((float) ac_mad > lcd || (float) cp_mad > lcd) break; } p_color_b = c_color_b; p_color_g = c_color_g; p_color_r = c_color_r; } p_color_b = a_color_b; p_color_g = a_color_g; p_color_r = a_color_r; // Left arm for (int x = 1; x <= usd; ++x) { if (tx - x < 0) break; cross[CROSS_ARM_LEFT][tx + ty * num_cols] = (unsigned char) x; int c_color_b = (int) img[(tx - x + ty * num_cols) * elem_sz]; int c_color_g = (int) img[(tx - x + ty * num_cols) * elem_sz + 1]; int c_color_r = (int) img[(tx - x + ty * num_cols) * elem_sz + 2]; int ac_mad = max(max(abs(c_color_b - a_color_b), abs(c_color_g - a_color_g)), abs(c_color_r - a_color_r)); int cp_mad = max(max(abs(c_color_b - p_color_b), abs(c_color_g - p_color_g)), abs(c_color_r - p_color_r)); if (x > lsd) { if ((float) ac_mad > ucd) break; } else { if ((float) ac_mad > lcd || (float) cp_mad > lcd) break; } p_color_b = c_color_b; p_color_g = c_color_g; p_color_r = c_color_r; } p_color_b = a_color_b; p_color_g = a_color_g; p_color_r = a_color_r; // Right arm for (int x = 1; x <= 
usd; ++x) { if (tx + x > num_cols - 1) break; cross[CROSS_ARM_RIGHT][tx + ty * num_cols] = (unsigned char) x; int c_color_b = (int) img[(tx + x + ty * num_cols) * elem_sz]; int c_color_g = (int) img[(tx + x + ty * num_cols) * elem_sz + 1]; int c_color_r = (int) img[(tx + x + ty * num_cols) * elem_sz + 2]; int ac_mad = max(max(abs(c_color_b - a_color_b), abs(c_color_g - a_color_g)), abs(c_color_r - a_color_r)); int cp_mad = max(max(abs(c_color_b - p_color_b), abs(c_color_g - p_color_g)), abs(c_color_r - p_color_r)); if (x > lsd) { if ((float) ac_mad > ucd) break; } else { if ((float) ac_mad > lcd || (float) cp_mad > lcd) break; } p_color_b = c_color_b; p_color_g = c_color_g; p_color_r = c_color_r; } } void d_ca_cross(unsigned char* d_img, float** d_cost, float** d_acost, float** h_acost, float *d_acost_memory, unsigned char **d_cross, float ucd, float lcd, int usd, int lsd, int num_disp, int num_rows, int num_cols, int elem_sz) { //////////////// // PARAMETERS // //////////////// size_t img_sz = num_rows * num_cols; //////////////////////// // CROSS CONSTRUCTION // //////////////////////// size_t bw = num_cols; size_t bh = 1; size_t gw = (num_cols + bw - 1) / bw; size_t gh = (num_rows + bh - 1) / bh; const dim3 block_sz(bw, bh, 1); const dim3 grid_sz(gw, gh, 1); hipLaunchKernelGGL(( ca_cross_construction_kernel), dim3(grid_sz), dim3(block_sz), 0, 0, d_img, d_cross, ucd, lcd, usd, lsd, num_rows, num_cols, elem_sz); hipDeviceSynchronize(); /////////////////////////// // CROSS-AGGRAGATE COSTS // /////////////////////////// int sm_cols = bw; int sm_sz = sm_cols * bh; int sm_padding = 0; for (int d = 0; d < num_disp; ++d) h_acost[d] = d_acost_memory + (d * img_sz); checkCudaError(hipMemcpy(d_acost, h_acost, sizeof(float*) * num_disp, hipMemcpyHostToDevice)); size_t bw_t = 32; size_t bh_t = 8; size_t gw_t = (num_cols + bw_t - 1) / bw_t; size_t gh_t = (num_rows + bh_t - 1) / bh_t / 4; const dim3 block_sz_t(bw_t, bh_t, 1); const dim3 grid_sz_t(gw_t, gh_t, 1); size_t bw_t_v = 32; size_t bh_t_v = 8; size_t gw_t_v = (num_rows + bw_t_v - 1) / bw_t_v; size_t gh_t_v = (num_cols + bh_t_v - 1) / bh_t_v / 4; const dim3 block_sz_t_v(bw_t_v, bh_t_v, 1); const dim3 grid_sz_t_v(gw_t_v, gh_t_v, 1); int sm_width = 32; int ipt_s = 2; size_t bw_s = num_cols / ipt_s; size_t bh_s = 1; size_t gw_s = (num_cols + bw_s - 1) / bw_s / ipt_s; size_t gh_s = (num_rows + bh_s - 1) / bh_s; const dim3 block_sz_s(bw_s, bh_s, 1); const dim3 grid_sz_s(gw_s, gh_s, 1); int sm_cols_s = bw_s * ipt_s; int sm_arm_s = 2 * ipt_s * bw_s; int sm_sz_s = sm_cols_s + sm_arm_s + 1; int sm_padding_s = 0; int ipt_s_v = 2; size_t bw_s_v = num_rows / ipt_s_v; size_t bh_s_v = 1; size_t gw_s_v = (num_rows + bw_s_v - 1) / bw_s_v / ipt_s_v; size_t gh_s_v = (num_cols + bh_s_v - 1) / bh_s_v; const dim3 block_sz_s_v(bw_s_v, bh_s_v, 1); const dim3 grid_sz_s_v(gw_s_v, gh_s_v, 1); int sm_cols_s_v = bw_s_v * ipt_s_v; int sm_arm_s_v = 2 * ipt_s_v * bw_s_v; int sm_sz_s_v = sm_cols_s_v + sm_arm_s_v + 1; int sm_padding_s_v = 0; hipLaunchKernelGGL(( ca_cross_hsum_kernel_3), dim3(grid_sz_s), dim3(block_sz_s), sizeof(float) * sm_sz_s, 0, d_cost, d_acost, d_cross, num_disp, num_rows, num_cols, sm_cols_s, sm_sz_s, sm_padding_s, ipt_s); hipDeviceSynchronize(); hipLaunchKernelGGL(( cost_transpose_kernel_4), dim3(grid_sz_t), dim3(block_sz_t), 0, 0, d_acost, d_cost, num_disp, num_rows, num_cols, sm_width, sm_width); hipDeviceSynchronize(); hipLaunchKernelGGL(( ca_cross_vhsum_kernel_2), dim3(grid_sz_s_v), dim3(block_sz_s_v), sizeof(float) * sm_sz_s_v, 0, d_cost, d_acost, 
d_cross, num_disp, num_cols, num_rows, sm_cols_s_v, sm_sz_s_v, sm_padding_s_v, ipt_s_v); hipDeviceSynchronize(); hipLaunchKernelGGL(( ca_cross_vhsum_kernel_2), dim3(grid_sz_s_v), dim3(block_sz_s_v), sizeof(float) * sm_sz_s_v, 0, d_acost, d_cost, d_cross, num_disp, num_cols, num_rows, sm_cols_s_v, sm_sz_s_v, sm_padding_s_v, ipt_s_v); hipDeviceSynchronize(); hipLaunchKernelGGL(( cost_transpose_kernel_4), dim3(grid_sz_t_v), dim3(block_sz_t_v), 0, 0, d_cost, d_acost, num_disp, num_cols, num_rows, sm_width, sm_width); hipDeviceSynchronize(); hipLaunchKernelGGL(( ca_cross_hsum_kernel_3), dim3(grid_sz_s), dim3(block_sz_s), sizeof(float) * sm_sz_s, 0, d_acost, d_cost, d_cross, num_disp, num_rows, num_cols, sm_cols_s, sm_sz_s, sm_padding_s, ipt_s); hipDeviceSynchronize(); } void ca_cross(unsigned char* img, unsigned char **cross, float** cost, float** acost, float ucd, float lcd, int usd, int lsd, int num_disp, int num_rows, int num_cols, int elem_sz) { cudaEventPair_t timer; /////////////////////// // DEVICE PARAMETERS // /////////////////////// size_t bw = num_cols; size_t bh = 1; size_t gw = (num_cols + bw - 1) / bw; size_t gh = (num_rows + bh - 1) / bh; const dim3 block_sz(bw, bh, 1); const dim3 grid_sz(gw, gh, 1); //////////////////////// // CROSS CONSTRUCTION // //////////////////////// unsigned char* d_img; checkCudaError(hipMalloc(&d_img, sizeof(unsigned char) * num_rows * num_cols * elem_sz)); checkCudaError(hipMemcpy(d_img, img, sizeof(unsigned char) * num_rows * num_cols * elem_sz, hipMemcpyHostToDevice)); unsigned char** d_cross; checkCudaError(hipMalloc(&d_cross, sizeof(unsigned char*) * CROSS_ARM_COUNT)); unsigned char** h_cross = (unsigned char**) malloc(sizeof(unsigned char*) * CROSS_ARM_COUNT); for (int i = 0; i < CROSS_ARM_COUNT; ++i) checkCudaError(hipMalloc(&h_cross[i], sizeof(unsigned char) * num_rows * num_cols)); checkCudaError(hipMemcpy(d_cross, h_cross, sizeof(unsigned char*) * CROSS_ARM_COUNT, hipMemcpyHostToDevice)); // Launch kernel startCudaTimer(&timer); hipLaunchKernelGGL(( ca_cross_construction_kernel), dim3(grid_sz), dim3(block_sz), 0, 0, d_img, d_cross, ucd, lcd, usd, lsd, num_rows, num_cols, elem_sz); stopCudaTimer(&timer, "Cross Aggragation - Cross Construciton Kernel"); for (int i = 0; i < CROSS_ARM_COUNT; ++i) checkCudaError(hipMemcpy(cross[i], h_cross[i], sizeof(unsigned char) * num_rows * num_cols, hipMemcpyDeviceToHost)); /////////////////////////// // CROSS-AGGRAGATE COSTS // /////////////////////////// float** d_cost; checkCudaError(hipMalloc(&d_cost, sizeof(float*) * num_disp)); float** h_cost = (float**) malloc(sizeof(float*) * num_disp); for (int d = 0; d < num_disp; ++d) { checkCudaError(hipMalloc(&h_cost[d], sizeof(float) * num_rows * num_cols)); checkCudaError(hipMemcpy(h_cost[d], cost[d], sizeof(float) * num_rows * num_cols, hipMemcpyHostToDevice)); } checkCudaError(hipMemcpy(d_cost, h_cost, sizeof(float*) * num_disp, hipMemcpyHostToDevice)); float** d_acost; checkCudaError(hipMalloc(&d_acost, sizeof(float*) * num_disp)); float** h_acost = (float**) malloc(sizeof(float*) * num_disp); for (int d = 0; d < num_disp; ++d) { checkCudaError(hipMalloc(&h_acost[d], sizeof(float) * num_rows * num_cols)); } checkCudaError(hipMemcpy(d_acost, h_acost, sizeof(float*) * num_disp, hipMemcpyHostToDevice)); int sm_cols = bw + 2 * usd; int sm_sz = sm_cols * bh; int sm_padding = usd; size_t bw_t = 32; size_t bh_t = 8; size_t gw_t = (num_cols + bw_t - 1) / bw_t; size_t gh_t = (num_rows + bh_t - 1) / bh_t / 4; const dim3 block_sz_t(bw_t, bh_t, 1); const dim3 
grid_sz_t(gw_t, gh_t, 1); size_t bw_t_v = 32; size_t bh_t_v = 8; size_t gw_t_v = (num_rows + bw_t_v - 1) / bw_t_v; size_t gh_t_v = (num_cols + bh_t_v - 1) / bh_t_v / 4; const dim3 block_sz_t_v(bw_t_v, bh_t_v, 1); const dim3 grid_sz_t_v(gw_t_v, gh_t_v, 1); int sm_width= 32; int ipt_s = 2; size_t bw_s = num_cols / ipt_s; size_t bh_s = 1; size_t gw_s = (num_cols + bw_s - 1) / bw_s / ipt_s; size_t gh_s = (num_rows + bh_s - 1) / bh_s; const dim3 block_sz_s(bw_s, bh_s, 1); const dim3 grid_sz_s(gw_s, gh_s, 1); int sm_cols_s = bw_s * ipt_s; int sm_arm_s = 2 * ipt_s * bw_s; int sm_sz_s = sm_cols_s + sm_arm_s + 1; int sm_padding_s = 0; int ipt_s_v = 2; size_t bw_s_v = num_rows / ipt_s_v; size_t bh_s_v = 1; size_t gw_s_v = (num_rows + bw_s_v - 1) / bw_s_v / ipt_s_v; size_t gh_s_v = (num_cols + bh_s_v - 1) / bh_s_v; const dim3 block_sz_s_v(bw_s_v, bh_s_v, 1); const dim3 grid_sz_s_v(gw_s_v, gh_s_v, 1); int sm_cols_s_v = bw_s_v * ipt_s_v; int sm_arm_s_v = 2 * ipt_s_v * bw_s_v; int sm_sz_s_v = sm_cols_s_v + sm_arm_s_v + 1; int sm_padding_s_v = 0; startCudaTimer(&timer); hipLaunchKernelGGL(( ca_cross_hsum_kernel_3), dim3(grid_sz_s), dim3(block_sz_s), sizeof(float) * sm_sz_s, 0, d_cost, d_acost, d_cross, num_disp, num_rows, num_cols, sm_cols_s, sm_sz_s, sm_padding_s, ipt_s); stopCudaTimer(&timer, "Cross Horizontal Sum #3"); startCudaTimer(&timer); hipLaunchKernelGGL(( cost_transpose_kernel_4), dim3(grid_sz_t), dim3(block_sz_t), 0, 0, d_acost, d_cost, num_disp, num_rows, num_cols, sm_width, sm_width); stopCudaTimer(&timer, "Cost Transpose Kernel #4"); startCudaTimer(&timer); hipLaunchKernelGGL(( ca_cross_vhsum_kernel_2), dim3(grid_sz_s_v), dim3(block_sz_s_v), sizeof(float) * sm_sz_s_v, 0, d_cost, d_acost, d_cross, num_disp, num_cols, num_rows, sm_cols_s_v, sm_sz_s_v, sm_padding_s_v, ipt_s_v); stopCudaTimer(&timer, "Cross Horizontal Transposed Sum Kernel #2"); startCudaTimer(&timer); hipLaunchKernelGGL(( ca_cross_vhsum_kernel_2), dim3(grid_sz_s_v), dim3(block_sz_s_v), sizeof(float) * sm_sz_s_v, 0, d_acost, d_cost, d_cross, num_disp, num_cols, num_rows, sm_cols_s_v, sm_sz_s_v, sm_padding_s_v, ipt_s_v); stopCudaTimer(&timer, "Cross Horizontal Transposed Sum Kernel"); startCudaTimer(&timer); hipLaunchKernelGGL(( cost_transpose_kernel_4), dim3(grid_sz_t_v), dim3(block_sz_t_v), 0, 0, d_cost, d_acost, num_disp, num_cols, num_rows, sm_width, sm_width); stopCudaTimer(&timer, "Cost Transpose Kernel #4"); startCudaTimer(&timer); hipLaunchKernelGGL(( ca_cross_hsum_kernel_3), dim3(grid_sz_s), dim3(block_sz_s), sizeof(float) * sm_sz_s, 0, d_acost, d_cost, d_cross, num_disp, num_rows, num_cols, sm_cols_s, sm_sz_s, sm_padding_s, ipt_s); stopCudaTimer(&timer, "Cross Horizontal Sum #3"); for (int d = 0; d < num_disp; ++d) { checkCudaError(hipMemcpy(acost[d], h_cost[d], sizeof(float) * num_cols * num_rows, hipMemcpyDeviceToHost)); } /////////////////// // DE-ALLOCATION // /////////////////// hipFree(d_img); hipFree(d_cross); hipFree(d_cost); hipFree(d_acost); for (int d = 0; d < num_disp; ++d) { hipFree(h_cost[d]); hipFree(h_acost[d]); } for (int i = 0; i < CROSS_ARM_COUNT; ++i) { hipFree(h_cross[i]); } free(h_cost); free(h_acost); free(h_cross); } #endif
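For reference, the arm-growing rule implemented four times in ca_cross_construction_kernel above can be checked against a tiny CPU version. The sketch below mirrors only the right-arm loop (anchor and previous-pixel colour tests against the lcd/ucd thresholds, switched by the lsd span); mad3 and right_arm_cpu are names introduced here for illustration and do not exist in the file.

#include <algorithm>
#include <cstdlib>

// Max absolute difference over the three interleaved colour channels (BGR).
static inline int mad3(const unsigned char* a, const unsigned char* b) {
    return std::max(std::max(std::abs(a[0] - b[0]), std::abs(a[1] - b[1])),
                    std::abs(a[2] - b[2]));
}

// CPU mirror of the "Right arm" loop in ca_cross_construction_kernel.
int right_arm_cpu(const unsigned char* img, int x, int y, int num_cols, int elem_sz,
                  float ucd, float lcd, int usd, int lsd) {
    const unsigned char* anchor = img + (x + y * num_cols) * elem_sz;
    const unsigned char* prev = anchor;
    int arm = 0;
    for (int dx = 1; dx <= usd && x + dx <= num_cols - 1; ++dx) {
        arm = dx;                                   // the kernel records the arm before testing colours
        const unsigned char* cur = img + (x + dx + y * num_cols) * elem_sz;
        int ac = mad3(cur, anchor);                 // difference to the anchor pixel
        int cp = mad3(cur, prev);                   // difference to the previous pixel on the arm
        bool stop = (dx > lsd) ? ((float)ac > ucd)
                               : ((float)ac > lcd || (float)cp > lcd);
        if (stop) break;
        prev = cur;
    }
    return arm;
}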
5ea192e58664569c98c295812354df8b51dbfadd.cu
#ifndef D_CA_CROSS_KERNEL #define D_CA_CROSS_KERNEL #include "d_ca_cross.h" #include "cuda_utils.h" #include <math.h> #define CROSS_ARM_COUNT 4 typedef enum { CROSS_ARM_UP = 0, CROSS_ARM_DOWN, CROSS_ARM_LEFT, CROSS_ARM_RIGHT } cross_arm_e; __global__ void ca_cross_construction_kernel(unsigned char* img, unsigned char** cross, float ucd, float lcd, int usd, int lsd, int num_rows, int num_cols, int elem_sz) { int tx = threadIdx.x + blockIdx.x * blockDim.x; int ty = threadIdx.y + blockIdx.y * blockDim.y; if ((tx > num_cols - 1) || (ty > num_rows - 1)) return; int a_color_b = (int) img[(tx + ty * num_cols) * elem_sz]; int a_color_g = (int) img[(tx + ty * num_cols) * elem_sz + 1]; int a_color_r = (int) img[(tx + ty * num_cols) * elem_sz + 2]; cross[CROSS_ARM_UP][tx + ty * num_cols] = 0; cross[CROSS_ARM_DOWN][tx + ty * num_cols] = 0; cross[CROSS_ARM_LEFT][tx + ty * num_cols] = 0; cross[CROSS_ARM_RIGHT][tx + ty * num_cols] = 0; int p_color_b = a_color_b; int p_color_g = a_color_g; int p_color_r = a_color_r; // Upper arm for (int y = 1; y <= usd; ++y) { if (ty - y < 0) break; cross[CROSS_ARM_UP][tx + ty * num_cols] = (unsigned char) y; int c_color_b = (int) img[(tx + (ty - y) * num_cols) * elem_sz]; int c_color_g = (int) img[(tx + (ty - y) * num_cols) * elem_sz + 1]; int c_color_r = (int) img[(tx + (ty - y) * num_cols) * elem_sz + 2]; int ac_mad = max(max(abs(c_color_b - a_color_b), abs(c_color_g - a_color_g)), abs(c_color_r - a_color_r)); int cp_mad = max(max(abs(c_color_b - p_color_b), abs(c_color_g - p_color_g)), abs(c_color_r - p_color_r)); if (y > lsd) { if ((float) ac_mad > ucd) break; } else { if ((float) ac_mad > lcd || (float) cp_mad > lcd) break; } p_color_b = c_color_b; p_color_g = c_color_g; p_color_r = c_color_r; } p_color_b = a_color_b; p_color_g = a_color_g; p_color_r = a_color_r; // Down arm for (int y = 1; y <= usd; ++y) { if (ty + y > num_rows - 1) break; cross[CROSS_ARM_DOWN][tx + ty * num_cols] = (unsigned char) y; int c_color_b = (int) img[(tx + (ty + y) * num_cols) * elem_sz]; int c_color_g = (int) img[(tx + (ty + y) * num_cols) * elem_sz + 1]; int c_color_r = (int) img[(tx + (ty + y) * num_cols) * elem_sz + 2]; int ac_mad = max(max(abs(c_color_b - a_color_b), abs(c_color_g - a_color_g)), abs(c_color_r - a_color_r)); int cp_mad = max(max(abs(c_color_b - p_color_b), abs(c_color_g - p_color_g)), abs(c_color_r - p_color_r)); if (y > lsd) { if ((float) ac_mad > ucd) break; } else { if ((float) ac_mad > lcd || (float) cp_mad > lcd) break; } p_color_b = c_color_b; p_color_g = c_color_g; p_color_r = c_color_r; } p_color_b = a_color_b; p_color_g = a_color_g; p_color_r = a_color_r; // Left arm for (int x = 1; x <= usd; ++x) { if (tx - x < 0) break; cross[CROSS_ARM_LEFT][tx + ty * num_cols] = (unsigned char) x; int c_color_b = (int) img[(tx - x + ty * num_cols) * elem_sz]; int c_color_g = (int) img[(tx - x + ty * num_cols) * elem_sz + 1]; int c_color_r = (int) img[(tx - x + ty * num_cols) * elem_sz + 2]; int ac_mad = max(max(abs(c_color_b - a_color_b), abs(c_color_g - a_color_g)), abs(c_color_r - a_color_r)); int cp_mad = max(max(abs(c_color_b - p_color_b), abs(c_color_g - p_color_g)), abs(c_color_r - p_color_r)); if (x > lsd) { if ((float) ac_mad > ucd) break; } else { if ((float) ac_mad > lcd || (float) cp_mad > lcd) break; } p_color_b = c_color_b; p_color_g = c_color_g; p_color_r = c_color_r; } p_color_b = a_color_b; p_color_g = a_color_g; p_color_r = a_color_r; // Right arm for (int x = 1; x <= usd; ++x) { if (tx + x > num_cols - 1) break; cross[CROSS_ARM_RIGHT][tx + ty * num_cols] 
= (unsigned char) x; int c_color_b = (int) img[(tx + x + ty * num_cols) * elem_sz]; int c_color_g = (int) img[(tx + x + ty * num_cols) * elem_sz + 1]; int c_color_r = (int) img[(tx + x + ty * num_cols) * elem_sz + 2]; int ac_mad = max(max(abs(c_color_b - a_color_b), abs(c_color_g - a_color_g)), abs(c_color_r - a_color_r)); int cp_mad = max(max(abs(c_color_b - p_color_b), abs(c_color_g - p_color_g)), abs(c_color_r - p_color_r)); if (x > lsd) { if ((float) ac_mad > ucd) break; } else { if ((float) ac_mad > lcd || (float) cp_mad > lcd) break; } p_color_b = c_color_b; p_color_g = c_color_g; p_color_r = c_color_r; } } void d_ca_cross(unsigned char* d_img, float** d_cost, float** d_acost, float** h_acost, float *d_acost_memory, unsigned char **d_cross, float ucd, float lcd, int usd, int lsd, int num_disp, int num_rows, int num_cols, int elem_sz) { //////////////// // PARAMETERS // //////////////// size_t img_sz = num_rows * num_cols; //////////////////////// // CROSS CONSTRUCTION // //////////////////////// size_t bw = num_cols; size_t bh = 1; size_t gw = (num_cols + bw - 1) / bw; size_t gh = (num_rows + bh - 1) / bh; const dim3 block_sz(bw, bh, 1); const dim3 grid_sz(gw, gh, 1); ca_cross_construction_kernel<<<grid_sz, block_sz>>>(d_img, d_cross, ucd, lcd, usd, lsd, num_rows, num_cols, elem_sz); cudaDeviceSynchronize(); /////////////////////////// // CROSS-AGGRAGATE COSTS // /////////////////////////// int sm_cols = bw; int sm_sz = sm_cols * bh; int sm_padding = 0; for (int d = 0; d < num_disp; ++d) h_acost[d] = d_acost_memory + (d * img_sz); checkCudaError(cudaMemcpy(d_acost, h_acost, sizeof(float*) * num_disp, cudaMemcpyHostToDevice)); size_t bw_t = 32; size_t bh_t = 8; size_t gw_t = (num_cols + bw_t - 1) / bw_t; size_t gh_t = (num_rows + bh_t - 1) / bh_t / 4; const dim3 block_sz_t(bw_t, bh_t, 1); const dim3 grid_sz_t(gw_t, gh_t, 1); size_t bw_t_v = 32; size_t bh_t_v = 8; size_t gw_t_v = (num_rows + bw_t_v - 1) / bw_t_v; size_t gh_t_v = (num_cols + bh_t_v - 1) / bh_t_v / 4; const dim3 block_sz_t_v(bw_t_v, bh_t_v, 1); const dim3 grid_sz_t_v(gw_t_v, gh_t_v, 1); int sm_width = 32; int ipt_s = 2; size_t bw_s = num_cols / ipt_s; size_t bh_s = 1; size_t gw_s = (num_cols + bw_s - 1) / bw_s / ipt_s; size_t gh_s = (num_rows + bh_s - 1) / bh_s; const dim3 block_sz_s(bw_s, bh_s, 1); const dim3 grid_sz_s(gw_s, gh_s, 1); int sm_cols_s = bw_s * ipt_s; int sm_arm_s = 2 * ipt_s * bw_s; int sm_sz_s = sm_cols_s + sm_arm_s + 1; int sm_padding_s = 0; int ipt_s_v = 2; size_t bw_s_v = num_rows / ipt_s_v; size_t bh_s_v = 1; size_t gw_s_v = (num_rows + bw_s_v - 1) / bw_s_v / ipt_s_v; size_t gh_s_v = (num_cols + bh_s_v - 1) / bh_s_v; const dim3 block_sz_s_v(bw_s_v, bh_s_v, 1); const dim3 grid_sz_s_v(gw_s_v, gh_s_v, 1); int sm_cols_s_v = bw_s_v * ipt_s_v; int sm_arm_s_v = 2 * ipt_s_v * bw_s_v; int sm_sz_s_v = sm_cols_s_v + sm_arm_s_v + 1; int sm_padding_s_v = 0; ca_cross_hsum_kernel_3<<<grid_sz_s, block_sz_s, sizeof(float) * sm_sz_s>>>(d_cost, d_acost, d_cross, num_disp, num_rows, num_cols, sm_cols_s, sm_sz_s, sm_padding_s, ipt_s); cudaDeviceSynchronize(); cost_transpose_kernel_4<<<grid_sz_t, block_sz_t>>>(d_acost, d_cost, num_disp, num_rows, num_cols, sm_width, sm_width); cudaDeviceSynchronize(); ca_cross_vhsum_kernel_2<<<grid_sz_s_v, block_sz_s_v, sizeof(float) * sm_sz_s_v>>>(d_cost, d_acost, d_cross, num_disp, num_cols, num_rows, sm_cols_s_v, sm_sz_s_v, sm_padding_s_v, ipt_s_v); cudaDeviceSynchronize(); ca_cross_vhsum_kernel_2<<<grid_sz_s_v, block_sz_s_v, sizeof(float) * sm_sz_s_v>>>(d_acost, d_cost, d_cross, 
num_disp, num_cols, num_rows, sm_cols_s_v, sm_sz_s_v, sm_padding_s_v, ipt_s_v); cudaDeviceSynchronize(); cost_transpose_kernel_4<<<grid_sz_t_v, block_sz_t_v>>>(d_cost, d_acost, num_disp, num_cols, num_rows, sm_width, sm_width); cudaDeviceSynchronize(); ca_cross_hsum_kernel_3<<<grid_sz_s, block_sz_s, sizeof(float) * sm_sz_s>>>(d_acost, d_cost, d_cross, num_disp, num_rows, num_cols, sm_cols_s, sm_sz_s, sm_padding_s, ipt_s); cudaDeviceSynchronize(); } void ca_cross(unsigned char* img, unsigned char **cross, float** cost, float** acost, float ucd, float lcd, int usd, int lsd, int num_disp, int num_rows, int num_cols, int elem_sz) { cudaEventPair_t timer; /////////////////////// // DEVICE PARAMETERS // /////////////////////// size_t bw = num_cols; size_t bh = 1; size_t gw = (num_cols + bw - 1) / bw; size_t gh = (num_rows + bh - 1) / bh; const dim3 block_sz(bw, bh, 1); const dim3 grid_sz(gw, gh, 1); //////////////////////// // CROSS CONSTRUCTION // //////////////////////// unsigned char* d_img; checkCudaError(cudaMalloc(&d_img, sizeof(unsigned char) * num_rows * num_cols * elem_sz)); checkCudaError(cudaMemcpy(d_img, img, sizeof(unsigned char) * num_rows * num_cols * elem_sz, cudaMemcpyHostToDevice)); unsigned char** d_cross; checkCudaError(cudaMalloc(&d_cross, sizeof(unsigned char*) * CROSS_ARM_COUNT)); unsigned char** h_cross = (unsigned char**) malloc(sizeof(unsigned char*) * CROSS_ARM_COUNT); for (int i = 0; i < CROSS_ARM_COUNT; ++i) checkCudaError(cudaMalloc(&h_cross[i], sizeof(unsigned char) * num_rows * num_cols)); checkCudaError(cudaMemcpy(d_cross, h_cross, sizeof(unsigned char*) * CROSS_ARM_COUNT, cudaMemcpyHostToDevice)); // Launch kernel startCudaTimer(&timer); ca_cross_construction_kernel<<<grid_sz, block_sz>>>(d_img, d_cross, ucd, lcd, usd, lsd, num_rows, num_cols, elem_sz); stopCudaTimer(&timer, "Cross Aggragation - Cross Construciton Kernel"); for (int i = 0; i < CROSS_ARM_COUNT; ++i) checkCudaError(cudaMemcpy(cross[i], h_cross[i], sizeof(unsigned char) * num_rows * num_cols, cudaMemcpyDeviceToHost)); /////////////////////////// // CROSS-AGGRAGATE COSTS // /////////////////////////// float** d_cost; checkCudaError(cudaMalloc(&d_cost, sizeof(float*) * num_disp)); float** h_cost = (float**) malloc(sizeof(float*) * num_disp); for (int d = 0; d < num_disp; ++d) { checkCudaError(cudaMalloc(&h_cost[d], sizeof(float) * num_rows * num_cols)); checkCudaError(cudaMemcpy(h_cost[d], cost[d], sizeof(float) * num_rows * num_cols, cudaMemcpyHostToDevice)); } checkCudaError(cudaMemcpy(d_cost, h_cost, sizeof(float*) * num_disp, cudaMemcpyHostToDevice)); float** d_acost; checkCudaError(cudaMalloc(&d_acost, sizeof(float*) * num_disp)); float** h_acost = (float**) malloc(sizeof(float*) * num_disp); for (int d = 0; d < num_disp; ++d) { checkCudaError(cudaMalloc(&h_acost[d], sizeof(float) * num_rows * num_cols)); } checkCudaError(cudaMemcpy(d_acost, h_acost, sizeof(float*) * num_disp, cudaMemcpyHostToDevice)); int sm_cols = bw + 2 * usd; int sm_sz = sm_cols * bh; int sm_padding = usd; size_t bw_t = 32; size_t bh_t = 8; size_t gw_t = (num_cols + bw_t - 1) / bw_t; size_t gh_t = (num_rows + bh_t - 1) / bh_t / 4; const dim3 block_sz_t(bw_t, bh_t, 1); const dim3 grid_sz_t(gw_t, gh_t, 1); size_t bw_t_v = 32; size_t bh_t_v = 8; size_t gw_t_v = (num_rows + bw_t_v - 1) / bw_t_v; size_t gh_t_v = (num_cols + bh_t_v - 1) / bh_t_v / 4; const dim3 block_sz_t_v(bw_t_v, bh_t_v, 1); const dim3 grid_sz_t_v(gw_t_v, gh_t_v, 1); int sm_width= 32; int ipt_s = 2; size_t bw_s = num_cols / ipt_s; size_t bh_s = 1; size_t gw_s = 
(num_cols + bw_s - 1) / bw_s / ipt_s; size_t gh_s = (num_rows + bh_s - 1) / bh_s; const dim3 block_sz_s(bw_s, bh_s, 1); const dim3 grid_sz_s(gw_s, gh_s, 1); int sm_cols_s = bw_s * ipt_s; int sm_arm_s = 2 * ipt_s * bw_s; int sm_sz_s = sm_cols_s + sm_arm_s + 1; int sm_padding_s = 0; int ipt_s_v = 2; size_t bw_s_v = num_rows / ipt_s_v; size_t bh_s_v = 1; size_t gw_s_v = (num_rows + bw_s_v - 1) / bw_s_v / ipt_s_v; size_t gh_s_v = (num_cols + bh_s_v - 1) / bh_s_v; const dim3 block_sz_s_v(bw_s_v, bh_s_v, 1); const dim3 grid_sz_s_v(gw_s_v, gh_s_v, 1); int sm_cols_s_v = bw_s_v * ipt_s_v; int sm_arm_s_v = 2 * ipt_s_v * bw_s_v; int sm_sz_s_v = sm_cols_s_v + sm_arm_s_v + 1; int sm_padding_s_v = 0; startCudaTimer(&timer); ca_cross_hsum_kernel_3<<<grid_sz_s, block_sz_s, sizeof(float) * sm_sz_s>>>(d_cost, d_acost, d_cross, num_disp, num_rows, num_cols, sm_cols_s, sm_sz_s, sm_padding_s, ipt_s); stopCudaTimer(&timer, "Cross Horizontal Sum #3"); startCudaTimer(&timer); cost_transpose_kernel_4<<<grid_sz_t, block_sz_t>>>(d_acost, d_cost, num_disp, num_rows, num_cols, sm_width, sm_width); stopCudaTimer(&timer, "Cost Transpose Kernel #4"); startCudaTimer(&timer); ca_cross_vhsum_kernel_2<<<grid_sz_s_v, block_sz_s_v, sizeof(float) * sm_sz_s_v>>>(d_cost, d_acost, d_cross, num_disp, num_cols, num_rows, sm_cols_s_v, sm_sz_s_v, sm_padding_s_v, ipt_s_v); stopCudaTimer(&timer, "Cross Horizontal Transposed Sum Kernel #2"); startCudaTimer(&timer); ca_cross_vhsum_kernel_2<<<grid_sz_s_v, block_sz_s_v, sizeof(float) * sm_sz_s_v>>>(d_acost, d_cost, d_cross, num_disp, num_cols, num_rows, sm_cols_s_v, sm_sz_s_v, sm_padding_s_v, ipt_s_v); stopCudaTimer(&timer, "Cross Horizontal Transposed Sum Kernel"); startCudaTimer(&timer); cost_transpose_kernel_4<<<grid_sz_t_v, block_sz_t_v>>>(d_cost, d_acost, num_disp, num_cols, num_rows, sm_width, sm_width); stopCudaTimer(&timer, "Cost Transpose Kernel #4"); startCudaTimer(&timer); ca_cross_hsum_kernel_3<<<grid_sz_s, block_sz_s, sizeof(float) * sm_sz_s>>>(d_acost, d_cost, d_cross, num_disp, num_rows, num_cols, sm_cols_s, sm_sz_s, sm_padding_s, ipt_s); stopCudaTimer(&timer, "Cross Horizontal Sum #3"); for (int d = 0; d < num_disp; ++d) { checkCudaError(cudaMemcpy(acost[d], h_cost[d], sizeof(float) * num_cols * num_rows, cudaMemcpyDeviceToHost)); } /////////////////// // DE-ALLOCATION // /////////////////// cudaFree(d_img); cudaFree(d_cross); cudaFree(d_cost); cudaFree(d_acost); for (int d = 0; d < num_disp; ++d) { cudaFree(h_cost[d]); cudaFree(h_acost[d]); } for (int i = 0; i < CROSS_ARM_COUNT; ++i) { cudaFree(h_cross[i]); } free(h_cost); free(h_acost); free(h_cross); } #endif
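Both wrappers in this file hand costs to the kernels as float** tables whose rows live on the device: every row is cudaMalloc'ed through a host-side shadow array, and the pointer table itself is then copied to the device in a single memcpy. The helper below is a minimal, self-contained sketch of that idiom under assumed names (alloc_device_table / free_device_table); ca_cross uses the same shape for d_cost/h_cost, d_acost/h_acost and d_cross/h_cross.

#include <cuda_runtime.h>

// Builds a device-resident table of num_rows device pointers, each row holding
// row_elems floats.  h_rows is a host array that keeps the row pointers so they
// can be copied to/from and freed later, exactly as h_cost/h_acost are used above.
float** alloc_device_table(float** h_rows, int num_rows, size_t row_elems) {
    float** d_rows = nullptr;
    cudaMalloc(&d_rows, sizeof(float*) * num_rows);
    for (int r = 0; r < num_rows; ++r)
        cudaMalloc(&h_rows[r], sizeof(float) * row_elems);   // h_rows[r] is a device pointer
    cudaMemcpy(d_rows, h_rows, sizeof(float*) * num_rows, cudaMemcpyHostToDevice);
    return d_rows;   // usable as a float** argument inside kernels
}

void free_device_table(float** d_rows, float** h_rows, int num_rows) {
    for (int r = 0; r < num_rows; ++r)
        cudaFree(h_rows[r]);
    cudaFree(d_rows);
}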
76541c2becec2bf896adf96581d3b4f3ced06241.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "parameters.cuh" #include <thrust/reduce.h> #include <thrust/execution_policy.h> inline CUDA_CALLABLE_MEMBER point crossmul(point a, point b) { return point(a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x); } inline CUDA_CALLABLE_MEMBER point stochastic_reflection(point rf, point rs, double mass_fl, double kbt, point len, int *iv, int *seed, int *idum, int *iy) { double m_beta = mass_fl/kbt, random_e = power(1 - ran(iv, seed, idum, iy), 2), val, v[4], x[4], z = 2; point un, ut, t, n; n = rs/sqrt((rs*rs).sum()); val = sqrt(-log(random_e)/m_beta); un = n*val; t = img((t.random(iv, seed, idum, iy)*len - rf), len); ut = crossmul(un, t); ut = ut/sqrt((ut*ut).sum()); while(z > 1) { x[1] = 2.0 * ran(iv, seed, idum, iy) - 1, x[2] = 2.0 * ran(iv, seed, idum, iy) - 1; z = x[1]*x[1] + x[2]*x[2]; } z = sqrt((-2.0*log(z))/z); v[1] = x[1]*z*sqrt(kbt/mass_fl); v[2] = x[2]*z*sqrt(kbt/mass_fl); return ut*v[1] + un; } __global__ void d_fluid_colloid_collision(int *no_neigh, point *pos_colloid, point *pos_fl, point *vel_colloid, point *ang_vel_colloid, point *dump_vel_fl, point **u, double mass_colloid, double I_colloid, double mass_fl, double dt, point *vel_fl, point len, double sigma, int no_of_colloid, double kbt, int **neigh_fl, point **vc, point **om) { point rr, rs, uu; int j = blockIdx.y*blockDim.y + threadIdx.y + 1; int i = blockIdx.x*blockDim.x + threadIdx.x + 1; // for(int j = 1; j <= no_of_colloid; j++) { if(j <= no_of_colloid) { // for(int i = 1; i <= no_neigh[j]; i++) { vc[j][0] = om[j][0] = point(0, 0, 0); if(i <= no_neigh[j]) { vc[j][i] = om[j][i] = point(0, 0, 0); int l = neigh_fl[j][i]; rr = img(pos_colloid[j] - pos_fl[l], len); if((rr*rr).sum() <= pow(sigma, 2)*0.25) { pos_fl[l] = mod(pos_fl[l] - vel_fl[l]*dt* 0.5, len); rs = img(pos_fl[l] - pos_colloid[j], len); // uu = stochastic_reflection(pos_fl[l], rs, mass_fl, kbt, len, iv, seed, idum, iy); vel_fl[l] = u[j][i] + vel_colloid[j] + crossmul(ang_vel_colloid[j], rs); pos_fl[l] = mod(pos_fl[l] + vel_fl[l]*dt*0.5, len); point t1 = dump_vel_fl[l] - vel_fl[l], t2; t2 = crossmul(rs, t1); vc[j][i] = t1; om[j][i] = t2; } } } } __global__ void d_dump(point *dump_vel_fl, point *vel_fl, int no_of_fluid) { int i = blockIdx.x*blockDim.x + threadIdx.x + 1; if(i <= no_of_fluid) dump_vel_fl[i] = vel_fl[i]; } struct add_point: public thrust::binary_function<point &, point &, point &> { CUDA_CALLABLE_MEMBER point operator()(const point &a, const point &b) { return point(a.x+b.x, a.y+b.y, a.z+b.z); } }; __global__ void update_fcc(point **vc, point **om, point *vel_colloid, point *ang_vel_colloid, int *no_neigh, int no_of_colloid, double mass_colloid, double mass_fl, double I_colloid) { int j = blockIdx.x*blockDim.x + threadIdx.x; if(j <= no_of_colloid) { vc[j][0] = thrust::reduce(thrust::seq, vc[j], vc[j] + no_neigh[j] + 1, point(0, 0, 0), add_point()); om[j][0] = thrust::reduce(thrust::seq, om[j], om[j] + no_neigh[j] + 1, point(0, 0, 0), add_point()); vel_colloid[j] += vc[j][0]*mass_fl/mass_colloid; ang_vel_colloid[j] += om[j][0]*mass_fl/I_colloid; } } void fluid_colloid_collision() { point rr, rs; int thr = 256, blk = (no_of_fluid + thr - 1)/thr; hipLaunchKernelGGL(( d_dump), dim3(blk), dim3(thr), 0, 0, dump_vel_fl, vel_fl, no_of_fluid); hipDeviceSynchronize(); for(int j = 1; j <= no_of_colloid; j++) { for(int i = 1; i <= no_neigh[j]; i++) { int l = neigh_fl[j][i]; rr = img(pos_colloid[j] - pos_fl[l], len); if((rr*rr).sum() <= pow(sigma, 2)*0.25) { rs 
= img(mod(pos_fl[l] - vel_fl[l]*dt* 0.5, len)- pos_colloid[j], len); u[j][i] = stochastic_reflection( mod(pos_fl[l] - vel_fl[l]*dt* 0.5, len), rs, mass_fl, kbt, len, iv, seed, idum, iy); } } } dim3 thrs = dim3(32, 32), blks = dim3((10000 + thrs.x - 1)/thrs.x, (no_of_colloid + thrs.y - 1)/thrs.y); blk = (no_of_colloid + thr -1)/thr; hipLaunchKernelGGL(( d_fluid_colloid_collision), dim3(blks), dim3(thrs), 0, 0, no_neigh, pos_colloid, pos_fl, vel_colloid, ang_vel_colloid, dump_vel_fl, u, mass_colloid, I_colloid, mass_fl, dt, vel_fl, len, sigma, no_of_colloid, kbt, neigh_fl, vc, om); hipLaunchKernelGGL(( update_fcc), dim3(blk), dim3(thr), 0, 0, vc, om, vel_colloid, ang_vel_colloid, no_neigh, no_of_colloid, mass_colloid, mass_fl, I_colloid); }
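The rejection loop inside stochastic_reflection is the Marsaglia polar method: two uniforms on [-1,1] are resampled until they land inside the unit disc and are then turned into a pair of independent standard normals (scaled by sqrt(kbt/mass_fl) in the kernel to obtain thermal velocity components). Below is a host-side sketch of the same sampler, with a z == 0 guard added here for safety; the function name is illustrative only.

#include <cmath>
#include <random>

// Marsaglia polar method, mirroring the while(z > 1) loop in stochastic_reflection.
void polar_gaussian_pair(std::mt19937& rng, double& g1, double& g2) {
    std::uniform_real_distribution<double> uni(-1.0, 1.0);
    double x1 = 0.0, x2 = 0.0, z = 2.0;
    while (z > 1.0 || z == 0.0) {        // rejection-sample the open unit disc
        x1 = uni(rng);
        x2 = uni(rng);
        z = x1 * x1 + x2 * x2;
    }
    z = std::sqrt(-2.0 * std::log(z) / z);
    g1 = x1 * z;                         // independent N(0,1) pair
    g2 = x2 * z;
}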
76541c2becec2bf896adf96581d3b4f3ced06241.cu
#include "parameters.cuh" #include <thrust/reduce.h> #include <thrust/execution_policy.h> inline CUDA_CALLABLE_MEMBER point crossmul(point a, point b) { return point(a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x); } inline CUDA_CALLABLE_MEMBER point stochastic_reflection(point rf, point rs, double mass_fl, double kbt, point len, int *iv, int *seed, int *idum, int *iy) { double m_beta = mass_fl/kbt, random_e = power(1 - ran(iv, seed, idum, iy), 2), val, v[4], x[4], z = 2; point un, ut, t, n; n = rs/sqrt((rs*rs).sum()); val = sqrt(-log(random_e)/m_beta); un = n*val; t = img((t.random(iv, seed, idum, iy)*len - rf), len); ut = crossmul(un, t); ut = ut/sqrt((ut*ut).sum()); while(z > 1) { x[1] = 2.0 * ran(iv, seed, idum, iy) - 1, x[2] = 2.0 * ran(iv, seed, idum, iy) - 1; z = x[1]*x[1] + x[2]*x[2]; } z = sqrt((-2.0*log(z))/z); v[1] = x[1]*z*sqrt(kbt/mass_fl); v[2] = x[2]*z*sqrt(kbt/mass_fl); return ut*v[1] + un; } __global__ void d_fluid_colloid_collision(int *no_neigh, point *pos_colloid, point *pos_fl, point *vel_colloid, point *ang_vel_colloid, point *dump_vel_fl, point **u, double mass_colloid, double I_colloid, double mass_fl, double dt, point *vel_fl, point len, double sigma, int no_of_colloid, double kbt, int **neigh_fl, point **vc, point **om) { point rr, rs, uu; int j = blockIdx.y*blockDim.y + threadIdx.y + 1; int i = blockIdx.x*blockDim.x + threadIdx.x + 1; // for(int j = 1; j <= no_of_colloid; j++) { if(j <= no_of_colloid) { // for(int i = 1; i <= no_neigh[j]; i++) { vc[j][0] = om[j][0] = point(0, 0, 0); if(i <= no_neigh[j]) { vc[j][i] = om[j][i] = point(0, 0, 0); int l = neigh_fl[j][i]; rr = img(pos_colloid[j] - pos_fl[l], len); if((rr*rr).sum() <= pow(sigma, 2)*0.25) { pos_fl[l] = mod(pos_fl[l] - vel_fl[l]*dt* 0.5, len); rs = img(pos_fl[l] - pos_colloid[j], len); // uu = stochastic_reflection(pos_fl[l], rs, mass_fl, kbt, len, iv, seed, idum, iy); vel_fl[l] = u[j][i] + vel_colloid[j] + crossmul(ang_vel_colloid[j], rs); pos_fl[l] = mod(pos_fl[l] + vel_fl[l]*dt*0.5, len); point t1 = dump_vel_fl[l] - vel_fl[l], t2; t2 = crossmul(rs, t1); vc[j][i] = t1; om[j][i] = t2; } } } } __global__ void d_dump(point *dump_vel_fl, point *vel_fl, int no_of_fluid) { int i = blockIdx.x*blockDim.x + threadIdx.x + 1; if(i <= no_of_fluid) dump_vel_fl[i] = vel_fl[i]; } struct add_point: public thrust::binary_function<point &, point &, point &> { CUDA_CALLABLE_MEMBER point operator()(const point &a, const point &b) { return point(a.x+b.x, a.y+b.y, a.z+b.z); } }; __global__ void update_fcc(point **vc, point **om, point *vel_colloid, point *ang_vel_colloid, int *no_neigh, int no_of_colloid, double mass_colloid, double mass_fl, double I_colloid) { int j = blockIdx.x*blockDim.x + threadIdx.x; if(j <= no_of_colloid) { vc[j][0] = thrust::reduce(thrust::seq, vc[j], vc[j] + no_neigh[j] + 1, point(0, 0, 0), add_point()); om[j][0] = thrust::reduce(thrust::seq, om[j], om[j] + no_neigh[j] + 1, point(0, 0, 0), add_point()); vel_colloid[j] += vc[j][0]*mass_fl/mass_colloid; ang_vel_colloid[j] += om[j][0]*mass_fl/I_colloid; } } void fluid_colloid_collision() { point rr, rs; int thr = 256, blk = (no_of_fluid + thr - 1)/thr; d_dump<<<blk, thr>>> (dump_vel_fl, vel_fl, no_of_fluid); cudaDeviceSynchronize(); for(int j = 1; j <= no_of_colloid; j++) { for(int i = 1; i <= no_neigh[j]; i++) { int l = neigh_fl[j][i]; rr = img(pos_colloid[j] - pos_fl[l], len); if((rr*rr).sum() <= pow(sigma, 2)*0.25) { rs = img(mod(pos_fl[l] - vel_fl[l]*dt* 0.5, len)- pos_colloid[j], len); u[j][i] = stochastic_reflection( mod(pos_fl[l] - 
                    vel_fl[l]*dt* 0.5, len), rs, mass_fl, kbt, len, iv, seed, idum, iy);
            }
        }
    }
    dim3 thrs = dim3(32, 32), blks = dim3((10000 + thrs.x - 1)/thrs.x, (no_of_colloid + thrs.y - 1)/thrs.y);
    blk = (no_of_colloid + thr -1)/thr;
    d_fluid_colloid_collision<<<blks, thrs>>>(no_neigh, pos_colloid, pos_fl, vel_colloid, ang_vel_colloid, dump_vel_fl, u, mass_colloid, I_colloid, mass_fl, dt, vel_fl, len, sigma, no_of_colloid, kbt, neigh_fl, vc, om);
    update_fcc<<<blk, thr>>>(vc, om, vel_colloid, ang_vel_colloid, no_neigh, no_of_colloid, mass_colloid, mass_fl, I_colloid);
}
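update_fcc above reduces each colloid's vc/om rows by calling thrust::reduce with the thrust::seq policy from device code, which runs the reduction sequentially inside the calling thread rather than launching anything. The kernel below is a minimal sketch of that pattern on float3 rows; float3_add, row_sums and the fixed row stride of 128 are assumptions for illustration, not values taken from this file.

#include <cuda_runtime.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>

struct float3_add {
    __host__ __device__ float3 operator()(const float3& a, const float3& b) const {
        return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
    }
};

// One thread per row: each thread reduces its own row sequentially, as update_fcc does.
__global__ void row_sums(const float3* rows, const int* row_len, float3* out, int num_rows) {
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    if (r < num_rows) {
        const float3* row = rows + r * 128;          // fixed row stride of 128 (assumed)
        out[r] = thrust::reduce(thrust::seq, row, row + row_len[r],
                                make_float3(0.f, 0.f, 0.f), float3_add());
    }
}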
f7993c99b357f9ba2248db577d18ce3354625872.hip
// !!! This is a file automatically generated by hipify!!! /* For DIRECTED GRAPH */ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <iostream> #include <vector> #include <unordered_map> #include <string> #include <algorithm> #define MAX_NODE 100000000 #define DEBUG 0 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } using namespace std; class Node{ public: unsigned int val; vector<unsigned int> weights; vector<Node*> Edges; Node(int val){ this->val = val; } void addEdge(Node* v,unsigned int w){ this->Edges.push_back(v); this->weights.push_back(w); } }; void insertDiff(unordered_map< unsigned int, Node*>& Graph,int a,int b,unsigned int c); void createDiffGraph(int N,unordered_map<unsigned int,Node*>& Graph, int* diffOff,int* diffEdges,unsigned int* diffWeight ); void removeDelEdges(int u,int v,int* offset,int* edges,int N,int E,int& del_size); void mergeDiff(int* offset,int* edges,unsigned int* weight,int N,int E, int* diff_offset, int* diff_edges,unsigned int* diff_weight,int insert_size,int del_size, int* mOffset,int* mEdges,unsigned int* mWeight); // __device__ volatile int Cx[MAX_NODE]; __device__ volatile int PQ[MAX_NODE]; //K in parallel __global__ void extractMin(int* PQ_size, int* expandNodes,int* expandNodes_size,int* Cx,int* openList,int N,int K){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<K && PQ_size[id]>0){ //extract min from PQ int front = id* ( (N+K-1)/K ); int node = PQ[front]; // restructure the heap PQ[front]=PQ[front+PQ_size[id]-1]; PQ_size[id]-=1; int pqIndex = 0; while(2*pqIndex+1 < PQ_size[id]){ if(2*pqIndex+2 >= PQ_size[id]){ if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){ int swap = PQ[front + 2*pqIndex+1]; PQ[front + 2*pqIndex+1] = PQ[front +pqIndex]; PQ[front + pqIndex] = swap; pqIndex = 2*pqIndex+1; } else break; } else{ if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){ int swap = PQ[front + 2*pqIndex+1]; PQ[front + 2*pqIndex+1] = PQ[front +pqIndex]; PQ[front + pqIndex] = swap; pqIndex = 2*pqIndex+1; } else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){ int swap = PQ[front + 2*pqIndex+2]; PQ[front + 2*pqIndex+2] = PQ[front +pqIndex]; PQ[front + pqIndex] = swap; pqIndex = 2*pqIndex+2; } else{ break; } } } //removed from openList openList[node] = -1; //added to expand next int len = atomicAdd(expandNodes_size,1); expandNodes[len]=node; } } //for K in parallel __global__ void A_star_expand(int* off,int* edge,unsigned int* W,int* Hx,int* parent,volatile int* Cx, int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList, int N,int E, int K,int dest,int* nVFlag,int* PQ_size, int flagDiff,int* diff_off,int* diff_edge,unsigned int* diff_weight,int dE ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id< *expandNodes_size ){ int node = expandNodes[id]; //reach dest if(node == dest){ atomicOr(flagfound,1); } // expand int start = off[node]; int end = E; if(node!=N-1) end = off[node+1]; while(start < end){ int child = edge[start]; //deleted edges if(child<0){ start++; continue; } //array L initilaized with 0 //get the lock for child to update C(x) //loop till acquire the lock bool leaveLoop = false; while(leaveLoop==false){ 
if(atomicCAS(&lock[child],0,1)==0){ //critical section if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){ Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child]; __threadfence(); parent[child] = node; if(openList[child]==-1){ nVFlag[child]=1; //add only once } } //end critical section leaveLoop = true; atomicCAS(&lock[child],1,0); } __syncthreads(); } start++; } //diff expand if(flagDiff){ start = diff_off[node]; end = dE; if(node!=N-1) end = diff_off[node+1]; while(start<end){ int child = diff_edge[start]; //deleted edges if(child<0){ start++; continue; } //array L initilaized with 0 //get the lock for child to update C(x) bool leaveLoop = false; while(!leaveLoop){ if(atomicCAS(&lock[child],0,1)==0){ //critical section if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){ Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child]; __threadfence(); parent[child] = node; if(openList[child]==-1){ nVFlag[child]=1; //add only once } } //end critical section leaveLoop = true; atomicCAS(&lock[child],1,0); } __syncthreads(); } start++; } } //end diff }//end } //K in parallel -- O(N) __global__ void keepHeapPQ(int* PQ_size,int* Cx,int N,int K){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < K && PQ_size[id] > 0){ int front = id*( (N+K-1)/K ); int size = PQ_size[id]; for(int i=front;i<front+size;i++){ if(2*i+2 < front+size){ int cost = Cx[PQ[i]]; int costLeft = Cx[PQ[2*i+1]]; int costRight = Cx[PQ[2*i+2]]; if( cost > costLeft || cost > costRight ){ int index ; if(costLeft <= costRight) index = 2*i+1; else index = 2*i+2; while(index > front){ if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){ int swap = PQ[index]; PQ[index] = PQ[(index-1)/2]; PQ[(index-1)/2] = swap; index = (index-1)/2; } else break; } } } else if(2*i+1 < front+size){ if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){ int index = 2*i+1; while(index > front){ if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){ int swap = PQ[index]; PQ[index] = PQ[(index-1)/2]; PQ[(index-1)/2] = swap; index = (index-1)/2; } else break; } } } } } } //N threads __global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < N){ if(nextFlag[id]==1){ int index = atomicAdd(nvSize,1); nextV[index]=id; } } } //for K in parallel __global__ void insertPQ(int* PQS,int* nextV,int* nVsize,int* Cx,int K,int N,int* openList){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < K){ int front = id*( (N+K-1)/K ); int i = id; while(i<*nVsize){ //if not already present if(openList[nextV[i]]!=-1){ i+=K; continue; } PQ[front+PQS[id]]= nextV[i]; PQS[id]+=1; //add in openList openList[nextV[i]] = id; if(PQS[id]>1){ int index = PQS[id]-1; while(index>0){ if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){ int swap = PQ[front+index]; PQ[front+index]=PQ[front+ (index-1)/2]; PQ[front+ (index-1)/2] = swap; index = (index-1)/2; } else break; } } i += K; } } } //for K in parallel __global__ void checkMIN(int* PQ_size,int* flagEnd,int* Cx,int dest,int N,int K){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < K && PQ_size[id] > 0 ){ int front = id* ( (N+K-1)/K ); int node = PQ[front]; //check if atleast one min, dont end the a* if( Cx[node] < Cx[dest] ){ atomicAnd(flagEnd,0); } } } __global__ void getCx(int* Cx,int dest,int* val){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id==0){ *val = Cx[dest]; } } int main(){ //the K PQ int K ; scanf("%d\n",&K); int startNode,endNode; scanf("%d %d",&startNode,&endNode); FILE* fgraph = fopen("graph.txt","r"); int N,E; fscanf(fgraph,"%d %d\n",&N,&E); int* H_offset = (int*)malloc(sizeof(int)*N); int* 
H_edges = (int*)malloc(sizeof(int)*E); unsigned int* H_weight = (unsigned int*)malloc(sizeof(unsigned int)*E); int* H_hx = (int*)malloc(sizeof(int)*N); int* H_cx = (int*)malloc(sizeof(int)*N); int* H_parent = (int*)malloc(sizeof(int)*N); int* H_parent_old = (int*)malloc(sizeof(int)*N); int* H_PQ = (int*)malloc(sizeof(int)*N); int* H_openList = (int*)malloc(sizeof(int)*N); int* H_PQ_size = (int*)malloc(sizeof(int)*K); //for cost of endNode int* H_dest_cost = (int*)malloc(sizeof(int)); memset(H_PQ_size,0,sizeof(int)*K); memset(H_openList,-1,sizeof(int)*N); //init cx for(int i=0;i<N;i++){ H_cx[i]=INT_MAX; H_parent[i]=-1; } for(int i=0;i<E;i++){ fscanf(fgraph,"%d",&H_edges[i]); } for(int i=0;i<N;i++){ fscanf(fgraph,"%d",&H_offset[i]); } for(int i=0;i<E;i++){ fscanf(fgraph,"%u",&H_weight[i]); } FILE* fhx = fopen("Hx.txt","r"); for(int i=0;i<N;i++){ int temp; fscanf(fhx,"%d",&temp); if(temp!=-1) H_hx[i]= temp; else H_hx[i] = 0; //to change } fclose(fgraph); fclose(fhx); printf("[INFO] completed taking input\n"); //init Host var int* H_flagEnd = (int*)malloc(sizeof(int)); int* H_flagfound = (int*)malloc(sizeof(int)); int* H_a0 = (int*)malloc(sizeof(int)); int* H_nV_size = (int*)malloc(sizeof(int)); int* H_nV = (int*)malloc(sizeof(int)*N); //required coz if many tries to add same in diff threads high low lower int* H_nVFlag = (int*)malloc(sizeof(int)*N); memset(H_nVFlag,-1,sizeof(int)*N); *H_flagEnd = 0; *H_flagfound = 0; *H_a0 = 0; //insert startNode in PQ[0] H_cx[startNode]=H_hx[startNode]; H_PQ[0]=startNode; H_PQ_size[0]=1; H_openList[startNode]=0; //create events to record runtime hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); //graph struture int* D_offset; int* D_edges ; unsigned int* D_weight; int* D_hx; int* D_parent; //for reading the ancessostor to avoid lock for write after read. 
int* D_parent_old; //Priority queue size int* D_PQ_size; //CX int* D_Cx; //flag if in openList(contains which PQ) int* D_openList; //lock for nodes int* D_lock; //Diff structure int* D_diff_edges; int* D_diff_offset; unsigned int* D_diff_weight; //next nodes flag int* D_nVFlag; //next nodes array to insert PQ int* D_nV; int* D_nV_size; //nodes to be expanded ( extracted from PQ ) int* D_expandNodes; int* D_expandNodes_size; //flag to end while loop and found the destination int* D_flagEnd; int* D_flagfound; //cost of endNode int* D_dest_cost; gpuErrchk ( hipMalloc(&D_offset,sizeof(int)*N) ); gpuErrchk ( hipMalloc(&D_edges,sizeof(int)*E) ); gpuErrchk ( hipMalloc(&D_weight,sizeof(unsigned int)*E) ); gpuErrchk ( hipMalloc(&D_hx,sizeof(int)*N) ); gpuErrchk ( hipMalloc(&D_parent,sizeof(int)*N) ); gpuErrchk ( hipMalloc(&D_parent_old,sizeof(int)*N) ); gpuErrchk ( hipMalloc(&D_Cx,sizeof(int)*N) ); gpuErrchk ( hipMalloc(&D_PQ_size,sizeof(int)*K) ); gpuErrchk ( hipMalloc(&D_openList,sizeof(int)*N) ); gpuErrchk ( hipMalloc(&D_lock,sizeof(int)*N) ); gpuErrchk ( hipMalloc(&D_dest_cost,sizeof(int)) ); //for next set of vertices to add in PQ gpuErrchk ( hipMalloc(&D_nV,sizeof(int)*N) ); gpuErrchk ( hipMalloc(&D_nV_size,sizeof(int)) ); gpuErrchk ( hipMalloc(&D_nVFlag,sizeof(int)*N) ); //next nodes to expand gpuErrchk ( hipMalloc(&D_expandNodes,sizeof(int)*K) ); //changed to K gpuErrchk ( hipMalloc(&D_expandNodes_size,sizeof(int)) ); //flag to end search gpuErrchk( hipMalloc(&D_flagEnd,sizeof(int)) ); gpuErrchk( hipMalloc(&D_flagfound,sizeof(int)) ); gpuErrchk ( hipMemcpy(D_offset,H_offset,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_edges,H_edges,sizeof(int)*E,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_weight,H_weight,sizeof(unsigned int)*E,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_hx,H_hx,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_parent,H_parent,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_openList,H_openList,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_PQ_size,H_PQ_size,sizeof(int)*K,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_Cx,H_cx,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpyToSymbol(PQ,H_PQ, sizeof(int)*N, 0, hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_flagEnd,H_flagEnd,sizeof(int),hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_flagfound,H_flagfound,sizeof(int),hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_nV_size,H_a0,sizeof(int),hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_expandNodes_size,H_a0,sizeof(int),hipMemcpyHostToDevice) ); gpuErrchk ( hipMemset(D_lock,0,sizeof(int)*N) ); int flag_PQ_not_empty = 0; for(int i=0;i<K;i++){ if(H_PQ_size[i]>0) flag_PQ_not_empty=1; } int numThreads = 512; int numBlocks = (K+numThreads-1)/numThreads; int N_numBlocks = (N+numThreads-1)/numThreads; if(DEBUG) printf("[INFO] A* started\n"); hipEventRecord(start); //DO A* initailly on whole graph while(*H_flagEnd==0 && flag_PQ_not_empty==1){ //extract min hipLaunchKernelGGL(( extractMin), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size, D_expandNodes,D_expandNodes_size,D_Cx,D_openList,N,K); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipLaunchKernelGGL(( A_star_expand), dim3(numBlocks),dim3(numThreads), 0, 0, D_offset,D_edges,D_weight,D_hx,D_parent,D_Cx, D_expandNodes,D_expandNodes_size, D_lock ,D_flagfound,D_openList, N,E,K,endNode,D_nVFlag,D_PQ_size, 
false,D_diff_offset,D_diff_edges,D_diff_weight,0); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipLaunchKernelGGL(( keepHeapPQ), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size,D_Cx,N,K); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); //gen from flag D_nV //for N in parallel hipLaunchKernelGGL(( setNV), dim3(N_numBlocks),dim3(numThreads), 0, 0, D_nVFlag,D_nV,D_nV_size,N); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipLaunchKernelGGL(( insertPQ), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size,D_nV,D_nV_size,D_Cx,K,N,D_openList); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); //cpy flagend and flagEmpty gpuErrchk( hipMemcpy(H_flagfound,D_flagfound, sizeof(int),hipMemcpyDeviceToHost) ); gpuErrchk( hipMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,hipMemcpyDeviceToHost) ); //reset nVFlag gpuErrchk( hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice) ); //reset next insert array gpuErrchk( hipMemcpy(D_nV_size,H_a0,sizeof(int),hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(D_expandNodes_size,H_a0,sizeof(int),hipMemcpyHostToDevice) ); flag_PQ_not_empty = 0; for(int i=0;i<K;i++){ if(H_PQ_size[i]>0) flag_PQ_not_empty=1; } //check for mins if( *H_flagfound==1 && flag_PQ_not_empty==1){ //end gpuErrchk( hipMemcpy(D_flagEnd,H_flagfound,sizeof(int),hipMemcpyHostToDevice) ); hipLaunchKernelGGL(( checkMIN), dim3(numBlocks),dim3(numThreads) , 0, 0, D_PQ_size,D_flagEnd,D_Cx,endNode,N,K); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); gpuErrchk( hipMemcpy(H_flagEnd,D_flagEnd, sizeof(int),hipMemcpyDeviceToHost) ); } } hipLaunchKernelGGL(( getCx), dim3(1),dim3(1), 0, 0, D_Cx,endNode,D_dest_cost); gpuErrchk( hipMemcpy(H_dest_cost,D_dest_cost, sizeof(int),hipMemcpyDeviceToHost) ); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("RUN TIME: %f\n",milliseconds); gpuErrchk( hipMemcpy(H_parent,D_parent, sizeof(int)*N,hipMemcpyDeviceToHost) ); printf("[OUT] Cost: %d\n",*H_dest_cost); printf("[OUT] Path(in reverse): "); if(*H_dest_cost!=INT_MAX){ int p = endNode; while(H_parent[p]!=-1){ printf("%d ",p); p = H_parent[p]; } printf("%d\n",p); } else{ printf("not found\n"); } FILE* fdiff = fopen("Updates.txt","r"); int line; int update_count = 0; while(fscanf(fdiff,"%d\n",&line)!=EOF){ unordered_map<unsigned int,Node*> Graph; unordered_map<unsigned int,Node*> rev_Graph; vector<pair<int,int>>deleted_edges; int insertEdge=0, delEdge=0; for(int i=0;i<line;i++){ int flag; int u,v; unsigned int w; fscanf(fdiff,"%d %d %d %u\n",&flag,&u,&v,&w); if(flag==1){ insertDiff(Graph,u,v,w); insertDiff(rev_Graph,v,u,w); insertEdge++; } else if(flag==0){ deleted_edges.push_back(pair<int,int>(u,v)); } } // insertEdge is insertion size //for diff int* H_diff_edges = (int*)malloc(sizeof(int)*insertEdge); int* H_diff_offset = (int*)malloc(sizeof(int)*N); unsigned int* H_diff_weight = (unsigned int*)malloc(sizeof(unsigned int)*insertEdge); //reset offset to 0 ..ie no nodes memset(H_diff_offset,0,sizeof(int)*N); if(1) printf("[INFO](%d) insertion:%d\n",update_count,insertEdge); createDiffGraph(N,Graph,H_diff_offset,H_diff_edges,H_diff_weight); //start computation for deletion for(int j=0;j<deleted_edges.size();j++){ int u,v; u= deleted_edges[j].first; v= deleted_edges[j].second; //if deleted adds to delEdge removeDelEdges(u,v,H_offset,H_edges,N,E,delEdge); } //merge graph int* H_offset_new,*H_edges_new; unsigned int* H_weight_new; int E_new = E + insertEdge - delEdge; H_offset_new = 
(int*)malloc(sizeof(int)*N); H_edges_new = (int*)malloc(sizeof(int)*E_new); H_weight_new = (unsigned int*)malloc(sizeof(unsigned int)*E_new); mergeDiff(H_offset,H_edges,H_weight,N,E, H_diff_offset,H_diff_edges,H_diff_weight,insertEdge,delEdge, H_offset_new,H_edges_new,H_weight_new); //free pointer free(H_offset); free(H_edges); free(H_weight); free(H_diff_offset); free(H_diff_edges); free(H_diff_weight); H_offset = H_offset_new; H_edges = H_edges_new; H_weight = H_weight_new; //hipFree and cpy hipFree(D_edges); hipFree(D_weight); gpuErrchk ( hipMalloc(&D_edges,sizeof(int)*E_new) ); gpuErrchk ( hipMalloc(&D_weight,sizeof(unsigned int)*E_new) ); gpuErrchk ( hipMemcpy(D_offset,H_offset,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_edges,H_edges,sizeof(int)*E_new,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_weight,H_weight,sizeof(unsigned int)*E_new,hipMemcpyHostToDevice) ); //change E E = E_new; //reset everything memset(H_parent,-1,sizeof(int)*N); memset(H_openList,-1,sizeof(int)*N); memset(H_PQ_size,0,sizeof(int)*K); H_cx[startNode]=H_hx[startNode]; H_PQ[0]=startNode; H_PQ_size[0]=1; H_openList[startNode]=0; *H_flagEnd = 0; *H_flagfound = 0; *H_a0 = 0; gpuErrchk ( hipMemcpy(D_Cx,H_cx,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpyToSymbol(PQ,H_PQ, sizeof(int)*N, 0, hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_parent,H_parent,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_openList,H_openList,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_PQ_size,H_PQ_size,sizeof(int)*K,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_flagEnd,H_flagEnd,sizeof(int),hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_flagfound,H_flagfound,sizeof(int),hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_nV_size,H_a0,sizeof(int),hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_expandNodes_size,H_a0,sizeof(int),hipMemcpyHostToDevice) ); flag_PQ_not_empty = 0; for(int i=0;i<K;i++){ if(H_PQ_size[i]>0) flag_PQ_not_empty=1; } hipEventRecord(start); //DO A* initailly on whole graph while(*H_flagEnd==0 && flag_PQ_not_empty==1){ //extract min hipLaunchKernelGGL(( extractMin), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size, D_expandNodes,D_expandNodes_size,D_Cx,D_openList,N,K); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipLaunchKernelGGL(( A_star_expand), dim3(numBlocks),dim3(numThreads), 0, 0, D_offset,D_edges,D_weight,D_hx,D_parent,D_Cx, D_expandNodes,D_expandNodes_size, D_lock ,D_flagfound,D_openList, N,E,K,endNode,D_nVFlag,D_PQ_size, false,D_diff_offset,D_diff_edges,D_diff_weight,0); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipLaunchKernelGGL(( keepHeapPQ), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size,D_Cx,N,K); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); //gen from flag D_nV //for N in parallel hipLaunchKernelGGL(( setNV), dim3(N_numBlocks),dim3(numThreads), 0, 0, D_nVFlag,D_nV,D_nV_size,N); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipLaunchKernelGGL(( insertPQ), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size,D_nV,D_nV_size,D_Cx,K,N,D_openList); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); //cpy flagend and flagEmpty gpuErrchk( hipMemcpy(H_flagfound,D_flagfound, sizeof(int),hipMemcpyDeviceToHost) ); gpuErrchk( hipMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,hipMemcpyDeviceToHost) ); //reset nVFlag gpuErrchk( hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice) ); //reset 
next insert array gpuErrchk( hipMemcpy(D_nV_size,H_a0,sizeof(int),hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(D_expandNodes_size,H_a0,sizeof(int),hipMemcpyHostToDevice) ); flag_PQ_not_empty = 0; for(int i=0;i<K;i++){ if(H_PQ_size[i]>0) flag_PQ_not_empty=1; } //check for mins if( *H_flagfound==1 && flag_PQ_not_empty==1){ //end gpuErrchk( hipMemcpy(D_flagEnd,H_flagfound,sizeof(int),hipMemcpyHostToDevice) ); hipLaunchKernelGGL(( checkMIN), dim3(numBlocks),dim3(numThreads) , 0, 0, D_PQ_size,D_flagEnd,D_Cx,endNode,N,K); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); gpuErrchk( hipMemcpy(H_flagEnd,D_flagEnd, sizeof(int),hipMemcpyDeviceToHost) ); } } hipLaunchKernelGGL(( getCx), dim3(1),dim3(1), 0, 0, D_Cx,endNode,D_dest_cost); gpuErrchk( hipMemcpy(H_dest_cost,D_dest_cost, sizeof(int),hipMemcpyDeviceToHost) ); hipEventRecord(stop); hipEventSynchronize(stop); float mt = 0; hipEventElapsedTime(&mt, start, stop); printf("RUN TIME: %f\n",mt); milliseconds+=mt; gpuErrchk( hipMemcpy(H_parent,D_parent, sizeof(int)*N,hipMemcpyDeviceToHost) ); printf("[OUT] Cost: %d\n",*H_dest_cost); printf("[OUT] Path(in reverse): "); if(*H_dest_cost!=INT_MAX){ int p = endNode; while(H_parent[p]!=-1){ printf("%d ",p); p = H_parent[p]; } printf("%d\n",p); } else{ printf("not found\n"); } update_count++; } printf("[INFO] run time %d: %f\n",update_count,milliseconds); } void insertDiff(unordered_map< unsigned int, Node*>& Graph,int a,int b,unsigned int c){ unordered_map<unsigned int,Node*>:: iterator itr; itr = Graph.find(a); if(itr!=Graph.end()){ Node* n = itr->second; unordered_map<unsigned int,Node*>:: iterator it; it = Graph.find(b); if(it!=Graph.end()){ Node* v = it->second; n->addEdge(v,c); } else{ Node* v = new Node(b); n->addEdge(v,c); Graph.insert(pair<unsigned int,Node*>(b,v)); } } else{ Node* n =new Node(a); Graph.insert(pair<unsigned int,Node*>(a,n)); unordered_map<unsigned int,Node*>:: iterator it; it = Graph.find(b); if(it!=Graph.end()){ Node* v = it->second; n->addEdge(v,c); } else{ Node* v = new Node(b); n->addEdge(v,c); Graph.insert(pair<unsigned int,Node*>(b,v)); } } } void createDiffGraph(int N,unordered_map<unsigned int,Node*>& Graph, int* diffOff,int* diffEdges,unsigned int* diffWeight ){ int offindex = 0; diffOff[offindex] = 0; offindex++; int k =0; int weightCount = 0; for(int i=0;i<N;i++){ unordered_map<unsigned int,Node*>:: iterator itr; itr = Graph.find(i); if(itr!=Graph.end()){ Node* n = itr->second; for(int j=0;j<n->Edges.size();j++){ diffEdges[k] = n->Edges[j]->val; k++; } for(int j=0;j<n->weights.size();j++){ diffWeight[weightCount] = n->weights[j]; weightCount++; } if(offindex < N ){ diffOff[offindex] = k; offindex++; } } else{ if(offindex < N ){ diffOff[offindex] = k; offindex++; } } } } void removeDelEdges(int u,int v,int* offset,int* edges,int N,int E,int& del_size){ int start = offset[u]; int end = E; bool flag_done = false; if(u!=N-1) end = offset[u+1]; while(start<end){ if( v == edges[start]){ edges[start]=-1; flag_done = true; break; } start++; } if(flag_done) del_size++; } void mergeDiff(int* offset,int* edges,unsigned int* weight,int N,int E, int* diff_offset, int* diff_edges,unsigned int* diff_weight,int insert_size,int del_size, int* mOffset,int* mEdges,unsigned int* mWeight){ int E_new = E + insert_size - del_size; mOffset[0] = 0; int edegOffset= 0; for(int i=0;i<N;i++){ int start = offset[i]; int end = E; if(i!=N-1) end = offset[i+1]; //int count = 0; while(start<end){ int child = edges[start]; if(child!=-1){ mEdges[edegOffset] = child; mWeight[edegOffset] = 
weight[start]; edegOffset++; } start++; } start = diff_offset[i]; end = insert_size; if(i!=N-1) end = diff_offset[i+1]; while(start<end){ int child = diff_edges[start]; if(child!=-1){ mEdges[edegOffset] = child; mWeight[edegOffset]= diff_weight[start]; edegOffset++; } start++; } if(edegOffset > E_new){ printf("ERROR: size %d::%d\n",E_new,edegOffset); } if(i!=N-1) mOffset[i+1]=edegOffset; } }
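The K-parallel priority queues above are plain binary min-heaps keyed on Cx: each queue owns a contiguous slice of PQ of length (N+K-1)/K, extractMin pops the root and sifts the last element down, insertPQ appends and sifts up, and keepHeapPQ re-establishes the invariant after costs change. A minimal host-side sketch of the textbook equivalent for one queue (helper names and the std::vector layout are illustrative, not part of this source):

#include <utility>
#include <vector>

// One queue = one contiguous slice of PQ of length (N+K-1)/K, ordered by Cx.
static void siftDown(std::vector<int>& heap, const std::vector<int>& Cx, size_t i) {
    size_t n = heap.size();
    while (2*i + 1 < n) {
        size_t c = 2*i + 1;                                // left child
        if (c + 1 < n && Cx[heap[c+1]] < Cx[heap[c]]) ++c; // pick the cheaper child
        if (Cx[heap[c]] >= Cx[heap[i]]) break;             // heap property restored
        std::swap(heap[i], heap[c]);
        i = c;
    }
}

static void siftUp(std::vector<int>& heap, const std::vector<int>& Cx, size_t i) {
    while (i > 0 && Cx[heap[(i-1)/2]] > Cx[heap[i]]) {     // same move insertPQ makes
        std::swap(heap[i], heap[(i-1)/2]);
        i = (i-1)/2;
    }
}

// What extractMin does per queue: pop the root, move the last node up, sift down.
static int popMin(std::vector<int>& heap, const std::vector<int>& Cx) {
    int node = heap.front();
    heap.front() = heap.back();
    heap.pop_back();
    if (!heap.empty()) siftDown(heap, Cx, 0);
    return node;
}

// insertPQ equivalent: append, then sift up.
static void push(std::vector<int>& heap, const std::vector<int>& Cx, int node) {
    heap.push_back(node);
    siftUp(heap, Cx, heap.size() - 1);
}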
f7993c99b357f9ba2248db577d18ce3354625872.cu
/* For DIRECTED GRAPH */ #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <iostream> #include <vector> #include <unordered_map> #include <string> #include <algorithm> #define MAX_NODE 100000000 #define DEBUG 0 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } using namespace std; class Node{ public: unsigned int val; vector<unsigned int> weights; vector<Node*> Edges; Node(int val){ this->val = val; } void addEdge(Node* v,unsigned int w){ this->Edges.push_back(v); this->weights.push_back(w); } }; void insertDiff(unordered_map< unsigned int, Node*>& Graph,int a,int b,unsigned int c); void createDiffGraph(int N,unordered_map<unsigned int,Node*>& Graph, int* diffOff,int* diffEdges,unsigned int* diffWeight ); void removeDelEdges(int u,int v,int* offset,int* edges,int N,int E,int& del_size); void mergeDiff(int* offset,int* edges,unsigned int* weight,int N,int E, int* diff_offset, int* diff_edges,unsigned int* diff_weight,int insert_size,int del_size, int* mOffset,int* mEdges,unsigned int* mWeight); // __device__ volatile int Cx[MAX_NODE]; __device__ volatile int PQ[MAX_NODE]; //K in parallel __global__ void extractMin(int* PQ_size, int* expandNodes,int* expandNodes_size,int* Cx,int* openList,int N,int K){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<K && PQ_size[id]>0){ //extract min from PQ int front = id* ( (N+K-1)/K ); int node = PQ[front]; // restructure the heap PQ[front]=PQ[front+PQ_size[id]-1]; PQ_size[id]-=1; int pqIndex = 0; while(2*pqIndex+1 < PQ_size[id]){ if(2*pqIndex+2 >= PQ_size[id]){ if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){ int swap = PQ[front + 2*pqIndex+1]; PQ[front + 2*pqIndex+1] = PQ[front +pqIndex]; PQ[front + pqIndex] = swap; pqIndex = 2*pqIndex+1; } else break; } else{ if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){ int swap = PQ[front + 2*pqIndex+1]; PQ[front + 2*pqIndex+1] = PQ[front +pqIndex]; PQ[front + pqIndex] = swap; pqIndex = 2*pqIndex+1; } else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){ int swap = PQ[front + 2*pqIndex+2]; PQ[front + 2*pqIndex+2] = PQ[front +pqIndex]; PQ[front + pqIndex] = swap; pqIndex = 2*pqIndex+2; } else{ break; } } } //removed from openList openList[node] = -1; //added to expand next int len = atomicAdd(expandNodes_size,1); expandNodes[len]=node; } } //for K in parallel __global__ void A_star_expand(int* off,int* edge,unsigned int* W,int* Hx,int* parent,volatile int* Cx, int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList, int N,int E, int K,int dest,int* nVFlag,int* PQ_size, int flagDiff,int* diff_off,int* diff_edge,unsigned int* diff_weight,int dE ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id< *expandNodes_size ){ int node = expandNodes[id]; //reach dest if(node == dest){ atomicOr(flagfound,1); } // expand int start = off[node]; int end = E; if(node!=N-1) end = off[node+1]; while(start < end){ int child = edge[start]; //deleted edges if(child<0){ start++; continue; } //array L initilaized with 0 //get the lock for child to update C(x) //loop till acquire the lock bool leaveLoop = false; while(leaveLoop==false){ if(atomicCAS(&lock[child],0,1)==0){ //critical section if( Cx[child] > 
(Cx[node] - Hx[node])+ W[start]+ Hx[child] ){ Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child]; __threadfence(); parent[child] = node; if(openList[child]==-1){ nVFlag[child]=1; //add only once } } //end critical section leaveLoop = true; atomicCAS(&lock[child],1,0); } __syncthreads(); } start++; } //diff expand if(flagDiff){ start = diff_off[node]; end = dE; if(node!=N-1) end = diff_off[node+1]; while(start<end){ int child = diff_edge[start]; //deleted edges if(child<0){ start++; continue; } //array L initilaized with 0 //get the lock for child to update C(x) bool leaveLoop = false; while(!leaveLoop){ if(atomicCAS(&lock[child],0,1)==0){ //critical section if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){ Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child]; __threadfence(); parent[child] = node; if(openList[child]==-1){ nVFlag[child]=1; //add only once } } //end critical section leaveLoop = true; atomicCAS(&lock[child],1,0); } __syncthreads(); } start++; } } //end diff }//end } //K in parallel -- O(N) __global__ void keepHeapPQ(int* PQ_size,int* Cx,int N,int K){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < K && PQ_size[id] > 0){ int front = id*( (N+K-1)/K ); int size = PQ_size[id]; for(int i=front;i<front+size;i++){ if(2*i+2 < front+size){ int cost = Cx[PQ[i]]; int costLeft = Cx[PQ[2*i+1]]; int costRight = Cx[PQ[2*i+2]]; if( cost > costLeft || cost > costRight ){ int index ; if(costLeft <= costRight) index = 2*i+1; else index = 2*i+2; while(index > front){ if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){ int swap = PQ[index]; PQ[index] = PQ[(index-1)/2]; PQ[(index-1)/2] = swap; index = (index-1)/2; } else break; } } } else if(2*i+1 < front+size){ if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){ int index = 2*i+1; while(index > front){ if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){ int swap = PQ[index]; PQ[index] = PQ[(index-1)/2]; PQ[(index-1)/2] = swap; index = (index-1)/2; } else break; } } } } } } //N threads __global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < N){ if(nextFlag[id]==1){ int index = atomicAdd(nvSize,1); nextV[index]=id; } } } //for K in parallel __global__ void insertPQ(int* PQS,int* nextV,int* nVsize,int* Cx,int K,int N,int* openList){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < K){ int front = id*( (N+K-1)/K ); int i = id; while(i<*nVsize){ //if not already present if(openList[nextV[i]]!=-1){ i+=K; continue; } PQ[front+PQS[id]]= nextV[i]; PQS[id]+=1; //add in openList openList[nextV[i]] = id; if(PQS[id]>1){ int index = PQS[id]-1; while(index>0){ if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){ int swap = PQ[front+index]; PQ[front+index]=PQ[front+ (index-1)/2]; PQ[front+ (index-1)/2] = swap; index = (index-1)/2; } else break; } } i += K; } } } //for K in parallel __global__ void checkMIN(int* PQ_size,int* flagEnd,int* Cx,int dest,int N,int K){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < K && PQ_size[id] > 0 ){ int front = id* ( (N+K-1)/K ); int node = PQ[front]; //check if atleast one min, dont end the a* if( Cx[node] < Cx[dest] ){ atomicAnd(flagEnd,0); } } } __global__ void getCx(int* Cx,int dest,int* val){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id==0){ *val = Cx[dest]; } } int main(){ //the K PQ int K ; scanf("%d\n",&K); int startNode,endNode; scanf("%d %d",&startNode,&endNode); FILE* fgraph = fopen("graph.txt","r"); int N,E; fscanf(fgraph,"%d %d\n",&N,&E); int* H_offset = (int*)malloc(sizeof(int)*N); int* H_edges = (int*)malloc(sizeof(int)*E); unsigned int* H_weight = 
(unsigned int*)malloc(sizeof(unsigned int)*E); int* H_hx = (int*)malloc(sizeof(int)*N); int* H_cx = (int*)malloc(sizeof(int)*N); int* H_parent = (int*)malloc(sizeof(int)*N); int* H_parent_old = (int*)malloc(sizeof(int)*N); int* H_PQ = (int*)malloc(sizeof(int)*N); int* H_openList = (int*)malloc(sizeof(int)*N); int* H_PQ_size = (int*)malloc(sizeof(int)*K); //for cost of endNode int* H_dest_cost = (int*)malloc(sizeof(int)); memset(H_PQ_size,0,sizeof(int)*K); memset(H_openList,-1,sizeof(int)*N); //init cx for(int i=0;i<N;i++){ H_cx[i]=INT_MAX; H_parent[i]=-1; } for(int i=0;i<E;i++){ fscanf(fgraph,"%d",&H_edges[i]); } for(int i=0;i<N;i++){ fscanf(fgraph,"%d",&H_offset[i]); } for(int i=0;i<E;i++){ fscanf(fgraph,"%u",&H_weight[i]); } FILE* fhx = fopen("Hx.txt","r"); for(int i=0;i<N;i++){ int temp; fscanf(fhx,"%d",&temp); if(temp!=-1) H_hx[i]= temp; else H_hx[i] = 0; //to change } fclose(fgraph); fclose(fhx); printf("[INFO] completed taking input\n"); //init Host var int* H_flagEnd = (int*)malloc(sizeof(int)); int* H_flagfound = (int*)malloc(sizeof(int)); int* H_a0 = (int*)malloc(sizeof(int)); int* H_nV_size = (int*)malloc(sizeof(int)); int* H_nV = (int*)malloc(sizeof(int)*N); //required coz if many tries to add same in diff threads high low lower int* H_nVFlag = (int*)malloc(sizeof(int)*N); memset(H_nVFlag,-1,sizeof(int)*N); *H_flagEnd = 0; *H_flagfound = 0; *H_a0 = 0; //insert startNode in PQ[0] H_cx[startNode]=H_hx[startNode]; H_PQ[0]=startNode; H_PQ_size[0]=1; H_openList[startNode]=0; //create events to record runtime cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); //graph struture int* D_offset; int* D_edges ; unsigned int* D_weight; int* D_hx; int* D_parent; //for reading the ancessostor to avoid lock for write after read. int* D_parent_old; //Priority queue size int* D_PQ_size; //CX int* D_Cx; //flag if in openList(contains which PQ) int* D_openList; //lock for nodes int* D_lock; //Diff structure int* D_diff_edges; int* D_diff_offset; unsigned int* D_diff_weight; //next nodes flag int* D_nVFlag; //next nodes array to insert PQ int* D_nV; int* D_nV_size; //nodes to be expanded ( extracted from PQ ) int* D_expandNodes; int* D_expandNodes_size; //flag to end while loop and found the destination int* D_flagEnd; int* D_flagfound; //cost of endNode int* D_dest_cost; gpuErrchk ( cudaMalloc(&D_offset,sizeof(int)*N) ); gpuErrchk ( cudaMalloc(&D_edges,sizeof(int)*E) ); gpuErrchk ( cudaMalloc(&D_weight,sizeof(unsigned int)*E) ); gpuErrchk ( cudaMalloc(&D_hx,sizeof(int)*N) ); gpuErrchk ( cudaMalloc(&D_parent,sizeof(int)*N) ); gpuErrchk ( cudaMalloc(&D_parent_old,sizeof(int)*N) ); gpuErrchk ( cudaMalloc(&D_Cx,sizeof(int)*N) ); gpuErrchk ( cudaMalloc(&D_PQ_size,sizeof(int)*K) ); gpuErrchk ( cudaMalloc(&D_openList,sizeof(int)*N) ); gpuErrchk ( cudaMalloc(&D_lock,sizeof(int)*N) ); gpuErrchk ( cudaMalloc(&D_dest_cost,sizeof(int)) ); //for next set of vertices to add in PQ gpuErrchk ( cudaMalloc(&D_nV,sizeof(int)*N) ); gpuErrchk ( cudaMalloc(&D_nV_size,sizeof(int)) ); gpuErrchk ( cudaMalloc(&D_nVFlag,sizeof(int)*N) ); //next nodes to expand gpuErrchk ( cudaMalloc(&D_expandNodes,sizeof(int)*K) ); //changed to K gpuErrchk ( cudaMalloc(&D_expandNodes_size,sizeof(int)) ); //flag to end search gpuErrchk( cudaMalloc(&D_flagEnd,sizeof(int)) ); gpuErrchk( cudaMalloc(&D_flagfound,sizeof(int)) ); gpuErrchk ( cudaMemcpy(D_offset,H_offset,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_edges,H_edges,sizeof(int)*E,cudaMemcpyHostToDevice) ); gpuErrchk ( 
cudaMemcpy(D_weight,H_weight,sizeof(unsigned int)*E,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_hx,H_hx,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_parent,H_parent,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_openList,H_openList,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_PQ_size,H_PQ_size,sizeof(int)*K,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_Cx,H_cx,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpyToSymbol(PQ,H_PQ, sizeof(int)*N, 0, cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_flagEnd,H_flagEnd,sizeof(int),cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_flagfound,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemset(D_lock,0,sizeof(int)*N) ); int flag_PQ_not_empty = 0; for(int i=0;i<K;i++){ if(H_PQ_size[i]>0) flag_PQ_not_empty=1; } int numThreads = 512; int numBlocks = (K+numThreads-1)/numThreads; int N_numBlocks = (N+numThreads-1)/numThreads; if(DEBUG) printf("[INFO] A* started\n"); cudaEventRecord(start); //DO A* initailly on whole graph while(*H_flagEnd==0 && flag_PQ_not_empty==1){ //extract min extractMin<<<numBlocks,numThreads>>>(D_PQ_size, D_expandNodes,D_expandNodes_size,D_Cx,D_openList,N,K); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); A_star_expand<<<numBlocks,numThreads>>>(D_offset,D_edges,D_weight,D_hx,D_parent,D_Cx, D_expandNodes,D_expandNodes_size, D_lock ,D_flagfound,D_openList, N,E,K,endNode,D_nVFlag,D_PQ_size, false,D_diff_offset,D_diff_edges,D_diff_weight,0); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); keepHeapPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_Cx,N,K); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); //gen from flag D_nV //for N in parallel setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); insertPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_nV,D_nV_size,D_Cx,K,N,D_openList); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); //cpy flagend and flagEmpty gpuErrchk( cudaMemcpy(H_flagfound,D_flagfound, sizeof(int),cudaMemcpyDeviceToHost) ); gpuErrchk( cudaMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,cudaMemcpyDeviceToHost) ); //reset nVFlag gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) ); //reset next insert array gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) ); flag_PQ_not_empty = 0; for(int i=0;i<K;i++){ if(H_PQ_size[i]>0) flag_PQ_not_empty=1; } //check for mins if( *H_flagfound==1 && flag_PQ_not_empty==1){ //end gpuErrchk( cudaMemcpy(D_flagEnd,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) ); checkMIN<<< numBlocks,numThreads >>>(D_PQ_size,D_flagEnd,D_Cx,endNode,N,K); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); gpuErrchk( cudaMemcpy(H_flagEnd,D_flagEnd, sizeof(int),cudaMemcpyDeviceToHost) ); } } getCx<<<1,1>>>(D_Cx,endNode,D_dest_cost); gpuErrchk( cudaMemcpy(H_dest_cost,D_dest_cost, sizeof(int),cudaMemcpyDeviceToHost) ); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("RUN TIME: %f\n",milliseconds); gpuErrchk( cudaMemcpy(H_parent,D_parent, 
sizeof(int)*N,cudaMemcpyDeviceToHost) ); printf("[OUT] Cost: %d\n",*H_dest_cost); printf("[OUT] Path(in reverse): "); if(*H_dest_cost!=INT_MAX){ int p = endNode; while(H_parent[p]!=-1){ printf("%d ",p); p = H_parent[p]; } printf("%d\n",p); } else{ printf("not found\n"); } FILE* fdiff = fopen("Updates.txt","r"); int line; int update_count = 0; while(fscanf(fdiff,"%d\n",&line)!=EOF){ unordered_map<unsigned int,Node*> Graph; unordered_map<unsigned int,Node*> rev_Graph; vector<pair<int,int>>deleted_edges; int insertEdge=0, delEdge=0; for(int i=0;i<line;i++){ int flag; int u,v; unsigned int w; fscanf(fdiff,"%d %d %d %u\n",&flag,&u,&v,&w); if(flag==1){ insertDiff(Graph,u,v,w); insertDiff(rev_Graph,v,u,w); insertEdge++; } else if(flag==0){ deleted_edges.push_back(pair<int,int>(u,v)); } } // insertEdge is insertion size //for diff int* H_diff_edges = (int*)malloc(sizeof(int)*insertEdge); int* H_diff_offset = (int*)malloc(sizeof(int)*N); unsigned int* H_diff_weight = (unsigned int*)malloc(sizeof(unsigned int)*insertEdge); //reset offset to 0 ..ie no nodes memset(H_diff_offset,0,sizeof(int)*N); if(1) printf("[INFO](%d) insertion:%d\n",update_count,insertEdge); createDiffGraph(N,Graph,H_diff_offset,H_diff_edges,H_diff_weight); //start computation for deletion for(int j=0;j<deleted_edges.size();j++){ int u,v; u= deleted_edges[j].first; v= deleted_edges[j].second; //if deleted adds to delEdge removeDelEdges(u,v,H_offset,H_edges,N,E,delEdge); } //merge graph int* H_offset_new,*H_edges_new; unsigned int* H_weight_new; int E_new = E + insertEdge - delEdge; H_offset_new = (int*)malloc(sizeof(int)*N); H_edges_new = (int*)malloc(sizeof(int)*E_new); H_weight_new = (unsigned int*)malloc(sizeof(unsigned int)*E_new); mergeDiff(H_offset,H_edges,H_weight,N,E, H_diff_offset,H_diff_edges,H_diff_weight,insertEdge,delEdge, H_offset_new,H_edges_new,H_weight_new); //free pointer free(H_offset); free(H_edges); free(H_weight); free(H_diff_offset); free(H_diff_edges); free(H_diff_weight); H_offset = H_offset_new; H_edges = H_edges_new; H_weight = H_weight_new; //cudaFree and cpy cudaFree(D_edges); cudaFree(D_weight); gpuErrchk ( cudaMalloc(&D_edges,sizeof(int)*E_new) ); gpuErrchk ( cudaMalloc(&D_weight,sizeof(unsigned int)*E_new) ); gpuErrchk ( cudaMemcpy(D_offset,H_offset,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_edges,H_edges,sizeof(int)*E_new,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_weight,H_weight,sizeof(unsigned int)*E_new,cudaMemcpyHostToDevice) ); //change E E = E_new; //reset everything memset(H_parent,-1,sizeof(int)*N); memset(H_openList,-1,sizeof(int)*N); memset(H_PQ_size,0,sizeof(int)*K); H_cx[startNode]=H_hx[startNode]; H_PQ[0]=startNode; H_PQ_size[0]=1; H_openList[startNode]=0; *H_flagEnd = 0; *H_flagfound = 0; *H_a0 = 0; gpuErrchk ( cudaMemcpy(D_Cx,H_cx,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpyToSymbol(PQ,H_PQ, sizeof(int)*N, 0, cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_parent,H_parent,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_openList,H_openList,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_PQ_size,H_PQ_size,sizeof(int)*K,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_flagEnd,H_flagEnd,sizeof(int),cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_flagfound,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) ); gpuErrchk ( 
cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) ); flag_PQ_not_empty = 0; for(int i=0;i<K;i++){ if(H_PQ_size[i]>0) flag_PQ_not_empty=1; } cudaEventRecord(start); //DO A* initailly on whole graph while(*H_flagEnd==0 && flag_PQ_not_empty==1){ //extract min extractMin<<<numBlocks,numThreads>>>(D_PQ_size, D_expandNodes,D_expandNodes_size,D_Cx,D_openList,N,K); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); A_star_expand<<<numBlocks,numThreads>>>(D_offset,D_edges,D_weight,D_hx,D_parent,D_Cx, D_expandNodes,D_expandNodes_size, D_lock ,D_flagfound,D_openList, N,E,K,endNode,D_nVFlag,D_PQ_size, false,D_diff_offset,D_diff_edges,D_diff_weight,0); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); keepHeapPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_Cx,N,K); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); //gen from flag D_nV //for N in parallel setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); insertPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_nV,D_nV_size,D_Cx,K,N,D_openList); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); //cpy flagend and flagEmpty gpuErrchk( cudaMemcpy(H_flagfound,D_flagfound, sizeof(int),cudaMemcpyDeviceToHost) ); gpuErrchk( cudaMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,cudaMemcpyDeviceToHost) ); //reset nVFlag gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) ); //reset next insert array gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) ); flag_PQ_not_empty = 0; for(int i=0;i<K;i++){ if(H_PQ_size[i]>0) flag_PQ_not_empty=1; } //check for mins if( *H_flagfound==1 && flag_PQ_not_empty==1){ //end gpuErrchk( cudaMemcpy(D_flagEnd,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) ); checkMIN<<< numBlocks,numThreads >>>(D_PQ_size,D_flagEnd,D_Cx,endNode,N,K); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); gpuErrchk( cudaMemcpy(H_flagEnd,D_flagEnd, sizeof(int),cudaMemcpyDeviceToHost) ); } } getCx<<<1,1>>>(D_Cx,endNode,D_dest_cost); gpuErrchk( cudaMemcpy(H_dest_cost,D_dest_cost, sizeof(int),cudaMemcpyDeviceToHost) ); cudaEventRecord(stop); cudaEventSynchronize(stop); float mt = 0; cudaEventElapsedTime(&mt, start, stop); printf("RUN TIME: %f\n",mt); milliseconds+=mt; gpuErrchk( cudaMemcpy(H_parent,D_parent, sizeof(int)*N,cudaMemcpyDeviceToHost) ); printf("[OUT] Cost: %d\n",*H_dest_cost); printf("[OUT] Path(in reverse): "); if(*H_dest_cost!=INT_MAX){ int p = endNode; while(H_parent[p]!=-1){ printf("%d ",p); p = H_parent[p]; } printf("%d\n",p); } else{ printf("not found\n"); } update_count++; } printf("[INFO] run time %d: %f\n",update_count,milliseconds); } void insertDiff(unordered_map< unsigned int, Node*>& Graph,int a,int b,unsigned int c){ unordered_map<unsigned int,Node*>:: iterator itr; itr = Graph.find(a); if(itr!=Graph.end()){ Node* n = itr->second; unordered_map<unsigned int,Node*>:: iterator it; it = Graph.find(b); if(it!=Graph.end()){ Node* v = it->second; n->addEdge(v,c); } else{ Node* v = new Node(b); n->addEdge(v,c); Graph.insert(pair<unsigned int,Node*>(b,v)); } } else{ Node* n =new Node(a); Graph.insert(pair<unsigned int,Node*>(a,n)); unordered_map<unsigned int,Node*>:: iterator it; it = Graph.find(b); if(it!=Graph.end()){ Node* v = it->second; n->addEdge(v,c); } else{ Node* v = new Node(b); n->addEdge(v,c); Graph.insert(pair<unsigned int,Node*>(b,v)); } } } void createDiffGraph(int 
N,unordered_map<unsigned int,Node*>& Graph, int* diffOff,int* diffEdges,unsigned int* diffWeight ){ int offindex = 0; diffOff[offindex] = 0; offindex++; int k =0; int weightCount = 0; for(int i=0;i<N;i++){ unordered_map<unsigned int,Node*>:: iterator itr; itr = Graph.find(i); if(itr!=Graph.end()){ Node* n = itr->second; for(int j=0;j<n->Edges.size();j++){ diffEdges[k] = n->Edges[j]->val; k++; } for(int j=0;j<n->weights.size();j++){ diffWeight[weightCount] = n->weights[j]; weightCount++; } if(offindex < N ){ diffOff[offindex] = k; offindex++; } } else{ if(offindex < N ){ diffOff[offindex] = k; offindex++; } } } } void removeDelEdges(int u,int v,int* offset,int* edges,int N,int E,int& del_size){ int start = offset[u]; int end = E; bool flag_done = false; if(u!=N-1) end = offset[u+1]; while(start<end){ if( v == edges[start]){ edges[start]=-1; flag_done = true; break; } start++; } if(flag_done) del_size++; } void mergeDiff(int* offset,int* edges,unsigned int* weight,int N,int E, int* diff_offset, int* diff_edges,unsigned int* diff_weight,int insert_size,int del_size, int* mOffset,int* mEdges,unsigned int* mWeight){ int E_new = E + insert_size - del_size; mOffset[0] = 0; int edegOffset= 0; for(int i=0;i<N;i++){ int start = offset[i]; int end = E; if(i!=N-1) end = offset[i+1]; //int count = 0; while(start<end){ int child = edges[start]; if(child!=-1){ mEdges[edegOffset] = child; mWeight[edegOffset] = weight[start]; edegOffset++; } start++; } start = diff_offset[i]; end = insert_size; if(i!=N-1) end = diff_offset[i+1]; while(start<end){ int child = diff_edges[start]; if(child!=-1){ mEdges[edegOffset] = child; mWeight[edegOffset]= diff_weight[start]; edegOffset++; } start++; } if(edegOffset > E_new){ printf("ERROR: size %d::%d\n",E_new,edegOffset); } if(i!=N-1) mOffset[i+1]=edegOffset; } }
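removeDelEdges marks a deleted edge by writing -1 into the CSR edges array, and mergeDiff then builds the new CSR by copying each vertex's surviving base edges followed by its insertions from the diff CSR. A tiny self-contained walk-through of that compaction on a 3-vertex graph (the graph, weights, and array names are made up for illustration):

#include <cstdio>

int main() {
    // Base CSR: 0->1, 0->2, 1->2 ; edge 0->2 has been "deleted" (slot marked -1).
    int N = 3, E = 3;
    int offset[3] = {0, 2, 3};
    int edges[3]  = {1, -1, 2};
    unsigned int weight[3] = {4, 9, 7};

    // Diff CSR holding one inserted edge 1->0.
    int insert_size = 1, del_size = 1;
    int diff_offset[3] = {0, 0, 1};
    int diff_edges[1]  = {0};
    unsigned int diff_weight[1] = {2};

    int E_new = E + insert_size - del_size;   // 3 + 1 - 1 = 3
    int mOffset[3], mEdges[3];
    unsigned int mWeight[3];

    int k = 0;
    mOffset[0] = 0;
    for (int i = 0; i < N; i++) {
        int s = offset[i], e = (i != N-1) ? offset[i+1] : E;
        for (; s < e; s++)                    // copy surviving base edges
            if (edges[s] != -1) { mEdges[k] = edges[s]; mWeight[k] = weight[s]; k++; }
        s = diff_offset[i]; e = (i != N-1) ? diff_offset[i+1] : insert_size;
        for (; s < e; s++)                    // append inserted edges
            if (diff_edges[s] != -1) { mEdges[k] = diff_edges[s]; mWeight[k] = diff_weight[s]; k++; }
        if (i != N-1) mOffset[i+1] = k;
    }
    for (int i = 0; i < E_new; i++) printf("%d(w=%u) ", mEdges[i], mWeight[i]);
    printf("\n");                             // prints: 1(w=4) 2(w=7) 0(w=2)
}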
46e07c3dc839d23df23ff4c3a2b800380a16cbe4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

extern "C"
extern "C"
__global__ void dropoutTrain(
    const float* arguments,
    float* dropoutMask,
    float* results,
    const float dropoutFraction,
    const long size
)
{
    const int X = gridDim.x;
    const int index = gridDim.y * X * threadIdx.x + X * blockIdx.y + blockIdx.x;

    if(index < size)
    {
        const float mask = dropoutFraction < dropoutMask[index];
        dropoutMask[index] = mask;
        results[index] = mask * arguments[index];
    }
}
46e07c3dc839d23df23ff4c3a2b800380a16cbe4.cu
#include "includes.h"

extern "C"
extern "C"
__global__ void dropoutTrain(
    const float* arguments,
    float* dropoutMask,
    float* results,
    const float dropoutFraction,
    const long size
)
{
    const int X = gridDim.x;
    const int index = gridDim.y * X * threadIdx.x + X * blockIdx.y + blockIdx.x;

    if(index < size)
    {
        const float mask = dropoutFraction < dropoutMask[index];
        dropoutMask[index] = mask;
        results[index] = mask * arguments[index];
    }
}
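dropoutTrain expects dropoutMask to be pre-filled with uniform (0,1] random values; it converts each entry into a 0/1 keep mask (kept with probability 1 - dropoutFraction, with no 1/(1-p) rescaling in the kernel itself) and multiplies the input by it. A hedged host-side sketch of one way to drive it with cuRAND; the generator setup, grid shape, and wrapper name are assumptions, not taken from the surrounding project:

#include <cuda_runtime.h>
#include <curand.h>

// Hypothetical wrapper; assumes dropoutTrain (above) is visible in this translation unit
// and that size fits in gridDim.x * gridDim.y * blockDim.x with gridDim.y <= 65535.
void runDropoutTrain(const float* d_in, float* d_mask, float* d_out,
                     float dropoutFraction, long size)
{
    // Pre-fill the mask buffer with uniform (0,1] values; the kernel overwrites it
    // with the 0/1 keep mask.
    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
    curandGenerateUniform(gen, d_mask, (size_t)size);

    // The kernel's index is gridDim.y*gridDim.x*threadIdx.x + gridDim.x*blockIdx.y + blockIdx.x,
    // so the launch must satisfy gridDim.x * gridDim.y * blockDim.x >= size.
    const int  threads = 256;
    const long perY    = 1024L * threads;
    unsigned   gy      = (unsigned)((size + perY - 1) / perY);
    if (gy == 0) gy = 1;
    dropoutTrain<<<dim3(1024, gy), threads>>>(d_in, d_mask, d_out, dropoutFraction, size);

    curandDestroyGenerator(gen);
    cudaDeviceSynchronize();
}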
a61dda786d65ff4c172f90062037a8bd14c7c55d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void g_One_backpropagation(
    float* _curDelta,
    float* _w,
    float* _nextDelta,
    int rows, int cols, int channels)
{
    int row     = blockIdx.x;
    int channel = blockIdx.y;
    int skip    = channel * rows * cols + row * cols;

    float* curDelta  = _curDelta  + skip;
    float* nextDelta = _nextDelta + skip;
    float* w         = _w + channel * cols;

    for(int i = 0; i < cols; i += blockDim.x){
        int id = i + threadIdx.x;
        if(id < cols){
            nextDelta[id] = curDelta[id] * w[id];
        }
    }
}
a61dda786d65ff4c172f90062037a8bd14c7c55d.cu
#include "includes.h"

__global__ void g_One_backpropagation(
    float* _curDelta,
    float* _w,
    float* _nextDelta,
    int rows, int cols, int channels)
{
    int row     = blockIdx.x;
    int channel = blockIdx.y;
    int skip    = channel * rows * cols + row * cols;

    float* curDelta  = _curDelta  + skip;
    float* nextDelta = _nextDelta + skip;
    float* w         = _w + channel * cols;

    for(int i = 0; i < cols; i += blockDim.x){
        int id = i + threadIdx.x;
        if(id < cols){
            nextDelta[id] = curDelta[id] * w[id];
        }
    }
}
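g_One_backpropagation scales each channel's delta row by that channel's weight row, i.e. nextDelta[c][r][j] = curDelta[c][r][j] * w[c][j], launched with one block per (row, channel). A short CPU reference with the same flattened layout, handy for checking a device result (function and variable names are illustrative):

#include <vector>

// CPU reference: nextDelta[c][r][j] = curDelta[c][r][j] * w[c][j]
// using the same flattened offsets as the kernel (skip = c*rows*cols + r*cols).
std::vector<float> oneBackpropReference(const std::vector<float>& curDelta,
                                        const std::vector<float>& w,
                                        int rows, int cols, int channels)
{
    std::vector<float> nextDelta(curDelta.size());
    for (int c = 0; c < channels; ++c)
        for (int r = 0; r < rows; ++r)
            for (int j = 0; j < cols; ++j) {
                int idx = c * rows * cols + r * cols + j;
                nextDelta[idx] = curDelta[idx] * w[c * cols + j];
            }
    return nextDelta;
}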
02b4b3539838ea0d1c7938dd293baa229a790d4f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from ztrtri_upper.cu normal z -> d, Wed Sep 17 15:08:23 2014 @author Peng Du @author Tingxing Dong @author Mark Gates This file implements upper case, and is called by dtrtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "common_magma.h" #include "dtrtri.h" /* This inverts the diagonal IB by IB inner blocks of A, and stores the results in d_dinvA. Each thread block with IB threads does one inner block. Each thread deals with one row of the inner block. */ __global__ void dtrtri_diag_kernel_upper( magma_diag_t diag, int n, const double *A, int lda, double *d_dinvA) { int tx = threadIdx.x; int bx = blockIdx.x; int blk_ind = bx*IB; //int ind = blk_ind + tx; A += blk_ind + blk_ind*lda; // A(blk_ind, blk_ind) // TODO sB should be [IB][IB+1] to avoid bank conflicts, right? __shared__ double sB[IB*IB]; double y_tx; // load upper triangle of inner block of A; zero lower triangle & outside matrix #pragma unroll for( int j=0; j < IB; j++ ) { if (tx <= j && blk_ind + j < n) { sB[tx + j*IB] = A[tx + j*lda]; } else { sB[tx + j*IB] = MAGMA_D_ZERO; } } __syncthreads(); // invert the diagonal if (diag == MagmaUnit) { sB[tx + tx*IB] = MAGMA_D_ONE; } else { if ( sB[tx + tx*IB] == MAGMA_D_ZERO ) { // singular or outside matrix sB[tx + tx*IB] = MAGMA_D_ONE; } else { sB[tx + tx*IB] = MAGMA_D_ONE / sB[tx + tx*IB]; } } // compute elements 0:j-1 of j-th column. for( int j=1; j < IB; j++ ) { if ( tx < j ) { // trmv: y = sB(0:j-1, 0:j-1) * sB(0:j-1, j) // each thread sums one element, y[tx] y_tx = MAGMA_D_ZERO; #pragma unroll for( int k=0; k < j; k++ ) y_tx += sB[tx + k*IB] * sB[k + j*IB]; // scal: sB(0:j-1, j) = -sB(j,j) * y sB[tx + j*IB] = -sB[j + j*IB] * y_tx; } __syncthreads(); } // go to the (bx / ib_per_NB) outer NB*NB block, // then the (bx % ib_per_NB) inner IB*IB block inside that. int ib_per_NB = NB/IB; d_dinvA += (bx / ib_per_NB)*NB*NB + (bx % ib_per_NB)*(NB*IB + IB); // write result #pragma unroll for( int j=0; j < IB; j++ ) { d_dinvA[tx + j*NB] = sB[tx + j*IB]; } } /* Let A be an NB*NB upper triangular matrix, and B its inverse. Then the block decomposition [ A11 A12 ] * [ B11 B12 ] = [ I 0 ] [ 0 A22 ] [ 0 B22 ] [ 0 I ] yields A11*B11 = I ==> B11 = A11^{-1}, A22*B22 = I ==> B22 = A22^{-1}, A11*B12 + A12*B22 = 0 ==> B12 = -A11^{-1}*A12*B22 = -B11*A12*B22. dtrtri_diag_kernel inverts A11 and A22. triple_dgemm16 routines multiply: part 1: B12 = A12 * B22, part 2: B12 = -B11 * B12. At this level, inner block is jb=16, with one 4x4 thread block per inner block. Each submatrix Aij and Bij is jb x jb. The submatrix dimension is multiplied by 2 at each level, so the next level is jb*2 = 32. A "page" is the next bigger block, here jb*2=32, [ B11 B12 ] which contains [ 0 B22 ]. Outer blocks are NB x NB. A12 may have < jb cols, but is guaranteed to have jb rows since A22 is on the bottom. Unfortunately, this means checking every single reference. We could easily verify that A12 is full, and select between a fast version without checks and a slow version with checks. B is stored in workspace that is a full multiple of NB x NB; no checks needed. We split this into part1 & part2 to synchronize all blocks and make sure that writes to B12 are observed by all blocks. 
*/ /* * B12 = A12 * B22 */ __global__ void triple_dgemm16_part1_upper( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; int col = page*jb*2 + jb; __shared__ double sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const double *A, *B; double *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const double *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; double rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 4 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_dgemm16_part2_upper( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; __shared__ double sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % 
pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const double *A, *B; double *C; int lda = NB; // shadows lda argument int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 C = d_dinvA + jb*NB; // B12 B = C; // B12, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const double *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; double rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 4 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B12 = A12 * B22 */ __global__ void triple_dgemm32_part1_upper( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; int col = page*jb*2 + jb; __shared__ double sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. 
int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const double *A, *B; double *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const double *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; double rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 8 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_dgemm32_part2_upper( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; //int col = page*jb*2 + jb; __shared__ double sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. 
int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const double *A, *B; double *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 C = d_dinvA + jb*NB; // B12 B = C; // B12, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const double *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; double rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 8 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B12 = A12 * B22 */ __global__ void triple_dgemm64_part1_upper( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; int col = page*jb*2 + jb; __shared__ double sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. 
int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const double *A, *B; double *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const double *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; double rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_dgemm64_part2_upper( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; //int col = page*jb*2 + jb; __shared__ double sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. 
int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const double *A, *B; double *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 C = d_dinvA + jb*NB; // B12 B = C; // B12, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const double *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; double rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B12 = A12 * B22 */ __global__ void triple_dgemm_above64_part1_upper( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; int col = page*jb*2 + jb; __shared__ double sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const double *A, *B; double *C; int ldb = NB; int ldc = NB; // For jb > 64, we process B12 as gridDim.x sections of 64 rows each, with gridDim.x > 1. // Each section needs all of the B matrix, so C cannot overwrite B. // Therefore, store B21 temporarily in the previously unused B12 matrix // (i.e., above diagonal), then in part 3, zero out B12. // // Kernels with jb <= 64 don't have this problem, because only the // NT x 16 section of C that overwrites the same section of B depends // on that section of B. 
// // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb; // B12; write to B21 temp location A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const double *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; double rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_dgemm_above64_part2_upper( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; //int col = page*jb*2 + jb; __shared__ double sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. 
int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const double *A, *B; double *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 B = d_dinvA + jb; // B12, read from B21 temp location C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const double *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; double rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * zero out B21 temp location */ __global__ void triple_dgemm_above64_part3_upper( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part three---------------------------// { // zero out B21 temp location double *B21; int ldb = NB; B21 = d_dinvA + jb; B21 += ibx + id + iby*ldb; #pragma unroll for( int i = 0; i < 16; i++ ) { B21[i*ldb] = MAGMA_D_ZERO; } } }
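Every triple_dgemm kernel above begins with the same pointer arithmetic to locate its (jb*2) x (jb*2) page inside d_dinvA. The short host-side sketch below is not part of MAGMA; it simply evaluates that offset for a few illustrative values (NB = 128 and jb = 16 are assumptions chosen for the example, as is the helper name page_offset) to show that pages are laid out along the diagonal of consecutive NB x NB column-major inverse blocks.

// Illustrative sketch only: reproduces the page-offset arithmetic used at the
// top of each triple_dgemm kernel, for assumed sizes NB = 128, jb = 16.
#include <cstdio>

static long page_offset(long page, long jb, long NB)
{
    long pages_per_NB = NB / (jb * 2);
    return (page / pages_per_NB) * NB * NB                  // which NB x NB inverse block
         + (page % pages_per_NB) * (jb * 2 * NB + jb * 2);  // diagonal page inside that block
}

int main()
{
    const long NB = 128, jb = 16;   // example values, not taken from the build
    for (long page = 0; page < 6; ++page)
        printf("page %ld -> offset %ld\n", page, page_offset(page, jb, NB));
    return 0;
}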
02b4b3539838ea0d1c7938dd293baa229a790d4f.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from ztrtri_upper.cu normal z -> d, Wed Sep 17 15:08:23 2014 @author Peng Du @author Tingxing Dong @author Mark Gates This file implements upper case, and is called by dtrtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "common_magma.h" #include "dtrtri.h" /* This inverts the diagonal IB by IB inner blocks of A, and stores the results in d_dinvA. Each thread block with IB threads does one inner block. Each thread deals with one row of the inner block. */ __global__ void dtrtri_diag_kernel_upper( magma_diag_t diag, int n, const double *A, int lda, double *d_dinvA) { int tx = threadIdx.x; int bx = blockIdx.x; int blk_ind = bx*IB; //int ind = blk_ind + tx; A += blk_ind + blk_ind*lda; // A(blk_ind, blk_ind) // TODO sB should be [IB][IB+1] to avoid bank conflicts, right? __shared__ double sB[IB*IB]; double y_tx; // load upper triangle of inner block of A; zero lower triangle & outside matrix #pragma unroll for( int j=0; j < IB; j++ ) { if (tx <= j && blk_ind + j < n) { sB[tx + j*IB] = A[tx + j*lda]; } else { sB[tx + j*IB] = MAGMA_D_ZERO; } } __syncthreads(); // invert the diagonal if (diag == MagmaUnit) { sB[tx + tx*IB] = MAGMA_D_ONE; } else { if ( sB[tx + tx*IB] == MAGMA_D_ZERO ) { // singular or outside matrix sB[tx + tx*IB] = MAGMA_D_ONE; } else { sB[tx + tx*IB] = MAGMA_D_ONE / sB[tx + tx*IB]; } } // compute elements 0:j-1 of j-th column. for( int j=1; j < IB; j++ ) { if ( tx < j ) { // trmv: y = sB(0:j-1, 0:j-1) * sB(0:j-1, j) // each thread sums one element, y[tx] y_tx = MAGMA_D_ZERO; #pragma unroll for( int k=0; k < j; k++ ) y_tx += sB[tx + k*IB] * sB[k + j*IB]; // scal: sB(0:j-1, j) = -sB(j,j) * y sB[tx + j*IB] = -sB[j + j*IB] * y_tx; } __syncthreads(); } // go to the (bx / ib_per_NB) outer NB*NB block, // then the (bx % ib_per_NB) inner IB*IB block inside that. int ib_per_NB = NB/IB; d_dinvA += (bx / ib_per_NB)*NB*NB + (bx % ib_per_NB)*(NB*IB + IB); // write result #pragma unroll for( int j=0; j < IB; j++ ) { d_dinvA[tx + j*NB] = sB[tx + j*IB]; } } /* Let A be an NB*NB upper triangular matrix, and B its inverse. Then the block decomposition [ A11 A12 ] * [ B11 B12 ] = [ I 0 ] [ 0 A22 ] [ 0 B22 ] [ 0 I ] yields A11*B11 = I ==> B11 = A11^{-1}, A22*B22 = I ==> B22 = A22^{-1}, A11*B12 + A12*B22 = 0 ==> B12 = -A11^{-1}*A12*B22 = -B11*A12*B22. dtrtri_diag_kernel inverts A11 and A22. triple_dgemm16 routines multiply: part 1: B12 = A12 * B22, part 2: B12 = -B11 * B12. At this level, inner block is jb=16, with one 4x4 thread block per inner block. Each submatrix Aij and Bij is jb x jb. The submatrix dimension is multiplied by 2 at each level, so the next level is jb*2 = 32. A "page" is the next bigger block, here jb*2=32, [ B11 B12 ] which contains [ 0 B22 ]. Outer blocks are NB x NB. A12 may have < jb cols, but is guaranteed to have jb rows since A22 is on the bottom. Unfortunately, this means checking every single reference. We could easily verify that A12 is full, and select between a fast version without checks and a slow version with checks. B is stored in workspace that is a full multiple of NB x NB; no checks needed. We split this into part1 & part2 to synchronize all blocks and make sure that writes to B12 are observed by all blocks. 
*/ /* * B12 = A12 * B22 */ __global__ void triple_dgemm16_part1_upper( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; int col = page*jb*2 + jb; __shared__ double sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const double *A, *B; double *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const double *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; double rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 4 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_dgemm16_part2_upper( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; __shared__ double sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % 
pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const double *A, *B; double *C; int lda = NB; // shadows lda argument int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 C = d_dinvA + jb*NB; // B12 B = C; // B12, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const double *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; double rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 4 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B12 = A12 * B22 */ __global__ void triple_dgemm32_part1_upper( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; int col = page*jb*2 + jb; __shared__ double sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. 
int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const double *A, *B; double *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const double *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; double rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 8 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_dgemm32_part2_upper( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; //int col = page*jb*2 + jb; __shared__ double sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. 
int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const double *A, *B; double *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 C = d_dinvA + jb*NB; // B12 B = C; // B12, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const double *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; double rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 8 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B12 = A12 * B22 */ __global__ void triple_dgemm64_part1_upper( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; int col = page*jb*2 + jb; __shared__ double sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. 
int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const double *A, *B; double *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const double *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; double rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_dgemm64_part2_upper( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; //int col = page*jb*2 + jb; __shared__ double sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. 
int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const double *A, *B; double *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 C = d_dinvA + jb*NB; // B12 B = C; // B12, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const double *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; double rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B12 = A12 * B22 */ __global__ void triple_dgemm_above64_part1_upper( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; int col = page*jb*2 + jb; __shared__ double sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const double *A, *B; double *C; int ldb = NB; int ldc = NB; // For jb > 64, we process B12 as gridDim.x sections of 64 rows each, with gridDim.x > 1. // Each section needs all of the B matrix, so C cannot overwrite B. // Therefore, store B21 temporarily in the previously unused B12 matrix // (i.e., above diagonal), then in part 3, zero out B12. // // Kernels with jb <= 64 don't have this problem, because only the // NT x 16 section of C that overwrites the same section of B depends // on that section of B. 
// // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb; // B12; write to B21 temp location A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const double *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; double rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_dgemm_above64_part2_upper( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; //int col = page*jb*2 + jb; __shared__ double sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. 
int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const double *A, *B; double *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 B = d_dinvA + jb; // B12, read from B21 temp location C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const double *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; double rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * zero out B21 temp location */ __global__ void triple_dgemm_above64_part3_upper( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part three---------------------------// { // zero out B21 temp location double *B21; int ldb = NB; B21 = d_dinvA + jb; B21 += ibx + id + iby*ldb; #pragma unroll for( int i = 0; i < 16; i++ ) { B21[i*ldb] = MAGMA_D_ZERO; } } }
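The comment block near the top of this file derives B12 = -B11 * A12 * B22 from the block factorization of the identity. The standalone check below is not MAGMA code; it verifies that formula in the smallest possible case, where every block is 1 x 1 and A is a 2 x 2 upper triangular matrix with arbitrary nonzero diagonal entries.

// Tiny standalone check of B12 = -B11 * A12 * B22 for A = [ a  c ; 0  d ].
#include <cassert>
#include <cmath>
#include <cstdio>

int main()
{
    double a = 2.0, c = 3.0, d = 5.0;   // arbitrary, nonsingular diagonal
    double B11 = 1.0 / a;               // A11^{-1}
    double B22 = 1.0 / d;               // A22^{-1}
    double B12 = -B11 * c * B22;        // the formula from the comment block
    // Multiply A by the assembled inverse and confirm it gives the identity.
    double I00 = a * B11;               // expected 1
    double I01 = a * B12 + c * B22;     // expected 0
    double I11 = d * B22;               // expected 1
    assert(std::fabs(I00 - 1.0) < 1e-12);
    assert(std::fabs(I01) < 1e-12);
    assert(std::fabs(I11 - 1.0) < 1e-12);
    printf("B12 = %g, and A * B == I holds\n", B12);
    return 0;
}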
827aba018dc8acec9aa68656c3e46389f1a0653f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include "ising_cuda.h" #define N 700 #define K 200 #define SPINS_PER_THREAD_DIM 3 // max = 3 | if SPINS_PER_THREAD_DIM > 3 the 48kB of Shared memory wil not be enough and you will get a compilation error. #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } __global__ void checkForNoChanges(int *G, int *H, int *checkForNoChanges, int n){ int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if( (i < n) && (j < n)){ if( G[i*n + j] != H[i*n + j] ){ checkForNoChanges[0] = 1; } } } __global__ void ising(int *G, int *H, double *w, int n){ /* Block has 32 x 32 Threads. Every Thread deals with SPINS_PER_THREAD_DIM x SPINS_PER_THREAD_DIM Spins. So the whole Block deals with a Lattice of (32 x SPINS_PER_THREAD_DIM) x (32 x SPINS_PER_THREAD_DIM) Spins. In order to make the computations for the Spins that are in the boundaries of this Lattice we need some extra Spins. So finally we need to load on shared memory (32 x SPINS_PER_THREAD_DIM + 4) x (32 x SPINS_PER_THREAD_DIM + 4) Spins. */ __shared__ int sharedSpins[(32*SPINS_PER_THREAD_DIM + 4)*(32*SPINS_PER_THREAD_DIM + 4)]; int blockSpinsDim = 32*SPINS_PER_THREAD_DIM; int sharedSpinsDim = blockSpinsDim + 4; // Load data to Shared memory for(int k = threadIdx.x; k < sharedSpinsDim ; k = k + 32){ for(int l = threadIdx.y; l < sharedSpinsDim ; l = l + 32){ if( ((k + blockSpinsDim*blockIdx.x) < (n + 4)) && ((l + blockSpinsDim*blockIdx.y) < (n + 4))) sharedSpins[k*(sharedSpinsDim) + l] = G[((k + blockSpinsDim*blockIdx.x - 2 + n)%n)*n + ((l + blockSpinsDim*blockIdx.y - 2 + n)%n)]; } } __syncthreads(); // Compute new spins using data from shared memory int i_start = threadIdx.x*SPINS_PER_THREAD_DIM; int j_start = threadIdx.y*SPINS_PER_THREAD_DIM; if(blockIdx.x*blockSpinsDim + i_start < n && blockIdx.y*blockSpinsDim + j_start < n){ int i_end = i_start + SPINS_PER_THREAD_DIM; int j_end = j_start + SPINS_PER_THREAD_DIM; if(blockIdx.x*blockSpinsDim + i_end > n) if(blockIdx.x != 0) i_end = n%(blockIdx.x*blockSpinsDim); else i_end = n; if(blockIdx.y*blockSpinsDim + j_end > n) if(blockIdx.y != 0) j_end = n%(blockIdx.y*blockSpinsDim); else j_end = n; double influence = 0.0; for(int k = i_start; k < i_end; k++){ for(int l = j_start; l < j_end; l++){ for(int x = 0; x < 5; x++){ for(int y = 0; y < 5; y++){ influence += w[x*5 + y]*sharedSpins[((k + 2 - 2 + x + sharedSpinsDim)%sharedSpinsDim)*sharedSpinsDim + ((l + 2 - 2 + y + sharedSpinsDim)%sharedSpinsDim)]; } } H[(blockIdx.x*blockSpinsDim + k)*n + (blockIdx.y*blockSpinsDim + l)] = sharedSpins[(k + 2)*sharedSpinsDim + (l + 2)]; if(influence > 0.000000001){ H[(blockIdx.x*blockSpinsDim + k)*n + (blockIdx.y*blockSpinsDim + l)] = 1; } else if(influence < -0.000000001) { H[(blockIdx.x*blockSpinsDim + k)*n + (blockIdx.y*blockSpinsDim + l)] = -1; } influence = 0.0; } } } } int main(int argc, char** argv){ // Declare all variables int n = 0; int k = 0; if (argc != 3) { n = N; k = K; } else { n = atoi(argv[1]); k = atoi(argv[2]); printf("Input n=%d k=%d", n, k); } // Check if SPINS_PER_THREAD_DIM is less than 3 if( SPINS_PER_THREAD_DIM > 3){ printf("ERROR: SPINS_PER_THREAD_DIM must be less than 3. 
Aborting..."); return -1; } int *G, *G_final, *G_dev, *H_dev; double *w_dev; double w[25] = {0.004 , 0.016 , 0.026 , 0.016 , 0.004 , 0.016 , 0.071 , 0.117 , 0.071 , 0.016 , 0.026 , 0.117 , 0 , 0.117 , 0.026 , 0.016 , 0.071 , 0.117 , 0.071 , 0.016 , 0.004 , 0.016 , 0.026 , 0.016 , 0.004}; int *checkForNoChanges_SomeSpins; int *checkForNoChanges_AllSpins; int *checkForNoChanges_SomeSpins_dev; int *checkForNoChanges_AllSpins_dev; int iterations = k; // Allocate host memory G = (int*)malloc(n*n*sizeof(int)); G_final = (int*)malloc(n*n*sizeof(int)); if(G == NULL || G_final == NULL){ printf("ERROR: Cannot allocate host memory. Aborting..."); return 1; } checkForNoChanges_SomeSpins = (int*)malloc(sizeof(int)); checkForNoChanges_AllSpins = (int*)malloc(sizeof(int)); // Allocate device memory HANDLE_ERROR( hipMalloc((void**) &G_dev, n*n*sizeof(int)) ); HANDLE_ERROR( hipMalloc((void**) &H_dev, n*n*sizeof(int)) ); HANDLE_ERROR( hipMalloc((void**) &w_dev, 25*sizeof(double))); HANDLE_ERROR( hipMalloc((void**) &checkForNoChanges_SomeSpins_dev, sizeof(int) )); HANDLE_ERROR( hipMalloc((void**) &checkForNoChanges_AllSpins_dev, sizeof(int) )); // Write to host memory /* Assign random values to G) */ int spin[] = {-1, 1}; for(int i = 0; i < n; i++) for(int j = 0; j < n; j++) G[i*n + j] = spin[rand()%2]; /* Assign values to checking variables */ checkForNoChanges_SomeSpins[0] = 0; checkForNoChanges_AllSpins[0] = 0; // Copy host memory to device memory HANDLE_ERROR( hipMemcpy(G_dev, G, n*n*sizeof(int), hipMemcpyHostToDevice) ); HANDLE_ERROR( hipMemcpy(w_dev, w, 25*sizeof(double), hipMemcpyHostToDevice)); HANDLE_ERROR( hipMemcpy(checkForNoChanges_SomeSpins_dev, checkForNoChanges_SomeSpins, sizeof(int), hipMemcpyHostToDevice)); HANDLE_ERROR( hipMemcpy(checkForNoChanges_AllSpins_dev, checkForNoChanges_AllSpins, sizeof(int), hipMemcpyHostToDevice)); printf("\nComputing...\n"); // capture start time hipEvent_t start, stop; HANDLE_ERROR( hipEventCreate( &start ) ); HANDLE_ERROR( hipEventCreate( &stop ) ); HANDLE_ERROR( hipEventRecord( start, 0 ) ); // Set kernel dimesions dim3 ising_Grid((int) ceil((double)N/(double)(32*SPINS_PER_THREAD_DIM)), (int) ceil((double)N/(double)(32*SPINS_PER_THREAD_DIM))); dim3 ising_Block(32, 32); dim3 checkForNoChanges_SomeSpins_Grid(2, 2); dim3 checkForNoChanges_Block(32, 32); dim3 checkForNoChanges_AllSpins_Grid((int) ceil(((double)N)/32.0), (int) ceil(((double)N)/32.0)); // Execute kernel on the device for(int q = 0; q < k; q++){ if( q%2 == 0){ hipLaunchKernelGGL(( ising), dim3(ising_Grid), dim3(ising_Block) , 0, 0, G_dev, H_dev, w_dev, n); // Check if no changes are made hipLaunchKernelGGL(( checkForNoChanges), dim3(checkForNoChanges_SomeSpins_Grid), dim3(checkForNoChanges_Block), 0, 0, G_dev, H_dev, checkForNoChanges_SomeSpins_dev, n); HANDLE_ERROR( hipMemcpy(checkForNoChanges_SomeSpins, checkForNoChanges_SomeSpins_dev, sizeof(int), hipMemcpyDeviceToHost) ); if(checkForNoChanges_SomeSpins[0] == 0){ hipLaunchKernelGGL(( checkForNoChanges), dim3(checkForNoChanges_AllSpins_Grid), dim3(checkForNoChanges_Block), 0, 0, G_dev, H_dev, checkForNoChanges_AllSpins_dev, n); HANDLE_ERROR( hipMemcpy(checkForNoChanges_AllSpins, checkForNoChanges_AllSpins_dev, sizeof(int), hipMemcpyDeviceToHost) ); if( checkForNoChanges_AllSpins[0] == 0){ printf("\nNo changes: %d iterations\n", q); iterations = q; break; } else{ checkForNoChanges_AllSpins[0] = 0; HANDLE_ERROR( hipMemcpy(checkForNoChanges_AllSpins_dev, checkForNoChanges_AllSpins, sizeof(int), hipMemcpyHostToDevice)); } } else{ 
checkForNoChanges_SomeSpins[0] = 0; HANDLE_ERROR( hipMemcpy(checkForNoChanges_SomeSpins_dev, checkForNoChanges_SomeSpins, sizeof(int), hipMemcpyHostToDevice)); } } else{ hipLaunchKernelGGL(( ising), dim3(ising_Grid), dim3(ising_Block) , 0, 0, H_dev, G_dev, w_dev, n); // Check if no changes are made hipLaunchKernelGGL(( checkForNoChanges), dim3(checkForNoChanges_SomeSpins_Grid), dim3(checkForNoChanges_Block), 0, 0, G_dev, H_dev, checkForNoChanges_SomeSpins_dev, n); HANDLE_ERROR( hipMemcpy(checkForNoChanges_SomeSpins, checkForNoChanges_SomeSpins_dev, sizeof(int), hipMemcpyDeviceToHost) ); if(checkForNoChanges_SomeSpins[0] == 0){ hipLaunchKernelGGL(( checkForNoChanges), dim3(checkForNoChanges_AllSpins_Grid), dim3(checkForNoChanges_Block), 0, 0, G_dev, H_dev, checkForNoChanges_AllSpins_dev, n); HANDLE_ERROR( hipMemcpy(checkForNoChanges_AllSpins, checkForNoChanges_AllSpins_dev, sizeof(int), hipMemcpyDeviceToHost) ); if( checkForNoChanges_AllSpins[0] == 0){ printf("\nNo changes: %d iterations\n", q); iterations = q; break; } else{ checkForNoChanges_AllSpins[0] = 0; HANDLE_ERROR( hipMemcpy(checkForNoChanges_AllSpins_dev, checkForNoChanges_AllSpins, sizeof(int), hipMemcpyHostToDevice)); } } else{ checkForNoChanges_SomeSpins[0] = 0; HANDLE_ERROR( hipMemcpy(checkForNoChanges_SomeSpins_dev, checkForNoChanges_SomeSpins, sizeof(int), hipMemcpyHostToDevice)); } } } // Write GPU results to host memory if( k%2 == 1) HANDLE_ERROR( hipMemcpy(G_final, H_dev, n*n*sizeof(int), hipMemcpyDeviceToHost) ); else HANDLE_ERROR( hipMemcpy(G_final, G_dev, n*n*sizeof(int), hipMemcpyDeviceToHost) ); // Capture end time HANDLE_ERROR( hipEventRecord( stop, 0 ) ); HANDLE_ERROR( hipEventSynchronize( stop ) ); float elapsedTime; HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) ); printf( "\nTime used for parallel call: %3.3f sec\n", elapsedTime*0.001 ); // Free device memory HANDLE_ERROR( hipFree(G_dev) ); HANDLE_ERROR( hipFree(H_dev) ); HANDLE_ERROR( hipFree(checkForNoChanges_SomeSpins_dev)); HANDLE_ERROR( hipFree(checkForNoChanges_AllSpins_dev)); HANDLE_ERROR( hipEventDestroy( start ) ); HANDLE_ERROR( hipEventDestroy( stop ) ); // Validate results validate(G, G_final, w, iterations, n); // Free host memory free(G); free(G_final); free(checkForNoChanges_SomeSpins); free(checkForNoChanges_AllSpins); return 0; } void validate(int *G, int *G_final, double *w, int k, int n){ printf("\nValidating...\n"); int counter = 0; clock_t start, end; double time_used; start = clock(); // Run sequential code ising_sequential(G, w, k, n); end = clock(); time_used = ((double) (end - start)) / CLOCKS_PER_SEC; printf("\nTime used for sequential call: %3.3f sec\n",time_used); // Validate for(int i = 0; i < n; i++){ for(int j = 0; j < n; j++){ if(G[i*n + j] != G_final[i*n + j]){ printf("\nWRONG"); printf("\n%d %d",i, j); printf("\n%d %d\n",G[i*n + j],G_final[i*n + j]); counter++; } } } if(counter == 0) printf("\nValidation: CORRECT\n"); else { printf("\nValidation: Wrong\n"); printf("\n%d wrong values\n",counter); } } void ising_sequential( int *G, double *w, int k, int n){ int *H, *temp; double influence = 0.0; H = (int*)malloc(n*n*sizeof(int)); if(H == NULL){ printf("ERROR: Cannot allocate memory for H. 
Aborting..."); } for(int q = 0; q < k; q++){ for(int i = 0; i < n; i++){ for(int j = 0; j < n; j++){ for(int x = 0; x < 5; x++){ for(int y = 0; y < 5; y++){ influence += w[x*5 + y]*G[((i - 2 + x + n)%n)*n + ((j - 2 + y + n)%n)]; } } H[i*n + j] = G[i*n + j]; if(influence > 0.000000001){ H[i*n + j] = 1; } else if(influence < -0.000000001) { H[i*n + j] = -1; } influence = 0.0; } } temp = G; G = H; H = temp; } if(k%2 == 1){ temp = G; G = H; H = temp; for(int i = 0; i < n; i++) for(int j = 0; j < n; j++) G[i*n + j] = H[i*n + j]; } }
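Both the shared-memory halo load in the ising kernel and ising_sequential rely on the wrap ((i - 2 + x + n) % n) to implement periodic boundary conditions without branching. The small standalone program below is separate from the file above; it uses an arbitrary lattice size n = 7 and prints the wrapped neighbor rows for spins near the edges, just to make that index arithmetic concrete.

// Illustration only: the periodic wrap used by the 5x5 stencil above.
#include <cstdio>

int main()
{
    int n = 7;                          // example lattice size
    int rows[] = {0, 1, 6};             // rows near the top and bottom edges
    for (int i : rows) {
        for (int x = 0; x < 5; ++x) {   // stencil offsets -2 .. +2
            int wrapped = (i - 2 + x + n) % n;
            printf("i=%d x=%d -> neighbor row %d\n", i, x, wrapped);
        }
    }
    return 0;
}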
827aba018dc8acec9aa68656c3e46389f1a0653f.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include "ising_cuda.h" #define N 700 #define K 200 #define SPINS_PER_THREAD_DIM 3 // max = 3 | if SPINS_PER_THREAD_DIM > 3 the 48kB of Shared memory wil not be enough and you will get a compilation error. #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } __global__ void checkForNoChanges(int *G, int *H, int *checkForNoChanges, int n){ int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if( (i < n) && (j < n)){ if( G[i*n + j] != H[i*n + j] ){ checkForNoChanges[0] = 1; } } } __global__ void ising(int *G, int *H, double *w, int n){ /* Block has 32 x 32 Threads. Every Thread deals with SPINS_PER_THREAD_DIM x SPINS_PER_THREAD_DIM Spins. So the whole Block deals with a Lattice of (32 x SPINS_PER_THREAD_DIM) x (32 x SPINS_PER_THREAD_DIM) Spins. In order to make the computations for the Spins that are in the boundaries of this Lattice we need some extra Spins. So finally we need to load on shared memory (32 x SPINS_PER_THREAD_DIM + 4) x (32 x SPINS_PER_THREAD_DIM + 4) Spins. */ __shared__ int sharedSpins[(32*SPINS_PER_THREAD_DIM + 4)*(32*SPINS_PER_THREAD_DIM + 4)]; int blockSpinsDim = 32*SPINS_PER_THREAD_DIM; int sharedSpinsDim = blockSpinsDim + 4; // Load data to Shared memory for(int k = threadIdx.x; k < sharedSpinsDim ; k = k + 32){ for(int l = threadIdx.y; l < sharedSpinsDim ; l = l + 32){ if( ((k + blockSpinsDim*blockIdx.x) < (n + 4)) && ((l + blockSpinsDim*blockIdx.y) < (n + 4))) sharedSpins[k*(sharedSpinsDim) + l] = G[((k + blockSpinsDim*blockIdx.x - 2 + n)%n)*n + ((l + blockSpinsDim*blockIdx.y - 2 + n)%n)]; } } __syncthreads(); // Compute new spins using data from shared memory int i_start = threadIdx.x*SPINS_PER_THREAD_DIM; int j_start = threadIdx.y*SPINS_PER_THREAD_DIM; if(blockIdx.x*blockSpinsDim + i_start < n && blockIdx.y*blockSpinsDim + j_start < n){ int i_end = i_start + SPINS_PER_THREAD_DIM; int j_end = j_start + SPINS_PER_THREAD_DIM; if(blockIdx.x*blockSpinsDim + i_end > n) if(blockIdx.x != 0) i_end = n%(blockIdx.x*blockSpinsDim); else i_end = n; if(blockIdx.y*blockSpinsDim + j_end > n) if(blockIdx.y != 0) j_end = n%(blockIdx.y*blockSpinsDim); else j_end = n; double influence = 0.0; for(int k = i_start; k < i_end; k++){ for(int l = j_start; l < j_end; l++){ for(int x = 0; x < 5; x++){ for(int y = 0; y < 5; y++){ influence += w[x*5 + y]*sharedSpins[((k + 2 - 2 + x + sharedSpinsDim)%sharedSpinsDim)*sharedSpinsDim + ((l + 2 - 2 + y + sharedSpinsDim)%sharedSpinsDim)]; } } H[(blockIdx.x*blockSpinsDim + k)*n + (blockIdx.y*blockSpinsDim + l)] = sharedSpins[(k + 2)*sharedSpinsDim + (l + 2)]; if(influence > 0.000000001){ H[(blockIdx.x*blockSpinsDim + k)*n + (blockIdx.y*blockSpinsDim + l)] = 1; } else if(influence < -0.000000001) { H[(blockIdx.x*blockSpinsDim + k)*n + (blockIdx.y*blockSpinsDim + l)] = -1; } influence = 0.0; } } } } int main(int argc, char** argv){ // Declare all variables int n = 0; int k = 0; if (argc != 3) { n = N; k = K; } else { n = atoi(argv[1]); k = atoi(argv[2]); printf("Input n=%d k=%d", n, k); } // Check if SPINS_PER_THREAD_DIM is less than 3 if( SPINS_PER_THREAD_DIM > 3){ printf("ERROR: SPINS_PER_THREAD_DIM must be less than 3. 
Aborting..."); return -1; } int *G, *G_final, *G_dev, *H_dev; double *w_dev; double w[25] = {0.004 , 0.016 , 0.026 , 0.016 , 0.004 , 0.016 , 0.071 , 0.117 , 0.071 , 0.016 , 0.026 , 0.117 , 0 , 0.117 , 0.026 , 0.016 , 0.071 , 0.117 , 0.071 , 0.016 , 0.004 , 0.016 , 0.026 , 0.016 , 0.004}; int *checkForNoChanges_SomeSpins; int *checkForNoChanges_AllSpins; int *checkForNoChanges_SomeSpins_dev; int *checkForNoChanges_AllSpins_dev; int iterations = k; // Allocate host memory G = (int*)malloc(n*n*sizeof(int)); G_final = (int*)malloc(n*n*sizeof(int)); if(G == NULL || G_final == NULL){ printf("ERROR: Cannot allocate host memory. Aborting..."); return 1; } checkForNoChanges_SomeSpins = (int*)malloc(sizeof(int)); checkForNoChanges_AllSpins = (int*)malloc(sizeof(int)); // Allocate device memory HANDLE_ERROR( cudaMalloc((void**) &G_dev, n*n*sizeof(int)) ); HANDLE_ERROR( cudaMalloc((void**) &H_dev, n*n*sizeof(int)) ); HANDLE_ERROR( cudaMalloc((void**) &w_dev, 25*sizeof(double))); HANDLE_ERROR( cudaMalloc((void**) &checkForNoChanges_SomeSpins_dev, sizeof(int) )); HANDLE_ERROR( cudaMalloc((void**) &checkForNoChanges_AllSpins_dev, sizeof(int) )); // Write to host memory /* Assign random values to G) */ int spin[] = {-1, 1}; for(int i = 0; i < n; i++) for(int j = 0; j < n; j++) G[i*n + j] = spin[rand()%2]; /* Assign values to checking variables */ checkForNoChanges_SomeSpins[0] = 0; checkForNoChanges_AllSpins[0] = 0; // Copy host memory to device memory HANDLE_ERROR( cudaMemcpy(G_dev, G, n*n*sizeof(int), cudaMemcpyHostToDevice) ); HANDLE_ERROR( cudaMemcpy(w_dev, w, 25*sizeof(double), cudaMemcpyHostToDevice)); HANDLE_ERROR( cudaMemcpy(checkForNoChanges_SomeSpins_dev, checkForNoChanges_SomeSpins, sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR( cudaMemcpy(checkForNoChanges_AllSpins_dev, checkForNoChanges_AllSpins, sizeof(int), cudaMemcpyHostToDevice)); printf("\nComputing...\n"); // capture start time cudaEvent_t start, stop; HANDLE_ERROR( cudaEventCreate( &start ) ); HANDLE_ERROR( cudaEventCreate( &stop ) ); HANDLE_ERROR( cudaEventRecord( start, 0 ) ); // Set kernel dimesions dim3 ising_Grid((int) ceil((double)N/(double)(32*SPINS_PER_THREAD_DIM)), (int) ceil((double)N/(double)(32*SPINS_PER_THREAD_DIM))); dim3 ising_Block(32, 32); dim3 checkForNoChanges_SomeSpins_Grid(2, 2); dim3 checkForNoChanges_Block(32, 32); dim3 checkForNoChanges_AllSpins_Grid((int) ceil(((double)N)/32.0), (int) ceil(((double)N)/32.0)); // Execute kernel on the device for(int q = 0; q < k; q++){ if( q%2 == 0){ ising<<< ising_Grid, ising_Block >>>(G_dev, H_dev, w_dev, n); // Check if no changes are made checkForNoChanges<<< checkForNoChanges_SomeSpins_Grid, checkForNoChanges_Block>>>(G_dev, H_dev, checkForNoChanges_SomeSpins_dev, n); HANDLE_ERROR( cudaMemcpy(checkForNoChanges_SomeSpins, checkForNoChanges_SomeSpins_dev, sizeof(int), cudaMemcpyDeviceToHost) ); if(checkForNoChanges_SomeSpins[0] == 0){ checkForNoChanges<<< checkForNoChanges_AllSpins_Grid, checkForNoChanges_Block>>>(G_dev, H_dev, checkForNoChanges_AllSpins_dev, n); HANDLE_ERROR( cudaMemcpy(checkForNoChanges_AllSpins, checkForNoChanges_AllSpins_dev, sizeof(int), cudaMemcpyDeviceToHost) ); if( checkForNoChanges_AllSpins[0] == 0){ printf("\nNo changes: %d iterations\n", q); iterations = q; break; } else{ checkForNoChanges_AllSpins[0] = 0; HANDLE_ERROR( cudaMemcpy(checkForNoChanges_AllSpins_dev, checkForNoChanges_AllSpins, sizeof(int), cudaMemcpyHostToDevice)); } } else{ checkForNoChanges_SomeSpins[0] = 0; HANDLE_ERROR( cudaMemcpy(checkForNoChanges_SomeSpins_dev, 
checkForNoChanges_SomeSpins, sizeof(int), cudaMemcpyHostToDevice)); } } else{ ising<<< ising_Grid, ising_Block >>>(H_dev, G_dev, w_dev, n); // Check if no changes are made checkForNoChanges<<< checkForNoChanges_SomeSpins_Grid, checkForNoChanges_Block>>>(G_dev, H_dev, checkForNoChanges_SomeSpins_dev, n); HANDLE_ERROR( cudaMemcpy(checkForNoChanges_SomeSpins, checkForNoChanges_SomeSpins_dev, sizeof(int), cudaMemcpyDeviceToHost) ); if(checkForNoChanges_SomeSpins[0] == 0){ checkForNoChanges<<< checkForNoChanges_AllSpins_Grid, checkForNoChanges_Block>>>(G_dev, H_dev, checkForNoChanges_AllSpins_dev, n); HANDLE_ERROR( cudaMemcpy(checkForNoChanges_AllSpins, checkForNoChanges_AllSpins_dev, sizeof(int), cudaMemcpyDeviceToHost) ); if( checkForNoChanges_AllSpins[0] == 0){ printf("\nNo changes: %d iterations\n", q); iterations = q; break; } else{ checkForNoChanges_AllSpins[0] = 0; HANDLE_ERROR( cudaMemcpy(checkForNoChanges_AllSpins_dev, checkForNoChanges_AllSpins, sizeof(int), cudaMemcpyHostToDevice)); } } else{ checkForNoChanges_SomeSpins[0] = 0; HANDLE_ERROR( cudaMemcpy(checkForNoChanges_SomeSpins_dev, checkForNoChanges_SomeSpins, sizeof(int), cudaMemcpyHostToDevice)); } } } // Write GPU results to host memory if( k%2 == 1) HANDLE_ERROR( cudaMemcpy(G_final, H_dev, n*n*sizeof(int), cudaMemcpyDeviceToHost) ); else HANDLE_ERROR( cudaMemcpy(G_final, G_dev, n*n*sizeof(int), cudaMemcpyDeviceToHost) ); // Capture end time HANDLE_ERROR( cudaEventRecord( stop, 0 ) ); HANDLE_ERROR( cudaEventSynchronize( stop ) ); float elapsedTime; HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) ); printf( "\nTime used for parallel call: %3.3f sec\n", elapsedTime*0.001 ); // Free device memory HANDLE_ERROR( cudaFree(G_dev) ); HANDLE_ERROR( cudaFree(H_dev) ); HANDLE_ERROR( cudaFree(checkForNoChanges_SomeSpins_dev)); HANDLE_ERROR( cudaFree(checkForNoChanges_AllSpins_dev)); HANDLE_ERROR( cudaEventDestroy( start ) ); HANDLE_ERROR( cudaEventDestroy( stop ) ); // Validate results validate(G, G_final, w, iterations, n); // Free host memory free(G); free(G_final); free(checkForNoChanges_SomeSpins); free(checkForNoChanges_AllSpins); return 0; } void validate(int *G, int *G_final, double *w, int k, int n){ printf("\nValidating...\n"); int counter = 0; clock_t start, end; double time_used; start = clock(); // Run sequential code ising_sequential(G, w, k, n); end = clock(); time_used = ((double) (end - start)) / CLOCKS_PER_SEC; printf("\nTime used for sequential call: %3.3f sec\n",time_used); // Validate for(int i = 0; i < n; i++){ for(int j = 0; j < n; j++){ if(G[i*n + j] != G_final[i*n + j]){ printf("\nWRONG"); printf("\n%d %d",i, j); printf("\n%d %d\n",G[i*n + j],G_final[i*n + j]); counter++; } } } if(counter == 0) printf("\nValidation: CORRECT\n"); else { printf("\nValidation: Wrong\n"); printf("\n%d wrong values\n",counter); } } void ising_sequential( int *G, double *w, int k, int n){ int *H, *temp; double influence = 0.0; H = (int*)malloc(n*n*sizeof(int)); if(H == NULL){ printf("ERROR: Cannot allocate memory for H. 
Aborting..."); } for(int q = 0; q < k; q++){ for(int i = 0; i < n; i++){ for(int j = 0; j < n; j++){ for(int x = 0; x < 5; x++){ for(int y = 0; y < 5; y++){ influence += w[x*5 + y]*G[((i - 2 + x + n)%n)*n + ((j - 2 + y + n)%n)]; } } H[i*n + j] = G[i*n + j]; if(influence > 0.000000001){ H[i*n + j] = 1; } else if(influence < -0.000000001) { H[i*n + j] = -1; } influence = 0.0; } } temp = G; G = H; H = temp; } if(k%2 == 1){ temp = G; G = H; H = temp; for(int i = 0; i < n; i++) for(int j = 0; j < n; j++) G[i*n + j] = H[i*n + j]; } }
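The SPINS_PER_THREAD_DIM comment caps the value at 3 because of the 48 kB static shared-memory limit. The compile-time check below is only an illustration of that arithmetic: the tile holds (32*d + 4) x (32*d + 4) ints, which is 100*100*4 = 40000 bytes for d = 3 but 132*132*4 = 69696 bytes for d = 4, exceeding 49152 bytes.

// Illustrative compile-time check of the shared-memory budget; not part of the build.
#include <cstddef>

constexpr std::size_t shared_bytes(int d)
{
    return std::size_t(32 * d + 4) * (32 * d + 4) * sizeof(int);
}

static_assert(shared_bytes(3) <= 48 * 1024, "d = 3 fits in 48 kB of shared memory");
static_assert(shared_bytes(4) >  48 * 1024, "d = 4 would not fit");

int main() { return 0; }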
365824b6a62c1edb7ff717bd8ec90c1b00597563.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<32, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 16, false, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
365824b6a62c1edb7ff717bd8ec90c1b00597563.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<32, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 16, false, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
d3dfc3851f447494ee52aab3a8c46c527733d902.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (C) 2011 Abhinav Jauhri ([email protected]), Carnegie Mellon University - Silicon Valley This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "matrix_mul.h" #include <stdio.h> #define TILE_WIDTH 2 #define BLOCK_SIZE 32 namespace cuda { __global__ void matrix_mul_kernel(float *sq_matrix_1, float *sq_matrix_2, float *sq_matrix_result, int sq_dimension) { //For repeatly access blocks, use shared memory to speed up __shared__ float local_mat_1[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float local_mat_2[BLOCK_SIZE][BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int block_offsetx = blockIdx.x * BLOCK_SIZE; int block_offsety = blockIdx.y * BLOCK_SIZE; float sum = 0.0f; #pragma unroll for(int i = 0; i < sq_dimension; i += BLOCK_SIZE) { //Transfer to 2-D matrix to avoid memory bank conflict //local_mat_1 is row-based moving for each 32*32 block in 1st input matrix if(ty + block_offsety < sq_dimension && tx + i < sq_dimension) local_mat_1[ty][tx] = sq_matrix_1[(ty + block_offsety) * sq_dimension + tx + i ]; else local_mat_1[ty][tx] = 0; //local_mat_2 is column-based moving for each 32*32 block in 2nd input matrix if(tx + block_offsetx < sq_dimension && ty + i < sq_dimension) local_mat_2[ty][tx] = sq_matrix_2[(ty + i ) * sq_dimension + tx + block_offsetx]; else local_mat_2[ty][tx] = 0; //must wait all threads finishing moving data into shared memory __syncthreads(); #pragma unroll for(int k = 0; k < BLOCK_SIZE; k++) { sum += local_mat_1[ty][k] * local_mat_2[k][tx]; } __syncthreads();//must wait all threads sum up } if(tx + block_offsetx < sq_dimension && ty + block_offsety < sq_dimension) { sq_matrix_result[(ty + block_offsety) * sq_dimension + tx + block_offsetx] = sum; //calculate the correct position of the product } } void matrix_multiplication(float *sq_matrix_1, float *sq_matrix_2, float *sq_matrix_result, unsigned int sq_dimension) { int size = sq_dimension * sq_dimension * sizeof(float); float *sq_matrix_1_d, *sq_matrix_2_d, *sq_matrix_result_d; /*************************************************** 1st Part: Allocation of memory on device memory ****************************************************/ /* copy sq_matrix_1 and sq_matrix_2 to device memory */ hipMalloc((void**) &sq_matrix_1_d, size); hipMemcpy(sq_matrix_1_d, sq_matrix_1, size, hipMemcpyHostToDevice); hipMalloc((void**) &sq_matrix_2_d, size); hipMemcpy(sq_matrix_2_d, sq_matrix_2, size, hipMemcpyHostToDevice); /*allocate sq_matrix_result on host */ hipMalloc((void**) &sq_matrix_result_d, size); /*************************************************** 2nd Part: Inovke kernel ****************************************************/ //fix the size to maximum number 1024 dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); //if sq_dimension*sq_dimension>1024, use other blocks to calculate int gridx = int (sq_dimension + BLOCK_SIZE - 1) / int(BLOCK_SIZE); dim3 
dimGrid(gridx, gridx); hipLaunchKernelGGL(( matrix_mul_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, sq_matrix_1_d, sq_matrix_2_d, sq_matrix_result_d, sq_dimension); //, dimBlock.x * dimBlock.x * sizeof(float) /*************************************************** 3rd Part: Transfer result from device to host ****************************************************/ hipMemcpy(sq_matrix_result, sq_matrix_result_d, size, hipMemcpyDeviceToHost); hipFree(sq_matrix_1_d); hipFree(sq_matrix_2_d); hipFree(sq_matrix_result_d); } } // namespace cuda
d3dfc3851f447494ee52aab3a8c46c527733d902.cu
/* Copyright (C) 2011 Abhinav Jauhri ([email protected]), Carnegie Mellon University - Silicon Valley This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <cuda.h> #include <cuda_runtime.h> #include "matrix_mul.h" #include <stdio.h> #define TILE_WIDTH 2 #define BLOCK_SIZE 32 namespace cuda { __global__ void matrix_mul_kernel(float *sq_matrix_1, float *sq_matrix_2, float *sq_matrix_result, int sq_dimension) { //For repeatly access blocks, use shared memory to speed up __shared__ float local_mat_1[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float local_mat_2[BLOCK_SIZE][BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int block_offsetx = blockIdx.x * BLOCK_SIZE; int block_offsety = blockIdx.y * BLOCK_SIZE; float sum = 0.0f; #pragma unroll for(int i = 0; i < sq_dimension; i += BLOCK_SIZE) { //Transfer to 2-D matrix to avoid memory bank conflict //local_mat_1 is row-based moving for each 32*32 block in 1st input matrix if(ty + block_offsety < sq_dimension && tx + i < sq_dimension) local_mat_1[ty][tx] = sq_matrix_1[(ty + block_offsety) * sq_dimension + tx + i ]; else local_mat_1[ty][tx] = 0; //local_mat_2 is column-based moving for each 32*32 block in 2nd input matrix if(tx + block_offsetx < sq_dimension && ty + i < sq_dimension) local_mat_2[ty][tx] = sq_matrix_2[(ty + i ) * sq_dimension + tx + block_offsetx]; else local_mat_2[ty][tx] = 0; //must wait all threads finishing moving data into shared memory __syncthreads(); #pragma unroll for(int k = 0; k < BLOCK_SIZE; k++) { sum += local_mat_1[ty][k] * local_mat_2[k][tx]; } __syncthreads();//must wait all threads sum up } if(tx + block_offsetx < sq_dimension && ty + block_offsety < sq_dimension) { sq_matrix_result[(ty + block_offsety) * sq_dimension + tx + block_offsetx] = sum; //calculate the correct position of the product } } void matrix_multiplication(float *sq_matrix_1, float *sq_matrix_2, float *sq_matrix_result, unsigned int sq_dimension) { int size = sq_dimension * sq_dimension * sizeof(float); float *sq_matrix_1_d, *sq_matrix_2_d, *sq_matrix_result_d; /*************************************************** 1st Part: Allocation of memory on device memory ****************************************************/ /* copy sq_matrix_1 and sq_matrix_2 to device memory */ cudaMalloc((void**) &sq_matrix_1_d, size); cudaMemcpy(sq_matrix_1_d, sq_matrix_1, size, cudaMemcpyHostToDevice); cudaMalloc((void**) &sq_matrix_2_d, size); cudaMemcpy(sq_matrix_2_d, sq_matrix_2, size, cudaMemcpyHostToDevice); /*allocate sq_matrix_result on host */ cudaMalloc((void**) &sq_matrix_result_d, size); /*************************************************** 2nd Part: Inovke kernel ****************************************************/ //fix the size to maximum number 1024 dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); //if sq_dimension*sq_dimension>1024, use other blocks to calculate int gridx = int (sq_dimension + BLOCK_SIZE - 1) / int(BLOCK_SIZE); dim3 dimGrid(gridx, gridx); matrix_mul_kernel<<<dimGrid, dimBlock>>>(sq_matrix_1_d, 
sq_matrix_2_d, sq_matrix_result_d, sq_dimension); //, dimBlock.x * dimBlock.x * sizeof(float) /*************************************************** 3rd Part: Transfer result from device to host ****************************************************/ cudaMemcpy(sq_matrix_result, sq_matrix_result_d, size, cudaMemcpyDeviceToHost); cudaFree(sq_matrix_1_d); cudaFree(sq_matrix_2_d); cudaFree(sq_matrix_result_d); } } // namespace cuda
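The pair above illustrates the one mechanical change hipify makes to every kernel launch: the CUDA triple-chevron syntax becomes an explicit hipLaunchKernelGGL call in which the dynamic shared-memory size and the stream are spelled out as ordinary arguments. The stand-alone sketch below shows the two equivalent forms side by side; the kernel and the names toy_kernel and launch_toy are illustrative only and are not taken from the files above.

// Minimal sketch: the same element-wise kernel launched with CUDA's
// triple-chevron syntax and (in the comment) with the HIP macro that
// hipify substitutes for it.
__global__ void toy_kernel(const float *in, float *out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = 2.0f * in[i];          // arbitrary per-element work
}

void launch_toy(const float *d_in, float *d_out, int n)
{
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // CUDA form, as in the .cu file above:
    toy_kernel<<<grid, block>>>(d_in, d_out, n);

    // HIP form produced by hipify, as in the .hip file above; the third and
    // fourth arguments are the dynamic shared-memory bytes and the stream:
    // hipLaunchKernelGGL(toy_kernel, grid, block, 0, 0, d_in, d_out, n);
}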
09dd5583788ccb62c0a86b5d9c92befa7febb077.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <hip/hip_fp16.h> #include <algorithm> #include "paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h" #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { SplitPlugin* CreateSplitPluginDeserialize(const void* buffer, size_t length) { return new SplitPlugin(buffer, length); } REGISTER_TRT_PLUGIN("split_plugin", CreateSplitPluginDeserialize); template <typename T> __device__ int upper_bound(T const* vals, int n, T const& key) { int i = 0; while (n > 0) { int m = n / 2; int j = i + m; if (!(key < vals[j])) { i = j + 1; n -= m + 1; } else { n = m; } } return i; } nvinfer1::Dims SplitPlugin::getOutputDimensions( int index, const nvinfer1::Dims* input_dims, int num_inputs) { PADDLE_ENFORCE_EQ(num_inputs, 1, platform::errors::InvalidArgument( "Invalid number of inputs of split TRT plugin. " "Expected 1, received %d.", num_inputs)); PADDLE_ENFORCE_LT( index, this->getNbOutputs(), platform::errors::InvalidArgument( "Index of output should be less than the total number of outputs in " "split TensorRT plugin. Received index = %d >= total outputs = %d", index, this->getNbOutputs())); nvinfer1::Dims output_dims = input_dims[0]; output_dims.d[axis_] = output_length_.at(index); return output_dims; } int SplitPlugin::initialize() { PADDLE_ENFORCE_LE(axis_, nvinfer1::Dims::MAX_DIMS, platform::errors::InvalidArgument( "Axis dimension exceeds max dimension in TensorRT. 
" "Received axis = %d > MAX_DIMS = %d", axis_, nvinfer1::Dims::MAX_DIMS)); // notice input dims is [C, H, W] nvinfer1::Dims dims = this->getInputDims(0); outer_rows_ = 1; inner_cols_ = 1; for (int i = 0; i < axis_; ++i) { outer_rows_ *= dims.d[i]; } for (int i = axis_ + 1; i < dims.nbDims; ++i) { inner_cols_ *= dims.d[i]; } same_shape_ = true; std::vector<int> segment_offsets(1, 0); for (int i = 0; i < this->getNbOutputs(); ++i) { if (output_length_[i] != output_length_[0]) { same_shape_ = false; } segment_offsets.push_back(segment_offsets.back() + output_length_[i]); } axis_shape_ = dims.d[axis_]; d_segment_offsets_ = segment_offsets; segment_offsets_ = std::move(segment_offsets); d_output_ptrs_.resize(this->getNbOutputs(), nullptr); return 0; } // The following part of the code refers to onnx-tensorrt // https://github.com/onnx/onnx-tensorrt/blob/master/Split.cu template <typename T> __global__ void split_kernel(int nsegment, int const* __restrict__ segment_offsets, T const* __restrict__ idata, T* const* odatas, int inner_cols, int axis_shape, int outer_rows) { int x0 = threadIdx.x + blockIdx.x * blockDim.x; int src_y0 = threadIdx.y + blockIdx.y * blockDim.y; int z0 = threadIdx.z + blockIdx.z * blockDim.z; for (int z = z0; z < outer_rows; z += blockDim.z * gridDim.z) { for (int src_y = src_y0; src_y < axis_shape; src_y += blockDim.y * gridDim.y) { for (int x = x0; x < inner_cols; x += blockDim.x * gridDim.x) { int segment = upper_bound(segment_offsets, nsegment, src_y) - 1; int dst_y = src_y - segment_offsets[segment]; int dst_ny = segment_offsets[segment + 1] - segment_offsets[segment]; odatas[segment][x + inner_cols * (dst_y + dst_ny * z)] = idata[x + inner_cols * (src_y + axis_shape * z)]; } } } } int SplitPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, hipStream_t stream) { const int* d_segment_offsets_ptr = thrust::raw_pointer_cast(&d_segment_offsets_[0]); float const* input_ptr = reinterpret_cast<float const*>(inputs[0]); float* const* h_odatas = reinterpret_cast<float* const*>(outputs); float** output_ptrs = thrust::raw_pointer_cast(&d_output_ptrs_[0]); PADDLE_ENFORCE_CUDA_SUCCESS(hipMemcpyAsync( output_ptrs, h_odatas, d_output_ptrs_.size() * sizeof(float*), hipMemcpyHostToDevice, stream)); int outer_rows = outer_rows_ * batchSize; dim3 block(32, 16); dim3 grid(::min((inner_cols_ - 1) / block.x + 1, 65535u), ::min((axis_shape_ - 1) / block.y + 1, 65535u), ::min((outer_rows_ - 1) / block.z + 1, 65535u)); hipLaunchKernelGGL(( split_kernel), dim3(grid), dim3(block), 0, stream, d_segment_offsets_.size(), d_segment_offsets_ptr, input_ptr, output_ptrs, inner_cols_, axis_shape_, outer_rows); return hipGetLastError() != hipSuccess; } // Dynamic Plugin below. 
#if IS_TRT_VERSION_GE(6000) int SplitPluginDynamic::initialize() { return 0; } size_t SplitPluginDynamic::getSerializationSize() const { return 0; } void SplitPluginDynamic::serialize(void* buffer) const {} nvinfer1::DimsExprs SplitPluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs* inputs, int nb_inputs, nvinfer1::IExprBuilder& expr_builder) { PADDLE_ENFORCE_EQ(nb_inputs, 1, platform::errors::InvalidArgument( "The Split plugin should be only one input.")); PADDLE_ENFORCE_LT(output_index, output_length_.size(), platform::errors::InvalidArgument( "When GetOutputDimensions, the index(%d) should not " "greater the num(%d) of the outpus.", output_index, output_length_.size())); nvinfer1::DimsExprs output_dims = inputs[0]; output_dims.d[axis_] = expr_builder.constant(output_length_.at(output_index)); return output_dims; } bool SplitPluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc* in_out, int nb_inputs, int nb_outputs) { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of split plugin should not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); (in_out && pos < (nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc& in = in_out[pos]; if (pos == 0) { #ifdef SUPPORTS_CUDA_FP16 return (in.type == nvinfer1::DataType::kFLOAT || in.type == nvinfer1::DataType::kHALF) && (in.format == nvinfer1::TensorFormat::kLINEAR); #else return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); #endif } const nvinfer1::PluginTensorDesc& prev = in_out[pos - 1]; // output return in.type == prev.type && in.format == prev.format; } nvinfer1::DataType SplitPluginDynamic::getOutputDataType( int index, const nvinfer1::DataType* input_types, int nb_inputs) const { return input_types[0]; } int SplitPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* input_desc, const nvinfer1::PluginTensorDesc* output_desc, const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream) { auto input_dims = input_desc[0].dims; int outer_rows = 1; int inner_cols = 1; // with batch for (int i = 0; i < axis_; i++) { outer_rows *= input_dims.d[i]; } for (int i = axis_ + 1; i < input_dims.nbDims; i++) { inner_cols *= input_dims.d[i]; } std::vector<int> segment_offsets(1, 0); for (int i = 0; i < this->getNbOutputs(); i++) { segment_offsets.push_back(segment_offsets.back() + output_length_[i]); } int axis_shape = input_dims.d[axis_]; thrust::device_vector<int> d_segment_offsets = segment_offsets; const int* d_segment_offsets_ptr = thrust::raw_pointer_cast(&d_segment_offsets[0]); dim3 block(32, 16); dim3 grid(::min((inner_cols - 1) / block.x + 1, 65535u), ::min((axis_shape - 1) / block.y + 1, 65535u), ::min((outer_rows - 1) / block.z + 1, 65535u)); auto input_type = input_desc[0].type; if (input_type == nvinfer1::DataType::kFLOAT) { thrust::device_vector<float*> d_output_ptrs; d_output_ptrs.resize(this->getNbOutputs(), nullptr); const float* input_ptr = static_cast<const float*>(inputs[0]); float* const* h_odatas = reinterpret_cast<float* const*>(outputs); float** output_ptrs = thrust::raw_pointer_cast(&d_output_ptrs[0]); PADDLE_ENFORCE_CUDA_SUCCESS(hipMemcpyAsync( output_ptrs, h_odatas, d_output_ptrs.size() * sizeof(float*), hipMemcpyHostToDevice, stream)); hipLaunchKernelGGL(( split_kernel), dim3(grid), dim3(block), 0, stream, 
d_segment_offsets.size(), d_segment_offsets_ptr, input_ptr, output_ptrs, inner_cols, axis_shape, outer_rows); } else if (input_type == nvinfer1::DataType::kHALF) { #ifdef SUPPORTS_CUDA_FP16 thrust::device_vector<half*> d_output_ptrs; d_output_ptrs.resize(this->getNbOutputs(), nullptr); const half* input_ptr = static_cast<const half*>(inputs[0]); half* const* h_odatas = reinterpret_cast<half* const*>(outputs); half** output_ptrs = thrust::raw_pointer_cast(&d_output_ptrs[0]); PADDLE_ENFORCE_CUDA_SUCCESS(hipMemcpyAsync( output_ptrs, h_odatas, d_output_ptrs.size() * sizeof(half*), hipMemcpyHostToDevice, stream)); hipLaunchKernelGGL(( split_kernel), dim3(grid), dim3(block), 0, stream, d_segment_offsets.size(), d_segment_offsets_ptr, input_ptr, output_ptrs, inner_cols, axis_shape, outer_rows); #else PADDLE_THROW(platform::errors::Fatal( "The cuda archs you specific should greater than 600.")); #endif } return hipGetLastError() != hipSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
09dd5583788ccb62c0a86b5d9c92befa7febb077.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <cuda_fp16.h> #include <algorithm> #include "paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h" #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { SplitPlugin* CreateSplitPluginDeserialize(const void* buffer, size_t length) { return new SplitPlugin(buffer, length); } REGISTER_TRT_PLUGIN("split_plugin", CreateSplitPluginDeserialize); template <typename T> __device__ int upper_bound(T const* vals, int n, T const& key) { int i = 0; while (n > 0) { int m = n / 2; int j = i + m; if (!(key < vals[j])) { i = j + 1; n -= m + 1; } else { n = m; } } return i; } nvinfer1::Dims SplitPlugin::getOutputDimensions( int index, const nvinfer1::Dims* input_dims, int num_inputs) { PADDLE_ENFORCE_EQ(num_inputs, 1, platform::errors::InvalidArgument( "Invalid number of inputs of split TRT plugin. " "Expected 1, received %d.", num_inputs)); PADDLE_ENFORCE_LT( index, this->getNbOutputs(), platform::errors::InvalidArgument( "Index of output should be less than the total number of outputs in " "split TensorRT plugin. Received index = %d >= total outputs = %d", index, this->getNbOutputs())); nvinfer1::Dims output_dims = input_dims[0]; output_dims.d[axis_] = output_length_.at(index); return output_dims; } int SplitPlugin::initialize() { PADDLE_ENFORCE_LE(axis_, nvinfer1::Dims::MAX_DIMS, platform::errors::InvalidArgument( "Axis dimension exceeds max dimension in TensorRT. 
" "Received axis = %d > MAX_DIMS = %d", axis_, nvinfer1::Dims::MAX_DIMS)); // notice input dims is [C, H, W] nvinfer1::Dims dims = this->getInputDims(0); outer_rows_ = 1; inner_cols_ = 1; for (int i = 0; i < axis_; ++i) { outer_rows_ *= dims.d[i]; } for (int i = axis_ + 1; i < dims.nbDims; ++i) { inner_cols_ *= dims.d[i]; } same_shape_ = true; std::vector<int> segment_offsets(1, 0); for (int i = 0; i < this->getNbOutputs(); ++i) { if (output_length_[i] != output_length_[0]) { same_shape_ = false; } segment_offsets.push_back(segment_offsets.back() + output_length_[i]); } axis_shape_ = dims.d[axis_]; d_segment_offsets_ = segment_offsets; segment_offsets_ = std::move(segment_offsets); d_output_ptrs_.resize(this->getNbOutputs(), nullptr); return 0; } // The following part of the code refers to onnx-tensorrt // https://github.com/onnx/onnx-tensorrt/blob/master/Split.cu template <typename T> __global__ void split_kernel(int nsegment, int const* __restrict__ segment_offsets, T const* __restrict__ idata, T* const* odatas, int inner_cols, int axis_shape, int outer_rows) { int x0 = threadIdx.x + blockIdx.x * blockDim.x; int src_y0 = threadIdx.y + blockIdx.y * blockDim.y; int z0 = threadIdx.z + blockIdx.z * blockDim.z; for (int z = z0; z < outer_rows; z += blockDim.z * gridDim.z) { for (int src_y = src_y0; src_y < axis_shape; src_y += blockDim.y * gridDim.y) { for (int x = x0; x < inner_cols; x += blockDim.x * gridDim.x) { int segment = upper_bound(segment_offsets, nsegment, src_y) - 1; int dst_y = src_y - segment_offsets[segment]; int dst_ny = segment_offsets[segment + 1] - segment_offsets[segment]; odatas[segment][x + inner_cols * (dst_y + dst_ny * z)] = idata[x + inner_cols * (src_y + axis_shape * z)]; } } } } int SplitPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream) { const int* d_segment_offsets_ptr = thrust::raw_pointer_cast(&d_segment_offsets_[0]); float const* input_ptr = reinterpret_cast<float const*>(inputs[0]); float* const* h_odatas = reinterpret_cast<float* const*>(outputs); float** output_ptrs = thrust::raw_pointer_cast(&d_output_ptrs_[0]); PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemcpyAsync( output_ptrs, h_odatas, d_output_ptrs_.size() * sizeof(float*), cudaMemcpyHostToDevice, stream)); int outer_rows = outer_rows_ * batchSize; dim3 block(32, 16); dim3 grid(std::min((inner_cols_ - 1) / block.x + 1, 65535u), std::min((axis_shape_ - 1) / block.y + 1, 65535u), std::min((outer_rows_ - 1) / block.z + 1, 65535u)); split_kernel<<<grid, block, 0, stream>>>( d_segment_offsets_.size(), d_segment_offsets_ptr, input_ptr, output_ptrs, inner_cols_, axis_shape_, outer_rows); return cudaGetLastError() != cudaSuccess; } // Dynamic Plugin below. 
#if IS_TRT_VERSION_GE(6000) int SplitPluginDynamic::initialize() { return 0; } size_t SplitPluginDynamic::getSerializationSize() const { return 0; } void SplitPluginDynamic::serialize(void* buffer) const {} nvinfer1::DimsExprs SplitPluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs* inputs, int nb_inputs, nvinfer1::IExprBuilder& expr_builder) { PADDLE_ENFORCE_EQ(nb_inputs, 1, platform::errors::InvalidArgument( "The Split plugin should be only one input.")); PADDLE_ENFORCE_LT(output_index, output_length_.size(), platform::errors::InvalidArgument( "When GetOutputDimensions, the index(%d) should not " "greater the num(%d) of the outpus.", output_index, output_length_.size())); nvinfer1::DimsExprs output_dims = inputs[0]; output_dims.d[axis_] = expr_builder.constant(output_length_.at(output_index)); return output_dims; } bool SplitPluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc* in_out, int nb_inputs, int nb_outputs) { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of split plugin should not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); (in_out && pos < (nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc& in = in_out[pos]; if (pos == 0) { #ifdef SUPPORTS_CUDA_FP16 return (in.type == nvinfer1::DataType::kFLOAT || in.type == nvinfer1::DataType::kHALF) && (in.format == nvinfer1::TensorFormat::kLINEAR); #else return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); #endif } const nvinfer1::PluginTensorDesc& prev = in_out[pos - 1]; // output return in.type == prev.type && in.format == prev.format; } nvinfer1::DataType SplitPluginDynamic::getOutputDataType( int index, const nvinfer1::DataType* input_types, int nb_inputs) const { return input_types[0]; } int SplitPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* input_desc, const nvinfer1::PluginTensorDesc* output_desc, const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) { auto input_dims = input_desc[0].dims; int outer_rows = 1; int inner_cols = 1; // with batch for (int i = 0; i < axis_; i++) { outer_rows *= input_dims.d[i]; } for (int i = axis_ + 1; i < input_dims.nbDims; i++) { inner_cols *= input_dims.d[i]; } std::vector<int> segment_offsets(1, 0); for (int i = 0; i < this->getNbOutputs(); i++) { segment_offsets.push_back(segment_offsets.back() + output_length_[i]); } int axis_shape = input_dims.d[axis_]; thrust::device_vector<int> d_segment_offsets = segment_offsets; const int* d_segment_offsets_ptr = thrust::raw_pointer_cast(&d_segment_offsets[0]); dim3 block(32, 16); dim3 grid(std::min((inner_cols - 1) / block.x + 1, 65535u), std::min((axis_shape - 1) / block.y + 1, 65535u), std::min((outer_rows - 1) / block.z + 1, 65535u)); auto input_type = input_desc[0].type; if (input_type == nvinfer1::DataType::kFLOAT) { thrust::device_vector<float*> d_output_ptrs; d_output_ptrs.resize(this->getNbOutputs(), nullptr); const float* input_ptr = static_cast<const float*>(inputs[0]); float* const* h_odatas = reinterpret_cast<float* const*>(outputs); float** output_ptrs = thrust::raw_pointer_cast(&d_output_ptrs[0]); PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemcpyAsync( output_ptrs, h_odatas, d_output_ptrs.size() * sizeof(float*), cudaMemcpyHostToDevice, stream)); split_kernel<<<grid, block, 0, stream>>>( 
d_segment_offsets.size(), d_segment_offsets_ptr, input_ptr, output_ptrs, inner_cols, axis_shape, outer_rows); } else if (input_type == nvinfer1::DataType::kHALF) { #ifdef SUPPORTS_CUDA_FP16 thrust::device_vector<half*> d_output_ptrs; d_output_ptrs.resize(this->getNbOutputs(), nullptr); const half* input_ptr = static_cast<const half*>(inputs[0]); half* const* h_odatas = reinterpret_cast<half* const*>(outputs); half** output_ptrs = thrust::raw_pointer_cast(&d_output_ptrs[0]); PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemcpyAsync( output_ptrs, h_odatas, d_output_ptrs.size() * sizeof(half*), cudaMemcpyHostToDevice, stream)); split_kernel<<<grid, block, 0, stream>>>( d_segment_offsets.size(), d_segment_offsets_ptr, input_ptr, output_ptrs, inner_cols, axis_shape, outer_rows); #else PADDLE_THROW(platform::errors::Fatal( "The cuda archs you specific should greater than 600.")); #endif } return cudaGetLastError() != cudaSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
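The split kernel above decides, for every source row along the split axis, which output tensor it belongs to by running a branchless binary search (upper_bound) over segment_offsets, the prefix sums of output_length_ built in initialize(). The host-side sketch below mirrors that lookup with a hypothetical split of 9 rows into pieces of length 2, 3 and 4, purely to make the index arithmetic concrete; it is not part of the plugin.

#include <cassert>
#include <vector>

// Host mirror of the device upper_bound used by split_kernel: returns the
// first position whose value is greater than key.
static int upper_bound_host(const int *vals, int n, int key)
{
    int i = 0;
    while (n > 0) {
        int m = n / 2;
        int j = i + m;
        if (!(key < vals[j])) { i = j + 1; n -= m + 1; }
        else                  { n = m; }
    }
    return i;
}

int main()
{
    // Hypothetical split: an axis of length 9 into outputs of length 2, 3, 4.
    std::vector<int> output_length = {2, 3, 4};
    std::vector<int> segment_offsets = {0};            // prefix sums, as in initialize()
    for (int len : output_length)
        segment_offsets.push_back(segment_offsets.back() + len);   // -> {0, 2, 5, 9}

    // For a source row src_y, the kernel picks the segment and the local row.
    int src_y   = 6;
    int segment = upper_bound_host(segment_offsets.data(),
                                   (int)segment_offsets.size(), src_y) - 1;
    int dst_y   = src_y - segment_offsets[segment];
    assert(segment == 2 && dst_y == 1);  // row 6 lands in the third output, local row 1
    return 0;
}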
b64b52f45e27d61449e88f222b3430d0bccad325.hip
// !!! This is a file automatically generated by hipify!!! #include "CUAPI.h" #include "CUFLU.h" #ifdef GRAVITY #include "CUPOT.h" #endif #ifdef LAOHU extern "C" { int GetFreeGpuDevID( int, int ); } #endif #ifdef GPU //------------------------------------------------------------------------------------------------------- // Function : CUAPI_SetDevice // Description : Set the active device // // Parameter : Mode : -3 --> set by the gpudevmgr library on the NAOC Laohu cluster // -2 --> set automatically by CUDA (must work with the "compute-exclusive mode") // -1 --> set by MPI ranks : SetDeviceID = MPI_Rank % DeviceCount // >= 0 --> set to "Mode" //------------------------------------------------------------------------------------------------------- void CUAPI_SetDevice( const int Mode ) { if ( MPI_Rank == 0 ) Aux_Message( stdout, "CUAPI_SetDevice ...\n" ); // check # ifdef LAOHU if ( Mode < -3 ) Aux_Error( ERROR_INFO, "incorrect parameter %s = %d !!\n", "Mode", Mode ); if ( Mode != -3 && MPI_Rank == 0 ) Aux_Message( stderr, "WARNING : \"OPT__GPUID_SELECT != -3\" on the Laohu cluster !?\n" ); # else if ( Mode < -2 ) Aux_Error( ERROR_INFO, "incorrect parameter %s = %d !!\n", "Mode", Mode ); # endif // get the hostname of each MPI process char Host[1024]; gethostname( Host, 1024 ); // verify that there are GPU supporting CUDA int DeviceCount; CUDA_CHECK_ERROR( hipGetDeviceCount( &DeviceCount ) ); if ( DeviceCount == 0 ) Aux_Error( ERROR_INFO, "no devices support CUDA at MPI_Rank %2d (host = %8s) !!\n", MPI_Rank, Host ); // set the device ID void **d_TempPtr = NULL; int SetDeviceID, GetDeviceID = 999; hipDeviceProp_t DeviceProp; switch ( Mode ) { # ifdef LAOHU case -3: SetDeviceID = GetFreeGpuDevID( DeviceCount, MPI_Rank ); if ( SetDeviceID < DeviceCount ) CUDA_CHECK_ERROR( hipSetDevice( SetDeviceID ) ); else Aux_Error( ERROR_INFO, "SetDeviceID (%d) >= DeviceCount (%d) at MPI_Rank %2d (host = %8s) !!\n", SetDeviceID, DeviceCount, MPI_Rank, Host ); break; # endif case -2: CUDA_CHECK_ERROR( hipMalloc( (void**) &d_TempPtr, sizeof(int) ) ); // to set the GPU ID CUDA_CHECK_ERROR( hipFree( d_TempPtr ) ); // make sure that the "exclusive" compute mode is adopted CUDA_CHECK_ERROR( hipGetDevice( &GetDeviceID ) ); CUDA_CHECK_ERROR( hipGetDeviceProperties( &DeviceProp, GetDeviceID ) ); if ( DeviceProp.computeMode != hipComputeModeExclusive ) { Aux_Message( stderr, "WARNING : \"exclusive\" compute mode is NOT enabled for \"%s\" at Rank %2d", "OPT__GPUID_SELECT == -2", MPI_Rank ); Aux_Message( stderr, " (host=%8s) !!\n", Host ); } break; case -1: SetDeviceID = MPI_Rank % DeviceCount; CUDA_CHECK_ERROR( hipSetDevice( SetDeviceID ) ); if ( MPI_NRank > 1 && MPI_Rank == 0 ) { Aux_Message( stderr, "WARNING : please make sure that different MPI ranks will use different GPUs " ); Aux_Message( stderr, "for \"%s\" !!\n", "OPT__GPUID_SELECT == -1" ); } break; default: SetDeviceID = Mode; if ( SetDeviceID < DeviceCount ) CUDA_CHECK_ERROR( hipSetDevice( SetDeviceID ) ); else Aux_Error( ERROR_INFO, "SetDeviceID (%d) >= DeviceCount (%d) at MPI_Rank %2d (host = %8s) !!\n", SetDeviceID, DeviceCount, MPI_Rank, Host ); if ( MPI_NRank > 1 && MPI_Rank == 0 ) { Aux_Message( stderr, "WARNING : please make sure that different MPI ranks will use different GPUs " ); Aux_Message( stderr, "for \"%s\" !!\n", "OPT__GPUID_SELECT == -1" ); } break; } // switch ( Mode ) // check // (0) load the device properties and the versions of CUDA and driver int DriverVersion = 0, RuntimeVersion = 0; CUDA_CHECK_ERROR( hipGetDevice( &GetDeviceID ) ); 
CUDA_CHECK_ERROR( hipGetDeviceProperties( &DeviceProp, GetDeviceID ) ); CUDA_CHECK_ERROR( hipDriverGetVersion( &DriverVersion ) ); CUDA_CHECK_ERROR( hipRuntimeGetVersion( &RuntimeVersion ) ); // (1) verify the device version if ( DeviceProp.major < 1 ) Aux_Error( ERROR_INFO, "\ndevice major version < 1 at MPI_Rank %2d (host = %8s) !!\n", MPI_Rank, Host ); if ( Mode >= -1 ) { // (2) verify that the device ID is properly set if ( GetDeviceID != SetDeviceID ) Aux_Error( ERROR_INFO, "GetDeviceID (%d) != SetDeviceID (%d) at MPI_Rank %2d (host = %8s) !!\n", GetDeviceID, SetDeviceID, MPI_Rank, Host ); // (3) verify that the adopted ID is accessible CUDA_CHECK_ERROR( hipMalloc( (void**) &d_TempPtr, sizeof(int) ) ); CUDA_CHECK_ERROR( hipFree( d_TempPtr ) ); } // (4) verify the capability of double precision # ifdef FLOAT8 if ( DeviceProp.major < 2 && DeviceProp.minor < 3 ) Aux_Error( ERROR_INFO, "GPU \"%s\" at MPI_Rank %2d (host = %8s) does not support FLOAT8 !!\n", DeviceProp.name, MPI_Rank, Host ); # endif // (5) verify the GPU architecture # if ( GPU_ARCH == FERMI ) if ( DeviceProp.major != 2 ) Aux_Error( ERROR_INFO, "GPU \"%s\" with the compute capability %d.%d is incompatible with the Fermi architecture !!\n" " --> Please reset GPU_ARCH in the Makefile properly\n", DeviceProp.name, DeviceProp.major, DeviceProp.minor ); # elif ( GPU_ARCH == KEPLER ) if ( DeviceProp.major != 3 ) Aux_Error( ERROR_INFO, "GPU \"%s\" with the compute capability %d.%d is incompatible with the Kepler architecture !!\n" " --> Please reset GPU_ARCH in the Makefile properly\n", DeviceProp.name, DeviceProp.major, DeviceProp.minor ); # elif ( GPU_ARCH == MAXWELL ) if ( DeviceProp.major != 5 ) Aux_Error( ERROR_INFO, "GPU \"%s\" with the compute capability %d.%d is incompatible with the Maxwell architecture !!\n" " --> Please reset GPU_ARCH in the Makefile properly\n", DeviceProp.name, DeviceProp.major, DeviceProp.minor ); # elif ( GPU_ARCH == PASCAL ) if ( DeviceProp.major != 6 ) Aux_Error( ERROR_INFO, "GPU \"%s\" with the compute capability %d.%d is incompatible with the Pascal architecture !!\n" " --> Please reset GPU_ARCH in the Makefile properly\n", DeviceProp.name, DeviceProp.major, DeviceProp.minor ); # elif ( GPU_ARCH == VOLTA ) if ( DeviceProp.major != 7 ) Aux_Error( ERROR_INFO, "GPU \"%s\" with the compute capability %d.%d is incompatible with the Volta architecture !!\n" " --> Please reset GPU_ARCH in the Makefile properly\n", DeviceProp.name, DeviceProp.major, DeviceProp.minor ); # else # error : UNKNOWN GPU_ARCH !! # endif // GPU_ARCH // (6) some options are not supported // (6-1) fluid solver # if ( MODEL == HYDRO ) # if ( FLU_SCHEME == WAF && defined FLOAT8 ) if ( RuntimeVersion < 3020 ) Aux_Error( ERROR_INFO, "double-precision WAF scheme is not supported in CUDA version < 3.2 !!\n" ); # endif # if ( FLU_SCHEME == WAF && RSOLVER == EXACT ) # error : ERROR : Currently WAF scheme does not support the exact Riemann solver !!; # endif # if ( defined FLOAT8 && CHECK_INTERMEDIATE == EXACT && \ ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) ) if ( RuntimeVersion < 3020 ) Aux_Error( ERROR_INFO, "CHECK_INTERMEDIATE == EXACT + FLOAT8 is not supported in CUDA < 3.2 !!" ); # endif # elif ( MODEL == MHD ) # warning : WAIT MHD !!! 
# endif // #if ( MODEL == HYDRO ) // (6-2) SOR Poisson solver # if ( POT_SCHEME == SOR ) # ifdef SOR_USE_SHUFFLE if ( DeviceProp.warpSize != 32 ) Aux_Error( ERROR_INFO, "warp size (%d) != 32 !!\n", DeviceProp.warpSize ); if ( DeviceProp.maxThreadsPerBlock > 1024 ) Aux_Error( ERROR_INFO, "maximum number of threads per block (%d) > 1024 !!\n", DeviceProp.maxThreadsPerBlock ); # endif # ifdef SOR_USE_PADDING if ( DeviceProp.warpSize != 32 ) Aux_Error( ERROR_INFO, "warp size (%d) != 32 !!\n", DeviceProp.warpSize ); if ( POT_GHOST_SIZE != 5 ) Aux_Error( ERROR_INFO, "POT_GHOST_SIZE (%d) != 5 !!\n", POT_GHOST_SIZE ); # endif # endif // if ( POT_SCHEME == SOR ) // (7) warp size if ( DeviceProp.warpSize != WARP_SIZE ) Aux_Error( ERROR_INFO, "inconsistent warp size (warpSize %d, WARP_SIZE %d) !!\n", DeviceProp.warpSize, WARP_SIZE ); if ( MPI_Rank == 0 ) Aux_Message( stdout, "CUAPI_SetDevice ... done\n" ); } // FUNCTION : CUAPI_SetDevice #endif // #ifdef GPU
b64b52f45e27d61449e88f222b3430d0bccad325.cu
#include "CUAPI.h" #include "CUFLU.h" #ifdef GRAVITY #include "CUPOT.h" #endif #ifdef LAOHU extern "C" { int GetFreeGpuDevID( int, int ); } #endif #ifdef GPU //------------------------------------------------------------------------------------------------------- // Function : CUAPI_SetDevice // Description : Set the active device // // Parameter : Mode : -3 --> set by the gpudevmgr library on the NAOC Laohu cluster // -2 --> set automatically by CUDA (must work with the "compute-exclusive mode") // -1 --> set by MPI ranks : SetDeviceID = MPI_Rank % DeviceCount // >= 0 --> set to "Mode" //------------------------------------------------------------------------------------------------------- void CUAPI_SetDevice( const int Mode ) { if ( MPI_Rank == 0 ) Aux_Message( stdout, "CUAPI_SetDevice ...\n" ); // check # ifdef LAOHU if ( Mode < -3 ) Aux_Error( ERROR_INFO, "incorrect parameter %s = %d !!\n", "Mode", Mode ); if ( Mode != -3 && MPI_Rank == 0 ) Aux_Message( stderr, "WARNING : \"OPT__GPUID_SELECT != -3\" on the Laohu cluster !?\n" ); # else if ( Mode < -2 ) Aux_Error( ERROR_INFO, "incorrect parameter %s = %d !!\n", "Mode", Mode ); # endif // get the hostname of each MPI process char Host[1024]; gethostname( Host, 1024 ); // verify that there are GPU supporting CUDA int DeviceCount; CUDA_CHECK_ERROR( cudaGetDeviceCount( &DeviceCount ) ); if ( DeviceCount == 0 ) Aux_Error( ERROR_INFO, "no devices support CUDA at MPI_Rank %2d (host = %8s) !!\n", MPI_Rank, Host ); // set the device ID void **d_TempPtr = NULL; int SetDeviceID, GetDeviceID = 999; cudaDeviceProp DeviceProp; switch ( Mode ) { # ifdef LAOHU case -3: SetDeviceID = GetFreeGpuDevID( DeviceCount, MPI_Rank ); if ( SetDeviceID < DeviceCount ) CUDA_CHECK_ERROR( cudaSetDevice( SetDeviceID ) ); else Aux_Error( ERROR_INFO, "SetDeviceID (%d) >= DeviceCount (%d) at MPI_Rank %2d (host = %8s) !!\n", SetDeviceID, DeviceCount, MPI_Rank, Host ); break; # endif case -2: CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_TempPtr, sizeof(int) ) ); // to set the GPU ID CUDA_CHECK_ERROR( cudaFree( d_TempPtr ) ); // make sure that the "exclusive" compute mode is adopted CUDA_CHECK_ERROR( cudaGetDevice( &GetDeviceID ) ); CUDA_CHECK_ERROR( cudaGetDeviceProperties( &DeviceProp, GetDeviceID ) ); if ( DeviceProp.computeMode != cudaComputeModeExclusive ) { Aux_Message( stderr, "WARNING : \"exclusive\" compute mode is NOT enabled for \"%s\" at Rank %2d", "OPT__GPUID_SELECT == -2", MPI_Rank ); Aux_Message( stderr, " (host=%8s) !!\n", Host ); } break; case -1: SetDeviceID = MPI_Rank % DeviceCount; CUDA_CHECK_ERROR( cudaSetDevice( SetDeviceID ) ); if ( MPI_NRank > 1 && MPI_Rank == 0 ) { Aux_Message( stderr, "WARNING : please make sure that different MPI ranks will use different GPUs " ); Aux_Message( stderr, "for \"%s\" !!\n", "OPT__GPUID_SELECT == -1" ); } break; default: SetDeviceID = Mode; if ( SetDeviceID < DeviceCount ) CUDA_CHECK_ERROR( cudaSetDevice( SetDeviceID ) ); else Aux_Error( ERROR_INFO, "SetDeviceID (%d) >= DeviceCount (%d) at MPI_Rank %2d (host = %8s) !!\n", SetDeviceID, DeviceCount, MPI_Rank, Host ); if ( MPI_NRank > 1 && MPI_Rank == 0 ) { Aux_Message( stderr, "WARNING : please make sure that different MPI ranks will use different GPUs " ); Aux_Message( stderr, "for \"%s\" !!\n", "OPT__GPUID_SELECT == -1" ); } break; } // switch ( Mode ) // check // (0) load the device properties and the versions of CUDA and driver int DriverVersion = 0, RuntimeVersion = 0; CUDA_CHECK_ERROR( cudaGetDevice( &GetDeviceID ) ); CUDA_CHECK_ERROR( cudaGetDeviceProperties( 
&DeviceProp, GetDeviceID ) ); CUDA_CHECK_ERROR( cudaDriverGetVersion( &DriverVersion ) ); CUDA_CHECK_ERROR( cudaRuntimeGetVersion( &RuntimeVersion ) ); // (1) verify the device version if ( DeviceProp.major < 1 ) Aux_Error( ERROR_INFO, "\ndevice major version < 1 at MPI_Rank %2d (host = %8s) !!\n", MPI_Rank, Host ); if ( Mode >= -1 ) { // (2) verify that the device ID is properly set if ( GetDeviceID != SetDeviceID ) Aux_Error( ERROR_INFO, "GetDeviceID (%d) != SetDeviceID (%d) at MPI_Rank %2d (host = %8s) !!\n", GetDeviceID, SetDeviceID, MPI_Rank, Host ); // (3) verify that the adopted ID is accessible CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_TempPtr, sizeof(int) ) ); CUDA_CHECK_ERROR( cudaFree( d_TempPtr ) ); } // (4) verify the capability of double precision # ifdef FLOAT8 if ( DeviceProp.major < 2 && DeviceProp.minor < 3 ) Aux_Error( ERROR_INFO, "GPU \"%s\" at MPI_Rank %2d (host = %8s) does not support FLOAT8 !!\n", DeviceProp.name, MPI_Rank, Host ); # endif // (5) verify the GPU architecture # if ( GPU_ARCH == FERMI ) if ( DeviceProp.major != 2 ) Aux_Error( ERROR_INFO, "GPU \"%s\" with the compute capability %d.%d is incompatible with the Fermi architecture !!\n" " --> Please reset GPU_ARCH in the Makefile properly\n", DeviceProp.name, DeviceProp.major, DeviceProp.minor ); # elif ( GPU_ARCH == KEPLER ) if ( DeviceProp.major != 3 ) Aux_Error( ERROR_INFO, "GPU \"%s\" with the compute capability %d.%d is incompatible with the Kepler architecture !!\n" " --> Please reset GPU_ARCH in the Makefile properly\n", DeviceProp.name, DeviceProp.major, DeviceProp.minor ); # elif ( GPU_ARCH == MAXWELL ) if ( DeviceProp.major != 5 ) Aux_Error( ERROR_INFO, "GPU \"%s\" with the compute capability %d.%d is incompatible with the Maxwell architecture !!\n" " --> Please reset GPU_ARCH in the Makefile properly\n", DeviceProp.name, DeviceProp.major, DeviceProp.minor ); # elif ( GPU_ARCH == PASCAL ) if ( DeviceProp.major != 6 ) Aux_Error( ERROR_INFO, "GPU \"%s\" with the compute capability %d.%d is incompatible with the Pascal architecture !!\n" " --> Please reset GPU_ARCH in the Makefile properly\n", DeviceProp.name, DeviceProp.major, DeviceProp.minor ); # elif ( GPU_ARCH == VOLTA ) if ( DeviceProp.major != 7 ) Aux_Error( ERROR_INFO, "GPU \"%s\" with the compute capability %d.%d is incompatible with the Volta architecture !!\n" " --> Please reset GPU_ARCH in the Makefile properly\n", DeviceProp.name, DeviceProp.major, DeviceProp.minor ); # else # error : UNKNOWN GPU_ARCH !! # endif // GPU_ARCH // (6) some options are not supported // (6-1) fluid solver # if ( MODEL == HYDRO ) # if ( FLU_SCHEME == WAF && defined FLOAT8 ) if ( RuntimeVersion < 3020 ) Aux_Error( ERROR_INFO, "double-precision WAF scheme is not supported in CUDA version < 3.2 !!\n" ); # endif # if ( FLU_SCHEME == WAF && RSOLVER == EXACT ) # error : ERROR : Currently WAF scheme does not support the exact Riemann solver !!; # endif # if ( defined FLOAT8 && CHECK_INTERMEDIATE == EXACT && \ ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) ) if ( RuntimeVersion < 3020 ) Aux_Error( ERROR_INFO, "CHECK_INTERMEDIATE == EXACT + FLOAT8 is not supported in CUDA < 3.2 !!" ); # endif # elif ( MODEL == MHD ) # warning : WAIT MHD !!! 
# endif // #if ( MODEL == HYDRO ) // (6-2) SOR Poisson solver # if ( POT_SCHEME == SOR ) # ifdef SOR_USE_SHUFFLE if ( DeviceProp.warpSize != 32 ) Aux_Error( ERROR_INFO, "warp size (%d) != 32 !!\n", DeviceProp.warpSize ); if ( DeviceProp.maxThreadsPerBlock > 1024 ) Aux_Error( ERROR_INFO, "maximum number of threads per block (%d) > 1024 !!\n", DeviceProp.maxThreadsPerBlock ); # endif # ifdef SOR_USE_PADDING if ( DeviceProp.warpSize != 32 ) Aux_Error( ERROR_INFO, "warp size (%d) != 32 !!\n", DeviceProp.warpSize ); if ( POT_GHOST_SIZE != 5 ) Aux_Error( ERROR_INFO, "POT_GHOST_SIZE (%d) != 5 !!\n", POT_GHOST_SIZE ); # endif # endif // if ( POT_SCHEME == SOR ) // (7) warp size if ( DeviceProp.warpSize != WARP_SIZE ) Aux_Error( ERROR_INFO, "inconsistent warp size (warpSize %d, WARP_SIZE %d) !!\n", DeviceProp.warpSize, WARP_SIZE ); if ( MPI_Rank == 0 ) Aux_Message( stdout, "CUAPI_SetDevice ... done\n" ); } // FUNCTION : CUAPI_SetDevice #endif // #ifdef GPU
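CUAPI_SetDevice's Mode == -1 path above binds each MPI rank to a device by taking the rank modulo the local device count, then touches the device with a small cudaMalloc/cudaFree probe so that the context is actually created there. The following sketch shows only that policy, with an assumed helper name (bind_device_by_rank); it is not the solver's real interface.

#include <cuda_runtime.h>
#include <cstdio>

// Bind the calling MPI rank to a local device (rank % device count).
// Returns the chosen device id, or -1 on failure.
int bind_device_by_rank(int mpi_rank)
{
    int count = 0;
    if (cudaGetDeviceCount(&count) != cudaSuccess || count == 0) {
        fprintf(stderr, "no CUDA-capable device found\n");
        return -1;
    }

    const int dev = mpi_rank % count;            // ranks wrap around the devices
    if (cudaSetDevice(dev) != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice(%d) failed\n", dev);
        return -1;
    }

    // A small allocation forces context creation on the chosen device,
    // mirroring the cudaMalloc/cudaFree probe in the file above.
    void *probe = nullptr;
    if (cudaMalloc(&probe, sizeof(int)) == cudaSuccess)
        cudaFree(probe);
    return dev;
}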
bc84a22903254956b1a935a855be2ff05c7f7634.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Ball Query with BatchIdx
Written by Li Jiang
All Rights Reserved 2020.
*/

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define TOTAL_THREADS 1024
#define THREADS_PER_BLOCK 512
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))


/* ================================== ballquery_batch_p ================================== */
__global__ void ballquery_batch_p_cuda_(int n, int meanActive, float radius, const float *xyz, const int *batch_idxs, const int *batch_offsets, int *idx, int *start_len, int *cumsum) {
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (pt_idx >= n) return;

    start_len += (pt_idx * 2);
    int idx_temp[1000];

    float radius2 = radius * radius;
    float o_x = xyz[pt_idx * 3 + 0];
    float o_y = xyz[pt_idx * 3 + 1];
    float o_z = xyz[pt_idx * 3 + 2];

    int batch_idx = batch_idxs[pt_idx];
    int start = batch_offsets[batch_idx];
    int end = batch_offsets[batch_idx + 1];

    int cnt = 0;
    for(int k = start; k < end; k++){
        float x = xyz[k * 3 + 0];
        float y = xyz[k * 3 + 1];
        float z = xyz[k * 3 + 2];
        float d2 = (o_x - x) * (o_x - x) + (o_y - y) * (o_y - y) + (o_z - z) * (o_z - z);
        if(d2 < radius2){
            if(cnt < 1000){
                idx_temp[cnt] = k;
            }
            else{
                break;
            }
            ++cnt;
        }
    }

    start_len[0] = atomicAdd(cumsum, cnt);
    start_len[1] = cnt;

    int thre = n * meanActive;
    if(start_len[0] >= thre) return;

    idx += start_len[0];
    if(start_len[0] + cnt >= thre) cnt = thre - start_len[0];

    for(int k = 0; k < cnt; k++){
        idx[k] = idx_temp[k];
    }
}


int ballquery_batch_p_cuda(int n, int meanActive, float radius, const float *xyz, const int *batch_idxs, const int *batch_offsets, int *idx, int *start_len, hipStream_t stream) {
    // param xyz: (n, 3)
    // param batch_idxs: (n)
    // param batch_offsets: (B + 1)
    // output idx: (n * meanActive) dim 0 for number of points in the ball, idx in n
    // output start_len: (n, 2), int

    hipError_t err;

    dim3 blocks(DIVUP(n, THREADS_PER_BLOCK));
    dim3 threads(THREADS_PER_BLOCK);

    int cumsum = 0;
    int* p_cumsum;
    hipMalloc((void**)&p_cumsum, sizeof(int));
    hipMemcpy(p_cumsum, &cumsum, sizeof(int), hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( ballquery_batch_p_cuda_), dim3(blocks), dim3(threads), 0, stream, n, meanActive, radius, xyz, batch_idxs, batch_offsets, idx, start_len, p_cumsum);

    err = hipGetLastError();
    if (hipSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
        exit(-1);
    }

    hipMemcpy(&cumsum, p_cumsum, sizeof(int), hipMemcpyDeviceToHost);
    return cumsum;
}
bc84a22903254956b1a935a855be2ff05c7f7634.cu
/*
Ball Query with BatchIdx
Written by Li Jiang
All Rights Reserved 2020.
*/

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define TOTAL_THREADS 1024
#define THREADS_PER_BLOCK 512
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))


/* ================================== ballquery_batch_p ================================== */
__global__ void ballquery_batch_p_cuda_(int n, int meanActive, float radius, const float *xyz, const int *batch_idxs, const int *batch_offsets, int *idx, int *start_len, int *cumsum) {
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (pt_idx >= n) return;

    start_len += (pt_idx * 2);
    int idx_temp[1000];

    float radius2 = radius * radius;
    float o_x = xyz[pt_idx * 3 + 0];
    float o_y = xyz[pt_idx * 3 + 1];
    float o_z = xyz[pt_idx * 3 + 2];

    int batch_idx = batch_idxs[pt_idx];
    int start = batch_offsets[batch_idx];
    int end = batch_offsets[batch_idx + 1];

    int cnt = 0;
    for(int k = start; k < end; k++){
        float x = xyz[k * 3 + 0];
        float y = xyz[k * 3 + 1];
        float z = xyz[k * 3 + 2];
        float d2 = (o_x - x) * (o_x - x) + (o_y - y) * (o_y - y) + (o_z - z) * (o_z - z);
        if(d2 < radius2){
            if(cnt < 1000){
                idx_temp[cnt] = k;
            }
            else{
                break;
            }
            ++cnt;
        }
    }

    start_len[0] = atomicAdd(cumsum, cnt);
    start_len[1] = cnt;

    int thre = n * meanActive;
    if(start_len[0] >= thre) return;

    idx += start_len[0];
    if(start_len[0] + cnt >= thre) cnt = thre - start_len[0];

    for(int k = 0; k < cnt; k++){
        idx[k] = idx_temp[k];
    }
}


int ballquery_batch_p_cuda(int n, int meanActive, float radius, const float *xyz, const int *batch_idxs, const int *batch_offsets, int *idx, int *start_len, cudaStream_t stream) {
    // param xyz: (n, 3)
    // param batch_idxs: (n)
    // param batch_offsets: (B + 1)
    // output idx: (n * meanActive) dim 0 for number of points in the ball, idx in n
    // output start_len: (n, 2), int

    cudaError_t err;

    dim3 blocks(DIVUP(n, THREADS_PER_BLOCK));
    dim3 threads(THREADS_PER_BLOCK);

    int cumsum = 0;
    int* p_cumsum;
    cudaMalloc((void**)&p_cumsum, sizeof(int));
    cudaMemcpy(p_cumsum, &cumsum, sizeof(int), cudaMemcpyHostToDevice);

    ballquery_batch_p_cuda_<<<blocks, threads, 0, stream>>>(n, meanActive, radius, xyz, batch_idxs, batch_offsets, idx, start_len, p_cumsum);

    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    cudaMemcpy(&cumsum, p_cumsum, sizeof(int), cudaMemcpyDeviceToHost);
    return cumsum;
}
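A possible calling sequence for ballquery_batch_p_cuda, assuming this sketch is compiled as CUDA and linked against the file above: buffers are sized as documented in the function's own comments ((n, 3) coordinates, (n) batch ids, (B + 1) offsets, (n * meanActive) indices, (n, 2) start/len pairs), and the return value is the total neighbour count accumulated through the atomicAdd on cumsum. The input values here are made up for illustration.

#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

// Defined in the file above; this sketch only declares and calls it.
int ballquery_batch_p_cuda(int n, int meanActive, float radius, const float *xyz,
                           const int *batch_idxs, const int *batch_offsets,
                           int *idx, int *start_len, cudaStream_t stream);

int main()
{
    // Hypothetical input: 4 points in a single batch, all within radius 1.
    const int n = 4, B = 1, meanActive = 4;
    std::vector<float> xyz = {0, 0, 0,  0.1f, 0, 0,  0, 0.1f, 0,  0, 0, 0.1f};  // (n, 3)
    std::vector<int>   batch_idxs(n, 0);                                        // (n)
    std::vector<int>   batch_offsets = {0, n};                                  // (B + 1)

    float *d_xyz;  int *d_bidx, *d_boff, *d_idx, *d_start_len;
    cudaMalloc((void **)&d_xyz, xyz.size() * sizeof(float));
    cudaMalloc((void **)&d_bidx, n * sizeof(int));
    cudaMalloc((void **)&d_boff, (B + 1) * sizeof(int));
    cudaMalloc((void **)&d_idx, n * meanActive * sizeof(int));   // (n * meanActive)
    cudaMalloc((void **)&d_start_len, n * 2 * sizeof(int));      // (n, 2)

    cudaMemcpy(d_xyz, xyz.data(), xyz.size() * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_bidx, batch_idxs.data(), n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_boff, batch_offsets.data(), (B + 1) * sizeof(int), cudaMemcpyHostToDevice);

    int total = ballquery_batch_p_cuda(n, meanActive, 1.0f, d_xyz, d_bidx, d_boff,
                                       d_idx, d_start_len, 0);  // default stream
    printf("total neighbours found: %d\n", total);              // 16 for this input

    cudaFree(d_xyz); cudaFree(d_bidx); cudaFree(d_boff);
    cudaFree(d_idx); cudaFree(d_start_len);
    return 0;
}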
013432fadb20b3197a7b7b58137d769722682492.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "../cuda_debug.h"
#include "../cuda_note.h"

#define LEN 2000000  // define the size of input/output data
#define BLOCK_DIM 256

int main(){
    size_t maskSize = sizeof(float) * MASK_WIDTH;
    size_t dataSize = sizeof(float) * LEN;

    float *mask = (float *) malloc(maskSize);
    float *h_input = (float *) malloc(dataSize);
    float *h_output = (float *) malloc(dataSize);

    initial2DMatrix<float>(h_input, 1, LEN, 1);
    initial2DMatrix<float>(mask, 1, MASK_WIDTH, 0);
    //peakMatrix(mask, 1, MASK_WIDTH);

    float *d_input, *d_output;

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    hipMalloc(&d_input, dataSize);
    hipMalloc(&d_output, dataSize);

    hipMemcpy(d_input, h_input, dataSize, hipMemcpyHostToDevice);
    hipMemcpyToSymbol(d_mask, mask, maskSize);

    dim3 block(BLOCK_DIM);
    dim3 grid(ceil(LEN/(float)BLOCK_DIM));

    hipEventRecord(start);
    hipLaunchKernelGGL(( convolution1D), dim3(grid), dim3(block), 0, 0, d_input, d_output, LEN, MASK_WIDTH);
    hipEventRecord(stop);
    hipEventSynchronize(stop);

    hipMemcpy(h_output, d_output, dataSize, hipMemcpyDeviceToHost);

    float ms = 0;
    hipEventElapsedTime(&ms, start, stop);
    printf("Kernel execution time is %f ms \n", ms);

    hipFree(d_input);
    hipFree(d_output);

    check1Dconvolution<float>(h_input, mask, h_output, LEN, MASK_WIDTH);

    free(h_input);
    free(h_output);
    free(mask);

    return 0;
}
013432fadb20b3197a7b7b58137d769722682492.cu
#include <iostream>
#include "../cuda_debug.h"
#include "../cuda_note.h"

#define LEN 2000000  // define the size of input/output data
#define BLOCK_DIM 256

int main(){
    size_t maskSize = sizeof(float) * MASK_WIDTH;
    size_t dataSize = sizeof(float) * LEN;

    float *mask = (float *) malloc(maskSize);
    float *h_input = (float *) malloc(dataSize);
    float *h_output = (float *) malloc(dataSize);

    initial2DMatrix<float>(h_input, 1, LEN, 1);
    initial2DMatrix<float>(mask, 1, MASK_WIDTH, 0);
    //peakMatrix(mask, 1, MASK_WIDTH);

    float *d_input, *d_output;

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaMalloc(&d_input, dataSize);
    cudaMalloc(&d_output, dataSize);

    cudaMemcpy(d_input, h_input, dataSize, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(d_mask, mask, maskSize);

    dim3 block(BLOCK_DIM);
    dim3 grid(ceil(LEN/(float)BLOCK_DIM));

    cudaEventRecord(start);
    convolution1D<<<grid, block>>>(d_input, d_output, LEN, MASK_WIDTH);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    cudaMemcpy(h_output, d_output, dataSize, cudaMemcpyDeviceToHost);

    float ms = 0;
    cudaEventElapsedTime(&ms, start, stop);
    printf("Kernel execution time is %f ms \n", ms);

    cudaFree(d_input);
    cudaFree(d_output);

    check1Dconvolution<float>(h_input, mask, h_output, LEN, MASK_WIDTH);

    free(h_input);
    free(h_output);
    free(mask);

    return 0;
}
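The program above verifies the device result with check1Dconvolution, which lives in the included headers and is not shown here, so the exact boundary handling is not visible. The host reference below is a sketch of what such a check presumably computes, assuming zero padding at the borders (mask taps that fall outside the signal contribute nothing); treat that boundary rule as an assumption about those headers.

#include <vector>

// Direct 1D convolution of `input` with a centred `mask`, zero-padded at the
// ends. Complexity is O(len * mask_width); fine as a correctness reference.
std::vector<float> convolve1D_host(const std::vector<float> &input,
                                   const std::vector<float> &mask)
{
    const int len  = (int)input.size();
    const int mw   = (int)mask.size();
    const int half = mw / 2;

    std::vector<float> output(len, 0.0f);
    for (int i = 0; i < len; ++i) {
        float acc = 0.0f;
        for (int j = 0; j < mw; ++j) {
            int k = i - half + j;          // input index under mask tap j
            if (k >= 0 && k < len)         // out-of-range taps contribute 0
                acc += input[k] * mask[j];
        }
        output[i] = acc;
    }
    return output;
}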
aaccf41ae7df664b96c1887a1fffcaa68e1d03e3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../inc/cudarkdgsolver.h" CCUDARkdgSolver::CCUDARkdgSolver(): num_commu(0), title("Unknown case"), alpha(5.0), gamma(1.4), mach(0.4), cfl(0.18), rhoref(1.0), pref(1.0), _terminal_time(15), log_history('Y'), print_interval(1000), gridconf("input/mesh.conf"), solution_file("output/solution.dat"), log_file("output/log.dat"), residual_file("output/residual.dat"), threads_per_block(512), reduction_threads(512), _freedom_rho(NULL), _freedom_rhou(NULL), _freedom_rhov(NULL), _freedom_rhoE(NULL), _dt(NULL), _residual(NULL) {} void CCUDARkdgSolver::detectCUDADevice( void ) { int count(0); hipGetDeviceCount( &count ); cout<<"count:"<<count; /*if ( 0==count ) throw CMyException("No device surpports CUDA found!");*/ //if ( count < nprocs) { // throw CMyException("No enough device surpports CUDA found!"); //} hipDeviceProp_t prop; bool double_support(false); /*for ( int i=0; i<count; ++i ) { hipGetDeviceProperties( &prop, i ); if ( prop.major>1 ) { double_support = true; break; } } if ( !double_support ) throw CMyException("No device has capability of 2.0 or higher is found!");*/ int double_support_count(0); for ( int i=0; i<count; ++i ) { hipGetDeviceProperties( &prop, i ); if ( prop.major>1 ) { double_support_count++; if (double_support_count == myid) { hipSetDevice(i); } } cout<<"gpu("<<i<<"):"<<prop.major<<"."<<prop.minor<<endl; } // if ( double_support_count < nprocs ) // throw CMyException("No enough device has capability of 2.0 or higher is found!"); /*memset( &prop, 0, sizeof(hipDeviceProp_t) ); prop.major = 2; prop.minor = 0; int devid; hipChooseDevice(&devid, &prop);*/ //cout<<"\nThere are "<<count<<" device surpports CUDA, and the "<<devid+1<<"th device will be used."<<endl; } void CCUDARkdgSolver::initConfig(void) { string conf_items[] = { "title", "gamma", "alpha", "mach", "cfl", "rhoref","pref", "time", "gridconf", "logfile", "solutionfile", "residualfile", "threadsperblock", "reductionthreads", "loghistory", "printinterval" }; CConfig program_conf(config_file, conf_items, 16); program_conf.parseConfigFile(); // if ( program_conf.config_items["title"]!="" ) title = program_conf.config_items["title"]; if ( program_conf.config_items["gamma"]!="" ) gamma = atof(program_conf.config_items["gamma"].c_str()); if ( program_conf.config_items["alpha"]!="" ) alpha = atof(program_conf.config_items["alpha"].c_str())*atan(1.0)*4 / 180; if ( program_conf.config_items["mach"]!="" ) mach = atof(program_conf.config_items["mach"].c_str()); if ( program_conf.config_items["cfl"]!="" ) cfl = atof(program_conf.config_items["cfl"].c_str()); if ( program_conf.config_items["rhoref"]!="" ) rhoref = atof(program_conf.config_items["rhoref"].c_str()); if ( program_conf.config_items["pref"]!="" ) pref = atof(program_conf.config_items["pref"].c_str()); if ( program_conf.config_items["time"]!="" ) _terminal_time = atof(program_conf.config_items["time"].c_str()); if ( program_conf.config_items["gridconf"]!="" ) gridconf = program_conf.config_items["gridconf"]; if ( program_conf.config_items["solutionfile"]!="" ) solution_file = program_conf.config_items["solutionfile"]; if ( program_conf.config_items["logfile"]!="" ) log_file = program_conf.config_items["logfile"]; if ( program_conf.config_items["residualfile"]!="" ) residual_file = program_conf.config_items["residualfile"]; if ( program_conf.config_items["loghistory"]!="" ) log_history = toupper(program_conf.config_items["loghistory"].at(0)); if ( 
program_conf.config_items["threadsperblock"]!="" ) threads_per_block = atoi(program_conf.config_items["threadsperblock"].c_str()); if ( program_conf.config_items["printinterval"]!="" ) print_interval = abs(atoi(program_conf.config_items["printinterval"].c_str())); if ( program_conf.config_items["reductionthreads"]!="" ) reduction_threads = atoi(program_conf.config_items["reductionthreads"].c_str()); } void CCUDARkdgSolver::run(int myid, int nprocs) { this->myid = myid; this->nprocs = nprocs; ofstream fout(log_file.c_str()); if ( !fout ) throw CMyException("Failed to open log file: "+log_file); CMyTime mt; fout<<mt.getCurrentTime()<<": programs starts"<<endl; // CUDA detectCUDADevice(); fout<<mt.getCurrentTime()<<": Device with capability of 2.0 is found."<<endl; // initConfig(); fout<<mt.getCurrentTime()<<": initialize configure from file."<<endl<<endl; fout<<"Title: "<<title<<endl<<endl; printConfig(cout); printConfig(fout); fout<<mt.getCurrentTime()<<": reading grid information."<<endl; // grid.config_file = gridconf; grid.initializeGrid(myid, nprocs); fout.close(); } void CCUDARkdgSolver::runNext() { grid.initializeGridNext(); //grid.outputGrid(); //grid.outputGridWithGhostCells("output/ghostmesh.plt"); //fout<<mt.getCurrentTime()<<": complete grid initialization."<<endl; // // grid.testTrianglesAntiwise(); grid.testLocalTriangleAntiwise(); // // grid.triangle_infos.allocateMemory(grid.getCellNumber()); grid.triangle_infos.allocateMemory(grid.getLocalCellNumber()); grid.initializeTriangleInfos(); // fout<<mt.getCurrentTime()<<": complete grid information initialization."<<endl; // // grid.markBoundaryTriangles(); grid.markLocalBoundaryTriangles(); // GPU // _cuarrays.allocateMemory(grid.getCellNumber()); _cuarrays.allocateMemory(grid.getLocalCellNumber()); // GPU copyTriangleInfosToGPU(); // RKDGGPU initRKDG(); // fout<<mt.getCurrentTime()<<": program initialization complete."<<endl; // fout<<mt.getCurrentTime()<<": begin to solve flow."<<endl<<endl; /** */ //mt.beginTimer(); //rkdgAdvance(); // fout.close(); } void CCUDARkdgSolver::runAfter() { //mt.endTimer(); //fout<<"RKDG performance:"<<endl; //fout<<"CPU time: "<<mt.getCPUElapsedTime()<<" s"<<endl; //fout<<"wall time: "<<mt.getWallElapsedTime()<<" s"<<endl<<endl; //fout<<mt.getCurrentTime()<<": complete solving flow."<<endl; // copyFreedomToHost(); // //outputSolution(); //fout<<mt.getCurrentTime()<<": complete solution output."<<endl; //fout.close(); } void CCUDARkdgSolver::copyFreedomToHost() { size_t size = sizeof(double)*grid.getLocalCellNumber(); size_t pitch = _cuarrays.getDoublePitch(); hipMemcpy2D(_freedom_rho, size, _cuarrays.freedom_rho, pitch, size, BASIS_FUNCTIONS, hipMemcpyDeviceToHost); hipMemcpy2D(_freedom_rhou, size, _cuarrays.freedom_rhou, pitch, size, BASIS_FUNCTIONS, hipMemcpyDeviceToHost); hipMemcpy2D(_freedom_rhov, size, _cuarrays.freedom_rhov, pitch, size, BASIS_FUNCTIONS, hipMemcpyDeviceToHost); hipMemcpy2D(_freedom_rhoE, size, _cuarrays.freedom_rhoE, pitch, size, BASIS_FUNCTIONS, hipMemcpyDeviceToHost); hipDeviceSynchronize(); } void CCUDARkdgSolver::copyTriangleInfosToGPU(void) { // int num = grid.getCellNumber(); int num = grid.getLocalCellNumber(); size_t int_pitch = _cuarrays.getIntPitch(); size_t double_pitch = _cuarrays.getDoublePitch(); // hipMemcpy2DAsync(_cuarrays.neighbour, int_pitch, grid.tri_neighbour, sizeof(int)*num, sizeof(int)*num, TRIANGLE_EDGES, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.neighbour, int_pitch, grid.local_tri_neighbour, sizeof(int)*num, sizeof(int)*num, 
TRIANGLE_EDGES, hipMemcpyHostToDevice); // hipMemcpy2DAsync(_cuarrays.sharedEdge, int_pitch, grid.tri_sharedEdge, sizeof(int)*num, sizeof(int)*num, TRIANGLE_EDGES, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.sharedEdge, int_pitch, grid.local_tri_sharedEdge, sizeof(int)*num, sizeof(int)*num, TRIANGLE_EDGES, hipMemcpyHostToDevice); // hipMemcpy2DAsync(_cuarrays.triangle_flag, int_pitch, grid.tri_flag, sizeof(int)*num, sizeof(int)*num, 1, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.triangle_flag, int_pitch, grid.local_tri_flag, sizeof(int)*num, sizeof(int)*num, 1, hipMemcpyHostToDevice); size_t gsize = sizeof(double)*num; /*hipMemcpy2DAsync(_cuarrays.area, double_pitch, grid.triangle_infos.area, gsize, gsize, 1, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.perimeter, double_pitch, grid.triangle_infos.perimeter, gsize, gsize, 1, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.outer_normal_vector, double_pitch, grid.triangle_infos.outer_normal_vector, gsize, gsize, TRIANGLE_EDGES*2, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.mass_coeff, double_pitch, grid.triangle_infos.mass_coeff, gsize, gsize, BASIS_FUNCTIONS, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.vol_bf_value, double_pitch, grid.triangle_infos.vol_bf_value, gsize, gsize, VOLUME_GPOINTS*BASIS_FUNCTIONS, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.vol_bdf_value, double_pitch, grid.triangle_infos.vol_bdf_value, gsize, gsize, VOLUME_GPOINTS*BASIS_FUNCTIONS*2, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.edge_bf_value, double_pitch, grid.triangle_infos.edge_bf_value, gsize, gsize, TRIANGLE_EDGES*EDGE_GPOINTS*BASIS_FUNCTIONS, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.vol_gauss_weight, double_pitch, grid.triangle_infos.vol_gauss_weight, gsize, gsize, VOLUME_GPOINTS, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.edge_gauss_weight, double_pitch, grid.triangle_infos.edge_gauss_weight, gsize, gsize, EDGE_GPOINTS*TRIANGLE_EDGES, hipMemcpyHostToDevice);*/ hipMemcpy2DAsync(_cuarrays.area, double_pitch, grid.triangle_infos.area, gsize, gsize, 1, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.perimeter, double_pitch, grid.triangle_infos.perimeter, gsize, gsize, 1, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.outer_normal_vector, double_pitch, grid.triangle_infos.outer_normal_vector, gsize, gsize, TRIANGLE_EDGES*2, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.mass_coeff, double_pitch, grid.triangle_infos.mass_coeff, gsize, gsize, BASIS_FUNCTIONS, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.vol_bf_value, double_pitch, grid.triangle_infos.vol_bf_value, gsize, gsize, VOLUME_GPOINTS*BASIS_FUNCTIONS, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.vol_bdf_value, double_pitch, grid.triangle_infos.vol_bdf_value, gsize, gsize, VOLUME_GPOINTS*BASIS_FUNCTIONS*2, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.edge_bf_value, double_pitch, grid.triangle_infos.edge_bf_value, gsize, gsize, TRIANGLE_EDGES*EDGE_GPOINTS*BASIS_FUNCTIONS, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.vol_gauss_weight, double_pitch, grid.triangle_infos.vol_gauss_weight, gsize, gsize, VOLUME_GPOINTS, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.edge_gauss_weight, double_pitch, grid.triangle_infos.edge_gauss_weight, gsize, gsize, EDGE_GPOINTS*TRIANGLE_EDGES, hipMemcpyHostToDevice); if ( hipPeekAtLastError()!=hipSuccess ) { throw CMyException(hipGetErrorString(hipPeekAtLastError())); } } void CCUDARkdgSolver::initRKDG() { // int num = grid.getCellNumber(); int num = 
grid.getLocalCellNumber(); double ut = sqrt(gamma*pref/rhoref)*mach; double u = ut * cos(alpha); double v = ut * sin(alpha); // _freedom_rho = new double[num*BASIS_FUNCTIONS]; _freedom_rhou = new double[num*BASIS_FUNCTIONS]; _freedom_rhov = new double[num*BASIS_FUNCTIONS]; _freedom_rhoE = new double[num*BASIS_FUNCTIONS]; hipHostMalloc((void**)&_dt, sizeof(double), hipHostMallocDefault); hipHostMalloc((void**)&_residual, sizeof(double)*RESIDUAL_VARS, hipHostMallocDefault); if ( hipPeekAtLastError()!=hipSuccess ) throw CMyException(hipGetErrorString(hipPeekAtLastError())); // for ( int i=0; i<num; ++i ) { _freedom_rho[i] = rhoref; //_freedom_rho[i] = myid + 1; _freedom_rhou[i] = rhoref *u; _freedom_rhov[i] = rhoref*v; _freedom_rhoE[i] = rhoref*(ut*ut)/2 + pref/(rhoref*(gamma-1)); } int dev_pitch = _cuarrays.getDoublePitch(); int host_pitch = sizeof(double)*num; hipMemsetAsync(_cuarrays.freedom_rho, 0, dev_pitch*BASIS_FUNCTIONS); hipMemsetAsync(_cuarrays.freedom_rhou, 0, dev_pitch*BASIS_FUNCTIONS); hipMemsetAsync(_cuarrays.freedom_rhov, 0, dev_pitch*BASIS_FUNCTIONS); hipMemsetAsync(_cuarrays.freedom_rhoE, 0, dev_pitch*BASIS_FUNCTIONS); hipMemcpy2DAsync(_cuarrays.freedom_rho, dev_pitch, _freedom_rho, host_pitch, host_pitch, 1, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.freedom_rhou, dev_pitch, _freedom_rhou, host_pitch, host_pitch, 1, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.freedom_rhov, dev_pitch, _freedom_rhov, host_pitch, host_pitch, 1, hipMemcpyHostToDevice); hipMemcpy2DAsync(_cuarrays.freedom_rhoE, dev_pitch, _freedom_rhoE, host_pitch, host_pitch, 1, hipMemcpyHostToDevice); } void CCUDARkdgSolver::getTimeStep(int tnum) { hipLaunchKernelGGL(( kernel_getTimeStep), dim3(1),dim3(reduction_threads), sizeof(double)*reduction_threads, 0, tnum, gamma, cfl, _cuarrays.ddt, _cuarrays.freedom_rho, _cuarrays.freedom_rhou, _cuarrays.freedom_rhov, _cuarrays.freedom_rhoE, _cuarrays.perimeter, _cuarrays.area ); } void CCUDARkdgSolver::calculateConVars(int tnum, int double_pitch, int blocks) { size_t size = sizeof(double)*threads_per_block*CONSERVATIVE_VARS; hipLaunchKernelGGL(( kernel_calculateConVars), dim3(blocks),dim3(threads_per_block), size, 0, tnum, double_pitch, _cuarrays.freedom_rho, _cuarrays.freedom_rhou, _cuarrays.freedom_rhov, _cuarrays.freedom_rhoE, _cuarrays.convar_rho_vol, _cuarrays.convar_rhou_vol, _cuarrays.convar_rhov_vol, _cuarrays.convar_rhoE_vol, _cuarrays.convar_rho_edge, _cuarrays.convar_rhou_edge, _cuarrays.convar_rhov_edge, _cuarrays.convar_rhoE_edge, _cuarrays.vol_bf_value, _cuarrays.edge_bf_value ); } void CCUDARkdgSolver::boundaryCondition(int tnum, int num, int double_pitch, double rho, double rhou, double rhov, double rhoE) { // block int threads = 64; int blocks = ((num-tnum)%threads) ? 
(num-tnum)/threads+1 : (num-tnum)/threads; hipLaunchKernelGGL(( kernel_boundaryCondition), dim3(blocks),dim3(threads), 0, 0, tnum, num, double_pitch, rho, rhou, rhov, rhoE, _cuarrays.convar_rho_edge, _cuarrays.convar_rhou_edge, _cuarrays.convar_rhov_edge, _cuarrays.convar_rhoE_edge, _cuarrays.freedom_rho, _cuarrays.freedom_rhou, _cuarrays.freedom_rhov, _cuarrays.freedom_rhoE, _cuarrays.neighbour, _cuarrays.sharedEdge, _cuarrays.triangle_flag, _cuarrays.outer_normal_vector ); } void CCUDARkdgSolver::calculateVolumeRHS(int tnum, int double_pitch, int blocks) { size_t size = sizeof(double)*threads_per_block*VOLUME_GPOINTS; hipLaunchKernelGGL(( kernel_calculateVolumeRHS), dim3(blocks), dim3(threads_per_block), size, 0, tnum, double_pitch, gamma, _cuarrays.convar_rho_vol, _cuarrays.convar_rhou_vol, _cuarrays.convar_rhov_vol, _cuarrays.convar_rhoE_vol, _cuarrays.rhs_volume_rho, _cuarrays.rhs_volume_rhou, _cuarrays.rhs_volume_rhov, _cuarrays.rhs_volume_rhoE, _cuarrays.vol_gauss_weight, _cuarrays.vol_bdf_value ); } void CCUDARkdgSolver::calculateLFCoeff(int tnum, int ipitch_num, int dpitch_num, int blocks) { hipLaunchKernelGGL(( kernel_calculateLFCoeff), dim3(blocks), dim3(threads_per_block), 0, 0, tnum, ipitch_num, dpitch_num, gamma, _cuarrays.outer_normal_vector, _cuarrays.neighbour, _cuarrays.freedom_rho, _cuarrays.freedom_rhou, _cuarrays.freedom_rhov, _cuarrays.freedom_rhoE, _cuarrays.lfflux_coeff ); } void CCUDARkdgSolver::calculateEdgeFG(int tnum, int num, int double_pitch, int blocks) { // blocks = (num%threads_per_block) ? num/threads_per_block+1 : num/threads_per_block; hipLaunchKernelGGL(( kernel_calculateEdgeFG), dim3(blocks), dim3(threads_per_block), 0, 0, tnum, num, double_pitch, gamma, _cuarrays.convar_rho_edge, _cuarrays.convar_rhou_edge, _cuarrays.convar_rhov_edge, _cuarrays.convar_rhoE_edge, _cuarrays.fedge_rho, _cuarrays.fedge_rhou, _cuarrays.fedge_rhov, _cuarrays.fedge_rhoE, _cuarrays.gedge_rho, _cuarrays.gedge_rhou, _cuarrays.gedge_rhov, _cuarrays.gedge_rhoE ); } void CCUDARkdgSolver::calculateFlux(int tnum, int int_pitch, int double_pitch, int blocks) { /* hipLaunchKernelGGL(( kernel_calculateFlux), dim3(blocks), dim3(threads_per_block), 0, 0, tnum, int_pitch, double_pitch, _cuarrays.neighbour, _cuarrays.sharedEdge, _cuarrays.convar_rho_edge, _cuarrays.convar_rhou_edge, _cuarrays.convar_rhov_edge, _cuarrays.convar_rhoE_edge, _cuarrays.fedge_rho, _cuarrays.fedge_rhou, _cuarrays.fedge_rhov, _cuarrays.fedge_rhoE, _cuarrays.gedge_rho, _cuarrays.gedge_rhou, _cuarrays.gedge_rhov, _cuarrays.gedge_rhoE, _cuarrays.outer_normal_vector, _cuarrays.lfflux_coeff, _cuarrays.lfflux_rho, _cuarrays.lfflux_rhou, _cuarrays.lfflux_rhov, _cuarrays.lfflux_rhoE ); */ hipLaunchKernelGGL(( kernel_calculateFlux), dim3(blocks), dim3(threads_per_block), 0, 0, tnum, int_pitch, double_pitch, _cuarrays.neighbour, _cuarrays.sharedEdge, _cuarrays.convar_rho_edge, _cuarrays.convar_rhou_edge, // _cuarrays.convar_rhov_edge, _cuarrays.convar_rhoE_edge, _cuarrays.fedge_rho, _cuarrays.fedge_rhou, // _cuarrays.fedge_rhov, _cuarrays.fedge_rhoE, _cuarrays.gedge_rho, _cuarrays.gedge_rhou, // _cuarrays.gedge_rhov, _cuarrays.gedge_rhoE, _cuarrays.outer_normal_vector, _cuarrays.lfflux_coeff, _cuarrays.lfflux_rho, _cuarrays.lfflux_rhou // _cuarrays.lfflux_rhov, _cuarrays.lfflux_rhoE ); hipLaunchKernelGGL(( kernel_calculateFlux), dim3(blocks), dim3(threads_per_block), 0, 0, tnum, int_pitch, double_pitch, _cuarrays.neighbour, _cuarrays.sharedEdge, // _cuarrays.convar_rho_edge, _cuarrays.convar_rhou_edge, 
_cuarrays.convar_rhov_edge, _cuarrays.convar_rhoE_edge, // _cuarrays.fedge_rho, _cuarrays.fedge_rhou, _cuarrays.fedge_rhov, _cuarrays.fedge_rhoE, // _cuarrays.gedge_rho, _cuarrays.gedge_rhou, _cuarrays.gedge_rhov, _cuarrays.gedge_rhoE, _cuarrays.outer_normal_vector, _cuarrays.lfflux_coeff, // _cuarrays.lfflux_rho, _cuarrays.lfflux_rhou, _cuarrays.lfflux_rhov, _cuarrays.lfflux_rhoE ); } void CCUDARkdgSolver::calculateEdgeRHS(int tnum, int double_pitch, int blocks) { size_t size = sizeof(double)*threads_per_block*TRIANGLE_EDGES*EDGE_GPOINTS; hipLaunchKernelGGL(( kernel_calculateEdgeRHS), dim3(blocks), dim3(threads_per_block), size, 0, tnum, double_pitch, _cuarrays.edge_gauss_weight, _cuarrays.edge_bf_value, _cuarrays.lfflux_rho, _cuarrays.lfflux_rhou, _cuarrays.lfflux_rhov, _cuarrays.lfflux_rhoE, _cuarrays.rhs_edge_rho, _cuarrays.rhs_edge_rhou, _cuarrays.rhs_edge_rhov, _cuarrays.rhs_edge_rhoE, _cuarrays.rhs_volume_rho, _cuarrays.rhs_volume_rhou, _cuarrays.rhs_volume_rhov, _cuarrays.rhs_volume_rhoE ); } void CCUDARkdgSolver::rkdgStepOne(double dt, int tnum, int double_pitch, int blocks) { hipLaunchKernelGGL(( kernel_rkdgStepOne), dim3(blocks), dim3(threads_per_block), 0, 0, tnum, double_pitch, dt, _cuarrays.mass_coeff, _cuarrays.freedom_rho, _cuarrays.freedom_rhou, _cuarrays.freedom_rhov, _cuarrays.freedom_rhoE, _cuarrays.rhs_edge_rho, _cuarrays.rhs_edge_rhou, _cuarrays.rhs_edge_rhov, _cuarrays.rhs_edge_rhoE ); } void CCUDARkdgSolver::rkdgStepTwo(double dt, int tnum, int double_pitch, int blocks) { hipLaunchKernelGGL(( kernel_rkdgStepTwo), dim3(blocks), dim3(threads_per_block), 0, 0, tnum, double_pitch, dt, _cuarrays.mass_coeff, _cuarrays.freedom_rho, _cuarrays.freedom_rhou, _cuarrays.freedom_rhov, _cuarrays.freedom_rhoE, _cuarrays.rhs_edge_rho, _cuarrays.rhs_edge_rhou, _cuarrays.rhs_edge_rhov, _cuarrays.rhs_edge_rhoE, _cuarrays.freedom_rho_old, _cuarrays.freedom_rhou_old, _cuarrays.freedom_rhov_old, _cuarrays.freedom_rhoE_old ); } void CCUDARkdgSolver::rkdgStepThree(double dt, int tnum, int double_pitch, int blocks) { hipLaunchKernelGGL(( kernel_rkdgStepThree), dim3(blocks), dim3(threads_per_block), 0, 0, tnum, double_pitch, dt, _cuarrays.mass_coeff, _cuarrays.freedom_rho, _cuarrays.freedom_rhou, _cuarrays.freedom_rhov, _cuarrays.freedom_rhoE, _cuarrays.rhs_edge_rho, _cuarrays.rhs_edge_rhou, _cuarrays.rhs_edge_rhov, _cuarrays.rhs_edge_rhoE, _cuarrays.freedom_rho_old, _cuarrays.freedom_rhou_old, _cuarrays.freedom_rhov_old, _cuarrays.freedom_rhoE_old ); } void CCUDARkdgSolver::calculateResidual(int tnum) { hipLaunchKernelGGL(( kernel_calculateResidual), dim3(1),dim3(reduction_threads), sizeof(double)*reduction_threads*RESIDUAL_VARS, 0, tnum, _cuarrays.freedom_rho, _cuarrays.freedom_rhoE, _cuarrays.freedom_rho_old, _cuarrays.freedom_rhoE_old, _cuarrays.residual ); } //void CCUDARkdgSolver::rkdgAdvance(void) //{ // ofstream fout; // if ( log_history=='Y' ) // { // fout.open(residual_file.c_str()); // if ( !fout ) // throw CMyException("Failed to open residual log file: "+residual_file); // // fout<<"N, rho"<<endl; // } // // // double nt(0); // int count(0); // // /*int tnum = grid.getTriangleNumber(); // int num = grid.getCellNumber();*/ // int tnum = grid.getLocalTriangleNumber(); // int num = grid.getLocalCellNumber(); // // // int blocks = (tnum%threads_per_block) ? 
tnum/threads_per_block+1 : tnum/threads_per_block; // // double ut = sqrt(gamma*pref/rhoref)*mach; // double rhou = rhoref*ut*cos(alpha); // double rhov = rhoref*ut*sin(alpha); // double rhoE = 0.5*rhoref*(ut*ut) + pref/(gamma-1); // // bool copy(false); // // hipError_t error; // size_t pitch = _cuarrays.getDoublePitch(); // int pitch_num = pitch / sizeof(double); // int ipitch_num = _cuarrays.getIntPitch() / sizeof(int); // // hipEvent_t time_start, time_stop; // // hipEventCreateWithFlags(&time_start, hipEventDisableTiming|hipEventBlockingSync); // hipEventCreateWithFlags(&time_stop, hipEventDisableTiming|hipEventBlockingSync); // // if ( log_history=='Y' ) // copy = true; // // // CUDA // hipDeviceSynchronize(); // // do // { // ++ count; // // hipEventRecord(time_start); // // // getTimeStep(tnum); // // hipEventRecord(time_stop); // // // // /*hipMemcpy2DAsync(_cuarrays.freedom_rho_old, pitch, _cuarrays.freedom_rho, pitch, pitch, BASIS_FUNCTIONS, hipMemcpyDeviceToDevice); // hipMemcpy2DAsync(_cuarrays.freedom_rhou_old, pitch, _cuarrays.freedom_rhou, pitch, pitch, BASIS_FUNCTIONS, hipMemcpyDeviceToDevice); // hipMemcpy2DAsync(_cuarrays.freedom_rhov_old, pitch, _cuarrays.freedom_rhov, pitch, pitch, BASIS_FUNCTIONS, hipMemcpyDeviceToDevice); // hipMemcpy2DAsync(_cuarrays.freedom_rhoE_old, pitch, _cuarrays.freedom_rhoE, pitch, pitch, BASIS_FUNCTIONS, hipMemcpyDeviceToDevice);*/ // // hipMemcpy2DAsync(_freedom_rho, pitch, _cuarrays.freedom_rho, pitch, pitch, BASIS_FUNCTIONS, hipMemcpyDeviceToHost); // hipMemcpy2DAsync(_freedom_rho, pitch, _cuarrays.freedom_rho, pitch, pitch, BASIS_FUNCTIONS, hipMemcpyDeviceToHost); // hipMemcpy2DAsync(_freedom_rho, pitch, _cuarrays.freedom_rho, pitch, pitch, BASIS_FUNCTIONS, hipMemcpyDeviceToHost); // hipMemcpy2DAsync(_freedom_rho, pitch, _cuarrays.freedom_rho, pitch, pitch, BASIS_FUNCTIONS, hipMemcpyDeviceToHost); // // // // MPI_Request request[4], request1; // MPI_Status status; // int x = 1; //0 // if(myid = 0 && count % 5 == 0) { // for (int i = 1; i < nprocs; i++) { // MPI_Isend(&x, 1, MPI_INT, i, 0, MPI_COMM_WORLD, &request1); // } // commuInfo(); // } else if(myid != 0) { // int flag; // MPI_Status status; // if (count == 0) { // MPI_Irecv(&x,1, MPI_INT, 0, 0, MPI_COMM_WORLD, &request1); // } // MPI_Test(&request1, &flag, &status); // if (flag == 1 && x == 1) { // commuInfo(); // MPI_Irecv(&x,1, MPI_INT, 0, 0, MPI_COMM_WORLD, &request1); // } // } // // // for ( int i=0; i<RUNGE_KUTTA_STEPS; ++i ) // { // // // calculateConVars(tnum, pitch_num, blocks); // // // // boundaryCondition(tnum, num, pitch_num, rhoref, rhou, rhov, rhoE); // // // // calculateVolumeRHS(tnum, pitch_num, blocks); // // // LF // calculateLFCoeff(tnum, ipitch_num, pitch_num, blocks); // // // f, g // calculateEdgeFG(tnum, num, pitch_num, blocks); // // calculateFlux(tnum, ipitch_num, pitch_num, blocks); // // // // calculateEdgeRHS(tnum, pitch_num, blocks); // // // // switch (i) // { // case 0: // hipEventSynchronize(time_stop); // // // // hipMemcpy(_dt, _cuarrays.ddt, sizeof(double), hipMemcpyDeviceToHost); // // if ( 0==(count-1)%print_interval ) // cout<<"Step: "<<count<<", time step: "<<_dt[0]<<endl; // // if ( (_terminal_time-nt)<_dt[0] ) // { // _dt[0] = _terminal_time - nt; // } // // // // rkdgStepOne(_dt[0], tnum, pitch_num, blocks); // // break; // // case 1: // rkdgStepTwo(_dt[0], tnum, pitch_num, blocks); // break; // // case 2: // rkdgStepThree(_dt[0], tnum, pitch_num, blocks); // break; // // default: // throw CMyException("impossible case!"); // break; // } 
// } // // // if ( copy && (count-1) ) // { // // // hipMemcpy(_residual, _cuarrays.residual, // sizeof(double)*RESIDUAL_VARS, hipMemcpyDeviceToHost); // // if ( 0==(count-1)%print_interval ) // cout<<"Current time: "<<nt<<" rhomax: "<<_residual[0]/rhoref<<" E: "<<_residual[1]/rhoE<<endl; // // fout<<count<<" "<<log(_residual[0]/rhoref)/log(10.0)<<endl; // } // // // // calculateResidual(tnum); // // // // // nt += _dt[0]; // // error = hipPeekAtLastError(); // if ( error!=hipSuccess ) // throw CMyException(hipGetErrorString(error)); // // } while ( nt<_terminal_time ); // // hipDeviceSynchronize(); // // if ( copy ) // { // // // hipMemcpy(_residual, _cuarrays.residual, // sizeof(double)*RESIDUAL_VARS, hipMemcpyDeviceToHost); // // if ( 0==(count-1)%print_interval ) // cout<<" "<<nt-_dt[0]<<" rhomax: "<<_residual[0]/rhoref<<" E: "<<_residual[1]/rhoE<<endl; // // fout<<count<<" "<<log(_residual[0]/rhoref)/log(10.0)<<endl; // } // // // hipEventDestroy(time_start); // hipEventDestroy(time_stop); // // if ( log_history=='Y' ) // fout.close(); // //} //void CCUDARkdgSolver::commuInfo() { // MPI_Request request; // // int *rho_buffer, *rhou_buffer, *rhov_buffer, *rhoE_buffer; // for (int i = 0; i < nprocs - 1; i++) { // int num = grid.local_innerBoundary_index[i].size(); // rho_buffer = new int[num]; // rhou_buffer = new int[num]; // rhov_buffer = new int[num]; // rhoE_buffer = new int[num]; // for (int j = 0; j < num; j++) { // rho_buffer[j] = _cuarrays.freedom_rho[grid.local_innerBoundary_index[i].at(j)]; // rhou_buffer[j] = _cuarrays.freedom_rhou[grid.local_innerBoundary_index[i].at(j)]; // rhov_buffer[j] = _cuarrays.freedom_rhov[grid.local_innerBoundary_index[i].at(j)]; // rhoE_buffer[j] = _cuarrays.freedom_rhoE[grid.local_innerBoundary_index[i].at(j)]; // } // int dest = i < myid ? 
i : i + 1; // MPI_Isend(rho_buffer, num, MPI_DOUBLE, dest, 1 + 4 * num_commu, MPI_COMM_WORLD, &request); // MPI_Isend(rhou_buffer, num, MPI_DOUBLE, dest, 2 + 4 * num_commu, MPI_COMM_WORLD, &request); // MPI_Isend(rhov_buffer, num, MPI_DOUBLE, dest, 3 + 4 * num_commu, MPI_COMM_WORLD, &request); // MPI_Isend(rhoE_buffer, num, MPI_DOUBLE, dest, 4 + 4 * num_commu, MPI_COMM_WORLD, &request); // // MPI_Irecv(rho_buffer, num, MPI_DOUBLE, dest, 1 + 4 * num_commu, MPI_COMM_WORLD, &request); // MPI_Irecv(rhou_buffer, num, MPI_DOUBLE, dest, 2 + 4 * num_commu, MPI_COMM_WORLD, &request); // MPI_Irecv(rhov_buffer, num, MPI_DOUBLE, dest, 3 + 4 * num_commu, MPI_COMM_WORLD, &request); // MPI_Irecv(rhoE_buffer, num, MPI_DOUBLE, dest, 4 + 4 * num_commu++, MPI_COMM_WORLD, &request); // } // MPI_Barrier(MPI_COMM_WORLD); // dealCommuData(); //} // //void CCUDARkdgSolver::dealCommuData() { // size_t pitch = _cuarrays.getDoublePitch(); // for (int i = 0; i < nprocs - 1; i++) { // for (int j = 0; j < grid.local_innerBoundary_index[i].size(); j++) { // _freedom_rho[grid.local_innerBoundary_index[i].at(j)] = rho_buffer[j]; // _freedom_rhou[grid.local_innerBoundary_index[i].at(j)] = rhou_buffer[j]; // _freedom_rhov[grid.local_innerBoundary_index[i].at(j)] = rhov_buffer[j]; // _freedom_rhoE[grid.local_innerBoundary_index[i].at(j)] = rhoE_buffer[j]; // } // } // // hipMemcpy2DAsync(_cuarrays.freedom_rho, pitch, _freedom_rho, pitch, pitch, BASIS_FUNCTIONS, hipMemcpyHostToDevice); // hipMemcpy2DAsync(_cuarrays.freedom_rhou, pitch, _freedom_rhou, pitch, pitch, BASIS_FUNCTIONS, hipMemcpyHostToDevice); // hipMemcpy2DAsync(_cuarrays.freedom_rhov, pitch, _freedom_rhov, pitch, pitch, BASIS_FUNCTIONS, hipMemcpyHostToDevice); // hipMemcpy2DAsync(_cuarrays.freedom_rhoE, pitch, _freedom_rhoE, pitch, pitch, BASIS_FUNCTIONS, hipMemcpyHostToDevice); //} void CCUDARkdgSolver::outputSolution(int* data_size, double* result_rho, double* result_rhou, double* result_rhov, double* result_rhoE) { cout<<"ofstream"<<endl; ofstream fout(solution_file.c_str()); cout<<"file :"<<solution_file.c_str()<<endl; cout<<"result file: "<<solution_file.c_str()<<endl; cout<<"start if"<<endl; if ( !fout ) { cout<<"Failed to open solution file: "<<solution_file<<" and output will be omitted."<<endl; return; } cout<<"end if"<<endl; int i; int vnum, tnum; double rho, u, v, rhoE, p, a, ma; vnum = grid.getVerticeNumber(); tnum = grid.getTriangleNumber(); cout<<"-1"<<endl; fout<<"TITLE=RKDG"<<endl; cout<<"-2"<<endl; fout<<"VARIABLES=X , Y , rho , u , v , p, Ma , FLAG"<<endl; fout<<"ZONE T= T1 N= "<<vnum<<" , E= "<<tnum<<" , ZONETYPE=FETRIANGLE"<<endl; fout<<"DATAPACKING=BLOCK"<<endl; fout<<"VARLOCATION=([1-2]=NODAL,[3-8]=CELLCENTERED)"<<endl; fout<<"DT=(SINGLE SINGLE SINGLE SINGLE SINGLE SINGLE SINGLE SINGLE)"<<endl; cout<<"0"<<endl; for ( i=0; i<vnum; ++i ) { fout<<grid.vertice[i].getX()<<" "; if ( i%6==0 ) { fout<<endl; } } cout<<"1"<<endl; for ( i=0; i<vnum; ++i ) { fout<<grid.vertice[i].getY()<<" "; if ( i%6==0 ) { fout<<endl; } } cout<<"2"<<endl; fout<<endl; int *index; index = (int*)malloc(nprocs * sizeof(int)); for (int i = 0; i < nprocs; i++) { index[i] = 0; } cout<<"3"<<endl; for ( i=0; i<tnum; ++i ) { int start_loc(0); //fout<<result_rho[i]<<" "; for (int j = 0; j < grid.elem_location[i]; j++) { start_loc += data_size[j]; } fout<<result_rho[start_loc + index[grid.elem_location[i]]++]<<" "; if ( i%6==0 ) { fout<<endl; } } cout<<index[0]<<", "<<index[1]<<endl; cout<<"4"<<endl; fout<<endl; for ( i=0; i<tnum; ++i ) { fout<<result_rhou[i]/result_rho[i]<<" "; 
if ( i%6==0 ) { fout<<endl; } } cout<<"5"<<endl; fout<<endl; for ( i=0; i<tnum; ++i ) { fout<<result_rhov[i]/result_rho[i]<<" "; if ( i%6==0 ) { fout<<endl; } } cout<<"6"<<endl; fout<<endl; for ( i=0; i<tnum; ++i ) { rho = result_rho[i]; u = result_rhou[i]/rho; v = result_rhov[i]/rho; rhoE = result_rhoE[i]; p = (gamma-1)*(rhoE-0.5*rho*(u*u+v*v)); fout<<p<<" "; if ( i%6==0 ) { fout<<endl; } } cout<<"7"<<endl; fout<<endl; for ( i=0; i<tnum; ++i ) { rho = result_rho[i]; u = result_rhou[i]/rho; v = result_rhov[i]/rho; rhoE = result_rhoE[i]; p = (gamma-1)*(rhoE-0.5*rho*(u*u+v*v)); a = sqrt(gamma*p/rho); ma = sqrt(u*u+v*v)/a; fout<<ma<<" "; if ( i%6==0 ) { fout<<endl; } } cout<<"8"<<endl; fout<<endl; // for ( i=0; i<tnum; ++i ) { fout<<"1"<<" "; if ( i%6==0 ) { fout<<endl; } } fout<<endl; for ( i=0; i<tnum; ++i ) { fout<<grid.tri_vertice[3*i]+1<<" "<<grid.tri_vertice[3*i+1]+1<<" "<<grid.tri_vertice[3*i+2]+1<<endl; } cout<<"before close"<<endl; fout.close(); } void CCUDARkdgSolver::printConfig( ostream& out ) { if ( !out ) { cerr<<"Invalid output stream and output will be omitted."<<endl; return; } // out<<"===================="<<endl; out<<"Program configures: "<<endl; out<<"===================="<<endl; out<<"gamma: "<<gamma<<endl; out<<"alpha: "<<alpha*180/(4*atan(1.0))<<endl; out<<"mach: "<<mach<<endl; out<<"cfl: "<<cfl<<endl; out<<"rhoref: "<<rhoref<<endl; out<<"pref: "<<pref<<endl; out<<"time: "<<_terminal_time<<endl; out<<"===================="<<endl; out<<"gridconf: "<<gridconf<<endl; out<<"solution: "<<solution_file<<endl; out<<"residualfile: "<<residual_file<<endl; out<<"printinterval: "<<print_interval<<endl; out<<"loghistory: "<<log_history<<endl; out<<"===================="<<endl; out<<"threads_per_block: "<<threads_per_block<<endl; out<<"reduction_threads: "<<reduction_threads<<endl; out<<"===================="<<endl<<endl; } CCUDARkdgSolver::~CCUDARkdgSolver() { delete []_freedom_rho; delete []_freedom_rhou; delete []_freedom_rhov; delete []_freedom_rhoE; hipHostFree(_residual); hipHostFree(_dt); }
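The commented-out rkdgAdvance()/commuInfo() exchange above has three visible problems: the rank test if(myid = 0 && ...) assigns instead of comparing (it needs myid == 0), the staging buffers are int arrays yet are sent as MPI_DOUBLE, and they are filled by dereferencing device pointers (_cuarrays.freedom_*) on the host. A minimal host-side sketch of that exchange, limited to the rho freedoms, with made-up function and buffer names and assuming the host copy _freedom_rho is current:

#include <mpi.h>
#include <vector>

// Sketch only: exchanges one field (rho) with every other rank. boundary_index[p]
// lists the local cell indices shared with the p-th remote rank, mirroring
// grid.local_innerBoundary_index; freedom_rho_host mirrors _freedom_rho.
void exchangeBoundaryRho(int myid, int nprocs,
                         const std::vector< std::vector<int> >& boundary_index,
                         double* freedom_rho_host)
{
    std::vector<MPI_Request> requests;
    std::vector< std::vector<double> > send_buf(nprocs - 1), recv_buf(nprocs - 1);

    for (int p = 0; p < nprocs - 1; ++p) {
        const int dest = (p < myid) ? p : p + 1;   // skip this process's own rank id
        const int n = (int)boundary_index[p].size();
        send_buf[p].resize(n);
        recv_buf[p].resize(n);                     // separate buffers: the original reuses
                                                   // the send buffer for the receive
        for (int j = 0; j < n; ++j)                // stage from the HOST copy, not from
            send_buf[p][j] = freedom_rho_host[boundary_index[p][j]];   // device pointers

        MPI_Request sreq, rreq;
        MPI_Isend(send_buf[p].data(), n, MPI_DOUBLE, dest, 1, MPI_COMM_WORLD, &sreq);
        MPI_Irecv(recv_buf[p].data(), n, MPI_DOUBLE, dest, 1, MPI_COMM_WORLD, &rreq);
        requests.push_back(sreq);
        requests.push_back(rreq);
    }
    MPI_Waitall((int)requests.size(), requests.data(), MPI_STATUSES_IGNORE);

    // Write the received values back (same index list as the original dealCommuData);
    // the caller would then upload freedom_rho_host to the device again.
    for (int p = 0; p < nprocs - 1; ++p)
        for (size_t j = 0; j < boundary_index[p].size(); ++j)
            freedom_rho_host[boundary_index[p][j]] = recv_buf[p][j];
}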
aaccf41ae7df664b96c1887a1fffcaa68e1d03e3.cu
#include "../inc/cudarkdgsolver.h" CCUDARkdgSolver::CCUDARkdgSolver(): num_commu(0), title("Unknown case"), alpha(5.0), gamma(1.4), mach(0.4), cfl(0.18), rhoref(1.0), pref(1.0), _terminal_time(15), log_history('Y'), print_interval(1000), gridconf("input/mesh.conf"), solution_file("output/solution.dat"), log_file("output/log.dat"), residual_file("output/residual.dat"), threads_per_block(512), reduction_threads(512), _freedom_rho(NULL), _freedom_rhou(NULL), _freedom_rhov(NULL), _freedom_rhoE(NULL), _dt(NULL), _residual(NULL) {} void CCUDARkdgSolver::detectCUDADevice( void ) { int count(0); cudaGetDeviceCount( &count ); cout<<"count:"<<count; /*if ( 0==count ) throw CMyException("No device surpports CUDA found!");*/ //if ( count < nprocs) { // throw CMyException("No enough device surpports CUDA found!"); //} cudaDeviceProp prop; bool double_support(false); /*for ( int i=0; i<count; ++i ) { cudaGetDeviceProperties( &prop, i ); if ( prop.major>1 ) { double_support = true; break; } } if ( !double_support ) throw CMyException("No device has capability of 2.0 or higher is found!");*/ int double_support_count(0); for ( int i=0; i<count; ++i ) { cudaGetDeviceProperties( &prop, i ); if ( prop.major>1 ) { double_support_count++; if (double_support_count == myid) { cudaSetDevice(i); } } cout<<"gpu("<<i<<"):"<<prop.major<<"."<<prop.minor<<endl; } // if ( double_support_count < nprocs ) // throw CMyException("No enough device has capability of 2.0 or higher is found!"); /*memset( &prop, 0, sizeof(cudaDeviceProp) ); prop.major = 2; prop.minor = 0; int devid; cudaChooseDevice(&devid, &prop);*/ //cout<<"\nThere are "<<count<<" device surpports CUDA, and the "<<devid+1<<"th device will be used."<<endl; } void CCUDARkdgSolver::initConfig(void) { string conf_items[] = { "title", "gamma", "alpha", "mach", "cfl", "rhoref","pref", "time", "gridconf", "logfile", "solutionfile", "residualfile", "threadsperblock", "reductionthreads", "loghistory", "printinterval" }; CConfig program_conf(config_file, conf_items, 16); program_conf.parseConfigFile(); // 转换配置参数 if ( program_conf.config_items["title"]!="" ) title = program_conf.config_items["title"]; if ( program_conf.config_items["gamma"]!="" ) gamma = atof(program_conf.config_items["gamma"].c_str()); if ( program_conf.config_items["alpha"]!="" ) alpha = atof(program_conf.config_items["alpha"].c_str())*atan(1.0)*4 / 180; if ( program_conf.config_items["mach"]!="" ) mach = atof(program_conf.config_items["mach"].c_str()); if ( program_conf.config_items["cfl"]!="" ) cfl = atof(program_conf.config_items["cfl"].c_str()); if ( program_conf.config_items["rhoref"]!="" ) rhoref = atof(program_conf.config_items["rhoref"].c_str()); if ( program_conf.config_items["pref"]!="" ) pref = atof(program_conf.config_items["pref"].c_str()); if ( program_conf.config_items["time"]!="" ) _terminal_time = atof(program_conf.config_items["time"].c_str()); if ( program_conf.config_items["gridconf"]!="" ) gridconf = program_conf.config_items["gridconf"]; if ( program_conf.config_items["solutionfile"]!="" ) solution_file = program_conf.config_items["solutionfile"]; if ( program_conf.config_items["logfile"]!="" ) log_file = program_conf.config_items["logfile"]; if ( program_conf.config_items["residualfile"]!="" ) residual_file = program_conf.config_items["residualfile"]; if ( program_conf.config_items["loghistory"]!="" ) log_history = toupper(program_conf.config_items["loghistory"].at(0)); if ( program_conf.config_items["threadsperblock"]!="" ) threads_per_block = 
atoi(program_conf.config_items["threadsperblock"].c_str()); if ( program_conf.config_items["printinterval"]!="" ) print_interval = abs(atoi(program_conf.config_items["printinterval"].c_str())); if ( program_conf.config_items["reductionthreads"]!="" ) reduction_threads = atoi(program_conf.config_items["reductionthreads"].c_str()); } void CCUDARkdgSolver::run(int myid, int nprocs) { this->myid = myid; this->nprocs = nprocs; ofstream fout(log_file.c_str()); if ( !fout ) throw CMyException("Failed to open log file: "+log_file); CMyTime mt; fout<<mt.getCurrentTime()<<": programs starts"<<endl; // 检查CUDA设备 detectCUDADevice(); fout<<mt.getCurrentTime()<<": Device with capability of 2.0 is found."<<endl; // 初始化程序配置并输出程序配置 initConfig(); fout<<mt.getCurrentTime()<<": initialize configure from file."<<endl<<endl; fout<<"Title: "<<title<<endl<<endl; printConfig(cout); printConfig(fout); fout<<mt.getCurrentTime()<<": reading grid information."<<endl; // 初始化网格 grid.config_file = gridconf; grid.initializeGrid(myid, nprocs); fout.close(); } void CCUDARkdgSolver::runNext() { grid.initializeGridNext(); //grid.outputGrid(); //grid.outputGridWithGhostCells("output/ghostmesh.plt"); //fout<<mt.getCurrentTime()<<": complete grid initialization."<<endl; // 测试三角形顶点是否逆时针排序 // grid.testTrianglesAntiwise(); grid.testLocalTriangleAntiwise(); // 初始化网格上基函数等信息 // grid.triangle_infos.allocateMemory(grid.getCellNumber()); grid.triangle_infos.allocateMemory(grid.getLocalCellNumber()); grid.initializeTriangleInfos(); // fout<<mt.getCurrentTime()<<": complete grid information initialization."<<endl; // 标记网格单元 // grid.markBoundaryTriangles(); grid.markLocalBoundaryTriangles(); // 分配GPU内存 // _cuarrays.allocateMemory(grid.getCellNumber()); _cuarrays.allocateMemory(grid.getLocalCellNumber()); // 将三角单元信息传送到GPU copyTriangleInfosToGPU(); // 初始化RKDG自由度,并将初始化数据传到GPU initRKDG(); // fout<<mt.getCurrentTime()<<": program initialization complete."<<endl; // fout<<mt.getCurrentTime()<<": begin to solve flow."<<endl<<endl; /** 时间推进*/ //mt.beginTimer(); //rkdgAdvance(); // fout.close(); } void CCUDARkdgSolver::runAfter() { //mt.endTimer(); //fout<<"RKDG performance:"<<endl; //fout<<"CPU time: "<<mt.getCPUElapsedTime()<<" s"<<endl; //fout<<"wall time: "<<mt.getWallElapsedTime()<<" s"<<endl<<endl; //fout<<mt.getCurrentTime()<<": complete solving flow."<<endl; // 复制自由度到本地 copyFreedomToHost(); // 输出解 //outputSolution(); //fout<<mt.getCurrentTime()<<": complete solution output."<<endl; //fout.close(); } void CCUDARkdgSolver::copyFreedomToHost() { size_t size = sizeof(double)*grid.getLocalCellNumber(); size_t pitch = _cuarrays.getDoublePitch(); cudaMemcpy2D(_freedom_rho, size, _cuarrays.freedom_rho, pitch, size, BASIS_FUNCTIONS, cudaMemcpyDeviceToHost); cudaMemcpy2D(_freedom_rhou, size, _cuarrays.freedom_rhou, pitch, size, BASIS_FUNCTIONS, cudaMemcpyDeviceToHost); cudaMemcpy2D(_freedom_rhov, size, _cuarrays.freedom_rhov, pitch, size, BASIS_FUNCTIONS, cudaMemcpyDeviceToHost); cudaMemcpy2D(_freedom_rhoE, size, _cuarrays.freedom_rhoE, pitch, size, BASIS_FUNCTIONS, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); } void CCUDARkdgSolver::copyTriangleInfosToGPU(void) { // int num = grid.getCellNumber(); int num = grid.getLocalCellNumber(); size_t int_pitch = _cuarrays.getIntPitch(); size_t double_pitch = _cuarrays.getDoublePitch(); // cudaMemcpy2DAsync(_cuarrays.neighbour, int_pitch, grid.tri_neighbour, sizeof(int)*num, sizeof(int)*num, TRIANGLE_EDGES, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.neighbour, int_pitch, grid.local_tri_neighbour, 
sizeof(int)*num, sizeof(int)*num, TRIANGLE_EDGES, cudaMemcpyHostToDevice); // cudaMemcpy2DAsync(_cuarrays.sharedEdge, int_pitch, grid.tri_sharedEdge, sizeof(int)*num, sizeof(int)*num, TRIANGLE_EDGES, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.sharedEdge, int_pitch, grid.local_tri_sharedEdge, sizeof(int)*num, sizeof(int)*num, TRIANGLE_EDGES, cudaMemcpyHostToDevice); // cudaMemcpy2DAsync(_cuarrays.triangle_flag, int_pitch, grid.tri_flag, sizeof(int)*num, sizeof(int)*num, 1, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.triangle_flag, int_pitch, grid.local_tri_flag, sizeof(int)*num, sizeof(int)*num, 1, cudaMemcpyHostToDevice); size_t gsize = sizeof(double)*num; /*cudaMemcpy2DAsync(_cuarrays.area, double_pitch, grid.triangle_infos.area, gsize, gsize, 1, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.perimeter, double_pitch, grid.triangle_infos.perimeter, gsize, gsize, 1, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.outer_normal_vector, double_pitch, grid.triangle_infos.outer_normal_vector, gsize, gsize, TRIANGLE_EDGES*2, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.mass_coeff, double_pitch, grid.triangle_infos.mass_coeff, gsize, gsize, BASIS_FUNCTIONS, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.vol_bf_value, double_pitch, grid.triangle_infos.vol_bf_value, gsize, gsize, VOLUME_GPOINTS*BASIS_FUNCTIONS, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.vol_bdf_value, double_pitch, grid.triangle_infos.vol_bdf_value, gsize, gsize, VOLUME_GPOINTS*BASIS_FUNCTIONS*2, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.edge_bf_value, double_pitch, grid.triangle_infos.edge_bf_value, gsize, gsize, TRIANGLE_EDGES*EDGE_GPOINTS*BASIS_FUNCTIONS, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.vol_gauss_weight, double_pitch, grid.triangle_infos.vol_gauss_weight, gsize, gsize, VOLUME_GPOINTS, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.edge_gauss_weight, double_pitch, grid.triangle_infos.edge_gauss_weight, gsize, gsize, EDGE_GPOINTS*TRIANGLE_EDGES, cudaMemcpyHostToDevice);*/ cudaMemcpy2DAsync(_cuarrays.area, double_pitch, grid.triangle_infos.area, gsize, gsize, 1, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.perimeter, double_pitch, grid.triangle_infos.perimeter, gsize, gsize, 1, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.outer_normal_vector, double_pitch, grid.triangle_infos.outer_normal_vector, gsize, gsize, TRIANGLE_EDGES*2, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.mass_coeff, double_pitch, grid.triangle_infos.mass_coeff, gsize, gsize, BASIS_FUNCTIONS, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.vol_bf_value, double_pitch, grid.triangle_infos.vol_bf_value, gsize, gsize, VOLUME_GPOINTS*BASIS_FUNCTIONS, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.vol_bdf_value, double_pitch, grid.triangle_infos.vol_bdf_value, gsize, gsize, VOLUME_GPOINTS*BASIS_FUNCTIONS*2, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.edge_bf_value, double_pitch, grid.triangle_infos.edge_bf_value, gsize, gsize, TRIANGLE_EDGES*EDGE_GPOINTS*BASIS_FUNCTIONS, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.vol_gauss_weight, double_pitch, grid.triangle_infos.vol_gauss_weight, gsize, gsize, VOLUME_GPOINTS, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.edge_gauss_weight, double_pitch, grid.triangle_infos.edge_gauss_weight, gsize, gsize, EDGE_GPOINTS*TRIANGLE_EDGES, cudaMemcpyHostToDevice); if ( cudaPeekAtLastError()!=cudaSuccess ) { throw CMyException(cudaGetErrorString(cudaPeekAtLastError())); } } void 
CCUDARkdgSolver::initRKDG() { // int num = grid.getCellNumber(); int num = grid.getLocalCellNumber(); double ut = sqrt(gamma*pref/rhoref)*mach; double u = ut * cos(alpha); double v = ut * sin(alpha); // 分配内存 _freedom_rho = new double[num*BASIS_FUNCTIONS]; _freedom_rhou = new double[num*BASIS_FUNCTIONS]; _freedom_rhov = new double[num*BASIS_FUNCTIONS]; _freedom_rhoE = new double[num*BASIS_FUNCTIONS]; cudaHostAlloc((void**)&_dt, sizeof(double), cudaHostAllocDefault); cudaHostAlloc((void**)&_residual, sizeof(double)*RESIDUAL_VARS, cudaHostAllocDefault); if ( cudaPeekAtLastError()!=cudaSuccess ) throw CMyException(cudaGetErrorString(cudaPeekAtLastError())); // 初始化自由度的值 for ( int i=0; i<num; ++i ) { _freedom_rho[i] = rhoref; //_freedom_rho[i] = myid + 1; _freedom_rhou[i] = rhoref *u; _freedom_rhov[i] = rhoref*v; _freedom_rhoE[i] = rhoref*(ut*ut)/2 + pref/(rhoref*(gamma-1)); } int dev_pitch = _cuarrays.getDoublePitch(); int host_pitch = sizeof(double)*num; cudaMemsetAsync(_cuarrays.freedom_rho, 0, dev_pitch*BASIS_FUNCTIONS); cudaMemsetAsync(_cuarrays.freedom_rhou, 0, dev_pitch*BASIS_FUNCTIONS); cudaMemsetAsync(_cuarrays.freedom_rhov, 0, dev_pitch*BASIS_FUNCTIONS); cudaMemsetAsync(_cuarrays.freedom_rhoE, 0, dev_pitch*BASIS_FUNCTIONS); cudaMemcpy2DAsync(_cuarrays.freedom_rho, dev_pitch, _freedom_rho, host_pitch, host_pitch, 1, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.freedom_rhou, dev_pitch, _freedom_rhou, host_pitch, host_pitch, 1, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.freedom_rhov, dev_pitch, _freedom_rhov, host_pitch, host_pitch, 1, cudaMemcpyHostToDevice); cudaMemcpy2DAsync(_cuarrays.freedom_rhoE, dev_pitch, _freedom_rhoE, host_pitch, host_pitch, 1, cudaMemcpyHostToDevice); } void CCUDARkdgSolver::getTimeStep(int tnum) { kernel_getTimeStep<<<1,reduction_threads, sizeof(double)*reduction_threads>>>( tnum, gamma, cfl, _cuarrays.ddt, _cuarrays.freedom_rho, _cuarrays.freedom_rhou, _cuarrays.freedom_rhov, _cuarrays.freedom_rhoE, _cuarrays.perimeter, _cuarrays.area ); } void CCUDARkdgSolver::calculateConVars(int tnum, int double_pitch, int blocks) { size_t size = sizeof(double)*threads_per_block*CONSERVATIVE_VARS; kernel_calculateConVars<<<blocks,threads_per_block, size>>>( tnum, double_pitch, _cuarrays.freedom_rho, _cuarrays.freedom_rhou, _cuarrays.freedom_rhov, _cuarrays.freedom_rhoE, _cuarrays.convar_rho_vol, _cuarrays.convar_rhou_vol, _cuarrays.convar_rhov_vol, _cuarrays.convar_rhoE_vol, _cuarrays.convar_rho_edge, _cuarrays.convar_rhou_edge, _cuarrays.convar_rhov_edge, _cuarrays.convar_rhoE_edge, _cuarrays.vol_bf_value, _cuarrays.edge_bf_value ); } void CCUDARkdgSolver::boundaryCondition(int tnum, int num, int double_pitch, double rho, double rhou, double rhov, double rhoE) { // 边界单元总数目很少,为了提高性能,在每个block里面可减少线程数以提高性能 int threads = 64; int blocks = ((num-tnum)%threads) ? 
(num-tnum)/threads+1 : (num-tnum)/threads; kernel_boundaryCondition<<<blocks,threads>>>( tnum, num, double_pitch, rho, rhou, rhov, rhoE, _cuarrays.convar_rho_edge, _cuarrays.convar_rhou_edge, _cuarrays.convar_rhov_edge, _cuarrays.convar_rhoE_edge, _cuarrays.freedom_rho, _cuarrays.freedom_rhou, _cuarrays.freedom_rhov, _cuarrays.freedom_rhoE, _cuarrays.neighbour, _cuarrays.sharedEdge, _cuarrays.triangle_flag, _cuarrays.outer_normal_vector ); } void CCUDARkdgSolver::calculateVolumeRHS(int tnum, int double_pitch, int blocks) { size_t size = sizeof(double)*threads_per_block*VOLUME_GPOINTS; kernel_calculateVolumeRHS<<<blocks, threads_per_block, size>>>( tnum, double_pitch, gamma, _cuarrays.convar_rho_vol, _cuarrays.convar_rhou_vol, _cuarrays.convar_rhov_vol, _cuarrays.convar_rhoE_vol, _cuarrays.rhs_volume_rho, _cuarrays.rhs_volume_rhou, _cuarrays.rhs_volume_rhov, _cuarrays.rhs_volume_rhoE, _cuarrays.vol_gauss_weight, _cuarrays.vol_bdf_value ); } void CCUDARkdgSolver::calculateLFCoeff(int tnum, int ipitch_num, int dpitch_num, int blocks) { kernel_calculateLFCoeff<<<blocks, threads_per_block>>>( tnum, ipitch_num, dpitch_num, gamma, _cuarrays.outer_normal_vector, _cuarrays.neighbour, _cuarrays.freedom_rho, _cuarrays.freedom_rhou, _cuarrays.freedom_rhov, _cuarrays.freedom_rhoE, _cuarrays.lfflux_coeff ); } void CCUDARkdgSolver::calculateEdgeFG(int tnum, int num, int double_pitch, int blocks) { // 此处需要计算的单元与其他函数不一样,从而线程块需要重新定义 blocks = (num%threads_per_block) ? num/threads_per_block+1 : num/threads_per_block; kernel_calculateEdgeFG<<<blocks, threads_per_block>>>( tnum, num, double_pitch, gamma, _cuarrays.convar_rho_edge, _cuarrays.convar_rhou_edge, _cuarrays.convar_rhov_edge, _cuarrays.convar_rhoE_edge, _cuarrays.fedge_rho, _cuarrays.fedge_rhou, _cuarrays.fedge_rhov, _cuarrays.fedge_rhoE, _cuarrays.gedge_rho, _cuarrays.gedge_rhou, _cuarrays.gedge_rhov, _cuarrays.gedge_rhoE ); } void CCUDARkdgSolver::calculateFlux(int tnum, int int_pitch, int double_pitch, int blocks) { /* kernel_calculateFlux<<<blocks, threads_per_block>>>( tnum, int_pitch, double_pitch, _cuarrays.neighbour, _cuarrays.sharedEdge, _cuarrays.convar_rho_edge, _cuarrays.convar_rhou_edge, _cuarrays.convar_rhov_edge, _cuarrays.convar_rhoE_edge, _cuarrays.fedge_rho, _cuarrays.fedge_rhou, _cuarrays.fedge_rhov, _cuarrays.fedge_rhoE, _cuarrays.gedge_rho, _cuarrays.gedge_rhou, _cuarrays.gedge_rhov, _cuarrays.gedge_rhoE, _cuarrays.outer_normal_vector, _cuarrays.lfflux_coeff, _cuarrays.lfflux_rho, _cuarrays.lfflux_rhou, _cuarrays.lfflux_rhov, _cuarrays.lfflux_rhoE ); */ kernel_calculateFlux<<<blocks, threads_per_block>>>( tnum, int_pitch, double_pitch, _cuarrays.neighbour, _cuarrays.sharedEdge, _cuarrays.convar_rho_edge, _cuarrays.convar_rhou_edge, // _cuarrays.convar_rhov_edge, _cuarrays.convar_rhoE_edge, _cuarrays.fedge_rho, _cuarrays.fedge_rhou, // _cuarrays.fedge_rhov, _cuarrays.fedge_rhoE, _cuarrays.gedge_rho, _cuarrays.gedge_rhou, // _cuarrays.gedge_rhov, _cuarrays.gedge_rhoE, _cuarrays.outer_normal_vector, _cuarrays.lfflux_coeff, _cuarrays.lfflux_rho, _cuarrays.lfflux_rhou // _cuarrays.lfflux_rhov, _cuarrays.lfflux_rhoE ); kernel_calculateFlux<<<blocks, threads_per_block>>>( tnum, int_pitch, double_pitch, _cuarrays.neighbour, _cuarrays.sharedEdge, // _cuarrays.convar_rho_edge, _cuarrays.convar_rhou_edge, _cuarrays.convar_rhov_edge, _cuarrays.convar_rhoE_edge, // _cuarrays.fedge_rho, _cuarrays.fedge_rhou, _cuarrays.fedge_rhov, _cuarrays.fedge_rhoE, // _cuarrays.gedge_rho, _cuarrays.gedge_rhou, _cuarrays.gedge_rhov, _cuarrays.gedge_rhoE, 
_cuarrays.outer_normal_vector, _cuarrays.lfflux_coeff, // _cuarrays.lfflux_rho, _cuarrays.lfflux_rhou, _cuarrays.lfflux_rhov, _cuarrays.lfflux_rhoE ); } void CCUDARkdgSolver::calculateEdgeRHS(int tnum, int double_pitch, int blocks) { size_t size = sizeof(double)*threads_per_block*TRIANGLE_EDGES*EDGE_GPOINTS; kernel_calculateEdgeRHS<<<blocks, threads_per_block, size>>>( tnum, double_pitch, _cuarrays.edge_gauss_weight, _cuarrays.edge_bf_value, _cuarrays.lfflux_rho, _cuarrays.lfflux_rhou, _cuarrays.lfflux_rhov, _cuarrays.lfflux_rhoE, _cuarrays.rhs_edge_rho, _cuarrays.rhs_edge_rhou, _cuarrays.rhs_edge_rhov, _cuarrays.rhs_edge_rhoE, _cuarrays.rhs_volume_rho, _cuarrays.rhs_volume_rhou, _cuarrays.rhs_volume_rhov, _cuarrays.rhs_volume_rhoE ); } void CCUDARkdgSolver::rkdgStepOne(double dt, int tnum, int double_pitch, int blocks) { kernel_rkdgStepOne<<<blocks, threads_per_block>>>( tnum, double_pitch, dt, _cuarrays.mass_coeff, _cuarrays.freedom_rho, _cuarrays.freedom_rhou, _cuarrays.freedom_rhov, _cuarrays.freedom_rhoE, _cuarrays.rhs_edge_rho, _cuarrays.rhs_edge_rhou, _cuarrays.rhs_edge_rhov, _cuarrays.rhs_edge_rhoE ); } void CCUDARkdgSolver::rkdgStepTwo(double dt, int tnum, int double_pitch, int blocks) { kernel_rkdgStepTwo<<<blocks, threads_per_block>>>( tnum, double_pitch, dt, _cuarrays.mass_coeff, _cuarrays.freedom_rho, _cuarrays.freedom_rhou, _cuarrays.freedom_rhov, _cuarrays.freedom_rhoE, _cuarrays.rhs_edge_rho, _cuarrays.rhs_edge_rhou, _cuarrays.rhs_edge_rhov, _cuarrays.rhs_edge_rhoE, _cuarrays.freedom_rho_old, _cuarrays.freedom_rhou_old, _cuarrays.freedom_rhov_old, _cuarrays.freedom_rhoE_old ); } void CCUDARkdgSolver::rkdgStepThree(double dt, int tnum, int double_pitch, int blocks) { kernel_rkdgStepThree<<<blocks, threads_per_block>>>( tnum, double_pitch, dt, _cuarrays.mass_coeff, _cuarrays.freedom_rho, _cuarrays.freedom_rhou, _cuarrays.freedom_rhov, _cuarrays.freedom_rhoE, _cuarrays.rhs_edge_rho, _cuarrays.rhs_edge_rhou, _cuarrays.rhs_edge_rhov, _cuarrays.rhs_edge_rhoE, _cuarrays.freedom_rho_old, _cuarrays.freedom_rhou_old, _cuarrays.freedom_rhov_old, _cuarrays.freedom_rhoE_old ); } void CCUDARkdgSolver::calculateResidual(int tnum) { kernel_calculateResidual<<<1,reduction_threads, sizeof(double)*reduction_threads*RESIDUAL_VARS>>>( tnum, _cuarrays.freedom_rho, _cuarrays.freedom_rhoE, _cuarrays.freedom_rho_old, _cuarrays.freedom_rhoE_old, _cuarrays.residual ); } //void CCUDARkdgSolver::rkdgAdvance(void) //{ // ofstream fout; // if ( log_history=='Y' ) // { // fout.open(residual_file.c_str()); // if ( !fout ) // throw CMyException("Failed to open residual log file: "+residual_file); // // fout<<"N, rho"<<endl; // } // // // double nt(0); // int count(0); // // /*int tnum = grid.getTriangleNumber(); // int num = grid.getCellNumber();*/ // int tnum = grid.getLocalTriangleNumber(); // int num = grid.getLocalCellNumber(); // // // int blocks = (tnum%threads_per_block) ? 
tnum/threads_per_block+1 : tnum/threads_per_block; // // double ut = sqrt(gamma*pref/rhoref)*mach; // double rhou = rhoref*ut*cos(alpha); // double rhov = rhoref*ut*sin(alpha); // double rhoE = 0.5*rhoref*(ut*ut) + pref/(gamma-1); // // bool copy(false); // // cudaError_t error; // size_t pitch = _cuarrays.getDoublePitch(); // int pitch_num = pitch / sizeof(double); // int ipitch_num = _cuarrays.getIntPitch() / sizeof(int); // // cudaEvent_t time_start, time_stop; // // cudaEventCreateWithFlags(&time_start, cudaEventDisableTiming|cudaEventBlockingSync); // cudaEventCreateWithFlags(&time_stop, cudaEventDisableTiming|cudaEventBlockingSync); // // if ( log_history=='Y' ) // copy = true; // // // 确保之前CUDA的初始化工作都已经完成 // cudaDeviceSynchronize(); // // do // { // ++ count; // // cudaEventRecord(time_start); // // 计算当前时间步长 // getTimeStep(tnum); // // cudaEventRecord(time_stop); // // // 保存旧自由度 // /*cudaMemcpy2DAsync(_cuarrays.freedom_rho_old, pitch, _cuarrays.freedom_rho, pitch, pitch, BASIS_FUNCTIONS, cudaMemcpyDeviceToDevice); // cudaMemcpy2DAsync(_cuarrays.freedom_rhou_old, pitch, _cuarrays.freedom_rhou, pitch, pitch, BASIS_FUNCTIONS, cudaMemcpyDeviceToDevice); // cudaMemcpy2DAsync(_cuarrays.freedom_rhov_old, pitch, _cuarrays.freedom_rhov, pitch, pitch, BASIS_FUNCTIONS, cudaMemcpyDeviceToDevice); // cudaMemcpy2DAsync(_cuarrays.freedom_rhoE_old, pitch, _cuarrays.freedom_rhoE, pitch, pitch, BASIS_FUNCTIONS, cudaMemcpyDeviceToDevice);*/ // // cudaMemcpy2DAsync(_freedom_rho, pitch, _cuarrays.freedom_rho, pitch, pitch, BASIS_FUNCTIONS, cudaMemcpyDeviceToHost); // cudaMemcpy2DAsync(_freedom_rho, pitch, _cuarrays.freedom_rho, pitch, pitch, BASIS_FUNCTIONS, cudaMemcpyDeviceToHost); // cudaMemcpy2DAsync(_freedom_rho, pitch, _cuarrays.freedom_rho, pitch, pitch, BASIS_FUNCTIONS, cudaMemcpyDeviceToHost); // cudaMemcpy2DAsync(_freedom_rho, pitch, _cuarrays.freedom_rho, pitch, pitch, BASIS_FUNCTIONS, cudaMemcpyDeviceToHost); // // // // MPI_Request request[4], request1; // MPI_Status status; // int x = 1; //通知其他节点信息交换,0表示结束 // if(myid = 0 && count % 5 == 0) { // for (int i = 1; i < nprocs; i++) { // MPI_Isend(&x, 1, MPI_INT, i, 0, MPI_COMM_WORLD, &request1); // } // commuInfo(); // } else if(myid != 0) { // int flag; // MPI_Status status; // if (count == 0) { // MPI_Irecv(&x,1, MPI_INT, 0, 0, MPI_COMM_WORLD, &request1); // } // MPI_Test(&request1, &flag, &status); // if (flag == 1 && x == 1) { // commuInfo(); // MPI_Irecv(&x,1, MPI_INT, 0, 0, MPI_COMM_WORLD, &request1); // } // } // // // for ( int i=0; i<RUNGE_KUTTA_STEPS; ++i ) // { // // 计算守恒量的值 // calculateConVars(tnum, pitch_num, blocks); // // // 处理边界条件 // boundaryCondition(tnum, num, pitch_num, rhoref, rhou, rhov, rhoE); // // // 计算体积分残差 // calculateVolumeRHS(tnum, pitch_num, blocks); // // // 计算LF通量系数 // calculateLFCoeff(tnum, ipitch_num, pitch_num, blocks); // // // 计算f, g在边上的值 // calculateEdgeFG(tnum, num, pitch_num, blocks); // // calculateFlux(tnum, ipitch_num, pitch_num, blocks); // // // 计算线积分残差 // calculateEdgeRHS(tnum, pitch_num, blocks); // // // 时间推进 // switch (i) // { // case 0: // cudaEventSynchronize(time_stop); // // // 将时间步长传送到本地 // cudaMemcpy(_dt, _cuarrays.ddt, sizeof(double), cudaMemcpyDeviceToHost); // // if ( 0==(count-1)%print_interval ) // cout<<"Step: "<<count<<", time step: "<<_dt[0]<<endl; // // if ( (_terminal_time-nt)<_dt[0] ) // { // _dt[0] = _terminal_time - nt; // } // // // 时间步推进 // rkdgStepOne(_dt[0], tnum, pitch_num, blocks); // // break; // // case 1: // rkdgStepTwo(_dt[0], tnum, pitch_num, blocks); // break; // // 
case 2: // rkdgStepThree(_dt[0], tnum, pitch_num, blocks); // break; // // default: // throw CMyException("impossible case!"); // break; // } // } // // // if ( copy && (count-1) ) // { // // 复制残差数据 // cudaMemcpy(_residual, _cuarrays.residual, // sizeof(double)*RESIDUAL_VARS, cudaMemcpyDeviceToHost); // // if ( 0==(count-1)%print_interval ) // cout<<"Current time: "<<nt<<" rhomax: "<<_residual[0]/rhoref<<" E: "<<_residual[1]/rhoE<<endl; // // fout<<count<<" "<<log(_residual[0]/rhoref)/log(10.0)<<endl; // } // // // 计算残差 // calculateResidual(tnum); // // // // 计时推进 // nt += _dt[0]; // // error = cudaPeekAtLastError(); // if ( error!=cudaSuccess ) // throw CMyException(cudaGetErrorString(error)); // // } while ( nt<_terminal_time ); // // cudaDeviceSynchronize(); // // if ( copy ) // { // // 复制残差数据 // cudaMemcpy(_residual, _cuarrays.residual, // sizeof(double)*RESIDUAL_VARS, cudaMemcpyDeviceToHost); // // if ( 0==(count-1)%print_interval ) // cout<<"当前时间: "<<nt-_dt[0]<<" rhomax: "<<_residual[0]/rhoref<<" E: "<<_residual[1]/rhoE<<endl; // // fout<<count<<" "<<log(_residual[0]/rhoref)/log(10.0)<<endl; // } // // // cudaEventDestroy(time_start); // cudaEventDestroy(time_stop); // // if ( log_history=='Y' ) // fout.close(); // //} //void CCUDARkdgSolver::commuInfo() { // MPI_Request request; // // int *rho_buffer, *rhou_buffer, *rhov_buffer, *rhoE_buffer; // for (int i = 0; i < nprocs - 1; i++) { // int num = grid.local_innerBoundary_index[i].size(); // rho_buffer = new int[num]; // rhou_buffer = new int[num]; // rhov_buffer = new int[num]; // rhoE_buffer = new int[num]; // for (int j = 0; j < num; j++) { // rho_buffer[j] = _cuarrays.freedom_rho[grid.local_innerBoundary_index[i].at(j)]; // rhou_buffer[j] = _cuarrays.freedom_rhou[grid.local_innerBoundary_index[i].at(j)]; // rhov_buffer[j] = _cuarrays.freedom_rhov[grid.local_innerBoundary_index[i].at(j)]; // rhoE_buffer[j] = _cuarrays.freedom_rhoE[grid.local_innerBoundary_index[i].at(j)]; // } // int dest = i < myid ? 
i : i + 1; // MPI_Isend(rho_buffer, num, MPI_DOUBLE, dest, 1 + 4 * num_commu, MPI_COMM_WORLD, &request); // MPI_Isend(rhou_buffer, num, MPI_DOUBLE, dest, 2 + 4 * num_commu, MPI_COMM_WORLD, &request); // MPI_Isend(rhov_buffer, num, MPI_DOUBLE, dest, 3 + 4 * num_commu, MPI_COMM_WORLD, &request); // MPI_Isend(rhoE_buffer, num, MPI_DOUBLE, dest, 4 + 4 * num_commu, MPI_COMM_WORLD, &request); // // MPI_Irecv(rho_buffer, num, MPI_DOUBLE, dest, 1 + 4 * num_commu, MPI_COMM_WORLD, &request); // MPI_Irecv(rhou_buffer, num, MPI_DOUBLE, dest, 2 + 4 * num_commu, MPI_COMM_WORLD, &request); // MPI_Irecv(rhov_buffer, num, MPI_DOUBLE, dest, 3 + 4 * num_commu, MPI_COMM_WORLD, &request); // MPI_Irecv(rhoE_buffer, num, MPI_DOUBLE, dest, 4 + 4 * num_commu++, MPI_COMM_WORLD, &request); // } // MPI_Barrier(MPI_COMM_WORLD); // dealCommuData(); //} // //void CCUDARkdgSolver::dealCommuData() { // size_t pitch = _cuarrays.getDoublePitch(); // for (int i = 0; i < nprocs - 1; i++) { // for (int j = 0; j < grid.local_innerBoundary_index[i].size(); j++) { // _freedom_rho[grid.local_innerBoundary_index[i].at(j)] = rho_buffer[j]; // _freedom_rhou[grid.local_innerBoundary_index[i].at(j)] = rhou_buffer[j]; // _freedom_rhov[grid.local_innerBoundary_index[i].at(j)] = rhov_buffer[j]; // _freedom_rhoE[grid.local_innerBoundary_index[i].at(j)] = rhoE_buffer[j]; // } // } // // cudaMemcpy2DAsync(_cuarrays.freedom_rho, pitch, _freedom_rho, pitch, pitch, BASIS_FUNCTIONS, cudaMemcpyHostToDevice); // cudaMemcpy2DAsync(_cuarrays.freedom_rhou, pitch, _freedom_rhou, pitch, pitch, BASIS_FUNCTIONS, cudaMemcpyHostToDevice); // cudaMemcpy2DAsync(_cuarrays.freedom_rhov, pitch, _freedom_rhov, pitch, pitch, BASIS_FUNCTIONS, cudaMemcpyHostToDevice); // cudaMemcpy2DAsync(_cuarrays.freedom_rhoE, pitch, _freedom_rhoE, pitch, pitch, BASIS_FUNCTIONS, cudaMemcpyHostToDevice); //} void CCUDARkdgSolver::outputSolution(int* data_size, double* result_rho, double* result_rhou, double* result_rhov, double* result_rhoE) { cout<<"ofstream"<<endl; ofstream fout(solution_file.c_str()); cout<<"file :"<<solution_file.c_str()<<endl; cout<<"result file: "<<solution_file.c_str()<<endl; cout<<"start if"<<endl; if ( !fout ) { cout<<"Failed to open solution file: "<<solution_file<<" and output will be omitted."<<endl; return; } cout<<"end if"<<endl; int i; int vnum, tnum; double rho, u, v, rhoE, p, a, ma; vnum = grid.getVerticeNumber(); tnum = grid.getTriangleNumber(); cout<<"-1"<<endl; fout<<"TITLE=RKDG"<<endl; cout<<"-2"<<endl; fout<<"VARIABLES=X , Y , rho , u , v , p, Ma , FLAG"<<endl; fout<<"ZONE T= T1 N= "<<vnum<<" , E= "<<tnum<<" , ZONETYPE=FETRIANGLE"<<endl; fout<<"DATAPACKING=BLOCK"<<endl; fout<<"VARLOCATION=([1-2]=NODAL,[3-8]=CELLCENTERED)"<<endl; fout<<"DT=(SINGLE SINGLE SINGLE SINGLE SINGLE SINGLE SINGLE SINGLE)"<<endl; cout<<"0"<<endl; for ( i=0; i<vnum; ++i ) { fout<<grid.vertice[i].getX()<<" "; if ( i%6==0 ) { fout<<endl; } } cout<<"1"<<endl; for ( i=0; i<vnum; ++i ) { fout<<grid.vertice[i].getY()<<" "; if ( i%6==0 ) { fout<<endl; } } cout<<"2"<<endl; fout<<endl; int *index; index = (int*)malloc(nprocs * sizeof(int)); for (int i = 0; i < nprocs; i++) { index[i] = 0; } cout<<"3"<<endl; for ( i=0; i<tnum; ++i ) { int start_loc(0); //fout<<result_rho[i]<<" "; for (int j = 0; j < grid.elem_location[i]; j++) { start_loc += data_size[j]; } fout<<result_rho[start_loc + index[grid.elem_location[i]]++]<<" "; if ( i%6==0 ) { fout<<endl; } } cout<<index[0]<<", "<<index[1]<<endl; cout<<"4"<<endl; fout<<endl; for ( i=0; i<tnum; ++i ) { 
fout<<result_rhou[i]/result_rho[i]<<" "; if ( i%6==0 ) { fout<<endl; } } cout<<"5"<<endl; fout<<endl; for ( i=0; i<tnum; ++i ) { fout<<result_rhov[i]/result_rho[i]<<" "; if ( i%6==0 ) { fout<<endl; } } cout<<"6"<<endl; fout<<endl; for ( i=0; i<tnum; ++i ) { rho = result_rho[i]; u = result_rhou[i]/rho; v = result_rhov[i]/rho; rhoE = result_rhoE[i]; p = (gamma-1)*(rhoE-0.5*rho*(u*u+v*v)); fout<<p<<" "; if ( i%6==0 ) { fout<<endl; } } cout<<"7"<<endl; fout<<endl; for ( i=0; i<tnum; ++i ) { rho = result_rho[i]; u = result_rhou[i]/rho; v = result_rhov[i]/rho; rhoE = result_rhoE[i]; p = (gamma-1)*(rhoE-0.5*rho*(u*u+v*v)); a = sqrt(gamma*p/rho); ma = sqrt(u*u+v*v)/a; fout<<ma<<" "; if ( i%6==0 ) { fout<<endl; } } cout<<"8"<<endl; fout<<endl; // 限制器标记 for ( i=0; i<tnum; ++i ) { fout<<"1"<<" "; if ( i%6==0 ) { fout<<endl; } } fout<<endl; for ( i=0; i<tnum; ++i ) { fout<<grid.tri_vertice[3*i]+1<<" "<<grid.tri_vertice[3*i+1]+1<<" "<<grid.tri_vertice[3*i+2]+1<<endl; } cout<<"before close"<<endl; fout.close(); } void CCUDARkdgSolver::printConfig( ostream& out ) { if ( !out ) { cerr<<"Invalid output stream and output will be omitted."<<endl; return; } // 输出程序配置 out<<"===================="<<endl; out<<"Program configures: "<<endl; out<<"===================="<<endl; out<<"gamma: "<<gamma<<endl; out<<"alpha: "<<alpha*180/(4*atan(1.0))<<endl; out<<"mach: "<<mach<<endl; out<<"cfl: "<<cfl<<endl; out<<"rhoref: "<<rhoref<<endl; out<<"pref: "<<pref<<endl; out<<"time: "<<_terminal_time<<endl; out<<"===================="<<endl; out<<"gridconf: "<<gridconf<<endl; out<<"solution: "<<solution_file<<endl; out<<"residualfile: "<<residual_file<<endl; out<<"printinterval: "<<print_interval<<endl; out<<"loghistory: "<<log_history<<endl; out<<"===================="<<endl; out<<"threads_per_block: "<<threads_per_block<<endl; out<<"reduction_threads: "<<reduction_threads<<endl; out<<"===================="<<endl<<endl; } CCUDARkdgSolver::~CCUDARkdgSolver() { delete []_freedom_rho; delete []_freedom_rhou; delete []_freedom_rhov; delete []_freedom_rhoE; cudaFreeHost(_residual); cudaFreeHost(_dt); }
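copyFreedomToHost() and copyTriangleInfosToGPU() above rely on the pitched layout behind getDoublePitch(): device rows are padded to pitch bytes while host rows stay tightly packed at sizeof(double)*num. A self-contained sketch of that round trip (sizes and names are made up; 4 rows stand in for BASIS_FUNCTIONS):

#include <cuda_runtime.h>
#include <vector>
#include <cstdio>

int main()
{
    const int num = 1000, rows = 4;                 // made-up: rows stands in for BASIS_FUNCTIONS
    const size_t row_bytes = sizeof(double) * num;  // packed host row, like sizeof(double)*getLocalCellNumber()

    double* d_buf = NULL;
    size_t pitch = 0;                               // chosen by the runtime, >= row_bytes
    cudaMallocPitch((void**)&d_buf, &pitch, row_bytes, rows);

    std::vector<double> h_buf(num * rows, 1.0);

    // host -> device: dst pitch is the padded device pitch, src pitch the packed row size
    cudaMemcpy2D(d_buf, pitch, h_buf.data(), row_bytes, row_bytes, rows, cudaMemcpyHostToDevice);

    // device -> host: the pitch arguments swap sides, exactly as in copyFreedomToHost()
    cudaMemcpy2D(h_buf.data(), row_bytes, d_buf, pitch, row_bytes, rows, cudaMemcpyDeviceToHost);

    printf("device pitch = %zu bytes for %zu-byte rows\n", pitch, row_bytes);
    cudaFree(d_buf);
    return 0;
}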
5bfa030f4710fafcd666b0da23f4fa5c6da2aa94.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime_api.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
//#include "esp_def.hpp"
#include "osa.h"

#define THREAD_NUM 32
#define BLOCK_NUM 32

__global__ static void cuda_tran_gray(unsigned char *src, unsigned char* dst, int nWidth, int nHeight)
{
    int ImgHeight, ImgWidth;
    int i;
    ImgWidth = nWidth;
    ImgHeight = nHeight;

    uint8_t * pDst8_t;
    uint8_t * pSrc8_t;
    pSrc8_t = (uint8_t*)(src);
    pDst8_t = (uint8_t*)(dst);

    //const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;

    //for(int x = 0; x < ImgHeight*ImgWidth; x++)
    for(i = bid*THREAD_NUM + tid; i<ImgHeight*ImgWidth; i += BLOCK_NUM * THREAD_NUM)
    {
        pDst8_t[i] = pSrc8_t[i*3+1];
    }
}

__global__ void kernel_Sobel(unsigned char *src, unsigned char *dst, int width, int height)
{
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    int Gx, Gy;
    int sobel;
    Gx = Gy = 0;

    //if(x>=1 && x<(width-1) && y>=1 && y<(height-1))
    {
        Gx = src[(y-1)*width + x-1]*(-1) + src[(y-1)*width + x]*(0) + src[(y-1)*width + x+1]*(1)
           + src[y*width + x-1]*(-2)     + src[y*width + x]*(0)     + src[y*width + x+1]*(2)
           + src[(y+1)*width + x-1]*(-1) + src[(y+1)*width + x]*(0) + src[(y+1)*width + x+1]*(1);
        /*
        Gy = src[(y-1)*width + x-1]*(-1) + src[(y-1)*width + x]*(-2) + src[(y-1)*width + x+1]*(-1)
           + src[y*width + x-1]*(0)      + src[y*width + x]*(0)      + src[y*width + x+1]*(0)
           + src[(y+1)*width + x-1]*(1)  + src[(y+1)*width + x]*(2)  + src[(y+1)*width + x+1]*(1);
        */
        Gy = src[(y-1)*width + x-1]*(1)  + src[(y-1)*width + x]*(2)  + src[(y-1)*width + x+1]*(1)
           + src[y*width + x-1]*(0)      + src[y*width + x]*(0)      + src[y*width + x+1]*(0)
           + src[(y+1)*width + x-1]*(-1) + src[(y+1)*width + x]*(-2) + src[(y+1)*width + x+1]*(-1);

        sobel = (int)sqrt((float)(Gx * Gx + Gy * Gy));
        //sobel = sobel < 20 ? 0 : sobel;
        dst[y*width + x] = sobel;
    }
    //else
    {
        //dst[y*width + x] = 0;
    }
}

__global__ void kernel_RotImgProgress_(unsigned char *src, unsigned char *dst, int src_width, int src_height, int m, int n, int p, int q)
{
    int x, y;
    unsigned char *pdst;
    const int r_x = blockDim.x * blockIdx.x + threadIdx.x;
    const int r_y = blockDim.y * blockIdx.y + threadIdx.y;

    if ((r_x < 0) || (r_x >= src_width) || (r_y < 0) || (r_y >= src_height))
        return;

    x = m * r_x + n * r_y + p;
    y = -n * r_x + m * r_y + q;
    x = x >> 10;
    y = y >> 10;

    if ((x < 0) || (x >= src_width) || (y < 0) || (y >= src_height))
    {
        pdst = dst + r_y * src_width*3 + r_x*3;
        pdst[0] = 0x00;
        pdst[1] = 0x00;
        pdst[2] = 0x00;
    }else{
        pdst = dst + r_y * src_width*3 + r_x*3;
        pdst[0] = src[y * src_width*3 + x*3];
        pdst[1] = src[y * src_width*3 + x*3+1];
        pdst[2] = src[y * src_width*3 + x*3+2];
    }
}

/**************extern*******************/
extern "C" void tran_gray_cuda(unsigned char *src, unsigned char* dst, int nWidth, int nHeight)
{
    hipLaunchKernelGGL(( cuda_tran_gray), dim3(BLOCK_NUM),dim3(THREAD_NUM), 0, 0, src,dst,nWidth,nHeight);
}

extern "C" void Sobel_cuda(unsigned char *src, unsigned char *dst, int width, int height)
{
    hipLaunchKernelGGL(( kernel_Sobel), dim3(BLOCK_NUM), dim3(THREAD_NUM), 0, 0, src, dst, width, height);
}

extern "C" void RotImgProgress_cuda(unsigned char *src, unsigned char *dst, float cos, float sin, float dx, float dy, int src_width, int src_height)
{
    dim3 block((src_width+31)/32, (src_height+31)/32);
    dim3 thread(32,32);
    float a, b, c, d;
    int m, n, p, q;

    a = cos;
    b = sin;
    c = dx;
    d = dy;

    m = (int)((a / (a * a + b * b)) * 1024.0);
    n = (int)((b / (a * a + b * b)) * 1024.0);
    p = (int)(-((a * c + b * d) / (a * a + b * b)) * 1024.0) + 512;
    q = (int)(-((a * d - b * c) / (a * a + b * b)) * 1024.0) + 512;

    hipLaunchKernelGGL(( kernel_RotImgProgress_), dim3(block), dim3(thread), 0, 0, src, dst, src_width, src_height, m, n, p, q);
}
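For reference, the m/n/p/q coefficients built in RotImgProgress_cuda above encode the inverse rotate-and-shift in 10-bit fixed point (scale 1024, with +512 as the 0.5 rounding offset), and kernel_RotImgProgress_ evaluates it per output pixel. A floating-point version of the same mapping, as a hypothetical helper that is not part of the file:

// For an output pixel (r_x, r_y), recover the source pixel the kernel samples:
// x = (m*r_x + n*r_y + p) >> 10 in fixed point corresponds to the expression below.
static inline void inverseMapReference(float cosA, float sinA, float dx, float dy,
                                       int r_x, int r_y, float* x, float* y)
{
    const float det = cosA * cosA + sinA * sinA;    // ~1 for a pure rotation
    *x = ( cosA * r_x + sinA * r_y - (cosA * dx + sinA * dy)) / det;
    *y = (-sinA * r_x + cosA * r_y - (cosA * dy - sinA * dx)) / det;
}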
5bfa030f4710fafcd666b0da23f4fa5c6da2aa94.cu
#include "cuda_runtime_api.h" #include "device_launch_parameters.h" #include "cuda.h" //#include "esp_def.hpp" #include "osa.h" #define THREAD_NUM 32 #define BLOCK_NUM 32 __global__ static void cuda_tran_gray(unsigned char *src,unsigned char* dst,int nWidth,int nHeight) { int ImgHeight, ImgWidth; int i; ImgWidth = nWidth; ImgHeight = nHeight; uint8_t * pDst8_t; uint8_t * pSrc8_t; pSrc8_t = (uint8_t*)(src); pDst8_t = (uint8_t*)(dst); //const int x = blockDim.x * blockIdx.x + threadIdx.x; const int tid = threadIdx.x; const int bid = blockIdx.x; //for(int x = 0; x < ImgHeight*ImgWidth; x++) for(i = bid*THREAD_NUM + tid;i<ImgHeight*ImgWidth;i += BLOCK_NUM * THREAD_NUM) { pDst8_t[i] = pSrc8_t[i*3+1]; } } __global__ void kernel_Sobel(unsigned char *src, unsigned char *dst, int width, int height) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; int Gx, Gy; int sobel; Gx = Gy = 0; //if(x>=1 && x<(width-1) && y>=1 && y<(height-1)) { Gx = src[(y-1)*width + x-1]*(-1) + src[(y-1)*width + x]*(0) + src[(y-1)*width + x+1]*(1) + src[y*width + x-1]*(-2) + src[y*width + x]*(0) + src[y*width + x+1]*(2) + src[(y+1)*width + x-1]*(-1) + src[(y+1)*width + x]*(0) + src[(y+1)*width + x+1]*(1); /* Gy = src[(y-1)*width + x-1]*(-1) + src[(y-1)*width + x]*(-2) + src[(y-1)*width + x+1]*(-1) + src[y*width + x-1]*(0) + src[y*width + x]*(0) + src[y*width + x+1]*(0) + src[(y+1)*width + x-1]*(1) + src[(y+1)*width + x]*(2) + src[(y+1)*width + x+1]*(1); */ Gy = src[(y-1)*width + x-1]*(1) + src[(y-1)*width + x]*(2) + src[(y-1)*width + x+1]*(1) + src[y*width + x-1]*(0) + src[y*width + x]*(0) + src[y*width + x+1]*(0) + src[(y+1)*width + x-1]*(-1) + src[(y+1)*width + x]*(-2) + src[(y+1)*width + x+1]*(-1); sobel = (int)sqrt((float)(Gx * Gx + Gy * Gy)); //sobel = sobel < 20 ? 
0 : sobel; dst[y*width + x] = sobel; } //else { //dst[y*width + x] = 0; } } __global__ void kernel_RotImgProgress_(unsigned char *src, unsigned char *dst, int src_width, int src_height, int m, int n,int p, int q) { int x, y; unsigned char *pdst; const int r_x = blockDim.x * blockIdx.x + threadIdx.x; const int r_y = blockDim.y * blockIdx.y + threadIdx.y; if ((r_x < 0) || (r_x >= src_width) || (r_y < 0) || (r_y >= src_height)) return; x = m * r_x + n * r_y + p; y = -n * r_x + m * r_y + q; x = x >> 10; y = y >> 10; if ((x < 0) || (x >= src_width) || (y < 0) || (y >= src_height)) { pdst = dst + r_y * src_width*3 + r_x*3; pdst[0] = 0x00; pdst[1] = 0x00; pdst[2] = 0x00; }else{ pdst = dst + r_y * src_width*3 + r_x*3; pdst[0] = src[y * src_width*3 + x*3]; pdst[1] = src[y * src_width*3 + x*3+1]; pdst[2] = src[y * src_width*3 + x*3+2]; } } /**************extern*******************/ extern "C" void tran_gray_cuda(unsigned char *src,unsigned char* dst,int nWidth,int nHeight) { cuda_tran_gray<<<BLOCK_NUM,THREAD_NUM>>>(src,dst,nWidth,nHeight); } extern "C" void Sobel_cuda(unsigned char *src, unsigned char *dst, int width, int height) { kernel_Sobel<<<BLOCK_NUM, THREAD_NUM>>>(src, dst, width, height); } extern "C" void RotImgProgress_cuda(unsigned char *src, unsigned char *dst, float cos, float sin, float dx, float dy, int src_width, int src_height) { dim3 block((src_width+31)/32, (src_height+31)/32); dim3 thread(32,32); float a, b, c,d ; int m,n,p,q; a = cos; b = sin; c = dx; d = dy; m = (int)((a / (a * a + b * b)) * 1024.0); n = (int)((b / (a * a + b * b)) * 1024.0); p = (int)(-((a * c + b * d) / (a * a + b * b)) * 1024.0) + 512; q = (int)(-((a * d - b * c) / (a * a + b * b)) * 1024.0) + 512; kernel_RotImgProgress_<<<block, thread>>>(src, dst, src_width, src_height, m, n, p, q); }
3f7647425389267e657cfb04c49f0df3b01925a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void cudaGetShiftedMidPrice(int N_inst, int batch_size, float *alphas, float *mid, float *shifted_prc){ int b_sz = blockDim.x, b_id = blockIdx.x, t_id = threadIdx.x; if(b_id < N_inst){ for(int i=t_id; i<batch_size; i += b_sz){ shifted_prc[b_id * batch_size + i] = (1. + alphas[b_id * batch_size + i]) * mid[i]; } } }
3f7647425389267e657cfb04c49f0df3b01925a4.cu
#include "includes.h" __global__ void cudaGetShiftedMidPrice(int N_inst, int batch_size, float *alphas, float *mid, float *shifted_prc){ int b_sz = blockDim.x, b_id = blockIdx.x, t_id = threadIdx.x; if(b_id < N_inst){ for(int i=t_id; i<batch_size; i += b_sz){ shifted_prc[b_id * batch_size + i] = (1. + alphas[b_id * batch_size + i]) * mid[i]; } } }
2aa38e8bad9e03638579347bde18213714d01364.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * Licensed under The Apache-2.0 License [see LICENSE for details] * \file positional_pooling.cu * \brief positional pooling operator * \author Jiajian Zeng */ #include "./positional_pooling-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> #include "../../common/cuda_utils.h" #include "../mxnet_op.h" using mxnet::TShape; namespace mshadow{ namespace cuda { /*! * \brief Positional pooling gpu kernel for 2-D images. * Do not call this kernel directly. Use the interface PositionalPoolForward(). */ template <typename DType> __global__ void PositionalPoolForwardKernel(const int nthreads, const DType* in_data, const DType* in_map, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int pool_type, DType* out_data) { using mshadow::red::limits::MinValue; using mxnet::op::ppool::kProd; // index is the output image's pixel index in NCHW // suppose a pixel in the output image's location is (n, c, ph, pw) // then index = pooled_height * pooled_width * (n * channels + c) + // ph * pooled_width + pw CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); const DType* map_slice = in_map + n * height * width; const DType* data_slice = in_data + (n * channels + c) * height * width; DType max_map_val = MinValue<DType>(); DType data_val = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { const DType map_val = map_slice[h * width + w]; if (map_val >= max_map_val) { // NOTE: use >= here to facilitate the backward computation max_map_val = map_val; data_val = data_slice[h * width + w]; if (pool_type == kProd) { data_val *= map_val; } } } } out_data[index] = data_val; } } template<typename DType> inline void PositionalPoolForward(Stream<gpu>* s, const Tensor<gpu, 4, DType> &out, const Tensor<gpu, 4, DType> &data, const Tensor<gpu, 4, DType> &map, const TShape& kernel, const TShape& pad, const TShape& stride, const int pool_type) { const DType* pdata = data.dptr_; const DType* pmap = map.dptr_; DType* pout = out.dptr_; const int channels = 
data.size(1); const int height = data.size(2); const int width = data.size(3); const int pooled_height = out.size(2); const int pooled_width = out.size(3); const int kernel_h = kernel[0]; const int kernel_w = kernel[1]; const int stride_h = stride[0]; const int stride_w = stride[1]; const int pad_h = pad[0]; const int pad_w = pad[1]; const int count = out.shape_.Size(); hipLaunchKernelGGL(( PositionalPoolForwardKernel<DType>), dim3(mxnet::op::mxnet_op::cuda_get_num_blocks(count)), dim3(kBaseThreadNum), 0, Stream<gpu>::GetStream(s) , count, pdata, pmap, channels, height, width, pooled_height, pooled_width, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, pool_type, pout); MSHADOW_CUDA_POST_KERNEL_CHECK(PositionalPoolForwardKernel); } /*! * \brief Positional pooling backward gpu kernel for 2-D images. * Do not call this kernel directly. Use the interface PositionalPoolBackward(). */ template <typename DType> __global__ void PositionalPoolBackwardKernel(const int nthreads, const DType* out_grad, const DType* in_data, const DType* map_data, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int pool_type, DType* in_grad, DType* map_grad) { using mshadow::red::limits::MinValue; using mxnet::op::ppool::kNormal; // index is the output image's pixel index in NCHW // the order has to be consistent with pooling max // to avoid adding out_grad to the wrong in_grad // in the case where there are multiple max pixels // covered by a kernel window CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); // in data/grad offset batch and channel dims int map_offset = n * height * width; const DType* map_slice = map_data + map_offset; int max_map_idx = -1; DType max_map_val = MinValue<DType>(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { const int map_idx = h * width + w; if (map_slice[map_idx] >= max_map_val) { // NOTE: use >= here to make backward consistent with forward max_map_val = map_slice[map_idx]; max_map_idx = map_idx; } } } // In the case where pad > 0 and kernel = 1, for example, // max_idx can be -1 reaching this step. 
int in_offset = (n * channels + c) * height * width; if (max_map_idx >= 0) { // Normal positional pooling if (pool_type == kNormal) { atomicAdd(&in_grad[in_offset + max_map_idx], out_grad[index]); } else { // Prod positional pooling atomicAdd(&in_grad[in_offset + max_map_idx], out_grad[index] * max_map_val); atomicAdd(&map_grad[map_offset + max_map_idx], out_grad[index] * in_data[in_offset + max_map_idx]); } } } } template<typename DType> inline void PositionalPoolBackward(Stream<gpu>* s, const Tensor<gpu, 4, DType>& in_grad, const Tensor<gpu, 4, DType>& out_grad, const Tensor<gpu, 4, DType>& map_grad, const Tensor<gpu, 4, DType>& in_data, const Tensor<gpu, 4, DType>& map_data, const TShape& kernel, const TShape& pad, const TShape& stride, const int pool_type) { const DType* pdata = in_data.dptr_; const DType* pmap = map_data.dptr_; const DType* pout_grad = out_grad.dptr_; DType* pin_grad = in_grad.dptr_; DType* pmap_grad = map_grad.dptr_; const int channels = in_data.size(1); const int height = in_data.size(2); const int width = in_data.size(3); const int pooled_height = out_grad.size(2); const int pooled_width = out_grad.size(3); const int kernel_h = kernel[0]; const int kernel_w = kernel[1]; const int stride_h = stride[0]; const int stride_w = stride[1]; const int pad_h = pad[0]; const int pad_w = pad[1]; const int count = out_grad.shape_.Size(); hipLaunchKernelGGL(( PositionalPoolBackwardKernel<DType>), dim3(mxnet::op::mxnet_op::cuda_get_num_blocks(count)), dim3(kBaseThreadNum), 0, Stream<gpu>::GetStream(s) , count, pout_grad, pdata, pmap, channels, height, width, pooled_height, pooled_width, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, pool_type, pin_grad, pmap_grad); MSHADOW_CUDA_POST_KERNEL_CHECK(PositionalPoolBackwardKernel); } } // namespace cuda template<typename DType> inline void PositionalPoolForward(Stream<gpu>* s, const Tensor<gpu, 4, DType> &out, const Tensor<gpu, 4, DType> &data, const Tensor<gpu, 4, DType> &map, const TShape& kernel, const TShape& pad, const TShape& stride, const int pool_type) { cuda::PositionalPoolForward(s, out, data, map, kernel, pad, stride, pool_type); } template<typename DType> inline void PositionalPoolBackward(Stream<gpu>* s, const Tensor<gpu, 4, DType>& in_grad, const Tensor<gpu, 4, DType>& out_grad, const Tensor<gpu, 4, DType>& map_grad, const Tensor<gpu, 4, DType>& in_data, const Tensor<gpu, 4, DType>& map_data, const TShape& kernel, const TShape& pad, const TShape& stride, const int pool_type) { cuda::PositionalPoolBackward(s, in_grad, out_grad, map_grad, in_data, map_data, kernel, pad, stride, pool_type); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(PositionalPoolingParam param, int dtype) { Operator* op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new PositionalPoolingOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
2aa38e8bad9e03638579347bde18213714d01364.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * Licensed under The Apache-2.0 License [see LICENSE for details] * \file positional_pooling.cu * \brief positional pooling operator * \author Jiajian Zeng */ #include "./positional_pooling-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> #include "../../common/cuda_utils.h" #include "../mxnet_op.h" using mxnet::TShape; namespace mshadow{ namespace cuda { /*! * \brief Positional pooling gpu kernel for 2-D images. * Do not call this kernel directly. Use the interface PositionalPoolForward(). */ template <typename DType> __global__ void PositionalPoolForwardKernel(const int nthreads, const DType* in_data, const DType* in_map, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int pool_type, DType* out_data) { using mshadow::red::limits::MinValue; using mxnet::op::ppool::kProd; // index is the output image's pixel index in NCHW // suppose a pixel in the output image's location is (n, c, ph, pw) // then index = pooled_height * pooled_width * (n * channels + c) + // ph * pooled_width + pw CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); const DType* map_slice = in_map + n * height * width; const DType* data_slice = in_data + (n * channels + c) * height * width; DType max_map_val = MinValue<DType>(); DType data_val = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { const DType map_val = map_slice[h * width + w]; if (map_val >= max_map_val) { // NOTE: use >= here to facilitate the backward computation max_map_val = map_val; data_val = data_slice[h * width + w]; if (pool_type == kProd) { data_val *= map_val; } } } } out_data[index] = data_val; } } template<typename DType> inline void PositionalPoolForward(Stream<gpu>* s, const Tensor<gpu, 4, DType> &out, const Tensor<gpu, 4, DType> &data, const Tensor<gpu, 4, DType> &map, const TShape& kernel, const TShape& pad, const TShape& stride, const int pool_type) { const DType* pdata = data.dptr_; const DType* pmap = map.dptr_; DType* pout = out.dptr_; const int channels = data.size(1); const int height = data.size(2); const int width = data.size(3); const int 
pooled_height = out.size(2); const int pooled_width = out.size(3); const int kernel_h = kernel[0]; const int kernel_w = kernel[1]; const int stride_h = stride[0]; const int stride_w = stride[1]; const int pad_h = pad[0]; const int pad_w = pad[1]; const int count = out.shape_.Size(); PositionalPoolForwardKernel<DType><<< mxnet::op::mxnet_op::cuda_get_num_blocks(count), kBaseThreadNum, 0, Stream<gpu>::GetStream(s) >>>( count, pdata, pmap, channels, height, width, pooled_height, pooled_width, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, pool_type, pout); MSHADOW_CUDA_POST_KERNEL_CHECK(PositionalPoolForwardKernel); } /*! * \brief Positional pooling backward gpu kernel for 2-D images. * Do not call this kernel directly. Use the interface PositionalPoolBackward(). */ template <typename DType> __global__ void PositionalPoolBackwardKernel(const int nthreads, const DType* out_grad, const DType* in_data, const DType* map_data, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int pool_type, DType* in_grad, DType* map_grad) { using mshadow::red::limits::MinValue; using mxnet::op::ppool::kNormal; // index is the output image's pixel index in NCHW // the order has to be consistent with pooling max // to avoid adding out_grad to the wrong in_grad // in the case where there are multiple max pixels // covered by a kernel window CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); // in data/grad offset batch and channel dims int map_offset = n * height * width; const DType* map_slice = map_data + map_offset; int max_map_idx = -1; DType max_map_val = MinValue<DType>(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { const int map_idx = h * width + w; if (map_slice[map_idx] >= max_map_val) { // NOTE: use >= here to make backward consistent with forward max_map_val = map_slice[map_idx]; max_map_idx = map_idx; } } } // In the case where pad > 0 and kernel = 1, for example, // max_idx can be -1 reaching this step. 
int in_offset = (n * channels + c) * height * width; if (max_map_idx >= 0) { // Normal positional pooling if (pool_type == kNormal) { atomicAdd(&in_grad[in_offset + max_map_idx], out_grad[index]); } else { // Prod positional pooling atomicAdd(&in_grad[in_offset + max_map_idx], out_grad[index] * max_map_val); atomicAdd(&map_grad[map_offset + max_map_idx], out_grad[index] * in_data[in_offset + max_map_idx]); } } } } template<typename DType> inline void PositionalPoolBackward(Stream<gpu>* s, const Tensor<gpu, 4, DType>& in_grad, const Tensor<gpu, 4, DType>& out_grad, const Tensor<gpu, 4, DType>& map_grad, const Tensor<gpu, 4, DType>& in_data, const Tensor<gpu, 4, DType>& map_data, const TShape& kernel, const TShape& pad, const TShape& stride, const int pool_type) { const DType* pdata = in_data.dptr_; const DType* pmap = map_data.dptr_; const DType* pout_grad = out_grad.dptr_; DType* pin_grad = in_grad.dptr_; DType* pmap_grad = map_grad.dptr_; const int channels = in_data.size(1); const int height = in_data.size(2); const int width = in_data.size(3); const int pooled_height = out_grad.size(2); const int pooled_width = out_grad.size(3); const int kernel_h = kernel[0]; const int kernel_w = kernel[1]; const int stride_h = stride[0]; const int stride_w = stride[1]; const int pad_h = pad[0]; const int pad_w = pad[1]; const int count = out_grad.shape_.Size(); PositionalPoolBackwardKernel<DType><<< mxnet::op::mxnet_op::cuda_get_num_blocks(count), kBaseThreadNum, 0, Stream<gpu>::GetStream(s) >>>( count, pout_grad, pdata, pmap, channels, height, width, pooled_height, pooled_width, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, pool_type, pin_grad, pmap_grad); MSHADOW_CUDA_POST_KERNEL_CHECK(PositionalPoolBackwardKernel); } } // namespace cuda template<typename DType> inline void PositionalPoolForward(Stream<gpu>* s, const Tensor<gpu, 4, DType> &out, const Tensor<gpu, 4, DType> &data, const Tensor<gpu, 4, DType> &map, const TShape& kernel, const TShape& pad, const TShape& stride, const int pool_type) { cuda::PositionalPoolForward(s, out, data, map, kernel, pad, stride, pool_type); } template<typename DType> inline void PositionalPoolBackward(Stream<gpu>* s, const Tensor<gpu, 4, DType>& in_grad, const Tensor<gpu, 4, DType>& out_grad, const Tensor<gpu, 4, DType>& map_grad, const Tensor<gpu, 4, DType>& in_data, const Tensor<gpu, 4, DType>& map_data, const TShape& kernel, const TShape& pad, const TShape& stride, const int pool_type) { cuda::PositionalPoolBackward(s, in_grad, out_grad, map_grad, in_data, map_data, kernel, pad, stride, pool_type); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(PositionalPoolingParam param, int dtype) { Operator* op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new PositionalPoolingOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
e940be777112b43254039b07820b01aaa2b3e919.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"initialize.h" __global__ void initialize(int *glcm) { int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; glcm[ip] = 0; } __global__ void initialize_tex(float *texture){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; atomicExch(&texture[ip], 0.0); //printf("%f", texture_glcm[0]); } __global__ void initialize_mtex(float *texture){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; atomicExch(&texture[ip], 255.0); //printf("%f", texture_glcm[0]); } __global__ void Preprocessing_image_GLCM(int *dev_image, const int *image, int Min_V, int Max_V, int bin_width, int MASK_V){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; atomicExch(&dev_image[ip], image[ip]==-1?-1:(image[ip]-Min_V)/bin_width); } __global__ void Preprocessing_image_firstorder(int *dev_image, const int *image, int Min_V, int Max_V, int MASK_V){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; atomicExch(&dev_image[ip], image[ip]-Min_V); }
e940be777112b43254039b07820b01aaa2b3e919.cu
#include"initialize.h" __global__ void initialize(int *glcm) { int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; glcm[ip] = 0; } __global__ void initialize_tex(float *texture){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; atomicExch(&texture[ip], 0.0); //printf("%f", texture_glcm[0]); } __global__ void initialize_mtex(float *texture){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; atomicExch(&texture[ip], 255.0); //printf("%f", texture_glcm[0]); } __global__ void Preprocessing_image_GLCM(int *dev_image, const int *image, int Min_V, int Max_V, int bin_width, int MASK_V){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; atomicExch(&dev_image[ip], image[ip]==-1?-1:(image[ip]-Min_V)/bin_width); } __global__ void Preprocessing_image_firstorder(int *dev_image, const int *image, int Min_V, int Max_V, int MASK_V){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; atomicExch(&dev_image[ip], image[ip]-Min_V); }
485ef10d712971813eb4ba27edfc590bbd5e89c9.hip
// !!! This is a file automatically generated by hipify!!! /** * @copyright (c) 2012- King Abdullah University of Science and * Technology (KAUST). All rights reserved. **/ /** * @file src/Xblas_core.cu * KBLAS is a high performance CUDA library for subset of BLAS * and LAPACK routines optimized for NVIDIA GPUs. * KBLAS is provided by KAUST. * * @version 3.0.0 * @author Ali Charara * @date 2018-11-14 **/ #include <stdlib.h> #include <stdio.h> #include <set> #include <rocblas.h> #include "kblas.h" #include "operators.h" #include "Xblas_core.ch" #include "Xhelper_funcs.ch" #include "workspace_queries.ch" //#################################################################################################### // wrapper functions around cuBLAS //#################################################################################################### //============================================================================================== // A, B, C: host pointers to device buffers int kblasXgemm( kblasHandle_t handle, char transa, char transb, int m, int n, int k, const TYPE alpha, const TYPE *A, int lda, const TYPE *B, int ldb, const TYPE beta, TYPE *C, int ldc) { hipblasStatus_t status = cublasXgemm( handle->cublas_handle, transa == KBLAS_Trans ? HIPBLAS_OP_T : HIPBLAS_OP_N, transb == KBLAS_Trans ? HIPBLAS_OP_T : HIPBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc); check_error_ret(status, KBLAS_cuBLAS_Error); return KBLAS_Success; } //============================================================================================== // A, B: host pointers to device buffers int kblasXsyrk( kblasHandle_t handle, char uplo, char trans, int m, int n, const TYPE alpha, const TYPE* A, int lda, const TYPE beta, TYPE* B, int ldb) { hipblasStatus_t status = cublasXsyrk( handle->cublas_handle, uplo == KBLAS_Lower ? HIPBLAS_FILL_MODE_LOWER : HIPBLAS_FILL_MODE_UPPER, trans == KBLAS_Trans ? HIPBLAS_OP_T : HIPBLAS_OP_N, m, n, &alpha, A, lda, &beta, B, ldb); check_error_ret(status, KBLAS_cuBLAS_Error); return KBLAS_Success; } //========================================================================= // A, B, C: host pointers to device buffers int kblasXsymm( kblasHandle_t handle, char side, char uplo, int m, int n, const TYPE alpha, const TYPE *A, int lda, const TYPE *B, int ldb, const TYPE beta, TYPE *C, int ldc) { hipblasStatus_t status = cublasXsymm( handle->cublas_handle, side == KBLAS_Left ? HIPBLAS_SIDE_LEFT : HIPBLAS_SIDE_RIGHT, uplo == KBLAS_Lower ? HIPBLAS_FILL_MODE_LOWER : HIPBLAS_FILL_MODE_UPPER, m, n, &alpha, (const TYPE*) A, lda, (const TYPE*) B, ldb, &beta, C, ldc); check_error_ret(status, KBLAS_cuBLAS_Error); return KBLAS_Success; } //========================================================================= int kblasXtrsm( kblasHandle_t handle, char side, char uplo, char trans, char diag, int m, int n, const TYPE alpha, const TYPE* A, int lda, TYPE* B, int ldb) { //TODO if cuda version >= 8, call cublas instead //TODO verify this is better than cublas if cuda >= 8 hipblasStatus_t status = kblasXtrsm( handle->cublas_handle, side == KBLAS_Left ? HIPBLAS_SIDE_LEFT : HIPBLAS_SIDE_RIGHT, uplo == KBLAS_Lower ? HIPBLAS_FILL_MODE_LOWER : HIPBLAS_FILL_MODE_UPPER, trans == KBLAS_Trans ? HIPBLAS_OP_T : HIPBLAS_OP_N, diag == KBLAS_NonUnit ? 
HIPBLAS_DIAG_NON_UNIT : HIPBLAS_DIAG_UNIT, m, n, &alpha, A, lda, B, ldb); check_error_ret(status, KBLAS_cuBLAS_Error); return KBLAS_Success; } //========================================================================= int kblasXtrmm( kblasHandle_t handle, char side, char uplo, char trans, char diag, int m, int n, const TYPE alpha, const TYPE* A, int lda, TYPE* B, int ldb) { //TODO if cuda version >= 8, call cublas instead //TODO verify this is better than cublas if cuda >= 8 hipblasStatus_t status = kblasXtrmm( handle->cublas_handle, side == KBLAS_Left ? HIPBLAS_SIDE_LEFT : HIPBLAS_SIDE_RIGHT, uplo == KBLAS_Lower ? HIPBLAS_FILL_MODE_LOWER : HIPBLAS_FILL_MODE_UPPER, trans == KBLAS_Trans ? HIPBLAS_OP_T : HIPBLAS_OP_N, diag == KBLAS_NonUnit ? HIPBLAS_DIAG_NON_UNIT : HIPBLAS_DIAG_UNIT, m, n, &alpha, A, lda, B, ldb); check_error_ret(status, KBLAS_cuBLAS_Error); return KBLAS_Success; } //========================================================================= int kblasXscal( kblasHandle_t handle, int n, const TYPE alpha, TYPE *x, int incx) { check_error_ret( cublasXscal( handle->cublas_handle, n, &alpha, x, incx), KBLAS_cuBLAS_Error); return KBLAS_Success; } //========================================================================= int kblasXgeam( kblasHandle_t handle, char transa, char transb, int m, int n, const TYPE alpha, const TYPE *A, int lda, const TYPE beta, const TYPE *B, int ldb, TYPE *C, int ldc) { check_error_ret( cublasXgeam( handle->cublas_handle, transa == KBLAS_Trans ? HIPBLAS_OP_T : HIPBLAS_OP_N, transb == KBLAS_Trans ? HIPBLAS_OP_T : HIPBLAS_OP_N, m, n, &alpha, A, lda, &beta, B, ldb, C, ldc), KBLAS_cuBLAS_Error); return KBLAS_Success; } //========================================================================= #ifdef USE_MAGMA //workspace needed: none // A_array, B_array, C_array: host pointer to array of device pointers to device buffers int Xsymm_batch(kblasHandle_t handle, char side, char uplo, int m, int n, TYPE alpha, TYPE **dA_array, int ldda, long strideA, TYPE **dB_array, int lddb, long strideB, TYPE beta, TYPE **dC_array, int lddc, long strideC, int batchCount) { (void)strideA; (void)strideB; (void)strideC; #if (defined PREC_c) || (defined PREC_z) return KBLAS_NotSupported; #else magma_Xsymm_batched((magma_side_t)(side == KBLAS_Left ? MagmaLeft : MagmaRight), (magma_uplo_t)(uplo == KBLAS_Lower ? 
MagmaLower : MagmaUpper), m, n, alpha, (TYPE**)dA_array, ldda, (TYPE**)dB_array, lddb, beta, dC_array, lddc, batchCount, handle->magma_queue ); return KBLAS_Success; #endif } //------------------------------------------------------------------------- // workspace needed: device pointers // d_A, d_B, d_C: host pointers to device buffers // TODO better implementation is needed int Xsymm_batch(kblasHandle_t handle, char side, char uplo, int m, int n, TYPE alpha, TYPE *d_A, int ldda, long strideA, TYPE *d_B, int lddb, long strideB, TYPE beta, TYPE *d_C, int lddc, long strideC, int batchCount) { #if (defined PREC_c) || (defined PREC_z) return KBLAS_NotSupported; #else KBlasWorkspaceState ws_needed; symm_batch_wsquery_core<true>( batchCount, (kblasWorkspaceState_t)&ws_needed); if( !ws_needed.isSufficient( &(handle->work_space.allocated_ws_state) ) ) return KBLAS_InsufficientWorkspace; TYPE **dA_array, **dB_array, **dC_array; dA_array = (TYPE**)handle->work_space.d_ptrs; dB_array = dA_array + batchCount; dC_array = dB_array + batchCount; Xset_pointer_3(dA_array, d_A, ldda, strideA, dB_array, d_B, lddb, strideB, dC_array, d_C, lddc, strideC, batchCount, handle->stream); magma_Xsymm_batched((magma_side_t)(side == KBLAS_Left ? MagmaLeft : MagmaRight), (magma_uplo_t)(uplo == KBLAS_Lower ? MagmaLower : MagmaUpper), m, n, alpha, dA_array, ldda, dB_array, lddb, beta, dC_array, lddc, batchCount, handle->magma_queue ); return KBLAS_Success; #endif } int Xsymm_batch(kblasHandle_t handle, char side, char uplo, int* m, int* n, int max_m, int max_n, TYPE alpha, TYPE **dA_array, int* ldda, long strideA, TYPE **dB_array, int* lddb, long strideB, TYPE beta, TYPE **dC_array, int* lddc, long strideC, int batchCount) { (void)strideA; (void)strideB; (void)strideC; #if (defined PREC_c) || (defined PREC_z) return KBLAS_NotSupported; #else if(handle->use_magma){ KBlasWorkspaceState ws_needed; symm_batch_nonuniform_wsquery_core((kblasWorkspaceState_t)&ws_needed); if( !ws_needed.isSufficient( &(handle->work_space.allocated_ws_state) ) ){ return KBLAS_InsufficientWorkspace; } int h_max_mn[2]; kblasWorkspace_t ws_current = &(handle->work_space); int* d_max_mn = (int*)(ws_current->d_data); //take care of batch size limitation with magma int batch_increment = 65535; int batch_start = 0; if(max_m > 0 || max_n > 0){ h_max_mn[0] = max_m; h_max_mn[1] = max_n; } while(batch_start != batchCount) { int batch_size = kmin(batch_increment, batchCount - batch_start); if((batchCount > batch_increment) || (max_m <= 0 && max_n <= 0)){ // compute the max. dimensions kblas_imax_size_2(handle, m, n, *d_max_mn, *(d_max_mn+1), batch_size); check_error_ret( hipblasGetVectorAsync( 2, sizeof(int), d_max_mn, 1, h_max_mn, 1, handle->stream ), KBLAS_cuBLAS_Error); check_error_ret( hipStreamSynchronize(handle->stream), KBLAS_CUDA_Error ); } magmablas_Xsymm_vbatched_max_nocheck( (magma_side_t)(side == KBLAS_Left ? MagmaLeft : MagmaRight), (magma_uplo_t)(uplo == KBLAS_Lower ? 
MagmaLower : MagmaUpper), m, n, alpha, dA_array, ldda, dB_array, lddb, beta, dC_array, lddc, batch_size, h_max_mn[0], h_max_mn[1], handle->magma_queue); dA_array += batch_size; dB_array += batch_size; dC_array += batch_size; m += batch_size; n += batch_size; ldda += batch_size; lddb += batch_size; lddc += batch_size; batch_start += batch_size; check_error_ret( hipGetLastError(), KBLAS_MAGMA_Error); } return KBLAS_Success; }else{ printf("Configuration error at %s in file %s at line %d, MAGMA required but not enabled!\n", __func__, __FILE__, __LINE__ ); return KBLAS_WrongConfig; } #endif } //========================================================================= #else//USE_MAGMA int Xsymm_batch(kblasHandle_t handle, char side, char uplo, int m, int n, TYPE alpha, TYPE **dA_array, int ldda, long strideA, TYPE **dB_array, int lddb, long strideB, TYPE beta, TYPE **dC_array, int lddc, long strideC, int batchCount) { //TODO need to provide this return KBLAS_NotSupported; } //------------------------------------------------------------------------- int Xsymm_batch(kblasHandle_t handle, char side, char uplo, int m, int n, TYPE alpha, TYPE *d_A, int ldda, long strideA, TYPE *d_B, int lddb, long strideB, TYPE beta, TYPE *d_C, int lddc, long strideC, int batchCount) { //TODO need to provide this return KBLAS_NotSupported; } #endif//USE_MAGMA
485ef10d712971813eb4ba27edfc590bbd5e89c9.cu
/** * @copyright (c) 2012- King Abdullah University of Science and * Technology (KAUST). All rights reserved. **/ /** * @file src/Xblas_core.cu * KBLAS is a high performance CUDA library for subset of BLAS * and LAPACK routines optimized for NVIDIA GPUs. * KBLAS is provided by KAUST. * * @version 3.0.0 * @author Ali Charara * @date 2018-11-14 **/ #include <stdlib.h> #include <stdio.h> #include <set> #include <cublas_v2.h> #include "kblas.h" #include "operators.h" #include "Xblas_core.ch" #include "Xhelper_funcs.ch" #include "workspace_queries.ch" //#################################################################################################### // wrapper functions around cuBLAS //#################################################################################################### //============================================================================================== // A, B, C: host pointers to device buffers int kblasXgemm( kblasHandle_t handle, char transa, char transb, int m, int n, int k, const TYPE alpha, const TYPE *A, int lda, const TYPE *B, int ldb, const TYPE beta, TYPE *C, int ldc) { cublasStatus_t status = cublasXgemm( handle->cublas_handle, transa == KBLAS_Trans ? CUBLAS_OP_T : CUBLAS_OP_N, transb == KBLAS_Trans ? CUBLAS_OP_T : CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc); check_error_ret(status, KBLAS_cuBLAS_Error); return KBLAS_Success; } //============================================================================================== // A, B: host pointers to device buffers int kblasXsyrk( kblasHandle_t handle, char uplo, char trans, int m, int n, const TYPE alpha, const TYPE* A, int lda, const TYPE beta, TYPE* B, int ldb) { cublasStatus_t status = cublasXsyrk( handle->cublas_handle, uplo == KBLAS_Lower ? CUBLAS_FILL_MODE_LOWER : CUBLAS_FILL_MODE_UPPER, trans == KBLAS_Trans ? CUBLAS_OP_T : CUBLAS_OP_N, m, n, &alpha, A, lda, &beta, B, ldb); check_error_ret(status, KBLAS_cuBLAS_Error); return KBLAS_Success; } //========================================================================= // A, B, C: host pointers to device buffers int kblasXsymm( kblasHandle_t handle, char side, char uplo, int m, int n, const TYPE alpha, const TYPE *A, int lda, const TYPE *B, int ldb, const TYPE beta, TYPE *C, int ldc) { cublasStatus_t status = cublasXsymm( handle->cublas_handle, side == KBLAS_Left ? CUBLAS_SIDE_LEFT : CUBLAS_SIDE_RIGHT, uplo == KBLAS_Lower ? CUBLAS_FILL_MODE_LOWER : CUBLAS_FILL_MODE_UPPER, m, n, &alpha, (const TYPE*) A, lda, (const TYPE*) B, ldb, &beta, C, ldc); check_error_ret(status, KBLAS_cuBLAS_Error); return KBLAS_Success; } //========================================================================= int kblasXtrsm( kblasHandle_t handle, char side, char uplo, char trans, char diag, int m, int n, const TYPE alpha, const TYPE* A, int lda, TYPE* B, int ldb) { //TODO if cuda version >= 8, call cublas instead //TODO verify this is better than cublas if cuda >= 8 cublasStatus_t status = kblasXtrsm( handle->cublas_handle, side == KBLAS_Left ? CUBLAS_SIDE_LEFT : CUBLAS_SIDE_RIGHT, uplo == KBLAS_Lower ? CUBLAS_FILL_MODE_LOWER : CUBLAS_FILL_MODE_UPPER, trans == KBLAS_Trans ? CUBLAS_OP_T : CUBLAS_OP_N, diag == KBLAS_NonUnit ? 
CUBLAS_DIAG_NON_UNIT : CUBLAS_DIAG_UNIT, m, n, &alpha, A, lda, B, ldb); check_error_ret(status, KBLAS_cuBLAS_Error); return KBLAS_Success; } //========================================================================= int kblasXtrmm( kblasHandle_t handle, char side, char uplo, char trans, char diag, int m, int n, const TYPE alpha, const TYPE* A, int lda, TYPE* B, int ldb) { //TODO if cuda version >= 8, call cublas instead //TODO verify this is better than cublas if cuda >= 8 cublasStatus_t status = kblasXtrmm( handle->cublas_handle, side == KBLAS_Left ? CUBLAS_SIDE_LEFT : CUBLAS_SIDE_RIGHT, uplo == KBLAS_Lower ? CUBLAS_FILL_MODE_LOWER : CUBLAS_FILL_MODE_UPPER, trans == KBLAS_Trans ? CUBLAS_OP_T : CUBLAS_OP_N, diag == KBLAS_NonUnit ? CUBLAS_DIAG_NON_UNIT : CUBLAS_DIAG_UNIT, m, n, &alpha, A, lda, B, ldb); check_error_ret(status, KBLAS_cuBLAS_Error); return KBLAS_Success; } //========================================================================= int kblasXscal( kblasHandle_t handle, int n, const TYPE alpha, TYPE *x, int incx) { check_error_ret( cublasXscal( handle->cublas_handle, n, &alpha, x, incx), KBLAS_cuBLAS_Error); return KBLAS_Success; } //========================================================================= int kblasXgeam( kblasHandle_t handle, char transa, char transb, int m, int n, const TYPE alpha, const TYPE *A, int lda, const TYPE beta, const TYPE *B, int ldb, TYPE *C, int ldc) { check_error_ret( cublasXgeam( handle->cublas_handle, transa == KBLAS_Trans ? CUBLAS_OP_T : CUBLAS_OP_N, transb == KBLAS_Trans ? CUBLAS_OP_T : CUBLAS_OP_N, m, n, &alpha, A, lda, &beta, B, ldb, C, ldc), KBLAS_cuBLAS_Error); return KBLAS_Success; } //========================================================================= #ifdef USE_MAGMA //workspace needed: none // A_array, B_array, C_array: host pointer to array of device pointers to device buffers int Xsymm_batch(kblasHandle_t handle, char side, char uplo, int m, int n, TYPE alpha, TYPE **dA_array, int ldda, long strideA, TYPE **dB_array, int lddb, long strideB, TYPE beta, TYPE **dC_array, int lddc, long strideC, int batchCount) { (void)strideA; (void)strideB; (void)strideC; #if (defined PREC_c) || (defined PREC_z) return KBLAS_NotSupported; #else magma_Xsymm_batched((magma_side_t)(side == KBLAS_Left ? MagmaLeft : MagmaRight), (magma_uplo_t)(uplo == KBLAS_Lower ? 
MagmaLower : MagmaUpper), m, n, alpha, (TYPE**)dA_array, ldda, (TYPE**)dB_array, lddb, beta, dC_array, lddc, batchCount, handle->magma_queue ); return KBLAS_Success; #endif } //------------------------------------------------------------------------- // workspace needed: device pointers // d_A, d_B, d_C: host pointers to device buffers // TODO better implementation is needed int Xsymm_batch(kblasHandle_t handle, char side, char uplo, int m, int n, TYPE alpha, TYPE *d_A, int ldda, long strideA, TYPE *d_B, int lddb, long strideB, TYPE beta, TYPE *d_C, int lddc, long strideC, int batchCount) { #if (defined PREC_c) || (defined PREC_z) return KBLAS_NotSupported; #else KBlasWorkspaceState ws_needed; symm_batch_wsquery_core<true>( batchCount, (kblasWorkspaceState_t)&ws_needed); if( !ws_needed.isSufficient( &(handle->work_space.allocated_ws_state) ) ) return KBLAS_InsufficientWorkspace; TYPE **dA_array, **dB_array, **dC_array; dA_array = (TYPE**)handle->work_space.d_ptrs; dB_array = dA_array + batchCount; dC_array = dB_array + batchCount; Xset_pointer_3(dA_array, d_A, ldda, strideA, dB_array, d_B, lddb, strideB, dC_array, d_C, lddc, strideC, batchCount, handle->stream); magma_Xsymm_batched((magma_side_t)(side == KBLAS_Left ? MagmaLeft : MagmaRight), (magma_uplo_t)(uplo == KBLAS_Lower ? MagmaLower : MagmaUpper), m, n, alpha, dA_array, ldda, dB_array, lddb, beta, dC_array, lddc, batchCount, handle->magma_queue ); return KBLAS_Success; #endif } int Xsymm_batch(kblasHandle_t handle, char side, char uplo, int* m, int* n, int max_m, int max_n, TYPE alpha, TYPE **dA_array, int* ldda, long strideA, TYPE **dB_array, int* lddb, long strideB, TYPE beta, TYPE **dC_array, int* lddc, long strideC, int batchCount) { (void)strideA; (void)strideB; (void)strideC; #if (defined PREC_c) || (defined PREC_z) return KBLAS_NotSupported; #else if(handle->use_magma){ KBlasWorkspaceState ws_needed; symm_batch_nonuniform_wsquery_core((kblasWorkspaceState_t)&ws_needed); if( !ws_needed.isSufficient( &(handle->work_space.allocated_ws_state) ) ){ return KBLAS_InsufficientWorkspace; } int h_max_mn[2]; kblasWorkspace_t ws_current = &(handle->work_space); int* d_max_mn = (int*)(ws_current->d_data); //take care of batch size limitation with magma int batch_increment = 65535; int batch_start = 0; if(max_m > 0 || max_n > 0){ h_max_mn[0] = max_m; h_max_mn[1] = max_n; } while(batch_start != batchCount) { int batch_size = kmin(batch_increment, batchCount - batch_start); if((batchCount > batch_increment) || (max_m <= 0 && max_n <= 0)){ // compute the max. dimensions kblas_imax_size_2(handle, m, n, *d_max_mn, *(d_max_mn+1), batch_size); check_error_ret( cublasGetVectorAsync( 2, sizeof(int), d_max_mn, 1, h_max_mn, 1, handle->stream ), KBLAS_cuBLAS_Error); check_error_ret( cudaStreamSynchronize(handle->stream), KBLAS_CUDA_Error ); } magmablas_Xsymm_vbatched_max_nocheck( (magma_side_t)(side == KBLAS_Left ? MagmaLeft : MagmaRight), (magma_uplo_t)(uplo == KBLAS_Lower ? 
MagmaLower : MagmaUpper), m, n, alpha, dA_array, ldda, dB_array, lddb, beta, dC_array, lddc, batch_size, h_max_mn[0], h_max_mn[1], handle->magma_queue); dA_array += batch_size; dB_array += batch_size; dC_array += batch_size; m += batch_size; n += batch_size; ldda += batch_size; lddb += batch_size; lddc += batch_size; batch_start += batch_size; check_error_ret( cudaGetLastError(), KBLAS_MAGMA_Error); } return KBLAS_Success; }else{ printf("Configuration error at %s in file %s at line %d, MAGMA required but not enabled!\n", __func__, __FILE__, __LINE__ ); return KBLAS_WrongConfig; } #endif } //========================================================================= #else//USE_MAGMA int Xsymm_batch(kblasHandle_t handle, char side, char uplo, int m, int n, TYPE alpha, TYPE **dA_array, int ldda, long strideA, TYPE **dB_array, int lddb, long strideB, TYPE beta, TYPE **dC_array, int lddc, long strideC, int batchCount) { //TODO need to provide this return KBLAS_NotSupported; } //------------------------------------------------------------------------- int Xsymm_batch(kblasHandle_t handle, char side, char uplo, int m, int n, TYPE alpha, TYPE *d_A, int ldda, long strideA, TYPE *d_B, int lddb, long strideB, TYPE beta, TYPE *d_C, int lddc, long strideC, int batchCount) { //TODO need to provide this return KBLAS_NotSupported; } #endif//USE_MAGMA
925fc60138501ce9293182af48672a3847b52da0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_advec_cell_kernel1_zdir; int xdim0_advec_cell_kernel1_zdir_h = -1; __constant__ int ydim0_advec_cell_kernel1_zdir; int ydim0_advec_cell_kernel1_zdir_h = -1; __constant__ int xdim1_advec_cell_kernel1_zdir; int xdim1_advec_cell_kernel1_zdir_h = -1; __constant__ int ydim1_advec_cell_kernel1_zdir; int ydim1_advec_cell_kernel1_zdir_h = -1; __constant__ int xdim2_advec_cell_kernel1_zdir; int xdim2_advec_cell_kernel1_zdir_h = -1; __constant__ int ydim2_advec_cell_kernel1_zdir; int ydim2_advec_cell_kernel1_zdir_h = -1; __constant__ int xdim3_advec_cell_kernel1_zdir; int xdim3_advec_cell_kernel1_zdir_h = -1; __constant__ int ydim3_advec_cell_kernel1_zdir; int ydim3_advec_cell_kernel1_zdir_h = -1; __constant__ int xdim4_advec_cell_kernel1_zdir; int xdim4_advec_cell_kernel1_zdir_h = -1; __constant__ int ydim4_advec_cell_kernel1_zdir; int ydim4_advec_cell_kernel1_zdir_h = -1; __constant__ int xdim5_advec_cell_kernel1_zdir; int xdim5_advec_cell_kernel1_zdir_h = -1; __constant__ int ydim5_advec_cell_kernel1_zdir; int ydim5_advec_cell_kernel1_zdir_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #define OPS_ACC0(x, y, z) \ (x + xdim0_advec_cell_kernel1_zdir * (y) + \ xdim0_advec_cell_kernel1_zdir * ydim0_advec_cell_kernel1_zdir * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_advec_cell_kernel1_zdir * (y) + \ xdim1_advec_cell_kernel1_zdir * ydim1_advec_cell_kernel1_zdir * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_advec_cell_kernel1_zdir * (y) + \ xdim2_advec_cell_kernel1_zdir * ydim2_advec_cell_kernel1_zdir * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_advec_cell_kernel1_zdir * (y) + \ xdim3_advec_cell_kernel1_zdir * ydim3_advec_cell_kernel1_zdir * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_advec_cell_kernel1_zdir * (y) + \ xdim4_advec_cell_kernel1_zdir * ydim4_advec_cell_kernel1_zdir * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_advec_cell_kernel1_zdir * (y) + \ xdim5_advec_cell_kernel1_zdir * ydim5_advec_cell_kernel1_zdir * (z)) // user function __device__ inline void advec_cell_kernel1_zdir_gpu(double *pre_vol, double *post_vol, const double *volume, const double *vol_flux_x, const double *vol_flux_y, const double *vol_flux_z) { pre_vol[OPS_ACC0(0, 0, 0)] = volume[OPS_ACC2(0, 0, 0)] + (vol_flux_x[OPS_ACC3(1, 0, 0)] - vol_flux_x[OPS_ACC3(0, 0, 0)] + vol_flux_y[OPS_ACC4(0, 1, 0)] - vol_flux_y[OPS_ACC4(0, 0, 0)] + vol_flux_z[OPS_ACC5(0, 0, 1)] - vol_flux_z[OPS_ACC5(0, 0, 0)]); post_vol[OPS_ACC1(0, 0, 0)] = pre_vol[OPS_ACC0(0, 0, 0)] - (vol_flux_z[OPS_ACC5(0, 0, 1)] - vol_flux_z[OPS_ACC5(0, 0, 0)]); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 __global__ void ops_advec_cell_kernel1_zdir(double *__restrict arg0, double *__restrict arg1, const double *__restrict arg2, const double *__restrict arg3, const double *__restrict arg4, const double *__restrict arg5, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_cell_kernel1_zdir + idx_z * 1 * 1 * xdim0_advec_cell_kernel1_zdir * ydim0_advec_cell_kernel1_zdir; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_cell_kernel1_zdir + idx_z * 1 * 1 * xdim1_advec_cell_kernel1_zdir * ydim1_advec_cell_kernel1_zdir; arg2 += idx_x * 1 * 
1 + idx_y * 1 * 1 * xdim2_advec_cell_kernel1_zdir + idx_z * 1 * 1 * xdim2_advec_cell_kernel1_zdir * ydim2_advec_cell_kernel1_zdir; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_cell_kernel1_zdir + idx_z * 1 * 1 * xdim3_advec_cell_kernel1_zdir * ydim3_advec_cell_kernel1_zdir; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_cell_kernel1_zdir + idx_z * 1 * 1 * xdim4_advec_cell_kernel1_zdir * ydim4_advec_cell_kernel1_zdir; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_advec_cell_kernel1_zdir + idx_z * 1 * 1 * xdim5_advec_cell_kernel1_zdir * ydim5_advec_cell_kernel1_zdir; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_cell_kernel1_zdir_gpu(arg0, arg1, arg2, arg3, arg4, arg5); } } // host stub function void ops_par_loop_advec_cell_kernel1_zdir(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) { // Timing double t1, t2, c1, c2; ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 6, range, 15)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(15, "advec_cell_kernel1_zdir"); OPS_kernels[15].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; if (xdim0 != xdim0_advec_cell_kernel1_zdir_h || ydim0 != ydim0_advec_cell_kernel1_zdir_h || xdim1 != xdim1_advec_cell_kernel1_zdir_h || ydim1 != ydim1_advec_cell_kernel1_zdir_h || xdim2 != xdim2_advec_cell_kernel1_zdir_h || ydim2 != ydim2_advec_cell_kernel1_zdir_h || xdim3 != xdim3_advec_cell_kernel1_zdir_h || ydim3 != ydim3_advec_cell_kernel1_zdir_h || xdim4 != xdim4_advec_cell_kernel1_zdir_h || ydim4 != ydim4_advec_cell_kernel1_zdir_h || xdim5 != xdim5_advec_cell_kernel1_zdir_h || ydim5 != ydim5_advec_cell_kernel1_zdir_h) { hipMemcpyToSymbol(xdim0_advec_cell_kernel1_zdir, &xdim0, sizeof(int)); xdim0_advec_cell_kernel1_zdir_h = xdim0; hipMemcpyToSymbol(ydim0_advec_cell_kernel1_zdir, &ydim0, sizeof(int)); ydim0_advec_cell_kernel1_zdir_h = ydim0; hipMemcpyToSymbol(xdim1_advec_cell_kernel1_zdir, &xdim1, sizeof(int)); xdim1_advec_cell_kernel1_zdir_h = xdim1; hipMemcpyToSymbol(ydim1_advec_cell_kernel1_zdir, &ydim1, sizeof(int)); 
ydim1_advec_cell_kernel1_zdir_h = ydim1; hipMemcpyToSymbol(xdim2_advec_cell_kernel1_zdir, &xdim2, sizeof(int)); xdim2_advec_cell_kernel1_zdir_h = xdim2; hipMemcpyToSymbol(ydim2_advec_cell_kernel1_zdir, &ydim2, sizeof(int)); ydim2_advec_cell_kernel1_zdir_h = ydim2; hipMemcpyToSymbol(xdim3_advec_cell_kernel1_zdir, &xdim3, sizeof(int)); xdim3_advec_cell_kernel1_zdir_h = xdim3; hipMemcpyToSymbol(ydim3_advec_cell_kernel1_zdir, &ydim3, sizeof(int)); ydim3_advec_cell_kernel1_zdir_h = ydim3; hipMemcpyToSymbol(xdim4_advec_cell_kernel1_zdir, &xdim4, sizeof(int)); xdim4_advec_cell_kernel1_zdir_h = xdim4; hipMemcpyToSymbol(ydim4_advec_cell_kernel1_zdir, &ydim4, sizeof(int)); ydim4_advec_cell_kernel1_zdir_h = ydim4; hipMemcpyToSymbol(xdim5_advec_cell_kernel1_zdir, &xdim5, sizeof(int)); xdim5_advec_cell_kernel1_zdir_h = xdim5; hipMemcpyToSymbol(ydim5_advec_cell_kernel1_zdir, &ydim5, sizeof(int)); ydim5_advec_cell_kernel1_zdir_h = ydim5; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; char *p_a[6]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); 
base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; ops_H_D_exchanges_device(args, 6); ops_halo_exchanges(args, 6, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[15].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_advec_cell_kernel1_zdir), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[15].time += t1 - t2; } ops_set_dirtybit_device(args, 6); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[15].mpi_time += t2 - t1; OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg5); } }
925fc60138501ce9293182af48672a3847b52da0.cu
// // auto-generated by ops.py // __constant__ int xdim0_advec_cell_kernel1_zdir; int xdim0_advec_cell_kernel1_zdir_h = -1; __constant__ int ydim0_advec_cell_kernel1_zdir; int ydim0_advec_cell_kernel1_zdir_h = -1; __constant__ int xdim1_advec_cell_kernel1_zdir; int xdim1_advec_cell_kernel1_zdir_h = -1; __constant__ int ydim1_advec_cell_kernel1_zdir; int ydim1_advec_cell_kernel1_zdir_h = -1; __constant__ int xdim2_advec_cell_kernel1_zdir; int xdim2_advec_cell_kernel1_zdir_h = -1; __constant__ int ydim2_advec_cell_kernel1_zdir; int ydim2_advec_cell_kernel1_zdir_h = -1; __constant__ int xdim3_advec_cell_kernel1_zdir; int xdim3_advec_cell_kernel1_zdir_h = -1; __constant__ int ydim3_advec_cell_kernel1_zdir; int ydim3_advec_cell_kernel1_zdir_h = -1; __constant__ int xdim4_advec_cell_kernel1_zdir; int xdim4_advec_cell_kernel1_zdir_h = -1; __constant__ int ydim4_advec_cell_kernel1_zdir; int ydim4_advec_cell_kernel1_zdir_h = -1; __constant__ int xdim5_advec_cell_kernel1_zdir; int xdim5_advec_cell_kernel1_zdir_h = -1; __constant__ int ydim5_advec_cell_kernel1_zdir; int ydim5_advec_cell_kernel1_zdir_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #define OPS_ACC0(x, y, z) \ (x + xdim0_advec_cell_kernel1_zdir * (y) + \ xdim0_advec_cell_kernel1_zdir * ydim0_advec_cell_kernel1_zdir * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_advec_cell_kernel1_zdir * (y) + \ xdim1_advec_cell_kernel1_zdir * ydim1_advec_cell_kernel1_zdir * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_advec_cell_kernel1_zdir * (y) + \ xdim2_advec_cell_kernel1_zdir * ydim2_advec_cell_kernel1_zdir * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_advec_cell_kernel1_zdir * (y) + \ xdim3_advec_cell_kernel1_zdir * ydim3_advec_cell_kernel1_zdir * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_advec_cell_kernel1_zdir * (y) + \ xdim4_advec_cell_kernel1_zdir * ydim4_advec_cell_kernel1_zdir * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_advec_cell_kernel1_zdir * (y) + \ xdim5_advec_cell_kernel1_zdir * ydim5_advec_cell_kernel1_zdir * (z)) // user function __device__ inline void advec_cell_kernel1_zdir_gpu(double *pre_vol, double *post_vol, const double *volume, const double *vol_flux_x, const double *vol_flux_y, const double *vol_flux_z) { pre_vol[OPS_ACC0(0, 0, 0)] = volume[OPS_ACC2(0, 0, 0)] + (vol_flux_x[OPS_ACC3(1, 0, 0)] - vol_flux_x[OPS_ACC3(0, 0, 0)] + vol_flux_y[OPS_ACC4(0, 1, 0)] - vol_flux_y[OPS_ACC4(0, 0, 0)] + vol_flux_z[OPS_ACC5(0, 0, 1)] - vol_flux_z[OPS_ACC5(0, 0, 0)]); post_vol[OPS_ACC1(0, 0, 0)] = pre_vol[OPS_ACC0(0, 0, 0)] - (vol_flux_z[OPS_ACC5(0, 0, 1)] - vol_flux_z[OPS_ACC5(0, 0, 0)]); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 __global__ void ops_advec_cell_kernel1_zdir(double *__restrict arg0, double *__restrict arg1, const double *__restrict arg2, const double *__restrict arg3, const double *__restrict arg4, const double *__restrict arg5, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_cell_kernel1_zdir + idx_z * 1 * 1 * xdim0_advec_cell_kernel1_zdir * ydim0_advec_cell_kernel1_zdir; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_cell_kernel1_zdir + idx_z * 1 * 1 * xdim1_advec_cell_kernel1_zdir * ydim1_advec_cell_kernel1_zdir; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_cell_kernel1_zdir + idx_z * 1 * 1 * 
xdim2_advec_cell_kernel1_zdir * ydim2_advec_cell_kernel1_zdir; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_cell_kernel1_zdir + idx_z * 1 * 1 * xdim3_advec_cell_kernel1_zdir * ydim3_advec_cell_kernel1_zdir; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_cell_kernel1_zdir + idx_z * 1 * 1 * xdim4_advec_cell_kernel1_zdir * ydim4_advec_cell_kernel1_zdir; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_advec_cell_kernel1_zdir + idx_z * 1 * 1 * xdim5_advec_cell_kernel1_zdir * ydim5_advec_cell_kernel1_zdir; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_cell_kernel1_zdir_gpu(arg0, arg1, arg2, arg3, arg4, arg5); } } // host stub function void ops_par_loop_advec_cell_kernel1_zdir(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) { // Timing double t1, t2, c1, c2; ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 6, range, 15)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(15, "advec_cell_kernel1_zdir"); OPS_kernels[15].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; if (xdim0 != xdim0_advec_cell_kernel1_zdir_h || ydim0 != ydim0_advec_cell_kernel1_zdir_h || xdim1 != xdim1_advec_cell_kernel1_zdir_h || ydim1 != ydim1_advec_cell_kernel1_zdir_h || xdim2 != xdim2_advec_cell_kernel1_zdir_h || ydim2 != ydim2_advec_cell_kernel1_zdir_h || xdim3 != xdim3_advec_cell_kernel1_zdir_h || ydim3 != ydim3_advec_cell_kernel1_zdir_h || xdim4 != xdim4_advec_cell_kernel1_zdir_h || ydim4 != ydim4_advec_cell_kernel1_zdir_h || xdim5 != xdim5_advec_cell_kernel1_zdir_h || ydim5 != ydim5_advec_cell_kernel1_zdir_h) { cudaMemcpyToSymbol(xdim0_advec_cell_kernel1_zdir, &xdim0, sizeof(int)); xdim0_advec_cell_kernel1_zdir_h = xdim0; cudaMemcpyToSymbol(ydim0_advec_cell_kernel1_zdir, &ydim0, sizeof(int)); ydim0_advec_cell_kernel1_zdir_h = ydim0; cudaMemcpyToSymbol(xdim1_advec_cell_kernel1_zdir, &xdim1, sizeof(int)); xdim1_advec_cell_kernel1_zdir_h = xdim1; cudaMemcpyToSymbol(ydim1_advec_cell_kernel1_zdir, &ydim1, sizeof(int)); ydim1_advec_cell_kernel1_zdir_h = ydim1; 
cudaMemcpyToSymbol(xdim2_advec_cell_kernel1_zdir, &xdim2, sizeof(int)); xdim2_advec_cell_kernel1_zdir_h = xdim2; cudaMemcpyToSymbol(ydim2_advec_cell_kernel1_zdir, &ydim2, sizeof(int)); ydim2_advec_cell_kernel1_zdir_h = ydim2; cudaMemcpyToSymbol(xdim3_advec_cell_kernel1_zdir, &xdim3, sizeof(int)); xdim3_advec_cell_kernel1_zdir_h = xdim3; cudaMemcpyToSymbol(ydim3_advec_cell_kernel1_zdir, &ydim3, sizeof(int)); ydim3_advec_cell_kernel1_zdir_h = ydim3; cudaMemcpyToSymbol(xdim4_advec_cell_kernel1_zdir, &xdim4, sizeof(int)); xdim4_advec_cell_kernel1_zdir_h = xdim4; cudaMemcpyToSymbol(ydim4_advec_cell_kernel1_zdir, &ydim4, sizeof(int)); ydim4_advec_cell_kernel1_zdir_h = ydim4; cudaMemcpyToSymbol(xdim5_advec_cell_kernel1_zdir, &xdim5, sizeof(int)); xdim5_advec_cell_kernel1_zdir_h = xdim5; cudaMemcpyToSymbol(ydim5_advec_cell_kernel1_zdir, &ydim5, sizeof(int)); ydim5_advec_cell_kernel1_zdir_h = ydim5; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; char *p_a[6]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3 + dat3 * 
args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; ops_H_D_exchanges_device(args, 6); ops_halo_exchanges(args, 6, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[15].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_advec_cell_kernel1_zdir<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[15].time += t1 - t2; } ops_set_dirtybit_device(args, 6); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[15].mpi_time += t2 - t1; OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg5); } }
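The host stub above builds its launch configuration as grid((x_size-1)/OPS_block_size_x+1, (y_size-1)/OPS_block_size_y+1, z_size) with a bounds check inside the kernel. A minimal self-contained sketch of that ceiling-division sizing pattern, with hypothetical extents, block sizes, and kernel standing in for the OPS globals:

#include <cuda_runtime.h>

// Hypothetical stand-in kernel; same guard shape as the generated kernel above.
__global__ void touch_3d(double *buf, int nx, int ny, int nz) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int z = blockIdx.z;                                  // one z plane per grid layer
  if (x < nx && y < ny && z < nz)
    buf[x + (size_t)nx * (y + (size_t)ny * z)] = 0.0;
}

int main() {
  int x_size = 70, y_size = 70, z_size = 70;           // hypothetical extents
  int block_x = 32, block_y = 4;                       // hypothetical OPS_block_size_x/y
  double *d_buf;
  cudaMalloc(&d_buf, sizeof(double) * x_size * y_size * z_size);
  dim3 grid((x_size - 1) / block_x + 1, (y_size - 1) / block_y + 1, z_size);
  dim3 tblock(block_x, block_y, 1);
  touch_3d<<<grid, tblock>>>(d_buf, x_size, y_size, z_size);
  cudaDeviceSynchronize();
  cudaFree(d_buf);
  return 0;
}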
a085eb4adc35789c4ef54a8ca0ebcfd666c3e05b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void convert_kernel(unsigned int *bins32, uint8_t *bins8, unsigned int num_bins) {
  // INSERT CODE HERE
  int thid = blockIdx.x * blockDim.x + threadIdx.x;
  while (thid < num_bins) {
    // Use local register value (avoids copying from global twice)
    unsigned int reg_bin = bins32[thid];
    if (reg_bin > 255) {
      bins8[thid] = 255u;
    } else {
      bins8[thid] = (uint8_t) reg_bin;
    }
    thid += blockDim.x * gridDim.x;
  }
}
a085eb4adc35789c4ef54a8ca0ebcfd666c3e05b.cu
#include "includes.h" __global__ void convert_kernel(unsigned int *bins32, uint8_t *bins8, unsigned int num_bins) { // INSERT CODE HERE int thid = blockIdx.x * blockDim.x + threadIdx.x; while (thid < num_bins){ //Use local register value (avoids copying from global twice) unsigned int reg_bin = bins32[thid]; if(reg_bin > 255){ bins8[thid] = 255u; } else{ bins8[thid] = (uint8_t) reg_bin; } thid += blockDim.x * gridDim.x; } }
bdcbbcec4f321bc2bf5b12d80de9fc7b09e1b6bb.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstdlib> #include <cfloat> #include <cmath> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #define M_PI 3.14159265358979323846 #define N_RAND_IND 3 #define MAX_FITNESS 4.25389 #define MUT_RAND_N 3 #define MIN_VALUE -1.0 #define MAX_VALUE 2.0 /* Maximizar a funcao: f(x,y) = x*sin(4*pi*x) - y*sin(4*pi*y + pi) + 1, e que o maximo global de 4.25389 situa-se no ponto (1.62888, 1.62888) */ __global__ void initializePopulation(double *pop, size_t pitch, int num_pop, int num_genes, double *randpop){ int index_gen = blockIdx.x; int index_pop = (blockIdx.x*blockDim.x + threadIdx.x); if(index_pop >= 100) index_pop -= (100*index_gen); if(index_pop < num_pop && index_gen < num_genes){ double *indv = (double *)((char*)pop + index_pop*pitch); double *rand_ind = (double *)((char*)randpop + index_pop*pitch); indv[index_gen] = MIN_VALUE + rand_ind[index_gen]*(MAX_VALUE - MIN_VALUE); } } __global__ void fitness(double *pop, size_t pitch, const int num_genes, double *result){ int index_pop = (blockIdx.x*blockDim.x + threadIdx.x); double *indv = (double *)((char*)pop + index_pop*pitch); result[index_pop] = indv[0]*sin(4*M_PI*indv[0]) - indv[1]*sin(4*M_PI*indv[1] + M_PI) + 1; __syncthreads(); } __host__ void getBestIndividual(double *pop, size_t pitch, const int num_pop, const int num_genes, int *best_index, double *best_score){ double *d_scores; hipMalloc((void**)&d_scores, num_pop*sizeof(double)); hipLaunchKernelGGL(( fitness), dim3(1), dim3(num_pop), 0, 0, pop, pitch, num_genes, d_scores); double h_scores[100]; hipMemcpy(h_scores, d_scores, num_pop*sizeof(double), hipMemcpyDeviceToHost); hipFree(d_scores); *best_index = 0; *best_score = h_scores[*best_index]; for(int i = 1; i < num_pop; i++){ if(h_scores[i] > *best_score){ *best_index = i; *best_score = h_scores[i]; } } } __global__ void mutationRand(double *pop, double *new_pop, size_t pitch_pop, const int num_pop, const int num_genes, double mutation_factor, int *randmut, size_t pitch_mut){ int index_gen = blockIdx.x; int index_pop = (blockIdx.x*blockDim.x + threadIdx.x); if(index_pop >= 100) index_pop -= (100*index_gen); if(index_pop < num_pop && index_gen < num_genes){ int *rand_indvs = (int *)((char*)randmut + index_pop*pitch_mut); double *rand1 = (double *)((char*)pop + rand_indvs[0]*pitch_pop); double *rand2 = (double *)((char*)pop + rand_indvs[1]*pitch_pop); double *rand3 = (double *)((char*)pop + rand_indvs[2]*pitch_pop); double *new_indv = (double *)((char*)new_pop + index_pop*pitch_pop); double value = rand1[index_gen] + (mutation_factor *(rand2[index_gen] - rand3[index_gen])); if(value < MIN_VALUE) value = MIN_VALUE; else if(value > MAX_VALUE) value = MAX_VALUE; new_indv[index_gen] = value; } __syncthreads(); } __host__ void startMutationRand(double *pop, double* new_pop, size_t pitch_pop, const int num_pop, const int num_genes, double mutation_factor, dim3 blkgenes, dim3 thrdpop){ int h_randmut[100][MUT_RAND_N]; for(int i = 0; i < num_pop; i++){ do h_randmut[i][0] = num_pop*((double) rand()/ ((double)RAND_MAX + 1));while(h_randmut[i][0] == i); do h_randmut[i][1] = num_pop*((double) rand()/ ((double)RAND_MAX + 1));while(h_randmut[i][1] == i || h_randmut[i][1] == h_randmut[i][0]); do h_randmut[i][2] = num_pop*((double) rand()/ ((double)RAND_MAX + 1));while(h_randmut[i][2] == i || h_randmut[i][2] == h_randmut[i][0] || h_randmut[i][2] == h_randmut[i][1]); } int *d_randmut; size_t pitch_mut; hipMallocPitch((void**)&d_randmut, &pitch_mut, MUT_RAND_N, 
num_pop); hipMemcpy2D(d_randmut, pitch_mut, h_randmut, MUT_RAND_N*sizeof(int), MUT_RAND_N*sizeof(int), num_pop, hipMemcpyHostToDevice); hipLaunchKernelGGL(( mutationRand), dim3(blkgenes), dim3(thrdpop), 0, 0, pop, new_pop, pitch_pop, num_pop, num_genes, mutation_factor, d_randmut, pitch_mut); hipFree(d_randmut); } // __device__ void f1(double *x,int D,double *out){ // int i; // __shared__ double result[NUMTHREAD]; // result[threadIdx.x]=0; // for (i=threadIdx.x;i<dim;i+=blockDim.x) // result[threadIdx.x]+= x[i]*__sinf(sqrtf(fabsf(x[i]))); // sum(result,NUMTHREAD); // *out = result[0]; // } // __global__ void generation_new_population (double *pop,int NP, int D, double *npop, double F, double CR,double *rand, int *mutation,double min,double max){ // int first,last,a,b,c,k,i,j; // for(i=blockIdx.x;i<NP;i+=gridDim.x){ // first=i*D; // last=first+D; // k=threadIdx.x; // a=mutation[i*3]; // b=mutation[i*3+1]; // c=mutation[i*3+2]; // j= threadIdx.x; // for(j+=first;j<last;j+=blockDim.x,){ // if(rand[j]<CR) // npop[j]= pop[c+k]+F*(pop[a+k]-pop[b+k]); // else npop[j]=pop[j]; // if(npop[j]>max) npop[j]=max; // else // if(npop[j]<min) npop[j]=min; // k+=blockDim.x; // } // __syncthreads(); // } // } // __global__ void selection(int function ,int NP, int D, double *pop, double *npop, double *fobj){ // int i,first,last,j; // double r; // for(i=blockIdx.x;i<NP;i+=gridDim.x){ // first=i*D; // r=func(function, &npop[first], D); // __syncthreads(); // if(r<fobj[i]){ // first=i*D; // last=first+D; // first+=threadIdx.x; // for(int j=first;j<last;j+=blockDim.x) // pop[j]=npop[j]; // fobj[i]=r; // } // __syncthreads(); // } // } // double **drawPopulationInit(double **d_ran){ // double **randpop = new double*[num_population]; // for (int i = 0; i < num_genes; i++){ // population[i] = new double[num_genes]; // new_population[i] = new double[num_genes]; // } // } // void drawIndividuals(int **randpop, int num_pop){ // for(int i = 0; i < num_pop; i++){ // do{ // rand_a = max_generation*((double)rand()/(double) RAND_MAX); // }while(rand_a == i); // do{ // rand_b = max_generation*((double)rand()/(double) RAND_MAX); // }while(rand_b == i || rand_b == rand_a); // do{ // rand_c = max_generation*((double)rand()/(double) RAND_MAX); // }while(rand_c == i || rand_c == rand_a || rand_b == rand_a); // randpop[i][0]=rand_a; // randpop[i][1]=rand_b; // randpop[i][2]=rand_c; // } // } __host__ void printPopulation(double **pop, int num_pop, int num_genes){ for(int i = 0; i < num_pop; i++){ for(int j = 0; j < num_genes; j++) std::cout << pop[i][j] << " "; std::cout << std::endl; } } int main(int argc, char **argv){ //Differential Evolution parameters const int max_generation = 1; const int num_population = 100; const int num_genes = 2; const double mutation_factor = 0.2; const double crossover_rate = 0.5; //Setting block and grid lenght dim3 blocksforgenes(num_genes, 1, 1); dim3 threadsforpop(num_population, 1, 1); //Creating random seed generator by device // hiprandGenerator_t gen_rand; // hiprandCreateGenerator(&gen_rand, HIPRAND_RNG_PSEUDO_DEFAULT); // hiprandSetPseudoRandomGeneratorSeed(gen_rand, 1234ULL); //Creating random seed by host srand(1); //Setting population vector to host and device size_t pitch_pop; double h_population[num_population][num_genes]; double h_newpopulation[num_population][num_genes]; double *d_population, *d_newpopulation; hipMallocPitch((void**)&d_population, &pitch_pop, num_genes, num_population); hipMallocPitch((void**)&d_newpopulation, &pitch_pop, num_genes, num_population); //Draw rand(0,1) 
to initialize population double h_randpop[num_population][num_genes]; for(int i = 0; i < num_population; i++){ for(int j = 0; j < num_genes; j++) h_randpop[i][j] = ((double) rand()/ ((double)RAND_MAX + 1)); } //Initialize population double *d_randpop; hipMallocPitch((void**)&d_randpop, &pitch_pop, num_genes, num_population); hipMemcpy2D(d_randpop, pitch_pop, h_randpop, num_genes*sizeof(double), num_genes*sizeof(double), num_population, hipMemcpyHostToDevice); // for(int i = 0; i < num_population; i++) // hiprandGenerateUniform(gen_rand, randpop[i], num_genes); hipLaunchKernelGGL(( initializePopulation), dim3(blocksforgenes),dim3(threadsforpop), 0, 0, d_population, pitch_pop, num_population, num_genes, d_randpop); hipFree(d_randpop); int generation, best_individual_index; double best_individual_score, previous_best_score = DBL_MIN; for(generation = 1; generation <= max_generation; generation++){ //Best individual getBestIndividual(d_population, pitch_pop, num_population, num_genes, &best_individual_index, &best_individual_score); if(previous_best_score < best_individual_score) printf("best individual index: %d - fitness: %f", best_individual_index, best_individual_score); //Stop condition if((MAX_FITNESS - best_individual_score) <= 0.00001){ break; } //Mutation phase startMutationRand(d_population, d_newpopulation, pitch_pop, num_population, num_genes, mutation_factor, blocksforgenes, threadsforpop); //Crossover phase //Selection Phase } // for(int i = 0; i < max_generation; i++){ // int rand_a, rand_b, rand_c; // hipMemcpy(d_mutation, h_mutation, 3*NP*sizeof(int), hipMemcpyHostToDevice); // generation_new_population<<<32,64>>>(d_pop,NP,D,d_npop,F,CR,d_Rand,d_mutation,s_min,s_max); // evaluate_new_population<<<32,64>>>(function,NP,D,d_pop,d_npop,d_fobj); // } // min_value_index<<<1,1>>>(0,NP,D,d_fobj,d_bestNP,d_pop,bestVal); // hipMemcpy(h_best, d_best, D*sizeof(double), hipMemcpyDeviceToHost); // hipMemcpy(&h_bestVal, bestVal, sizeof(double), hipMemcpyDeviceToHost); // hipMemcpy2D(h_population, num_genes*sizeof(double), d_population, pitch_pop, num_genes*sizeof(double), num_population, hipMemcpyDeviceToHost); // std::cout << "POPULATION\n"; // for(int i = 0; i < num_population; i++){ // for(int j = 0; j < num_genes; j++) // std::cout << h_population[i][j] << " "; // std::cout << std::endl; // } // hipMemcpy2D(h_newpopulation, num_genes*sizeof(double), d_newpopulation, pitch_pop, num_genes*sizeof(double), num_population, hipMemcpyDeviceToHost); // std::cout << "MUTANTS:\n"; // for(int i = 0; i < num_population; i++){ // for(int j = 0; j < num_genes; j++) // std::cout << h_newpopulation[i][j] << " "; // std::cout << std::endl; // } //Free memory from host and device hipFree(d_population); hipFree(d_newpopulation); // hiprandDestroyGenerator(gen_rand); }
bdcbbcec4f321bc2bf5b12d80de9fc7b09e1b6bb.cu
#include <iostream> #include <cstdlib> #include <cfloat> #include <cmath> #include <cuda.h> #include <curand.h> #define M_PI 3.14159265358979323846 #define N_RAND_IND 3 #define MAX_FITNESS 4.25389 #define MUT_RAND_N 3 #define MIN_VALUE -1.0 #define MAX_VALUE 2.0 /* Maximizar a funcao: f(x,y) = x*sin(4*pi*x) - y*sin(4*pi*y + pi) + 1, e que o maximo global de 4.25389 situa-se no ponto (1.62888, 1.62888) */ __global__ void initializePopulation(double *pop, size_t pitch, int num_pop, int num_genes, double *randpop){ int index_gen = blockIdx.x; int index_pop = (blockIdx.x*blockDim.x + threadIdx.x); if(index_pop >= 100) index_pop -= (100*index_gen); if(index_pop < num_pop && index_gen < num_genes){ double *indv = (double *)((char*)pop + index_pop*pitch); double *rand_ind = (double *)((char*)randpop + index_pop*pitch); indv[index_gen] = MIN_VALUE + rand_ind[index_gen]*(MAX_VALUE - MIN_VALUE); } } __global__ void fitness(double *pop, size_t pitch, const int num_genes, double *result){ int index_pop = (blockIdx.x*blockDim.x + threadIdx.x); double *indv = (double *)((char*)pop + index_pop*pitch); result[index_pop] = indv[0]*sin(4*M_PI*indv[0]) - indv[1]*sin(4*M_PI*indv[1] + M_PI) + 1; __syncthreads(); } __host__ void getBestIndividual(double *pop, size_t pitch, const int num_pop, const int num_genes, int *best_index, double *best_score){ double *d_scores; cudaMalloc((void**)&d_scores, num_pop*sizeof(double)); fitness<<<1, num_pop>>>(pop, pitch, num_genes, d_scores); double h_scores[100]; cudaMemcpy(h_scores, d_scores, num_pop*sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_scores); *best_index = 0; *best_score = h_scores[*best_index]; for(int i = 1; i < num_pop; i++){ if(h_scores[i] > *best_score){ *best_index = i; *best_score = h_scores[i]; } } } __global__ void mutationRand(double *pop, double *new_pop, size_t pitch_pop, const int num_pop, const int num_genes, double mutation_factor, int *randmut, size_t pitch_mut){ int index_gen = blockIdx.x; int index_pop = (blockIdx.x*blockDim.x + threadIdx.x); if(index_pop >= 100) index_pop -= (100*index_gen); if(index_pop < num_pop && index_gen < num_genes){ int *rand_indvs = (int *)((char*)randmut + index_pop*pitch_mut); double *rand1 = (double *)((char*)pop + rand_indvs[0]*pitch_pop); double *rand2 = (double *)((char*)pop + rand_indvs[1]*pitch_pop); double *rand3 = (double *)((char*)pop + rand_indvs[2]*pitch_pop); double *new_indv = (double *)((char*)new_pop + index_pop*pitch_pop); double value = rand1[index_gen] + (mutation_factor *(rand2[index_gen] - rand3[index_gen])); if(value < MIN_VALUE) value = MIN_VALUE; else if(value > MAX_VALUE) value = MAX_VALUE; new_indv[index_gen] = value; } __syncthreads(); } __host__ void startMutationRand(double *pop, double* new_pop, size_t pitch_pop, const int num_pop, const int num_genes, double mutation_factor, dim3 blkgenes, dim3 thrdpop){ int h_randmut[100][MUT_RAND_N]; for(int i = 0; i < num_pop; i++){ do h_randmut[i][0] = num_pop*((double) rand()/ ((double)RAND_MAX + 1));while(h_randmut[i][0] == i); do h_randmut[i][1] = num_pop*((double) rand()/ ((double)RAND_MAX + 1));while(h_randmut[i][1] == i || h_randmut[i][1] == h_randmut[i][0]); do h_randmut[i][2] = num_pop*((double) rand()/ ((double)RAND_MAX + 1));while(h_randmut[i][2] == i || h_randmut[i][2] == h_randmut[i][0] || h_randmut[i][2] == h_randmut[i][1]); } int *d_randmut; size_t pitch_mut; cudaMallocPitch((void**)&d_randmut, &pitch_mut, MUT_RAND_N, num_pop); cudaMemcpy2D(d_randmut, pitch_mut, h_randmut, MUT_RAND_N*sizeof(int), MUT_RAND_N*sizeof(int), num_pop, 
cudaMemcpyHostToDevice); mutationRand<<<blkgenes, thrdpop>>>(pop, new_pop, pitch_pop, num_pop, num_genes, mutation_factor, d_randmut, pitch_mut); cudaFree(d_randmut); } // __device__ void f1(double *x,int D,double *out){ // int i; // __shared__ double result[NUMTHREAD]; // result[threadIdx.x]=0; // for (i=threadIdx.x;i<dim;i+=blockDim.x) // result[threadIdx.x]+= x[i]*__sinf(sqrtf(fabsf(x[i]))); // sum(result,NUMTHREAD); // *out = result[0]; // } // __global__ void generation_new_population (double *pop,int NP, int D, double *npop, double F, double CR,double *rand, int *mutation,double min,double max){ // int first,last,a,b,c,k,i,j; // for(i=blockIdx.x;i<NP;i+=gridDim.x){ // first=i*D; // last=first+D; // k=threadIdx.x; // a=mutation[i*3]; // b=mutation[i*3+1]; // c=mutation[i*3+2]; // j= threadIdx.x; // for(j+=first;j<last;j+=blockDim.x,){ // if(rand[j]<CR) // npop[j]= pop[c+k]+F*(pop[a+k]-pop[b+k]); // else npop[j]=pop[j]; // if(npop[j]>max) npop[j]=max; // else // if(npop[j]<min) npop[j]=min; // k+=blockDim.x; // } // __syncthreads(); // } // } // __global__ void selection(int function ,int NP, int D, double *pop, double *npop, double *fobj){ // int i,first,last,j; // double r; // for(i=blockIdx.x;i<NP;i+=gridDim.x){ // first=i*D; // r=func(function, &npop[first], D); // __syncthreads(); // if(r<fobj[i]){ // first=i*D; // last=first+D; // first+=threadIdx.x; // for(int j=first;j<last;j+=blockDim.x) // pop[j]=npop[j]; // fobj[i]=r; // } // __syncthreads(); // } // } // double **drawPopulationInit(double **d_ran){ // double **randpop = new double*[num_population]; // for (int i = 0; i < num_genes; i++){ // population[i] = new double[num_genes]; // new_population[i] = new double[num_genes]; // } // } // void drawIndividuals(int **randpop, int num_pop){ // for(int i = 0; i < num_pop; i++){ // do{ // rand_a = max_generation*((double)rand()/(double) RAND_MAX); // }while(rand_a == i); // do{ // rand_b = max_generation*((double)rand()/(double) RAND_MAX); // }while(rand_b == i || rand_b == rand_a); // do{ // rand_c = max_generation*((double)rand()/(double) RAND_MAX); // }while(rand_c == i || rand_c == rand_a || rand_b == rand_a); // randpop[i][0]=rand_a; // randpop[i][1]=rand_b; // randpop[i][2]=rand_c; // } // } __host__ void printPopulation(double **pop, int num_pop, int num_genes){ for(int i = 0; i < num_pop; i++){ for(int j = 0; j < num_genes; j++) std::cout << pop[i][j] << " "; std::cout << std::endl; } } int main(int argc, char **argv){ //Differential Evolution parameters const int max_generation = 1; const int num_population = 100; const int num_genes = 2; const double mutation_factor = 0.2; const double crossover_rate = 0.5; //Setting block and grid lenght dim3 blocksforgenes(num_genes, 1, 1); dim3 threadsforpop(num_population, 1, 1); //Creating random seed generator by device // curandGenerator_t gen_rand; // curandCreateGenerator(&gen_rand, CURAND_RNG_PSEUDO_DEFAULT); // curandSetPseudoRandomGeneratorSeed(gen_rand, 1234ULL); //Creating random seed by host srand(1); //Setting population vector to host and device size_t pitch_pop; double h_population[num_population][num_genes]; double h_newpopulation[num_population][num_genes]; double *d_population, *d_newpopulation; cudaMallocPitch((void**)&d_population, &pitch_pop, num_genes, num_population); cudaMallocPitch((void**)&d_newpopulation, &pitch_pop, num_genes, num_population); //Draw rand(0,1) to initialize population double h_randpop[num_population][num_genes]; for(int i = 0; i < num_population; i++){ for(int j = 0; j < num_genes; j++) 
h_randpop[i][j] = ((double) rand()/ ((double)RAND_MAX + 1)); } //Initialize population double *d_randpop; cudaMallocPitch((void**)&d_randpop, &pitch_pop, num_genes, num_population); cudaMemcpy2D(d_randpop, pitch_pop, h_randpop, num_genes*sizeof(double), num_genes*sizeof(double), num_population, cudaMemcpyHostToDevice); // for(int i = 0; i < num_population; i++) // curandGenerateUniform(gen_rand, randpop[i], num_genes); initializePopulation<<<blocksforgenes,threadsforpop>>>(d_population, pitch_pop, num_population, num_genes, d_randpop); cudaFree(d_randpop); int generation, best_individual_index; double best_individual_score, previous_best_score = DBL_MIN; for(generation = 1; generation <= max_generation; generation++){ //Best individual getBestIndividual(d_population, pitch_pop, num_population, num_genes, &best_individual_index, &best_individual_score); if(previous_best_score < best_individual_score) printf("best individual index: %d - fitness: %f", best_individual_index, best_individual_score); //Stop condition if((MAX_FITNESS - best_individual_score) <= 0.00001){ break; } //Mutation phase startMutationRand(d_population, d_newpopulation, pitch_pop, num_population, num_genes, mutation_factor, blocksforgenes, threadsforpop); //Crossover phase //Selection Phase } // for(int i = 0; i < max_generation; i++){ // int rand_a, rand_b, rand_c; // cudaMemcpy(d_mutation, h_mutation, 3*NP*sizeof(int), cudaMemcpyHostToDevice); // generation_new_population<<<32,64>>>(d_pop,NP,D,d_npop,F,CR,d_Rand,d_mutation,s_min,s_max); // evaluate_new_population<<<32,64>>>(function,NP,D,d_pop,d_npop,d_fobj); // } // min_value_index<<<1,1>>>(0,NP,D,d_fobj,d_bestNP,d_pop,bestVal); // cudaMemcpy(h_best, d_best, D*sizeof(double), cudaMemcpyDeviceToHost); // cudaMemcpy(&h_bestVal, bestVal, sizeof(double), cudaMemcpyDeviceToHost); // cudaMemcpy2D(h_population, num_genes*sizeof(double), d_population, pitch_pop, num_genes*sizeof(double), num_population, cudaMemcpyDeviceToHost); // std::cout << "POPULATION\n"; // for(int i = 0; i < num_population; i++){ // for(int j = 0; j < num_genes; j++) // std::cout << h_population[i][j] << " "; // std::cout << std::endl; // } // cudaMemcpy2D(h_newpopulation, num_genes*sizeof(double), d_newpopulation, pitch_pop, num_genes*sizeof(double), num_population, cudaMemcpyDeviceToHost); // std::cout << "MUTANTS:\n"; // for(int i = 0; i < num_population; i++){ // for(int j = 0; j < num_genes; j++) // std::cout << h_newpopulation[i][j] << " "; // std::cout << std::endl; // } //Free memory from host and device cudaFree(d_population); cudaFree(d_newpopulation); // curandDestroyGenerator(gen_rand); }
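The population, random-seed, and mutation-index tables above are held in pitched 2D allocations (cudaMallocPitch / cudaMemcpy2D, hipMallocPitch / hipMemcpy2D in the HIP variant). In those APIs the width argument and both pitches are byte counts, not element counts, and device rows are addressed by offsetting a char pointer by row*pitch. A minimal self-contained sketch of that pitched-row pattern; the sizes, kernel, and names are illustrative only:

#include <cuda_runtime.h>

__global__ void scale_rows(double *data, size_t pitch, int rows, int cols, double f) {
  int r = blockIdx.x, c = threadIdx.x;
  if (r < rows && c < cols) {
    double *row = (double *)((char *)data + r * pitch);  // pitch is in bytes
    row[c] *= f;
  }
}

int main() {
  const int rows = 100, cols = 2;                     // e.g. population x genes
  double h_data[rows][cols] = {};
  double *d_data;
  size_t pitch;
  // Width and the host-side pitch are byte counts; height is a row count.
  cudaMallocPitch((void **)&d_data, &pitch, cols * sizeof(double), rows);
  cudaMemcpy2D(d_data, pitch, h_data, cols * sizeof(double),
               cols * sizeof(double), rows, cudaMemcpyHostToDevice);
  scale_rows<<<rows, cols>>>(d_data, pitch, rows, cols, 2.0);
  cudaMemcpy2D(h_data, cols * sizeof(double), d_data, pitch,
               cols * sizeof(double), rows, cudaMemcpyDeviceToHost);
  cudaFree(d_data);
  return 0;
}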
df0d172274847a769ff1cf10f054fd7eff4a801a.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <fstream> #include <vector> #include <hip/hip_runtime.h> #include <hipfft.h> #include "cuda_utils.h" #include "gpu_utils.h" #include "fbfft/FBFFT.h" #include "fbfft/FBFFTCommon.cuh" #define fbfftCheck(stmt) do { \ facebook::cuda::fbfft::FBFFTParameters::ErrorCode err = stmt; \ if (err != facebook::cuda::fbfft::FBFFTParameters::Success) { \ printf("Error running %s in file %s, function %s\n", #stmt,__FILE__,__FUNCTION__); \ if (err == facebook::cuda::fbfft::FBFFTParameters::UnsupportedSize) \ printf("Error code: UnsupportedSize\n"); \ if (err == facebook::cuda::fbfft::FBFFTParameters::UnsupportedDimension) \ printf("Error code: UnsupportedDimension\n"); \ exit(1); \ } \ } while(0) // // Loads vector from file // template <typename T> void load_vec(const int nind, const char *filename, const int n, T *ind) { std::ifstream file(filename); if (file.is_open()) { for (int i=0;i < n;i++) { for (int k=0;k < nind;k++) { if (!(file >> ind[i*nind+k])) { std::cerr<<"Error reading file "<<filename<<std::endl; exit(1); } } } } else { std::cerr<<"Error opening file "<<filename<<std::endl; exit(1); } } void test_1dfbfft(); void test_2dfbfft(); int main(int argc, char *argv[]) { int numnode = 1; int mynode = 0; std::vector<int> devices; start_gpu(numnode, mynode, devices); test_1dfbfft(); test_2dfbfft(); stop_gpu(); return 0; } void check_err1D(int nfft, int nbatch, float* h_dataOut, float* h_dataOutRef) { double max_err = 0.0; for (int j=0;j < nbatch;++j) { int start = j*(nfft/2+1)*2; int startRef = j*nfft*2; for (int i=0;i < nfft/2+1;++i) { double err1 = fabs(h_dataOut[start+2*i]-h_dataOutRef[startRef+2*i]); double err2 = fabs(h_dataOut[start+2*i+1]-h_dataOutRef[startRef+2*i+1]); max_err = max(max_err, err1); max_err = max(max_err, err2); if (max_err > 1.0e-5) { printf("Maximum error exceeded batch=%d i=%d\n",j,i); printf("Res: %f %f\n",h_dataOut[start+2*i],h_dataOut[start+2*i+1]); printf("Ref: %f %f\n",h_dataOutRef[startRef+2*i],h_dataOutRef[startRef+2*i+1]); break; } } } printf("max_err=%e\n",max_err); } void check_err2DfbfftSMALL(int nfftx, int nffty, int nbatch, float* h_dataOut, float* h_dataOutRef) { double max_err = 0.0; int stride = nfftx*2; int strideRef = nfftx*2; for (int k=0;k < nbatch;++k) { int start = k*stride*nffty; int startRef = k*strideRef*nffty; for (int j=0;j < nffty/2+1;++j) { for (int i=0;i < nfftx;++i) { int pos = i*2 + j*stride; int posRef = i*2 + j*strideRef; double err1 = fabs(h_dataOut[start+pos]-h_dataOutRef[startRef+posRef]); double err2 = fabs(h_dataOut[start+pos+1]-h_dataOutRef[startRef+posRef+1]); max_err = max(max_err, err1); max_err = max(max_err, err2); if (max_err > 1.0e-4) { printf("Maximum error exceeded batch=%d i=%d j=%d | %d %d | %d %d\n",k,i,j,start,pos,startRef,posRef); printf("Res: %f %f\n",h_dataOut[start+pos],h_dataOut[start+pos+1]); printf("Ref: %f %f\n",h_dataOutRef[startRef+posRef],h_dataOutRef[startRef+posRef+1]); return; } } } } printf("max_err=%e\n",max_err); } void check_err2DfbfftBIG(int nfftx, int nffty, int nbatch, float* h_dataOut, float* h_dataOutRef) { double max_err = 0.0; int stride = nfftx*2; int strideRef = nfftx*2; for (int k=0;k < nbatch;++k) { int start = k*stride*nffty; int startRef = k*strideRef*nffty; for (int j=0;j < nffty/2+1;++j) { for (int i=0;i < nfftx;++i) { int pos = i*2 + j*stride; int posRef = i*2 + j*strideRef; double err1 = fabs(h_dataOut[start+pos]-h_dataOutRef[startRef+posRef]); double err2 = 
fabs(h_dataOut[start+pos+1]-h_dataOutRef[startRef+posRef+1]); max_err = max(max_err, err1); max_err = max(max_err, err2); if (max_err > 1.0e-4) { printf("Maximum error exceeded batch=%d i=%d j=%d | %d %d | %d %d\n",k,i,j,start,pos,startRef,posRef); printf("Res: %f %f\n",h_dataOut[start+pos],h_dataOut[start+pos+1]); printf("Ref: %f %f\n",h_dataOutRef[startRef+posRef],h_dataOutRef[startRef+posRef+1]); return; } } } } printf("max_err=%e\n",max_err); } void check_err2Dcufft(int nfftx, int nffty, int nbatch, float* h_dataOut, float* h_dataOutRef) { double max_err = 0.0; int stride = (nfftx/2+1)*2; int strideRef = nfftx*2; for (int k=0;k < nbatch;++k) { int start = k*stride*nffty; int startRef = k*strideRef*nffty; for (int j=0;j < nffty;++j) { for (int i=0;i < nfftx/2+1;++i) { int pos = i*2 + j*stride; int posRef = i*2 + j*strideRef; double err1 = fabs(h_dataOut[start+pos]-h_dataOutRef[startRef+posRef]); double err2 = fabs(h_dataOut[start+pos+1]-h_dataOutRef[startRef+posRef+1]); max_err = max(max_err, err1); max_err = max(max_err, err2); if (max_err > 1.0e-4) { printf("Maximum error exceeded batch=%d i=%d j=%d | %d %d | %d %d\n",k,i,j,start,pos,startRef,posRef); printf("Res: %f %f\n",h_dataOut[start+pos],h_dataOut[start+pos+1]); printf("Ref: %f %f\n",h_dataOutRef[startRef+posRef],h_dataOutRef[startRef+posRef+1]); return; } } } } printf("max_err=%e\n",max_err); } void transpose_xy(int nfftx, int nffty, int nbatch, float* h_dataOut) { float *tmp = new float[nfftx*nffty*2]; for (int k=0;k < nbatch;++k) { int start = k*nfftx*nffty*2; for (int j=0;j < nffty;++j) { for (int i=0;i < nfftx;++i) { int ii = i + j*nfftx; int jj = j + i*nfftx; tmp[2*jj] = h_dataOut[start+2*ii]; tmp[2*jj+1] = h_dataOut[start+2*ii+1]; } } memcpy(h_dataOut+start, tmp, nfftx*nffty*2*sizeof(float)); } delete [] tmp; } // // Test Facebook 1D FFT // void test_1dfbfft() { int nfft = 64; int nbatch = 2; float *h_dataIn = new float[nfft*nbatch]; float *h_dataOut = new float[(nfft/2+1)*2*nbatch]; float *h_dataOutRef = new float[nfft*2*nbatch]; float *d_dataIn = NULL; //allocate<float>(&d_dataIn, nfft*nbatch); allocate<float>(&d_dataIn, (nfft/2+1)*2*nbatch); float *d_dataOut = NULL; allocate<float>(&d_dataOut, (nfft/2+1)*2*nbatch); if (nbatch == 1) { load_vec<float>(1, "test_data/dataFFTin64.txt", nfft*nbatch, h_dataIn); load_vec<float>(2, "test_data/dataFFTout64.txt", nfft*nbatch, h_dataOutRef); } else if (nbatch == 2) { load_vec<float>(1, "test_data/dataFFTin64x2.txt", nfft*nbatch, h_dataIn); load_vec<float>(2, "test_data/dataFFTout64x2.txt", nfft*nbatch, h_dataOutRef); } else { std::cerr << "Only nbatch=1 or 2 are supported" << std::endl; exit(1); } hipfftHandle x_r2c_plan; cufftCheck(hipfftPlanMany(&x_r2c_plan, 1, &nfft, NULL, 0, 0, NULL, 0, 0, HIPFFT_R2C, nbatch)); cufftCheck(cufftSetCompatibilityMode(x_r2c_plan, CUFFT_COMPATIBILITY_NATIVE)); copy_HtoD_sync<float>(h_dataIn, d_dataIn, nfft*nbatch); cufftCheck(hipfftExecR2C(x_r2c_plan, (hipfftReal *)d_dataIn, (hipfftComplex *)d_dataOut)); cudaCheck(hipDeviceSynchronize()); copy_DtoH_sync<float>(d_dataOut, h_dataOut, (nfft/2+1)*2*nbatch); check_err1D(nfft, nbatch, h_dataOut, h_dataOutRef); cufftCheck(hipfftDestroy(x_r2c_plan)); clear_gpu_array_sync<float>(d_dataOut, (nfft/2+1)*2*nbatch); { using namespace facebook::cuda::fbfft; int dataInSize[2] = {nbatch, nfft}; int dataOutSize[3] = {nbatch, nfft/2+1, 2}; DeviceTensor<float, 2> dataInTensor(d_dataIn, dataInSize); //DeviceTensor<float, 3> dataOutTensor(d_dataOut, dataOutSize); DeviceTensor<float, 3> dataOutTensor(d_dataIn, 
dataOutSize); fbfftCheck(fbfft1D<1>(dataInTensor, dataOutTensor)); cudaCheck(hipDeviceSynchronize()); //copy_DtoH_sync<float>(d_dataOut, h_dataOut, (nfft/2+1)*2*nbatch); copy_DtoH_sync<float>(d_dataIn, h_dataOut, (nfft/2+1)*2*nbatch); check_err1D(nfft, nbatch, h_dataOut, h_dataOutRef); //for (int i=0;i < nfft/2+1;i++) { // printf("%f %f\n",h_dataOut[2*i],h_dataOut[2*i+1]); //} } deallocate<float>(&d_dataIn); deallocate<float>(&d_dataOut); delete [] h_dataIn; delete [] h_dataOut; delete [] h_dataOutRef; } // // Test Facebook 2D FFT // void test_2dfbfft() { int nfftx = 64; int nffty = 64; int nbatch = 1; float *h_dataIn = new float[nfftx*nffty*nbatch]; float *h_dataOut = new float[nfftx*(nffty/2+1)*2*nbatch]; float *h_dataOutRef = new float[nfftx*nffty*2*nbatch]; float *h_dataOutRefTransp = new float[nfftx*nffty*2*nbatch]; float *d_dataIn = NULL; allocate<float>(&d_dataIn, nfftx*nffty*nbatch); float *d_dataOut = NULL; allocate<float>(&d_dataOut, (nfftx/2+1)*2*nffty*nbatch); if (nbatch == 1) { if (nfftx == 64 && nffty == 64) { load_vec<float>(1, "test_data/dataFFTin64x64.txt", nfftx*nffty*nbatch, h_dataIn); load_vec<float>(2, "test_data/dataFFTout64x64.txt", nfftx*nffty*nbatch, h_dataOutRef); } else if (nfftx == 4 && nffty == 4) { load_vec<float>(1, "test_data/dataFFTin4x4.txt", nfftx*nffty*nbatch, h_dataIn); load_vec<float>(2, "test_data/dataFFTout4x4.txt", nfftx*nffty*nbatch, h_dataOutRef); } else { std::cerr << "FFT size not supported" << std::endl; exit(1); } } else { std::cerr << "Only nbatch=1 is supported" << std::endl; exit(1); } memcpy(h_dataOutRefTransp, h_dataOutRef, nfftx*nffty*2*nbatch*sizeof(float)); transpose_xy(nfftx, nffty, nbatch, h_dataOutRefTransp); int n[2] = {nffty, nfftx}; hipfftHandle xy_r2c_plan; cufftCheck(hipfftPlanMany(&xy_r2c_plan, 2, n, NULL, 0, 0, NULL, 0, 0, HIPFFT_R2C, nbatch)); cufftCheck(cufftSetCompatibilityMode(xy_r2c_plan, CUFFT_COMPATIBILITY_NATIVE)); copy_HtoD_sync<float>(h_dataIn, d_dataIn, nfftx*nffty*nbatch); cufftCheck(hipfftExecR2C(xy_r2c_plan, (hipfftReal *)d_dataIn, (hipfftComplex *)d_dataOut)); cudaCheck(hipDeviceSynchronize()); copy_DtoH_sync<float>(d_dataOut, h_dataOut, nfftx*(nffty/2+1)*2*nbatch); check_err2Dcufft(nfftx, nffty, nbatch, h_dataOut, h_dataOutRef); cufftCheck(hipfftDestroy(xy_r2c_plan)); clear_gpu_array_sync<float>(d_dataOut, (nfftx/2+1)*2*nffty*nbatch); /* printf("-------------------------------\n"); printf("h_dataOut (cufft)\n"); printf("-------------------------------\n"); int pos = 0; for (int j=0;j < nffty;++j) { for (int i=0;i < nfftx/2+1;i++,pos+=2) { printf("%d %d %f %f\n",i,j,h_dataOut[pos],h_dataOut[pos+1]); } } printf("-------------------------------\n"); printf("h_dataOutRef\n"); printf("-------------------------------\n"); pos = 0; for (int j=0;j < nffty;++j) { for (int i=0;i < nfftx;i++,pos+=2) { printf("%d %d %f %f\n",i,j,h_dataOutRef[pos],h_dataOutRef[pos+1]); } } printf("-------------------------------\n"); printf("h_dataOutRefTransp\n"); printf("-------------------------------\n"); pos = 0; for (int j=0;j < nffty/2+1;++j) { for (int i=0;i < nfftx;i++,pos+=2) { printf("%d %d %f %f\n",i,j,h_dataOutRefTransp[pos],h_dataOutRefTransp[pos+1]); } } */ /* int pos = 0; for (int j=0;j < nffty;++j) { for (int i=0;i < nfftx/2+1;i++,pos+=2) { if (j == 1) printf("%f %f\n",h_dataOut[pos],h_dataOut[pos+1]); } } */ { using namespace facebook::cuda::fbfft; int dataInSize[3] = {nbatch, nffty, nfftx}; int dataOutSize[4] = {nbatch, 0, 0, 2}; if (nfftx == 64 && nffty == 64) { dataOutSize[1] = nfftx; dataOutSize[2] = nffty/2+1; } else 
{ dataOutSize[1] = nfftx/2+1; dataOutSize[2] = nffty; } DeviceTensor<float, 3> dataInTensor(d_dataIn, dataInSize); DeviceTensor<float, 4> dataOutTensor(d_dataOut, dataOutSize); fbfftCheck(fbfft2D<1>(dataInTensor, dataOutTensor)); //int dataInSize[3] = {nbatch, nffty, nfftx}; //int dataOutSize[3] = {nbatch, nffty, nfftx/2+1}; //DeviceTensor<Complex, 3> dataInTensor((Complex *)d_dataIn, dataInSize); //DeviceTensor<Complex, 3> dataOutTensor((Complex *)d_dataOut, dataOutSize); //fbfftCheck(fbfft2D<1>(dataInTensor, dataOutTensor)); cudaCheck(hipDeviceSynchronize()); copy_DtoH_sync<float>(d_dataOut, h_dataOut, (nfftx/2+1)*2*nffty*nbatch); if (nfftx == 64 && nffty == 64) { check_err2DfbfftBIG(nfftx, nffty, nbatch, h_dataOut, h_dataOutRefTransp); } else { check_err2DfbfftSMALL(nfftx, nffty, nbatch, h_dataOut, h_dataOutRefTransp); } /* if (nfftx == 64 && nffty == 64) { float *h_dataInCmplx = new float[nfftx*nffty*2*nbatch]; float *d_dataInCmplx = NULL; allocate<float>(&d_dataInCmplx, nfftx*nffty*2*nbatch); int dataInCmplxSize[3] = {nbatch, nffty, nfftx/2+1}; int dataOutCmplxSize[3] = {nbatch, nffty, nfftx/2+1}; DeviceTensor<Complex, 3> dataInCmplxTensor((Complex *)d_dataInCmplx, dataInCmplxSize); DeviceTensor<Complex, 3> dataOutCmplxTensor((Complex *)d_dataOut, dataOutCmplxSize); fbfftCheck(fbfft2D<1>(dataInCmplxTensor, dataOutCmplxTensor)); cudaCheck(hipDeviceSynchronize()); delete [] h_dataInCmplx; deallocate<float>(&d_dataInCmplx); } */ //for (int i=0;i < nfft/2+1;i++) { // printf("%f %f\n",h_dataOut[2*i],h_dataOut[2*i+1]); //} } /* printf("-------------------------------\n"); printf("h_dataOut (fbfft)\n"); printf("-------------------------------\n"); pos = 0; for (int j=0;j < nffty/2+1;++j) { for (int i=0;i < nfftx;i++,pos+=2) { printf("%d %d %f %f\n",i,j,h_dataOut[pos],h_dataOut[pos+1]); } } */ deallocate<float>(&d_dataIn); deallocate<float>(&d_dataOut); delete [] h_dataIn; delete [] h_dataOut; delete [] h_dataOutRef; delete [] h_dataOutRefTransp; }
df0d172274847a769ff1cf10f054fd7eff4a801a.cu
#include <iostream> #include <fstream> #include <vector> #include <cuda.h> #include <cufft.h> #include "cuda_utils.h" #include "gpu_utils.h" #include "fbfft/FBFFT.h" #include "fbfft/FBFFTCommon.cuh" #define fbfftCheck(stmt) do { \ facebook::cuda::fbfft::FBFFTParameters::ErrorCode err = stmt; \ if (err != facebook::cuda::fbfft::FBFFTParameters::Success) { \ printf("Error running %s in file %s, function %s\n", #stmt,__FILE__,__FUNCTION__); \ if (err == facebook::cuda::fbfft::FBFFTParameters::UnsupportedSize) \ printf("Error code: UnsupportedSize\n"); \ if (err == facebook::cuda::fbfft::FBFFTParameters::UnsupportedDimension) \ printf("Error code: UnsupportedDimension\n"); \ exit(1); \ } \ } while(0) // // Loads vector from file // template <typename T> void load_vec(const int nind, const char *filename, const int n, T *ind) { std::ifstream file(filename); if (file.is_open()) { for (int i=0;i < n;i++) { for (int k=0;k < nind;k++) { if (!(file >> ind[i*nind+k])) { std::cerr<<"Error reading file "<<filename<<std::endl; exit(1); } } } } else { std::cerr<<"Error opening file "<<filename<<std::endl; exit(1); } } void test_1dfbfft(); void test_2dfbfft(); int main(int argc, char *argv[]) { int numnode = 1; int mynode = 0; std::vector<int> devices; start_gpu(numnode, mynode, devices); test_1dfbfft(); test_2dfbfft(); stop_gpu(); return 0; } void check_err1D(int nfft, int nbatch, float* h_dataOut, float* h_dataOutRef) { double max_err = 0.0; for (int j=0;j < nbatch;++j) { int start = j*(nfft/2+1)*2; int startRef = j*nfft*2; for (int i=0;i < nfft/2+1;++i) { double err1 = fabs(h_dataOut[start+2*i]-h_dataOutRef[startRef+2*i]); double err2 = fabs(h_dataOut[start+2*i+1]-h_dataOutRef[startRef+2*i+1]); max_err = max(max_err, err1); max_err = max(max_err, err2); if (max_err > 1.0e-5) { printf("Maximum error exceeded batch=%d i=%d\n",j,i); printf("Res: %f %f\n",h_dataOut[start+2*i],h_dataOut[start+2*i+1]); printf("Ref: %f %f\n",h_dataOutRef[startRef+2*i],h_dataOutRef[startRef+2*i+1]); break; } } } printf("max_err=%e\n",max_err); } void check_err2DfbfftSMALL(int nfftx, int nffty, int nbatch, float* h_dataOut, float* h_dataOutRef) { double max_err = 0.0; int stride = nfftx*2; int strideRef = nfftx*2; for (int k=0;k < nbatch;++k) { int start = k*stride*nffty; int startRef = k*strideRef*nffty; for (int j=0;j < nffty/2+1;++j) { for (int i=0;i < nfftx;++i) { int pos = i*2 + j*stride; int posRef = i*2 + j*strideRef; double err1 = fabs(h_dataOut[start+pos]-h_dataOutRef[startRef+posRef]); double err2 = fabs(h_dataOut[start+pos+1]-h_dataOutRef[startRef+posRef+1]); max_err = max(max_err, err1); max_err = max(max_err, err2); if (max_err > 1.0e-4) { printf("Maximum error exceeded batch=%d i=%d j=%d | %d %d | %d %d\n",k,i,j,start,pos,startRef,posRef); printf("Res: %f %f\n",h_dataOut[start+pos],h_dataOut[start+pos+1]); printf("Ref: %f %f\n",h_dataOutRef[startRef+posRef],h_dataOutRef[startRef+posRef+1]); return; } } } } printf("max_err=%e\n",max_err); } void check_err2DfbfftBIG(int nfftx, int nffty, int nbatch, float* h_dataOut, float* h_dataOutRef) { double max_err = 0.0; int stride = nfftx*2; int strideRef = nfftx*2; for (int k=0;k < nbatch;++k) { int start = k*stride*nffty; int startRef = k*strideRef*nffty; for (int j=0;j < nffty/2+1;++j) { for (int i=0;i < nfftx;++i) { int pos = i*2 + j*stride; int posRef = i*2 + j*strideRef; double err1 = fabs(h_dataOut[start+pos]-h_dataOutRef[startRef+posRef]); double err2 = fabs(h_dataOut[start+pos+1]-h_dataOutRef[startRef+posRef+1]); max_err = max(max_err, err1); max_err = max(max_err, 
err2); if (max_err > 1.0e-4) { printf("Maximum error exceeded batch=%d i=%d j=%d | %d %d | %d %d\n",k,i,j,start,pos,startRef,posRef); printf("Res: %f %f\n",h_dataOut[start+pos],h_dataOut[start+pos+1]); printf("Ref: %f %f\n",h_dataOutRef[startRef+posRef],h_dataOutRef[startRef+posRef+1]); return; } } } } printf("max_err=%e\n",max_err); } void check_err2Dcufft(int nfftx, int nffty, int nbatch, float* h_dataOut, float* h_dataOutRef) { double max_err = 0.0; int stride = (nfftx/2+1)*2; int strideRef = nfftx*2; for (int k=0;k < nbatch;++k) { int start = k*stride*nffty; int startRef = k*strideRef*nffty; for (int j=0;j < nffty;++j) { for (int i=0;i < nfftx/2+1;++i) { int pos = i*2 + j*stride; int posRef = i*2 + j*strideRef; double err1 = fabs(h_dataOut[start+pos]-h_dataOutRef[startRef+posRef]); double err2 = fabs(h_dataOut[start+pos+1]-h_dataOutRef[startRef+posRef+1]); max_err = max(max_err, err1); max_err = max(max_err, err2); if (max_err > 1.0e-4) { printf("Maximum error exceeded batch=%d i=%d j=%d | %d %d | %d %d\n",k,i,j,start,pos,startRef,posRef); printf("Res: %f %f\n",h_dataOut[start+pos],h_dataOut[start+pos+1]); printf("Ref: %f %f\n",h_dataOutRef[startRef+posRef],h_dataOutRef[startRef+posRef+1]); return; } } } } printf("max_err=%e\n",max_err); } void transpose_xy(int nfftx, int nffty, int nbatch, float* h_dataOut) { float *tmp = new float[nfftx*nffty*2]; for (int k=0;k < nbatch;++k) { int start = k*nfftx*nffty*2; for (int j=0;j < nffty;++j) { for (int i=0;i < nfftx;++i) { int ii = i + j*nfftx; int jj = j + i*nfftx; tmp[2*jj] = h_dataOut[start+2*ii]; tmp[2*jj+1] = h_dataOut[start+2*ii+1]; } } memcpy(h_dataOut+start, tmp, nfftx*nffty*2*sizeof(float)); } delete [] tmp; } // // Test Facebook 1D FFT // void test_1dfbfft() { int nfft = 64; int nbatch = 2; float *h_dataIn = new float[nfft*nbatch]; float *h_dataOut = new float[(nfft/2+1)*2*nbatch]; float *h_dataOutRef = new float[nfft*2*nbatch]; float *d_dataIn = NULL; //allocate<float>(&d_dataIn, nfft*nbatch); allocate<float>(&d_dataIn, (nfft/2+1)*2*nbatch); float *d_dataOut = NULL; allocate<float>(&d_dataOut, (nfft/2+1)*2*nbatch); if (nbatch == 1) { load_vec<float>(1, "test_data/dataFFTin64.txt", nfft*nbatch, h_dataIn); load_vec<float>(2, "test_data/dataFFTout64.txt", nfft*nbatch, h_dataOutRef); } else if (nbatch == 2) { load_vec<float>(1, "test_data/dataFFTin64x2.txt", nfft*nbatch, h_dataIn); load_vec<float>(2, "test_data/dataFFTout64x2.txt", nfft*nbatch, h_dataOutRef); } else { std::cerr << "Only nbatch=1 or 2 are supported" << std::endl; exit(1); } cufftHandle x_r2c_plan; cufftCheck(cufftPlanMany(&x_r2c_plan, 1, &nfft, NULL, 0, 0, NULL, 0, 0, CUFFT_R2C, nbatch)); cufftCheck(cufftSetCompatibilityMode(x_r2c_plan, CUFFT_COMPATIBILITY_NATIVE)); copy_HtoD_sync<float>(h_dataIn, d_dataIn, nfft*nbatch); cufftCheck(cufftExecR2C(x_r2c_plan, (cufftReal *)d_dataIn, (cufftComplex *)d_dataOut)); cudaCheck(cudaDeviceSynchronize()); copy_DtoH_sync<float>(d_dataOut, h_dataOut, (nfft/2+1)*2*nbatch); check_err1D(nfft, nbatch, h_dataOut, h_dataOutRef); cufftCheck(cufftDestroy(x_r2c_plan)); clear_gpu_array_sync<float>(d_dataOut, (nfft/2+1)*2*nbatch); { using namespace facebook::cuda::fbfft; int dataInSize[2] = {nbatch, nfft}; int dataOutSize[3] = {nbatch, nfft/2+1, 2}; DeviceTensor<float, 2> dataInTensor(d_dataIn, dataInSize); //DeviceTensor<float, 3> dataOutTensor(d_dataOut, dataOutSize); DeviceTensor<float, 3> dataOutTensor(d_dataIn, dataOutSize); fbfftCheck(fbfft1D<1>(dataInTensor, dataOutTensor)); cudaCheck(cudaDeviceSynchronize()); 
//copy_DtoH_sync<float>(d_dataOut, h_dataOut, (nfft/2+1)*2*nbatch); copy_DtoH_sync<float>(d_dataIn, h_dataOut, (nfft/2+1)*2*nbatch); check_err1D(nfft, nbatch, h_dataOut, h_dataOutRef); //for (int i=0;i < nfft/2+1;i++) { // printf("%f %f\n",h_dataOut[2*i],h_dataOut[2*i+1]); //} } deallocate<float>(&d_dataIn); deallocate<float>(&d_dataOut); delete [] h_dataIn; delete [] h_dataOut; delete [] h_dataOutRef; } // // Test Facebook 2D FFT // void test_2dfbfft() { int nfftx = 64; int nffty = 64; int nbatch = 1; float *h_dataIn = new float[nfftx*nffty*nbatch]; float *h_dataOut = new float[nfftx*(nffty/2+1)*2*nbatch]; float *h_dataOutRef = new float[nfftx*nffty*2*nbatch]; float *h_dataOutRefTransp = new float[nfftx*nffty*2*nbatch]; float *d_dataIn = NULL; allocate<float>(&d_dataIn, nfftx*nffty*nbatch); float *d_dataOut = NULL; allocate<float>(&d_dataOut, (nfftx/2+1)*2*nffty*nbatch); if (nbatch == 1) { if (nfftx == 64 && nffty == 64) { load_vec<float>(1, "test_data/dataFFTin64x64.txt", nfftx*nffty*nbatch, h_dataIn); load_vec<float>(2, "test_data/dataFFTout64x64.txt", nfftx*nffty*nbatch, h_dataOutRef); } else if (nfftx == 4 && nffty == 4) { load_vec<float>(1, "test_data/dataFFTin4x4.txt", nfftx*nffty*nbatch, h_dataIn); load_vec<float>(2, "test_data/dataFFTout4x4.txt", nfftx*nffty*nbatch, h_dataOutRef); } else { std::cerr << "FFT size not supported" << std::endl; exit(1); } } else { std::cerr << "Only nbatch=1 is supported" << std::endl; exit(1); } memcpy(h_dataOutRefTransp, h_dataOutRef, nfftx*nffty*2*nbatch*sizeof(float)); transpose_xy(nfftx, nffty, nbatch, h_dataOutRefTransp); int n[2] = {nffty, nfftx}; cufftHandle xy_r2c_plan; cufftCheck(cufftPlanMany(&xy_r2c_plan, 2, n, NULL, 0, 0, NULL, 0, 0, CUFFT_R2C, nbatch)); cufftCheck(cufftSetCompatibilityMode(xy_r2c_plan, CUFFT_COMPATIBILITY_NATIVE)); copy_HtoD_sync<float>(h_dataIn, d_dataIn, nfftx*nffty*nbatch); cufftCheck(cufftExecR2C(xy_r2c_plan, (cufftReal *)d_dataIn, (cufftComplex *)d_dataOut)); cudaCheck(cudaDeviceSynchronize()); copy_DtoH_sync<float>(d_dataOut, h_dataOut, nfftx*(nffty/2+1)*2*nbatch); check_err2Dcufft(nfftx, nffty, nbatch, h_dataOut, h_dataOutRef); cufftCheck(cufftDestroy(xy_r2c_plan)); clear_gpu_array_sync<float>(d_dataOut, (nfftx/2+1)*2*nffty*nbatch); /* printf("-------------------------------\n"); printf("h_dataOut (cufft)\n"); printf("-------------------------------\n"); int pos = 0; for (int j=0;j < nffty;++j) { for (int i=0;i < nfftx/2+1;i++,pos+=2) { printf("%d %d %f %f\n",i,j,h_dataOut[pos],h_dataOut[pos+1]); } } printf("-------------------------------\n"); printf("h_dataOutRef\n"); printf("-------------------------------\n"); pos = 0; for (int j=0;j < nffty;++j) { for (int i=0;i < nfftx;i++,pos+=2) { printf("%d %d %f %f\n",i,j,h_dataOutRef[pos],h_dataOutRef[pos+1]); } } printf("-------------------------------\n"); printf("h_dataOutRefTransp\n"); printf("-------------------------------\n"); pos = 0; for (int j=0;j < nffty/2+1;++j) { for (int i=0;i < nfftx;i++,pos+=2) { printf("%d %d %f %f\n",i,j,h_dataOutRefTransp[pos],h_dataOutRefTransp[pos+1]); } } */ /* int pos = 0; for (int j=0;j < nffty;++j) { for (int i=0;i < nfftx/2+1;i++,pos+=2) { if (j == 1) printf("%f %f\n",h_dataOut[pos],h_dataOut[pos+1]); } } */ { using namespace facebook::cuda::fbfft; int dataInSize[3] = {nbatch, nffty, nfftx}; int dataOutSize[4] = {nbatch, 0, 0, 2}; if (nfftx == 64 && nffty == 64) { dataOutSize[1] = nfftx; dataOutSize[2] = nffty/2+1; } else { dataOutSize[1] = nfftx/2+1; dataOutSize[2] = nffty; } DeviceTensor<float, 3> dataInTensor(d_dataIn, 
dataInSize); DeviceTensor<float, 4> dataOutTensor(d_dataOut, dataOutSize); fbfftCheck(fbfft2D<1>(dataInTensor, dataOutTensor)); //int dataInSize[3] = {nbatch, nffty, nfftx}; //int dataOutSize[3] = {nbatch, nffty, nfftx/2+1}; //DeviceTensor<Complex, 3> dataInTensor((Complex *)d_dataIn, dataInSize); //DeviceTensor<Complex, 3> dataOutTensor((Complex *)d_dataOut, dataOutSize); //fbfftCheck(fbfft2D<1>(dataInTensor, dataOutTensor)); cudaCheck(cudaDeviceSynchronize()); copy_DtoH_sync<float>(d_dataOut, h_dataOut, (nfftx/2+1)*2*nffty*nbatch); if (nfftx == 64 && nffty == 64) { check_err2DfbfftBIG(nfftx, nffty, nbatch, h_dataOut, h_dataOutRefTransp); } else { check_err2DfbfftSMALL(nfftx, nffty, nbatch, h_dataOut, h_dataOutRefTransp); } /* if (nfftx == 64 && nffty == 64) { float *h_dataInCmplx = new float[nfftx*nffty*2*nbatch]; float *d_dataInCmplx = NULL; allocate<float>(&d_dataInCmplx, nfftx*nffty*2*nbatch); int dataInCmplxSize[3] = {nbatch, nffty, nfftx/2+1}; int dataOutCmplxSize[3] = {nbatch, nffty, nfftx/2+1}; DeviceTensor<Complex, 3> dataInCmplxTensor((Complex *)d_dataInCmplx, dataInCmplxSize); DeviceTensor<Complex, 3> dataOutCmplxTensor((Complex *)d_dataOut, dataOutCmplxSize); fbfftCheck(fbfft2D<1>(dataInCmplxTensor, dataOutCmplxTensor)); cudaCheck(cudaDeviceSynchronize()); delete [] h_dataInCmplx; deallocate<float>(&d_dataInCmplx); } */ //for (int i=0;i < nfft/2+1;i++) { // printf("%f %f\n",h_dataOut[2*i],h_dataOut[2*i+1]); //} } /* printf("-------------------------------\n"); printf("h_dataOut (fbfft)\n"); printf("-------------------------------\n"); pos = 0; for (int j=0;j < nffty/2+1;++j) { for (int i=0;i < nfftx;i++,pos+=2) { printf("%d %d %f %f\n",i,j,h_dataOut[pos],h_dataOut[pos+1]); } } */ deallocate<float>(&d_dataIn); deallocate<float>(&d_dataOut); delete [] h_dataIn; delete [] h_dataOut; delete [] h_dataOutRef; delete [] h_dataOutRefTransp; }
de943be813c4d43ac02be982b533f54de6d33dc2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// includes, system
#include <stdio.h>
#include <assert.h>

// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);

// Part3: implement the kernel
__global__ void reverseArrayBlock(int *d_out, int *d_in)
{
    int inOffset = blockDim.x * blockIdx.x;
    int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x);
    int in = inOffset + threadIdx.x;
    int out = outOffset + (blockDim.x - 1 - threadIdx.x);
    d_out[out] = d_in[in];
}

/////////////////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
    // pointer for host memory and size
    int *h_a;
    int dimA = 256 * 1024; // 256K elements (1MB total)

    // pointer for device memory
    int *d_b, *d_a;

    // define grid and block size
    int numThreadsPerBlock = 256;

    // Part 1: compute number of blocks needed based on
    // array size and desired block size
    int numBlocks = dimA / numThreadsPerBlock;

    // allocate host and device memory
    size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
    h_a = (int *) malloc(memSize);
    hipMalloc( (void **) &d_a, memSize );
    hipMalloc( (void **) &d_b, memSize );

    // Initialize input array on host
    for (int i = 0; i < dimA; ++i)
    {
        h_a[i] = i;
    }

    // Copy host array to device array
    hipMemcpy( d_a, h_a, memSize, hipMemcpyHostToDevice );

    // launch kernel
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);
    hipLaunchKernelGGL(( reverseArrayBlock), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_b, d_a );

    // block until the device has completed
    hipDeviceSynchronize();

    // check if kernel execution generated an error
    // Check for any CUDA errors
    checkCUDAError("kernel invocation");

    // device to host copy
    hipMemcpy( h_a, d_b, memSize, hipMemcpyDeviceToHost );

    // Check for any CUDA errors
    checkCUDAError("memcpy");

    // verify the data returned to the host is correct
    for (int i = 0; i < dimA; i++)
    {
        assert(h_a[i] == dimA - 1 - i );
    }

    // free device memory
    hipFree(d_a);
    hipFree(d_b);

    // free host memory
    free(h_a);

    // If the program makes it this far, then the results are
    // correct and there are no run-time errors. Good work!
    printf("Correct!\n");

    return 0;
}

void checkCUDAError(const char *msg)
{
    hipError_t err = hipGetLastError();
    if( hipSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
de943be813c4d43ac02be982b533f54de6d33dc2.cu
// includes, system
#include <stdio.h>
#include <assert.h>

// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);

// Part3: implement the kernel
__global__ void reverseArrayBlock(int *d_out, int *d_in)
{
    int inOffset = blockDim.x * blockIdx.x;
    int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x);
    int in = inOffset + threadIdx.x;
    int out = outOffset + (blockDim.x - 1 - threadIdx.x);
    d_out[out] = d_in[in];
}

/////////////////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
    // pointer for host memory and size
    int *h_a;
    int dimA = 256 * 1024; // 256K elements (1MB total)

    // pointer for device memory
    int *d_b, *d_a;

    // define grid and block size
    int numThreadsPerBlock = 256;

    // Part 1: compute number of blocks needed based on
    // array size and desired block size
    int numBlocks = dimA / numThreadsPerBlock;

    // allocate host and device memory
    size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
    h_a = (int *) malloc(memSize);
    cudaMalloc( (void **) &d_a, memSize );
    cudaMalloc( (void **) &d_b, memSize );

    // Initialize input array on host
    for (int i = 0; i < dimA; ++i)
    {
        h_a[i] = i;
    }

    // Copy host array to device array
    cudaMemcpy( d_a, h_a, memSize, cudaMemcpyHostToDevice );

    // launch kernel
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);
    reverseArrayBlock<<< dimGrid, dimBlock >>>( d_b, d_a );

    // block until the device has completed
    cudaThreadSynchronize();

    // check if kernel execution generated an error
    // Check for any CUDA errors
    checkCUDAError("kernel invocation");

    // device to host copy
    cudaMemcpy( h_a, d_b, memSize, cudaMemcpyDeviceToHost );

    // Check for any CUDA errors
    checkCUDAError("memcpy");

    // verify the data returned to the host is correct
    for (int i = 0; i < dimA; i++)
    {
        assert(h_a[i] == dimA - 1 - i );
    }

    // free device memory
    cudaFree(d_a);
    cudaFree(d_b);

    // free host memory
    free(h_a);

    // If the program makes it this far, then the results are
    // correct and there are no run-time errors. Good work!
    printf("Correct!\n");

    return 0;
}

void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
ccd9cd1c69a5c5302cbd1651f9e044d8725996eb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
 *cr
 *cr (C) Copyright 2010 The Board of Trustees of the
 *cr University of Illinois
 *cr All Rights Reserved
 *cr
 ******************************************************************************/

#include <stdio.h>
#include "support.h"
#include "kernel.hip"

#define HILOS 16

int main(int argc, char**argv) {

    Timer timer;
    hipError_t cuda_ret;

    // Initialize host variables ----------------------------------------------

    printf("\nSetting up the problem..."); fflush(stdout);
    startTime(&timer);

    unsigned int n;
    if(argc == 1) {
        n = 10000;
    } else if(argc == 2) {
        n = atoi(argv[1]);
    } else {
        printf("\n Invalid input parameters!"
               "\n Usage: ./vecadd # Vector of size 10,000 is used"
               "\n Usage: ./vecadd <m> # Vector of size m is used"
               "\n");
        exit(0);
    }

    int ld = n;
    float *A_h = (float*) malloc( sizeof(float)*n*n );
    for (unsigned int i=0; i < n; i++) {
        for (unsigned int j=0; j < n; j++) {
            A_h[i*ld + j] = (rand()%100)/100.00;
        }
    }

    float *B_h = (float*) malloc( sizeof(float)*n*n );
    for (unsigned int i=0; i < n; i++) {
        for (unsigned int j=0; j < n; j++) {
            B_h[i*ld + j] = (rand()%100)/100.00;
        }
    }

    float *C_h = (float*) malloc( sizeof(float)*n*n );

    stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    printf(" Matrix size = %u * %u\n", n,n);

    // Allocate device variables ----------------------------------------------

    printf("Allocating device variables..."); fflush(stdout);
    startTime(&timer);

    //INSERT CODE HERE
    float *A_d, *B_d, *C_d;
    hipMalloc( (void **) &A_d, sizeof(float)*n*n);
    hipMalloc( (void **) &B_d, sizeof(float)*n*n);
    hipMalloc( (void **) &C_d, sizeof(float)*n*n);

    hipDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Copy host variables to device ------------------------------------------

    printf("Copying data from host to device..."); fflush(stdout);
    startTime(&timer);

    //INSERT CODE HERE
    hipMemcpy(A_d, A_h, sizeof(float)*n*n, hipMemcpyHostToDevice);
    hipMemcpy(B_d, B_h, sizeof(float)*n*n, hipMemcpyHostToDevice);

    hipDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Launch kernel ----------------------------------------------------------

    printf("Launching kernel..."); fflush(stdout);
    startTime(&timer);

    //INSERT CODE HERE
    dim3 num_threads(HILOS, HILOS);
    dim3 num_blocks((int)((n + HILOS - 1) / num_threads.x),(int)((n + HILOS - 1) / num_threads.y));
    hipLaunchKernelGGL(( vecAddKernel), dim3(num_blocks), dim3(num_threads), 0, 0, A_d, B_d, C_d, n);

    cuda_ret = hipDeviceSynchronize();
    if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel");
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Copy device variables from host ----------------------------------------

    printf("Copying data from device to host..."); fflush(stdout);
    startTime(&timer);

    //INSERT CODE HERE
    hipMemcpy(C_h, C_d, sizeof(float)*n*n, hipMemcpyDeviceToHost);

    hipDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Verify correctness -----------------------------------------------------

    printf("Verifying results..."); fflush(stdout);

    verify(A_h, B_h, C_h, n);

    // Free memory ------------------------------------------------------------

    free(A_h);
    free(B_h);
    free(C_h);

    //INSERT CODE HERE
    hipFree(A_d);
    hipFree(B_d);
    hipFree(C_d);

    return 0;
}
ccd9cd1c69a5c5302cbd1651f9e044d8725996eb.cu
/******************************************************************************
 *cr
 *cr (C) Copyright 2010 The Board of Trustees of the
 *cr University of Illinois
 *cr All Rights Reserved
 *cr
 ******************************************************************************/

#include <stdio.h>
#include "support.h"
#include "kernel.cu"

#define HILOS 16

int main(int argc, char**argv) {

    Timer timer;
    cudaError_t cuda_ret;

    // Initialize host variables ----------------------------------------------

    printf("\nSetting up the problem..."); fflush(stdout);
    startTime(&timer);

    unsigned int n;
    if(argc == 1) {
        n = 10000;
    } else if(argc == 2) {
        n = atoi(argv[1]);
    } else {
        printf("\n Invalid input parameters!"
               "\n Usage: ./vecadd # Vector of size 10,000 is used"
               "\n Usage: ./vecadd <m> # Vector of size m is used"
               "\n");
        exit(0);
    }

    int ld = n;
    float *A_h = (float*) malloc( sizeof(float)*n*n );
    for (unsigned int i=0; i < n; i++) {
        for (unsigned int j=0; j < n; j++) {
            A_h[i*ld + j] = (rand()%100)/100.00;
        }
    }

    float *B_h = (float*) malloc( sizeof(float)*n*n );
    for (unsigned int i=0; i < n; i++) {
        for (unsigned int j=0; j < n; j++) {
            B_h[i*ld + j] = (rand()%100)/100.00;
        }
    }

    float *C_h = (float*) malloc( sizeof(float)*n*n );

    stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    printf(" Matrix size = %u * %u\n", n,n);

    // Allocate device variables ----------------------------------------------

    printf("Allocating device variables..."); fflush(stdout);
    startTime(&timer);

    //INSERT CODE HERE
    float *A_d, *B_d, *C_d;
    cudaMalloc( (void **) &A_d, sizeof(float)*n*n);
    cudaMalloc( (void **) &B_d, sizeof(float)*n*n);
    cudaMalloc( (void **) &C_d, sizeof(float)*n*n);

    cudaDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Copy host variables to device ------------------------------------------

    printf("Copying data from host to device..."); fflush(stdout);
    startTime(&timer);

    //INSERT CODE HERE
    cudaMemcpy(A_d, A_h, sizeof(float)*n*n, cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B_h, sizeof(float)*n*n, cudaMemcpyHostToDevice);

    cudaDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Launch kernel ----------------------------------------------------------

    printf("Launching kernel..."); fflush(stdout);
    startTime(&timer);

    //INSERT CODE HERE
    dim3 num_threads(HILOS, HILOS);
    dim3 num_blocks((int)((n + HILOS - 1) / num_threads.x),(int)((n + HILOS - 1) / num_threads.y));
    vecAddKernel<<<num_blocks, num_threads>>>(A_d, B_d, C_d, n);

    cuda_ret = cudaDeviceSynchronize();
    if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel");
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Copy device variables from host ----------------------------------------

    printf("Copying data from device to host..."); fflush(stdout);
    startTime(&timer);

    //INSERT CODE HERE
    cudaMemcpy(C_h, C_d, sizeof(float)*n*n, cudaMemcpyDeviceToHost);

    cudaDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Verify correctness -----------------------------------------------------

    printf("Verifying results..."); fflush(stdout);

    verify(A_h, B_h, C_h, n);

    // Free memory ------------------------------------------------------------

    free(A_h);
    free(B_h);
    free(C_h);

    //INSERT CODE HERE
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);

    return 0;
}
8a20e5e8ac0c5c96f1904f9101c30c7f82760983.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @precisions normal z -> c d s */ #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif // every multiprocessor handles one BCSR-block to copy from A __global__ void zbcsrvalcpy_kernel( int size_b, magma_int_t num_blocks, magmaDoubleComplex_ptr *Aval, magmaDoubleComplex_ptr *Bval ) { if(blockIdx.x*65535+blockIdx.y < num_blocks){ magmaDoubleComplex *dA = Aval[ blockIdx.x*65535+blockIdx.y ]; magmaDoubleComplex *dB = Bval[ blockIdx.x*65535+blockIdx.y ]; int i = threadIdx.x; while( i<size_b*size_b ){ dB[i] = dA[i]; i+=BLOCK_SIZE; } } } // every multiprocessor handles one BCSR-block to initialize with 0 __global__ void zbcsrvalzro_kernel( int size_b, magma_int_t num_blocks, magmaDoubleComplex_ptr *Bval ) { if(blockIdx.x*65535+blockIdx.y < num_blocks){ magmaDoubleComplex *dB = Bval[ blockIdx.x*65535+blockIdx.y ]; int i = threadIdx.x; //dB += i; while( i<size_b*size_b ){ dB[i] = MAGMA_Z_MAKE(0.0, 0.0); i+=BLOCK_SIZE; } } } /** Purpose ------- For a Block-CSR ILU factorization, this routine copies the filled blocks from the original matrix A and initializes the blocks that will later be filled in the factorization process with zeros. Arguments --------- @param[in] size_b magma_int_t blocksize in BCSR @param[in] num_blocks magma_int_t number of nonzero blocks @param[in] num_zblocks magma_int_t number of zero-blocks (will later be filled) @param[in] Aval magmaDoubleComplex_ptr * pointers to the nonzero blocks in A @param[in] Bval magmaDoubleComplex_ptr * pointers to the nonzero blocks in B @param[in] Bval2 magmaDoubleComplex_ptr * pointers to the zero blocks in B @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zbcsrvalcpy( magma_int_t size_b, magma_int_t num_blocks, magma_int_t num_zblocks, magmaDoubleComplex_ptr *Aval, magmaDoubleComplex_ptr *Bval, magmaDoubleComplex_ptr *Bval2, magma_queue_t queue ) { dim3 dimBlock( BLOCK_SIZE, 1, 1 ); // the grids are adapted to the number of nonzero/zero blocks // the upper block-number the kernels can handle is 65535*65535 int dimgrid1 = 65535; int dimgrid2 = (num_blocks+65535-1)/65535; int dimgrid3 = (num_zblocks+65535-1)/65535; dim3 dimGrid( dimgrid2, dimgrid1, 1 ); hipLaunchKernelGGL(( zbcsrvalcpy_kernel), dim3(dimGrid),dim3(dimBlock), 0, queue , size_b, num_blocks, Aval, Bval ); dim3 dimGrid2( dimgrid3, dimgrid1, 1 ); hipLaunchKernelGGL(( zbcsrvalzro_kernel), dim3(dimGrid2),dim3(dimBlock), 0, queue , size_b, num_zblocks, Bval2 ); return MAGMA_SUCCESS; }
8a20e5e8ac0c5c96f1904f9101c30c7f82760983.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @precisions normal z -> c d s */ #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif // every multiprocessor handles one BCSR-block to copy from A __global__ void zbcsrvalcpy_kernel( int size_b, magma_int_t num_blocks, magmaDoubleComplex_ptr *Aval, magmaDoubleComplex_ptr *Bval ) { if(blockIdx.x*65535+blockIdx.y < num_blocks){ magmaDoubleComplex *dA = Aval[ blockIdx.x*65535+blockIdx.y ]; magmaDoubleComplex *dB = Bval[ blockIdx.x*65535+blockIdx.y ]; int i = threadIdx.x; while( i<size_b*size_b ){ dB[i] = dA[i]; i+=BLOCK_SIZE; } } } // every multiprocessor handles one BCSR-block to initialize with 0 __global__ void zbcsrvalzro_kernel( int size_b, magma_int_t num_blocks, magmaDoubleComplex_ptr *Bval ) { if(blockIdx.x*65535+blockIdx.y < num_blocks){ magmaDoubleComplex *dB = Bval[ blockIdx.x*65535+blockIdx.y ]; int i = threadIdx.x; //dB += i; while( i<size_b*size_b ){ dB[i] = MAGMA_Z_MAKE(0.0, 0.0); i+=BLOCK_SIZE; } } } /** Purpose ------- For a Block-CSR ILU factorization, this routine copies the filled blocks from the original matrix A and initializes the blocks that will later be filled in the factorization process with zeros. Arguments --------- @param[in] size_b magma_int_t blocksize in BCSR @param[in] num_blocks magma_int_t number of nonzero blocks @param[in] num_zblocks magma_int_t number of zero-blocks (will later be filled) @param[in] Aval magmaDoubleComplex_ptr * pointers to the nonzero blocks in A @param[in] Bval magmaDoubleComplex_ptr * pointers to the nonzero blocks in B @param[in] Bval2 magmaDoubleComplex_ptr * pointers to the zero blocks in B @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zbcsrvalcpy( magma_int_t size_b, magma_int_t num_blocks, magma_int_t num_zblocks, magmaDoubleComplex_ptr *Aval, magmaDoubleComplex_ptr *Bval, magmaDoubleComplex_ptr *Bval2, magma_queue_t queue ) { dim3 dimBlock( BLOCK_SIZE, 1, 1 ); // the grids are adapted to the number of nonzero/zero blocks // the upper block-number the kernels can handle is 65535*65535 int dimgrid1 = 65535; int dimgrid2 = (num_blocks+65535-1)/65535; int dimgrid3 = (num_zblocks+65535-1)/65535; dim3 dimGrid( dimgrid2, dimgrid1, 1 ); zbcsrvalcpy_kernel<<<dimGrid,dimBlock, 0, queue >>> ( size_b, num_blocks, Aval, Bval ); dim3 dimGrid2( dimgrid3, dimgrid1, 1 ); zbcsrvalzro_kernel<<<dimGrid2,dimBlock, 0, queue >>> ( size_b, num_zblocks, Bval2 ); return MAGMA_SUCCESS; }
44ed4fd01d2c344a538c6b5d59fa0cf24dc5f22c.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>

#include "../common/common.h"
#include "../common/cuda_common.cuh"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

__global__ void reduction_unrolling_block2(int *input, int *temp, int size) {
    int tid = threadIdx.x;

    int BLOCK_OFFSET = blockIdx.x * blockDim.x * 2;
    int index = BLOCK_OFFSET + tid;

    int *i_data = input + BLOCK_OFFSET;

    if ((index + blockDim.x) < size) {
        input[index] += input[index + blockDim.x];
    }

    __syncthreads();

    for (int offset = blockDim.x / 2; offset > 0; offset = offset / 2) {
        if (tid < offset) {
            i_data[tid] += i_data[tid + offset];
        }
        __syncthreads();
    }

    if (tid == 0) {
        temp[blockIdx.x] = i_data[0];
    }
}

__global__ void reduction_unrolling_block4(int *input, int *temp, int size) {
    int tid = threadIdx.x;

    int BLOCK_OFFSET = blockIdx.x * blockDim.x * 4;
    int index = BLOCK_OFFSET + tid;

    int *i_data = input + BLOCK_OFFSET;

    if ((index + 3 * blockDim.x) < size) {
        int a1 = input[index];
        int a2 = input[index + blockDim.x];
        int a3 = input[index + 2 * blockDim.x];
        int a4 = input[index + 3 * blockDim.x];
        // plain assignment: a1 already holds input[index], so using += here
        // would count that element twice and break the comparison with the CPU result
        input[index] = a1 + a2 + a3 + a4;
    }

    __syncthreads();

    for (int offset = blockDim.x / 2; offset > 0; offset = offset / 2) {
        if (tid < offset) {
            i_data[tid] += i_data[tid + offset];
        }
        __syncthreads();
    }

    if (tid == 0) {
        temp[blockIdx.x] = i_data[0];
    }
}

int main(int argc, char **argv) {
    printf("Running neighbored pairs reduction kernel \n");

    int size = 1 << 27;  // 128Mb data
    int byte_size = size * sizeof(int);
    int block_size = 128;

    int *h_input, *h_ref;
    h_input = (int *)malloc(byte_size);

    initialize(h_input, size, INIT_RANDOM);

    // get the reduction result from cpu
    int cpu_result = reduction_cpu(h_input, size);

    dim3 block(block_size);
    dim3 grid((size + block.x - 1) / block.x / 4);

    printf("Kernel launch parameters | grid.x: %d, block.x: %d \n", grid.x, block.x);

    int temp_array_byte_size = sizeof(int) * grid.x;

    h_ref = (int *)malloc(temp_array_byte_size);

    int *d_input, *d_temp;

    gpuErrchk(hipMalloc((void **)&d_input, byte_size));
    // the partial-sum array only needs one int per block
    gpuErrchk(hipMalloc((void **)&d_temp, temp_array_byte_size));

    gpuErrchk(hipMemset(d_temp, 0, temp_array_byte_size));
    gpuErrchk(hipMemcpy(d_input, h_input, byte_size, hipMemcpyHostToDevice));

    hipLaunchKernelGGL(( reduction_unrolling_block4), dim3(grid), dim3(block), 0, 0, d_input, d_temp, size);

    gpuErrchk(hipDeviceSynchronize());

    hipMemcpy(h_ref, d_temp, temp_array_byte_size, hipMemcpyDeviceToHost);

    int gpu_result = 0;
    for (int i = 0; i < grid.x; i++) {
        gpu_result += h_ref[i];
    }

    // validity check
    compare_results(gpu_result, cpu_result);

    gpuErrchk(hipFree(d_temp));
    gpuErrchk(hipFree(d_input));

    free(h_ref);
    free(h_input);

    gpuErrchk(hipDeviceReset());
    return 0;
}
44ed4fd01d2c344a538c6b5d59fa0cf24dc5f22c.cu
#include <stdio.h>
#include <stdlib.h>

#include "../common/common.h"
#include "../common/cuda_common.cuh"
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

__global__ void reduction_unrolling_block2(int *input, int *temp, int size) {
    int tid = threadIdx.x;

    int BLOCK_OFFSET = blockIdx.x * blockDim.x * 2;
    int index = BLOCK_OFFSET + tid;

    int *i_data = input + BLOCK_OFFSET;

    if ((index + blockDim.x) < size) {
        input[index] += input[index + blockDim.x];
    }

    __syncthreads();

    for (int offset = blockDim.x / 2; offset > 0; offset = offset / 2) {
        if (tid < offset) {
            i_data[tid] += i_data[tid + offset];
        }
        __syncthreads();
    }

    if (tid == 0) {
        temp[blockIdx.x] = i_data[0];
    }
}

__global__ void reduction_unrolling_block4(int *input, int *temp, int size) {
    int tid = threadIdx.x;

    int BLOCK_OFFSET = blockIdx.x * blockDim.x * 4;
    int index = BLOCK_OFFSET + tid;

    int *i_data = input + BLOCK_OFFSET;

    if ((index + 3 * blockDim.x) < size) {
        int a1 = input[index];
        int a2 = input[index + blockDim.x];
        int a3 = input[index + 2 * blockDim.x];
        int a4 = input[index + 3 * blockDim.x];
        // plain assignment: a1 already holds input[index], so using += here
        // would count that element twice and break the comparison with the CPU result
        input[index] = a1 + a2 + a3 + a4;
    }

    __syncthreads();

    for (int offset = blockDim.x / 2; offset > 0; offset = offset / 2) {
        if (tid < offset) {
            i_data[tid] += i_data[tid + offset];
        }
        __syncthreads();
    }

    if (tid == 0) {
        temp[blockIdx.x] = i_data[0];
    }
}

int main(int argc, char **argv) {
    printf("Running neighbored pairs reduction kernel \n");

    int size = 1 << 27;  // 128Mb data
    int byte_size = size * sizeof(int);
    int block_size = 128;

    int *h_input, *h_ref;
    h_input = (int *)malloc(byte_size);

    initialize(h_input, size, INIT_RANDOM);

    // get the reduction result from cpu
    int cpu_result = reduction_cpu(h_input, size);

    dim3 block(block_size);
    dim3 grid((size + block.x - 1) / block.x / 4);

    printf("Kernel launch parameters | grid.x: %d, block.x: %d \n", grid.x, block.x);

    int temp_array_byte_size = sizeof(int) * grid.x;

    h_ref = (int *)malloc(temp_array_byte_size);

    int *d_input, *d_temp;

    gpuErrchk(cudaMalloc((void **)&d_input, byte_size));
    // the partial-sum array only needs one int per block
    gpuErrchk(cudaMalloc((void **)&d_temp, temp_array_byte_size));

    gpuErrchk(cudaMemset(d_temp, 0, temp_array_byte_size));
    gpuErrchk(cudaMemcpy(d_input, h_input, byte_size, cudaMemcpyHostToDevice));

    reduction_unrolling_block4<<<grid, block>>>(d_input, d_temp, size);

    gpuErrchk(cudaDeviceSynchronize());

    cudaMemcpy(h_ref, d_temp, temp_array_byte_size, cudaMemcpyDeviceToHost);

    int gpu_result = 0;
    for (int i = 0; i < grid.x; i++) {
        gpu_result += h_ref[i];
    }

    // validity check
    compare_results(gpu_result, cpu_result);

    gpuErrchk(cudaFree(d_temp));
    gpuErrchk(cudaFree(d_input));

    free(h_ref);
    free(h_input);

    gpuErrchk(cudaDeviceReset());
    return 0;
}
cc98220572e2c59ee832d5f405c946d339082ebf.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>

#define N 4
#define threads_per_block 4

__global__ void simpleKernel(float *out, float *in)
{
    int index;

    index = blockIdx.x*blockDim.x+threadIdx.x;

    if(index<N)
    {
        out[index]=in[index]*in[index]*in[index];
    }
}

extern "C" void GPU_STUFF(int device)
{
    hipSetDevice(device);
    printf("Device number %d \n",device);

    float *s_host, *r_host;
    int pad, i;
    float *r_device;
    float *s_device;
    size_t size;

    pad = threads_per_block - (N % threads_per_block);
    size = (N+pad)*sizeof(float);

    s_host = (float *)malloc(size);
    r_host = (float *)malloc(size);

    hipMalloc(&s_device, size);
    hipMalloc(&r_device, size);

    dim3 threads(threads_per_block);
    dim3 grid( (N+pad)/threads_per_block );

    for (i=0;i<N;i++)
    {
        r_host[i]=i;
    }

    hipMemcpy(r_device, r_host, size, hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( simpleKernel) , dim3(grid), dim3(threads) , 0, 0, s_device,r_device);

    hipMemcpy(s_host, s_device, size, hipMemcpyDeviceToHost);

    for (i=0;i<N;i++)
    {
        printf("%3f %3f\n",r_host[i],s_host[i]);
    }

    free(s_host);
    free(r_host);

    hipFree(r_device);
    hipFree(s_device);
}
cc98220572e2c59ee832d5f405c946d339082ebf.cu
#include <stdio.h>
#include <stdlib.h>

#define N 4
#define threads_per_block 4

__global__ void simpleKernel(float *out, float *in)
{
    int index;

    index = blockIdx.x*blockDim.x+threadIdx.x;

    if(index<N)
    {
        out[index]=in[index]*in[index]*in[index];
    }
}

extern "C" void GPU_STUFF(int device)
{
    cudaSetDevice(device);
    printf("Device number %d \n",device);

    float *s_host, *r_host;
    int pad, i;
    float *r_device;
    float *s_device;
    size_t size;

    pad = threads_per_block - (N % threads_per_block);
    size = (N+pad)*sizeof(float);

    s_host = (float *)malloc(size);
    r_host = (float *)malloc(size);

    cudaMalloc(&s_device, size);
    cudaMalloc(&r_device, size);

    dim3 threads(threads_per_block);
    dim3 grid( (N+pad)/threads_per_block );

    for (i=0;i<N;i++)
    {
        r_host[i]=i;
    }

    cudaMemcpy(r_device, r_host, size, cudaMemcpyHostToDevice);

    simpleKernel <<< grid, threads >>> (s_device,r_device);

    cudaMemcpy(s_host, s_device, size, cudaMemcpyDeviceToHost);

    for (i=0;i<N;i++)
    {
        printf("%3f %3f\n",r_host[i],s_host[i]);
    }

    free(s_host);
    free(r_host);

    cudaFree(r_device);
    cudaFree(s_device);
}