column          type            min length   max length
hip_filename    stringlengths   5            84
hip_content     stringlengths   79           9.69M
cuda_filename   stringlengths   4            83
cuda_content    stringlengths   19           9.69M
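Each row pairs a HIP translation (hip_filename, hip_content, produced automatically by hipify) with its CUDA original (cuda_filename, cuda_content). As a rough illustration of the mechanical renames such a pair encodes (the inserted hip/hip_runtime.h include, cudaStream_t to hipStream_t, cudaFuncSetCacheConfig to hipFuncSetCacheConfig, cufftHandle to hipfftHandle, all visible in the row below), here is a minimal, hypothetical CUDA program, not taken from the dataset, with the HIP spellings that hipify would typically produce noted in comments.

// Minimal sketch (illustrative assumption, not a dataset row) of the CUDA-to-HIP rename
// pattern seen in the hip_content / cuda_content pair below.
#include <cstdio>
#include <cuda_runtime.h>   // hipify adds #include "hip/hip_runtime.h"

__global__ void scale(float *x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

int main() {
  const int n = 1 << 20;
  float *d_x = nullptr;
  cudaMalloc(&d_x, n * sizeof(float));                    // -> hipMalloc
  cudaMemset(d_x, 0, n * sizeof(float));                  // -> hipMemset

  cudaStream_t stream;                                    // -> hipStream_t
  cudaStreamCreate(&stream);                              // -> hipStreamCreate
  cudaFuncSetCacheConfig(scale, cudaFuncCachePreferL1);   // -> hipFuncSetCacheConfig

  // kernel launch syntax is unchanged by hipify
  scale<<<(n + 255) / 256, 256, 0, stream>>>(d_x, 2.0f, n);

  cudaStreamSynchronize(stream);                          // -> hipStreamSynchronize
  cudaStreamDestroy(stream);                              // -> hipStreamDestroy
  cudaFree(d_x);                                          // -> hipFree
  printf("done\n");
  return 0;
}

For this particular row, the two columns appear to differ essentially only in the inserted include and these API prefixes; the kernel bodies and class structure are otherwise the same.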
9698dc0bdc84e3a27e22202330c185966b735a48.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <quda_internal.h> #include <quda_matrix.h> #include <tune_quda.h> #include <gauge_field.h> #include <gauge_field_order.h> #include <launch_kernel.cuh> #include <unitarization_links.h> #include <atomic.cuh> #include <cub_helper.cuh> #include <index_helper.cuh> #include <hipfft.h> #ifdef GPU_GAUGE_ALG #include <CUFFT_Plans.h> #endif namespace quda { #ifdef GPU_GAUGE_ALG //Comment if you don't want to use textures for Delta(x) and g(x) #define GAUGEFIXING_SITE_MATRIX_LOAD_TEX //UNCOMMENT THIS IF YOU WAN'T TO USE LESS MEMORY #define GAUGEFIXING_DONT_USE_GX //Without using the precalculation of g(x), //we loose some performance, because Delta(x) is written in normal lattice coordinates need for the FFTs //and the gauge array in even/odd format #ifdef HOST_DEBUG #ifdef GAUGEFIXING_DONT_USE_GX #warning Not using precalculated g(x) #else #warning Using precalculated g(x) #endif #endif #ifndef FL_UNITARIZE_PI #define FL_UNITARIZE_PI 3.14159265358979323846 #endif texture<float2, 1, hipReadModeElementType> GXTexSingle; texture<int4, 1, hipReadModeElementType> GXTexDouble; //Delta is only stored using 12 real number parameters, // (0,0), (0,1), (0,2), (1,1), (1,2) and (2,2) // (0,0), (1,1) and (0,1) don't have real part, however we need a complex for the FFTs texture<float2, 1, hipReadModeElementType> DELTATexSingle; texture<int4, 1, hipReadModeElementType> DELTATexDouble; template <class T> inline __device__ T TEXTURE_GX(int id){ return 0.0; } template <> inline __device__ complex<float> TEXTURE_GX<complex<float> >(int id){ return tex1Dfetch(GXTexSingle, id); } template <> inline __device__ complex<double> TEXTURE_GX<complex<double> >(int id){ int4 u = tex1Dfetch(GXTexDouble, id); return complex<double>(__hiloint2double(u.y, u.x), __hiloint2double(u.w, u.z)); } template <class T> inline __device__ T TEXTURE_DELTA(int id){ return 0.0; } template <> inline __device__ complex<float> TEXTURE_DELTA<complex<float> >(int id){ return tex1Dfetch(DELTATexSingle, id); } template <> inline __device__ complex<double> TEXTURE_DELTA<complex<double> >(int id){ int4 u = tex1Dfetch(DELTATexDouble, id); return complex<double>(__hiloint2double(u.y, u.x), __hiloint2double(u.w, u.z)); } static void BindTex(complex<float> *delta, complex<float> *gx, size_t bytes){ #ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX #ifndef GAUGEFIXING_DONT_USE_GX hipBindTexture(0, GXTexSingle, gx, bytes); #endif hipBindTexture(0, DELTATexSingle, delta, bytes); #endif } static void BindTex(complex<double> *delta, complex<double> *gx, size_t bytes){ #ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX #ifndef GAUGEFIXING_DONT_USE_GX hipBindTexture(0, GXTexDouble, gx, bytes); #endif hipBindTexture(0, DELTATexDouble, delta, bytes); #endif } static void UnBindTex(complex<float> *delta, complex<float> *gx){ #ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX #ifndef GAUGEFIXING_DONT_USE_GX hipUnbindTexture(GXTexSingle); #endif hipUnbindTexture(DELTATexSingle); #endif } static void UnBindTex(complex<double> *delta, complex<double> *gx){ #ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX #ifndef GAUGEFIXING_DONT_USE_GX hipUnbindTexture(GXTexDouble); #endif hipUnbindTexture(DELTATexDouble); #endif } template <typename Float> struct GaugeFixFFTRotateArg { int threads; // number of active threads required int X[4]; // grid dimensions complex<Float> *tmp0; complex<Float> *tmp1; GaugeFixFFTRotateArg(const cudaGaugeField &data){ for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir]; threads = X[0] * 
X[1] * X[2] * X[3]; tmp0 = 0; tmp1 = 0; } }; template <int direction, typename Float> __global__ void fft_rotate_kernel_2D2D(GaugeFixFFTRotateArg<Float> arg){ //Cmplx *data_in, Cmplx *data_out){ int id = blockIdx.x * blockDim.x + threadIdx.x; if ( id >= arg.threads ) return; if ( direction == 0 ) { int x3 = id / (arg.X[0] * arg.X[1] * arg.X[2]); int x2 = (id / (arg.X[0] * arg.X[1])) % arg.X[2]; int x1 = (id / arg.X[0]) % arg.X[1]; int x0 = id % arg.X[0]; int id = x0 + (x1 + (x2 + x3 * arg.X[2]) * arg.X[1]) * arg.X[0]; int id_out = x2 + (x3 + (x0 + x1 * arg.X[0]) * arg.X[3]) * arg.X[2]; arg.tmp1[id_out] = arg.tmp0[id]; //data_out[id_out] = data_in[id]; } if ( direction == 1 ) { int x1 = id / (arg.X[2] * arg.X[3] * arg.X[0]); int x0 = (id / (arg.X[2] * arg.X[3])) % arg.X[0]; int x3 = (id / arg.X[2]) % arg.X[3]; int x2 = id % arg.X[2]; int id = x2 + (x3 + (x0 + x1 * arg.X[0]) * arg.X[3]) * arg.X[2]; int id_out = x0 + (x1 + (x2 + x3 * arg.X[2]) * arg.X[1]) * arg.X[0]; arg.tmp1[id_out] = arg.tmp0[id]; //data_out[id_out] = data_in[id]; } } template<typename Float> class GaugeFixFFTRotate : Tunable { GaugeFixFFTRotateArg<Float> arg; int direction; mutable char aux_string[128]; // used as a label in the autotuner private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } //bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.threads; } public: GaugeFixFFTRotate(GaugeFixFFTRotateArg<Float> &arg) : arg(arg) { direction = 0; } ~GaugeFixFFTRotate () { } void setDirection(int dir, complex<Float> *data_in, complex<Float> *data_out){ direction = dir; arg.tmp0 = data_in; arg.tmp1 = data_out; } void apply(const hipStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); if ( direction == 0 ) fft_rotate_kernel_2D2D<0, Float ><< < tp.grid, tp.block, 0, stream >> > (arg); else if ( direction == 1 ) fft_rotate_kernel_2D2D<1, Float ><< < tp.grid, tp.block, 0, stream >> > (arg); else errorQuda("Error in GaugeFixFFTRotate option.\n"); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } long long flops() const { return 0; } long long bytes() const { return 4LL * sizeof(Float) * arg.threads; } }; template <typename Float, typename Gauge> struct GaugeFixQualityArg : public ReduceArg<double2> { int threads; // number of active threads required int X[4]; // grid dimensions Gauge dataOr; complex<Float> *delta; GaugeFixQualityArg(const Gauge &dataOr, const cudaGaugeField &data, complex<Float> * delta) : ReduceArg<double2>(), dataOr(dataOr), delta(delta) { for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir]; threads = data.VolumeCB(); } double getAction(){ return result_h[0].x; } double getTheta(){ return result_h[0].y; } }; template<int blockSize, int Elems, typename Float, typename Gauge, int gauge_dir> __global__ void computeFix_quality(GaugeFixQualityArg<Float, Gauge> argQ){ int idx_cb = threadIdx.x + blockIdx.x * blockDim.x; int parity = threadIdx.y; double2 data = make_double2(0.0,0.0); while (idx_cb < argQ.threads) { typedef complex<Float> Cmplx; int x[4]; getCoords(x, idx_cb, argQ.X, parity); Matrix<Cmplx,3> delta; setZero(&delta); 
//idx = linkIndex(x,X); for ( int mu = 0; mu < gauge_dir; mu++ ) { Matrix<Cmplx,3> U; argQ.dataOr.load((Float *)(U.data), idx_cb, mu, parity); delta -= U; } //18*gauge_dir data.x += -delta(0, 0).x - delta(1, 1).x - delta(2, 2).x; //2 for ( int mu = 0; mu < gauge_dir; mu++ ) { Matrix<Cmplx,3> U; argQ.dataOr.load((Float*)(U.data),linkIndexM1(x,argQ.X,mu), mu, 1 - parity); delta += U; } //18*gauge_dir delta -= conj(delta); //18 //SAVE DELTA!!!!! SubTraceUnit(delta); int idx = getIndexFull(idx_cb, argQ.X, parity); //Saving Delta argQ.delta[idx] = delta(0,0); argQ.delta[idx + 2 * argQ.threads] = delta(0,1); argQ.delta[idx + 4 * argQ.threads] = delta(0,2); argQ.delta[idx + 6 * argQ.threads] = delta(1,1); argQ.delta[idx + 8 * argQ.threads] = delta(1,2); argQ.delta[idx + 10 * argQ.threads] = delta(2,2); //12 data.y += getRealTraceUVdagger(delta, delta); //35 //T=36*gauge_dir+65 idx_cb += blockDim.x * gridDim.x; } reduce2d<blockSize,2>(argQ, data); } template<int Elems, typename Float, typename Gauge, int gauge_dir> class GaugeFixQuality : TunableLocalParity { GaugeFixQualityArg<Float, Gauge> argQ; mutable char aux_string[128]; // used as a label in the autotuner private: bool tuneGridDim() const { return true; } public: GaugeFixQuality(GaugeFixQualityArg<Float, Gauge> &argQ) : argQ(argQ) { } ~GaugeFixQuality () { } void apply(const hipStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); argQ.result_h[0] = make_double2(0.0,0.0); LAUNCH_KERNEL_LOCAL_PARITY(computeFix_quality, tp, stream, argQ, Elems, Float, Gauge, gauge_dir); qudaDeviceSynchronize(); argQ.result_h[0].x /= (double)(3 * gauge_dir * 2 * argQ.threads); argQ.result_h[0].y /= (double)(3 * 2 * argQ.threads); } TuneKey tuneKey() const { std::stringstream vol; vol << argQ.X[0] << "x" << argQ.X[1] << "x" << argQ.X[2] << "x" << argQ.X[3]; sprintf(aux_string,"threads=%d,prec=%lu,gaugedir=%d", argQ.threads, sizeof(Float), gauge_dir); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } long long flops() const { return (36LL * gauge_dir + 65LL) * 2 * argQ.threads; } // Only correct if there is no link reconstruction, no cub reduction accounted also long long bytes() const { return (2LL * gauge_dir + 2LL) * Elems * 2 * argQ.threads * sizeof(Float); } //Not accounting the reduction!!! 
}; template <typename Float> struct GaugeFixArg { int threads; // number of active threads required int X[4]; // grid dimensions cudaGaugeField &data; Float *invpsq; complex<Float> *delta; complex<Float> *gx; GaugeFixArg( cudaGaugeField & data, const int Elems) : data(data){ for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir]; threads = X[0] * X[1] * X[2] * X[3]; invpsq = (Float*)device_malloc(sizeof(Float) * threads); delta = (complex<Float>*)device_malloc(sizeof(complex<Float>) * threads * 6); #ifdef GAUGEFIXING_DONT_USE_GX gx = (complex<Float>*)device_malloc(sizeof(complex<Float>) * threads); #else gx = (complex<Float>*)device_malloc(sizeof(complex<Float>) * threads * Elems); #endif BindTex(delta, gx, sizeof(complex<Float>) * threads * Elems); } void free(){ UnBindTex(delta, gx); device_free(invpsq); device_free(delta); device_free(gx); } }; template <typename Float> __global__ void kernel_gauge_set_invpsq(GaugeFixArg<Float> arg){ int id = blockIdx.x * blockDim.x + threadIdx.x; if ( id >= arg.threads ) return; int x1 = id / (arg.X[2] * arg.X[3] * arg.X[0]); int x0 = (id / (arg.X[2] * arg.X[3])) % arg.X[0]; int x3 = (id / arg.X[2]) % arg.X[3]; int x2 = id % arg.X[2]; //id = x2 + (x3 + (x0 + x1 * arg.X[0]) * arg.X[3]) * arg.X[2]; Float sx = sin( (Float)x0 * FL_UNITARIZE_PI / (Float)arg.X[0]); Float sy = sin( (Float)x1 * FL_UNITARIZE_PI / (Float)arg.X[1]); Float sz = sin( (Float)x2 * FL_UNITARIZE_PI / (Float)arg.X[2]); Float st = sin( (Float)x3 * FL_UNITARIZE_PI / (Float)arg.X[3]); Float sinsq = sx * sx + sy * sy + sz * sz + st * st; Float prcfact = 0.0; //The FFT normalization is done here if ( sinsq > 0.00001 ) prcfact = 4.0 / (sinsq * (Float)arg.threads); arg.invpsq[id] = prcfact; } template<typename Float> class GaugeFixSETINVPSP : Tunable { GaugeFixArg<Float> arg; mutable char aux_string[128]; // used as a label in the autotuner private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.threads; } public: GaugeFixSETINVPSP(GaugeFixArg<Float> &arg) : arg(arg) { } ~GaugeFixSETINVPSP () { } void apply(const hipStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); kernel_gauge_set_invpsq<Float><< < tp.grid, tp.block, 0, stream >> > (arg); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } long long flops() const { return 21 * arg.threads; } long long bytes() const { return sizeof(Float) * arg.threads; } }; template<typename Float> __global__ void kernel_gauge_mult_norm_2D(GaugeFixArg<Float> arg){ int id = blockIdx.x * blockDim.x + threadIdx.x; if ( id < arg.threads ) arg.gx[id] = arg.gx[id] * arg.invpsq[id]; } template<typename Float> class GaugeFixINVPSP : Tunable { GaugeFixArg<Float> arg; mutable char aux_string[128]; // used as a label in the autotuner private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } //bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.threads; } public: GaugeFixINVPSP(GaugeFixArg<Float> &arg) : arg(arg){ hipFuncSetCacheConfig( kernel_gauge_mult_norm_2D<Float>, hipFuncCachePreferL1); } ~GaugeFixINVPSP () { } void apply(const hipStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); kernel_gauge_mult_norm_2D<Float><< < tp.grid, tp.block, 0, stream >> > (arg); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } void preTune(){ //since delta contents are irrelevant at this point, we can swap gx with delta complex<Float> *tmp = arg.gx; arg.gx = arg.delta; arg.delta = tmp; } void postTune(){ arg.gx = arg.delta; } long long flops() const { return 2LL * arg.threads; } long long bytes() const { return 5LL * sizeof(Float) * arg.threads; } }; template <typename Float> __host__ __device__ inline void reunit_link( Matrix<complex<Float>,3> &U ){ complex<Float> t2((Float)0.0, (Float)0.0); Float t1 = 0.0; //first normalize first row //sum of squares of row #pragma unroll for ( int c = 0; c < 3; c++ ) t1 += norm(U(0,c)); t1 = (Float)1.0 / sqrt(t1); //14 //used to normalize row #pragma unroll for ( int c = 0; c < 3; c++ ) U(0,c) *= t1; //6 #pragma unroll for ( int c = 0; c < 3; c++ ) t2 += conj(U(0,c)) * U(1,c); //24 #pragma unroll for ( int c = 0; c < 3; c++ ) U(1,c) -= t2 * U(0,c); //24 //normalize second row //sum of squares of row t1 = 0.0; #pragma unroll for ( int c = 0; c < 3; c++ ) t1 += norm(U(1,c)); t1 = (Float)1.0 / sqrt(t1); //14 //used to normalize row #pragma unroll for ( int c = 0; c < 3; c++ ) U(1, c) *= t1; //6 //Reconstruct lat row U(2,0) = conj(U(0,1) * U(1,2) - U(0,2) * U(1,1)); U(2,1) = conj(U(0,2) * U(1,0) - U(0,0) * U(1,2)); U(2,2) = conj(U(0,0) * U(1,1) - U(0,1) * U(1,0)); //42 //T=130 } #ifdef GAUGEFIXING_DONT_USE_GX template <typename Float, typename Gauge> __global__ void kernel_gauge_fix_U_EO_NEW( GaugeFixArg<Float> arg, Gauge dataOr, Float half_alpha){ int id = threadIdx.x + blockIdx.x * blockDim.x; int parity = threadIdx.y; if ( id >= arg.threads/2 ) return; typedef complex<Float> Cmplx; int x[4]; getCoords(x, id, arg.X, parity); int idx = ((x[3] * arg.X[2] + x[2]) * arg.X[1] + x[1]) * arg.X[0] + x[0]; Matrix<Cmplx,3> de; //Read Delta #ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX de(0,0) = TEXTURE_DELTA<Cmplx>(idx + 0 * arg.threads); de(0,1) = TEXTURE_DELTA<Cmplx>(idx + 1 * arg.threads); de(0,2) = TEXTURE_DELTA<Cmplx>(idx + 2 * arg.threads); de(1,1) = TEXTURE_DELTA<Cmplx>(idx + 3 * arg.threads); de(1,2) = TEXTURE_DELTA<Cmplx>(idx + 4 * arg.threads); de(2,2) = TEXTURE_DELTA<Cmplx>(idx + 5 * arg.threads); #else de(0,0) = arg.delta[idx + 0 * arg.threads]; de(0,1) = arg.delta[idx + 1 * arg.threads]; de(0,2) = arg.delta[idx + 2 * arg.threads]; de(1,1) = arg.delta[idx + 3 * arg.threads]; de(1,2) = arg.delta[idx + 4 * arg.threads]; de(2,2) = arg.delta[idx + 5 * arg.threads]; #endif de(1,0) = Cmplx(-de(0,1).x, de(0,1).y); de(2,0) = Cmplx(-de(0,2).x, de(0,2).y); de(2,1) = Cmplx(-de(1,2).x, de(1,2).y); Matrix<Cmplx,3> g; setIdentity(&g); g += de * half_alpha; //36 reunit_link<Float>( g ); //130 for ( int mu = 0; mu < 4; mu++ ) { Matrix<Cmplx,3> U; Matrix<Cmplx,3> g0; dataOr.load((Float*)(U.data),id, mu, parity); U = g * U; //198 idx = linkNormalIndexP1(x,arg.X,mu); //Read Delta #ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX de(0,0) = 
TEXTURE_DELTA<Cmplx>(idx + 0 * arg.threads); de(0,1) = TEXTURE_DELTA<Cmplx>(idx + 1 * arg.threads); de(0,2) = TEXTURE_DELTA<Cmplx>(idx + 2 * arg.threads); de(1,1) = TEXTURE_DELTA<Cmplx>(idx + 3 * arg.threads); de(1,2) = TEXTURE_DELTA<Cmplx>(idx + 4 * arg.threads); de(2,2) = TEXTURE_DELTA<Cmplx>(idx + 5 * arg.threads); #else de(0,0) = arg.delta[idx + 0 * arg.threads]; de(0,1) = arg.delta[idx + 1 * arg.threads]; de(0,2) = arg.delta[idx + 2 * arg.threads]; de(1,1) = arg.delta[idx + 3 * arg.threads]; de(1,2) = arg.delta[idx + 4 * arg.threads]; de(2,2) = arg.delta[idx + 5 * arg.threads]; #endif de(1,0) = Cmplx(-de(0,1).x, de(0,1).y); de(2,0) = Cmplx(-de(0,2).x, de(0,2).y); de(2,1) = Cmplx(-de(1,2).x, de(1,2).y); setIdentity(&g0); g0 += de * half_alpha; //36 reunit_link<Float>( g0 ); //130 U = U * conj(g0); //198 dataOr.save((Float*)(U.data),id, mu, parity); } } template<typename Float, typename Gauge> class GaugeFixNEW : TunableLocalParity { GaugeFixArg<Float> arg; Float half_alpha; Gauge dataOr; mutable char aux_string[128]; // used as a label in the autotuner private: // since GaugeFixArg is used by other kernels that don't use // tunableLocalParity, arg.threads stores Volume and not VolumeCB // so we need to divide by two unsigned int minThreads() const { return arg.threads/2; } public: GaugeFixNEW(Gauge & dataOr, GaugeFixArg<Float> &arg, Float alpha) : dataOr(dataOr), arg(arg) { half_alpha = alpha * 0.5; hipFuncSetCacheConfig( kernel_gauge_fix_U_EO_NEW<Float, Gauge>, hipFuncCachePreferL1); } ~GaugeFixNEW () { } void setAlpha(Float alpha){ half_alpha = alpha * 0.5; } void apply(const hipStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); kernel_gauge_fix_U_EO_NEW<Float, Gauge><< < tp.grid, tp.block, 0, stream >> > (arg, dataOr, half_alpha); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x" << arg.X[1] << "x" << arg.X[2] << "x" << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } //need this void preTune() { arg.data.backup(); } void postTune() { arg.data.restore(); } long long flops() const { return 2414LL * arg.threads; //Not accounting here the reconstruction of the gauge if 12 or 8!!!!!! 
} long long bytes() const { return ( dataOr.Bytes() * 4LL + 5 * 12LL * sizeof(Float)) * arg.threads; } }; #else template <int Elems, typename Float> __global__ void kernel_gauge_GX(GaugeFixArg<Float> arg, Float half_alpha){ int id = blockIdx.x * blockDim.x + threadIdx.x; if ( id >= arg.threads ) return; typedef complex<Float> Cmplx; Matrix<Cmplx,3> de; //Read Delta #ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX de(0,0) = TEXTURE_DELTA<Cmplx>(id); de(0,1) = TEXTURE_DELTA<Cmplx>(id + arg.threads); de(0,2) = TEXTURE_DELTA<Cmplx>(id + 2 * arg.threads); de(1,1) = TEXTURE_DELTA<Cmplx>(id + 3 * arg.threads); de(1,2) = TEXTURE_DELTA<Cmplx>(id + 4 * arg.threads); de(2,2) = TEXTURE_DELTA<Cmplx>(id + 5 * arg.threads); #else de(0,0) = arg.delta[id]; de(0,1) = arg.delta[id + arg.threads]; de(0,2) = arg.delta[id + 2 * arg.threads]; de(1,1) = arg.delta[id + 3 * arg.threads]; de(1,2) = arg.delta[id + 4 * arg.threads]; de(2,2) = arg.delta[id + 5 * arg.threads]; #endif de(1,0) = makeComplex(-de(0,1).x, de(0,1).y); de(2,0) = makeComplex(-de(0,2).x, de(0,2).y); de(2,1) = makeComplex(-de(1,2).x, de(1,2).y); Matrix<Cmplx,3> g; setIdentity(&g); g += de * half_alpha; //36 reunit_link<Float>( g ); //130 //gx is represented in even/odd order //normal lattice index to even/odd index int x3 = id / (arg.X[0] * arg.X[1] * arg.X[2]); int x2 = (id / (arg.X[0] * arg.X[1])) % arg.X[2]; int x1 = (id / arg.X[0]) % arg.X[1]; int x0 = id % arg.X[0]; id = (x0 + (x1 + (x2 + x3 * arg.X[2]) * arg.X[1]) * arg.X[0]) >> 1; id += ((x0 + x1 + x2 + x3) & 1 ) * arg.threads / 2; for ( int i = 0; i < Elems; i++ ) arg.gx[id + i * arg.threads] = g.data[i]; //T=166 for Elems 9 //T=208 for Elems 6 } template<int Elems, typename Float> class GaugeFix_GX : Tunable { GaugeFixArg<Float> arg; Float half_alpha; mutable char aux_string[128]; // used as a label in the autotuner private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } //bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.threads; } public: GaugeFix_GX(GaugeFixArg<Float> &arg, Float alpha) : arg(arg) { half_alpha = alpha * 0.5; hipFuncSetCacheConfig( kernel_gauge_GX<Elems, Float>, hipFuncCachePreferL1); } ~GaugeFix_GX () { } void setAlpha(Float alpha){ half_alpha = alpha * 0.5; } void apply(const hipStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); kernel_gauge_GX<Elems, Float><< < tp.grid, tp.block, 0, stream >> > (arg, half_alpha); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } long long flops() const { if ( Elems == 6 ) return 208LL * arg.threads; else return 166LL * arg.threads; } long long bytes() const { return 4LL * Elems * sizeof(Float) * arg.threads; } }; template <int Elems, typename Float, typename Gauge> __global__ void kernel_gauge_fix_U_EO( GaugeFixArg<Float> arg, Gauge dataOr){ int idd = threadIdx.x + blockIdx.x * blockDim.x; if ( idd >= arg.threads ) return; int parity = 0; int id = idd; if ( idd >= arg.threads / 2 ) { parity = 1; id -= arg.threads / 2; } typedef complex<Float> Cmplx; Matrix<Cmplx,3> g; //for(int i = 0; i < Elems; i++) g.data[i] = arg.gx[idd + i * arg.threads]; for ( int i = 0; i < Elems; i++ ) { #ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX g.data[i] = TEXTURE_GX<Cmplx>(idd + i * arg.threads); #else g.data[i] = arg.gx[idd + i * arg.threads]; #endif } if ( Elems == 6 ) { g(2,0) = conj(g(0,1) * g(1,2) - g(0,2) * g(1,1)); g(2,1) = conj(g(0,2) * g(1,0) - g(0,0) * g(1,2)); g(2,2) = conj(g(0,0) * g(1,1) - g(0,1) * g(1,0)); //42 } int x[4]; getCoords(x, id, arg.X, parity); for ( int mu = 0; mu < 4; mu++ ) { Matrix<Cmplx,3> U; Matrix<Cmplx,3> g0; dataOr.load((Float*)(U.data),id, mu, parity); U = g * U; //198 int idm1 = linkIndexP1(x,arg.X,mu); idm1 += (1 - parity) * arg.threads / 2; //for(int i = 0; i < Elems; i++) g0.data[i] = arg.gx[idm1 + i * arg.threads]; for ( int i = 0; i < Elems; i++ ) { #ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX g0.data[i] = TEXTURE_GX<Cmplx>(idm1 + i * arg.threads); #else g0.data[i] = arg.gx[idm1 + i * arg.threads]; #endif } if ( Elems == 6 ) { g0(2,0) = conj(g0(0,1) * g0(1,2) - g0(0,2) * g0(1,1)); g0(2,1) = conj(g0(0,2) * g0(1,0) - g0(0,0) * g0(1,2)); g0(2,2) = conj(g0(0,0) * g0(1,1) - g0(0,1) * g0(1,0)); //42 } U = U * conj(g0); //198 dataOr.save((Float*)(U.data),id, mu, parity); } //T=42+4*(198*2+42) Elems=6 //T=4*(198*2) Elems=9 //Not accounting here the reconstruction of the gauge if 12 or 8!!!!!! } template<int Elems, typename Float, typename Gauge> class GaugeFix : Tunable { GaugeFixArg<Float> arg; Gauge dataOr; mutable char aux_string[128]; // used as a label in the autotuner private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } //bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.threads; } public: GaugeFix(Gauge & dataOr, GaugeFixArg<Float> &arg) : dataOr(dataOr), arg(arg) { hipFuncSetCacheConfig( kernel_gauge_fix_U_EO<Elems, Float, Gauge>, hipFuncCachePreferL1); } ~GaugeFix () { } void apply(const hipStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); kernel_gauge_fix_U_EO<Elems, Float, Gauge><< < tp.grid, tp.block, 0, stream >> > (arg, dataOr); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } //need this void preTune() { arg.data.backup(); } void postTune() { arg.data.restore(); } long long flops() const { if ( Elems == 6 ) return 1794LL * arg.threads; else return 1536LL * arg.threads; //Not accounting here the reconstruction of the gauge if 12 or 8!!!!!! } long long bytes() const { return 26LL * Elems * sizeof(Float) * arg.threads; } }; #endif //GAUGEFIXING_DONT_USE_GX template<int Elems, typename Float, typename Gauge, int gauge_dir> void gaugefixingFFT( Gauge dataOr, cudaGaugeField& data, \ const int Nsteps, const int verbose_interval, \ const Float alpha0, const int autotune, const double tolerance, \ const int stopWtheta) { TimeProfile profileInternalGaugeFixFFT("InternalGaugeFixQudaFFT", false); profileInternalGaugeFixFFT.TPSTART(QUDA_PROFILE_COMPUTE); Float alpha = alpha0; std::cout << "\tAlpha parameter of the Steepest Descent Method: " << alpha << std::endl; if ( autotune ) std::cout << "\tAuto tune active: yes" << std::endl; else std::cout << "\tAuto tune active: no" << std::endl; std::cout << "\tStop criterium: " << tolerance << std::endl; if ( stopWtheta ) std::cout << "\tStop criterium method: theta" << std::endl; else std::cout << "\tStop criterium method: Delta" << std::endl; std::cout << "\tMaximum number of iterations: " << Nsteps << std::endl; std::cout << "\tPrint convergence results at every " << verbose_interval << " steps" << std::endl; unsigned int delta_pad = data.X()[0] * data.X()[1] * data.X()[2] * data.X()[3]; int4 size = make_int4( data.X()[0], data.X()[1], data.X()[2], data.X()[3] ); hipfftHandle plan_xy; hipfftHandle plan_zt; GaugeFixArg<Float> arg(data, Elems); SetPlanFFT2DMany( plan_zt, size, 0, arg.delta); //for space and time ZT SetPlanFFT2DMany( plan_xy, size, 1, arg.delta); //with space only XY GaugeFixFFTRotateArg<Float> arg_rotate(data); GaugeFixFFTRotate<Float> GFRotate(arg_rotate); GaugeFixSETINVPSP<Float> setinvpsp(arg); setinvpsp.apply(0); GaugeFixINVPSP<Float> invpsp(arg); #ifdef GAUGEFIXING_DONT_USE_GX //without using GX, gx will be created only for plane rotation but with less size GaugeFixNEW<Float, Gauge> gfixNew(dataOr, arg, alpha); #else //using GX GaugeFix_GX<Elems, Float> calcGX(arg, alpha); GaugeFix<Elems, Float, Gauge> gfix(dataOr, arg); #endif GaugeFixQualityArg<Float, Gauge> argQ(dataOr, data, arg.delta); GaugeFixQuality<Elems, Float, Gauge, gauge_dir> gfixquality(argQ); gfixquality.apply(0); double action0 = argQ.getAction(); printf("Step: %d\tAction: %.16e\ttheta: %.16e\n", 0, argQ.getAction(), argQ.getTheta()); double diff = 0.0; int iter = 0; for ( iter = 0; iter < Nsteps; iter++ ) { for ( int k = 0; k < 6; k++ ) { //------------------------------------------------------------------------ // Set a pointer do the element k in lattice volume // each element is stored with stride lattice volume // it uses 
gx as temporary array!!!!!! //------------------------------------------------------------------------ complex<Float> *_array = arg.delta + k * delta_pad; ////// 2D FFT + 2D FFT //------------------------------------------------------------------------ // Perform FFT on xy plane //------------------------------------------------------------------------ ApplyFFT(plan_xy, _array, arg.gx, HIPFFT_FORWARD); //------------------------------------------------------------------------ // Rotate hypercube, xyzt -> ztxy //------------------------------------------------------------------------ GFRotate.setDirection(0, arg.gx, _array); GFRotate.apply(0); //------------------------------------------------------------------------ // Perform FFT on zt plane //------------------------------------------------------------------------ ApplyFFT(plan_zt, _array, arg.gx, HIPFFT_FORWARD); //------------------------------------------------------------------------ // Normalize FFT and apply pmax^2/p^2 //------------------------------------------------------------------------ invpsp.apply(0); //------------------------------------------------------------------------ // Perform IFFT on zt plane //------------------------------------------------------------------------ ApplyFFT(plan_zt, arg.gx, _array, HIPFFT_BACKWARD); //------------------------------------------------------------------------ // Rotate hypercube, ztxy -> xyzt //------------------------------------------------------------------------ GFRotate.setDirection(1, _array, arg.gx); GFRotate.apply(0); //------------------------------------------------------------------------ // Perform IFFT on xy plane //------------------------------------------------------------------------ ApplyFFT(plan_xy, arg.gx, _array, HIPFFT_BACKWARD); } #ifdef GAUGEFIXING_DONT_USE_GX //------------------------------------------------------------------------ // Apply gauge fix to current gauge field //------------------------------------------------------------------------ gfixNew.apply(0); #else //------------------------------------------------------------------------ // Calculate g(x) //------------------------------------------------------------------------ calcGX.apply(0); //------------------------------------------------------------------------ // Apply gauge fix to current gauge field //------------------------------------------------------------------------ gfix.apply(0); #endif //------------------------------------------------------------------------ // Measure gauge quality and recalculate new Delta(x) //------------------------------------------------------------------------ gfixquality.apply(0); double action = argQ.getAction(); diff = abs(action0 - action); if ((iter % verbose_interval) == (verbose_interval - 1)) printf("Step: %d\tAction: %.16e\ttheta: %.16e\tDelta: %.16e\n", iter + 1, argQ.getAction(), argQ.getTheta(), diff); if ( autotune && ((action - action0) < -1e-14) ) { if ( alpha > 0.01 ) { alpha = 0.95 * alpha; #ifdef GAUGEFIXING_DONT_USE_GX gfixNew.setAlpha(alpha); #else calcGX.setAlpha(alpha); #endif printf(">>>>>>>>>>>>>> Warning: changing alpha down -> %.4e\n", alpha ); } } //------------------------------------------------------------------------ // Check gauge fix quality criterium //------------------------------------------------------------------------ if ( stopWtheta ) { if ( argQ.getTheta() < tolerance ) break; } else { if ( diff < tolerance ) break; } action0 = action; } if ((iter % verbose_interval) != 0 ) printf("Step: %d\tAction: %.16e\ttheta: 
%.16e\tDelta: %.16e\n", iter, argQ.getAction(), argQ.getTheta(), diff); // Reunitarize at end const double unitarize_eps = 1e-14; const double max_error = 1e-10; const int reunit_allow_svd = 1; const int reunit_svd_only = 0; const double svd_rel_error = 1e-6; const double svd_abs_error = 1e-6; setUnitarizeLinksConstants(unitarize_eps, max_error, reunit_allow_svd, reunit_svd_only, svd_rel_error, svd_abs_error); int num_failures = 0; int* num_failures_dev = static_cast<int*>(pool_device_malloc(sizeof(int))); hipMemset(num_failures_dev, 0, sizeof(int)); unitarizeLinks(data, data, num_failures_dev); qudaMemcpy(&num_failures, num_failures_dev, sizeof(int), hipMemcpyDeviceToHost); pool_device_free(num_failures_dev); if ( num_failures > 0 ) { errorQuda("Error in the unitarization\n"); exit(1); } // end reunitarize arg.free(); CUFFT_SAFE_CALL(hipfftDestroy(plan_zt)); CUFFT_SAFE_CALL(hipfftDestroy(plan_xy)); checkCudaError(); qudaDeviceSynchronize(); profileInternalGaugeFixFFT.TPSTOP(QUDA_PROFILE_COMPUTE); if (getVerbosity() > QUDA_SUMMARIZE){ double secs = profileInternalGaugeFixFFT.Last(QUDA_PROFILE_COMPUTE); double fftflop = 5.0 * (log2((double)( data.X()[0] * data.X()[1]) ) + log2( (double)(data.X()[2] * data.X()[3] ))); fftflop *= (double)( data.X()[0] * data.X()[1] * data.X()[2] * data.X()[3] ); double gflops = setinvpsp.flops() + gfixquality.flops(); double gbytes = setinvpsp.bytes() + gfixquality.bytes(); double flop = invpsp.flops() * Elems; double byte = invpsp.bytes() * Elems; flop += (GFRotate.flops() + fftflop) * Elems * 2; byte += GFRotate.bytes() * Elems * 4; //includes FFT reads, assuming 1 read and 1 write per site #ifdef GAUGEFIXING_DONT_USE_GX flop += gfixNew.flops(); byte += gfixNew.bytes(); #else flop += calcGX.flops(); byte += calcGX.bytes(); flop += gfix.flops(); byte += gfix.bytes(); #endif flop += gfixquality.flops(); byte += gfixquality.bytes(); gflops += flop * iter; gbytes += byte * iter; gflops += 4588.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3]; //Reunitarize at end gbytes += 8.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3] * dataOr.Bytes() ; //Reunitarize at end gflops = (gflops * 1e-9) / (secs); gbytes = gbytes / (secs * 1e9); printfQuda("Time: %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops, gbytes); } } template<int Elems, typename Float, typename Gauge> void gaugefixingFFT( Gauge dataOr, cudaGaugeField& data, const int gauge_dir, \ const int Nsteps, const int verbose_interval, const Float alpha, const int autotune, \ const double tolerance, const int stopWtheta) { if ( gauge_dir != 3 ) { printf("Starting Landau gauge fixing with FFTs...\n"); gaugefixingFFT<Elems, Float, Gauge, 4>(dataOr, data, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta); } else { printf("Starting Coulomb gauge fixing with FFTs...\n"); gaugefixingFFT<Elems, Float, Gauge, 3>(dataOr, data, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta); } } template<typename Float> void gaugefixingFFT( cudaGaugeField& data, const int gauge_dir, \ const int Nsteps, const int verbose_interval, const Float alpha, const int autotune, \ const double tolerance, const int stopWtheta) { // Switching to FloatNOrder for the gauge field in order to support RECONSTRUCT_12 // Need to fix this!! 
//9 and 6 means the number of complex elements used to store g(x) and Delta(x) if ( data.isNative() ) { if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) { //printfQuda("QUDA_RECONSTRUCT_NO\n"); typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge; gaugefixingFFT<9, Float>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) { //printfQuda("QUDA_RECONSTRUCT_12\n"); typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge; gaugefixingFFT<6, Float>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) { //printfQuda("QUDA_RECONSTRUCT_8\n"); typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge; gaugefixingFFT<6, Float>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta); } else { errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct()); } } else { errorQuda("Invalid Gauge Order\n"); } } #endif // GPU_GAUGE_ALG /** * @brief Gauge fixing with Steepest descent method with FFTs with support for single GPU only. * @param[in,out] data, quda gauge field * @param[in] gauge_dir, 3 for Coulomb gauge fixing, other for Landau gauge fixing * @param[in] Nsteps, maximum number of steps to perform gauge fixing * @param[in] verbose_interval, print gauge fixing info when iteration count is a multiple of this * @param[in] alpha, gauge fixing parameter of the method, most common value is 0.08 * @param[in] autotune, 1 to autotune the method, i.e., if the Fg inverts its tendency we decrease the alpha value * @param[in] tolerance, torelance value to stop the method, if this value is zero then the method stops when iteration reachs the maximum number of steps defined by Nsteps * @param[in] stopWtheta, 0 for MILC criterium and 1 to use the theta value */ void gaugefixingFFT( cudaGaugeField& data, const int gauge_dir, \ const int Nsteps, const int verbose_interval, const double alpha, const int autotune, \ const double tolerance, const int stopWtheta) { #ifdef GPU_GAUGE_ALG #ifdef MULTI_GPU if(comm_dim_partitioned(0) || comm_dim_partitioned(1) || comm_dim_partitioned(2) || comm_dim_partitioned(3)) errorQuda("Gauge Fixing with FFTs in multi-GPU support NOT implemented yet!\n"); #endif if ( data.Precision() == QUDA_HALF_PRECISION ) { errorQuda("Half precision not supported\n"); } if ( data.Precision() == QUDA_SINGLE_PRECISION ) { gaugefixingFFT<float> (data, gauge_dir, Nsteps, verbose_interval, (float)alpha, autotune, tolerance, stopWtheta); } else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) { gaugefixingFFT<double>(data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta); } else { errorQuda("Precision %d not supported", data.Precision()); } #else errorQuda("Gauge fixing has bot been built"); #endif } }
9698dc0bdc84e3a27e22202330c185966b735a48.cu
#include <quda_internal.h> #include <quda_matrix.h> #include <tune_quda.h> #include <gauge_field.h> #include <gauge_field_order.h> #include <launch_kernel.cuh> #include <unitarization_links.h> #include <atomic.cuh> #include <cub_helper.cuh> #include <index_helper.cuh> #include <cufft.h> #ifdef GPU_GAUGE_ALG #include <CUFFT_Plans.h> #endif namespace quda { #ifdef GPU_GAUGE_ALG //Comment if you don't want to use textures for Delta(x) and g(x) #define GAUGEFIXING_SITE_MATRIX_LOAD_TEX //UNCOMMENT THIS IF YOU WAN'T TO USE LESS MEMORY #define GAUGEFIXING_DONT_USE_GX //Without using the precalculation of g(x), //we loose some performance, because Delta(x) is written in normal lattice coordinates need for the FFTs //and the gauge array in even/odd format #ifdef HOST_DEBUG #ifdef GAUGEFIXING_DONT_USE_GX #warning Not using precalculated g(x) #else #warning Using precalculated g(x) #endif #endif #ifndef FL_UNITARIZE_PI #define FL_UNITARIZE_PI 3.14159265358979323846 #endif texture<float2, 1, cudaReadModeElementType> GXTexSingle; texture<int4, 1, cudaReadModeElementType> GXTexDouble; //Delta is only stored using 12 real number parameters, // (0,0), (0,1), (0,2), (1,1), (1,2) and (2,2) // (0,0), (1,1) and (0,1) don't have real part, however we need a complex for the FFTs texture<float2, 1, cudaReadModeElementType> DELTATexSingle; texture<int4, 1, cudaReadModeElementType> DELTATexDouble; template <class T> inline __device__ T TEXTURE_GX(int id){ return 0.0; } template <> inline __device__ complex<float> TEXTURE_GX<complex<float> >(int id){ return tex1Dfetch(GXTexSingle, id); } template <> inline __device__ complex<double> TEXTURE_GX<complex<double> >(int id){ int4 u = tex1Dfetch(GXTexDouble, id); return complex<double>(__hiloint2double(u.y, u.x), __hiloint2double(u.w, u.z)); } template <class T> inline __device__ T TEXTURE_DELTA(int id){ return 0.0; } template <> inline __device__ complex<float> TEXTURE_DELTA<complex<float> >(int id){ return tex1Dfetch(DELTATexSingle, id); } template <> inline __device__ complex<double> TEXTURE_DELTA<complex<double> >(int id){ int4 u = tex1Dfetch(DELTATexDouble, id); return complex<double>(__hiloint2double(u.y, u.x), __hiloint2double(u.w, u.z)); } static void BindTex(complex<float> *delta, complex<float> *gx, size_t bytes){ #ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX #ifndef GAUGEFIXING_DONT_USE_GX cudaBindTexture(0, GXTexSingle, gx, bytes); #endif cudaBindTexture(0, DELTATexSingle, delta, bytes); #endif } static void BindTex(complex<double> *delta, complex<double> *gx, size_t bytes){ #ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX #ifndef GAUGEFIXING_DONT_USE_GX cudaBindTexture(0, GXTexDouble, gx, bytes); #endif cudaBindTexture(0, DELTATexDouble, delta, bytes); #endif } static void UnBindTex(complex<float> *delta, complex<float> *gx){ #ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX #ifndef GAUGEFIXING_DONT_USE_GX cudaUnbindTexture(GXTexSingle); #endif cudaUnbindTexture(DELTATexSingle); #endif } static void UnBindTex(complex<double> *delta, complex<double> *gx){ #ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX #ifndef GAUGEFIXING_DONT_USE_GX cudaUnbindTexture(GXTexDouble); #endif cudaUnbindTexture(DELTATexDouble); #endif } template <typename Float> struct GaugeFixFFTRotateArg { int threads; // number of active threads required int X[4]; // grid dimensions complex<Float> *tmp0; complex<Float> *tmp1; GaugeFixFFTRotateArg(const cudaGaugeField &data){ for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir]; threads = X[0] * X[1] * X[2] * X[3]; tmp0 = 0; tmp1 = 0; } }; template <int direction, 
typename Float> __global__ void fft_rotate_kernel_2D2D(GaugeFixFFTRotateArg<Float> arg){ //Cmplx *data_in, Cmplx *data_out){ int id = blockIdx.x * blockDim.x + threadIdx.x; if ( id >= arg.threads ) return; if ( direction == 0 ) { int x3 = id / (arg.X[0] * arg.X[1] * arg.X[2]); int x2 = (id / (arg.X[0] * arg.X[1])) % arg.X[2]; int x1 = (id / arg.X[0]) % arg.X[1]; int x0 = id % arg.X[0]; int id = x0 + (x1 + (x2 + x3 * arg.X[2]) * arg.X[1]) * arg.X[0]; int id_out = x2 + (x3 + (x0 + x1 * arg.X[0]) * arg.X[3]) * arg.X[2]; arg.tmp1[id_out] = arg.tmp0[id]; //data_out[id_out] = data_in[id]; } if ( direction == 1 ) { int x1 = id / (arg.X[2] * arg.X[3] * arg.X[0]); int x0 = (id / (arg.X[2] * arg.X[3])) % arg.X[0]; int x3 = (id / arg.X[2]) % arg.X[3]; int x2 = id % arg.X[2]; int id = x2 + (x3 + (x0 + x1 * arg.X[0]) * arg.X[3]) * arg.X[2]; int id_out = x0 + (x1 + (x2 + x3 * arg.X[2]) * arg.X[1]) * arg.X[0]; arg.tmp1[id_out] = arg.tmp0[id]; //data_out[id_out] = data_in[id]; } } template<typename Float> class GaugeFixFFTRotate : Tunable { GaugeFixFFTRotateArg<Float> arg; int direction; mutable char aux_string[128]; // used as a label in the autotuner private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } //bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.threads; } public: GaugeFixFFTRotate(GaugeFixFFTRotateArg<Float> &arg) : arg(arg) { direction = 0; } ~GaugeFixFFTRotate () { } void setDirection(int dir, complex<Float> *data_in, complex<Float> *data_out){ direction = dir; arg.tmp0 = data_in; arg.tmp1 = data_out; } void apply(const cudaStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); if ( direction == 0 ) fft_rotate_kernel_2D2D<0, Float ><< < tp.grid, tp.block, 0, stream >> > (arg); else if ( direction == 1 ) fft_rotate_kernel_2D2D<1, Float ><< < tp.grid, tp.block, 0, stream >> > (arg); else errorQuda("Error in GaugeFixFFTRotate option.\n"); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } long long flops() const { return 0; } long long bytes() const { return 4LL * sizeof(Float) * arg.threads; } }; template <typename Float, typename Gauge> struct GaugeFixQualityArg : public ReduceArg<double2> { int threads; // number of active threads required int X[4]; // grid dimensions Gauge dataOr; complex<Float> *delta; GaugeFixQualityArg(const Gauge &dataOr, const cudaGaugeField &data, complex<Float> * delta) : ReduceArg<double2>(), dataOr(dataOr), delta(delta) { for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir]; threads = data.VolumeCB(); } double getAction(){ return result_h[0].x; } double getTheta(){ return result_h[0].y; } }; template<int blockSize, int Elems, typename Float, typename Gauge, int gauge_dir> __global__ void computeFix_quality(GaugeFixQualityArg<Float, Gauge> argQ){ int idx_cb = threadIdx.x + blockIdx.x * blockDim.x; int parity = threadIdx.y; double2 data = make_double2(0.0,0.0); while (idx_cb < argQ.threads) { typedef complex<Float> Cmplx; int x[4]; getCoords(x, idx_cb, argQ.X, parity); Matrix<Cmplx,3> delta; setZero(&delta); //idx = linkIndex(x,X); for ( int mu = 0; mu < gauge_dir; mu++ ) { 
Matrix<Cmplx,3> U; argQ.dataOr.load((Float *)(U.data), idx_cb, mu, parity); delta -= U; } //18*gauge_dir data.x += -delta(0, 0).x - delta(1, 1).x - delta(2, 2).x; //2 for ( int mu = 0; mu < gauge_dir; mu++ ) { Matrix<Cmplx,3> U; argQ.dataOr.load((Float*)(U.data),linkIndexM1(x,argQ.X,mu), mu, 1 - parity); delta += U; } //18*gauge_dir delta -= conj(delta); //18 //SAVE DELTA!!!!! SubTraceUnit(delta); int idx = getIndexFull(idx_cb, argQ.X, parity); //Saving Delta argQ.delta[idx] = delta(0,0); argQ.delta[idx + 2 * argQ.threads] = delta(0,1); argQ.delta[idx + 4 * argQ.threads] = delta(0,2); argQ.delta[idx + 6 * argQ.threads] = delta(1,1); argQ.delta[idx + 8 * argQ.threads] = delta(1,2); argQ.delta[idx + 10 * argQ.threads] = delta(2,2); //12 data.y += getRealTraceUVdagger(delta, delta); //35 //T=36*gauge_dir+65 idx_cb += blockDim.x * gridDim.x; } reduce2d<blockSize,2>(argQ, data); } template<int Elems, typename Float, typename Gauge, int gauge_dir> class GaugeFixQuality : TunableLocalParity { GaugeFixQualityArg<Float, Gauge> argQ; mutable char aux_string[128]; // used as a label in the autotuner private: bool tuneGridDim() const { return true; } public: GaugeFixQuality(GaugeFixQualityArg<Float, Gauge> &argQ) : argQ(argQ) { } ~GaugeFixQuality () { } void apply(const cudaStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); argQ.result_h[0] = make_double2(0.0,0.0); LAUNCH_KERNEL_LOCAL_PARITY(computeFix_quality, tp, stream, argQ, Elems, Float, Gauge, gauge_dir); qudaDeviceSynchronize(); argQ.result_h[0].x /= (double)(3 * gauge_dir * 2 * argQ.threads); argQ.result_h[0].y /= (double)(3 * 2 * argQ.threads); } TuneKey tuneKey() const { std::stringstream vol; vol << argQ.X[0] << "x" << argQ.X[1] << "x" << argQ.X[2] << "x" << argQ.X[3]; sprintf(aux_string,"threads=%d,prec=%lu,gaugedir=%d", argQ.threads, sizeof(Float), gauge_dir); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } long long flops() const { return (36LL * gauge_dir + 65LL) * 2 * argQ.threads; } // Only correct if there is no link reconstruction, no cub reduction accounted also long long bytes() const { return (2LL * gauge_dir + 2LL) * Elems * 2 * argQ.threads * sizeof(Float); } //Not accounting the reduction!!! 
}; template <typename Float> struct GaugeFixArg { int threads; // number of active threads required int X[4]; // grid dimensions cudaGaugeField &data; Float *invpsq; complex<Float> *delta; complex<Float> *gx; GaugeFixArg( cudaGaugeField & data, const int Elems) : data(data){ for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir]; threads = X[0] * X[1] * X[2] * X[3]; invpsq = (Float*)device_malloc(sizeof(Float) * threads); delta = (complex<Float>*)device_malloc(sizeof(complex<Float>) * threads * 6); #ifdef GAUGEFIXING_DONT_USE_GX gx = (complex<Float>*)device_malloc(sizeof(complex<Float>) * threads); #else gx = (complex<Float>*)device_malloc(sizeof(complex<Float>) * threads * Elems); #endif BindTex(delta, gx, sizeof(complex<Float>) * threads * Elems); } void free(){ UnBindTex(delta, gx); device_free(invpsq); device_free(delta); device_free(gx); } }; template <typename Float> __global__ void kernel_gauge_set_invpsq(GaugeFixArg<Float> arg){ int id = blockIdx.x * blockDim.x + threadIdx.x; if ( id >= arg.threads ) return; int x1 = id / (arg.X[2] * arg.X[3] * arg.X[0]); int x0 = (id / (arg.X[2] * arg.X[3])) % arg.X[0]; int x3 = (id / arg.X[2]) % arg.X[3]; int x2 = id % arg.X[2]; //id = x2 + (x3 + (x0 + x1 * arg.X[0]) * arg.X[3]) * arg.X[2]; Float sx = sin( (Float)x0 * FL_UNITARIZE_PI / (Float)arg.X[0]); Float sy = sin( (Float)x1 * FL_UNITARIZE_PI / (Float)arg.X[1]); Float sz = sin( (Float)x2 * FL_UNITARIZE_PI / (Float)arg.X[2]); Float st = sin( (Float)x3 * FL_UNITARIZE_PI / (Float)arg.X[3]); Float sinsq = sx * sx + sy * sy + sz * sz + st * st; Float prcfact = 0.0; //The FFT normalization is done here if ( sinsq > 0.00001 ) prcfact = 4.0 / (sinsq * (Float)arg.threads); arg.invpsq[id] = prcfact; } template<typename Float> class GaugeFixSETINVPSP : Tunable { GaugeFixArg<Float> arg; mutable char aux_string[128]; // used as a label in the autotuner private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.threads; } public: GaugeFixSETINVPSP(GaugeFixArg<Float> &arg) : arg(arg) { } ~GaugeFixSETINVPSP () { } void apply(const cudaStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); kernel_gauge_set_invpsq<Float><< < tp.grid, tp.block, 0, stream >> > (arg); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } long long flops() const { return 21 * arg.threads; } long long bytes() const { return sizeof(Float) * arg.threads; } }; template<typename Float> __global__ void kernel_gauge_mult_norm_2D(GaugeFixArg<Float> arg){ int id = blockIdx.x * blockDim.x + threadIdx.x; if ( id < arg.threads ) arg.gx[id] = arg.gx[id] * arg.invpsq[id]; } template<typename Float> class GaugeFixINVPSP : Tunable { GaugeFixArg<Float> arg; mutable char aux_string[128]; // used as a label in the autotuner private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } //bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.threads; } public: GaugeFixINVPSP(GaugeFixArg<Float> &arg) : arg(arg){ cudaFuncSetCacheConfig( kernel_gauge_mult_norm_2D<Float>, cudaFuncCachePreferL1); } ~GaugeFixINVPSP () { } void apply(const cudaStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); kernel_gauge_mult_norm_2D<Float><< < tp.grid, tp.block, 0, stream >> > (arg); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } void preTune(){ //since delta contents are irrelevant at this point, we can swap gx with delta complex<Float> *tmp = arg.gx; arg.gx = arg.delta; arg.delta = tmp; } void postTune(){ arg.gx = arg.delta; } long long flops() const { return 2LL * arg.threads; } long long bytes() const { return 5LL * sizeof(Float) * arg.threads; } }; template <typename Float> __host__ __device__ inline void reunit_link( Matrix<complex<Float>,3> &U ){ complex<Float> t2((Float)0.0, (Float)0.0); Float t1 = 0.0; //first normalize first row //sum of squares of row #pragma unroll for ( int c = 0; c < 3; c++ ) t1 += norm(U(0,c)); t1 = (Float)1.0 / sqrt(t1); //14 //used to normalize row #pragma unroll for ( int c = 0; c < 3; c++ ) U(0,c) *= t1; //6 #pragma unroll for ( int c = 0; c < 3; c++ ) t2 += conj(U(0,c)) * U(1,c); //24 #pragma unroll for ( int c = 0; c < 3; c++ ) U(1,c) -= t2 * U(0,c); //24 //normalize second row //sum of squares of row t1 = 0.0; #pragma unroll for ( int c = 0; c < 3; c++ ) t1 += norm(U(1,c)); t1 = (Float)1.0 / sqrt(t1); //14 //used to normalize row #pragma unroll for ( int c = 0; c < 3; c++ ) U(1, c) *= t1; //6 //Reconstruct lat row U(2,0) = conj(U(0,1) * U(1,2) - U(0,2) * U(1,1)); U(2,1) = conj(U(0,2) * U(1,0) - U(0,0) * U(1,2)); U(2,2) = conj(U(0,0) * U(1,1) - U(0,1) * U(1,0)); //42 //T=130 } #ifdef GAUGEFIXING_DONT_USE_GX template <typename Float, typename Gauge> __global__ void kernel_gauge_fix_U_EO_NEW( GaugeFixArg<Float> arg, Gauge dataOr, Float half_alpha){ int id = threadIdx.x + blockIdx.x * blockDim.x; int parity = threadIdx.y; if ( id >= arg.threads/2 ) return; typedef complex<Float> Cmplx; int x[4]; getCoords(x, id, arg.X, parity); int idx = ((x[3] * arg.X[2] + x[2]) * arg.X[1] + x[1]) * arg.X[0] + x[0]; Matrix<Cmplx,3> de; //Read Delta #ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX de(0,0) = TEXTURE_DELTA<Cmplx>(idx + 0 * arg.threads); de(0,1) = TEXTURE_DELTA<Cmplx>(idx + 1 * arg.threads); de(0,2) = TEXTURE_DELTA<Cmplx>(idx + 2 * arg.threads); de(1,1) = TEXTURE_DELTA<Cmplx>(idx + 3 * arg.threads); de(1,2) = TEXTURE_DELTA<Cmplx>(idx + 4 * arg.threads); de(2,2) = TEXTURE_DELTA<Cmplx>(idx + 5 * arg.threads); #else de(0,0) = arg.delta[idx + 0 * arg.threads]; de(0,1) = arg.delta[idx + 1 * arg.threads]; de(0,2) = arg.delta[idx + 2 * arg.threads]; de(1,1) = arg.delta[idx + 3 * arg.threads]; de(1,2) = arg.delta[idx + 4 * arg.threads]; de(2,2) = arg.delta[idx + 5 * arg.threads]; #endif de(1,0) = Cmplx(-de(0,1).x, de(0,1).y); de(2,0) = Cmplx(-de(0,2).x, de(0,2).y); de(2,1) = Cmplx(-de(1,2).x, de(1,2).y); Matrix<Cmplx,3> g; setIdentity(&g); g += de * half_alpha; //36 reunit_link<Float>( g ); //130 for ( int mu = 0; mu < 4; mu++ ) { Matrix<Cmplx,3> U; Matrix<Cmplx,3> g0; dataOr.load((Float*)(U.data),id, mu, parity); U = g * U; //198 idx = linkNormalIndexP1(x,arg.X,mu); //Read Delta #ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX de(0,0) 
= TEXTURE_DELTA<Cmplx>(idx + 0 * arg.threads); de(0,1) = TEXTURE_DELTA<Cmplx>(idx + 1 * arg.threads); de(0,2) = TEXTURE_DELTA<Cmplx>(idx + 2 * arg.threads); de(1,1) = TEXTURE_DELTA<Cmplx>(idx + 3 * arg.threads); de(1,2) = TEXTURE_DELTA<Cmplx>(idx + 4 * arg.threads); de(2,2) = TEXTURE_DELTA<Cmplx>(idx + 5 * arg.threads); #else de(0,0) = arg.delta[idx + 0 * arg.threads]; de(0,1) = arg.delta[idx + 1 * arg.threads]; de(0,2) = arg.delta[idx + 2 * arg.threads]; de(1,1) = arg.delta[idx + 3 * arg.threads]; de(1,2) = arg.delta[idx + 4 * arg.threads]; de(2,2) = arg.delta[idx + 5 * arg.threads]; #endif de(1,0) = Cmplx(-de(0,1).x, de(0,1).y); de(2,0) = Cmplx(-de(0,2).x, de(0,2).y); de(2,1) = Cmplx(-de(1,2).x, de(1,2).y); setIdentity(&g0); g0 += de * half_alpha; //36 reunit_link<Float>( g0 ); //130 U = U * conj(g0); //198 dataOr.save((Float*)(U.data),id, mu, parity); } } template<typename Float, typename Gauge> class GaugeFixNEW : TunableLocalParity { GaugeFixArg<Float> arg; Float half_alpha; Gauge dataOr; mutable char aux_string[128]; // used as a label in the autotuner private: // since GaugeFixArg is used by other kernels that don't use // tunableLocalParity, arg.threads stores Volume and not VolumeCB // so we need to divide by two unsigned int minThreads() const { return arg.threads/2; } public: GaugeFixNEW(Gauge & dataOr, GaugeFixArg<Float> &arg, Float alpha) : dataOr(dataOr), arg(arg) { half_alpha = alpha * 0.5; cudaFuncSetCacheConfig( kernel_gauge_fix_U_EO_NEW<Float, Gauge>, cudaFuncCachePreferL1); } ~GaugeFixNEW () { } void setAlpha(Float alpha){ half_alpha = alpha * 0.5; } void apply(const cudaStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); kernel_gauge_fix_U_EO_NEW<Float, Gauge><< < tp.grid, tp.block, 0, stream >> > (arg, dataOr, half_alpha); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x" << arg.X[1] << "x" << arg.X[2] << "x" << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } //need this void preTune() { arg.data.backup(); } void postTune() { arg.data.restore(); } long long flops() const { return 2414LL * arg.threads; //Not accounting here the reconstruction of the gauge if 12 or 8!!!!!! 
} long long bytes() const { return ( dataOr.Bytes() * 4LL + 5 * 12LL * sizeof(Float)) * arg.threads; } }; #else template <int Elems, typename Float> __global__ void kernel_gauge_GX(GaugeFixArg<Float> arg, Float half_alpha){ int id = blockIdx.x * blockDim.x + threadIdx.x; if ( id >= arg.threads ) return; typedef complex<Float> Cmplx; Matrix<Cmplx,3> de; //Read Delta #ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX de(0,0) = TEXTURE_DELTA<Cmplx>(id); de(0,1) = TEXTURE_DELTA<Cmplx>(id + arg.threads); de(0,2) = TEXTURE_DELTA<Cmplx>(id + 2 * arg.threads); de(1,1) = TEXTURE_DELTA<Cmplx>(id + 3 * arg.threads); de(1,2) = TEXTURE_DELTA<Cmplx>(id + 4 * arg.threads); de(2,2) = TEXTURE_DELTA<Cmplx>(id + 5 * arg.threads); #else de(0,0) = arg.delta[id]; de(0,1) = arg.delta[id + arg.threads]; de(0,2) = arg.delta[id + 2 * arg.threads]; de(1,1) = arg.delta[id + 3 * arg.threads]; de(1,2) = arg.delta[id + 4 * arg.threads]; de(2,2) = arg.delta[id + 5 * arg.threads]; #endif de(1,0) = makeComplex(-de(0,1).x, de(0,1).y); de(2,0) = makeComplex(-de(0,2).x, de(0,2).y); de(2,1) = makeComplex(-de(1,2).x, de(1,2).y); Matrix<Cmplx,3> g; setIdentity(&g); g += de * half_alpha; //36 reunit_link<Float>( g ); //130 //gx is represented in even/odd order //normal lattice index to even/odd index int x3 = id / (arg.X[0] * arg.X[1] * arg.X[2]); int x2 = (id / (arg.X[0] * arg.X[1])) % arg.X[2]; int x1 = (id / arg.X[0]) % arg.X[1]; int x0 = id % arg.X[0]; id = (x0 + (x1 + (x2 + x3 * arg.X[2]) * arg.X[1]) * arg.X[0]) >> 1; id += ((x0 + x1 + x2 + x3) & 1 ) * arg.threads / 2; for ( int i = 0; i < Elems; i++ ) arg.gx[id + i * arg.threads] = g.data[i]; //T=166 for Elems 9 //T=208 for Elems 6 } template<int Elems, typename Float> class GaugeFix_GX : Tunable { GaugeFixArg<Float> arg; Float half_alpha; mutable char aux_string[128]; // used as a label in the autotuner private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } //bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.threads; } public: GaugeFix_GX(GaugeFixArg<Float> &arg, Float alpha) : arg(arg) { half_alpha = alpha * 0.5; cudaFuncSetCacheConfig( kernel_gauge_GX<Elems, Float>, cudaFuncCachePreferL1); } ~GaugeFix_GX () { } void setAlpha(Float alpha){ half_alpha = alpha * 0.5; } void apply(const cudaStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); kernel_gauge_GX<Elems, Float><< < tp.grid, tp.block, 0, stream >> > (arg, half_alpha); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } long long flops() const { if ( Elems == 6 ) return 208LL * arg.threads; else return 166LL * arg.threads; } long long bytes() const { return 4LL * Elems * sizeof(Float) * arg.threads; } }; template <int Elems, typename Float, typename Gauge> __global__ void kernel_gauge_fix_U_EO( GaugeFixArg<Float> arg, Gauge dataOr){ int idd = threadIdx.x + blockIdx.x * blockDim.x; if ( idd >= arg.threads ) return; int parity = 0; int id = idd; if ( idd >= arg.threads / 2 ) { parity = 1; id -= arg.threads / 2; } typedef complex<Float> Cmplx; Matrix<Cmplx,3> g; //for(int i = 0; i < Elems; i++) g.data[i] = arg.gx[idd + i * arg.threads]; for ( int i = 0; i < Elems; i++ ) { #ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX g.data[i] = TEXTURE_GX<Cmplx>(idd + i * arg.threads); #else g.data[i] = arg.gx[idd + i * arg.threads]; #endif } if ( Elems == 6 ) { g(2,0) = conj(g(0,1) * g(1,2) - g(0,2) * g(1,1)); g(2,1) = conj(g(0,2) * g(1,0) - g(0,0) * g(1,2)); g(2,2) = conj(g(0,0) * g(1,1) - g(0,1) * g(1,0)); //42 } int x[4]; getCoords(x, id, arg.X, parity); for ( int mu = 0; mu < 4; mu++ ) { Matrix<Cmplx,3> U; Matrix<Cmplx,3> g0; dataOr.load((Float*)(U.data),id, mu, parity); U = g * U; //198 int idm1 = linkIndexP1(x,arg.X,mu); idm1 += (1 - parity) * arg.threads / 2; //for(int i = 0; i < Elems; i++) g0.data[i] = arg.gx[idm1 + i * arg.threads]; for ( int i = 0; i < Elems; i++ ) { #ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX g0.data[i] = TEXTURE_GX<Cmplx>(idm1 + i * arg.threads); #else g0.data[i] = arg.gx[idm1 + i * arg.threads]; #endif } if ( Elems == 6 ) { g0(2,0) = conj(g0(0,1) * g0(1,2) - g0(0,2) * g0(1,1)); g0(2,1) = conj(g0(0,2) * g0(1,0) - g0(0,0) * g0(1,2)); g0(2,2) = conj(g0(0,0) * g0(1,1) - g0(0,1) * g0(1,0)); //42 } U = U * conj(g0); //198 dataOr.save((Float*)(U.data),id, mu, parity); } //T=42+4*(198*2+42) Elems=6 //T=4*(198*2) Elems=9 //Not accounting here the reconstruction of the gauge if 12 or 8!!!!!! } template<int Elems, typename Float, typename Gauge> class GaugeFix : Tunable { GaugeFixArg<Float> arg; Gauge dataOr; mutable char aux_string[128]; // used as a label in the autotuner private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } //bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.threads; } public: GaugeFix(Gauge & dataOr, GaugeFixArg<Float> &arg) : dataOr(dataOr), arg(arg) { cudaFuncSetCacheConfig( kernel_gauge_fix_U_EO<Elems, Float, Gauge>, cudaFuncCachePreferL1); } ~GaugeFix () { } void apply(const cudaStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); kernel_gauge_fix_U_EO<Elems, Float, Gauge><< < tp.grid, tp.block, 0, stream >> > (arg, dataOr); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } //need this void preTune() { arg.data.backup(); } void postTune() { arg.data.restore(); } long long flops() const { if ( Elems == 6 ) return 1794LL * arg.threads; else return 1536LL * arg.threads; //Not accounting here the reconstruction of the gauge if 12 or 8!!!!!! } long long bytes() const { return 26LL * Elems * sizeof(Float) * arg.threads; } }; #endif //GAUGEFIXING_DONT_USE_GX template<int Elems, typename Float, typename Gauge, int gauge_dir> void gaugefixingFFT( Gauge dataOr, cudaGaugeField& data, \ const int Nsteps, const int verbose_interval, \ const Float alpha0, const int autotune, const double tolerance, \ const int stopWtheta) { TimeProfile profileInternalGaugeFixFFT("InternalGaugeFixQudaFFT", false); profileInternalGaugeFixFFT.TPSTART(QUDA_PROFILE_COMPUTE); Float alpha = alpha0; std::cout << "\tAlpha parameter of the Steepest Descent Method: " << alpha << std::endl; if ( autotune ) std::cout << "\tAuto tune active: yes" << std::endl; else std::cout << "\tAuto tune active: no" << std::endl; std::cout << "\tStop criterium: " << tolerance << std::endl; if ( stopWtheta ) std::cout << "\tStop criterium method: theta" << std::endl; else std::cout << "\tStop criterium method: Delta" << std::endl; std::cout << "\tMaximum number of iterations: " << Nsteps << std::endl; std::cout << "\tPrint convergence results at every " << verbose_interval << " steps" << std::endl; unsigned int delta_pad = data.X()[0] * data.X()[1] * data.X()[2] * data.X()[3]; int4 size = make_int4( data.X()[0], data.X()[1], data.X()[2], data.X()[3] ); cufftHandle plan_xy; cufftHandle plan_zt; GaugeFixArg<Float> arg(data, Elems); SetPlanFFT2DMany( plan_zt, size, 0, arg.delta); //for space and time ZT SetPlanFFT2DMany( plan_xy, size, 1, arg.delta); //with space only XY GaugeFixFFTRotateArg<Float> arg_rotate(data); GaugeFixFFTRotate<Float> GFRotate(arg_rotate); GaugeFixSETINVPSP<Float> setinvpsp(arg); setinvpsp.apply(0); GaugeFixINVPSP<Float> invpsp(arg); #ifdef GAUGEFIXING_DONT_USE_GX //without using GX, gx will be created only for plane rotation but with less size GaugeFixNEW<Float, Gauge> gfixNew(dataOr, arg, alpha); #else //using GX GaugeFix_GX<Elems, Float> calcGX(arg, alpha); GaugeFix<Elems, Float, Gauge> gfix(dataOr, arg); #endif GaugeFixQualityArg<Float, Gauge> argQ(dataOr, data, arg.delta); GaugeFixQuality<Elems, Float, Gauge, gauge_dir> gfixquality(argQ); gfixquality.apply(0); double action0 = argQ.getAction(); printf("Step: %d\tAction: %.16e\ttheta: %.16e\n", 0, argQ.getAction(), argQ.getTheta()); double diff = 0.0; int iter = 0; for ( iter = 0; iter < Nsteps; iter++ ) { for ( int k = 0; k < 6; k++ ) { //------------------------------------------------------------------------ // Set a pointer do the element k in lattice volume // each element is stored with stride lattice volume // it 
uses gx as temporary array!!!!!! //------------------------------------------------------------------------ complex<Float> *_array = arg.delta + k * delta_pad; ////// 2D FFT + 2D FFT //------------------------------------------------------------------------ // Perform FFT on xy plane //------------------------------------------------------------------------ ApplyFFT(plan_xy, _array, arg.gx, CUFFT_FORWARD); //------------------------------------------------------------------------ // Rotate hypercube, xyzt -> ztxy //------------------------------------------------------------------------ GFRotate.setDirection(0, arg.gx, _array); GFRotate.apply(0); //------------------------------------------------------------------------ // Perform FFT on zt plane //------------------------------------------------------------------------ ApplyFFT(plan_zt, _array, arg.gx, CUFFT_FORWARD); //------------------------------------------------------------------------ // Normalize FFT and apply pmax^2/p^2 //------------------------------------------------------------------------ invpsp.apply(0); //------------------------------------------------------------------------ // Perform IFFT on zt plane //------------------------------------------------------------------------ ApplyFFT(plan_zt, arg.gx, _array, CUFFT_INVERSE); //------------------------------------------------------------------------ // Rotate hypercube, ztxy -> xyzt //------------------------------------------------------------------------ GFRotate.setDirection(1, _array, arg.gx); GFRotate.apply(0); //------------------------------------------------------------------------ // Perform IFFT on xy plane //------------------------------------------------------------------------ ApplyFFT(plan_xy, arg.gx, _array, CUFFT_INVERSE); } #ifdef GAUGEFIXING_DONT_USE_GX //------------------------------------------------------------------------ // Apply gauge fix to current gauge field //------------------------------------------------------------------------ gfixNew.apply(0); #else //------------------------------------------------------------------------ // Calculate g(x) //------------------------------------------------------------------------ calcGX.apply(0); //------------------------------------------------------------------------ // Apply gauge fix to current gauge field //------------------------------------------------------------------------ gfix.apply(0); #endif //------------------------------------------------------------------------ // Measure gauge quality and recalculate new Delta(x) //------------------------------------------------------------------------ gfixquality.apply(0); double action = argQ.getAction(); diff = abs(action0 - action); if ((iter % verbose_interval) == (verbose_interval - 1)) printf("Step: %d\tAction: %.16e\ttheta: %.16e\tDelta: %.16e\n", iter + 1, argQ.getAction(), argQ.getTheta(), diff); if ( autotune && ((action - action0) < -1e-14) ) { if ( alpha > 0.01 ) { alpha = 0.95 * alpha; #ifdef GAUGEFIXING_DONT_USE_GX gfixNew.setAlpha(alpha); #else calcGX.setAlpha(alpha); #endif printf(">>>>>>>>>>>>>> Warning: changing alpha down -> %.4e\n", alpha ); } } //------------------------------------------------------------------------ // Check gauge fix quality criterium //------------------------------------------------------------------------ if ( stopWtheta ) { if ( argQ.getTheta() < tolerance ) break; } else { if ( diff < tolerance ) break; } action0 = action; } if ((iter % verbose_interval) != 0 ) printf("Step: %d\tAction: %.16e\ttheta: 
%.16e\tDelta: %.16e\n", iter, argQ.getAction(), argQ.getTheta(), diff); // Reunitarize at end const double unitarize_eps = 1e-14; const double max_error = 1e-10; const int reunit_allow_svd = 1; const int reunit_svd_only = 0; const double svd_rel_error = 1e-6; const double svd_abs_error = 1e-6; setUnitarizeLinksConstants(unitarize_eps, max_error, reunit_allow_svd, reunit_svd_only, svd_rel_error, svd_abs_error); int num_failures = 0; int* num_failures_dev = static_cast<int*>(pool_device_malloc(sizeof(int))); cudaMemset(num_failures_dev, 0, sizeof(int)); unitarizeLinks(data, data, num_failures_dev); qudaMemcpy(&num_failures, num_failures_dev, sizeof(int), cudaMemcpyDeviceToHost); pool_device_free(num_failures_dev); if ( num_failures > 0 ) { errorQuda("Error in the unitarization\n"); exit(1); } // end reunitarize arg.free(); CUFFT_SAFE_CALL(cufftDestroy(plan_zt)); CUFFT_SAFE_CALL(cufftDestroy(plan_xy)); checkCudaError(); qudaDeviceSynchronize(); profileInternalGaugeFixFFT.TPSTOP(QUDA_PROFILE_COMPUTE); if (getVerbosity() > QUDA_SUMMARIZE){ double secs = profileInternalGaugeFixFFT.Last(QUDA_PROFILE_COMPUTE); double fftflop = 5.0 * (log2((double)( data.X()[0] * data.X()[1]) ) + log2( (double)(data.X()[2] * data.X()[3] ))); fftflop *= (double)( data.X()[0] * data.X()[1] * data.X()[2] * data.X()[3] ); double gflops = setinvpsp.flops() + gfixquality.flops(); double gbytes = setinvpsp.bytes() + gfixquality.bytes(); double flop = invpsp.flops() * Elems; double byte = invpsp.bytes() * Elems; flop += (GFRotate.flops() + fftflop) * Elems * 2; byte += GFRotate.bytes() * Elems * 4; //includes FFT reads, assuming 1 read and 1 write per site #ifdef GAUGEFIXING_DONT_USE_GX flop += gfixNew.flops(); byte += gfixNew.bytes(); #else flop += calcGX.flops(); byte += calcGX.bytes(); flop += gfix.flops(); byte += gfix.bytes(); #endif flop += gfixquality.flops(); byte += gfixquality.bytes(); gflops += flop * iter; gbytes += byte * iter; gflops += 4588.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3]; //Reunitarize at end gbytes += 8.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3] * dataOr.Bytes() ; //Reunitarize at end gflops = (gflops * 1e-9) / (secs); gbytes = gbytes / (secs * 1e9); printfQuda("Time: %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops, gbytes); } } template<int Elems, typename Float, typename Gauge> void gaugefixingFFT( Gauge dataOr, cudaGaugeField& data, const int gauge_dir, \ const int Nsteps, const int verbose_interval, const Float alpha, const int autotune, \ const double tolerance, const int stopWtheta) { if ( gauge_dir != 3 ) { printf("Starting Landau gauge fixing with FFTs...\n"); gaugefixingFFT<Elems, Float, Gauge, 4>(dataOr, data, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta); } else { printf("Starting Coulomb gauge fixing with FFTs...\n"); gaugefixingFFT<Elems, Float, Gauge, 3>(dataOr, data, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta); } } template<typename Float> void gaugefixingFFT( cudaGaugeField& data, const int gauge_dir, \ const int Nsteps, const int verbose_interval, const Float alpha, const int autotune, \ const double tolerance, const int stopWtheta) { // Switching to FloatNOrder for the gauge field in order to support RECONSTRUCT_12 // Need to fix this!! 
//9 and 6 mean the number of complex elements used to store g(x) and Delta(x), respectively if ( data.isNative() ) { if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) { //printfQuda("QUDA_RECONSTRUCT_NO\n"); typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge; gaugefixingFFT<9, Float>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) { //printfQuda("QUDA_RECONSTRUCT_12\n"); typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge; gaugefixingFFT<6, Float>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) { //printfQuda("QUDA_RECONSTRUCT_8\n"); typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge; gaugefixingFFT<6, Float>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta); } else { errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct()); } } else { errorQuda("Invalid Gauge Order\n"); } } #endif // GPU_GAUGE_ALG /** * @brief Gauge fixing with the steepest descent method with FFTs; supports a single GPU only. * @param[in,out] data, quda gauge field * @param[in] gauge_dir, 3 for Coulomb gauge fixing, other for Landau gauge fixing * @param[in] Nsteps, maximum number of steps to perform gauge fixing * @param[in] verbose_interval, print gauge fixing info when iteration count is a multiple of this * @param[in] alpha, gauge fixing parameter of the method, most common value is 0.08 * @param[in] autotune, 1 to autotune the method, i.e., if Fg inverts its tendency, the alpha value is decreased * @param[in] tolerance, tolerance value to stop the method; if this value is zero, the method stops only when the iteration count reaches the maximum number of steps defined by Nsteps * @param[in] stopWtheta, 0 for the MILC criterion and 1 to use the theta value */ void gaugefixingFFT( cudaGaugeField& data, const int gauge_dir, \ const int Nsteps, const int verbose_interval, const double alpha, const int autotune, \ const double tolerance, const int stopWtheta) { #ifdef GPU_GAUGE_ALG #ifdef MULTI_GPU if(comm_dim_partitioned(0) || comm_dim_partitioned(1) || comm_dim_partitioned(2) || comm_dim_partitioned(3)) errorQuda("Gauge Fixing with FFTs in multi-GPU support NOT implemented yet!\n"); #endif if ( data.Precision() == QUDA_HALF_PRECISION ) { errorQuda("Half precision not supported\n"); } if ( data.Precision() == QUDA_SINGLE_PRECISION ) { gaugefixingFFT<float> (data, gauge_dir, Nsteps, verbose_interval, (float)alpha, autotune, tolerance, stopWtheta); } else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) { gaugefixingFFT<double>(data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta); } else { errorQuda("Precision %d not supported", data.Precision()); } #else errorQuda("Gauge fixing has not been built"); #endif } }
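The kernels in this file repeatedly convert between the full lexicographic site index used by the FFT arrays (delta, gx) and the even/odd (checkerboard) ordering used by the gauge-field accessors; see the index arithmetic in kernel_gauge_GX above. The helper below isolates that conversion as a standalone sketch; the names lex_to_eo and volume are assumptions made for this note and are not part of the QUDA source.

// Minimal sketch (assumption, not QUDA code): map a full lexicographic site
// index onto the even/odd (checkerboard) layout used above, in which the
// volume/2 even-parity sites are stored first, followed by the odd-parity sites.
__host__ __device__ inline int lex_to_eo(int id, const int X[4], int volume)
{
  int x3 = id / (X[0] * X[1] * X[2]);    // t
  int x2 = (id / (X[0] * X[1])) % X[2];  // z
  int x1 = (id / X[0]) % X[1];           // y
  int x0 = id % X[0];                    // x
  int parity = (x0 + x1 + x2 + x3) & 1;  // checkerboard parity of the site
  int cb = (x0 + (x1 + (x2 + x3 * X[2]) * X[1]) * X[0]) >> 1;  // index within one parity
  return cb + parity * (volume / 2);
}

For example, with X = {4, 4, 4, 4} (volume 256), the site (1,0,0,0) has odd parity and maps to index 128, the first slot of the odd-parity half.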
8ff633027e92e9de0f23d1b1d8aeba30293bd076.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <math.h> #include <layers/fully_connected_layer.hpp> #include <linalg/matrix_vector_op.cuh> #include <linalg/reduce.cuh> #include <utils.cuh> #include <utils.hpp> #include <vector> namespace HugeCTR { FullyConnectedLayer<float>::FullyConnectedLayer( const std::shared_ptr<BufferBlock2<float>>& weight_buff, const std::shared_ptr<BufferBlock2<float>>& wgrad_buff, const Tensor2<float>& in_tensor, const Tensor2<float>& out_tensor, const std::shared_ptr<GPUResource>& gpu_resource, bool use_mixed_precision, bool enable_tf32_compute, std::vector<Initializer_t> initializer_types) : Layer(gpu_resource, initializer_types), use_mixed_precision_(use_mixed_precision), enable_tf32_compute_(enable_tf32_compute) { try { // check the in_tensor and out_tensor const auto& in_tensor_dim = in_tensor.get_dimensions(); const auto& out_tensor_dim = out_tensor.get_dimensions(); // 1. two dim? if (in_tensor_dim.size() != 2 || out_tensor_dim.size() != 2) { CK_THROW_(Error_t::WrongInput, "input or output tensor doesn't has two dimensions"); } // 2. dim match? size_t m = in_tensor_dim[0]; size_t n = out_tensor_dim[1]; size_t k = in_tensor_dim[1]; size_t m_ck = out_tensor_dim[0]; if (m != m_ck) { CK_THROW_(Error_t::WrongInput, "size of input / output tensor doesn't match"); } std::vector<size_t> weight_dim = {k, n}; std::vector<size_t> bias_dim = {1, n}; { Tensor2<float> tensor; weight_buff->reserve(weight_dim, &tensor); weights_.push_back(tensor); } { Tensor2<float> tensor; weight_buff->reserve(bias_dim, &tensor); weights_.push_back(tensor); } { Tensor2<float> tensor; wgrad_buff->reserve(weight_dim, &tensor); wgrad_.push_back(tensor); } { Tensor2<float> tensor; wgrad_buff->reserve(bias_dim, &tensor); wgrad_.push_back(tensor); } in_tensors_.push_back(in_tensor); out_tensors_.push_back(out_tensor); // Where should we create this cuBLAS handle? 
} catch (const std::runtime_error& rt_err) { std::cerr << rt_err.what() << std::endl; throw; } } void __global__ add_bias_kernel_row(float* data, const float* bias, const int m, const int n) { int offset = blockIdx.x * n; for (int tid = threadIdx.x; tid < n; tid += blockDim.x) { data[offset + tid] += bias[tid]; } } void __global__ add_bias_kernel_col(float* data, const float* bias, const int m, const int n) { int offset = blockIdx.x * m; float b = bias[blockIdx.x]; for (int tid = threadIdx.x; tid < m; tid += blockDim.x) { data[offset + tid] += b; } } void add_bias(float* data, const float* bias, const int m, const int n, bool row_major, hipStream_t stream) { if (row_major) { dim3 grid(m); dim3 block(min(n, 1024)); hipLaunchKernelGGL(( add_bias_kernel_row), dim3(grid), dim3(block), 0, stream, data, bias, m, n); } else { dim3 grid(n); dim3 block(min(m, 1024)); hipLaunchKernelGGL(( add_bias_kernel_col), dim3(grid), dim3(block), 0, stream, data, bias, m, n); } #ifndef NDEBUG hipDeviceSynchronize(); CK_CUDA_THROW_(hipGetLastError()); #endif } void FullyConnectedLayer<float>::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); Tensor2<float>& in_tensor = get_in_tensors(is_train)[0]; Tensor2<float>& out_tensor = out_tensors_[0]; float* weight = weights_[0].get_ptr(); float* bias = weights_[1].get_ptr(); float* in = in_tensor.get_ptr(); float* out = out_tensor.get_ptr(); const auto& in_tensor_dim = in_tensor.get_dimensions(); const auto& out_tensor_dim = out_tensor.get_dimensions(); int m, n, k; m = in_tensor_dim[0]; n = out_tensor_dim[1]; k = in_tensor_dim[1]; float alpha = 1.0f, beta = 0.0f; hipblasComputeType_t compute_type = enable_tf32_compute_ ? CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F; CK_CUBLAS_THROW_(hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, weight, HIP_R_32F, n, in, HIP_R_32F, k, &beta, out, HIP_R_32F, n, compute_type, falgo_)); add_bias(out, bias, m, n, true, get_gpu().get_stream()); } void FullyConnectedLayer<float>::bprop() { CudaDeviceContext context(get_device_id()); Tensor2<float>& in_tensor = get_in_tensors(true)[0]; Tensor2<float>& out_tensor = out_tensors_[0]; float* wgrad = wgrad_[0].get_ptr(); float* bias_grad = wgrad_[1].get_ptr(); float* weight = weights_[0].get_ptr(); float* in = in_tensor.get_ptr(); float* out = out_tensor.get_ptr(); const auto& in_tensor_dim = in_tensor.get_dimensions(); const auto& out_tensor_dim = out_tensor.get_dimensions(); int m, n, k; m = in_tensor_dim[0]; n = out_tensor_dim[1]; k = in_tensor_dim[1]; float alpha = 1.0f, beta_w = 1.0f, beta_x = 0.0f; hipblasComputeType_t compute_type = enable_tf32_compute_ ? 
CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F; // gradient respect to W CK_CUBLAS_THROW_(hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_T, n, k, m, &alpha, out, HIP_R_32F, n, in, HIP_R_32F, k, &beta_w, wgrad, HIP_R_32F, n, compute_type, balgo_W_)); // gradient respect to Xn CK_CUBLAS_THROW_(hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_N, k, m, n, &alpha, weight, HIP_R_32F, n, out, HIP_R_32F, n, &beta_x, in, HIP_R_32F, k, compute_type, balgo_Xn_)); MLCommon::LinAlg::reduce(bias_grad, out, m, n, float(0), false, true, get_gpu().get_stream(), true); } void FullyConnectedLayer<float>::search_algorithm() { // Set to the CUDA device where this layer assigned to CudaDeviceContext context(get_device_id()); const int repeat_num = 100; // Device Tensors to be used Tensor2<float>& in_tensor = get_in_tensors(true)[0]; Tensor2<float>& out_tensor = out_tensors_[0]; float* weight = weights_[0].get_ptr(); float* in = in_tensor.get_ptr(); float* out = out_tensor.get_ptr(); float* wgrad = wgrad_[0].get_ptr(); // Tensor dim const auto& in_tensor_dim = in_tensor.get_dimensions(); const auto& out_tensor_dim = out_tensor.get_dimensions(); int m, n, k; m = in_tensor_dim[0]; n = out_tensor_dim[1]; k = in_tensor_dim[1]; // Record time for each algorithm float shortestTime = 100000000.0; float time; hipEvent_t start, stop; CK_CUDA_THROW_(hipEventCreate(&start)); CK_CUDA_THROW_(hipEventCreate(&stop)); // cublas ret status hipblasStatus_t status; // Start, end for search int startAlgo, endAlgo; if (use_mixed_precision_) { startAlgo = (int)CUBLAS_GEMM_DEFAULT_TENSOR_OP; endAlgo = (int)CUBLAS_GEMM_ALGO15_TENSOR_OP; } else { startAlgo = (int)HIPBLAS_GEMM_DEFAULT; endAlgo = (int)CUBLAS_GEMM_ALGO23; } hipblasComputeType_t compute_type = enable_tf32_compute_ ? 
CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F; // Search all the algorithm for fprop for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) { float alpha = 1.0f, beta = 0.0f; // Record start event CK_CUDA_THROW_(hipEventRecord(start, get_gpu().get_stream())); for (int i = 0; i < repeat_num; ++i) { status = hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, weight, HIP_R_32F, n, in, HIP_R_32F, k, &beta, out, HIP_R_32F, n, compute_type, static_cast<hipblasGemmAlgo_t>(testAlgo)); } CK_CUDA_THROW_(hipEventRecord(stop, get_gpu().get_stream())); CK_CUDA_THROW_(hipEventSynchronize(stop)); CK_CUDA_THROW_(hipEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != HIPBLAS_STATUS_SUCCESS) { // printf("The algorithms %d is not supported for fprop, skipped.\n", testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; falgo_ = static_cast<hipblasGemmAlgo_t>(testAlgo); } } // Reset shortestTime shortestTime = 100000000.0; // Search all the algorithm for bprop_W for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) { float alpha = 1.0f, beta_w = 1.0f; // Record start event CK_CUDA_THROW_(hipEventRecord(start, get_gpu().get_stream())); for (int i = 0; i < repeat_num; ++i) { status = hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_T, n, k, m, &alpha, out, HIP_R_32F, n, in, HIP_R_32F, k, &beta_w, wgrad, HIP_R_32F, n, compute_type, static_cast<hipblasGemmAlgo_t>(testAlgo)); } CK_CUDA_THROW_(hipEventRecord(stop, get_gpu().get_stream())); CK_CUDA_THROW_(hipEventSynchronize(stop)); CK_CUDA_THROW_(hipEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != HIPBLAS_STATUS_SUCCESS) { // printf("The algorithms %d is not supported for bprop_W, skipped.\n", testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; balgo_W_ = static_cast<hipblasGemmAlgo_t>(testAlgo); } } // Reset shortestTime shortestTime = 100000000.0; // Search all the algorithm for bprop_Xn for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) { float alpha = 1.0f, beta_x = 0.0f; // Record start event CK_CUDA_THROW_(hipEventRecord(start, get_gpu().get_stream())); for (int i = 0; i < repeat_num; ++i) { status = hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_N, k, m, n, &alpha, weight, HIP_R_32F, n, out, HIP_R_32F, n, &beta_x, in, HIP_R_32F, k, compute_type, static_cast<hipblasGemmAlgo_t>(testAlgo)); } CK_CUDA_THROW_(hipEventRecord(stop, get_gpu().get_stream())); CK_CUDA_THROW_(hipEventSynchronize(stop)); CK_CUDA_THROW_(hipEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != HIPBLAS_STATUS_SUCCESS) { // printf("The algorithms %d is not supported for bprop_Xn, skipped.\n", testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; balgo_Xn_ = static_cast<hipblasGemmAlgo_t>(testAlgo); } } // Print selection information // printf("The algorithm selection for fprop, bprop_W and bprop_Xn are: %d, %d and %d.\n", // (int)falgo_, (int)balgo_W_, (int)balgo_Xn_); // Output msg // 
MESSAGE_("The fully-connected layer has finished choosing the algorithm for cublas Gemm."); // Clean-up CK_CUDA_THROW_(hipEventDestroy(start)); CK_CUDA_THROW_(hipEventDestroy(stop)); } std::unique_ptr<DataSimulator> FullyConnectedLayer<float>::get_uniform_initializer( const int index) { const Tensor2<float>& in_tensor = get_in_tensors(true)[0]; const Tensor2<float>& out_tensor = out_tensors_[0]; float bottom_dim = in_tensor.get_dimensions()[1]; float top_dim = out_tensor.get_dimensions()[1]; float limit = 1.0f / ((0 == index ? bottom_dim : 0) + top_dim); return std::make_unique<UniformDataSimulator>(-1 * limit, limit); } std::unique_ptr<DataSimulator> FullyConnectedLayer<float>::get_xavier_uniform_initializer( const int index) { const Tensor2<float>& in_tensor = get_in_tensors(true)[0]; const Tensor2<float>& out_tensor = out_tensors_[0]; float bottom_dim = in_tensor.get_dimensions()[1]; float top_dim = out_tensor.get_dimensions()[1]; return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Uniform, 0 == index ? bottom_dim : 0, top_dim); } std::unique_ptr<DataSimulator> FullyConnectedLayer<float>::get_xavier_norm_initializer( const int index) { const Tensor2<float>& in_tensor = get_in_tensors(true)[0]; const Tensor2<float>& out_tensor = out_tensors_[0]; float bottom_dim = in_tensor.get_dimensions()[1]; float top_dim = out_tensor.get_dimensions()[1]; return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Norm, 0 == index ? bottom_dim : 0, top_dim); } std::unique_ptr<DataSimulator> FullyConnectedLayer<float>::get_default_initializer( const int index) { const Tensor2<float>& in_tensor = get_in_tensors(true)[0]; const Tensor2<float>& out_tensor = out_tensors_[0]; float bottom_dim = in_tensor.get_dimensions()[1]; float top_dim = out_tensor.get_dimensions()[1]; std::unique_ptr<DataSimulator> simu(nullptr); if (0 == index) { simu.reset(new VarianceScalingSimulator(1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Norm, bottom_dim, top_dim)); } else if (1 == index) { float stddev = sqrt(1.f / top_dim); simu.reset(new GaussianDataSimulator(0, stddev, -2 * stddev, 2 * stddev)); } else { CK_THROW_(Error_t::OutOfBound, "index != {0, 1}."); } return simu; } template class FullyConnectedLayer<float>; } // namespace HugeCTR
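In bprop above, the bias gradient is produced by MLCommon::LinAlg::reduce; mathematically it is simply the column-wise sum of the (m x n) row-major output gradient over the batch dimension, bias_grad[j] = sum_i dout[i*n + j]. The kernel below is a minimal illustrative equivalent, not HugeCTR code; the name bias_grad_kernel and the launch configuration are assumptions.

// Sketch (assumption, not part of HugeCTR): per-feature sum of the output
// gradient over the batch. One thread handles one output column j, so
// consecutive threads read consecutive elements of each row (coalesced loads).
__global__ void bias_grad_kernel(const float* dout, float* bias_grad, int m, int n)
{
  int j = blockIdx.x * blockDim.x + threadIdx.x;
  if (j >= n) return;
  float sum = 0.0f;
  for (int i = 0; i < m; ++i) sum += dout[i * n + j];
  bias_grad[j] = sum;
}

A possible launch for this sketch would be bias_grad_kernel<<<(n + 255) / 256, 256, 0, stream>>>(out, bias_grad, m, n), matching the row-major layout assumed by the bias addition in fprop.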
8ff633027e92e9de0f23d1b1d8aeba30293bd076.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <math.h> #include <layers/fully_connected_layer.hpp> #include <linalg/matrix_vector_op.cuh> #include <linalg/reduce.cuh> #include <utils.cuh> #include <utils.hpp> #include <vector> namespace HugeCTR { FullyConnectedLayer<float>::FullyConnectedLayer( const std::shared_ptr<BufferBlock2<float>>& weight_buff, const std::shared_ptr<BufferBlock2<float>>& wgrad_buff, const Tensor2<float>& in_tensor, const Tensor2<float>& out_tensor, const std::shared_ptr<GPUResource>& gpu_resource, bool use_mixed_precision, bool enable_tf32_compute, std::vector<Initializer_t> initializer_types) : Layer(gpu_resource, initializer_types), use_mixed_precision_(use_mixed_precision), enable_tf32_compute_(enable_tf32_compute) { try { // check the in_tensor and out_tensor const auto& in_tensor_dim = in_tensor.get_dimensions(); const auto& out_tensor_dim = out_tensor.get_dimensions(); // 1. two dim? if (in_tensor_dim.size() != 2 || out_tensor_dim.size() != 2) { CK_THROW_(Error_t::WrongInput, "input or output tensor doesn't has two dimensions"); } // 2. dim match? size_t m = in_tensor_dim[0]; size_t n = out_tensor_dim[1]; size_t k = in_tensor_dim[1]; size_t m_ck = out_tensor_dim[0]; if (m != m_ck) { CK_THROW_(Error_t::WrongInput, "size of input / output tensor doesn't match"); } std::vector<size_t> weight_dim = {k, n}; std::vector<size_t> bias_dim = {1, n}; { Tensor2<float> tensor; weight_buff->reserve(weight_dim, &tensor); weights_.push_back(tensor); } { Tensor2<float> tensor; weight_buff->reserve(bias_dim, &tensor); weights_.push_back(tensor); } { Tensor2<float> tensor; wgrad_buff->reserve(weight_dim, &tensor); wgrad_.push_back(tensor); } { Tensor2<float> tensor; wgrad_buff->reserve(bias_dim, &tensor); wgrad_.push_back(tensor); } in_tensors_.push_back(in_tensor); out_tensors_.push_back(out_tensor); // Where should we create this cuBLAS handle? 
} catch (const std::runtime_error& rt_err) { std::cerr << rt_err.what() << std::endl; throw; } } void __global__ add_bias_kernel_row(float* data, const float* bias, const int m, const int n) { int offset = blockIdx.x * n; for (int tid = threadIdx.x; tid < n; tid += blockDim.x) { data[offset + tid] += bias[tid]; } } void __global__ add_bias_kernel_col(float* data, const float* bias, const int m, const int n) { int offset = blockIdx.x * m; float b = bias[blockIdx.x]; for (int tid = threadIdx.x; tid < m; tid += blockDim.x) { data[offset + tid] += b; } } void add_bias(float* data, const float* bias, const int m, const int n, bool row_major, cudaStream_t stream) { if (row_major) { dim3 grid(m); dim3 block(min(n, 1024)); add_bias_kernel_row<<<grid, block, 0, stream>>>(data, bias, m, n); } else { dim3 grid(n); dim3 block(min(m, 1024)); add_bias_kernel_col<<<grid, block, 0, stream>>>(data, bias, m, n); } #ifndef NDEBUG cudaDeviceSynchronize(); CK_CUDA_THROW_(cudaGetLastError()); #endif } void FullyConnectedLayer<float>::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); Tensor2<float>& in_tensor = get_in_tensors(is_train)[0]; Tensor2<float>& out_tensor = out_tensors_[0]; float* weight = weights_[0].get_ptr(); float* bias = weights_[1].get_ptr(); float* in = in_tensor.get_ptr(); float* out = out_tensor.get_ptr(); const auto& in_tensor_dim = in_tensor.get_dimensions(); const auto& out_tensor_dim = out_tensor.get_dimensions(); int m, n, k; m = in_tensor_dim[0]; n = out_tensor_dim[1]; k = in_tensor_dim[1]; float alpha = 1.0f, beta = 0.0f; cublasComputeType_t compute_type = enable_tf32_compute_ ? CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F; CK_CUBLAS_THROW_(cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, weight, CUDA_R_32F, n, in, CUDA_R_32F, k, &beta, out, CUDA_R_32F, n, compute_type, falgo_)); add_bias(out, bias, m, n, true, get_gpu().get_stream()); } void FullyConnectedLayer<float>::bprop() { CudaDeviceContext context(get_device_id()); Tensor2<float>& in_tensor = get_in_tensors(true)[0]; Tensor2<float>& out_tensor = out_tensors_[0]; float* wgrad = wgrad_[0].get_ptr(); float* bias_grad = wgrad_[1].get_ptr(); float* weight = weights_[0].get_ptr(); float* in = in_tensor.get_ptr(); float* out = out_tensor.get_ptr(); const auto& in_tensor_dim = in_tensor.get_dimensions(); const auto& out_tensor_dim = out_tensor.get_dimensions(); int m, n, k; m = in_tensor_dim[0]; n = out_tensor_dim[1]; k = in_tensor_dim[1]; float alpha = 1.0f, beta_w = 1.0f, beta_x = 0.0f; cublasComputeType_t compute_type = enable_tf32_compute_ ? 
CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F; // gradient respect to W CK_CUBLAS_THROW_(cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_T, n, k, m, &alpha, out, CUDA_R_32F, n, in, CUDA_R_32F, k, &beta_w, wgrad, CUDA_R_32F, n, compute_type, balgo_W_)); // gradient respect to Xn CK_CUBLAS_THROW_(cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, k, m, n, &alpha, weight, CUDA_R_32F, n, out, CUDA_R_32F, n, &beta_x, in, CUDA_R_32F, k, compute_type, balgo_Xn_)); MLCommon::LinAlg::reduce(bias_grad, out, m, n, float(0), false, true, get_gpu().get_stream(), true); } void FullyConnectedLayer<float>::search_algorithm() { // Set to the CUDA device where this layer assigned to CudaDeviceContext context(get_device_id()); const int repeat_num = 100; // Device Tensors to be used Tensor2<float>& in_tensor = get_in_tensors(true)[0]; Tensor2<float>& out_tensor = out_tensors_[0]; float* weight = weights_[0].get_ptr(); float* in = in_tensor.get_ptr(); float* out = out_tensor.get_ptr(); float* wgrad = wgrad_[0].get_ptr(); // Tensor dim const auto& in_tensor_dim = in_tensor.get_dimensions(); const auto& out_tensor_dim = out_tensor.get_dimensions(); int m, n, k; m = in_tensor_dim[0]; n = out_tensor_dim[1]; k = in_tensor_dim[1]; // Record time for each algorithm float shortestTime = 100000000.0; float time; cudaEvent_t start, stop; CK_CUDA_THROW_(cudaEventCreate(&start)); CK_CUDA_THROW_(cudaEventCreate(&stop)); // cublas ret status cublasStatus_t status; // Start, end for search int startAlgo, endAlgo; if (use_mixed_precision_) { startAlgo = (int)CUBLAS_GEMM_DEFAULT_TENSOR_OP; endAlgo = (int)CUBLAS_GEMM_ALGO15_TENSOR_OP; } else { startAlgo = (int)CUBLAS_GEMM_DEFAULT; endAlgo = (int)CUBLAS_GEMM_ALGO23; } cublasComputeType_t compute_type = enable_tf32_compute_ ? 
CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F; // Search all the algorithm for fprop for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) { float alpha = 1.0f, beta = 0.0f; // Record start event CK_CUDA_THROW_(cudaEventRecord(start, get_gpu().get_stream())); for (int i = 0; i < repeat_num; ++i) { status = cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, weight, CUDA_R_32F, n, in, CUDA_R_32F, k, &beta, out, CUDA_R_32F, n, compute_type, static_cast<cublasGemmAlgo_t>(testAlgo)); } CK_CUDA_THROW_(cudaEventRecord(stop, get_gpu().get_stream())); CK_CUDA_THROW_(cudaEventSynchronize(stop)); CK_CUDA_THROW_(cudaEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != CUBLAS_STATUS_SUCCESS) { // printf("The algorithms %d is not supported for fprop, skipped.\n", testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; falgo_ = static_cast<cublasGemmAlgo_t>(testAlgo); } } // Reset shortestTime shortestTime = 100000000.0; // Search all the algorithm for bprop_W for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) { float alpha = 1.0f, beta_w = 1.0f; // Record start event CK_CUDA_THROW_(cudaEventRecord(start, get_gpu().get_stream())); for (int i = 0; i < repeat_num; ++i) { status = cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_T, n, k, m, &alpha, out, CUDA_R_32F, n, in, CUDA_R_32F, k, &beta_w, wgrad, CUDA_R_32F, n, compute_type, static_cast<cublasGemmAlgo_t>(testAlgo)); } CK_CUDA_THROW_(cudaEventRecord(stop, get_gpu().get_stream())); CK_CUDA_THROW_(cudaEventSynchronize(stop)); CK_CUDA_THROW_(cudaEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != CUBLAS_STATUS_SUCCESS) { // printf("The algorithms %d is not supported for bprop_W, skipped.\n", testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; balgo_W_ = static_cast<cublasGemmAlgo_t>(testAlgo); } } // Reset shortestTime shortestTime = 100000000.0; // Search all the algorithm for bprop_Xn for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) { float alpha = 1.0f, beta_x = 0.0f; // Record start event CK_CUDA_THROW_(cudaEventRecord(start, get_gpu().get_stream())); for (int i = 0; i < repeat_num; ++i) { status = cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, k, m, n, &alpha, weight, CUDA_R_32F, n, out, CUDA_R_32F, n, &beta_x, in, CUDA_R_32F, k, compute_type, static_cast<cublasGemmAlgo_t>(testAlgo)); } CK_CUDA_THROW_(cudaEventRecord(stop, get_gpu().get_stream())); CK_CUDA_THROW_(cudaEventSynchronize(stop)); CK_CUDA_THROW_(cudaEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != CUBLAS_STATUS_SUCCESS) { // printf("The algorithms %d is not supported for bprop_Xn, skipped.\n", testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; balgo_Xn_ = static_cast<cublasGemmAlgo_t>(testAlgo); } } // Print selection information // printf("The algorithm selection for fprop, bprop_W and bprop_Xn are: %d, %d and %d.\n", // (int)falgo_, (int)balgo_W_, (int)balgo_Xn_); // Output msg // 
MESSAGE_("The fully-connected layer has finished choosing the algorithm for cublas Gemm."); // Clean-up CK_CUDA_THROW_(cudaEventDestroy(start)); CK_CUDA_THROW_(cudaEventDestroy(stop)); } std::unique_ptr<DataSimulator> FullyConnectedLayer<float>::get_uniform_initializer( const int index) { const Tensor2<float>& in_tensor = get_in_tensors(true)[0]; const Tensor2<float>& out_tensor = out_tensors_[0]; float bottom_dim = in_tensor.get_dimensions()[1]; float top_dim = out_tensor.get_dimensions()[1]; float limit = 1.0f / ((0 == index ? bottom_dim : 0) + top_dim); return std::make_unique<UniformDataSimulator>(-1 * limit, limit); } std::unique_ptr<DataSimulator> FullyConnectedLayer<float>::get_xavier_uniform_initializer( const int index) { const Tensor2<float>& in_tensor = get_in_tensors(true)[0]; const Tensor2<float>& out_tensor = out_tensors_[0]; float bottom_dim = in_tensor.get_dimensions()[1]; float top_dim = out_tensor.get_dimensions()[1]; return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Uniform, 0 == index ? bottom_dim : 0, top_dim); } std::unique_ptr<DataSimulator> FullyConnectedLayer<float>::get_xavier_norm_initializer( const int index) { const Tensor2<float>& in_tensor = get_in_tensors(true)[0]; const Tensor2<float>& out_tensor = out_tensors_[0]; float bottom_dim = in_tensor.get_dimensions()[1]; float top_dim = out_tensor.get_dimensions()[1]; return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Norm, 0 == index ? bottom_dim : 0, top_dim); } std::unique_ptr<DataSimulator> FullyConnectedLayer<float>::get_default_initializer( const int index) { const Tensor2<float>& in_tensor = get_in_tensors(true)[0]; const Tensor2<float>& out_tensor = out_tensors_[0]; float bottom_dim = in_tensor.get_dimensions()[1]; float top_dim = out_tensor.get_dimensions()[1]; std::unique_ptr<DataSimulator> simu(nullptr); if (0 == index) { simu.reset(new VarianceScalingSimulator(1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Norm, bottom_dim, top_dim)); } else if (1 == index) { float stddev = sqrt(1.f / top_dim); simu.reset(new GaussianDataSimulator(0, stddev, -2 * stddev, 2 * stddev)); } else { CK_THROW_(Error_t::OutOfBound, "index != {0, 1}."); } return simu; } template class FullyConnectedLayer<float>; } // namespace HugeCTR
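The fprop call above evaluates the row-major product out(m x n) = in(m x k) * weight(k x n) with column-major cuBLAS by passing the dimensions as (n, m, k) and the operands in reverse order: a row-major matrix reads as its own transpose in column-major, so computing out^T = weight^T * in^T needs no explicit transposition. The wrapper below spells out the same mapping with plain cublasSgemm; the function name and the fixed alpha/beta values are assumptions for illustration, not HugeCTR code.

#include <cublas_v2.h>

// Sketch (assumption): row-major C = A * B with A (m x k), B (k x n), C (m x n),
// all stored row-major on the device. cuBLAS is column-major, so we compute
// C^T = B^T * A^T by swapping the operands and passing the dimensions (n, m, k).
void gemm_row_major(cublasHandle_t handle, const float* A, const float* B,
                    float* C, int m, int n, int k)
{
  const float alpha = 1.0f, beta = 0.0f;
  cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
              n, m, k,
              &alpha,
              B, n,   // plays the role of "A" in the column-major call, leading dim n
              A, k,   // plays the role of "B", leading dim k
              &beta,
              C, n);  // result, leading dim n
}

The same operand-swapping convention underlies the two bprop calls, which is why wgrad is written with leading dimension n and the data gradient with leading dimension k.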
4433af1f12ef71ef88d050b6ef1419e81e38489f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "eeg.h" // Based on code by: Mohammad Tahghighi int32_t abssum(int np, int32_t *x) { int i; int32_t s = 0; for (i = 0; i < np; i++) { s += abs(x[i]); } return s; } float average(int np, int32_t *x) { int i; int32_t s = 0; for (i = 0; i < np; i++) { s += x[i]; } return ((float) s) / ((float) np); } __global__ void gpu_average(int32_t *x, int32_t *blocksums) { //Note that his kernel is merely an example, and is not necessarily the optimal way to calculate the sum/average on a GPU device // The id of this thread within our block unsigned int threadId = threadIdx.x; // The global id if this thread. // Since we launch np threads in total, each id maps to one unique element in x unsigned int globalId = blockIdx.x*blockDim.x + threadIdx.x; // NOTE: for debugging you can print directly from the GPU device // however, if you print a lot, or the GPU encounters a serious error, this might fail // also performance is horrible, so make sure to disable it for benchmarking //printf("Hello from global thread %d, with local id %d\n",globalId,threadId); // Lets first copy the data from global GPU memory to shared memory // Shared memory is only accesible within a threadblock, but it is much faster to access than global memory once you have data in there // Note that by having the keyword "extern" and empty brackets [], the size of the array will be determined at runtime. // You will however have to pass the size in bytes as a 3rd argument to the kernel call (see the "sharedMemBytes" variable) // If you statically know the size of the shared memory array, it is probably faster to use that (Disclaimer: I did not test this claim) extern __shared__ int32_t blockData[]; blockData[threadId]=x[globalId]; // We synchronize the threads here, to make sure every thread has copied valid data from global to local memory // Otherwise we potentially risk accessing uninitialized data in the shared memory __syncthreads(); // The next step is summation of the elements in our blockData // The summation is done in a tree like fashion, as illustrated below // 0 1 2 3 4 5 6 7 (number of parallel summations) // |/ |/ |/ |/ (4) // 1 5 9 13 // |__/ |__/ (2) // 6 22 // |______/ (1) // 28 for(unsigned int s=1;s<blockDim.x;s*=2){ // Because the amount of work reduces, we use the threadId to determine which threads get to execute the summation // The other threads will idle in the meantime (They will be masked during the execution of the conditional-part) if (threadId % (2*s) == 0 ){ blockData[threadId] += blockData[threadId+s]; } // For each layer of the tree, we have to make sure all threads finish their computations // otherwise we could read unsummed results __syncthreads(); } //we let 1 selected thread per block write out our local sum to the global memory if(threadId==0){ #ifdef DEBUG //example debugging, print the partial sum of each block with the block id printf("GPU Block %d sum: %d\n",blockIdx.x, blockData[0]); #endif //write the sum of this block to the blocksums array blocksums[blockIdx.x]=blockData[0]; } //this will return the control to the CPU once all threads finish (reach this point) return; } __global__ void gpu_abssum(int32_t *x, int32_t *blocksums) { //Note that his kernel is merely an example, and is not necessarily the optimal way to calculate the sum/average on a GPU device // The id of this thread within our block unsigned int threadId = threadIdx.x; // The global id if this thread. 
// Since we launch np threads in total, each id maps to one unique element in x unsigned int globalId = blockIdx.x*blockDim.x + threadIdx.x; // NOTE: for debugging you can print directly from the GPU device // however, if you print a lot, or the GPU encounters a serious error, this might fail // also performance is horrible, so make sure to disable it for benchmarking //printf("Hello from global thread %d, with local id %d\n",globalId,threadId); // Lets first copy the data from global GPU memory to shared memory // Shared memory is only accesible within a threadblock, but it is much faster to access than global memory once you have data in there // Note that by having the keyword "extern" and empty brackets [], the size of the array will be determined at runtime. // You will however have to pass the size in bytes as a 3rd argument to the kernel call (see the "sharedMemBytes" variable) // If you statically know the size of the shared memory array, it is probably faster to use that (Disclaimer: I did not test this claim) extern __shared__ int32_t blockData[]; blockData[threadId]=abs(x[globalId]); // We synchronize the threads here, to make sure every thread has copied valid data from global to local memory // Otherwise we potentially risk accessing uninitialized data in the shared memory __syncthreads(); // The next step is summation of the elements in our blockData // The summation is done in a tree like fashion, as illustrated below // 0 1 2 3 4 5 6 7 (number of parallel summations) // |/ |/ |/ |/ (4) // 1 5 9 13 // |__/ |__/ (2) // 6 22 // |______/ (1) // 28 for(unsigned int s=1;s<blockDim.x;s*=2){ // Because the amount of work reduces, we use the threadId to determine which threads get to execute the summation // The other threads will idle in the meantime (They will be masked during the execution of the conditional-part) if (threadId % (2*s) == 0 ){ blockData[threadId] += blockData[threadId+s]; } // For each layer of the tree, we have to make sure all threads finish their computations // otherwise we could read unsummed results __syncthreads(); } //we let 1 selected thread per block write out our local sum to the global memory if(threadId==0){ #ifdef DEBUG //example debugging, print the partial sum of each block with the block id printf("GPU Block %d abssum: %d\n",blockIdx.x, blockData[0]); #endif //write the sum of this block to the blocksums array blocksums[blockIdx.x]=blockData[0]; } //this will return the control to the CPU once all threads finish (reach this point) return; } float variance(int np, int32_t *x, float avg) { int i; float s = 0; // Variance = Sum((x - avg)^2) for (i = 0; i < np; i++) { float tmp = x[i] - avg; s += (tmp * tmp); } return s / ((float) np); } float stddev(int np, int32_t *x, float avg) { // Stddev = sqrt(variance) float var = variance(np, x, avg); return sqrt(var); } int mean_crosstimes(int np, int32_t *x, float avg) { int i; bool negative = x[0] < avg; int count = 0; // Count number of zero crossings for (x - avg) for (i = 0; i < np; i++) { if (negative) { if (x[i] > avg) { negative = false; count++; } } else { if (x[i] < avg) { negative = true; count++; } } } return count; } void stafeature(int np, int32_t *x, float *sta) { // Returns sta = [mean, std, abssum, mean_crosstimes) #ifdef CPU_ONLY //original CPU code float avg = average(np, x); sta[0] = avg; sta[1] = stddev(np, x, avg); sta[2] = abssum(np, x); sta[3] = mean_crosstimes(np, x, avg); #else //GPU code /* * Our high level strategy to calculate the average in parallel in this example is to split 
the input array into into a number of blocks (numBlocks). * Each block contains thus np/numBlocks elements * These blocks will be mapped to the Streaming Multiprocessors of the GPU. * For each block we calculate the sum * Finally the sums of all the blocks are added on the CPU */ //NOTE: take care np is a multiple of numBlocks for this example. int numBlocks=32; int threadsPerBlock=np/numBlocks; //i.e., this should have remainder==0 //Ignore the next bit untill you are inspecting the gpu kernel code, then refer back to it. On the first read just ignore it ;) //Because in this setup the amount of required shared memory depends on np we assume it is only known at runtime. (although you can of course get it from the input and assume it fixed for this assigment) //the number of required shared memory bytes need to be passed as a 3rd argument to the kernel call later on //see the remarks in the gpu_average code int sharedMemBytes = threadsPerBlock*sizeof(int32_t); //variable for holding return values of cuda functions hipError_t err; //start by allocating room for array "x" on the global memory of the GPU int32_t* device_x; err=hipMalloc(&device_x, np*sizeof(int32_t)); //Here we check for errors of this cuda call //See eeg.h for the implementation of this error check (it's not a default cuda function) cudaCheckError(err); //also allocate room for the all the sums of the blocks int32_t* device_blocksums; //Note that room is allocated in global memory for the sum of *each* threadblock err=hipMalloc(&device_blocksums, numBlocks*sizeof(int32_t)); cudaCheckError(err); //also allocate room for the all the abssums of the blocks int32_t* device_blockabssums; //Note that room is allocated in global memory for the sum of *each* threadblock err=hipMalloc(&device_blockabssums, numBlocks*sizeof(int32_t)); cudaCheckError(err); //Now copy array "x" from the CPU to the GPU err=hipMemcpy(device_x,x, np*sizeof(int32_t), hipMemcpyHostToDevice); cudaCheckError(err); //Compute the average on the GPU hipLaunchKernelGGL(( gpu_average), dim3(numBlocks),dim3(threadsPerBlock), sharedMemBytes, 0, device_x, device_blocksums); //Compute the abssum on the GPU hipLaunchKernelGGL(( gpu_abssum), dim3(numBlocks),dim3(threadsPerBlock), sharedMemBytes, 0, device_x, device_blockabssums); //We use "peekatlasterror" since a kernel launch does not return a hipError_t to check for errors //cudaCheckError(hipPeekAtLastError()); //copy the sums of each block back from GPU global memory to CPU memory int32_t blocksums[numBlocks]; err=hipMemcpy(blocksums, device_blocksums, numBlocks*sizeof(int32_t), hipMemcpyDeviceToHost); cudaCheckError(err); //copy the abssums of each block back from GPU global memory to CPU memory int32_t blockabssums[numBlocks]; err=hipMemcpy(blockabssums, device_blockabssums, numBlocks*sizeof(int32_t), hipMemcpyDeviceToHost); cudaCheckError(err); //free the memory on the GPU //Optimalisation Hint: if you do not free the memory, the values will be preserved between multiple kernel calls! 
//For example, the x-array will remain in the GPU global memory if you also map other features to the GPU err=hipFree(device_x); cudaCheckError(err); err=hipFree(device_blocksums); cudaCheckError(err); err=hipFree(device_blockabssums); cudaCheckError(err); #ifdef DEBUG //print all the block sums calculated by CPU for(int b=0;b<numBlocks;b++){ int sum=0; for (int i=0;i<threadsPerBlock;i++) sum+=x[b*threadsPerBlock+i]; printf("CPU Block %d sum: %d\n",b,sum); } #endif #ifdef DEBUG //print all the block abssums calculated by CPU for(int b=0;b<numBlocks;b++){ int abs_sum=0; for (int i=0;i<threadsPerBlock;i++) abs_sum+=abs(x[b*threadsPerBlock+i]); printf("CPU Block %d abs_sum: %d\n",b,abs_sum); } #endif //Now add all the block sums on the CPU //(Note: if you have many blocks, you might consider mapping this to another GPU call of course) int32_t sum=0; for(uint32_t blk=0;blk<numBlocks;blk++) sum+=blocksums[blk]; float avg = (float)(sum)/(float)(np); //Now add all the block abssums on the CPU int32_t abs_sum=0; for(uint32_t blk=0;blk<numBlocks;blk++) abs_sum+=blockabssums[blk]; #ifdef DEBUG //Compare total sum of GPU and CPU printf("GPU Total sum: %d\n",sum); int cpu_sum=0; for(int i=0;i<np;i++) cpu_sum+=x[i]; printf("CPU Total sum: %d\n",cpu_sum); #endif #ifdef DEBUG //compare the average printf("GPU average: %f\n",avg); printf("CPU average: %f\n",(float)cpu_sum/(float)np); #endif #ifdef DEBUG //compare the average printf("GPU abs_sum: %f\n",(float)abs_sum); printf("CPU abs_sum: %f\n",(float)abssum(np, x)); #endif //calculate all other features on the CPU for this example sta[0] = avg; sta[1] = stddev(np, x, avg); sta[2] = abs_sum; sta[3] = mean_crosstimes(np, x, avg); #endif }
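The kernel comments above already note that this interleaved, modulo-based reduction is not necessarily optimal: the threadId % (2*s) == 0 test leaves most threads of each warp idle, and the growing stride can cause shared-memory bank conflicts. A common alternative is sequential addressing, sketched below purely as an illustration (not part of the assignment code); like the original, it assumes blockDim.x is a power of two and is launched with threadsPerBlock * sizeof(int32_t) bytes of dynamic shared memory.

#include <stdint.h>

// Sketch (assumption): block-level sum reduction with sequential addressing.
// Active threads stay contiguous (less warp divergence) and consecutive
// threads touch consecutive shared-memory words (no bank conflicts).
__global__ void gpu_sum_sequential(const int32_t* x, int32_t* blocksums)
{
  extern __shared__ int32_t blockData[];
  unsigned int tid = threadIdx.x;
  unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;

  blockData[tid] = x[gid];
  __syncthreads();

  for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (tid < s) blockData[tid] += blockData[tid + s];
    __syncthreads();
  }

  if (tid == 0) blocksums[blockIdx.x] = blockData[0];
}

Swapping the load for blockData[tid] = abs(x[gid]) gives the corresponding abssum variant, mirroring gpu_abssum above.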
4433af1f12ef71ef88d050b6ef1419e81e38489f.cu
#include "eeg.h" // Based on code by: Mohammad Tahghighi int32_t abssum(int np, int32_t *x) { int i; int32_t s = 0; for (i = 0; i < np; i++) { s += abs(x[i]); } return s; } float average(int np, int32_t *x) { int i; int32_t s = 0; for (i = 0; i < np; i++) { s += x[i]; } return ((float) s) / ((float) np); } __global__ void gpu_average(int32_t *x, int32_t *blocksums) { //Note that his kernel is merely an example, and is not necessarily the optimal way to calculate the sum/average on a GPU device // The id of this thread within our block unsigned int threadId = threadIdx.x; // The global id if this thread. // Since we launch np threads in total, each id maps to one unique element in x unsigned int globalId = blockIdx.x*blockDim.x + threadIdx.x; // NOTE: for debugging you can print directly from the GPU device // however, if you print a lot, or the GPU encounters a serious error, this might fail // also performance is horrible, so make sure to disable it for benchmarking //printf("Hello from global thread %d, with local id %d\n",globalId,threadId); // Lets first copy the data from global GPU memory to shared memory // Shared memory is only accesible within a threadblock, but it is much faster to access than global memory once you have data in there // Note that by having the keyword "extern" and empty brackets [], the size of the array will be determined at runtime. // You will however have to pass the size in bytes as a 3rd argument to the kernel call (see the "sharedMemBytes" variable) // If you statically know the size of the shared memory array, it is probably faster to use that (Disclaimer: I did not test this claim) extern __shared__ int32_t blockData[]; blockData[threadId]=x[globalId]; // We synchronize the threads here, to make sure every thread has copied valid data from global to local memory // Otherwise we potentially risk accessing uninitialized data in the shared memory __syncthreads(); // The next step is summation of the elements in our blockData // The summation is done in a tree like fashion, as illustrated below // 0 1 2 3 4 5 6 7 (number of parallel summations) // |/ |/ |/ |/ (4) // 1 5 9 13 // |__/ |__/ (2) // 6 22 // |______/ (1) // 28 for(unsigned int s=1;s<blockDim.x;s*=2){ // Because the amount of work reduces, we use the threadId to determine which threads get to execute the summation // The other threads will idle in the meantime (They will be masked during the execution of the conditional-part) if (threadId % (2*s) == 0 ){ blockData[threadId] += blockData[threadId+s]; } // For each layer of the tree, we have to make sure all threads finish their computations // otherwise we could read unsummed results __syncthreads(); } //we let 1 selected thread per block write out our local sum to the global memory if(threadId==0){ #ifdef DEBUG //example debugging, print the partial sum of each block with the block id printf("GPU Block %d sum: %d\n",blockIdx.x, blockData[0]); #endif //write the sum of this block to the blocksums array blocksums[blockIdx.x]=blockData[0]; } //this will return the control to the CPU once all threads finish (reach this point) return; } __global__ void gpu_abssum(int32_t *x, int32_t *blocksums) { //Note that his kernel is merely an example, and is not necessarily the optimal way to calculate the sum/average on a GPU device // The id of this thread within our block unsigned int threadId = threadIdx.x; // The global id if this thread. 
// Since we launch np threads in total, each id maps to one unique element in x unsigned int globalId = blockIdx.x*blockDim.x + threadIdx.x; // NOTE: for debugging you can print directly from the GPU device // however, if you print a lot, or the GPU encounters a serious error, this might fail // also performance is horrible, so make sure to disable it for benchmarking //printf("Hello from global thread %d, with local id %d\n",globalId,threadId); // Lets first copy the data from global GPU memory to shared memory // Shared memory is only accesible within a threadblock, but it is much faster to access than global memory once you have data in there // Note that by having the keyword "extern" and empty brackets [], the size of the array will be determined at runtime. // You will however have to pass the size in bytes as a 3rd argument to the kernel call (see the "sharedMemBytes" variable) // If you statically know the size of the shared memory array, it is probably faster to use that (Disclaimer: I did not test this claim) extern __shared__ int32_t blockData[]; blockData[threadId]=abs(x[globalId]); // We synchronize the threads here, to make sure every thread has copied valid data from global to local memory // Otherwise we potentially risk accessing uninitialized data in the shared memory __syncthreads(); // The next step is summation of the elements in our blockData // The summation is done in a tree like fashion, as illustrated below // 0 1 2 3 4 5 6 7 (number of parallel summations) // |/ |/ |/ |/ (4) // 1 5 9 13 // |__/ |__/ (2) // 6 22 // |______/ (1) // 28 for(unsigned int s=1;s<blockDim.x;s*=2){ // Because the amount of work reduces, we use the threadId to determine which threads get to execute the summation // The other threads will idle in the meantime (They will be masked during the execution of the conditional-part) if (threadId % (2*s) == 0 ){ blockData[threadId] += blockData[threadId+s]; } // For each layer of the tree, we have to make sure all threads finish their computations // otherwise we could read unsummed results __syncthreads(); } //we let 1 selected thread per block write out our local sum to the global memory if(threadId==0){ #ifdef DEBUG //example debugging, print the partial sum of each block with the block id printf("GPU Block %d abssum: %d\n",blockIdx.x, blockData[0]); #endif //write the sum of this block to the blocksums array blocksums[blockIdx.x]=blockData[0]; } //this will return the control to the CPU once all threads finish (reach this point) return; } float variance(int np, int32_t *x, float avg) { int i; float s = 0; // Variance = Sum((x - avg)^2) for (i = 0; i < np; i++) { float tmp = x[i] - avg; s += (tmp * tmp); } return s / ((float) np); } float stddev(int np, int32_t *x, float avg) { // Stddev = sqrt(variance) float var = variance(np, x, avg); return sqrt(var); } int mean_crosstimes(int np, int32_t *x, float avg) { int i; bool negative = x[0] < avg; int count = 0; // Count number of zero crossings for (x - avg) for (i = 0; i < np; i++) { if (negative) { if (x[i] > avg) { negative = false; count++; } } else { if (x[i] < avg) { negative = true; count++; } } } return count; } void stafeature(int np, int32_t *x, float *sta) { // Returns sta = [mean, std, abssum, mean_crosstimes) #ifdef CPU_ONLY //original CPU code float avg = average(np, x); sta[0] = avg; sta[1] = stddev(np, x, avg); sta[2] = abssum(np, x); sta[3] = mean_crosstimes(np, x, avg); #else //GPU code /* * Our high level strategy to calculate the average in parallel in this example is to split 
the input array into into a number of blocks (numBlocks). * Each block contains thus np/numBlocks elements * These blocks will be mapped to the Streaming Multiprocessors of the GPU. * For each block we calculate the sum * Finally the sums of all the blocks are added on the CPU */ //NOTE: take care np is a multiple of numBlocks for this example. int numBlocks=32; int threadsPerBlock=np/numBlocks; //i.e., this should have remainder==0 //Ignore the next bit untill you are inspecting the gpu kernel code, then refer back to it. On the first read just ignore it ;) //Because in this setup the amount of required shared memory depends on np we assume it is only known at runtime. (although you can of course get it from the input and assume it fixed for this assigment) //the number of required shared memory bytes need to be passed as a 3rd argument to the kernel call later on //see the remarks in the gpu_average code int sharedMemBytes = threadsPerBlock*sizeof(int32_t); //variable for holding return values of cuda functions cudaError_t err; //start by allocating room for array "x" on the global memory of the GPU int32_t* device_x; err=cudaMalloc(&device_x, np*sizeof(int32_t)); //Here we check for errors of this cuda call //See eeg.h for the implementation of this error check (it's not a default cuda function) cudaCheckError(err); //also allocate room for the all the sums of the blocks int32_t* device_blocksums; //Note that room is allocated in global memory for the sum of *each* threadblock err=cudaMalloc(&device_blocksums, numBlocks*sizeof(int32_t)); cudaCheckError(err); //also allocate room for the all the abssums of the blocks int32_t* device_blockabssums; //Note that room is allocated in global memory for the sum of *each* threadblock err=cudaMalloc(&device_blockabssums, numBlocks*sizeof(int32_t)); cudaCheckError(err); //Now copy array "x" from the CPU to the GPU err=cudaMemcpy(device_x,x, np*sizeof(int32_t), cudaMemcpyHostToDevice); cudaCheckError(err); //Compute the average on the GPU gpu_average<<<numBlocks,threadsPerBlock, sharedMemBytes>>>(device_x, device_blocksums); //Compute the abssum on the GPU gpu_abssum<<<numBlocks,threadsPerBlock, sharedMemBytes>>>(device_x, device_blockabssums); //We use "peekatlasterror" since a kernel launch does not return a cudaError_t to check for errors //cudaCheckError(cudaPeekAtLastError()); //copy the sums of each block back from GPU global memory to CPU memory int32_t blocksums[numBlocks]; err=cudaMemcpy(blocksums, device_blocksums, numBlocks*sizeof(int32_t), cudaMemcpyDeviceToHost); cudaCheckError(err); //copy the abssums of each block back from GPU global memory to CPU memory int32_t blockabssums[numBlocks]; err=cudaMemcpy(blockabssums, device_blockabssums, numBlocks*sizeof(int32_t), cudaMemcpyDeviceToHost); cudaCheckError(err); //free the memory on the GPU //Optimalisation Hint: if you do not free the memory, the values will be preserved between multiple kernel calls! 
//For example, the x-array will remain in the GPU global memory if you also map other features to the GPU err=cudaFree(device_x); cudaCheckError(err); err=cudaFree(device_blocksums); cudaCheckError(err); err=cudaFree(device_blockabssums); cudaCheckError(err); #ifdef DEBUG //print all the block sums calculated by CPU for(int b=0;b<numBlocks;b++){ int sum=0; for (int i=0;i<threadsPerBlock;i++) sum+=x[b*threadsPerBlock+i]; printf("CPU Block %d sum: %d\n",b,sum); } #endif #ifdef DEBUG //print all the block abssums calculated by CPU for(int b=0;b<numBlocks;b++){ int abs_sum=0; for (int i=0;i<threadsPerBlock;i++) abs_sum+=abs(x[b*threadsPerBlock+i]); printf("CPU Block %d abs_sum: %d\n",b,abs_sum); } #endif //Now add all the block sums on the CPU //(Note: if you have many blocks, you might consider mapping this to another GPU call of course) int32_t sum=0; for(uint32_t blk=0;blk<numBlocks;blk++) sum+=blocksums[blk]; float avg = (float)(sum)/(float)(np); //Now add all the block abssums on the CPU int32_t abs_sum=0; for(uint32_t blk=0;blk<numBlocks;blk++) abs_sum+=blockabssums[blk]; #ifdef DEBUG //Compare total sum of GPU and CPU printf("GPU Total sum: %d\n",sum); int cpu_sum=0; for(int i=0;i<np;i++) cpu_sum+=x[i]; printf("CPU Total sum: %d\n",cpu_sum); #endif #ifdef DEBUG //compare the average printf("GPU average: %f\n",avg); printf("CPU average: %f\n",(float)cpu_sum/(float)np); #endif #ifdef DEBUG //compare the average printf("GPU abs_sum: %f\n",(float)abs_sum); printf("CPU abs_sum: %f\n",(float)abssum(np, x)); #endif //calculate all other features on the CPU for this example sta[0] = avg; sta[1] = stddev(np, x, avg); sta[2] = abs_sum; sta[3] = mean_crosstimes(np, x, avg); #endif }
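// Standalone sketch (hypothetical names, not part of the assignment code above): the comments in
// gpu_average note that the modulo-based tree reduction is "not necessarily the optimal way".
// A common refinement is "sequential addressing": the stride starts at blockDim.x/2 and is halved
// each step, so the active threads stay contiguous, the expensive % operator disappears, and whole
// warps retire together. This assumes threadsPerBlock is a power of two; the launch configuration
// (numBlocks x threadsPerBlock plus dynamic shared memory) mirrors the original example.
#include <cstdio>
#include <cstdint>
#include <cuda_runtime.h>

__global__ void block_sum_sequential(const int32_t *x, int32_t *blocksums)
{
    extern __shared__ int32_t blockData[];
    unsigned int tid = threadIdx.x;
    unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
    blockData[tid] = x[gid];
    __syncthreads();

    // Halve the number of active threads each step; thread tid adds element tid + s.
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s)
            blockData[tid] += blockData[tid + s];
        __syncthreads();
    }
    if (tid == 0)
        blocksums[blockIdx.x] = blockData[0];
}

int main()
{
    const int numBlocks = 32, threadsPerBlock = 64, np = numBlocks * threadsPerBlock;
    int32_t h_x[np], h_blocksums[numBlocks];
    for (int i = 0; i < np; ++i) h_x[i] = i % 7 - 3;   // arbitrary test data

    int32_t *d_x, *d_blocksums;
    cudaMalloc(&d_x, np * sizeof(int32_t));
    cudaMalloc(&d_blocksums, numBlocks * sizeof(int32_t));
    cudaMemcpy(d_x, h_x, np * sizeof(int32_t), cudaMemcpyHostToDevice);

    block_sum_sequential<<<numBlocks, threadsPerBlock,
                           threadsPerBlock * sizeof(int32_t)>>>(d_x, d_blocksums);
    cudaMemcpy(h_blocksums, d_blocksums, numBlocks * sizeof(int32_t), cudaMemcpyDeviceToHost);

    int32_t gpu_sum = 0, cpu_sum = 0;
    for (int b = 0; b < numBlocks; ++b) gpu_sum += h_blocksums[b];
    for (int i = 0; i < np; ++i) cpu_sum += h_x[i];
    printf("GPU sum %d, CPU sum %d\n", gpu_sum, cpu_sum);

    cudaFree(d_x); cudaFree(d_blocksums);
    return 0;
}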
4a78922e44e35c4e3fe80183ff8e76f95b2d50fb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2022 NVIDIA CORPORATION * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "unary.h" #include "mathtypes/half.h" using namespace Legion; namespace triton { namespace backend { namespace legion { template <typename TI, typename TO> __global__ static void gpu_forward_cast(const TI* input, TO* output, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; output[offset] = (TO)input[offset]; } // Some unfortunate specializations because the compiler can't figure // out the best intermedidate type to convert half types to template <> __global__ void gpu_forward_cast<__half, int8_t>( const __half* input, int8_t* output, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; output[offset] = (short)input[offset]; } template <> __global__ void gpu_forward_cast<__half, int64_t>( const __half* input, int64_t* output, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; output[offset] = (long long)input[offset]; } template <> __global__ void gpu_forward_cast<int64_t, __half>( const int64_t* input, __half* output, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; output[offset] = (long long)input[offset]; } template <> __global__ void gpu_forward_cast<__half, uint8_t>( const __half* input, uint8_t* output, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; output[offset] = (unsigned short)input[offset]; } template <> __global__ void gpu_forward_cast<__half, uint64_t>( const __half* input, uint64_t* output, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; output[offset] = (unsigned long long)input[offset]; } template <> __global__ void gpu_forward_cast<uint64_t, __half>( const uint64_t* input, __half* output, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; output[offset] = (unsigned long long)input[offset]; } __global__ static void unary_forward_half( const __half* input, __half* output, const __half alpha, const __half beta, const __half scalar, const OperatorType optype, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; switch (optype) { case OP_EXP: { output[offset] = alpha * hexp(input[offset]) + beta * output[offset]; break; } case OP_LOG: { output[offset] = alpha * hlog(input[offset]) + beta * output[offset]; break; } case OP_SQRT: { output[offset] = alpha * hsqrt(input[offset]) + beta * output[offset]; break; } case OP_IDENTITY: { output[offset] = input[offset]; break; } case OP_SCALAR_MULTIPLY: { output[offset] = input[offset] * scalar; break; } case 
OP_SCALAR_ADD: { output[offset] = input[offset] + scalar; break; } case OP_SCALAR_SUB: { output[offset] = input[offset] - scalar; break; } case OP_SCALAR_TRUE_DIV: { output[offset] = input[offset] / scalar; break; } case OP_GELU: { output[offset] = __hmul( __hmul(input[offset], 0.5f), erfcf(__hmul(-input[offset], M_SQRT1_2))); break; } case OP_RECIPROCAL: { output[offset] = __hdiv(__half(1.f), input[offset]); break; } default: break; } } __global__ static void unary_forward_float( const float* input, float* output, const float alpha, const float beta, const float scalar, const OperatorType optype, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; switch (optype) { case OP_EXP: { output[offset] = alpha * expf(input[offset]) + beta * output[offset]; break; } case OP_LOG: { output[offset] = alpha * logf(input[offset]) + beta * output[offset]; break; } case OP_SQRT: { output[offset] = alpha * sqrtf(input[offset]) + beta * output[offset]; break; } case OP_IDENTITY: { output[offset] = input[offset]; break; } case OP_SCALAR_MULTIPLY: { output[offset] = input[offset] * scalar; break; } case OP_SCALAR_ADD: { output[offset] = input[offset] + scalar; break; } case OP_SCALAR_SUB: { output[offset] = input[offset] - scalar; break; } case OP_SCALAR_TRUE_DIV: { output[offset] = input[offset] / scalar; break; } case OP_GELU: { output[offset] = input[offset] * 0.5f * erfc(-input[offset] * M_SQRT1_2); break; } case OP_RECIPROCAL: { output[offset] = 1.f / input[offset]; break; } default: break; } } __global__ static void unary_forward_double( const double* input, double* output, const double alpha, const double beta, const double scalar, const OperatorType optype, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; switch (optype) { case OP_EXP: { output[offset] = alpha * exp(input[offset]) + beta * output[offset]; break; } case OP_LOG: { output[offset] = alpha * log(input[offset]) + beta * output[offset]; break; } case OP_SQRT: { output[offset] = alpha * sqrt(input[offset]) + beta * output[offset]; break; } case OP_IDENTITY: { output[offset] = input[offset]; break; } case OP_SCALAR_MULTIPLY: { output[offset] = input[offset] * scalar; break; } case OP_SCALAR_ADD: { output[offset] = input[offset] + scalar; break; } case OP_SCALAR_SUB: { output[offset] = input[offset] - scalar; break; } case OP_SCALAR_TRUE_DIV: { output[offset] = input[offset] / scalar; break; } case OP_GELU: { output[offset] = input[offset] * 0.5 * erfc(-input[offset] * M_SQRT1_2); break; } case OP_RECIPROCAL: { output[offset] = 1.0 / input[offset]; break; } default: break; } } template <typename T> __host__ static void forward_cast( DataType output_type, ::hipStream_t stream, const void* input_ptr, void* output_ptr, size_t num_elements) { const size_t blocks = (num_elements + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK; switch (output_type) { case DT_HALF: { hipLaunchKernelGGL(( gpu_forward_cast<T, __half>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, (const T*)input_ptr, (__half*)output_ptr, num_elements); break; } case DT_FLOAT: { hipLaunchKernelGGL(( gpu_forward_cast<T, float>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, (const T*)input_ptr, (float*)output_ptr, num_elements); break; } case DT_DOUBLE: { hipLaunchKernelGGL(( gpu_forward_cast<T, double>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, (const T*)input_ptr, (double*)output_ptr, num_elements); break; } case DT_INT8: { 
hipLaunchKernelGGL(( gpu_forward_cast<T, int8_t>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, (const T*)input_ptr, (int8_t*)output_ptr, num_elements); break; } case DT_INT16: { hipLaunchKernelGGL(( gpu_forward_cast<T, int16_t>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, (const T*)input_ptr, (int16_t*)output_ptr, num_elements); break; } case DT_INT32: { hipLaunchKernelGGL(( gpu_forward_cast<T, int32_t>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, (const T*)input_ptr, (int32_t*)output_ptr, num_elements); break; } case DT_INT64: { hipLaunchKernelGGL(( gpu_forward_cast<T, int64_t>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, (const T*)input_ptr, (int64_t*)output_ptr, num_elements); break; } case DT_UINT8: { hipLaunchKernelGGL(( gpu_forward_cast<T, uint8_t>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, (const T*)input_ptr, (uint8_t*)output_ptr, num_elements); break; } case DT_UINT16: { hipLaunchKernelGGL(( gpu_forward_cast<T, uint16_t>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, (const T*)input_ptr, (uint16_t*)output_ptr, num_elements); break; } case DT_UINT32: { hipLaunchKernelGGL(( gpu_forward_cast<T, uint32_t>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, (const T*)input_ptr, (uint32_t*)output_ptr, num_elements); break; } case DT_UINT64: { hipLaunchKernelGGL(( gpu_forward_cast<T, uint64_t>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, (const T*)input_ptr, (uint64_t*)output_ptr, num_elements); break; } case DT_BOOLEAN: { hipLaunchKernelGGL(( gpu_forward_cast<T, bool>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, (const T*)input_ptr, (bool*)output_ptr, num_elements); break; } default: abort(); } } __host__ /*static*/ void UnaryOperator::forward_kernel( const UnaryArgs* args, ::hipStream_t stream, const void* input_ptr, void* output_ptr, size_t num_elements) { if (args->op_type == OP_CAST) { switch (args->datatype) { case DT_HALF: { forward_cast<__half>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_FLOAT: { forward_cast<float>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_DOUBLE: { forward_cast<double>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_INT8: { forward_cast<int8_t>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_INT16: { forward_cast<int16_t>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_INT32: { forward_cast<int32_t>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_INT64: { forward_cast<int64_t>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_UINT8: { forward_cast<uint8_t>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_UINT16: { forward_cast<uint16_t>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_UINT32: { forward_cast<uint32_t>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_UINT64: { forward_cast<uint64_t>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_BOOLEAN: { forward_cast<bool>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } default: abort(); } } else if (use_cudnn(args->op_type)) { if (args->datatype == DT_DOUBLE) { double alpha = 1.0, beta = 0.0; CHECK_CUDNN(cudnnActivationForward( args->cudnn, args->actiDesc, &alpha, args->inputTensor, input_ptr, &beta, args->outputTensor, output_ptr)); } else { float alpha = 1.f, beta = 0.f; 
CHECK_CUDNN(cudnnActivationForward( args->cudnn, args->actiDesc, &alpha, args->inputTensor, input_ptr, &beta, args->outputTensor, output_ptr)); } } else { const size_t blocks = (num_elements + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK; assert( (args->op_type == OP_EXP) || (args->op_type == OP_LOG) || (args->op_type == OP_SQRT) || (args->op_type == OP_IDENTITY) || (args->op_type == OP_SCALAR_MULTIPLY) || (args->op_type == OP_SCALAR_ADD) || (args->op_type == OP_SCALAR_SUB) || (args->op_type == OP_SCALAR_TRUE_DIV) || (args->op_type == OP_GELU) || (args->op_type == OP_RECIPROCAL)); switch (args->datatype) { case DT_HALF: { __half alpha = 1.f, beta = 0.f; hipLaunchKernelGGL(( unary_forward_half), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, (const __half*)input_ptr, (__half*)output_ptr, alpha, beta, args->scalar.half_value, args->op_type, num_elements); break; } case DT_FLOAT: { float alpha = 1.f, beta = 0.f; hipLaunchKernelGGL(( unary_forward_float), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, (const float*)input_ptr, (float*)output_ptr, alpha, beta, args->scalar.float_value, args->op_type, num_elements); break; } case DT_DOUBLE: { double alpha = 1.0, beta = 0.0; hipLaunchKernelGGL(( unary_forward_double), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, (const double*)input_ptr, (double*)output_ptr, alpha, beta, args->scalar.double_value, args->op_type, num_elements); break; } default: // TODO support for other data types like int8 abort(); } } } }}} // namespace triton::backend::legion
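// Standalone sketch (hypothetical names, not taken from the file above): the gpu_forward_cast
// specializations route __half-to-integer conversions through (short) / (long long) style
// intermediates because the compiler cannot pick an intermediate type on its own. The kernel
// below spells out the same workaround for one case with an explicit float intermediate,
// using only documented cuda_fp16.h conversions (__half2float / __float2half).
#include <cstdio>
#include <cstdint>
#include <cuda_fp16.h>
#include <cuda_runtime.h>

__global__ void cast_half_to_int8(const __half *in, int8_t *out, size_t n)
{
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    // Convert through float explicitly instead of relying on an implicit __half -> int8_t path.
    out[i] = static_cast<int8_t>(__half2float(in[i]));
}

int main()
{
    const size_t n = 8;
    __half h_in[n];
    int8_t h_out[n];
    for (size_t i = 0; i < n; ++i) h_in[i] = __float2half((float)i - 3.5f);

    __half *d_in; int8_t *d_out;
    cudaMalloc(&d_in, n * sizeof(__half));
    cudaMalloc(&d_out, n * sizeof(int8_t));
    cudaMemcpy(d_in, h_in, n * sizeof(__half), cudaMemcpyHostToDevice);

    cast_half_to_int8<<<1, 32>>>(d_in, d_out, n);
    cudaMemcpy(h_out, d_out, n * sizeof(int8_t), cudaMemcpyDeviceToHost);
    for (size_t i = 0; i < n; ++i)
        printf("%f -> %d\n", __half2float(h_in[i]), (int)h_out[i]);

    cudaFree(d_in); cudaFree(d_out);
    return 0;
}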
4a78922e44e35c4e3fe80183ff8e76f95b2d50fb.cu
/* Copyright 2022 NVIDIA CORPORATION * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "unary.h" #include "mathtypes/half.h" using namespace Legion; namespace triton { namespace backend { namespace legion { template <typename TI, typename TO> __global__ static void gpu_forward_cast(const TI* input, TO* output, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; output[offset] = (TO)input[offset]; } // Some unfortunate specializations because the compiler can't figure // out the best intermedidate type to convert half types to template <> __global__ void gpu_forward_cast<__half, int8_t>( const __half* input, int8_t* output, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; output[offset] = (short)input[offset]; } template <> __global__ void gpu_forward_cast<__half, int64_t>( const __half* input, int64_t* output, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; output[offset] = (long long)input[offset]; } template <> __global__ void gpu_forward_cast<int64_t, __half>( const int64_t* input, __half* output, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; output[offset] = (long long)input[offset]; } template <> __global__ void gpu_forward_cast<__half, uint8_t>( const __half* input, uint8_t* output, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; output[offset] = (unsigned short)input[offset]; } template <> __global__ void gpu_forward_cast<__half, uint64_t>( const __half* input, uint64_t* output, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; output[offset] = (unsigned long long)input[offset]; } template <> __global__ void gpu_forward_cast<uint64_t, __half>( const uint64_t* input, __half* output, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; output[offset] = (unsigned long long)input[offset]; } __global__ static void unary_forward_half( const __half* input, __half* output, const __half alpha, const __half beta, const __half scalar, const OperatorType optype, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; switch (optype) { case OP_EXP: { output[offset] = alpha * hexp(input[offset]) + beta * output[offset]; break; } case OP_LOG: { output[offset] = alpha * hlog(input[offset]) + beta * output[offset]; break; } case OP_SQRT: { output[offset] = alpha * hsqrt(input[offset]) + beta * output[offset]; break; } case OP_IDENTITY: { output[offset] = input[offset]; break; } case OP_SCALAR_MULTIPLY: { output[offset] = input[offset] * scalar; break; } case OP_SCALAR_ADD: { output[offset] = input[offset] + scalar; break; } case OP_SCALAR_SUB: { 
output[offset] = input[offset] - scalar; break; } case OP_SCALAR_TRUE_DIV: { output[offset] = input[offset] / scalar; break; } case OP_GELU: { output[offset] = __hmul( __hmul(input[offset], 0.5f), erfcf(__hmul(-input[offset], M_SQRT1_2))); break; } case OP_RECIPROCAL: { output[offset] = __hdiv(__half(1.f), input[offset]); break; } default: break; } } __global__ static void unary_forward_float( const float* input, float* output, const float alpha, const float beta, const float scalar, const OperatorType optype, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; switch (optype) { case OP_EXP: { output[offset] = alpha * expf(input[offset]) + beta * output[offset]; break; } case OP_LOG: { output[offset] = alpha * logf(input[offset]) + beta * output[offset]; break; } case OP_SQRT: { output[offset] = alpha * sqrtf(input[offset]) + beta * output[offset]; break; } case OP_IDENTITY: { output[offset] = input[offset]; break; } case OP_SCALAR_MULTIPLY: { output[offset] = input[offset] * scalar; break; } case OP_SCALAR_ADD: { output[offset] = input[offset] + scalar; break; } case OP_SCALAR_SUB: { output[offset] = input[offset] - scalar; break; } case OP_SCALAR_TRUE_DIV: { output[offset] = input[offset] / scalar; break; } case OP_GELU: { output[offset] = input[offset] * 0.5f * erfc(-input[offset] * M_SQRT1_2); break; } case OP_RECIPROCAL: { output[offset] = 1.f / input[offset]; break; } default: break; } } __global__ static void unary_forward_double( const double* input, double* output, const double alpha, const double beta, const double scalar, const OperatorType optype, const size_t volume) { const size_t offset = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (offset >= volume) return; switch (optype) { case OP_EXP: { output[offset] = alpha * exp(input[offset]) + beta * output[offset]; break; } case OP_LOG: { output[offset] = alpha * log(input[offset]) + beta * output[offset]; break; } case OP_SQRT: { output[offset] = alpha * sqrt(input[offset]) + beta * output[offset]; break; } case OP_IDENTITY: { output[offset] = input[offset]; break; } case OP_SCALAR_MULTIPLY: { output[offset] = input[offset] * scalar; break; } case OP_SCALAR_ADD: { output[offset] = input[offset] + scalar; break; } case OP_SCALAR_SUB: { output[offset] = input[offset] - scalar; break; } case OP_SCALAR_TRUE_DIV: { output[offset] = input[offset] / scalar; break; } case OP_GELU: { output[offset] = input[offset] * 0.5 * erfc(-input[offset] * M_SQRT1_2); break; } case OP_RECIPROCAL: { output[offset] = 1.0 / input[offset]; break; } default: break; } } template <typename T> __host__ static void forward_cast( DataType output_type, ::cudaStream_t stream, const void* input_ptr, void* output_ptr, size_t num_elements) { const size_t blocks = (num_elements + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK; switch (output_type) { case DT_HALF: { gpu_forward_cast<T, __half><<<blocks, THREADS_PER_BLOCK, 0, stream>>>( (const T*)input_ptr, (__half*)output_ptr, num_elements); break; } case DT_FLOAT: { gpu_forward_cast<T, float><<<blocks, THREADS_PER_BLOCK, 0, stream>>>( (const T*)input_ptr, (float*)output_ptr, num_elements); break; } case DT_DOUBLE: { gpu_forward_cast<T, double><<<blocks, THREADS_PER_BLOCK, 0, stream>>>( (const T*)input_ptr, (double*)output_ptr, num_elements); break; } case DT_INT8: { gpu_forward_cast<T, int8_t><<<blocks, THREADS_PER_BLOCK, 0, stream>>>( (const T*)input_ptr, (int8_t*)output_ptr, num_elements); break; } case DT_INT16: { gpu_forward_cast<T, 
int16_t><<<blocks, THREADS_PER_BLOCK, 0, stream>>>( (const T*)input_ptr, (int16_t*)output_ptr, num_elements); break; } case DT_INT32: { gpu_forward_cast<T, int32_t><<<blocks, THREADS_PER_BLOCK, 0, stream>>>( (const T*)input_ptr, (int32_t*)output_ptr, num_elements); break; } case DT_INT64: { gpu_forward_cast<T, int64_t><<<blocks, THREADS_PER_BLOCK, 0, stream>>>( (const T*)input_ptr, (int64_t*)output_ptr, num_elements); break; } case DT_UINT8: { gpu_forward_cast<T, uint8_t><<<blocks, THREADS_PER_BLOCK, 0, stream>>>( (const T*)input_ptr, (uint8_t*)output_ptr, num_elements); break; } case DT_UINT16: { gpu_forward_cast<T, uint16_t><<<blocks, THREADS_PER_BLOCK, 0, stream>>>( (const T*)input_ptr, (uint16_t*)output_ptr, num_elements); break; } case DT_UINT32: { gpu_forward_cast<T, uint32_t><<<blocks, THREADS_PER_BLOCK, 0, stream>>>( (const T*)input_ptr, (uint32_t*)output_ptr, num_elements); break; } case DT_UINT64: { gpu_forward_cast<T, uint64_t><<<blocks, THREADS_PER_BLOCK, 0, stream>>>( (const T*)input_ptr, (uint64_t*)output_ptr, num_elements); break; } case DT_BOOLEAN: { gpu_forward_cast<T, bool><<<blocks, THREADS_PER_BLOCK, 0, stream>>>( (const T*)input_ptr, (bool*)output_ptr, num_elements); break; } default: abort(); } } __host__ /*static*/ void UnaryOperator::forward_kernel( const UnaryArgs* args, ::cudaStream_t stream, const void* input_ptr, void* output_ptr, size_t num_elements) { if (args->op_type == OP_CAST) { switch (args->datatype) { case DT_HALF: { forward_cast<__half>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_FLOAT: { forward_cast<float>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_DOUBLE: { forward_cast<double>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_INT8: { forward_cast<int8_t>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_INT16: { forward_cast<int16_t>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_INT32: { forward_cast<int32_t>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_INT64: { forward_cast<int64_t>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_UINT8: { forward_cast<uint8_t>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_UINT16: { forward_cast<uint16_t>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_UINT32: { forward_cast<uint32_t>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_UINT64: { forward_cast<uint64_t>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } case DT_BOOLEAN: { forward_cast<bool>( args->casttype, stream, input_ptr, output_ptr, num_elements); break; } default: abort(); } } else if (use_cudnn(args->op_type)) { if (args->datatype == DT_DOUBLE) { double alpha = 1.0, beta = 0.0; CHECK_CUDNN(cudnnActivationForward( args->cudnn, args->actiDesc, &alpha, args->inputTensor, input_ptr, &beta, args->outputTensor, output_ptr)); } else { float alpha = 1.f, beta = 0.f; CHECK_CUDNN(cudnnActivationForward( args->cudnn, args->actiDesc, &alpha, args->inputTensor, input_ptr, &beta, args->outputTensor, output_ptr)); } } else { const size_t blocks = (num_elements + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK; assert( (args->op_type == OP_EXP) || (args->op_type == OP_LOG) || (args->op_type == OP_SQRT) || (args->op_type == OP_IDENTITY) || (args->op_type == OP_SCALAR_MULTIPLY) || (args->op_type == OP_SCALAR_ADD) || 
(args->op_type == OP_SCALAR_SUB) || (args->op_type == OP_SCALAR_TRUE_DIV) || (args->op_type == OP_GELU) || (args->op_type == OP_RECIPROCAL)); switch (args->datatype) { case DT_HALF: { __half alpha = 1.f, beta = 0.f; unary_forward_half<<<blocks, THREADS_PER_BLOCK, 0, stream>>>( (const __half*)input_ptr, (__half*)output_ptr, alpha, beta, args->scalar.half_value, args->op_type, num_elements); break; } case DT_FLOAT: { float alpha = 1.f, beta = 0.f; unary_forward_float<<<blocks, THREADS_PER_BLOCK, 0, stream>>>( (const float*)input_ptr, (float*)output_ptr, alpha, beta, args->scalar.float_value, args->op_type, num_elements); break; } case DT_DOUBLE: { double alpha = 1.0, beta = 0.0; unary_forward_double<<<blocks, THREADS_PER_BLOCK, 0, stream>>>( (const double*)input_ptr, (double*)output_ptr, alpha, beta, args->scalar.double_value, args->op_type, num_elements); break; } default: // TODO support for other data types like int8 abort(); } } } }}} // namespace triton::backend::legion
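// Standalone sketch (hypothetical names, not part of the file above): the unary_forward_* kernels
// follow the cuDNN-style blending convention  output = alpha * op(input) + beta * output  for
// OP_EXP / OP_LOG / OP_SQRT, which is why forward_kernel initialises alpha = 1 and beta = 0 (a
// plain overwrite). The toy kernel below shows the same convention for an exp operator, with a
// nonzero beta so the blend into the existing output is visible.
#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>

__global__ void blended_exp(const float *in, float *out, float alpha, float beta, size_t n)
{
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    out[i] = alpha * expf(in[i]) + beta * out[i];   // beta != 0 accumulates into out
}

int main()
{
    const size_t n = 4;
    float h_in[n]  = {0.f, 1.f, 2.f, 3.f};
    float h_out[n] = {10.f, 10.f, 10.f, 10.f};      // pre-existing output values

    float *d_in, *d_out;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_out, h_out, n * sizeof(float), cudaMemcpyHostToDevice);

    // alpha = 2, beta = 1: out = 2 * exp(in) + old out
    blended_exp<<<1, 32>>>(d_in, d_out, 2.f, 1.f, n);
    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (size_t i = 0; i < n; ++i)
        printf("expected %f, got %f\n", 2.f * expf(h_in[i]) + 10.f, h_out[i]);

    cudaFree(d_in); cudaFree(d_out);
    return 0;
}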
5022f557e39ddd6e837d9aa54fdac746c281e5eb.hip
// !!! This is a file automatically generated by hipify!!! /** * * OHIO STATE UNIVERSITY SOFTWARE DISTRIBUTION LICENSE * * Parallel CCD++ on GPU (the Software) Copyright (c) 2017, The Ohio State * University. All rights reserved. * * The Software is available for download and use subject to the terms and * conditions of this License. Access or use of the Software constitutes acceptance * and agreement to the terms and conditions of this License. Redistribution and * use of the Software in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the capitalized paragraph below. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the capitalized paragraph below in the documentation * and/or other materials provided with the distribution. * * 3. The names of Ohio State University, or its faculty, staff or students may not * be used to endorse or promote products derived from the Software without * specific prior written permission. * * This software was produced with support from the National Science Foundation * (NSF) through Award 1629548. Nothing in this work should be construed as * reflecting the official policy or position of the Defense Department, the United * States government, Ohio State University. * * THIS SOFTWARE HAS BEEN APPROVED FOR PUBLIC RELEASE, UNLIMITED DISTRIBUTION. THE * SOFTWARE IS PROVIDED AS IS AND WITHOUT ANY EXPRESS, IMPLIED OR STATUTORY * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF ACCURACY, COMPLETENESS, * NONINFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. ACCESS OR USE OF THE SOFTWARE IS ENTIRELY AT THE USERS RISK. IN * NO EVENT SHALL OHIO STATE UNIVERSITY OR ITS FACULTY, STAFF OR STUDENTS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE SOFTWARE * USER SHALL INDEMNIFY, DEFEND AND HOLD HARMLESS OHIO STATE UNIVERSITY AND ITS * FACULTY, STAFF AND STUDENTS FROM ANY AND ALL CLAIMS, ACTIONS, DAMAGES, LOSSES, * LIABILITIES, COSTS AND EXPENSES, INCLUDING ATTORNEYS FEES AND COURT COSTS, * DIRECTLY OR INDIRECTLY ARISING OUT OF OR IN CONNECTION WITH ACCESS OR USE OF THE * SOFTWARE. * */ /** * * Author: * Israt ([email protected]) * * Contacts: * Israt ([email protected]) * Aravind Sukumaran-Rajam ([email protected]) * P. 
(Saday) Sadayappan ([email protected]) * */ #include "device_utilities.h" #include "util.h" #include <hip/hip_runtime.h> #include <vector> #include "helper_fusedR.h" #include "helper_updateH.h" inline hipError_t checkCuda(hipError_t result, int s) { if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error in line : %s - %d\n", hipGetErrorString(result), s); assert(result == hipSuccess); } return result; } // Cyclic Coordinate Descent for Matrix Factorization void ccdr1(SparseMatrix &R, MatData &W, MatData &H, TestData &T, Options &param) { int k = param.k, maxiter = param.maxiter, inneriter = param.maxinneriter, tileSize_H = param.tileSizeH, tileSize_W = param.tileSizeW; DTYPE lambda = param.lambda, *d_R_val, *d_R_val_t, *d_gArrU, *d_hArrU, *d_gArrV, *d_hArrV, *d_u, *d_v, oldobj = 0; int LB[NUM_THRDS], UB[NUM_THRDS], LB_Rt[NUM_THRDS], UB_Rt[NUM_THRDS], *d_R_colPtr, *d_R_rowPtr, *d_row_lim_R, *d_row_lim_Rt, sum = 0, *d_test_row, *d_test_col, i, j; DTYPE reg = 0, loss, *d_loss, *d_v_new, *d_Wt, *d_Ht, *d_W, *d_H, *d_test_val, v, *d_pred_v, *d_rmse, *d_fundec_col; unsigned *d_R_rowIdx, *d_R_colIdx; DTYPE *pred_v = (DTYPE*) malloc(T.nnz_ * sizeof(DTYPE)); DTYPE *rmse = (DTYPE*) malloc(T.nnz_ * sizeof(DTYPE)); //omp_set_num_threads(param.threads); // Create transpose view of R SparseMatrix Rt; Rt = R.get_shallow_transpose(); // initial value of the regularization term // H is a zero matrix now. for (int t = 0; t < k; ++t) for (unsigned c = 0; c < R.cols_; ++c) H[t][c] = 0; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float mili = 0, copyTime = 0; //**************************CUDA COPY************************ checkCuda(hipDeviceSetCacheConfig(hipFuncCachePreferL1), __LINE__); size_t RCols_memsize = (R.cols_) * sizeof(DTYPE); size_t RRows_memsize = (R.rows_) * sizeof(DTYPE); size_t d_memsize = 1 * sizeof(DTYPE); size_t R_colPtr_memsize = (R.cols_ + 1) * sizeof(int); size_t R_rowPtr_memsize = (R.rows_ + 1) * sizeof(int); size_t R_rowIdx_memsize = R.nnz_ * sizeof(unsigned); size_t R_val_memsize = R.nnz_ * sizeof(DTYPE); checkCuda(hipMalloc((void**) &d_W, k * R.rows_ * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &d_H, k * R.cols_ * sizeof(DTYPE)), 1); checkCuda(hipMalloc((void**) &d_Wt, R.rows_ * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &d_Ht, R.cols_ * sizeof(DTYPE)), 1); checkCuda(hipMalloc((void**) &d_u, RRows_memsize), 0); checkCuda(hipMalloc((void**) &d_v, RCols_memsize), 1); checkCuda(hipMalloc((void**) &d_v_new, RCols_memsize), 1); checkCuda(hipMalloc((void**) &d_gArrU, RRows_memsize), 2); checkCuda(hipMalloc((void**) &d_hArrU, RRows_memsize), 3); checkCuda(hipMalloc((void**) &d_gArrV, RCols_memsize), 2); checkCuda(hipMalloc((void**) &d_hArrV, RCols_memsize), 3); checkCuda(hipMalloc((void**) &d_R_colPtr, R_colPtr_memsize), 4); checkCuda(hipMalloc((void**) &d_R_rowPtr, R_rowPtr_memsize), 5); checkCuda(hipMalloc((void**) &d_R_rowIdx, R_rowIdx_memsize), 6); checkCuda(hipMalloc((void**) &d_R_colIdx, R_rowIdx_memsize), 8); checkCuda(hipMalloc((void**) &d_R_val, R_val_memsize), 7); checkCuda(hipMalloc((void**) &d_R_val_t, R_val_memsize), 7); checkCuda(hipMalloc((void**) &d_loss, 1 * sizeof(DTYPE)), 11); checkCuda(hipMalloc((void**) &d_test_row, (T.nnz_ + 1) * sizeof(int)), 7); checkCuda(hipMalloc((void**) &d_test_col, (T.nnz_ + 1) * sizeof(int)), 7); checkCuda(hipMalloc((void**) &d_test_val, (T.nnz_ + 1) * sizeof(DTYPE)), 7); checkCuda(hipMalloc((void**) &d_pred_v, (T.nnz_ + 1) * sizeof(DTYPE)), 7); checkCuda(hipMalloc((void**) &d_rmse, (T.nnz_ + 1) * 
sizeof(DTYPE)), 7); checkCuda(hipEventRecord(start), __LINE__); hipMemcpy(d_R_colPtr, R.get_csc_col_ptr(), R_colPtr_memsize, hipMemcpyHostToDevice); hipMemcpy(d_R_rowPtr, R.get_csr_row_ptr(), R_rowPtr_memsize, hipMemcpyHostToDevice); hipMemcpy(d_R_rowIdx, R.get_csc_row_indx(), R_rowIdx_memsize, hipMemcpyHostToDevice); hipMemcpy(d_R_colIdx, R.get_csr_col_indx(), R_rowIdx_memsize, hipMemcpyHostToDevice); hipMemcpy(d_R_val, R.get_csc_val(), R_val_memsize, hipMemcpyHostToDevice); hipMemcpy(d_R_val_t, R.get_csr_val(), R_val_memsize, hipMemcpyHostToDevice); for (int t = 0; t < k; ++t) hipMemcpy(d_W + t * R.rows_, &(W[t][0]), R.rows_ * sizeof(DTYPE), hipMemcpyHostToDevice); hipMemset(d_H, 0, k * R.cols_ * sizeof(DTYPE)); //cpoying test hipMemcpy(d_test_row, T.getTestRow(), (T.nnz_ + 1) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_test_col, T.getTestCol(), (T.nnz_ + 1) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_test_val, T.getTestVal(), (T.nnz_ + 1) * sizeof(DTYPE), hipMemcpyHostToDevice); checkCuda(hipEventRecord(stop), __LINE__); checkCuda(hipDeviceSynchronize(), __LINE__); checkCuda(hipEventElapsedTime(&mili, start, stop), __LINE__); copyTime = mili; float ACSRTime = 0, textureACSRTime = 0, innerLoopTime = 0; float ACSRPreProcessTime; hipStream_t streamT; checkCuda(hipStreamCreate(&streamT), __LINE__); create_stream(); //****************** preprrocessing TILING************* double t1 = seconds(); int total_tileInRows = (R.rows_ + tileSize_H - 1) / tileSize_H; int total_tileInCols = (R.cols_ + tileSize_W - 1) / tileSize_W; MatInt row_lim_R = MatInt(total_tileInRows + 1, VecInt(R.cols_ + 1)); MatInt row_lim_Rt = MatInt(total_tileInCols + 1, VecInt(R.rows_ + 1)); MatInt row_lim_R_odd = MatInt(total_tileInRows + 1, VecInt(R.cols_ + 1)); MatInt row_lim_Rt_odd = MatInt(total_tileInCols + 1, VecInt(R.rows_ + 1)); make_tile(R, row_lim_R, tileSize_H); make_tile(Rt, row_lim_Rt, tileSize_W); //copying tiles limit rowPointers checkCuda(hipEventRecord(start), __LINE__); checkCuda( hipMalloc((void**) &d_row_lim_R, (total_tileInRows + 1) * (R.cols_ + 1) * sizeof(int)), 0); checkCuda( hipMalloc((void**) &d_row_lim_Rt, (total_tileInCols + 1) * (R.rows_ + 1) * sizeof(int)), 0); checkCuda( hipMemcpy(d_row_lim_R, R.get_csc_col_ptr(), (R.cols_ + 1) * sizeof(int), hipMemcpyHostToDevice), __LINE__); checkCuda( hipMemcpy(d_row_lim_Rt, R.get_csr_row_ptr(), (R.rows_ + 1) * sizeof(int), hipMemcpyHostToDevice), __LINE__); for (int tile = tileSize_H; tile < (R.rows_ + tileSize_H - 1); tile += tileSize_H) { int tile_no = tile / tileSize_H; // - 1; checkCuda( hipMemcpy(d_row_lim_R + tile_no * (R.cols_ + 1), &(row_lim_R[tile_no][0]), (R.cols_ + 1) * sizeof(int), hipMemcpyHostToDevice), __LINE__); } for (int tile = tileSize_W; tile < (R.cols_ + tileSize_W - 1); tile += tileSize_W) { int tile_no = tile / tileSize_W; // - 1; checkCuda( hipMemcpy(d_row_lim_Rt + (tile_no * R.rows_) + tile_no, &(row_lim_Rt[tile_no][0]), (R.rows_ + 1) * sizeof(int), hipMemcpyHostToDevice), __LINE__); } mili = cuda_timerEnd(start, stop, streamT); copyTime = mili; //******************PreProcess for TILED binning******************************* checkCuda(hipEventRecord(start), __LINE__); int *tiled_count[total_tileInRows], *tiled_count_Rt[total_tileInCols]; for (int i = 0; i < total_tileInRows; ++i) tiled_count[i] = (int*) malloc(NUM_THRDS * sizeof(int)); for (int i = 0; i < total_tileInCols; ++i) tiled_count_Rt[i] = (int*) malloc(NUM_THRDS * sizeof(int)); int *tiled_rowGroupPtr, *tiled_rowGroupPtr_Rt; // Extract CSR group info on CPU int 
*tiled_host_rowGroupPtr[total_tileInRows], *tiled_host_rowGroupPtr_Rt[total_tileInCols]; for (int i = 0; i < total_tileInRows; ++i) tiled_host_rowGroupPtr[i] = (int*) malloc( NUM_THRDS * R.cols_ * sizeof(int)); for (int i = 0; i < total_tileInCols; ++i) tiled_host_rowGroupPtr_Rt[i] = (int*) malloc( NUM_THRDS * R.rows_ * sizeof(int)); for (int tile = tileSize_H; tile < (R.rows_ + tileSize_H - 1); tile += tileSize_H) { int tile_no = tile / tileSize_H - 1; tiled_binning(R, (tiled_host_rowGroupPtr[tile_no]), LB, UB, tiled_count[tile_no], row_lim_R, tile_no); } for (int tile = tileSize_W; tile < (R.cols_ + tileSize_W - 1); tile += tileSize_W) { int tile_no = tile / tileSize_W - 1; tiled_binning(Rt, (tiled_host_rowGroupPtr_Rt[tile_no]), LB_Rt, UB_Rt, tiled_count_Rt[tile_no], row_lim_Rt, tile_no); } checkCuda( hipMalloc((void **) &tiled_rowGroupPtr, total_tileInRows * R.cols_ * sizeof(int)), __LINE__); checkCuda( hipMalloc((void **) &tiled_rowGroupPtr_Rt, total_tileInCols * R.rows_ * sizeof(int)), __LINE__); int *test1 = (int*) malloc((R.cols_ + 1) * sizeof(int)); //del for (int tile = tileSize_H; tile < (R.rows_ + tileSize_H - 1); tile += tileSize_H) { int tile_no = tile / tileSize_H - 1; sum = 0; for (int i = 0; i < NUM_THRDS; i++) { if (tiled_count[tile_no][i] > 0) { checkCuda( hipMemcpy( tiled_rowGroupPtr + (tile_no * R.cols_) + sum, &(tiled_host_rowGroupPtr[tile_no][i * R.cols_]), tiled_count[tile_no][i] * sizeof(int), hipMemcpyHostToDevice), __LINE__); sum += tiled_count[tile_no][i]; } } } for (int tile = tileSize_W; tile < (Rt.rows_ + tileSize_W - 1); tile += tileSize_W) { int tile_no = tile / tileSize_W - 1; sum = 0; for (int i = 0; i < NUM_THRDS; i++) { if (tiled_count_Rt[tile_no][i] > 0) { checkCuda( hipMemcpy( tiled_rowGroupPtr_Rt + (tile_no * Rt.cols_) + sum, &(tiled_host_rowGroupPtr_Rt[tile_no][i * R.rows_]), tiled_count_Rt[tile_no][i] * sizeof(int), hipMemcpyHostToDevice), __LINE__); sum += tiled_count_Rt[tile_no][i]; } } } mili = cuda_timerEnd(start, stop, streamT); copyTime = mili; //********************STARTING CCD++ ALGORTIHM************************ printf("tileSize_H,W: %d, %d k: %d lambda: %f\n", tileSize_H, tileSize_W, k, lambda); float mergeR = 0, mergeRT = 0, updateR = 0, updateRT = 0; for (int oiter = 1; oiter <= maxiter; ++oiter) { int early_stop = 0, kk = 0; for (int tt = 0; tt < k; ++tt) { int t = tt; VecData &Wt = W[t], &Ht = H[t]; hipMemset(d_hArrU, 0, RRows_memsize); hipMemset(d_gArrV, 0, RCols_memsize); hipMemset(d_hArrV, 0, RCols_memsize); hipMemset(d_gArrU, 0, RRows_memsize); //if (oiter > 1) { //**************************Updating R with add true********************************** mergeR = 0; for (int tile = tileSize_H; tile < (R.rows_ + tileSize_H - 1); tile += tileSize_H) { int tile_no = tile / tileSize_H; //printf("*****tile no %d\n", tile_no ); cuda_timerStart(start, streamT); if (t == 0) kk = t; else kk = t - 1; helper_UpdateR( d_row_lim_R + ((tile_no - 1) * (R.cols_ + 1)), d_row_lim_R + (tile_no * R.cols_) + tile_no, d_R_rowIdx, d_R_val, d_W + t * R.rows_, d_H + t * R.cols_, R.rows_, R.cols_, true, tiled_rowGroupPtr + ((tile_no - 1) * R.cols_), &(tiled_count[tile_no - 1][0]), lambda, d_gArrV, d_hArrV, d_W + t * R.rows_, d_W + kk * R.rows_, d_H + kk * R.cols_, t); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; mergeR += mili; } cuda_timerStart(start, streamT); hipLaunchKernelGGL(( assignment), dim3((R.cols_ + 1023) / 1024), dim3(1024), 0, 0, d_R_colPtr, d_v_new, d_gArrV, d_hArrV, lambda, R.cols_); mili = cuda_timerEnd(start, stop, streamT); 
ACSRTime += mili; mergeR += mili; if (oiter == 1 && (t == 1)) printf("time to merge R %f\n", mergeR); //**************************Updating RTranspose with add true********************************** mergeRT = 0; for (int tile = tileSize_W; tile < (R.cols_ + tileSize_W - 1); tile += tileSize_W) { int tile_no = tile / tileSize_W; //printf("tile_no from RT %d\n", tile_no); cuda_timerStart(start, streamT); if (t == 0) kk = t; else kk = t - 1; helper_UpdateR( d_row_lim_Rt + ((tile_no - 1) * (R.rows_ + 1)), d_row_lim_Rt + (tile_no * R.rows_) + (tile_no), d_R_colIdx, d_R_val_t, d_H + t * R.cols_, d_W + t * R.rows_, R.cols_, R.rows_, true, tiled_rowGroupPtr_Rt + ((tile_no - 1) * Rt.cols_), &(tiled_count_Rt[tile_no - 1][0]), lambda, d_gArrU, d_hArrU, d_v_new, d_H + kk * R.cols_, d_W + kk * R.rows_, t); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; mergeRT += mili; //printf("update R in GPU takes %f \n", mili ); } cuda_timerStart(start, streamT); hipLaunchKernelGGL(( assignment), dim3((R.cols_ + 1023) / 1024), dim3(1024), 0, 0, d_R_colPtr, d_H + t * R.cols_, d_gArrV, d_hArrV, lambda, R.cols_); hipLaunchKernelGGL(( assignment), dim3((R.rows_ + 1023) / 1024), dim3(1024), 0, 0, d_R_rowPtr, d_W + t * R.rows_, d_gArrU, d_hArrU, lambda, R.rows_); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; mergeRT += mili; if (oiter == 1 && t == 1) printf("time to merge Rt %f\n", mergeRT); } int maxit = inneriter; float init = ACSRTime; int iter = 0; //*************************inner iter*** // if(oiter > 1) iter = 2; // else iter = 1; //maxit = inneriter; for (; iter < maxit; ++iter) { //*************************Update Ht*************** float updateR = 0; for (int tile = tileSize_H; tile < (R.rows_ + tileSize_H - 1); tile += tileSize_H) { int tile_no = tile / tileSize_H; //printf("*****tile no %d\n", tile_no ); cuda_timerStart(start, streamT); helper_rankOneUpdate_v( d_row_lim_R + ((tile_no - 1) * R.cols_) + (tile_no - 1), d_row_lim_R + (tile_no * R.cols_) + tile_no, d_R_rowIdx, d_R_val, d_W + t * R.rows_, d_H + t * R.cols_, R.rows_, R.cols_, true, tiled_rowGroupPtr + ((tile_no - 1) * R.cols_), &(tiled_count[tile_no - 1][0]), lambda, d_gArrV, d_hArrV, d_W + t * R.rows_); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; updateR += mili; } cuda_timerStart(start, streamT); hipLaunchKernelGGL(( assignment), dim3((R.cols_ + 1023) / 1024), dim3(1024), 0, 0, d_R_colPtr, d_H + t * R.cols_, d_gArrV, d_hArrV, lambda, R.cols_); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; updateR += mili; //*************************Update Wt*************** float updateRT = 0; for (int tile = tileSize_W; tile < (R.cols_ + tileSize_W - 1); tile += tileSize_W) { int tile_no = tile / tileSize_W;//printf("tile_no from RT %d\n", tile_no); cuda_timerStart(start, streamT); helper_rankOneUpdate_v( d_row_lim_Rt + ((tile_no - 1) * R.rows_) + (tile_no - 1), d_row_lim_Rt + (tile_no * R.rows_) + (tile_no), d_R_colIdx, d_R_val_t, d_H + t * R.cols_, d_W + t * R.rows_, R.cols_, R.rows_, true, tiled_rowGroupPtr_Rt + ((tile_no - 1) * Rt.cols_), &(tiled_count_Rt[tile_no - 1][0]), lambda, d_gArrU, d_hArrU, d_v_new); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; updateRT += mili; } cuda_timerStart(start, streamT); hipLaunchKernelGGL(( assignment), dim3((R.rows_ + 1023) / 1024), dim3(1024), 0, 0, d_R_rowPtr, d_W + t * R.rows_, d_gArrU, d_hArrU, lambda, R.rows_); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; updateRT += mili; // if(oiter ==1 && t ==0 && iter == maxit-1) // 
printf("time to update Wt %f\n", updateRT); } //**************************Updating R = R - Wt * Ht ***************************** updateR = 0; if (t == k - 1) { for (int tile = tileSize_H; tile < (R.rows_ + tileSize_H - 1); tile += tileSize_H) { int tile_no = tile / tileSize_H; //printf("tile no %d\n", tile_no ); cuda_timerStart(start, streamT); helper_UpdateR( d_row_lim_R + ((tile_no - 1) * (R.cols_ + 1)), d_row_lim_R + (tile_no * R.cols_) + tile_no, d_R_rowIdx, d_R_val, d_W + t * R.rows_, d_H + t * R.cols_, R.rows_, R.cols_, false, tiled_rowGroupPtr + ((tile_no - 1) * R.cols_), &(tiled_count[tile_no - 1][0]), lambda, d_gArrU, d_hArrU, d_W + t * R.rows_, d_W + t * R.rows_, d_H + t * R.cols_, t); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; updateR += mili; } } //**************************Updating RT = RT - Wt * Ht ***************************** updateRT = 0; if (t == k - 1) { for (int tile = tileSize_W; tile < (R.cols_ + tileSize_W - 1); tile += tileSize_W) { int tile_no = tile / tileSize_W; cuda_timerStart(start, streamT); helper_UpdateR( d_row_lim_Rt + ((tile_no - 1) * (R.rows_ + 1)), d_row_lim_Rt + (tile_no * R.rows_) + (tile_no), d_R_colIdx, d_R_val_t, d_H + t * R.cols_, d_W + t * R.rows_, R.cols_, R.rows_, false, tiled_rowGroupPtr_Rt + ((tile_no - 1) * Rt.cols_), &(tiled_count_Rt[tile_no - 1][0]), lambda, d_gArrU, d_hArrU, d_H + t * R.cols_, d_H + t * R.cols_, d_W + t * R.rows_, t); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; updateRT += mili; } } // if(oiter ==1 && t == k-1) // printf("time to update Rt %f\n", updateRT); if (oiter == 1 && t == 2) printf("iter %d time for 1 feature: %f ms\n", oiter, ACSRTime); } //**************Check RMSE******************** hipMemset(d_rmse, 0, (T.nnz_ + 1) * sizeof(DTYPE)); hipMemset(d_pred_v, 0, (T.nnz_ + 1) * sizeof(DTYPE)); hipLaunchKernelGGL(( GPU_rmse), dim3((T.nnz_ + 1023) / 1024), dim3(1024), 0, 0, d_test_row, d_test_col, d_test_val, d_pred_v, d_rmse, d_W, d_H, T.nnz_, k, R.rows_, R.cols_); DTYPE tot_rmse = 0, f_rmse = 0; hipMemcpy(&(rmse[0]), d_rmse, (T.nnz_ + 1) * sizeof(DTYPE), hipMemcpyDeviceToHost); //Copy to CPU for (int t = 0; t < k; ++t){ hipMemcpy(&(W[t][0]),d_W + t * R.rows_, R.rows_ * sizeof(DTYPE), hipMemcpyDeviceToHost); hipMemcpy(&(H[t][0]),d_H + t * R.cols_, R.cols_ * sizeof(DTYPE), hipMemcpyDeviceToHost); } #pragma omp parallel for reduction(+:tot_rmse) for (int i = 0; i < T.nnz_; ++i) tot_rmse += rmse[i]; f_rmse = sqrt(tot_rmse / T.nnz_); printf("iter %d time %f RMSE %f\n", oiter, (ACSRTime / 1000), f_rmse); } for (int i = 0; i <= NUM_THRDS; i++) checkCuda(hipStreamDestroy(stream[i]), __LINE__); checkCuda(hipStreamDestroy(streamT), __LINE__); hipFree(d_u); hipFree(d_v); hipFree(d_W); hipFree(d_H); hipFree(d_R_rowIdx); hipFree(d_R_colPtr); hipFree(d_R_val); hipFree(d_R_colIdx); hipFree(d_R_rowPtr); hipFree(d_R_val_t); hipFree(d_gArrU); hipFree(d_gArrV); hipFree(d_hArrU); hipFree(d_hArrV); }
5022f557e39ddd6e837d9aa54fdac746c281e5eb.cu
/** * * OHIO STATE UNIVERSITY SOFTWARE DISTRIBUTION LICENSE * * Parallel CCD++ on GPU (the “Software”) Copyright (c) 2017, The Ohio State * University. All rights reserved. * * The Software is available for download and use subject to the terms and * conditions of this License. Access or use of the Software constitutes acceptance * and agreement to the terms and conditions of this License. Redistribution and * use of the Software in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the capitalized paragraph below. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the capitalized paragraph below in the documentation * and/or other materials provided with the distribution. * * 3. The names of Ohio State University, or its faculty, staff or students may not * be used to endorse or promote products derived from the Software without * specific prior written permission. * * This software was produced with support from the National Science Foundation * (NSF) through Award 1629548. Nothing in this work should be construed as * reflecting the official policy or position of the Defense Department, the United * States government, Ohio State University. * * THIS SOFTWARE HAS BEEN APPROVED FOR PUBLIC RELEASE, UNLIMITED DISTRIBUTION. THE * SOFTWARE IS PROVIDED “AS IS” AND WITHOUT ANY EXPRESS, IMPLIED OR STATUTORY * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF ACCURACY, COMPLETENESS, * NONINFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. ACCESS OR USE OF THE SOFTWARE IS ENTIRELY AT THE USER’S RISK. IN * NO EVENT SHALL OHIO STATE UNIVERSITY OR ITS FACULTY, STAFF OR STUDENTS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE SOFTWARE * USER SHALL INDEMNIFY, DEFEND AND HOLD HARMLESS OHIO STATE UNIVERSITY AND ITS * FACULTY, STAFF AND STUDENTS FROM ANY AND ALL CLAIMS, ACTIONS, DAMAGES, LOSSES, * LIABILITIES, COSTS AND EXPENSES, INCLUDING ATTORNEYS’ FEES AND COURT COSTS, * DIRECTLY OR INDIRECTLY ARISING OUT OF OR IN CONNECTION WITH ACCESS OR USE OF THE * SOFTWARE. * */ /** * * Author: * Israt ([email protected]) * * Contacts: * Israt ([email protected]) * Aravind Sukumaran-Rajam ([email protected]) * P. 
(Saday) Sadayappan ([email protected]) * */ #include "device_utilities.h" #include "util.h" #include <cuda.h> #include <vector> #include "helper_fusedR.h" #include "helper_updateH.h" inline cudaError_t checkCuda(cudaError_t result, int s) { if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error in line : %s - %d\n", cudaGetErrorString(result), s); assert(result == cudaSuccess); } return result; } // Cyclic Coordinate Descent for Matrix Factorization void ccdr1(SparseMatrix &R, MatData &W, MatData &H, TestData &T, Options &param) { int k = param.k, maxiter = param.maxiter, inneriter = param.maxinneriter, tileSize_H = param.tileSizeH, tileSize_W = param.tileSizeW; DTYPE lambda = param.lambda, *d_R_val, *d_R_val_t, *d_gArrU, *d_hArrU, *d_gArrV, *d_hArrV, *d_u, *d_v, oldobj = 0; int LB[NUM_THRDS], UB[NUM_THRDS], LB_Rt[NUM_THRDS], UB_Rt[NUM_THRDS], *d_R_colPtr, *d_R_rowPtr, *d_row_lim_R, *d_row_lim_Rt, sum = 0, *d_test_row, *d_test_col, i, j; DTYPE reg = 0, loss, *d_loss, *d_v_new, *d_Wt, *d_Ht, *d_W, *d_H, *d_test_val, v, *d_pred_v, *d_rmse, *d_fundec_col; unsigned *d_R_rowIdx, *d_R_colIdx; DTYPE *pred_v = (DTYPE*) malloc(T.nnz_ * sizeof(DTYPE)); DTYPE *rmse = (DTYPE*) malloc(T.nnz_ * sizeof(DTYPE)); //omp_set_num_threads(param.threads); // Create transpose view of R SparseMatrix Rt; Rt = R.get_shallow_transpose(); // initial value of the regularization term // H is a zero matrix now. for (int t = 0; t < k; ++t) for (unsigned c = 0; c < R.cols_; ++c) H[t][c] = 0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float mili = 0, copyTime = 0; //**************************CUDA COPY************************ checkCuda(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1), __LINE__); size_t RCols_memsize = (R.cols_) * sizeof(DTYPE); size_t RRows_memsize = (R.rows_) * sizeof(DTYPE); size_t d_memsize = 1 * sizeof(DTYPE); size_t R_colPtr_memsize = (R.cols_ + 1) * sizeof(int); size_t R_rowPtr_memsize = (R.rows_ + 1) * sizeof(int); size_t R_rowIdx_memsize = R.nnz_ * sizeof(unsigned); size_t R_val_memsize = R.nnz_ * sizeof(DTYPE); checkCuda(cudaMalloc((void**) &d_W, k * R.rows_ * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &d_H, k * R.cols_ * sizeof(DTYPE)), 1); checkCuda(cudaMalloc((void**) &d_Wt, R.rows_ * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &d_Ht, R.cols_ * sizeof(DTYPE)), 1); checkCuda(cudaMalloc((void**) &d_u, RRows_memsize), 0); checkCuda(cudaMalloc((void**) &d_v, RCols_memsize), 1); checkCuda(cudaMalloc((void**) &d_v_new, RCols_memsize), 1); checkCuda(cudaMalloc((void**) &d_gArrU, RRows_memsize), 2); checkCuda(cudaMalloc((void**) &d_hArrU, RRows_memsize), 3); checkCuda(cudaMalloc((void**) &d_gArrV, RCols_memsize), 2); checkCuda(cudaMalloc((void**) &d_hArrV, RCols_memsize), 3); checkCuda(cudaMalloc((void**) &d_R_colPtr, R_colPtr_memsize), 4); checkCuda(cudaMalloc((void**) &d_R_rowPtr, R_rowPtr_memsize), 5); checkCuda(cudaMalloc((void**) &d_R_rowIdx, R_rowIdx_memsize), 6); checkCuda(cudaMalloc((void**) &d_R_colIdx, R_rowIdx_memsize), 8); checkCuda(cudaMalloc((void**) &d_R_val, R_val_memsize), 7); checkCuda(cudaMalloc((void**) &d_R_val_t, R_val_memsize), 7); checkCuda(cudaMalloc((void**) &d_loss, 1 * sizeof(DTYPE)), 11); checkCuda(cudaMalloc((void**) &d_test_row, (T.nnz_ + 1) * sizeof(int)), 7); checkCuda(cudaMalloc((void**) &d_test_col, (T.nnz_ + 1) * sizeof(int)), 7); checkCuda(cudaMalloc((void**) &d_test_val, (T.nnz_ + 1) * sizeof(DTYPE)), 7); checkCuda(cudaMalloc((void**) &d_pred_v, (T.nnz_ + 1) * sizeof(DTYPE)), 7); checkCuda(cudaMalloc((void**) 
&d_rmse, (T.nnz_ + 1) * sizeof(DTYPE)), 7); checkCuda(cudaEventRecord(start), __LINE__); cudaMemcpy(d_R_colPtr, R.get_csc_col_ptr(), R_colPtr_memsize, cudaMemcpyHostToDevice); cudaMemcpy(d_R_rowPtr, R.get_csr_row_ptr(), R_rowPtr_memsize, cudaMemcpyHostToDevice); cudaMemcpy(d_R_rowIdx, R.get_csc_row_indx(), R_rowIdx_memsize, cudaMemcpyHostToDevice); cudaMemcpy(d_R_colIdx, R.get_csr_col_indx(), R_rowIdx_memsize, cudaMemcpyHostToDevice); cudaMemcpy(d_R_val, R.get_csc_val(), R_val_memsize, cudaMemcpyHostToDevice); cudaMemcpy(d_R_val_t, R.get_csr_val(), R_val_memsize, cudaMemcpyHostToDevice); for (int t = 0; t < k; ++t) cudaMemcpy(d_W + t * R.rows_, &(W[t][0]), R.rows_ * sizeof(DTYPE), cudaMemcpyHostToDevice); cudaMemset(d_H, 0, k * R.cols_ * sizeof(DTYPE)); //cpoying test cudaMemcpy(d_test_row, T.getTestRow(), (T.nnz_ + 1) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_test_col, T.getTestCol(), (T.nnz_ + 1) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_test_val, T.getTestVal(), (T.nnz_ + 1) * sizeof(DTYPE), cudaMemcpyHostToDevice); checkCuda(cudaEventRecord(stop), __LINE__); checkCuda(cudaDeviceSynchronize(), __LINE__); checkCuda(cudaEventElapsedTime(&mili, start, stop), __LINE__); copyTime = mili; float ACSRTime = 0, textureACSRTime = 0, innerLoopTime = 0; float ACSRPreProcessTime; cudaStream_t streamT; checkCuda(cudaStreamCreate(&streamT), __LINE__); create_stream(); //****************** preprrocessing TILING************* double t1 = seconds(); int total_tileInRows = (R.rows_ + tileSize_H - 1) / tileSize_H; int total_tileInCols = (R.cols_ + tileSize_W - 1) / tileSize_W; MatInt row_lim_R = MatInt(total_tileInRows + 1, VecInt(R.cols_ + 1)); MatInt row_lim_Rt = MatInt(total_tileInCols + 1, VecInt(R.rows_ + 1)); MatInt row_lim_R_odd = MatInt(total_tileInRows + 1, VecInt(R.cols_ + 1)); MatInt row_lim_Rt_odd = MatInt(total_tileInCols + 1, VecInt(R.rows_ + 1)); make_tile(R, row_lim_R, tileSize_H); make_tile(Rt, row_lim_Rt, tileSize_W); //copying tiles limit rowPointers checkCuda(cudaEventRecord(start), __LINE__); checkCuda( cudaMalloc((void**) &d_row_lim_R, (total_tileInRows + 1) * (R.cols_ + 1) * sizeof(int)), 0); checkCuda( cudaMalloc((void**) &d_row_lim_Rt, (total_tileInCols + 1) * (R.rows_ + 1) * sizeof(int)), 0); checkCuda( cudaMemcpy(d_row_lim_R, R.get_csc_col_ptr(), (R.cols_ + 1) * sizeof(int), cudaMemcpyHostToDevice), __LINE__); checkCuda( cudaMemcpy(d_row_lim_Rt, R.get_csr_row_ptr(), (R.rows_ + 1) * sizeof(int), cudaMemcpyHostToDevice), __LINE__); for (int tile = tileSize_H; tile < (R.rows_ + tileSize_H - 1); tile += tileSize_H) { int tile_no = tile / tileSize_H; // - 1; checkCuda( cudaMemcpy(d_row_lim_R + tile_no * (R.cols_ + 1), &(row_lim_R[tile_no][0]), (R.cols_ + 1) * sizeof(int), cudaMemcpyHostToDevice), __LINE__); } for (int tile = tileSize_W; tile < (R.cols_ + tileSize_W - 1); tile += tileSize_W) { int tile_no = tile / tileSize_W; // - 1; checkCuda( cudaMemcpy(d_row_lim_Rt + (tile_no * R.rows_) + tile_no, &(row_lim_Rt[tile_no][0]), (R.rows_ + 1) * sizeof(int), cudaMemcpyHostToDevice), __LINE__); } mili = cuda_timerEnd(start, stop, streamT); copyTime = mili; //******************PreProcess for TILED binning******************************* checkCuda(cudaEventRecord(start), __LINE__); int *tiled_count[total_tileInRows], *tiled_count_Rt[total_tileInCols]; for (int i = 0; i < total_tileInRows; ++i) tiled_count[i] = (int*) malloc(NUM_THRDS * sizeof(int)); for (int i = 0; i < total_tileInCols; ++i) tiled_count_Rt[i] = (int*) malloc(NUM_THRDS * sizeof(int)); int 
*tiled_rowGroupPtr, *tiled_rowGroupPtr_Rt; // Extract CSR group info on CPU int *tiled_host_rowGroupPtr[total_tileInRows], *tiled_host_rowGroupPtr_Rt[total_tileInCols]; for (int i = 0; i < total_tileInRows; ++i) tiled_host_rowGroupPtr[i] = (int*) malloc( NUM_THRDS * R.cols_ * sizeof(int)); for (int i = 0; i < total_tileInCols; ++i) tiled_host_rowGroupPtr_Rt[i] = (int*) malloc( NUM_THRDS * R.rows_ * sizeof(int)); for (int tile = tileSize_H; tile < (R.rows_ + tileSize_H - 1); tile += tileSize_H) { int tile_no = tile / tileSize_H - 1; tiled_binning(R, (tiled_host_rowGroupPtr[tile_no]), LB, UB, tiled_count[tile_no], row_lim_R, tile_no); } for (int tile = tileSize_W; tile < (R.cols_ + tileSize_W - 1); tile += tileSize_W) { int tile_no = tile / tileSize_W - 1; tiled_binning(Rt, (tiled_host_rowGroupPtr_Rt[tile_no]), LB_Rt, UB_Rt, tiled_count_Rt[tile_no], row_lim_Rt, tile_no); } checkCuda( cudaMalloc((void **) &tiled_rowGroupPtr, total_tileInRows * R.cols_ * sizeof(int)), __LINE__); checkCuda( cudaMalloc((void **) &tiled_rowGroupPtr_Rt, total_tileInCols * R.rows_ * sizeof(int)), __LINE__); int *test1 = (int*) malloc((R.cols_ + 1) * sizeof(int)); //del for (int tile = tileSize_H; tile < (R.rows_ + tileSize_H - 1); tile += tileSize_H) { int tile_no = tile / tileSize_H - 1; sum = 0; for (int i = 0; i < NUM_THRDS; i++) { if (tiled_count[tile_no][i] > 0) { checkCuda( cudaMemcpy( tiled_rowGroupPtr + (tile_no * R.cols_) + sum, &(tiled_host_rowGroupPtr[tile_no][i * R.cols_]), tiled_count[tile_no][i] * sizeof(int), cudaMemcpyHostToDevice), __LINE__); sum += tiled_count[tile_no][i]; } } } for (int tile = tileSize_W; tile < (Rt.rows_ + tileSize_W - 1); tile += tileSize_W) { int tile_no = tile / tileSize_W - 1; sum = 0; for (int i = 0; i < NUM_THRDS; i++) { if (tiled_count_Rt[tile_no][i] > 0) { checkCuda( cudaMemcpy( tiled_rowGroupPtr_Rt + (tile_no * Rt.cols_) + sum, &(tiled_host_rowGroupPtr_Rt[tile_no][i * R.rows_]), tiled_count_Rt[tile_no][i] * sizeof(int), cudaMemcpyHostToDevice), __LINE__); sum += tiled_count_Rt[tile_no][i]; } } } mili = cuda_timerEnd(start, stop, streamT); copyTime = mili; //********************STARTING CCD++ ALGORTIHM************************ printf("tileSize_H,W: %d, %d k: %d lambda: %f\n", tileSize_H, tileSize_W, k, lambda); float mergeR = 0, mergeRT = 0, updateR = 0, updateRT = 0; for (int oiter = 1; oiter <= maxiter; ++oiter) { int early_stop = 0, kk = 0; for (int tt = 0; tt < k; ++tt) { int t = tt; VecData &Wt = W[t], &Ht = H[t]; cudaMemset(d_hArrU, 0, RRows_memsize); cudaMemset(d_gArrV, 0, RCols_memsize); cudaMemset(d_hArrV, 0, RCols_memsize); cudaMemset(d_gArrU, 0, RRows_memsize); //if (oiter > 1) { //**************************Updating R with add true********************************** mergeR = 0; for (int tile = tileSize_H; tile < (R.rows_ + tileSize_H - 1); tile += tileSize_H) { int tile_no = tile / tileSize_H; //printf("*****tile no %d\n", tile_no ); cuda_timerStart(start, streamT); if (t == 0) kk = t; else kk = t - 1; helper_UpdateR( d_row_lim_R + ((tile_no - 1) * (R.cols_ + 1)), d_row_lim_R + (tile_no * R.cols_) + tile_no, d_R_rowIdx, d_R_val, d_W + t * R.rows_, d_H + t * R.cols_, R.rows_, R.cols_, true, tiled_rowGroupPtr + ((tile_no - 1) * R.cols_), &(tiled_count[tile_no - 1][0]), lambda, d_gArrV, d_hArrV, d_W + t * R.rows_, d_W + kk * R.rows_, d_H + kk * R.cols_, t); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; mergeR += mili; } cuda_timerStart(start, streamT); assignment<<<(R.cols_ + 1023) / 1024, 1024>>>(d_R_colPtr, d_v_new, d_gArrV, d_hArrV, lambda, 
R.cols_); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; mergeR += mili; if (oiter == 1 && (t == 1)) printf("time to merge R %f\n", mergeR); //**************************Updating RTranspose with add true********************************** mergeRT = 0; for (int tile = tileSize_W; tile < (R.cols_ + tileSize_W - 1); tile += tileSize_W) { int tile_no = tile / tileSize_W; //printf("tile_no from RT %d\n", tile_no); cuda_timerStart(start, streamT); if (t == 0) kk = t; else kk = t - 1; helper_UpdateR( d_row_lim_Rt + ((tile_no - 1) * (R.rows_ + 1)), d_row_lim_Rt + (tile_no * R.rows_) + (tile_no), d_R_colIdx, d_R_val_t, d_H + t * R.cols_, d_W + t * R.rows_, R.cols_, R.rows_, true, tiled_rowGroupPtr_Rt + ((tile_no - 1) * Rt.cols_), &(tiled_count_Rt[tile_no - 1][0]), lambda, d_gArrU, d_hArrU, d_v_new, d_H + kk * R.cols_, d_W + kk * R.rows_, t); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; mergeRT += mili; //printf("update R in GPU takes %f \n", mili ); } cuda_timerStart(start, streamT); assignment<<<(R.cols_ + 1023) / 1024, 1024>>>(d_R_colPtr, d_H + t * R.cols_, d_gArrV, d_hArrV, lambda, R.cols_); assignment<<<(R.rows_ + 1023) / 1024, 1024>>>(d_R_rowPtr, d_W + t * R.rows_, d_gArrU, d_hArrU, lambda, R.rows_); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; mergeRT += mili; if (oiter == 1 && t == 1) printf("time to merge Rt %f\n", mergeRT); } int maxit = inneriter; float init = ACSRTime; int iter = 0; //*************************inner iter*** // if(oiter > 1) iter = 2; // else iter = 1; //maxit = inneriter; for (; iter < maxit; ++iter) { //*************************Update Ht*************** float updateR = 0; for (int tile = tileSize_H; tile < (R.rows_ + tileSize_H - 1); tile += tileSize_H) { int tile_no = tile / tileSize_H; //printf("*****tile no %d\n", tile_no ); cuda_timerStart(start, streamT); helper_rankOneUpdate_v( d_row_lim_R + ((tile_no - 1) * R.cols_) + (tile_no - 1), d_row_lim_R + (tile_no * R.cols_) + tile_no, d_R_rowIdx, d_R_val, d_W + t * R.rows_, d_H + t * R.cols_, R.rows_, R.cols_, true, tiled_rowGroupPtr + ((tile_no - 1) * R.cols_), &(tiled_count[tile_no - 1][0]), lambda, d_gArrV, d_hArrV, d_W + t * R.rows_); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; updateR += mili; } cuda_timerStart(start, streamT); assignment<<<(R.cols_ + 1023) / 1024, 1024>>>(d_R_colPtr, d_H + t * R.cols_, d_gArrV, d_hArrV, lambda, R.cols_); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; updateR += mili; //*************************Update Wt*************** float updateRT = 0; for (int tile = tileSize_W; tile < (R.cols_ + tileSize_W - 1); tile += tileSize_W) { int tile_no = tile / tileSize_W;//printf("tile_no from RT %d\n", tile_no); cuda_timerStart(start, streamT); helper_rankOneUpdate_v( d_row_lim_Rt + ((tile_no - 1) * R.rows_) + (tile_no - 1), d_row_lim_Rt + (tile_no * R.rows_) + (tile_no), d_R_colIdx, d_R_val_t, d_H + t * R.cols_, d_W + t * R.rows_, R.cols_, R.rows_, true, tiled_rowGroupPtr_Rt + ((tile_no - 1) * Rt.cols_), &(tiled_count_Rt[tile_no - 1][0]), lambda, d_gArrU, d_hArrU, d_v_new); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; updateRT += mili; } cuda_timerStart(start, streamT); assignment<<<(R.rows_ + 1023) / 1024, 1024>>>(d_R_rowPtr, d_W + t * R.rows_, d_gArrU, d_hArrU, lambda, R.rows_); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; updateRT += mili; // if(oiter ==1 && t ==0 && iter == maxit-1) // printf("time to update Wt %f\n", updateRT); } //**************************Updating R = R - Wt * Ht 
***************************** updateR = 0; if (t == k - 1) { for (int tile = tileSize_H; tile < (R.rows_ + tileSize_H - 1); tile += tileSize_H) { int tile_no = tile / tileSize_H; //printf("tile no %d\n", tile_no ); cuda_timerStart(start, streamT); helper_UpdateR( d_row_lim_R + ((tile_no - 1) * (R.cols_ + 1)), d_row_lim_R + (tile_no * R.cols_) + tile_no, d_R_rowIdx, d_R_val, d_W + t * R.rows_, d_H + t * R.cols_, R.rows_, R.cols_, false, tiled_rowGroupPtr + ((tile_no - 1) * R.cols_), &(tiled_count[tile_no - 1][0]), lambda, d_gArrU, d_hArrU, d_W + t * R.rows_, d_W + t * R.rows_, d_H + t * R.cols_, t); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; updateR += mili; } } //**************************Updating RT = RT - Wt * Ht ***************************** updateRT = 0; if (t == k - 1) { for (int tile = tileSize_W; tile < (R.cols_ + tileSize_W - 1); tile += tileSize_W) { int tile_no = tile / tileSize_W; cuda_timerStart(start, streamT); helper_UpdateR( d_row_lim_Rt + ((tile_no - 1) * (R.rows_ + 1)), d_row_lim_Rt + (tile_no * R.rows_) + (tile_no), d_R_colIdx, d_R_val_t, d_H + t * R.cols_, d_W + t * R.rows_, R.cols_, R.rows_, false, tiled_rowGroupPtr_Rt + ((tile_no - 1) * Rt.cols_), &(tiled_count_Rt[tile_no - 1][0]), lambda, d_gArrU, d_hArrU, d_H + t * R.cols_, d_H + t * R.cols_, d_W + t * R.rows_, t); mili = cuda_timerEnd(start, stop, streamT); ACSRTime += mili; updateRT += mili; } } // if(oiter ==1 && t == k-1) // printf("time to update Rt %f\n", updateRT); if (oiter == 1 && t == 2) printf("iter %d time for 1 feature: %f ms\n", oiter, ACSRTime); } //**************Check RMSE******************** cudaMemset(d_rmse, 0, (T.nnz_ + 1) * sizeof(DTYPE)); cudaMemset(d_pred_v, 0, (T.nnz_ + 1) * sizeof(DTYPE)); GPU_rmse<<<(T.nnz_ + 1023) / 1024, 1024>>>(d_test_row, d_test_col, d_test_val, d_pred_v, d_rmse, d_W, d_H, T.nnz_, k, R.rows_, R.cols_); DTYPE tot_rmse = 0, f_rmse = 0; cudaMemcpy(&(rmse[0]), d_rmse, (T.nnz_ + 1) * sizeof(DTYPE), cudaMemcpyDeviceToHost); //Copy to CPU for (int t = 0; t < k; ++t){ cudaMemcpy(&(W[t][0]),d_W + t * R.rows_, R.rows_ * sizeof(DTYPE), cudaMemcpyDeviceToHost); cudaMemcpy(&(H[t][0]),d_H + t * R.cols_, R.cols_ * sizeof(DTYPE), cudaMemcpyDeviceToHost); } #pragma omp parallel for reduction(+:tot_rmse) for (int i = 0; i < T.nnz_; ++i) tot_rmse += rmse[i]; f_rmse = sqrt(tot_rmse / T.nnz_); printf("iter %d time %f RMSE %f\n", oiter, (ACSRTime / 1000), f_rmse); } for (int i = 0; i <= NUM_THRDS; i++) checkCuda(cudaStreamDestroy(stream[i]), __LINE__); checkCuda(cudaStreamDestroy(streamT), __LINE__); cudaFree(d_u); cudaFree(d_v); cudaFree(d_W); cudaFree(d_H); cudaFree(d_R_rowIdx); cudaFree(d_R_colPtr); cudaFree(d_R_val); cudaFree(d_R_colIdx); cudaFree(d_R_rowPtr); cudaFree(d_R_val_t); cudaFree(d_gArrU); cudaFree(d_gArrV); cudaFree(d_hArrU); cudaFree(d_hArrV); }
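// --- Illustrative sketch (editorial addition, not part of the original file) ---
// The per-column g/h accumulation above feeds the closed-form one-variable CCD++
// update z* = g_c / (lambda + h_c), where for column c: g_c is the sum over the
// observed rows i of Rhat_ic * W[t][i] and h_c is the sum of W[t][i]^2. The kernel
// below is a minimal, assumed version of what the `assignment` launch computes;
// the real kernel also receives the CSC column pointer, which it may use (for
// example to weight lambda by the column's nonzero count), so treat this only as
// a reading aid, not the project's actual implementation.
__global__ void assignment_sketch(const DTYPE *gArr, const DTYPE *hArr, DTYPE *v,
                                  DTYPE lambda, int cols) {
    int c = blockIdx.x * blockDim.x + threadIdx.x;   // one thread per column
    if (c < cols)
        v[c] = gArr[c] / (lambda + hArr[c]);         // rank-one CCD++ update
}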
88b232fc65d1d025424a3b9fd0a5d680571c3fbc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #include "stretching.h" #include "pregpu.h" void Kernel::StretchingInit() { startTimer("Init GPU "); // Start timer hipDeviceReset(); // Exit GPU thread hipSetDevice(MPIRANK % GPUS); // Set GPU device hipDeviceSynchronize(); // Sync GPU threads stopTimer("Init GPU ",MPIRANK==0); // Stop timer & print eraseTimer("Init GPU "); // Erase timer } __device__ void StretchingP2M_core(gpureal *target, gpureal rho, gpureal alpha, gpureal beta, gpureal *sourceShrd, int ithread) { __shared__ gpureal factShrd[2*P]; gpureal Ynm; gpureal YnmAlpha; gpureal fact = 1; for( int i=0; i<2*P; ++i ) { factShrd[i] = fact; fact *= i + 1; } __syncthreads(); int nn = floorf(sqrtf(2*threadIdx.x+0.25)-0.5); int mm = 0; for( int i=0; i<=nn; ++i ) mm += i; mm = threadIdx.x - mm; if( threadIdx.x >= NTERM ) nn = mm = 0; gpureal x = cosf(alpha); gpureal y = sinf(alpha); if( fabsf(y) < EPS ) y = 1 / EPS; gpureal s = sqrtf(1 - x * x); fact = 1; gpureal pn = 1; gpureal rhom = 1; for( int m=0; m<mm; ++m ) { rhom *= rho; pn = -pn * fact * s; fact += 2; } int m = mm; gpureal p = pn; gpureal anm; if(mm==nn) { anm = rhom * rsqrtf(factShrd[2*m]); Ynm = anm * p; } gpureal p1 = p; p = x * (2 * m + 1) * p; if(mm==nn) YnmAlpha = anm * (p - (m + 1) * x * p1) / y; rhom *= rho; gpureal rhon = rhom; int n; for( n=m+1; n<nn; ++n ) { gpureal p2 = p1; p1 = p; p = (x * (2 * n + 1) * p1 - (n + m) * p2) / (n - m + 1); rhon *= rho; } if(n<=nn) { anm = rhon * rsqrtf(factShrd[n+m] / factShrd[n-m]); Ynm = anm * p; gpureal p2 = p1; p1 = p; p = (x * (2 * n + 1) * p1 - (n + m) * p2) / (n - m + 1); YnmAlpha = anm * ((n - m + 1) * p - (n + 1) * x * p1) / y; } gpureal ere = cosf(-mm * beta); gpureal eim = sinf(-mm * beta); gpureal spherical[6]; gpureal cartesian[6]; spherical[0] = Ynm * nn / rho * ere; spherical[1] = YnmAlpha * ere; spherical[2] = Ynm * mm * eim; spherical[3] = Ynm * nn / rho * eim; spherical[4] = YnmAlpha * eim; spherical[5] = -Ynm * mm * ere; sph2cart(rho,alpha,beta,&spherical[0],&cartesian[0]); sph2cart(rho,alpha,beta,&spherical[3],&cartesian[3]); target[0] += sourceShrd[6*ithread+4] * cartesian[2] - sourceShrd[6*ithread+5] * cartesian[1]; target[1] += sourceShrd[6*ithread+4] * cartesian[5] - sourceShrd[6*ithread+5] * cartesian[4]; target[2] += sourceShrd[6*ithread+5] * cartesian[0] - sourceShrd[6*ithread+3] * cartesian[2]; target[3] += sourceShrd[6*ithread+5] * cartesian[3] - sourceShrd[6*ithread+3] * cartesian[5]; target[4] += sourceShrd[6*ithread+3] * cartesian[1] - sourceShrd[6*ithread+4] * cartesian[0]; target[5] += sourceShrd[6*ithread+3] * cartesian[4] - sourceShrd[6*ithread+4] * cartesian[3]; } __global__ void StretchingP2M_GPU(int *keysGlob, int *rangeGlob, gpureal *targetGlob, gpureal *sourceGlob) { int keys = keysGlob[blockIdx.x]; int numList = rangeGlob[keys]; gpureal target[6] = {0, 0, 0, 0, 0, 0}; __shared__ gpureal targetShrd[3]; __shared__ gpureal sourceShrd[6*THREADS]; int itarget = blockIdx.x * THREADS; targetShrd[0] = targetGlob[6*itarget+0]; targetShrd[1] = targetGlob[6*itarget+1]; targetShrd[2] = targetGlob[6*itarget+2]; for( int ilist=0; ilist<numList; ++ilist ) { int begin = rangeGlob[keys+3*ilist+1]; int size = rangeGlob[keys+3*ilist+2]; for( int iblok=0; iblok<(size-1)/THREADS; ++iblok ) { int isource = begin + iblok * THREADS + threadIdx.x; __syncthreads(); sourceShrd[6*threadIdx.x+0] = sourceGlob[7*isource+0]; sourceShrd[6*threadIdx.x+1] = sourceGlob[7*isource+1]; sourceShrd[6*threadIdx.x+2] = 
sourceGlob[7*isource+2]; sourceShrd[6*threadIdx.x+3] = sourceGlob[7*isource+3]; sourceShrd[6*threadIdx.x+4] = sourceGlob[7*isource+4]; sourceShrd[6*threadIdx.x+5] = sourceGlob[7*isource+5]; __syncthreads(); for( int i=0; i<THREADS; ++i ) { float3 d; d.x = sourceShrd[6*i+0] - targetShrd[0]; d.y = sourceShrd[6*i+1] - targetShrd[1]; d.z = sourceShrd[6*i+2] - targetShrd[2]; gpureal rho,alpha,beta; cart2sph(rho,alpha,beta,d.x,d.y,d.z); StretchingP2M_core(target,rho,alpha,beta,sourceShrd,i); } } int iblok = (size-1)/THREADS; int isource = begin + iblok * THREADS + threadIdx.x; __syncthreads(); if( threadIdx.x < size - iblok * THREADS ) { sourceShrd[6*threadIdx.x+0] = sourceGlob[7*isource+0]; sourceShrd[6*threadIdx.x+1] = sourceGlob[7*isource+1]; sourceShrd[6*threadIdx.x+2] = sourceGlob[7*isource+2]; sourceShrd[6*threadIdx.x+3] = sourceGlob[7*isource+3]; sourceShrd[6*threadIdx.x+4] = sourceGlob[7*isource+4]; sourceShrd[6*threadIdx.x+5] = sourceGlob[7*isource+5]; } __syncthreads(); for( int i=0; i<size-iblok*THREADS; ++i ) { float3 d; d.x = sourceShrd[6*i+0] - targetShrd[0]; d.y = sourceShrd[6*i+1] - targetShrd[1]; d.z = sourceShrd[6*i+2] - targetShrd[2]; gpureal rho,alpha,beta; cart2sph(rho,alpha,beta,d.x,d.y,d.z); StretchingP2M_core(target,rho,alpha,beta,sourceShrd,i); } } itarget = blockIdx.x * THREADS + threadIdx.x; targetGlob[6*itarget+0] = target[0]; targetGlob[6*itarget+1] = target[1]; targetGlob[6*itarget+2] = target[2]; targetGlob[6*itarget+3] = target[3]; targetGlob[6*itarget+4] = target[4]; targetGlob[6*itarget+5] = target[5]; } __device__ void StretchingM2M_core(gpureal *target, gpureal beta, gpureal *factShrd, gpureal *YnmShrd, gpureal *sourceShrd) { int j = floorf(sqrtf(2*threadIdx.x+0.25)-0.5); int k = 0; for( int i=0; i<=j; ++i ) k += i; k = threadIdx.x - k; if( threadIdx.x >= NTERM ) j = k = 0; gpureal ajk = ODDEVEN(j) * rsqrtf(factShrd[j-k] * factShrd[j+k]); for( int n=0; n<=j; ++n ) { for( int m=-n; m<=min(k-1,n); ++m ) { if( j-n >= k-m ) { int nm = n * n + n + m; int jnkms = (j - n) * (j - n + 1) / 2 + k - m; gpureal ere = cosf(-m * beta); gpureal eim = sinf(-m * beta); gpureal ajnkm = rsqrtf(factShrd[j-n-k+m] * factShrd[j-n+k-m]); gpureal cnm = ODDEVEN((m-abs(m))/2+j); cnm *= ajnkm / ajk * YnmShrd[nm]; gpureal CnmReal = cnm * ere; gpureal CnmImag = cnm * eim; target[0] += sourceShrd[6*jnkms+0] * CnmReal; target[0] -= sourceShrd[6*jnkms+1] * CnmImag; target[1] += sourceShrd[6*jnkms+0] * CnmImag; target[1] += sourceShrd[6*jnkms+1] * CnmReal; target[2] += sourceShrd[6*jnkms+2] * CnmReal; target[2] -= sourceShrd[6*jnkms+3] * CnmImag; target[3] += sourceShrd[6*jnkms+2] * CnmImag; target[3] += sourceShrd[6*jnkms+3] * CnmReal; target[4] += sourceShrd[6*jnkms+4] * CnmReal; target[4] -= sourceShrd[6*jnkms+5] * CnmImag; target[5] += sourceShrd[6*jnkms+4] * CnmImag; target[5] += sourceShrd[6*jnkms+5] * CnmReal; } } for( int m=k; m<=n; ++m ) { if( j-n >= m-k ) { int nm = n * n + n + m; int jnkms = (j - n) * (j - n + 1) / 2 - k + m; gpureal ere = cosf(-m * beta); gpureal eim = sinf(-m * beta); gpureal ajnkm = rsqrtf(factShrd[j-n-k+m] * factShrd[j-n+k-m]); gpureal cnm = ODDEVEN(k+j+m); cnm *= ajnkm / ajk * YnmShrd[nm]; gpureal CnmReal = cnm * ere; gpureal CnmImag = cnm * eim; target[0] += sourceShrd[6*jnkms+0] * CnmReal; target[0] += sourceShrd[6*jnkms+1] * CnmImag; target[1] += sourceShrd[6*jnkms+0] * CnmImag; target[1] -= sourceShrd[6*jnkms+1] * CnmReal; target[2] += sourceShrd[6*jnkms+2] * CnmReal; target[2] += sourceShrd[6*jnkms+3] * CnmImag; target[3] += sourceShrd[6*jnkms+2] * 
CnmImag; target[3] -= sourceShrd[6*jnkms+3] * CnmReal; target[4] += sourceShrd[6*jnkms+4] * CnmReal; target[4] += sourceShrd[6*jnkms+5] * CnmImag; target[5] += sourceShrd[6*jnkms+4] * CnmImag; target[5] -= sourceShrd[6*jnkms+5] * CnmReal; } } } } __global__ void StretchingM2M_GPU(int *keysGlob, int *rangeGlob, gpureal *targetGlob, gpureal *sourceGlob) { int keys = keysGlob[blockIdx.x]; int numList = rangeGlob[keys]; gpureal target[6] = {0, 0, 0, 0, 0, 0}; __shared__ gpureal sourceShrd[6*THREADS]; __shared__ gpureal factShrd[2*P]; __shared__ gpureal YnmShrd[P*P]; gpureal fact = 1; for( int i=0; i<2*P; ++i ) { factShrd[i] = fact; fact *= i + 1; } __syncthreads(); int itarget = blockIdx.x * THREADS; for( int ilist=0; ilist<numList; ++ilist ) { int begin = rangeGlob[keys+3*ilist+1]; float3 d; d.x = targetGlob[6*itarget+0] - sourceGlob[begin+0]; d.y = targetGlob[6*itarget+1] - sourceGlob[begin+1]; d.z = targetGlob[6*itarget+2] - sourceGlob[begin+2]; __syncthreads(); if( threadIdx.x < NTERM ) { sourceShrd[6*threadIdx.x+0] = sourceGlob[begin+6*threadIdx.x+3]; sourceShrd[6*threadIdx.x+1] = sourceGlob[begin+6*threadIdx.x+4]; sourceShrd[6*threadIdx.x+2] = sourceGlob[begin+6*threadIdx.x+5]; sourceShrd[6*threadIdx.x+3] = sourceGlob[begin+6*threadIdx.x+6]; sourceShrd[6*threadIdx.x+4] = sourceGlob[begin+6*threadIdx.x+7]; sourceShrd[6*threadIdx.x+5] = sourceGlob[begin+6*threadIdx.x+8]; } __syncthreads(); gpureal rho,alpha,beta; cart2sph(rho,alpha,beta,d.x,d.y,d.z); evalMultipole(YnmShrd,rho,alpha,factShrd); StretchingM2M_core(target,beta,factShrd,YnmShrd,sourceShrd); } itarget = blockIdx.x * THREADS + threadIdx.x; targetGlob[6*itarget+0] = target[0]; targetGlob[6*itarget+1] = target[1]; targetGlob[6*itarget+2] = target[2]; targetGlob[6*itarget+3] = target[3]; targetGlob[6*itarget+4] = target[4]; targetGlob[6*itarget+5] = target[5]; } void Kernel::StretchingM2M_CPU() { const complex I(0.,1.); // Imaginary unit vect dist = CI->X - CJ->X; real rho, alpha, beta; cart2sph(rho,alpha,beta,dist); evalMultipole(rho,alpha,-beta); for( int j=0; j!=P; ++j ) { for( int k=0; k<=j; ++k ) { const int jk = j * j + j + k; const int jks = j * (j + 1) / 2 + k; complex M[3] = {0, 0, 0}; for( int n=0; n<=j; ++n ) { for( int m=-n; m<=::min(k-1,n); ++m ) { if( j-n >= k-m ) { const int jnkm = (j - n) * (j - n) + j - n + k - m; const int jnkms = (j - n) * (j - n + 1) / 2 + k - m; const int nm = n * n + n + m; for( int d=0; d!=3; ++d ) { M[d] += CJ->M[3*jnkms+d] * ::pow(I,double(m-abs(m))) * Ynm[nm] * double(ODDEVEN(n) * Anm[nm] * Anm[jnkm] / Anm[jk]); } } } for( int m=k; m<=n; ++m ) { if( j-n >= m-k ) { const int jnkm = (j - n) * (j - n) + j - n + k - m; const int jnkms = (j - n) * (j - n + 1) / 2 - k + m; const int nm = n * n + n + m; for( int d=0; d!=3; ++d ) { M[d] += std::conj(CJ->M[3*jnkms+d]) * Ynm[nm] * double(ODDEVEN(k+n+m) * Anm[nm] * Anm[jnkm] / Anm[jk]); } } } } for( int d=0; d!=3; ++d ) { CI->M[3*jks+d] += M[d]; } } } } __device__ void StretchingM2L_core(gpureal *target, gpureal beta, gpureal *factShrd, gpureal *YnmShrd, gpureal *sourceShrd) { int j = floorf(sqrtf(2*threadIdx.x+0.25)-0.5); int k = 0; for( int i=0; i<=j; ++i ) k += i; k = threadIdx.x - k; if( threadIdx.x >= NTERM ) j = k = 0; gpureal ajk = ODDEVEN(j) * rsqrtf(factShrd[j-k] * factShrd[j+k]); for( int n=0; n<P; ++n ) { for( int m=-n; m<0; ++m ) { int jnkm = (j + n) * (j + n + 1) / 2 - m + k; gpureal ere = cosf((m - k) * beta); gpureal eim = sinf((m - k) * beta); gpureal anm = rsqrtf(factShrd[n-m] * factShrd[n+m]); gpureal cnm = anm * ajk * YnmShrd[jnkm]; 
gpureal CnmReal = cnm * ere; gpureal CnmImag = cnm * eim; int i = n * (n + 1) / 2 - m; target[0] += sourceShrd[6*i+0] * CnmReal; target[0] += sourceShrd[6*i+1] * CnmImag; target[1] += sourceShrd[6*i+0] * CnmImag; target[1] -= sourceShrd[6*i+1] * CnmReal; target[2] += sourceShrd[6*i+2] * CnmReal; target[2] += sourceShrd[6*i+3] * CnmImag; target[3] += sourceShrd[6*i+2] * CnmImag; target[3] -= sourceShrd[6*i+3] * CnmReal; target[4] += sourceShrd[6*i+4] * CnmReal; target[4] += sourceShrd[6*i+5] * CnmImag; target[5] += sourceShrd[6*i+4] * CnmImag; target[5] -= sourceShrd[6*i+5] * CnmReal; } for( int m=0; m<=n; ++m ) { int jnkm = (j + n) * (j + n + 1) / 2 + abs(m - k); gpureal ere = cosf((m - k) * beta); gpureal eim = sinf((m - k) * beta); gpureal anm = rsqrtf(factShrd[n-m] * factShrd[n+m]); gpureal cnm = ODDEVEN((abs(k - m) - k - m) / 2); cnm *= anm * ajk * YnmShrd[jnkm]; gpureal CnmReal = cnm * ere; gpureal CnmImag = cnm * eim; int i = n * (n + 1) / 2 + m; target[0] += sourceShrd[6*i+0] * CnmReal; target[0] -= sourceShrd[6*i+1] * CnmImag; target[1] += sourceShrd[6*i+0] * CnmImag; target[1] += sourceShrd[6*i+1] * CnmReal; target[2] += sourceShrd[6*i+2] * CnmReal; target[2] -= sourceShrd[6*i+3] * CnmImag; target[3] += sourceShrd[6*i+2] * CnmImag; target[3] += sourceShrd[6*i+3] * CnmReal; target[4] += sourceShrd[6*i+4] * CnmReal; target[4] -= sourceShrd[6*i+5] * CnmImag; target[5] += sourceShrd[6*i+4] * CnmImag; target[5] += sourceShrd[6*i+5] * CnmReal; } } } __global__ void StretchingM2L_GPU(int *keysGlob, int *rangeGlob, gpureal *targetGlob, gpureal *sourceGlob) { int keys = keysGlob[blockIdx.x]; int numList = rangeGlob[keys]; gpureal D0 = -constDevc[0]; gpureal target[6] = {0, 0, 0, 0, 0, 0}; __shared__ gpureal sourceShrd[6*THREADS]; __shared__ gpureal factShrd[2*P]; __shared__ gpureal YnmShrd[4*NTERM]; gpureal fact = 1; for( int i=0; i<2*P; ++i ) { factShrd[i] = fact; fact *= i + 1; } __syncthreads(); int itarget = blockIdx.x * THREADS; for( int ilist=0; ilist<numList; ++ilist ) { int begin = rangeGlob[keys+3*ilist+1]; int Iperiodic = rangeGlob[keys+3*ilist+3]; __syncthreads(); if( threadIdx.x < NTERM ) { sourceShrd[6*threadIdx.x+0] = sourceGlob[begin+6*threadIdx.x+3]; sourceShrd[6*threadIdx.x+1] = sourceGlob[begin+6*threadIdx.x+4]; sourceShrd[6*threadIdx.x+2] = sourceGlob[begin+6*threadIdx.x+5]; sourceShrd[6*threadIdx.x+3] = sourceGlob[begin+6*threadIdx.x+6]; sourceShrd[6*threadIdx.x+4] = sourceGlob[begin+6*threadIdx.x+7]; sourceShrd[6*threadIdx.x+5] = sourceGlob[begin+6*threadIdx.x+8]; } __syncthreads(); int I = 0; for( int ix=-1; ix<=1; ++ix ) { for( int iy=-1; iy<=1; ++iy ) { for( int iz=-1; iz<=1; ++iz, ++I ) { if( Iperiodic & (1 << I) ) { float3 d; d.x = ix * D0; d.y = iy * D0; d.z = iz * D0; d.x += targetGlob[6*itarget+0] - sourceGlob[begin+0]; d.y += targetGlob[6*itarget+1] - sourceGlob[begin+1]; d.z += targetGlob[6*itarget+2] - sourceGlob[begin+2]; gpureal rho,alpha,beta; cart2sph(rho,alpha,beta,d.x,d.y,d.z); evalLocal(YnmShrd,rho,alpha,factShrd); StretchingM2L_core(target,beta,factShrd,YnmShrd,sourceShrd); } } } } } itarget = blockIdx.x * THREADS + threadIdx.x; targetGlob[6*itarget+0] = target[0]; targetGlob[6*itarget+1] = target[1]; targetGlob[6*itarget+2] = target[2]; targetGlob[6*itarget+3] = target[3]; targetGlob[6*itarget+4] = target[4]; targetGlob[6*itarget+5] = target[5]; } __device__ void StretchingM2P_core(gpureal *target, gpureal *targetQ, gpureal r, gpureal theta, gpureal phi, gpureal *factShrd, gpureal *sourceShrd) { gpureal x = cosf(theta); gpureal y = sinf(theta); if( 
fabsf(y) < EPS ) y = 1 / EPS; gpureal s = sqrtf(1 - x * x); gpureal spherical[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; gpureal cartesian[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; gpureal fact = 1; gpureal pn = 1; gpureal rhom = 1.0 / r; for( int m=0; m<P; ++m ) { gpureal p = pn; int i = m * (m + 1) / 2 + m; gpureal ere = cosf(m * phi); if( m == 0 ) ere = 0.5; gpureal eim = sinf(m * phi); gpureal anm = rhom * rsqrtf(factShrd[2*m]); gpureal Ynm = anm * p; gpureal p1 = p; p = x * (2 * m + 1) * p; gpureal YnmTheta = anm * (p - (m + 1) * x * p1) / y; gpureal realj = ere * sourceShrd[6*i+0] - eim * sourceShrd[6*i+1]; gpureal imagj = eim * sourceShrd[6*i+0] + ere * sourceShrd[6*i+1]; spherical[0] -= 2 * (m + 1) / r * Ynm * realj; spherical[1] += 2 * YnmTheta * realj; spherical[2] -= 2 * m * Ynm * imagj; realj = ere * sourceShrd[6*i+2] - eim * sourceShrd[6*i+3]; imagj = eim * sourceShrd[6*i+2] + ere * sourceShrd[6*i+3]; spherical[3] -= 2 * (m + 1) / r * Ynm * realj; spherical[4] += 2 * YnmTheta * realj; spherical[5] -= 2 * m * Ynm * imagj; realj = ere * sourceShrd[6*i+4] - eim * sourceShrd[6*i+5]; imagj = eim * sourceShrd[6*i+4] + ere * sourceShrd[6*i+5]; spherical[6] -= 2 * (m + 1) / r * Ynm * realj; spherical[7] += 2 * YnmTheta * realj; spherical[8] -= 2 * m * Ynm * imagj; rhom /= r; gpureal rhon = rhom; for( int n=m+1; n<P; ++n ) { i = n * (n + 1) / 2 + m; anm = rhon * rsqrtf(factShrd[n+m] / factShrd[n-m]); Ynm = anm * p; gpureal p2 = p1; p1 = p; p = (x * (2 * n + 1) * p1 - (n + m) * p2) / (n - m + 1); YnmTheta = anm * ((n - m + 1) * p - (n + 1) * x * p1) / y; realj = ere * sourceShrd[6*i+0] - eim * sourceShrd[6*i+1]; imagj = eim * sourceShrd[6*i+0] + ere * sourceShrd[6*i+1]; spherical[0] -= 2 * (n + 1) / r * Ynm * realj; spherical[1] += 2 * YnmTheta * realj; spherical[2] -= 2 * m * Ynm * imagj; realj = ere * sourceShrd[6*i+2] - eim * sourceShrd[6*i+3]; imagj = eim * sourceShrd[6*i+2] + ere * sourceShrd[6*i+3]; spherical[3] -= 2 * (n + 1) / r * Ynm * realj; spherical[4] += 2 * YnmTheta * realj; spherical[5] -= 2 * m * Ynm * imagj; realj = ere * sourceShrd[6*i+4] - eim * sourceShrd[6*i+5]; imagj = eim * sourceShrd[6*i+4] + ere * sourceShrd[6*i+5]; spherical[6] -= 2 * (n + 1) / r * Ynm * realj; spherical[7] += 2 * YnmTheta * realj; spherical[8] -= 2 * m * Ynm * imagj; rhon /= r; } pn = -pn * fact * s; fact += 2; } sph2cart(r,theta,phi,&spherical[0],&cartesian[0]); sph2cart(r,theta,phi,&spherical[3],&cartesian[3]); sph2cart(r,theta,phi,&spherical[6],&cartesian[6]); target[0] -= 0.25 / M_PI * (targetQ[0] * cartesian[0] + targetQ[1] * cartesian[1] + targetQ[2] * cartesian[2]); target[1] -= 0.25 / M_PI * (targetQ[0] * cartesian[3] + targetQ[1] * cartesian[4] + targetQ[2] * cartesian[5]); target[2] -= 0.25 / M_PI * (targetQ[0] * cartesian[6] + targetQ[1] * cartesian[7] + targetQ[2] * cartesian[8]); } __global__ void StretchingM2P_GPU(int *keysGlob, int *rangeGlob, gpureal *targetGlob, gpureal *sourceGlob) { int keys = keysGlob[blockIdx.x]; int numList = rangeGlob[keys]; gpureal D0 = -constDevc[0]; gpureal targetX[3], targetQ[3]; gpureal target[3] = {0, 0, 0}; __shared__ gpureal sourceShrd[6*THREADS]; __shared__ gpureal factShrd[2*P]; gpureal fact = 1; for( int i=0; i<2*P; ++i ) { factShrd[i] = fact; fact *= i + 1; } __syncthreads(); int itarget = blockIdx.x * THREADS + threadIdx.x; targetX[0] = targetGlob[6*itarget+0]; targetX[1] = targetGlob[6*itarget+1]; targetX[2] = targetGlob[6*itarget+2]; targetQ[0] = targetGlob[6*itarget+3]; targetQ[1] = targetGlob[6*itarget+4]; targetQ[2] = targetGlob[6*itarget+5]; for( int 
ilist=0; ilist<numList; ++ilist ) { int begin = rangeGlob[keys+3*ilist+1]; int Iperiodic = rangeGlob[keys+3*ilist+3]; __syncthreads(); if( threadIdx.x < NTERM ) { sourceShrd[6*threadIdx.x+0] = sourceGlob[begin+6*threadIdx.x+3]; sourceShrd[6*threadIdx.x+1] = sourceGlob[begin+6*threadIdx.x+4]; sourceShrd[6*threadIdx.x+2] = sourceGlob[begin+6*threadIdx.x+5]; sourceShrd[6*threadIdx.x+3] = sourceGlob[begin+6*threadIdx.x+6]; sourceShrd[6*threadIdx.x+4] = sourceGlob[begin+6*threadIdx.x+7]; sourceShrd[6*threadIdx.x+5] = sourceGlob[begin+6*threadIdx.x+8]; } __syncthreads(); int I = 0; for( int ix=-1; ix<=1; ++ix ) { for( int iy=-1; iy<=1; ++iy ) { for( int iz=-1; iz<=1; ++iz, ++I ) { if( Iperiodic & (1 << I) ) { float3 d; d.x = ix * D0; d.y = iy * D0; d.z = iz * D0; d.x += targetX[0] - sourceGlob[begin+0]; d.y += targetX[1] - sourceGlob[begin+1]; d.z += targetX[2] - sourceGlob[begin+2]; gpureal r,theta,phi; cart2sph(r,theta,phi,d.x,d.y,d.z); StretchingM2P_core(target,targetQ,r,theta,phi,factShrd,sourceShrd); } } } } } targetGlob[6*itarget+0] = target[0]; targetGlob[6*itarget+1] = target[1]; targetGlob[6*itarget+2] = target[2]; } __device__ inline void StretchingP2P_core(gpureal *target, gpureal *targetX, gpureal *targetQ, gpureal *sourceShrd, float3 d, int i) { d.x += targetX[0]; d.x -= sourceShrd[7*i+0]; d.y += targetX[1]; d.y -= sourceShrd[7*i+1]; d.z += targetX[2]; d.z -= sourceShrd[7*i+2]; gpureal R2 = d.x * d.x + d.y * d.y + d.z * d.z + EPS2; #if 0 gpureal S2 = 2 * sourceShrd[7*i+6] * sourceShrd[7*i+6]; gpureal RS = R2 / S2; gpureal cutoff = 0.25 / M_PI / R2 / sqrtf(R2) * (erff( sqrtf(RS) ) - sqrtf(4 / M_PI * RS) * expf(-RS)); target[0] += (targetQ[1] * sourceShrd[7*i+5] - targetQ[2] * sourceShrd[7*i+4]) * cutoff; target[1] += (targetQ[2] * sourceShrd[7*i+3] - targetQ[0] * sourceShrd[7*i+5]) * cutoff; target[2] += (targetQ[0] * sourceShrd[7*i+4] - targetQ[1] * sourceShrd[7*i+3]) * cutoff; cutoff = 0.25 / M_PI / R2 / R2 / sqrtf(R2) * (3 * erff( sqrtf(RS) ) - (2 * RS + 3) * sqrtf(4 / M_PI * RS) * expf(-RS)) * (targetQ[0] * d.x + targetQ[1] * d.y + targetQ[2] * d.z); target[0] += (sourceShrd[7*i+4] * d.z - sourceShrd[7*i+5] * d.y) * cutoff; target[1] += (sourceShrd[7*i+5] * d.x - sourceShrd[7*i+3] * d.z) * cutoff; target[2] += (sourceShrd[7*i+3] * d.y - sourceShrd[7*i+4] * d.x) * cutoff; #else const gpureal SQRT4PI = M_2_SQRTPI; const gpureal FOURPI = 0.25 * M_1_PI; gpureal SQRT_R2_1 = rsqrtf(R2); gpureal RS = R2 * sourceShrd[7*i+6]; gpureal SQRT_RS = sqrtf(RS); gpureal z = SQRT_RS,t,ERF_SQRT_RS; (t)=1.0f/(1.0f+0.5f*(z)); ERF_SQRT_RS=1.0f - (t)*expf(-(z)*(z)-1.26551223f+(t)*(1.00002368f+(t)*(0.37409196f+(t)*(0.09678418f+ (t)*(-0.18628806f+(t)*(0.27886807f+(t)*(-1.13520398f+(t)*(1.48851587f+ (t)*(-0.82215223f+(t)*0.17087277f))))))))); gpureal EXP_RS = expf(-RS); gpureal cutoff = FOURPI * SQRT_R2_1 * SQRT_R2_1 * SQRT_R2_1 * (ERF_SQRT_RS - SQRT4PI * SQRT_RS * EXP_RS); target[0] += (targetQ[1] * sourceShrd[7*i+5] - targetQ[2] * sourceShrd[7*i+4]) * cutoff; target[1] += (targetQ[2] * sourceShrd[7*i+3] - targetQ[0] * sourceShrd[7*i+5]) * cutoff; target[2] += (targetQ[0] * sourceShrd[7*i+4] - targetQ[1] * sourceShrd[7*i+3]) * cutoff; gpureal cutoff2 = FOURPI * SQRT_R2_1 * SQRT_R2_1 * SQRT_R2_1 * SQRT_R2_1 * SQRT_R2_1 * (3.0f * ERF_SQRT_RS - (2.0f * RS + 3.0f) * SQRT4PI * SQRT_RS * EXP_RS) * (targetQ[0] * d.x + targetQ[1] * d.y + targetQ[2] * d.z); target[0] += (sourceShrd[7*i+4] * d.z - sourceShrd[7*i+5] * d.y) * cutoff2; target[1] += (sourceShrd[7*i+5] * d.x - sourceShrd[7*i+3] * d.z) * cutoff2; 
target[2] += (sourceShrd[7*i+3] * d.y - sourceShrd[7*i+4] * d.x) * cutoff2; #endif } __global__ void StretchingP2P_GPU(int *keysGlob, int *rangeGlob, gpureal *targetGlob, gpureal *sourceGlob) { int keys = keysGlob[blockIdx.x]; int numList = rangeGlob[keys]; gpureal D0 = -constDevc[0]; gpureal targetX[3], targetQ[3]; gpureal target[3] = {0, 0, 0}; __shared__ gpureal sourceShrd[7*THREADS]; int itarget = blockIdx.x * THREADS + threadIdx.x; targetX[0] = targetGlob[6*itarget+0]; targetX[1] = targetGlob[6*itarget+1]; targetX[2] = targetGlob[6*itarget+2]; targetQ[0] = targetGlob[6*itarget+3]; targetQ[1] = targetGlob[6*itarget+4]; targetQ[2] = targetGlob[6*itarget+5]; for( int ilist=0; ilist<numList; ++ilist ) { int begin = rangeGlob[keys+3*ilist+1]; int size = rangeGlob[keys+3*ilist+2]; int Iperiodic = rangeGlob[keys+3*ilist+3]; for( int iblok=0; iblok<(size-1)/THREADS; ++iblok ) { int isource = begin + iblok * THREADS + threadIdx.x; __syncthreads(); sourceShrd[7*threadIdx.x+0] = sourceGlob[7*isource+0]; sourceShrd[7*threadIdx.x+1] = sourceGlob[7*isource+1]; sourceShrd[7*threadIdx.x+2] = sourceGlob[7*isource+2]; sourceShrd[7*threadIdx.x+3] = sourceGlob[7*isource+3]; sourceShrd[7*threadIdx.x+4] = sourceGlob[7*isource+4]; sourceShrd[7*threadIdx.x+5] = sourceGlob[7*isource+5]; // sourceShrd[7*threadIdx.x+6] = sourceGlob[7*isource+6]; sourceShrd[7*threadIdx.x+6] = 0.5f / (sourceGlob[7*isource+6] * sourceGlob[7*isource+6]); __syncthreads(); int I = 0; for( int ix=-1; ix<=1; ++ix ) { for( int iy=-1; iy<=1; ++iy ) { for( int iz=-1; iz<=1; ++iz, ++I ) { if( Iperiodic & (1 << I) ) { float3 d; d.x = ix * D0; d.y = iy * D0; d.z = iz * D0; #pragma unroll 64 for( int i=0; i<THREADS; ++i ) { StretchingP2P_core(target,targetX,targetQ,sourceShrd,d,i); } } } } } } int iblok = (size-1)/THREADS; int isource = begin + iblok * THREADS + threadIdx.x; __syncthreads(); if( threadIdx.x < size - iblok * THREADS ) { sourceShrd[7*threadIdx.x+0] = sourceGlob[7*isource+0]; sourceShrd[7*threadIdx.x+1] = sourceGlob[7*isource+1]; sourceShrd[7*threadIdx.x+2] = sourceGlob[7*isource+2]; sourceShrd[7*threadIdx.x+3] = sourceGlob[7*isource+3]; sourceShrd[7*threadIdx.x+4] = sourceGlob[7*isource+4]; sourceShrd[7*threadIdx.x+5] = sourceGlob[7*isource+5]; // sourceShrd[7*threadIdx.x+6] = sourceGlob[7*isource+6]; sourceShrd[7*threadIdx.x+6] = 0.5f / (sourceGlob[7*isource+6] * sourceGlob[7*isource+6]); } __syncthreads(); int I = 0; int icounter=0; for( int ix=-1; ix<=1; ++ix ) { for( int iy=-1; iy<=1; ++iy ) { for( int iz=-1; iz<=1; ++iz, ++I ) { if( Iperiodic & (1 << I) ) { icounter++; float3 d; d.x = ix * D0; d.y = iy * D0; d.z = iz * D0; for( int i=0; i<size-iblok*THREADS; ++i ) { StretchingP2P_core(target,targetX,targetQ,sourceShrd,d,i); } } } } } } targetGlob[6*itarget+0] = target[0]; targetGlob[6*itarget+1] = target[1]; targetGlob[6*itarget+2] = target[2]; } __device__ void StretchingL2L_core(gpureal *target, gpureal beta, gpureal *factShrd, gpureal *YnmShrd, gpureal *sourceShrd) { int j = floorf(sqrtf(2*threadIdx.x+0.25)-0.5); int k = 0; for( int i=0; i<=j; ++i ) k += i; k = threadIdx.x - k; if( threadIdx.x >= NTERM ) j = k = 0; gpureal ajk = ODDEVEN(j) * rsqrtf(factShrd[j-k] * factShrd[j+k]); for( int n=0; n<P; ++n ) { for( int m=j+k-n; m<0; ++m ) { int nms = n * (n + 1) / 2 - m; int jnkm = (n - j) * (n - j) + n - j + m - k; gpureal ere = cosf((m - k) * beta); gpureal eim = sinf((m - k) * beta); gpureal anm = rsqrtf(factShrd[n-m] * factShrd[n+m]); gpureal cnm = ODDEVEN(k-n) * ajk / anm * YnmShrd[jnkm]; gpureal CnmReal = cnm * ere; 
gpureal CnmImag = cnm * eim; target[0] += sourceShrd[6*nms+0] * CnmReal; target[0] += sourceShrd[6*nms+1] * CnmImag; target[1] += sourceShrd[6*nms+0] * CnmImag; target[1] -= sourceShrd[6*nms+1] * CnmReal; target[2] += sourceShrd[6*nms+2] * CnmReal; target[2] += sourceShrd[6*nms+3] * CnmImag; target[3] += sourceShrd[6*nms+2] * CnmImag; target[3] -= sourceShrd[6*nms+3] * CnmReal; target[4] += sourceShrd[6*nms+4] * CnmReal; target[4] += sourceShrd[6*nms+5] * CnmImag; target[5] += sourceShrd[6*nms+4] * CnmImag; target[5] -= sourceShrd[6*nms+5] * CnmReal; } for( int m=0; m<=n; ++m ) { if( n-j >= abs(m-k) ) { int nms = n * (n + 1) / 2 + m; int jnkm = (n - j) * (n - j) + n - j + m - k; gpureal ere = cosf((m - k) * beta); gpureal eim = sinf((m - k) * beta); gpureal anm = rsqrtf(factShrd[n-m] * factShrd[n+m]); gpureal cnm = ODDEVEN((m-k-abs(m-k)) / 2 - n); cnm *= ajk / anm * YnmShrd[jnkm]; gpureal CnmReal = cnm * ere; gpureal CnmImag = cnm * eim; target[0] += sourceShrd[6*nms+0] * CnmReal; target[0] -= sourceShrd[6*nms+1] * CnmImag; target[1] += sourceShrd[6*nms+0] * CnmImag; target[1] += sourceShrd[6*nms+1] * CnmReal; target[2] += sourceShrd[6*nms+2] * CnmReal; target[2] -= sourceShrd[6*nms+3] * CnmImag; target[3] += sourceShrd[6*nms+2] * CnmImag; target[3] += sourceShrd[6*nms+3] * CnmReal; target[4] += sourceShrd[6*nms+4] * CnmReal; target[4] -= sourceShrd[6*nms+5] * CnmImag; target[5] += sourceShrd[6*nms+4] * CnmImag; target[5] += sourceShrd[6*nms+5] * CnmReal; } } } } __global__ void StretchingL2L_GPU(int *keysGlob, int *rangeGlob, gpureal *targetGlob, gpureal *sourceGlob) { int keys = keysGlob[blockIdx.x]; int numList = rangeGlob[keys]; gpureal target[6] = {0, 0, 0, 0, 0, 0}; __shared__ gpureal sourceShrd[6*THREADS]; __shared__ gpureal factShrd[2*P]; __shared__ gpureal YnmShrd[P*P]; gpureal fact = 1; for( int i=0; i<2*P; ++i ) { factShrd[i] = fact; fact *= i + 1; } __syncthreads(); int itarget = blockIdx.x * THREADS; for( int ilist=0; ilist<numList; ++ilist ) { int begin = rangeGlob[keys+3*ilist+1]; float3 d; d.x = targetGlob[6*itarget+0] - sourceGlob[begin+0]; d.y = targetGlob[6*itarget+1] - sourceGlob[begin+1]; d.z = targetGlob[6*itarget+2] - sourceGlob[begin+2]; __syncthreads(); if( threadIdx.x < NTERM ) { sourceShrd[6*threadIdx.x+0] = sourceGlob[begin+6*threadIdx.x+3]; sourceShrd[6*threadIdx.x+1] = sourceGlob[begin+6*threadIdx.x+4]; sourceShrd[6*threadIdx.x+2] = sourceGlob[begin+6*threadIdx.x+5]; sourceShrd[6*threadIdx.x+3] = sourceGlob[begin+6*threadIdx.x+6]; sourceShrd[6*threadIdx.x+4] = sourceGlob[begin+6*threadIdx.x+7]; sourceShrd[6*threadIdx.x+5] = sourceGlob[begin+6*threadIdx.x+8]; } __syncthreads(); gpureal rho,alpha,beta; cart2sph(rho,alpha,beta,d.x,d.y,d.z); evalMultipole(YnmShrd,rho,alpha,factShrd); StretchingL2L_core(target,beta,factShrd,YnmShrd,sourceShrd); } itarget = blockIdx.x * THREADS + threadIdx.x; targetGlob[6*itarget+0] = target[0]; targetGlob[6*itarget+1] = target[1]; targetGlob[6*itarget+2] = target[2]; targetGlob[6*itarget+3] = target[3]; targetGlob[6*itarget+4] = target[4]; targetGlob[6*itarget+5] = target[5]; } __device__ void StretchingL2P_core(gpureal *target, gpureal *targetQ, gpureal r, gpureal theta, gpureal phi, gpureal *factShrd, gpureal *sourceShrd) { gpureal x = cosf(theta); gpureal y = sinf(theta); if( fabsf(y) < EPS ) y = 1 / EPS; gpureal s = sqrtf(1 - x * x); gpureal spherical[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; gpureal cartesian[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; gpureal fact = 1; gpureal pn = 1; gpureal rhom = 1; for( int m=0; m<P; ++m ) { gpureal p = 
pn; int i = m * (m + 1) / 2 + m; gpureal ere = cosf(m * phi); if( m == 0 ) ere = 0.5; gpureal eim = sinf(m * phi); gpureal anm = rhom * rsqrtf(factShrd[2*m]); gpureal Ynm = anm * p; gpureal p1 = p; p = x * (2 * m + 1) * p; gpureal YnmTheta = anm * (p - (m + 1) * x * p1) / y; gpureal realj = ere * sourceShrd[6*i+0] - eim * sourceShrd[6*i+1]; gpureal imagj = eim * sourceShrd[6*i+0] + ere * sourceShrd[6*i+1]; spherical[0] += 2 * m / r * Ynm * realj; spherical[1] += 2 * YnmTheta * realj; spherical[2] -= 2 * m * Ynm * imagj; realj = ere * sourceShrd[6*i+2] - eim * sourceShrd[6*i+3]; imagj = eim * sourceShrd[6*i+2] + ere * sourceShrd[6*i+3]; spherical[3] += 2 * m / r * Ynm * realj; spherical[4] += 2 * YnmTheta * realj; spherical[5] -= 2 * m * Ynm * imagj; realj = ere * sourceShrd[6*i+4] - eim * sourceShrd[6*i+5]; imagj = eim * sourceShrd[6*i+4] + ere * sourceShrd[6*i+5]; spherical[6] += 2 * m / r * Ynm * realj; spherical[7] += 2 * YnmTheta * realj; spherical[8] -= 2 * m * Ynm * imagj; rhom *= r; gpureal rhon = rhom; for( int n=m+1; n<P; ++n ) { i = n * (n + 1) / 2 + m; anm = rhon * rsqrtf(factShrd[n+m] / factShrd[n-m]); Ynm = anm * p; gpureal p2 = p1; p1 = p; p = (x * (2 * n + 1) * p1 - (n + m) * p2) / (n - m + 1); YnmTheta = anm * ((n - m + 1) * p - (n + 1) * x * p1) / y; realj = ere * sourceShrd[6*i+0] - eim * sourceShrd[6*i+1]; imagj = eim * sourceShrd[6*i+0] + ere * sourceShrd[6*i+1]; spherical[0] += 2 * n / r * Ynm * realj; spherical[1] += 2 * YnmTheta * realj; spherical[2] -= 2 * m * Ynm * imagj; realj = ere * sourceShrd[6*i+2] - eim * sourceShrd[6*i+3]; imagj = eim * sourceShrd[6*i+2] + ere * sourceShrd[6*i+3]; spherical[3] += 2 * n / r * Ynm * realj; spherical[4] += 2 * YnmTheta * realj; spherical[5] -= 2 * m * Ynm * imagj; realj = ere * sourceShrd[6*i+4] - eim * sourceShrd[6*i+5]; imagj = eim * sourceShrd[6*i+4] + ere * sourceShrd[6*i+5]; spherical[6] += 2 * n / r * Ynm * realj; spherical[7] += 2 * YnmTheta * realj; spherical[8] -= 2 * m * Ynm * imagj; rhon *= r; } pn = -pn * fact * s; fact += 2; } sph2cart(r,theta,phi,&spherical[0],&cartesian[0]); sph2cart(r,theta,phi,&spherical[3],&cartesian[3]); sph2cart(r,theta,phi,&spherical[6],&cartesian[6]); target[0] -= 0.25 / M_PI * (targetQ[0] * cartesian[0] + targetQ[1] * cartesian[1] + targetQ[2] * cartesian[2]); target[1] -= 0.25 / M_PI * (targetQ[0] * cartesian[3] + targetQ[1] * cartesian[4] + targetQ[2] * cartesian[5]); target[2] -= 0.25 / M_PI * (targetQ[0] * cartesian[6] + targetQ[1] * cartesian[7] + targetQ[2] * cartesian[8]); } __global__ void StretchingL2P_GPU(int *keysGlob, int *rangeGlob, gpureal *targetGlob, gpureal *sourceGlob) { int keys = keysGlob[blockIdx.x]; int numList = rangeGlob[keys]; gpureal targetX[3], targetQ[3]; gpureal target[3] = {0, 0, 0}; __shared__ gpureal sourceShrd[6*THREADS]; __shared__ gpureal factShrd[2*P]; gpureal fact = 1; for( int i=0; i<2*P; ++i ) { factShrd[i] = fact; fact *= i + 1; } __syncthreads(); int itarget = blockIdx.x * THREADS + threadIdx.x; targetX[0] = targetGlob[6*itarget+0]; targetX[1] = targetGlob[6*itarget+1]; targetX[2] = targetGlob[6*itarget+2]; targetQ[0] = targetGlob[6*itarget+3]; targetQ[1] = targetGlob[6*itarget+4]; targetQ[2] = targetGlob[6*itarget+5]; for( int ilist=0; ilist<numList; ++ilist ) { int begin = rangeGlob[keys+3*ilist+1]; float3 d; d.x = targetX[0] - sourceGlob[begin+0]; d.y = targetX[1] - sourceGlob[begin+1]; d.z = targetX[2] - sourceGlob[begin+2]; __syncthreads(); if( threadIdx.x < NTERM ) { sourceShrd[6*threadIdx.x+0] = sourceGlob[begin+6*threadIdx.x+3]; 
sourceShrd[6*threadIdx.x+1] = sourceGlob[begin+6*threadIdx.x+4]; sourceShrd[6*threadIdx.x+2] = sourceGlob[begin+6*threadIdx.x+5]; sourceShrd[6*threadIdx.x+3] = sourceGlob[begin+6*threadIdx.x+6]; sourceShrd[6*threadIdx.x+4] = sourceGlob[begin+6*threadIdx.x+7]; sourceShrd[6*threadIdx.x+5] = sourceGlob[begin+6*threadIdx.x+8]; } __syncthreads(); gpureal r,theta,phi; cart2sph(r,theta,phi,d.x,d.y,d.z); StretchingL2P_core(target,targetQ,r,theta,phi,factShrd,sourceShrd); } targetGlob[6*itarget+0] = target[0]; targetGlob[6*itarget+1] = target[1]; targetGlob[6*itarget+2] = target[2]; } void Kernel::StretchingFinal() {} #include "gpu.h" CALL_GPU(StretchingP2M,P2M GPUkernel); CALL_GPU(StretchingM2M,M2M GPUkernel); CALL_GPU(StretchingM2L,M2L GPUkernel); CALL_GPU(StretchingM2P,M2P GPUkernel); CALL_GPU(StretchingP2P,P2P GPUkernel); CALL_GPU(StretchingL2L,L2L GPUkernel); CALL_GPU(StretchingL2P,L2P GPUkernel);
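// --- Illustrative sketch (editorial addition, not part of the original file) ---
// Every kernel above converts the target-source offset to spherical coordinates
// with cart2sph(rho,alpha,beta,dx,dy,dz) before evaluating the multipole/local
// series, and sph2cart maps the (d/drho, d/dalpha, d/dbeta) derivative triplets
// back to Cartesian components. The device functions live in the included
// headers; the host-side reference below is only a sketch of the convention the
// kernels appear to assume (rho = radius, alpha = polar angle from +z,
// beta = azimuth), with an EPS guard that is itself an assumption.
inline void cart2sph_ref(gpureal &rho, gpureal &alpha, gpureal &beta,
                         gpureal dx, gpureal dy, gpureal dz) {
    rho   = sqrtf(dx * dx + dy * dy + dz * dz) + EPS; // guard the r = 0 case
    alpha = acosf(dz / rho);                          // polar angle measured from +z
    beta  = atan2f(dy, dx);                           // azimuthal angle in the x-y plane
}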
88b232fc65d1d025424a3b9fd0a5d680571c3fbc.cu
#include "kernel.h" #include "stretching.h" #include "pregpu.h" void Kernel::StretchingInit() { startTimer("Init GPU "); // Start timer cudaThreadExit(); // Exit GPU thread cudaSetDevice(MPIRANK % GPUS); // Set GPU device cudaThreadSynchronize(); // Sync GPU threads stopTimer("Init GPU ",MPIRANK==0); // Stop timer & print eraseTimer("Init GPU "); // Erase timer } __device__ void StretchingP2M_core(gpureal *target, gpureal rho, gpureal alpha, gpureal beta, gpureal *sourceShrd, int ithread) { __shared__ gpureal factShrd[2*P]; gpureal Ynm; gpureal YnmAlpha; gpureal fact = 1; for( int i=0; i<2*P; ++i ) { factShrd[i] = fact; fact *= i + 1; } __syncthreads(); int nn = floorf(sqrtf(2*threadIdx.x+0.25)-0.5); int mm = 0; for( int i=0; i<=nn; ++i ) mm += i; mm = threadIdx.x - mm; if( threadIdx.x >= NTERM ) nn = mm = 0; gpureal x = cosf(alpha); gpureal y = sinf(alpha); if( fabsf(y) < EPS ) y = 1 / EPS; gpureal s = sqrtf(1 - x * x); fact = 1; gpureal pn = 1; gpureal rhom = 1; for( int m=0; m<mm; ++m ) { rhom *= rho; pn = -pn * fact * s; fact += 2; } int m = mm; gpureal p = pn; gpureal anm; if(mm==nn) { anm = rhom * rsqrtf(factShrd[2*m]); Ynm = anm * p; } gpureal p1 = p; p = x * (2 * m + 1) * p; if(mm==nn) YnmAlpha = anm * (p - (m + 1) * x * p1) / y; rhom *= rho; gpureal rhon = rhom; int n; for( n=m+1; n<nn; ++n ) { gpureal p2 = p1; p1 = p; p = (x * (2 * n + 1) * p1 - (n + m) * p2) / (n - m + 1); rhon *= rho; } if(n<=nn) { anm = rhon * rsqrtf(factShrd[n+m] / factShrd[n-m]); Ynm = anm * p; gpureal p2 = p1; p1 = p; p = (x * (2 * n + 1) * p1 - (n + m) * p2) / (n - m + 1); YnmAlpha = anm * ((n - m + 1) * p - (n + 1) * x * p1) / y; } gpureal ere = cosf(-mm * beta); gpureal eim = sinf(-mm * beta); gpureal spherical[6]; gpureal cartesian[6]; spherical[0] = Ynm * nn / rho * ere; spherical[1] = YnmAlpha * ere; spherical[2] = Ynm * mm * eim; spherical[3] = Ynm * nn / rho * eim; spherical[4] = YnmAlpha * eim; spherical[5] = -Ynm * mm * ere; sph2cart(rho,alpha,beta,&spherical[0],&cartesian[0]); sph2cart(rho,alpha,beta,&spherical[3],&cartesian[3]); target[0] += sourceShrd[6*ithread+4] * cartesian[2] - sourceShrd[6*ithread+5] * cartesian[1]; target[1] += sourceShrd[6*ithread+4] * cartesian[5] - sourceShrd[6*ithread+5] * cartesian[4]; target[2] += sourceShrd[6*ithread+5] * cartesian[0] - sourceShrd[6*ithread+3] * cartesian[2]; target[3] += sourceShrd[6*ithread+5] * cartesian[3] - sourceShrd[6*ithread+3] * cartesian[5]; target[4] += sourceShrd[6*ithread+3] * cartesian[1] - sourceShrd[6*ithread+4] * cartesian[0]; target[5] += sourceShrd[6*ithread+3] * cartesian[4] - sourceShrd[6*ithread+4] * cartesian[3]; } __global__ void StretchingP2M_GPU(int *keysGlob, int *rangeGlob, gpureal *targetGlob, gpureal *sourceGlob) { int keys = keysGlob[blockIdx.x]; int numList = rangeGlob[keys]; gpureal target[6] = {0, 0, 0, 0, 0, 0}; __shared__ gpureal targetShrd[3]; __shared__ gpureal sourceShrd[6*THREADS]; int itarget = blockIdx.x * THREADS; targetShrd[0] = targetGlob[6*itarget+0]; targetShrd[1] = targetGlob[6*itarget+1]; targetShrd[2] = targetGlob[6*itarget+2]; for( int ilist=0; ilist<numList; ++ilist ) { int begin = rangeGlob[keys+3*ilist+1]; int size = rangeGlob[keys+3*ilist+2]; for( int iblok=0; iblok<(size-1)/THREADS; ++iblok ) { int isource = begin + iblok * THREADS + threadIdx.x; __syncthreads(); sourceShrd[6*threadIdx.x+0] = sourceGlob[7*isource+0]; sourceShrd[6*threadIdx.x+1] = sourceGlob[7*isource+1]; sourceShrd[6*threadIdx.x+2] = sourceGlob[7*isource+2]; sourceShrd[6*threadIdx.x+3] = sourceGlob[7*isource+3]; 
sourceShrd[6*threadIdx.x+4] = sourceGlob[7*isource+4]; sourceShrd[6*threadIdx.x+5] = sourceGlob[7*isource+5]; __syncthreads(); for( int i=0; i<THREADS; ++i ) { float3 d; d.x = sourceShrd[6*i+0] - targetShrd[0]; d.y = sourceShrd[6*i+1] - targetShrd[1]; d.z = sourceShrd[6*i+2] - targetShrd[2]; gpureal rho,alpha,beta; cart2sph(rho,alpha,beta,d.x,d.y,d.z); StretchingP2M_core(target,rho,alpha,beta,sourceShrd,i); } } int iblok = (size-1)/THREADS; int isource = begin + iblok * THREADS + threadIdx.x; __syncthreads(); if( threadIdx.x < size - iblok * THREADS ) { sourceShrd[6*threadIdx.x+0] = sourceGlob[7*isource+0]; sourceShrd[6*threadIdx.x+1] = sourceGlob[7*isource+1]; sourceShrd[6*threadIdx.x+2] = sourceGlob[7*isource+2]; sourceShrd[6*threadIdx.x+3] = sourceGlob[7*isource+3]; sourceShrd[6*threadIdx.x+4] = sourceGlob[7*isource+4]; sourceShrd[6*threadIdx.x+5] = sourceGlob[7*isource+5]; } __syncthreads(); for( int i=0; i<size-iblok*THREADS; ++i ) { float3 d; d.x = sourceShrd[6*i+0] - targetShrd[0]; d.y = sourceShrd[6*i+1] - targetShrd[1]; d.z = sourceShrd[6*i+2] - targetShrd[2]; gpureal rho,alpha,beta; cart2sph(rho,alpha,beta,d.x,d.y,d.z); StretchingP2M_core(target,rho,alpha,beta,sourceShrd,i); } } itarget = blockIdx.x * THREADS + threadIdx.x; targetGlob[6*itarget+0] = target[0]; targetGlob[6*itarget+1] = target[1]; targetGlob[6*itarget+2] = target[2]; targetGlob[6*itarget+3] = target[3]; targetGlob[6*itarget+4] = target[4]; targetGlob[6*itarget+5] = target[5]; } __device__ void StretchingM2M_core(gpureal *target, gpureal beta, gpureal *factShrd, gpureal *YnmShrd, gpureal *sourceShrd) { int j = floorf(sqrtf(2*threadIdx.x+0.25)-0.5); int k = 0; for( int i=0; i<=j; ++i ) k += i; k = threadIdx.x - k; if( threadIdx.x >= NTERM ) j = k = 0; gpureal ajk = ODDEVEN(j) * rsqrtf(factShrd[j-k] * factShrd[j+k]); for( int n=0; n<=j; ++n ) { for( int m=-n; m<=min(k-1,n); ++m ) { if( j-n >= k-m ) { int nm = n * n + n + m; int jnkms = (j - n) * (j - n + 1) / 2 + k - m; gpureal ere = cosf(-m * beta); gpureal eim = sinf(-m * beta); gpureal ajnkm = rsqrtf(factShrd[j-n-k+m] * factShrd[j-n+k-m]); gpureal cnm = ODDEVEN((m-abs(m))/2+j); cnm *= ajnkm / ajk * YnmShrd[nm]; gpureal CnmReal = cnm * ere; gpureal CnmImag = cnm * eim; target[0] += sourceShrd[6*jnkms+0] * CnmReal; target[0] -= sourceShrd[6*jnkms+1] * CnmImag; target[1] += sourceShrd[6*jnkms+0] * CnmImag; target[1] += sourceShrd[6*jnkms+1] * CnmReal; target[2] += sourceShrd[6*jnkms+2] * CnmReal; target[2] -= sourceShrd[6*jnkms+3] * CnmImag; target[3] += sourceShrd[6*jnkms+2] * CnmImag; target[3] += sourceShrd[6*jnkms+3] * CnmReal; target[4] += sourceShrd[6*jnkms+4] * CnmReal; target[4] -= sourceShrd[6*jnkms+5] * CnmImag; target[5] += sourceShrd[6*jnkms+4] * CnmImag; target[5] += sourceShrd[6*jnkms+5] * CnmReal; } } for( int m=k; m<=n; ++m ) { if( j-n >= m-k ) { int nm = n * n + n + m; int jnkms = (j - n) * (j - n + 1) / 2 - k + m; gpureal ere = cosf(-m * beta); gpureal eim = sinf(-m * beta); gpureal ajnkm = rsqrtf(factShrd[j-n-k+m] * factShrd[j-n+k-m]); gpureal cnm = ODDEVEN(k+j+m); cnm *= ajnkm / ajk * YnmShrd[nm]; gpureal CnmReal = cnm * ere; gpureal CnmImag = cnm * eim; target[0] += sourceShrd[6*jnkms+0] * CnmReal; target[0] += sourceShrd[6*jnkms+1] * CnmImag; target[1] += sourceShrd[6*jnkms+0] * CnmImag; target[1] -= sourceShrd[6*jnkms+1] * CnmReal; target[2] += sourceShrd[6*jnkms+2] * CnmReal; target[2] += sourceShrd[6*jnkms+3] * CnmImag; target[3] += sourceShrd[6*jnkms+2] * CnmImag; target[3] -= sourceShrd[6*jnkms+3] * CnmReal; target[4] += 
sourceShrd[6*jnkms+4] * CnmReal; target[4] += sourceShrd[6*jnkms+5] * CnmImag; target[5] += sourceShrd[6*jnkms+4] * CnmImag; target[5] -= sourceShrd[6*jnkms+5] * CnmReal; } } } } __global__ void StretchingM2M_GPU(int *keysGlob, int *rangeGlob, gpureal *targetGlob, gpureal *sourceGlob) { int keys = keysGlob[blockIdx.x]; int numList = rangeGlob[keys]; gpureal target[6] = {0, 0, 0, 0, 0, 0}; __shared__ gpureal sourceShrd[6*THREADS]; __shared__ gpureal factShrd[2*P]; __shared__ gpureal YnmShrd[P*P]; gpureal fact = 1; for( int i=0; i<2*P; ++i ) { factShrd[i] = fact; fact *= i + 1; } __syncthreads(); int itarget = blockIdx.x * THREADS; for( int ilist=0; ilist<numList; ++ilist ) { int begin = rangeGlob[keys+3*ilist+1]; float3 d; d.x = targetGlob[6*itarget+0] - sourceGlob[begin+0]; d.y = targetGlob[6*itarget+1] - sourceGlob[begin+1]; d.z = targetGlob[6*itarget+2] - sourceGlob[begin+2]; __syncthreads(); if( threadIdx.x < NTERM ) { sourceShrd[6*threadIdx.x+0] = sourceGlob[begin+6*threadIdx.x+3]; sourceShrd[6*threadIdx.x+1] = sourceGlob[begin+6*threadIdx.x+4]; sourceShrd[6*threadIdx.x+2] = sourceGlob[begin+6*threadIdx.x+5]; sourceShrd[6*threadIdx.x+3] = sourceGlob[begin+6*threadIdx.x+6]; sourceShrd[6*threadIdx.x+4] = sourceGlob[begin+6*threadIdx.x+7]; sourceShrd[6*threadIdx.x+5] = sourceGlob[begin+6*threadIdx.x+8]; } __syncthreads(); gpureal rho,alpha,beta; cart2sph(rho,alpha,beta,d.x,d.y,d.z); evalMultipole(YnmShrd,rho,alpha,factShrd); StretchingM2M_core(target,beta,factShrd,YnmShrd,sourceShrd); } itarget = blockIdx.x * THREADS + threadIdx.x; targetGlob[6*itarget+0] = target[0]; targetGlob[6*itarget+1] = target[1]; targetGlob[6*itarget+2] = target[2]; targetGlob[6*itarget+3] = target[3]; targetGlob[6*itarget+4] = target[4]; targetGlob[6*itarget+5] = target[5]; } void Kernel::StretchingM2M_CPU() { const complex I(0.,1.); // Imaginary unit vect dist = CI->X - CJ->X; real rho, alpha, beta; cart2sph(rho,alpha,beta,dist); evalMultipole(rho,alpha,-beta); for( int j=0; j!=P; ++j ) { for( int k=0; k<=j; ++k ) { const int jk = j * j + j + k; const int jks = j * (j + 1) / 2 + k; complex M[3] = {0, 0, 0}; for( int n=0; n<=j; ++n ) { for( int m=-n; m<=std::min(k-1,n); ++m ) { if( j-n >= k-m ) { const int jnkm = (j - n) * (j - n) + j - n + k - m; const int jnkms = (j - n) * (j - n + 1) / 2 + k - m; const int nm = n * n + n + m; for( int d=0; d!=3; ++d ) { M[d] += CJ->M[3*jnkms+d] * std::pow(I,double(m-abs(m))) * Ynm[nm] * double(ODDEVEN(n) * Anm[nm] * Anm[jnkm] / Anm[jk]); } } } for( int m=k; m<=n; ++m ) { if( j-n >= m-k ) { const int jnkm = (j - n) * (j - n) + j - n + k - m; const int jnkms = (j - n) * (j - n + 1) / 2 - k + m; const int nm = n * n + n + m; for( int d=0; d!=3; ++d ) { M[d] += std::conj(CJ->M[3*jnkms+d]) * Ynm[nm] * double(ODDEVEN(k+n+m) * Anm[nm] * Anm[jnkm] / Anm[jk]); } } } } for( int d=0; d!=3; ++d ) { CI->M[3*jks+d] += M[d]; } } } } __device__ void StretchingM2L_core(gpureal *target, gpureal beta, gpureal *factShrd, gpureal *YnmShrd, gpureal *sourceShrd) { int j = floorf(sqrtf(2*threadIdx.x+0.25)-0.5); int k = 0; for( int i=0; i<=j; ++i ) k += i; k = threadIdx.x - k; if( threadIdx.x >= NTERM ) j = k = 0; gpureal ajk = ODDEVEN(j) * rsqrtf(factShrd[j-k] * factShrd[j+k]); for( int n=0; n<P; ++n ) { for( int m=-n; m<0; ++m ) { int jnkm = (j + n) * (j + n + 1) / 2 - m + k; gpureal ere = cosf((m - k) * beta); gpureal eim = sinf((m - k) * beta); gpureal anm = rsqrtf(factShrd[n-m] * factShrd[n+m]); gpureal cnm = anm * ajk * YnmShrd[jnkm]; gpureal CnmReal = cnm * ere; gpureal CnmImag = cnm * eim; int 
i = n * (n + 1) / 2 - m; target[0] += sourceShrd[6*i+0] * CnmReal; target[0] += sourceShrd[6*i+1] * CnmImag; target[1] += sourceShrd[6*i+0] * CnmImag; target[1] -= sourceShrd[6*i+1] * CnmReal; target[2] += sourceShrd[6*i+2] * CnmReal; target[2] += sourceShrd[6*i+3] * CnmImag; target[3] += sourceShrd[6*i+2] * CnmImag; target[3] -= sourceShrd[6*i+3] * CnmReal; target[4] += sourceShrd[6*i+4] * CnmReal; target[4] += sourceShrd[6*i+5] * CnmImag; target[5] += sourceShrd[6*i+4] * CnmImag; target[5] -= sourceShrd[6*i+5] * CnmReal; } for( int m=0; m<=n; ++m ) { int jnkm = (j + n) * (j + n + 1) / 2 + abs(m - k); gpureal ere = cosf((m - k) * beta); gpureal eim = sinf((m - k) * beta); gpureal anm = rsqrtf(factShrd[n-m] * factShrd[n+m]); gpureal cnm = ODDEVEN((abs(k - m) - k - m) / 2); cnm *= anm * ajk * YnmShrd[jnkm]; gpureal CnmReal = cnm * ere; gpureal CnmImag = cnm * eim; int i = n * (n + 1) / 2 + m; target[0] += sourceShrd[6*i+0] * CnmReal; target[0] -= sourceShrd[6*i+1] * CnmImag; target[1] += sourceShrd[6*i+0] * CnmImag; target[1] += sourceShrd[6*i+1] * CnmReal; target[2] += sourceShrd[6*i+2] * CnmReal; target[2] -= sourceShrd[6*i+3] * CnmImag; target[3] += sourceShrd[6*i+2] * CnmImag; target[3] += sourceShrd[6*i+3] * CnmReal; target[4] += sourceShrd[6*i+4] * CnmReal; target[4] -= sourceShrd[6*i+5] * CnmImag; target[5] += sourceShrd[6*i+4] * CnmImag; target[5] += sourceShrd[6*i+5] * CnmReal; } } } __global__ void StretchingM2L_GPU(int *keysGlob, int *rangeGlob, gpureal *targetGlob, gpureal *sourceGlob) { int keys = keysGlob[blockIdx.x]; int numList = rangeGlob[keys]; gpureal D0 = -constDevc[0]; gpureal target[6] = {0, 0, 0, 0, 0, 0}; __shared__ gpureal sourceShrd[6*THREADS]; __shared__ gpureal factShrd[2*P]; __shared__ gpureal YnmShrd[4*NTERM]; gpureal fact = 1; for( int i=0; i<2*P; ++i ) { factShrd[i] = fact; fact *= i + 1; } __syncthreads(); int itarget = blockIdx.x * THREADS; for( int ilist=0; ilist<numList; ++ilist ) { int begin = rangeGlob[keys+3*ilist+1]; int Iperiodic = rangeGlob[keys+3*ilist+3]; __syncthreads(); if( threadIdx.x < NTERM ) { sourceShrd[6*threadIdx.x+0] = sourceGlob[begin+6*threadIdx.x+3]; sourceShrd[6*threadIdx.x+1] = sourceGlob[begin+6*threadIdx.x+4]; sourceShrd[6*threadIdx.x+2] = sourceGlob[begin+6*threadIdx.x+5]; sourceShrd[6*threadIdx.x+3] = sourceGlob[begin+6*threadIdx.x+6]; sourceShrd[6*threadIdx.x+4] = sourceGlob[begin+6*threadIdx.x+7]; sourceShrd[6*threadIdx.x+5] = sourceGlob[begin+6*threadIdx.x+8]; } __syncthreads(); int I = 0; for( int ix=-1; ix<=1; ++ix ) { for( int iy=-1; iy<=1; ++iy ) { for( int iz=-1; iz<=1; ++iz, ++I ) { if( Iperiodic & (1 << I) ) { float3 d; d.x = ix * D0; d.y = iy * D0; d.z = iz * D0; d.x += targetGlob[6*itarget+0] - sourceGlob[begin+0]; d.y += targetGlob[6*itarget+1] - sourceGlob[begin+1]; d.z += targetGlob[6*itarget+2] - sourceGlob[begin+2]; gpureal rho,alpha,beta; cart2sph(rho,alpha,beta,d.x,d.y,d.z); evalLocal(YnmShrd,rho,alpha,factShrd); StretchingM2L_core(target,beta,factShrd,YnmShrd,sourceShrd); } } } } } itarget = blockIdx.x * THREADS + threadIdx.x; targetGlob[6*itarget+0] = target[0]; targetGlob[6*itarget+1] = target[1]; targetGlob[6*itarget+2] = target[2]; targetGlob[6*itarget+3] = target[3]; targetGlob[6*itarget+4] = target[4]; targetGlob[6*itarget+5] = target[5]; } __device__ void StretchingM2P_core(gpureal *target, gpureal *targetQ, gpureal r, gpureal theta, gpureal phi, gpureal *factShrd, gpureal *sourceShrd) { gpureal x = cosf(theta); gpureal y = sinf(theta); if( fabsf(y) < EPS ) y = 1 / EPS; gpureal s = sqrtf(1 - x * x); 
gpureal spherical[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; gpureal cartesian[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; gpureal fact = 1; gpureal pn = 1; gpureal rhom = 1.0 / r; for( int m=0; m<P; ++m ) { gpureal p = pn; int i = m * (m + 1) / 2 + m; gpureal ere = cosf(m * phi); if( m == 0 ) ere = 0.5; gpureal eim = sinf(m * phi); gpureal anm = rhom * rsqrtf(factShrd[2*m]); gpureal Ynm = anm * p; gpureal p1 = p; p = x * (2 * m + 1) * p; gpureal YnmTheta = anm * (p - (m + 1) * x * p1) / y; gpureal realj = ere * sourceShrd[6*i+0] - eim * sourceShrd[6*i+1]; gpureal imagj = eim * sourceShrd[6*i+0] + ere * sourceShrd[6*i+1]; spherical[0] -= 2 * (m + 1) / r * Ynm * realj; spherical[1] += 2 * YnmTheta * realj; spherical[2] -= 2 * m * Ynm * imagj; realj = ere * sourceShrd[6*i+2] - eim * sourceShrd[6*i+3]; imagj = eim * sourceShrd[6*i+2] + ere * sourceShrd[6*i+3]; spherical[3] -= 2 * (m + 1) / r * Ynm * realj; spherical[4] += 2 * YnmTheta * realj; spherical[5] -= 2 * m * Ynm * imagj; realj = ere * sourceShrd[6*i+4] - eim * sourceShrd[6*i+5]; imagj = eim * sourceShrd[6*i+4] + ere * sourceShrd[6*i+5]; spherical[6] -= 2 * (m + 1) / r * Ynm * realj; spherical[7] += 2 * YnmTheta * realj; spherical[8] -= 2 * m * Ynm * imagj; rhom /= r; gpureal rhon = rhom; for( int n=m+1; n<P; ++n ) { i = n * (n + 1) / 2 + m; anm = rhon * rsqrtf(factShrd[n+m] / factShrd[n-m]); Ynm = anm * p; gpureal p2 = p1; p1 = p; p = (x * (2 * n + 1) * p1 - (n + m) * p2) / (n - m + 1); YnmTheta = anm * ((n - m + 1) * p - (n + 1) * x * p1) / y; realj = ere * sourceShrd[6*i+0] - eim * sourceShrd[6*i+1]; imagj = eim * sourceShrd[6*i+0] + ere * sourceShrd[6*i+1]; spherical[0] -= 2 * (n + 1) / r * Ynm * realj; spherical[1] += 2 * YnmTheta * realj; spherical[2] -= 2 * m * Ynm * imagj; realj = ere * sourceShrd[6*i+2] - eim * sourceShrd[6*i+3]; imagj = eim * sourceShrd[6*i+2] + ere * sourceShrd[6*i+3]; spherical[3] -= 2 * (n + 1) / r * Ynm * realj; spherical[4] += 2 * YnmTheta * realj; spherical[5] -= 2 * m * Ynm * imagj; realj = ere * sourceShrd[6*i+4] - eim * sourceShrd[6*i+5]; imagj = eim * sourceShrd[6*i+4] + ere * sourceShrd[6*i+5]; spherical[6] -= 2 * (n + 1) / r * Ynm * realj; spherical[7] += 2 * YnmTheta * realj; spherical[8] -= 2 * m * Ynm * imagj; rhon /= r; } pn = -pn * fact * s; fact += 2; } sph2cart(r,theta,phi,&spherical[0],&cartesian[0]); sph2cart(r,theta,phi,&spherical[3],&cartesian[3]); sph2cart(r,theta,phi,&spherical[6],&cartesian[6]); target[0] -= 0.25 / M_PI * (targetQ[0] * cartesian[0] + targetQ[1] * cartesian[1] + targetQ[2] * cartesian[2]); target[1] -= 0.25 / M_PI * (targetQ[0] * cartesian[3] + targetQ[1] * cartesian[4] + targetQ[2] * cartesian[5]); target[2] -= 0.25 / M_PI * (targetQ[0] * cartesian[6] + targetQ[1] * cartesian[7] + targetQ[2] * cartesian[8]); } __global__ void StretchingM2P_GPU(int *keysGlob, int *rangeGlob, gpureal *targetGlob, gpureal *sourceGlob) { int keys = keysGlob[blockIdx.x]; int numList = rangeGlob[keys]; gpureal D0 = -constDevc[0]; gpureal targetX[3], targetQ[3]; gpureal target[3] = {0, 0, 0}; __shared__ gpureal sourceShrd[6*THREADS]; __shared__ gpureal factShrd[2*P]; gpureal fact = 1; for( int i=0; i<2*P; ++i ) { factShrd[i] = fact; fact *= i + 1; } __syncthreads(); int itarget = blockIdx.x * THREADS + threadIdx.x; targetX[0] = targetGlob[6*itarget+0]; targetX[1] = targetGlob[6*itarget+1]; targetX[2] = targetGlob[6*itarget+2]; targetQ[0] = targetGlob[6*itarget+3]; targetQ[1] = targetGlob[6*itarget+4]; targetQ[2] = targetGlob[6*itarget+5]; for( int ilist=0; ilist<numList; ++ilist ) { int begin = 
rangeGlob[keys+3*ilist+1]; int Iperiodic = rangeGlob[keys+3*ilist+3]; __syncthreads(); if( threadIdx.x < NTERM ) { sourceShrd[6*threadIdx.x+0] = sourceGlob[begin+6*threadIdx.x+3]; sourceShrd[6*threadIdx.x+1] = sourceGlob[begin+6*threadIdx.x+4]; sourceShrd[6*threadIdx.x+2] = sourceGlob[begin+6*threadIdx.x+5]; sourceShrd[6*threadIdx.x+3] = sourceGlob[begin+6*threadIdx.x+6]; sourceShrd[6*threadIdx.x+4] = sourceGlob[begin+6*threadIdx.x+7]; sourceShrd[6*threadIdx.x+5] = sourceGlob[begin+6*threadIdx.x+8]; } __syncthreads(); int I = 0; for( int ix=-1; ix<=1; ++ix ) { for( int iy=-1; iy<=1; ++iy ) { for( int iz=-1; iz<=1; ++iz, ++I ) { if( Iperiodic & (1 << I) ) { float3 d; d.x = ix * D0; d.y = iy * D0; d.z = iz * D0; d.x += targetX[0] - sourceGlob[begin+0]; d.y += targetX[1] - sourceGlob[begin+1]; d.z += targetX[2] - sourceGlob[begin+2]; gpureal r,theta,phi; cart2sph(r,theta,phi,d.x,d.y,d.z); StretchingM2P_core(target,targetQ,r,theta,phi,factShrd,sourceShrd); } } } } } targetGlob[6*itarget+0] = target[0]; targetGlob[6*itarget+1] = target[1]; targetGlob[6*itarget+2] = target[2]; } __device__ inline void StretchingP2P_core(gpureal *target, gpureal *targetX, gpureal *targetQ, gpureal *sourceShrd, float3 d, int i) { d.x += targetX[0]; d.x -= sourceShrd[7*i+0]; d.y += targetX[1]; d.y -= sourceShrd[7*i+1]; d.z += targetX[2]; d.z -= sourceShrd[7*i+2]; gpureal R2 = d.x * d.x + d.y * d.y + d.z * d.z + EPS2; #if 0 gpureal S2 = 2 * sourceShrd[7*i+6] * sourceShrd[7*i+6]; gpureal RS = R2 / S2; gpureal cutoff = 0.25 / M_PI / R2 / sqrtf(R2) * (erff( sqrtf(RS) ) - sqrtf(4 / M_PI * RS) * expf(-RS)); target[0] += (targetQ[1] * sourceShrd[7*i+5] - targetQ[2] * sourceShrd[7*i+4]) * cutoff; target[1] += (targetQ[2] * sourceShrd[7*i+3] - targetQ[0] * sourceShrd[7*i+5]) * cutoff; target[2] += (targetQ[0] * sourceShrd[7*i+4] - targetQ[1] * sourceShrd[7*i+3]) * cutoff; cutoff = 0.25 / M_PI / R2 / R2 / sqrtf(R2) * (3 * erff( sqrtf(RS) ) - (2 * RS + 3) * sqrtf(4 / M_PI * RS) * expf(-RS)) * (targetQ[0] * d.x + targetQ[1] * d.y + targetQ[2] * d.z); target[0] += (sourceShrd[7*i+4] * d.z - sourceShrd[7*i+5] * d.y) * cutoff; target[1] += (sourceShrd[7*i+5] * d.x - sourceShrd[7*i+3] * d.z) * cutoff; target[2] += (sourceShrd[7*i+3] * d.y - sourceShrd[7*i+4] * d.x) * cutoff; #else const gpureal SQRT4PI = M_2_SQRTPI; const gpureal FOURPI = 0.25 * M_1_PI; gpureal SQRT_R2_1 = rsqrtf(R2); gpureal RS = R2 * sourceShrd[7*i+6]; gpureal SQRT_RS = sqrtf(RS); gpureal z = SQRT_RS,t,ERF_SQRT_RS; (t)=1.0f/(1.0f+0.5f*(z)); ERF_SQRT_RS=1.0f - (t)*expf(-(z)*(z)-1.26551223f+(t)*(1.00002368f+(t)*(0.37409196f+(t)*(0.09678418f+ (t)*(-0.18628806f+(t)*(0.27886807f+(t)*(-1.13520398f+(t)*(1.48851587f+ (t)*(-0.82215223f+(t)*0.17087277f))))))))); gpureal EXP_RS = expf(-RS); gpureal cutoff = FOURPI * SQRT_R2_1 * SQRT_R2_1 * SQRT_R2_1 * (ERF_SQRT_RS - SQRT4PI * SQRT_RS * EXP_RS); target[0] += (targetQ[1] * sourceShrd[7*i+5] - targetQ[2] * sourceShrd[7*i+4]) * cutoff; target[1] += (targetQ[2] * sourceShrd[7*i+3] - targetQ[0] * sourceShrd[7*i+5]) * cutoff; target[2] += (targetQ[0] * sourceShrd[7*i+4] - targetQ[1] * sourceShrd[7*i+3]) * cutoff; gpureal cutoff2 = FOURPI * SQRT_R2_1 * SQRT_R2_1 * SQRT_R2_1 * SQRT_R2_1 * SQRT_R2_1 * (3.0f * ERF_SQRT_RS - (2.0f * RS + 3.0f) * SQRT4PI * SQRT_RS * EXP_RS) * (targetQ[0] * d.x + targetQ[1] * d.y + targetQ[2] * d.z); target[0] += (sourceShrd[7*i+4] * d.z - sourceShrd[7*i+5] * d.y) * cutoff2; target[1] += (sourceShrd[7*i+5] * d.x - sourceShrd[7*i+3] * d.z) * cutoff2; target[2] += (sourceShrd[7*i+3] * d.y - 
sourceShrd[7*i+4] * d.x) * cutoff2; #endif } __global__ void StretchingP2P_GPU(int *keysGlob, int *rangeGlob, gpureal *targetGlob, gpureal *sourceGlob) { int keys = keysGlob[blockIdx.x]; int numList = rangeGlob[keys]; gpureal D0 = -constDevc[0]; gpureal targetX[3], targetQ[3]; gpureal target[3] = {0, 0, 0}; __shared__ gpureal sourceShrd[7*THREADS]; int itarget = blockIdx.x * THREADS + threadIdx.x; targetX[0] = targetGlob[6*itarget+0]; targetX[1] = targetGlob[6*itarget+1]; targetX[2] = targetGlob[6*itarget+2]; targetQ[0] = targetGlob[6*itarget+3]; targetQ[1] = targetGlob[6*itarget+4]; targetQ[2] = targetGlob[6*itarget+5]; for( int ilist=0; ilist<numList; ++ilist ) { int begin = rangeGlob[keys+3*ilist+1]; int size = rangeGlob[keys+3*ilist+2]; int Iperiodic = rangeGlob[keys+3*ilist+3]; for( int iblok=0; iblok<(size-1)/THREADS; ++iblok ) { int isource = begin + iblok * THREADS + threadIdx.x; __syncthreads(); sourceShrd[7*threadIdx.x+0] = sourceGlob[7*isource+0]; sourceShrd[7*threadIdx.x+1] = sourceGlob[7*isource+1]; sourceShrd[7*threadIdx.x+2] = sourceGlob[7*isource+2]; sourceShrd[7*threadIdx.x+3] = sourceGlob[7*isource+3]; sourceShrd[7*threadIdx.x+4] = sourceGlob[7*isource+4]; sourceShrd[7*threadIdx.x+5] = sourceGlob[7*isource+5]; // sourceShrd[7*threadIdx.x+6] = sourceGlob[7*isource+6]; sourceShrd[7*threadIdx.x+6] = 0.5f / (sourceGlob[7*isource+6] * sourceGlob[7*isource+6]); __syncthreads(); int I = 0; for( int ix=-1; ix<=1; ++ix ) { for( int iy=-1; iy<=1; ++iy ) { for( int iz=-1; iz<=1; ++iz, ++I ) { if( Iperiodic & (1 << I) ) { float3 d; d.x = ix * D0; d.y = iy * D0; d.z = iz * D0; #pragma unroll 64 for( int i=0; i<THREADS; ++i ) { StretchingP2P_core(target,targetX,targetQ,sourceShrd,d,i); } } } } } } int iblok = (size-1)/THREADS; int isource = begin + iblok * THREADS + threadIdx.x; __syncthreads(); if( threadIdx.x < size - iblok * THREADS ) { sourceShrd[7*threadIdx.x+0] = sourceGlob[7*isource+0]; sourceShrd[7*threadIdx.x+1] = sourceGlob[7*isource+1]; sourceShrd[7*threadIdx.x+2] = sourceGlob[7*isource+2]; sourceShrd[7*threadIdx.x+3] = sourceGlob[7*isource+3]; sourceShrd[7*threadIdx.x+4] = sourceGlob[7*isource+4]; sourceShrd[7*threadIdx.x+5] = sourceGlob[7*isource+5]; // sourceShrd[7*threadIdx.x+6] = sourceGlob[7*isource+6]; sourceShrd[7*threadIdx.x+6] = 0.5f / (sourceGlob[7*isource+6] * sourceGlob[7*isource+6]); } __syncthreads(); int I = 0; int icounter=0; for( int ix=-1; ix<=1; ++ix ) { for( int iy=-1; iy<=1; ++iy ) { for( int iz=-1; iz<=1; ++iz, ++I ) { if( Iperiodic & (1 << I) ) { icounter++; float3 d; d.x = ix * D0; d.y = iy * D0; d.z = iz * D0; for( int i=0; i<size-iblok*THREADS; ++i ) { StretchingP2P_core(target,targetX,targetQ,sourceShrd,d,i); } } } } } } targetGlob[6*itarget+0] = target[0]; targetGlob[6*itarget+1] = target[1]; targetGlob[6*itarget+2] = target[2]; } __device__ void StretchingL2L_core(gpureal *target, gpureal beta, gpureal *factShrd, gpureal *YnmShrd, gpureal *sourceShrd) { int j = floorf(sqrtf(2*threadIdx.x+0.25)-0.5); int k = 0; for( int i=0; i<=j; ++i ) k += i; k = threadIdx.x - k; if( threadIdx.x >= NTERM ) j = k = 0; gpureal ajk = ODDEVEN(j) * rsqrtf(factShrd[j-k] * factShrd[j+k]); for( int n=0; n<P; ++n ) { for( int m=j+k-n; m<0; ++m ) { int nms = n * (n + 1) / 2 - m; int jnkm = (n - j) * (n - j) + n - j + m - k; gpureal ere = cosf((m - k) * beta); gpureal eim = sinf((m - k) * beta); gpureal anm = rsqrtf(factShrd[n-m] * factShrd[n+m]); gpureal cnm = ODDEVEN(k-n) * ajk / anm * YnmShrd[jnkm]; gpureal CnmReal = cnm * ere; gpureal CnmImag = cnm * eim; target[0] += 
sourceShrd[6*nms+0] * CnmReal; target[0] += sourceShrd[6*nms+1] * CnmImag; target[1] += sourceShrd[6*nms+0] * CnmImag; target[1] -= sourceShrd[6*nms+1] * CnmReal; target[2] += sourceShrd[6*nms+2] * CnmReal; target[2] += sourceShrd[6*nms+3] * CnmImag; target[3] += sourceShrd[6*nms+2] * CnmImag; target[3] -= sourceShrd[6*nms+3] * CnmReal; target[4] += sourceShrd[6*nms+4] * CnmReal; target[4] += sourceShrd[6*nms+5] * CnmImag; target[5] += sourceShrd[6*nms+4] * CnmImag; target[5] -= sourceShrd[6*nms+5] * CnmReal; } for( int m=0; m<=n; ++m ) { if( n-j >= abs(m-k) ) { int nms = n * (n + 1) / 2 + m; int jnkm = (n - j) * (n - j) + n - j + m - k; gpureal ere = cosf((m - k) * beta); gpureal eim = sinf((m - k) * beta); gpureal anm = rsqrtf(factShrd[n-m] * factShrd[n+m]); gpureal cnm = ODDEVEN((m-k-abs(m-k)) / 2 - n); cnm *= ajk / anm * YnmShrd[jnkm]; gpureal CnmReal = cnm * ere; gpureal CnmImag = cnm * eim; target[0] += sourceShrd[6*nms+0] * CnmReal; target[0] -= sourceShrd[6*nms+1] * CnmImag; target[1] += sourceShrd[6*nms+0] * CnmImag; target[1] += sourceShrd[6*nms+1] * CnmReal; target[2] += sourceShrd[6*nms+2] * CnmReal; target[2] -= sourceShrd[6*nms+3] * CnmImag; target[3] += sourceShrd[6*nms+2] * CnmImag; target[3] += sourceShrd[6*nms+3] * CnmReal; target[4] += sourceShrd[6*nms+4] * CnmReal; target[4] -= sourceShrd[6*nms+5] * CnmImag; target[5] += sourceShrd[6*nms+4] * CnmImag; target[5] += sourceShrd[6*nms+5] * CnmReal; } } } } __global__ void StretchingL2L_GPU(int *keysGlob, int *rangeGlob, gpureal *targetGlob, gpureal *sourceGlob) { int keys = keysGlob[blockIdx.x]; int numList = rangeGlob[keys]; gpureal target[6] = {0, 0, 0, 0, 0, 0}; __shared__ gpureal sourceShrd[6*THREADS]; __shared__ gpureal factShrd[2*P]; __shared__ gpureal YnmShrd[P*P]; gpureal fact = 1; for( int i=0; i<2*P; ++i ) { factShrd[i] = fact; fact *= i + 1; } __syncthreads(); int itarget = blockIdx.x * THREADS; for( int ilist=0; ilist<numList; ++ilist ) { int begin = rangeGlob[keys+3*ilist+1]; float3 d; d.x = targetGlob[6*itarget+0] - sourceGlob[begin+0]; d.y = targetGlob[6*itarget+1] - sourceGlob[begin+1]; d.z = targetGlob[6*itarget+2] - sourceGlob[begin+2]; __syncthreads(); if( threadIdx.x < NTERM ) { sourceShrd[6*threadIdx.x+0] = sourceGlob[begin+6*threadIdx.x+3]; sourceShrd[6*threadIdx.x+1] = sourceGlob[begin+6*threadIdx.x+4]; sourceShrd[6*threadIdx.x+2] = sourceGlob[begin+6*threadIdx.x+5]; sourceShrd[6*threadIdx.x+3] = sourceGlob[begin+6*threadIdx.x+6]; sourceShrd[6*threadIdx.x+4] = sourceGlob[begin+6*threadIdx.x+7]; sourceShrd[6*threadIdx.x+5] = sourceGlob[begin+6*threadIdx.x+8]; } __syncthreads(); gpureal rho,alpha,beta; cart2sph(rho,alpha,beta,d.x,d.y,d.z); evalMultipole(YnmShrd,rho,alpha,factShrd); StretchingL2L_core(target,beta,factShrd,YnmShrd,sourceShrd); } itarget = blockIdx.x * THREADS + threadIdx.x; targetGlob[6*itarget+0] = target[0]; targetGlob[6*itarget+1] = target[1]; targetGlob[6*itarget+2] = target[2]; targetGlob[6*itarget+3] = target[3]; targetGlob[6*itarget+4] = target[4]; targetGlob[6*itarget+5] = target[5]; } __device__ void StretchingL2P_core(gpureal *target, gpureal *targetQ, gpureal r, gpureal theta, gpureal phi, gpureal *factShrd, gpureal *sourceShrd) { gpureal x = cosf(theta); gpureal y = sinf(theta); if( fabsf(y) < EPS ) y = 1 / EPS; gpureal s = sqrtf(1 - x * x); gpureal spherical[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; gpureal cartesian[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; gpureal fact = 1; gpureal pn = 1; gpureal rhom = 1; for( int m=0; m<P; ++m ) { gpureal p = pn; int i = m * (m + 1) / 2 + m; gpureal ere 
= cosf(m * phi); if( m == 0 ) ere = 0.5; gpureal eim = sinf(m * phi); gpureal anm = rhom * rsqrtf(factShrd[2*m]); gpureal Ynm = anm * p; gpureal p1 = p; p = x * (2 * m + 1) * p; gpureal YnmTheta = anm * (p - (m + 1) * x * p1) / y; gpureal realj = ere * sourceShrd[6*i+0] - eim * sourceShrd[6*i+1]; gpureal imagj = eim * sourceShrd[6*i+0] + ere * sourceShrd[6*i+1]; spherical[0] += 2 * m / r * Ynm * realj; spherical[1] += 2 * YnmTheta * realj; spherical[2] -= 2 * m * Ynm * imagj; realj = ere * sourceShrd[6*i+2] - eim * sourceShrd[6*i+3]; imagj = eim * sourceShrd[6*i+2] + ere * sourceShrd[6*i+3]; spherical[3] += 2 * m / r * Ynm * realj; spherical[4] += 2 * YnmTheta * realj; spherical[5] -= 2 * m * Ynm * imagj; realj = ere * sourceShrd[6*i+4] - eim * sourceShrd[6*i+5]; imagj = eim * sourceShrd[6*i+4] + ere * sourceShrd[6*i+5]; spherical[6] += 2 * m / r * Ynm * realj; spherical[7] += 2 * YnmTheta * realj; spherical[8] -= 2 * m * Ynm * imagj; rhom *= r; gpureal rhon = rhom; for( int n=m+1; n<P; ++n ) { i = n * (n + 1) / 2 + m; anm = rhon * rsqrtf(factShrd[n+m] / factShrd[n-m]); Ynm = anm * p; gpureal p2 = p1; p1 = p; p = (x * (2 * n + 1) * p1 - (n + m) * p2) / (n - m + 1); YnmTheta = anm * ((n - m + 1) * p - (n + 1) * x * p1) / y; realj = ere * sourceShrd[6*i+0] - eim * sourceShrd[6*i+1]; imagj = eim * sourceShrd[6*i+0] + ere * sourceShrd[6*i+1]; spherical[0] += 2 * n / r * Ynm * realj; spherical[1] += 2 * YnmTheta * realj; spherical[2] -= 2 * m * Ynm * imagj; realj = ere * sourceShrd[6*i+2] - eim * sourceShrd[6*i+3]; imagj = eim * sourceShrd[6*i+2] + ere * sourceShrd[6*i+3]; spherical[3] += 2 * n / r * Ynm * realj; spherical[4] += 2 * YnmTheta * realj; spherical[5] -= 2 * m * Ynm * imagj; realj = ere * sourceShrd[6*i+4] - eim * sourceShrd[6*i+5]; imagj = eim * sourceShrd[6*i+4] + ere * sourceShrd[6*i+5]; spherical[6] += 2 * n / r * Ynm * realj; spherical[7] += 2 * YnmTheta * realj; spherical[8] -= 2 * m * Ynm * imagj; rhon *= r; } pn = -pn * fact * s; fact += 2; } sph2cart(r,theta,phi,&spherical[0],&cartesian[0]); sph2cart(r,theta,phi,&spherical[3],&cartesian[3]); sph2cart(r,theta,phi,&spherical[6],&cartesian[6]); target[0] -= 0.25 / M_PI * (targetQ[0] * cartesian[0] + targetQ[1] * cartesian[1] + targetQ[2] * cartesian[2]); target[1] -= 0.25 / M_PI * (targetQ[0] * cartesian[3] + targetQ[1] * cartesian[4] + targetQ[2] * cartesian[5]); target[2] -= 0.25 / M_PI * (targetQ[0] * cartesian[6] + targetQ[1] * cartesian[7] + targetQ[2] * cartesian[8]); } __global__ void StretchingL2P_GPU(int *keysGlob, int *rangeGlob, gpureal *targetGlob, gpureal *sourceGlob) { int keys = keysGlob[blockIdx.x]; int numList = rangeGlob[keys]; gpureal targetX[3], targetQ[3]; gpureal target[3] = {0, 0, 0}; __shared__ gpureal sourceShrd[6*THREADS]; __shared__ gpureal factShrd[2*P]; gpureal fact = 1; for( int i=0; i<2*P; ++i ) { factShrd[i] = fact; fact *= i + 1; } __syncthreads(); int itarget = blockIdx.x * THREADS + threadIdx.x; targetX[0] = targetGlob[6*itarget+0]; targetX[1] = targetGlob[6*itarget+1]; targetX[2] = targetGlob[6*itarget+2]; targetQ[0] = targetGlob[6*itarget+3]; targetQ[1] = targetGlob[6*itarget+4]; targetQ[2] = targetGlob[6*itarget+5]; for( int ilist=0; ilist<numList; ++ilist ) { int begin = rangeGlob[keys+3*ilist+1]; float3 d; d.x = targetX[0] - sourceGlob[begin+0]; d.y = targetX[1] - sourceGlob[begin+1]; d.z = targetX[2] - sourceGlob[begin+2]; __syncthreads(); if( threadIdx.x < NTERM ) { sourceShrd[6*threadIdx.x+0] = sourceGlob[begin+6*threadIdx.x+3]; sourceShrd[6*threadIdx.x+1] = 
sourceGlob[begin+6*threadIdx.x+4]; sourceShrd[6*threadIdx.x+2] = sourceGlob[begin+6*threadIdx.x+5]; sourceShrd[6*threadIdx.x+3] = sourceGlob[begin+6*threadIdx.x+6]; sourceShrd[6*threadIdx.x+4] = sourceGlob[begin+6*threadIdx.x+7]; sourceShrd[6*threadIdx.x+5] = sourceGlob[begin+6*threadIdx.x+8]; } __syncthreads(); gpureal r,theta,phi; cart2sph(r,theta,phi,d.x,d.y,d.z); StretchingL2P_core(target,targetQ,r,theta,phi,factShrd,sourceShrd); } targetGlob[6*itarget+0] = target[0]; targetGlob[6*itarget+1] = target[1]; targetGlob[6*itarget+2] = target[2]; } void Kernel::StretchingFinal() {} #include "gpu.h" CALL_GPU(StretchingP2M,P2M GPUkernel); CALL_GPU(StretchingM2M,M2M GPUkernel); CALL_GPU(StretchingM2L,M2L GPUkernel); CALL_GPU(StretchingM2P,M2P GPUkernel); CALL_GPU(StretchingP2P,P2P GPUkernel); CALL_GPU(StretchingL2L,L2L GPUkernel); CALL_GPU(StretchingL2P,L2P GPUkernel);
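The StretchingP2P_core path above avoids erff by inlining a rational-polynomial approximation (the coefficient chain starting at 1.26551223f), evaluated at z = sqrt(RS) >= 0. Below is a minimal host-side sketch, assuming only the C++ standard library, that spot-checks the same polynomial against std::erf; the name erf_approx and the sample arguments are illustrative and not part of the original kernels.
#include <cmath>
#include <cstdio>

// Host copy of the erf approximation inlined in StretchingP2P_core:
// erf(z) ~= 1 - t*exp(-z*z - 1.26551223 + t*(...)), with t = 1/(1 + z/2), valid for z >= 0.
static float erf_approx(float z) {
  float t = 1.0f / (1.0f + 0.5f * z);
  return 1.0f - t * std::exp(-z * z - 1.26551223f + t * (1.00002368f + t * (0.37409196f
             + t * (0.09678418f + t * (-0.18628806f + t * (0.27886807f + t * (-1.13520398f
             + t * (1.48851587f + t * (-0.82215223f + t * 0.17087277f)))))))));
}

int main() {
  // The kernel only ever evaluates non-negative arguments (z = sqrt(R2 / (2*sigma^2))).
  const float zs[] = {0.1f, 0.5f, 1.0f, 2.0f, 3.0f};
  for (float z : zs)
    std::printf("z=%.1f  approx=%.7f  std::erf=%.7f\n", z, erf_approx(z), std::erf(z));
  return 0;
}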
3eba0d808776ec7e2eaa42ac1a03820703b6c1dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "helpers.h" #include "FixImproperCVFF.h" #include "FixHelpers.h" #include "cutils_func.h" #define SMALL 0.001f #include "ImproperEvaluate.h" namespace py = boost::python; const std::string improperCVFFType = "ImproperCVFF"; FixImproperCVFF::FixImproperCVFF(SHARED(State) state_, std::string handle) : FixPotentialMultiAtom (state_, handle, improperCVFFType, true) { readFromRestart(); } void FixImproperCVFF::compute(int virialMode) { int nAtoms = state->atoms.size(); GPUData &gpd = state->gpd; int activeIdx = gpd.activeIdx(); if (forcersGPU.size()) { if (virialMode) { hipLaunchKernelGGL(( compute_force_improper<ImproperCVFFType, ImproperEvaluatorCVFF, true>) , dim3(NBLOCK(forcersGPU.size())), dim3(PERBLOCK), sharedMemSizeForParams, 0, forcersGPU.size(), gpd.xs(activeIdx), gpd.fs(activeIdx), gpd.idToIdxs.d_data.data(), forcersGPU.data(), state->boundsGPU, parameters.data(), parameters.size(), gpd.virials.d_data.data(), usingSharedMemForParams, evaluator); } else { hipLaunchKernelGGL(( compute_force_improper<ImproperCVFFType, ImproperEvaluatorCVFF, false>) , dim3(NBLOCK(forcersGPU.size())), dim3(PERBLOCK), sharedMemSizeForParams, 0, forcersGPU.size(), gpd.xs(activeIdx), gpd.fs(activeIdx), gpd.idToIdxs.d_data.data(), forcersGPU.data(), state->boundsGPU, parameters.data(), parameters.size(), gpd.virials.d_data.data(), usingSharedMemForParams, evaluator); } } } void FixImproperCVFF::singlePointEng(real *perParticleEng) { int nAtoms = state->atoms.size(); int activeIdx = state->gpd.activeIdx(); if (forcersGPU.size()) { hipLaunchKernelGGL(( compute_energy_improper), dim3(NBLOCK(forcersGPU.size())), dim3(PERBLOCK), sharedMemSizeForParams, 0, forcersGPU.size(), state->gpd.xs(activeIdx), perParticleEng, state->gpd.idToIdxs.d_data.data(), forcersGPU.data(), state->boundsGPU, parameters.data(), parameters.size(), usingSharedMemForParams, evaluator); } } void FixImproperCVFF::createImproper(Atom *a, Atom *b, Atom *c, Atom *d, double k, int dParam, int n, int type) { std::vector<Atom *> atoms = {a, b, c, d}; validAtoms(atoms); if (type == -1) { assert(k!=COEF_DEFAULT and (dParam==1 or dParam==-1) and n!=COEF_DEFAULT); } forcers.push_back(ImproperCVFF(a, b, c, d, k, dParam, n, type)); pyListInterface.updateAppendedMember(); } void FixImproperCVFF::setImproperTypeCoefs(int type, double k, int d, int n) { ImproperCVFF dummy(k, d, n, type); setForcerType(type, dummy); } bool FixImproperCVFF::readFromRestart() { auto restData = getRestartNode(); if (restData) { auto curr_node = restData.first_child(); while (curr_node) { std::string tag = curr_node.name(); if (tag == "types") { for (auto type_node = curr_node.first_child(); type_node; type_node = type_node.next_sibling()) { int type; double k; int d; int n; std::string type_ = type_node.attribute("id").value(); type = std::atoi(type_.c_str()); std::string k_ = type_node.attribute("k").value(); std::string d_ = type_node.attribute("d").value(); std::string n_ = type_node.attribute("n").value(); k = std::atof(k_.c_str()); d = std::atof(d_.c_str()); n = std::atof(n_.c_str()); setImproperTypeCoefs(type, k, d, n); } } else if (tag == "members") { for (auto member_node = curr_node.first_child(); member_node; member_node = member_node.next_sibling()) { int type; double k; int dParam; int n; int ids[4]; std::string type_ = member_node.attribute("type").value(); std::string atom_a = member_node.attribute("atomID_a").value(); std::string atom_b = 
member_node.attribute("atomID_b").value(); std::string atom_c = member_node.attribute("atomID_c").value(); std::string atom_d = member_node.attribute("atomID_d").value(); std::string k_ = member_node.attribute("k").value(); std::string d_ = member_node.attribute("d").value(); std::string n_ = member_node.attribute("n").value(); k = std::atof(k_.c_str()); dParam = std::atoi(d_.c_str()); n = std::atoi(n_.c_str()); type = std::atoi(type_.c_str()); ids[0] = std::atoi(atom_a.c_str()); ids[1] = std::atoi(atom_b.c_str()); ids[2] = std::atoi(atom_c.c_str()); ids[3] = std::atoi(atom_d.c_str()); Atom * a = &state->idToAtom(ids[0]); Atom * b = &state->idToAtom(ids[1]); Atom * c = &state->idToAtom(ids[2]); Atom * d = &state->idToAtom(ids[3]); createImproper(a, b, c, d, k, dParam, n, type); } } curr_node = curr_node.next_sibling(); } } return true; } __host__ void export_FixImproperCVFF() { boost::python::class_<FixImproperCVFF, SHARED(FixImproperCVFF), boost::python::bases<Fix, TypedItemHolder> > ( "FixImproperCVFF", boost::python::init<SHARED(State), std::string> ( boost::python::args("state", "handle")) ) .def("createImproper", &FixImproperCVFF::createImproper, (boost::python::arg("k")=COEF_DEFAULT, boost::python::arg("d")=COEF_DEFAULT, boost::python::arg("n")=COEF_DEFAULT, boost::python::arg("type")=-1) ) .def("setImproperTypeCoefs", &FixImproperCVFF::setImproperTypeCoefs, (boost::python::arg("type")=COEF_DEFAULT, boost::python::arg("k")=COEF_DEFAULT, boost::python::arg("d")=COEF_DEFAULT, boost::python::arg("n")=COEF_DEFAULT ) ) .def_readonly("impropers", &FixImproperCVFF::pyForcers) ; }
3eba0d808776ec7e2eaa42ac1a03820703b6c1dd.cu
#include "helpers.h" #include "FixImproperCVFF.h" #include "FixHelpers.h" #include "cutils_func.h" #define SMALL 0.001f #include "ImproperEvaluate.h" namespace py = boost::python; const std::string improperCVFFType = "ImproperCVFF"; FixImproperCVFF::FixImproperCVFF(SHARED(State) state_, std::string handle) : FixPotentialMultiAtom (state_, handle, improperCVFFType, true) { readFromRestart(); } void FixImproperCVFF::compute(int virialMode) { int nAtoms = state->atoms.size(); GPUData &gpd = state->gpd; int activeIdx = gpd.activeIdx(); if (forcersGPU.size()) { if (virialMode) { compute_force_improper<ImproperCVFFType, ImproperEvaluatorCVFF, true> <<<NBLOCK(forcersGPU.size()), PERBLOCK, sharedMemSizeForParams>>>(forcersGPU.size(), gpd.xs(activeIdx), gpd.fs(activeIdx), gpd.idToIdxs.d_data.data(), forcersGPU.data(), state->boundsGPU, parameters.data(), parameters.size(), gpd.virials.d_data.data(), usingSharedMemForParams, evaluator); } else { compute_force_improper<ImproperCVFFType, ImproperEvaluatorCVFF, false> <<<NBLOCK(forcersGPU.size()), PERBLOCK, sharedMemSizeForParams>>>(forcersGPU.size(), gpd.xs(activeIdx), gpd.fs(activeIdx), gpd.idToIdxs.d_data.data(), forcersGPU.data(), state->boundsGPU, parameters.data(), parameters.size(), gpd.virials.d_data.data(), usingSharedMemForParams, evaluator); } } } void FixImproperCVFF::singlePointEng(real *perParticleEng) { int nAtoms = state->atoms.size(); int activeIdx = state->gpd.activeIdx(); if (forcersGPU.size()) { compute_energy_improper<<<NBLOCK(forcersGPU.size()), PERBLOCK, sharedMemSizeForParams>>>(forcersGPU.size(), state->gpd.xs(activeIdx), perParticleEng, state->gpd.idToIdxs.d_data.data(), forcersGPU.data(), state->boundsGPU, parameters.data(), parameters.size(), usingSharedMemForParams, evaluator); } } void FixImproperCVFF::createImproper(Atom *a, Atom *b, Atom *c, Atom *d, double k, int dParam, int n, int type) { std::vector<Atom *> atoms = {a, b, c, d}; validAtoms(atoms); if (type == -1) { assert(k!=COEF_DEFAULT and (dParam==1 or dParam==-1) and n!=COEF_DEFAULT); } forcers.push_back(ImproperCVFF(a, b, c, d, k, dParam, n, type)); pyListInterface.updateAppendedMember(); } void FixImproperCVFF::setImproperTypeCoefs(int type, double k, int d, int n) { ImproperCVFF dummy(k, d, n, type); setForcerType(type, dummy); } bool FixImproperCVFF::readFromRestart() { auto restData = getRestartNode(); if (restData) { auto curr_node = restData.first_child(); while (curr_node) { std::string tag = curr_node.name(); if (tag == "types") { for (auto type_node = curr_node.first_child(); type_node; type_node = type_node.next_sibling()) { int type; double k; int d; int n; std::string type_ = type_node.attribute("id").value(); type = std::atoi(type_.c_str()); std::string k_ = type_node.attribute("k").value(); std::string d_ = type_node.attribute("d").value(); std::string n_ = type_node.attribute("n").value(); k = std::atof(k_.c_str()); d = std::atof(d_.c_str()); n = std::atof(n_.c_str()); setImproperTypeCoefs(type, k, d, n); } } else if (tag == "members") { for (auto member_node = curr_node.first_child(); member_node; member_node = member_node.next_sibling()) { int type; double k; int dParam; int n; int ids[4]; std::string type_ = member_node.attribute("type").value(); std::string atom_a = member_node.attribute("atomID_a").value(); std::string atom_b = member_node.attribute("atomID_b").value(); std::string atom_c = member_node.attribute("atomID_c").value(); std::string atom_d = member_node.attribute("atomID_d").value(); std::string k_ = 
member_node.attribute("k").value(); std::string d_ = member_node.attribute("d").value(); std::string n_ = member_node.attribute("n").value(); k = std::atof(k_.c_str()); dParam = std::atoi(d_.c_str()); n = std::atoi(n_.c_str()); type = std::atoi(type_.c_str()); ids[0] = std::atoi(atom_a.c_str()); ids[1] = std::atoi(atom_b.c_str()); ids[2] = std::atoi(atom_c.c_str()); ids[3] = std::atoi(atom_d.c_str()); Atom * a = &state->idToAtom(ids[0]); Atom * b = &state->idToAtom(ids[1]); Atom * c = &state->idToAtom(ids[2]); Atom * d = &state->idToAtom(ids[3]); createImproper(a, b, c, d, k, dParam, n, type); } } curr_node = curr_node.next_sibling(); } } return true; } __host__ void export_FixImproperCVFF() { boost::python::class_<FixImproperCVFF, SHARED(FixImproperCVFF), boost::python::bases<Fix, TypedItemHolder> > ( "FixImproperCVFF", boost::python::init<SHARED(State), std::string> ( boost::python::args("state", "handle")) ) .def("createImproper", &FixImproperCVFF::createImproper, (boost::python::arg("k")=COEF_DEFAULT, boost::python::arg("d")=COEF_DEFAULT, boost::python::arg("n")=COEF_DEFAULT, boost::python::arg("type")=-1) ) .def("setImproperTypeCoefs", &FixImproperCVFF::setImproperTypeCoefs, (boost::python::arg("type")=COEF_DEFAULT, boost::python::arg("k")=COEF_DEFAULT, boost::python::arg("d")=COEF_DEFAULT, boost::python::arg("n")=COEF_DEFAULT ) ) .def_readonly("impropers", &FixImproperCVFF::pyForcers) ; }
b288f4a29c6b6ae40dece64ffec20a55057bd46d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Green and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "utils.h" __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset int index=blockIdx.x*numCols+blockIdx.y; unsigned char color=0.299f*rgbaImage[index].x+ 0.587f*rgbaImage[index].y+ 0.114f*rgbaImage[index].z; greyImage[index]=color; } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched const dim3 blockSize(1, 1, 1); //TODO const dim3 gridSize( numRows, numCols, 1); //TODO hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
b288f4a29c6b6ae40dece64ffec20a55057bd46d.cu
// Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Green and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "utils.h" __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset int index=blockIdx.x*numCols+blockIdx.y; unsigned char color=0.299f*rgbaImage[index].x+ 0.587f*rgbaImage[index].y+ 0.114f*rgbaImage[index].z; greyImage[index]=color; } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched const dim3 blockSize(1, 1, 1); //TODO const dim3 gridSize( numRows, numCols, 1); //TODO rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
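As a side note on the formula the comments above describe, here is a minimal host-side sketch of the same NTSC weighting applied to one made-up pixel (the values are chosen only for illustration).
#include <cstdio>

int main() {
  // One hypothetical RGBA pixel; alpha is ignored, as in the kernel above.
  unsigned char r = 200, g = 100, b = 50;
  // I = .299f * R + .587f * G + .114f * B  ->  59.8 + 58.7 + 5.7 = 124.2, truncated to 124
  unsigned char grey = static_cast<unsigned char>(0.299f * r + 0.587f * g + 0.114f * b);
  std::printf("grey = %d\n", grey);
  return 0;
}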
678d0a5f62bcbcb03d70b7bfb2fbf18a08a85dc6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> #include "pad_conv2d.h" #define CUDA_KERNEL_LOOP(i ,n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i<(n); i+= blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; inline int GET_BLOCKS(const int N){ return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } __device__ float dmcn_im2col_bilinear( const float* bottom_data, const int data_width, const int height, const int width, float h, float w){ int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; float lh = h - h_low; float lw = w - w_low; float hh = 1 - lh, hw = 1 - lw; float v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; float v2 = 0; if (h_low >=0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; float v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; float v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } __device__ float dmcn_get_gradient_weight( float argmax_h, // offset h float argmax_w, // offset w const int h, const int w, // coordinate const int height, const int width){ if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; float weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } __device__ float dmcn_get_coordinate_weight( float argmax_h, float argmax_w, const int height, const int width, const float* im_data, const int data_width, const int bp_dir ) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; float weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= 
height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } __global__ void add_bias_kernel( int n, float* data_out, const float* bias, const int out_channels, const int height_out, const int width_out ){ CUDA_KERNEL_LOOP(index, n){ const int c_col = (index / width_out / height_out) % out_channels; float value = bias[c_col]; atomicAdd(data_out + index, value); } } __global__ void calculate_dbias_kernel( int n, const float* grad_output, float* grad_bias, const int out_channels, const int height_out, const int width_out ){ CUDA_KERNEL_LOOP(index, n){ const int c_col = (index / width_out / height_out) % out_channels; float value = grad_output[index]; atomicAdd(grad_bias + c_col, value); } } __global__ void pad_conv2d_im2col_kernel( int n, const float* data_im, const float* data_rate, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int num_channels, const int height_col, const int width_col, float* data_col ){ CUDA_KERNEL_LOOP(index, n){ const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int c_im = index / width_col / height_col; const int c_col = c_im * kernel_h * kernel_w; const float rate = data_rate[h_col * width_col + w_col]; const int h_in = h_col * stride_h + (int)((kernel_h - 1 ) / 2); const int w_in = w_col * stride_w + (int)((kernel_w - 1 ) / 2); float* data_col_ptr = data_col + (c_col * height_col + h_col) * width_col + w_col; const float* data_im_ptr = data_im + c_im * height * width; for (int i = - (int)(kernel_h / 2); i <= (int)(kernel_h / 2); ++i) { for (int j = - (int)(kernel_w / 2); j <= (int)(kernel_w / 2); ++j) { float val = static_cast<float>(0); const float h_im = h_in + i * 1 * rate; const float w_im = w_in + j * 1 * rate; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += height_col * width_col; } } } } __global__ void pad_conv2d_col2im_coord_kernel( const int n, const float* data_col, const float* data_im, const float* data_rate, const int channels, const int height, const int width, // input C, H, W const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int height_col, const int width_col, float* grad_rate_map ){ CUDA_KERNEL_LOOP(index, n){ // the relative location in the filter const int j = (index / width_col / height_col) % kernel_w; const int i = (index / width_col / height_col / kernel_w) % kernel_h; const int c = index / width_col / height_col / kernel_w / kernel_h; // Work out which inner-product point of the convolution (i.e. which output spatial location) uses the value at this index.
int w_out = index % width_col; int h_out = (index / width_col) % height_col; // Coordinates on the input image of the kernel center for this inner product. const int h_in = h_out * stride_h + (int)((kernel_h - 1 ) / 2); const int w_in = w_out * stride_w + (int)((kernel_w - 1 ) / 2); // If the perspective-map input changes later this needs updating; for now assume prate is a single-channel input. const float rate = data_rate[h_out * width_col + w_out]; const float cur_inv_h_data = h_in + (i - (int)((kernel_h - 1 ) / 2)) * rate; const float cur_inv_w_data = w_in + (j - (int)((kernel_w - 1 ) / 2)) * rate; const float reletive_i = (i - (int)((kernel_h - 1 ) / 2)); const float reletive_j = (j - (int)((kernel_w - 1 ) / 2)); if (reletive_i != 0 || reletive_j != 0){ float val_h = 0; float val_w = 0; float h_weight = dmcn_get_coordinate_weight( cur_inv_h_data, cur_inv_w_data, height, width, data_im + c * height * width, width, 0); float w_weight = dmcn_get_coordinate_weight( cur_inv_h_data, cur_inv_w_data, height, width, data_im + c * height * width, width, 1); val_h = (h_weight) * data_col[index]; val_w = (w_weight) * data_col[index]; float gradient = 0; float tmp = val_h * reletive_i + val_w * reletive_j; gradient = tmp / std::sqrt(float(reletive_i * reletive_i + reletive_j * reletive_j)); atomicAdd(grad_rate_map + h_out * width_col + w_out, gradient); } } } __global__ void pad_conv2d_col2im_kernel( const int n, const float* data_col, const float* data_rate, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int height_col, const int width_col, float* grad_im ){ CUDA_KERNEL_LOOP(index, n){ // the relative location in the filter const int j = (index / width_col / height_col) % kernel_w; const int i = (index / width_col / height_col / kernel_w) % kernel_h; const int c = index / width_col / height_col / kernel_w / kernel_h; // which channel // Work out which inner-product point of the convolution (i.e. which output spatial location) uses the value at this index.
int w_out = index % width_col; int h_out = (index / width_col) % height_col; // Coordinates on the input image of the kernel center for this inner product. const int h_in = h_out * stride_h + (int)((kernel_h - 1 ) / 2); const int w_in = w_out * stride_w + (int)((kernel_w - 1 ) / 2); // If the perspective-map input changes later this needs updating; for now assume prate is a single-channel input. const float rate = data_rate[h_out * width_col + w_out]; const float cur_inv_h_data = h_in + (i - (int)((kernel_h - 1 ) / 2)) * rate; const float cur_inv_w_data = w_in + (j - (int)((kernel_w - 1 ) / 2)) * rate; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; const float cur_top_grad = data_col[index]; for (int dy = 0; dy <= 1; dy++) { for (int dx = 0; dx <= 1; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width) { int cur_bottom_grad_pos = (c * height + cur_h + dy) * width + cur_w + dx; float weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } void pad_conv2d_im2col(hipStream_t stream, const float* data_im, const float* data_rate, const int in_channels, const int height, const int width, const int kernel_h, const int kernel_w, // const int pad_h, const int pad_w, const int stride_h, const int stride_w, // const int dilation_h, const int dilation_w, const int height_out, const int width_out, float* data_col){ int num_kernels = in_channels * height_out * width_out; hipLaunchKernelGGL(( pad_conv2d_im2col_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_im, data_rate, height, width, kernel_h, kernel_w, stride_h, stride_w, in_channels, height_out, width_out, data_col ); } void pad_conv2d_col2im_coord(hipStream_t stream, const float* data_col, const float* data_im, const float* data_rate, const int in_channels, const int height, const int width, // input C, H, W const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int height_col, const int width_col, float* grad_rate_map){ int num_kernels = in_channels * kernel_h * kernel_w * height_col * width_col; hipLaunchKernelGGL(( pad_conv2d_col2im_coord_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col, data_im, data_rate, in_channels, height, width, kernel_h, kernel_w, stride_h, stride_w, height_col, width_col, grad_rate_map ); } void pad_conv2d_col2im(hipStream_t stream, const float* data_col, const float* data_rate, const int in_channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int height_out, const int width_out, float* grad_im){ int num_kernels = in_channels * kernel_h * kernel_w * height_out * width_out; hipLaunchKernelGGL(( pad_conv2d_col2im_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col, data_rate, in_channels, height, width, kernel_h, kernel_w, stride_h, stride_w, height_out, width_out, grad_im ); } void add_bias(hipStream_t stream, float* data_out, const float* bias, const int out_channels, const int height_out, const int width_out ){ int num_kernels = out_channels * height_out * width_out; hipLaunchKernelGGL(( add_bias_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_out, bias, out_channels, height_out, width_out ); } void calculate_dbias(hipStream_t stream, const float* grad_output, float* grad_bias, const int out_channels, const int height_out, const int width_out ){ int num_kernels = out_channels * height_out * width_out;
hipLaunchKernelGGL(( calculate_dbias_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, grad_output, grad_bias, out_channels, height_out, width_out ); }
678d0a5f62bcbcb03d70b7bfb2fbf18a08a85dc6.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #include "pad_conv2d.h" #define CUDA_KERNEL_LOOP(i ,n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i<(n); i+= blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; inline int GET_BLOCKS(const int N){ return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } __device__ float dmcn_im2col_bilinear( const float* bottom_data, const int data_width, const int height, const int width, float h, float w){ int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; float lh = h - h_low; float lw = w - w_low; float hh = 1 - lh, hw = 1 - lw; float v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; float v2 = 0; if (h_low >=0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; float v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; float v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } __device__ float dmcn_get_gradient_weight( float argmax_h, // offset h float argmax_w, // offset w const int h, const int w, // coordinate const int height, const int width){ if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; float weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } __device__ float dmcn_get_coordinate_weight( float argmax_h, float argmax_w, const int height, const int width, const float* im_data, const int data_width, const int bp_dir ) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; float weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * 
im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } __global__ void add_bias_kernel( int n, float* data_out, const float* bias, const int out_channels, const int height_out, const int width_out ){ CUDA_KERNEL_LOOP(index, n){ const int c_col = (index / width_out / height_out) % out_channels; float value = bias[c_col]; atomicAdd(data_out + index, value); } } __global__ void calculate_dbias_kernel( int n, const float* grad_output, float* grad_bias, const int out_channels, const int height_out, const int width_out ){ CUDA_KERNEL_LOOP(index, n){ const int c_col = (index / width_out / height_out) % out_channels; float value = grad_output[index]; atomicAdd(grad_bias + c_col, value); } } __global__ void pad_conv2d_im2col_kernel( int n, const float* data_im, const float* data_rate, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int num_channels, const int height_col, const int width_col, float* data_col ){ CUDA_KERNEL_LOOP(index, n){ const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int c_im = index / width_col / height_col; const int c_col = c_im * kernel_h * kernel_w; const float rate = data_rate[h_col * width_col + w_col]; const int h_in = h_col * stride_h + (int)((kernel_h - 1 ) / 2); const int w_in = w_col * stride_w + (int)((kernel_w - 1 ) / 2); float* data_col_ptr = data_col + (c_col * height_col + h_col) * width_col + w_col; const float* data_im_ptr = data_im + c_im * height * width; for (int i = - (int)(kernel_h / 2); i <= (int)(kernel_h / 2); ++i) { for (int j = - (int)(kernel_w / 2); j <= (int)(kernel_w / 2); ++j) { float val = static_cast<float>(0); const float h_im = h_in + i * 1 * rate; const float w_im = w_in + j * 1 * rate; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += height_col * width_col; } } } } __global__ void pad_conv2d_col2im_coord_kernel( const int n, const float* data_col, const float* data_im, const float* data_rate, const int channels, const int height, const int width, // input C, H, W const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int height_col, const int width_col, float* grad_rate_map ){ CUDA_KERNEL_LOOP(index, n){ // the relative location in the filter const int j = (index / width_col / height_col) % kernel_w; const int i = (index / width_col / height_col / kernel_w) % kernel_h; const int c = index / width_col / height_col / kernel_w / kernel_h; // Work out which inner-product point of the convolution (i.e. which output spatial location) uses the value at this index.
int w_out = index % width_col; int h_out = (index / width_col) % height_col; // Coordinates on the input image of the kernel center for this inner product. const int h_in = h_out * stride_h + (int)((kernel_h - 1 ) / 2); const int w_in = w_out * stride_w + (int)((kernel_w - 1 ) / 2); // If the perspective-map input changes later this needs updating; for now assume prate is a single-channel input. const float rate = data_rate[h_out * width_col + w_out]; const float cur_inv_h_data = h_in + (i - (int)((kernel_h - 1 ) / 2)) * rate; const float cur_inv_w_data = w_in + (j - (int)((kernel_w - 1 ) / 2)) * rate; const float reletive_i = (i - (int)((kernel_h - 1 ) / 2)); const float reletive_j = (j - (int)((kernel_w - 1 ) / 2)); if (reletive_i != 0 || reletive_j != 0){ float val_h = 0; float val_w = 0; float h_weight = dmcn_get_coordinate_weight( cur_inv_h_data, cur_inv_w_data, height, width, data_im + c * height * width, width, 0); float w_weight = dmcn_get_coordinate_weight( cur_inv_h_data, cur_inv_w_data, height, width, data_im + c * height * width, width, 1); val_h = (h_weight) * data_col[index]; val_w = (w_weight) * data_col[index]; float gradient = 0; float tmp = val_h * reletive_i + val_w * reletive_j; gradient = tmp / std::sqrt(float(reletive_i * reletive_i + reletive_j * reletive_j)); atomicAdd(grad_rate_map + h_out * width_col + w_out, gradient); } } } __global__ void pad_conv2d_col2im_kernel( const int n, const float* data_col, const float* data_rate, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int height_col, const int width_col, float* grad_im ){ CUDA_KERNEL_LOOP(index, n){ // the relative location in the filter const int j = (index / width_col / height_col) % kernel_w; const int i = (index / width_col / height_col / kernel_w) % kernel_h; const int c = index / width_col / height_col / kernel_w / kernel_h; // which channel // Work out which inner-product point of the convolution (i.e. which output spatial location) uses the value at this index.
int w_out = index % width_col; int h_out = (index / width_col) % height_col; // coordinates on the input image of the kernel center for this inner product const int h_in = h_out * stride_h + (int)((kernel_h - 1 ) / 2); const int w_in = w_out * stride_w + (int)((kernel_w - 1 ) / 2); // if the perspective map input is changed later this needs updating; for now assume prate is a single-channel input const float rate = data_rate[h_out * width_col + w_out]; const float cur_inv_h_data = h_in + (i - (int)((kernel_h - 1 ) / 2)) * rate; const float cur_inv_w_data = w_in + (j - (int)((kernel_w - 1 ) / 2)) * rate; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; const float cur_top_grad = data_col[index]; for (int dy = 0; dy <= 1; dy++) { for (int dx = 0; dx <= 1; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width) { int cur_bottom_grad_pos = (c * height + cur_h + dy) * width + cur_w + dx; float weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } void pad_conv2d_im2col(cudaStream_t stream, const float* data_im, const float* data_rate, const int in_channels, const int height, const int width, const int kernel_h, const int kernel_w, // const int pad_h, const int pad_w, const int stride_h, const int stride_w, // const int dilation_h, const int dilation_w, const int height_out, const int width_out, float* data_col){ int num_kernels = in_channels * height_out * width_out; pad_conv2d_im2col_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_im, data_rate, height, width, kernel_h, kernel_w, stride_h, stride_w, in_channels, height_out, width_out, data_col ); } void pad_conv2d_col2im_coord(cudaStream_t stream, const float* data_col, const float* data_im, const float* data_rate, const int in_channels, const int height, const int width, // input C, H, W const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int height_col, const int width_col, float* grad_rate_map){ int num_kernels = in_channels * kernel_h * kernel_w * height_col * width_col; pad_conv2d_col2im_coord_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col, data_im, data_rate, in_channels, height, width, kernel_h, kernel_w, stride_h, stride_w, height_col, width_col, grad_rate_map ); } void pad_conv2d_col2im(cudaStream_t stream, const float* data_col, const float* data_rate, const int in_channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int height_out, const int width_out, float* grad_im){ int num_kernels = in_channels * kernel_h * kernel_w * height_out * width_out; pad_conv2d_col2im_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col, data_rate, in_channels, height, width, kernel_h, kernel_w, stride_h, stride_w, height_out, width_out, grad_im ); } void add_bias(cudaStream_t stream, float* data_out, const float* bias, const int out_channels, const int height_out, const int width_out ){ int num_kernels = out_channels * height_out * width_out; add_bias_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_out, bias, out_channels, height_out, width_out ); } void calculate_dbias(cudaStream_t stream, const float* grad_output, float* grad_bias, const int out_channels, const int height_out, const int width_out ){ int num_kernels = out_channels * height_out * width_out; calculate_dbias_kernel<<<GET_BLOCKS(num_kernels), 
CUDA_NUM_THREADS, 0, stream>>>( num_kernels, grad_output, grad_bias, out_channels, height_out, width_out ); }
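
A minimal host-side sketch of how the launchers above could be combined into a forward pass: pad_conv2d_im2col fills the column buffer, a cuBLAS GEMM applies the filter weights, and add_bias finishes the output. The driver below is illustrative only and is not part of the original file; the function name, the stride-1 single-sample assumption, and the [C_out, C_in*k_h*k_w] weight layout are assumptions, not the project's actual API.

#include <cublas_v2.h>
#include <cuda_runtime.h>

// Hypothetical forward driver (sketch): assumes the kernels/launchers above are
// linked in, stride 1, a single sample, and weights stored as [C_out, C_in*k_h*k_w].
void pad_conv2d_forward_sketch(cudaStream_t stream, cublasHandle_t handle,
                               const float* input,    // [C_in, H, W]
                               const float* rate_map, // [H_out, W_out] perspective rate
                               const float* weight,   // [C_out, C_in*k_h*k_w]
                               const float* bias,     // [C_out] or nullptr
                               float* col_buffer,     // [C_in*k_h*k_w, H_out*W_out]
                               float* output,         // [C_out, H_out, W_out]
                               int C_in, int H, int W, int C_out,
                               int k_h, int k_w, int H_out, int W_out) {
  // 1. Gather the rate-dilated receptive fields into the column buffer.
  pad_conv2d_im2col(stream, input, rate_map, C_in, H, W, k_h, k_w,
                    /*stride_h=*/1, /*stride_w=*/1, H_out, W_out, col_buffer);

  // 2. output = weight x col_buffer. cuBLAS is column-major, so the row-major
  //    product is expressed as col_buffer^T * weight^T by swapping the operands.
  const float alpha = 1.0f, beta = 0.0f;
  const int m = H_out * W_out;     // output spatial size
  const int n = C_out;             // output channels
  const int k = C_in * k_h * k_w;  // reduced dimension
  cublasSetStream(handle, stream);
  cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
              &alpha, col_buffer, m, weight, k, &beta, output, m);

  // 3. Add the per-channel bias with the kernel defined above.
  if (bias != nullptr) {
    add_bias(stream, output, bias, C_out, H_out, W_out);
  }
}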
2b1118f4276e0c91641edebb5d6c68a4329980eb.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/operators/reduce_ops/cub_reduce.h" #include "paddle/fluid/operators/trace_op.h" namespace paddle { namespace operators { template <typename T> struct IdentityFunctor { HOSTDEVICE explicit inline IdentityFunctor() {} HOSTDEVICE inline T operator()(const T& x) const { return x; } }; template <typename DeviceContext, typename T> class TraceCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* input = context.Input<framework::Tensor>("Input"); auto* out = context.Output<framework::Tensor>("Out"); const int64_t offset = context.Attr<int>("offset"); const int64_t dim1 = context.Attr<int>("axis1"); const int64_t dim2 = context.Attr<int>("axis2"); T* out_data = out->mutable_data<T>(context.GetPlace()); const framework::Tensor diag = Diagonal<DeviceContext, T>(context, input, offset, dim1, dim2); if (diag.numel() > 0) { auto stream = context.cuda_device_context().stream(); std::vector<int> reduce_dims; reduce_dims.push_back(out->dims().size()); TensorReduce<T, T, hipcub::Sum, IdentityFunctor<T>>( diag, out, reduce_dims, static_cast<T>(0), hipcub::Sum(), IdentityFunctor<T>(), stream); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace platform = paddle::platform; REGISTER_OP_CUDA_KERNEL( trace, ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, int>, ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, platform::float16>, ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, float>, ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, double>, ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex64>, ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex128>); REGISTER_OP_CUDA_KERNEL( trace_grad, ops::TraceGradKernel<paddle::platform::CUDADeviceContext, int>, ops::TraceGradKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::TraceGradKernel<paddle::platform::CUDADeviceContext, platform::float16>, ops::TraceGradKernel<paddle::platform::CUDADeviceContext, float>, ops::TraceGradKernel<paddle::platform::CUDADeviceContext, double>, ops::TraceGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex64>, ops::TraceGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex128>);
2b1118f4276e0c91641edebb5d6c68a4329980eb.cu
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/operators/reduce_ops/cub_reduce.h" #include "paddle/fluid/operators/trace_op.h" namespace paddle { namespace operators { template <typename T> struct IdentityFunctor { HOSTDEVICE explicit inline IdentityFunctor() {} HOSTDEVICE inline T operator()(const T& x) const { return x; } }; template <typename DeviceContext, typename T> class TraceCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* input = context.Input<framework::Tensor>("Input"); auto* out = context.Output<framework::Tensor>("Out"); const int64_t offset = context.Attr<int>("offset"); const int64_t dim1 = context.Attr<int>("axis1"); const int64_t dim2 = context.Attr<int>("axis2"); T* out_data = out->mutable_data<T>(context.GetPlace()); const framework::Tensor diag = Diagonal<DeviceContext, T>(context, input, offset, dim1, dim2); if (diag.numel() > 0) { auto stream = context.cuda_device_context().stream(); std::vector<int> reduce_dims; reduce_dims.push_back(out->dims().size()); TensorReduce<T, T, cub::Sum, IdentityFunctor<T>>( diag, out, reduce_dims, static_cast<T>(0), cub::Sum(), IdentityFunctor<T>(), stream); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace platform = paddle::platform; REGISTER_OP_CUDA_KERNEL( trace, ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, int>, ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, platform::float16>, ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, float>, ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, double>, ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex64>, ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex128>); REGISTER_OP_CUDA_KERNEL( trace_grad, ops::TraceGradKernel<paddle::platform::CUDADeviceContext, int>, ops::TraceGradKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::TraceGradKernel<paddle::platform::CUDADeviceContext, platform::float16>, ops::TraceGradKernel<paddle::platform::CUDADeviceContext, float>, ops::TraceGradKernel<paddle::platform::CUDADeviceContext, double>, ops::TraceGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex64>, ops::TraceGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex128>);
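
As a reading aid for the pair above: the trace kernel extracts the (possibly offset) diagonal over two axes and reduces it to a sum. The standalone snippet below reproduces that semantics for a plain 2-D float matrix with a naive atomicAdd reduction; it deliberately avoids Paddle's Diagonal/TensorReduce helpers and is only an illustration of what the operator computes.

#include <cstdio>
#include <cuda_runtime.h>

// Sums matrix[r][c] along the (offset) diagonal -- the reduction the trace op
// performs on the diagonal slice (illustrative only; the real op handles N-D tensors).
__global__ void trace2d(const float* m, int rows, int cols, int offset, float* out) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int r = offset >= 0 ? i : i - offset;  // row index of the i-th diagonal element
  int c = offset >= 0 ? i + offset : i;  // column index
  if (r < rows && c < cols) atomicAdd(out, m[r * cols + c]);
}

int main() {
  const int rows = 3, cols = 4;
  float h[rows * cols];
  for (int i = 0; i < rows * cols; ++i) h[i] = static_cast<float>(i);
  float *d_m, *d_out, h_out = 0.f;
  cudaMalloc(&d_m, sizeof(h));
  cudaMalloc(&d_out, sizeof(float));
  cudaMemcpy(d_m, h, sizeof(h), cudaMemcpyHostToDevice);
  cudaMemcpy(d_out, &h_out, sizeof(float), cudaMemcpyHostToDevice);
  trace2d<<<1, 32>>>(d_m, rows, cols, /*offset=*/0, d_out);
  cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
  printf("trace = %f\n", h_out);  // 0 + 5 + 10 = 15
  cudaFree(d_m);
  cudaFree(d_out);
  return 0;
}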
e198fd5a0cea82dcd3838f3dc8a9c21d96e60139.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // file: raceCondition.cu __global__ void raceCondition(int *A) { __shared__ int Shared[64]; Shared[threadIdx.x] = A[threadIdx.x]; // no synchronization barrier! A[threadIdx.x] = Shared[63 - threadIdx.x]; // line 9 - faulting load } int main() { int *validPtr = 0; hipMalloc((void **)&validPtr, sizeof(int)*64); hipLaunchKernelGGL(( raceCondition), dim3(dim3(1,1)), dim3(dim3(64, 1)) , 0, 0, validPtr ); return 0; }
e198fd5a0cea82dcd3838f3dc8a9c21d96e60139.cu
// file: raceCondition.cu __global__ void raceCondition(int *A) { __shared__ int Shared[64]; Shared[threadIdx.x] = A[threadIdx.x]; // no synchronization barrier! A[threadIdx.x] = Shared[63 - threadIdx.x]; // line 9 - faulting load } int main() { int *validPtr = 0; cudaMalloc((void **)&validPtr, sizeof(int)*64); raceCondition<<< dim3(1,1), dim3(64, 1) >>>( validPtr ); return 0; }
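
The pair above is intentionally broken: it exists so a race detector flags the missing barrier between the shared-memory write and the reversed read. A corrected variant is sketched below (not part of the original pair); the only substantive change is the __syncthreads() barrier, plus a cudaDeviceSynchronize() so the host waits for the kernel to finish.

// raceConditionFixed.cu -- sketch of the corrected kernel
__global__ void raceConditionFixed(int *A) {
  __shared__ int Shared[64];
  Shared[threadIdx.x] = A[threadIdx.x];
  __syncthreads();                        // barrier: all writes visible before any read
  A[threadIdx.x] = Shared[63 - threadIdx.x];
}

int main() {
  int *validPtr = 0;
  cudaMalloc((void **)&validPtr, sizeof(int) * 64);
  raceConditionFixed<<< dim3(1, 1), dim3(64, 1) >>>(validPtr);
  cudaDeviceSynchronize();
  return 0;
}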
cf29a3aee9cb3f9443d0cf52512d262c8f92c08b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // N-queen for CUDA // // Copyright(c) 2008 Ping-Che Chen //#define WIN32_LEAN_AND_MEAN //#include <windows.h> #include <stdio.h> #include <cutil.h> #include <iostream> using namespace std; #define THREAD_NUM 96 int bunk = 0; // this is a dummy variable used for making sure clock() are not optimized out /* * ---------------------------------------------------------------- * This is a recursive version of n-queen backtracking solver. * A non-recursive version is used instead. * ---------------------------------------------------------------- long long solve_nqueen_internal(int n, unsigned int mask, unsigned int l_mask, unsigned int r_mask, unsigned int t_mask) { if(mask == t_mask) { return 1; } unsigned int m = (mask | l_mask | r_mask); if((m & t_mask) == t_mask) { return 0; } long long total = 0; unsigned int index = (m + 1) & ~m; while((index & t_mask) != 0) { total += solve_nqueen_internal(mask | index, (l_mask | index) << 1, (r_mask | index) >> 1, t_mask); m |= index; index = (m + 1) & ~m; } return total; } long long solve_nqueen(int n) { return solve_nqueen_internal(0, 0, 0, (1 << n) - 1); } */ /* ------------------------------------------------------------------- * This is a non-recursive version of n-queen backtracking solver. * This provides the basis for the CUDA version. * ------------------------------------------------------------------- */ long long solve_nqueen(int n) { unsigned int mask[32]; unsigned int l_mask[32]; unsigned int r_mask[32]; unsigned int m[32]; if(n <= 0 || n > 32) { return 0; } const unsigned int t_mask = (1 << n) - 1; long long total = 0; long long upper_total = 0; int i = 0, j; unsigned int index; mask[0] = 0; l_mask[0] = 0; r_mask[0] = 0; m[0] = 0; for(j = 0; j < (n + 1) / 2; j++) { index = (1 << j); m[0] |= index; mask[1] = index; l_mask[1] = index << 1; r_mask[1] = index >> 1; m[1] = (mask[1] | l_mask[1] | r_mask[1]); i = 1; if(n % 2 == 1 && j == (n + 1) / 2 - 1) { upper_total = total; total = 0; } while(i > 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = ((m[i] + 1) ^ m[i]) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { if(i + 1 == n) { total++; i--; } else { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; } } else { i --; } } } } bunk = 2; if(n % 2 == 0) { return total * 2; } else { return upper_total * 2 + total; } } /* ------------------------------------------------------------------- * This is a non-recursive version of n-queen backtracking solver * with multi-thread support. 
* ------------------------------------------------------------------- */ /* struct thread_context { HANDLE thread; bool stop; long long total; int n; unsigned int mask; unsigned int l_mask; unsigned int r_mask; unsigned int t_mask; HANDLE ready; HANDLE complete; }; DWORD WINAPI solve_nqueen_proc(LPVOID param) { thread_context* ctx = (thread_context*) param; unsigned int mask[32]; unsigned int l_mask[32]; unsigned int r_mask[32]; unsigned int m[32]; unsigned int t_mask; long long total; unsigned int index; unsigned int mark; for(;;) { WaitForSingleObject(ctx->ready, INFINITE); if(ctx->stop) { break; } int i = 0; mask[0] = ctx->mask; l_mask[0] = ctx->l_mask; r_mask[0] = ctx->r_mask; m[0] = mask[0] | l_mask[0] | r_mask[0]; total = 0; t_mask = ctx->t_mask; mark = ctx->n; while(i >= 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = (m[i] + 1) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { if(i + 1 == mark) { total++; i--; } else { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; } } else { i --; } } } ctx->total = total; SetEvent(ctx->complete); } return 0; } long long solve_nqueen_mcpu(int n) { if(n <= 0 || n > 32) { return 0; } SYSTEM_INFO info; thread_context* threads; int num_threads; GetSystemInfo(&info); num_threads = info.dwNumberOfProcessors; if(num_threads == 1) { // only one cpu found, use single thread version return solve_nqueen(n); } threads = new thread_context[num_threads]; int j; for(j = 0; j < num_threads; j++) { threads[j].stop = false; threads[j].ready = CreateEvent(0, FALSE, FALSE, 0); threads[j].complete = CreateEvent(0, FALSE, TRUE, 0); threads[j].thread = CreateThread(0, 0, solve_nqueen_proc, threads + j, 0, 0); threads[j].total = 0; } int thread_idx = 0; const unsigned int t_mask = (1 << n) - 1; long long total = 0; unsigned int index; unsigned int m_mask = 0; if(n % 2 == 1) { m_mask = 1 << ((n + 1) / 2 - 1); } for(j = 0; j < (n + 1) / 2; j++) { index = 1 << j; WaitForSingleObject(threads[thread_idx].complete, INFINITE); if(threads[thread_idx].mask != m_mask) { total += threads[thread_idx].total * 2; } else { total += threads[thread_idx].total; } threads[thread_idx].mask = index; threads[thread_idx].l_mask = index << 1; threads[thread_idx].r_mask = index >> 1; threads[thread_idx].t_mask = t_mask; threads[thread_idx].total = 0; threads[thread_idx].n = n - 1; SetEvent(threads[thread_idx].ready); thread_idx = (thread_idx + 1) % num_threads; } // collect all threads... HANDLE* events = new HANDLE[num_threads]; for(j = 0; j < num_threads; j++) { events[j] = threads[j].complete; } WaitForMultipleObjects(num_threads, events, TRUE, INFINITE); for(j = 0; j < num_threads; j++) { if(threads[j].mask != m_mask) { total += threads[j].total * 2; } else { total += threads[j].total; } threads[j].stop = true; SetEvent(threads[j].ready); events[j] = threads[j].thread; } WaitForMultipleObjects(num_threads, events, TRUE, INFINITE); for(j = 0; j < num_threads; j++) { CloseHandle(threads[j].thread); CloseHandle(threads[j].ready); CloseHandle(threads[j].complete); } delete[] threads; delete[] events; bunk = 3; return total; } */ /* -------------------------------------------------------------------------- * This is a non-recursive version of n-queen backtracking solver for CUDA. * It receives multiple initial conditions from a CPU iterator, and count * each conditions. 
* -------------------------------------------------------------------------- */ __global__ void solve_nqueen_cuda_kernel(int n, int mark, unsigned int* total_masks, unsigned int* total_l_masks, unsigned int* total_r_masks, unsigned int* results, int total_conditions) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int idx = bid * blockDim.x + tid; __shared__ unsigned int mask[THREAD_NUM][10]; __shared__ unsigned int l_mask[THREAD_NUM][10]; __shared__ unsigned int r_mask[THREAD_NUM][10]; __shared__ unsigned int m[THREAD_NUM][10]; __shared__ unsigned int sum[THREAD_NUM]; const unsigned int t_mask = (1 << n) - 1; int total = 0; int i = 0; unsigned int index; if(idx < total_conditions) { mask[tid][i] = total_masks[idx]; l_mask[tid][i] = total_l_masks[idx]; r_mask[tid][i] = total_r_masks[idx]; m[tid][i] = mask[tid][i] | l_mask[tid][i] | r_mask[tid][i]; while(i >= 0) { if((m[tid][i] & t_mask) == t_mask) { i--; } else { index = (m[tid][i] + 1) & ~m[tid][i]; m[tid][i] |= index; if((index & t_mask) != 0) { if(i + 1 == mark) { total++; i--; } else { mask[tid][i + 1] = mask[tid][i] | index; l_mask[tid][i + 1] = (l_mask[tid][i] | index) << 1; r_mask[tid][i + 1] = (r_mask[tid][i] | index) >> 1; m[tid][i + 1] = (mask[tid][i + 1] | l_mask[tid][i + 1] | r_mask[tid][i + 1]); i++; } } else { i --; } } } sum[tid] = total; } else { sum[tid] = 0; } __syncthreads(); // reduction if(tid < 64 && tid + 64 < THREAD_NUM) { sum[tid] += sum[tid + 64]; } __syncthreads(); if(tid < 32) { sum[tid] += sum[tid + 32]; } __syncthreads(); if(tid < 16) { sum[tid] += sum[tid + 16]; } __syncthreads(); if(tid < 8) { sum[tid] += sum[tid + 8]; } __syncthreads(); if(tid < 4) { sum[tid] += sum[tid + 4]; } __syncthreads(); if(tid < 2) { sum[tid] += sum[tid + 2]; } __syncthreads(); if(tid < 1) { sum[tid] += sum[tid + 1]; } __syncthreads(); if(tid == 0) { results[bid] = sum[0]; } } long long solve_nqueen_cuda(int n, int steps) { // generating start conditions unsigned int mask[32]; unsigned int l_mask[32]; unsigned int r_mask[32]; unsigned int m[32]; unsigned int index; if(n <= 0 || n > 32) { return 0; } unsigned int* total_masks = new unsigned int[steps]; unsigned int* total_l_masks = new unsigned int[steps]; unsigned int* total_r_masks = new unsigned int[steps]; unsigned int* results = new unsigned int[steps]; unsigned int* masks_cuda; unsigned int* l_masks_cuda; unsigned int* r_masks_cuda; unsigned int* results_cuda; hipMalloc((void**) &masks_cuda, sizeof(int) * steps); hipMalloc((void**) &l_masks_cuda, sizeof(int) * steps); hipMalloc((void**) &r_masks_cuda, sizeof(int) * steps); hipMalloc((void**) &results_cuda, sizeof(int) * steps / THREAD_NUM); const unsigned int t_mask = (1 << n) - 1; const unsigned int mark = n > 11 ? 
n - 10 : 2; long long total = 0; int total_conditions = 0; int i = 0, j; mask[0] = 0; l_mask[0] = 0; r_mask[0] = 0; m[0] = 0; bool computed = false; for(j = 0; j < n / 2; j++) { index = (1 << j); m[0] |= index; mask[1] = index; l_mask[1] = index << 1; r_mask[1] = index >> 1; m[1] = (mask[1] | l_mask[1] | r_mask[1]); i = 1; while(i > 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = (m[i] + 1) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; if(i == mark) { total_masks[total_conditions] = mask[i]; total_l_masks[total_conditions] = l_mask[i]; total_r_masks[total_conditions] = r_mask[i]; total_conditions++; if(total_conditions == steps) { if(computed) { hipMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, hipMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } // start computation hipMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); printf("%d %d\n",steps/THREAD_NUM,THREAD_NUM); hipLaunchKernelGGL(( solve_nqueen_cuda_kernel), dim3(steps/THREAD_NUM), dim3(THREAD_NUM), 0, 0, n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); computed = true; total_conditions = 0; } i--; } } else { i --; } } } } if(computed) { hipMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, hipMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } hipMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); printf("%d %d\n",steps/THREAD_NUM,THREAD_NUM); hipLaunchKernelGGL(( solve_nqueen_cuda_kernel), dim3(steps/THREAD_NUM), dim3(THREAD_NUM), 0, 0, n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); hipMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, hipMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } total *= 2; if(n % 2 == 1) { computed = false; total_conditions = 0; index = (1 << (n - 1) / 2); m[0] |= index; mask[1] = index; l_mask[1] = index << 1; r_mask[1] = index >> 1; m[1] = (mask[1] | l_mask[1] | r_mask[1]); i = 1; while(i > 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = (m[i] + 1) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; if(i == mark) { total_masks[total_conditions] = mask[i]; total_l_masks[total_conditions] = l_mask[i]; total_r_masks[total_conditions] = r_mask[i]; total_conditions++; if(total_conditions == steps) { if(computed) { hipMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, hipMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } // start computation hipMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); 
hipMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); printf("%d %d\n",steps/THREAD_NUM,THREAD_NUM); hipLaunchKernelGGL(( solve_nqueen_cuda_kernel), dim3(steps/THREAD_NUM), dim3(THREAD_NUM), 0, 0, n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); computed = true; total_conditions = 0; } i--; } } else { i --; } } } if(computed) { hipMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, hipMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } hipMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); printf("%d %d\n",steps/THREAD_NUM,THREAD_NUM); hipLaunchKernelGGL(( solve_nqueen_cuda_kernel), dim3(steps/THREAD_NUM), dim3(THREAD_NUM), 0, 0, n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); hipMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, hipMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } } hipFree(masks_cuda); hipFree(l_masks_cuda); hipFree(r_masks_cuda); hipFree(results_cuda); delete[] total_masks; delete[] total_l_masks; delete[] total_r_masks; delete[] results; bunk = 1; return total; } bool InitCUDA() { int count; hipGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } int i; for(i = 0; i < count; i++) { hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop, i) == hipSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } hipSetDevice(i); return true; } int main(int argc, char** argv) { unsigned int hTimer; double gpuTime; // initialise card and timer int deviceCount; CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "There is no device.\n"); exit(EXIT_FAILURE); } int dev; for (dev = 0; dev < deviceCount; ++dev) { hipDeviceProp_t deviceProp; CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceProperties(&deviceProp, dev)); if (deviceProp.major >= 1) break; } if (dev == deviceCount) { fprintf(stderr, "There is no device supporting CUDA.\n"); exit(EXIT_FAILURE); } else CUDA_SAFE_CALL(hipSetDevice(dev)); CUT_SAFE_CALL( cutCreateTimer(&hTimer) ); int n = 4;//11; clock_t start, end; long long solution; bool cpu = true, gpu = true; int argstart = 1, steps = 24576; if(argc >= 2 && argv[1][0] == '-') { if(argv[1][1] == 'c' || argv[1][1] == 'C') { gpu = false; } else if(argv[1][1] == 'g' || argv[1][1] == 'G') { cpu = false; } argstart = 2; } if(argc < argstart + 1) { printf("Usage: %s [-c|-g] n steps\n", argv[0]); printf(" -c: CPU only\n"); printf(" -g: GPU only\n"); printf(" n: n-queen\n"); printf(" steps: step for GPU\n"); printf("Default to 8 queen\n"); } else { n = atoi(argv[argstart]); if(n <= 1 || n > 32) { printf("Invalid n, n should be > 1 and <= 32\n"); printf("Note: n > 18 will require a very very long time to compute!\n"); return 0; } if(argc >= argstart + 2) { steps = atoi(argv[argstart + 1]); if(steps <= THREAD_NUM || steps % THREAD_NUM != 0) { printf("Invalid step, step should be multiple of %d\n", THREAD_NUM); return 0; } } } if(gpu) { if(!InitCUDA()) { return 0; } printf("CUDA 
initialized.\n"); } if(cpu) { CUDA_SAFE_CALL( hipDeviceSynchronize() ); CUT_SAFE_CALL( cutResetTimer(hTimer) ); CUT_SAFE_CALL( cutStartTimer(hTimer) ); //start = clock(); solution = solve_nqueen(n); //solve_nqueen_mcpu(n); //solution = solve_nqueen(n); //end = clock(); CUT_SAFE_CALL( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printf("CPU: %d queen = %lld time = %f msec\n", n, solution, gpuTime); } if(gpu) { //start = clock(); CUDA_SAFE_CALL( hipDeviceSynchronize() ); CUT_SAFE_CALL( cutResetTimer(hTimer) ); CUT_SAFE_CALL( cutStartTimer(hTimer) ); solution = solve_nqueen_cuda(n, steps); //end = clock(); CUT_SAFE_CALL( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printf("GPU: %d queen = %lld time = %f msec\n", n, solution, gpuTime); } return 0; }
cf29a3aee9cb3f9443d0cf52512d262c8f92c08b.cu
// N-queen for CUDA // // Copyright(c) 2008 Ping-Che Chen //#define WIN32_LEAN_AND_MEAN //#include <windows.h> #include <stdio.h> #include <cutil.h> #include <iostream> using namespace std; #define THREAD_NUM 96 int bunk = 0; // this is a dummy variable used for making sure clock() are not optimized out /* * ---------------------------------------------------------------- * This is a recursive version of n-queen backtracking solver. * A non-recursive version is used instead. * ---------------------------------------------------------------- long long solve_nqueen_internal(int n, unsigned int mask, unsigned int l_mask, unsigned int r_mask, unsigned int t_mask) { if(mask == t_mask) { return 1; } unsigned int m = (mask | l_mask | r_mask); if((m & t_mask) == t_mask) { return 0; } long long total = 0; unsigned int index = (m + 1) & ~m; while((index & t_mask) != 0) { total += solve_nqueen_internal(mask | index, (l_mask | index) << 1, (r_mask | index) >> 1, t_mask); m |= index; index = (m + 1) & ~m; } return total; } long long solve_nqueen(int n) { return solve_nqueen_internal(0, 0, 0, (1 << n) - 1); } */ /* ------------------------------------------------------------------- * This is a non-recursive version of n-queen backtracking solver. * This provides the basis for the CUDA version. * ------------------------------------------------------------------- */ long long solve_nqueen(int n) { unsigned int mask[32]; unsigned int l_mask[32]; unsigned int r_mask[32]; unsigned int m[32]; if(n <= 0 || n > 32) { return 0; } const unsigned int t_mask = (1 << n) - 1; long long total = 0; long long upper_total = 0; int i = 0, j; unsigned int index; mask[0] = 0; l_mask[0] = 0; r_mask[0] = 0; m[0] = 0; for(j = 0; j < (n + 1) / 2; j++) { index = (1 << j); m[0] |= index; mask[1] = index; l_mask[1] = index << 1; r_mask[1] = index >> 1; m[1] = (mask[1] | l_mask[1] | r_mask[1]); i = 1; if(n % 2 == 1 && j == (n + 1) / 2 - 1) { upper_total = total; total = 0; } while(i > 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = ((m[i] + 1) ^ m[i]) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { if(i + 1 == n) { total++; i--; } else { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; } } else { i --; } } } } bunk = 2; if(n % 2 == 0) { return total * 2; } else { return upper_total * 2 + total; } } /* ------------------------------------------------------------------- * This is a non-recursive version of n-queen backtracking solver * with multi-thread support. 
* ------------------------------------------------------------------- */ /* struct thread_context { HANDLE thread; bool stop; long long total; int n; unsigned int mask; unsigned int l_mask; unsigned int r_mask; unsigned int t_mask; HANDLE ready; HANDLE complete; }; DWORD WINAPI solve_nqueen_proc(LPVOID param) { thread_context* ctx = (thread_context*) param; unsigned int mask[32]; unsigned int l_mask[32]; unsigned int r_mask[32]; unsigned int m[32]; unsigned int t_mask; long long total; unsigned int index; unsigned int mark; for(;;) { WaitForSingleObject(ctx->ready, INFINITE); if(ctx->stop) { break; } int i = 0; mask[0] = ctx->mask; l_mask[0] = ctx->l_mask; r_mask[0] = ctx->r_mask; m[0] = mask[0] | l_mask[0] | r_mask[0]; total = 0; t_mask = ctx->t_mask; mark = ctx->n; while(i >= 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = (m[i] + 1) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { if(i + 1 == mark) { total++; i--; } else { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; } } else { i --; } } } ctx->total = total; SetEvent(ctx->complete); } return 0; } long long solve_nqueen_mcpu(int n) { if(n <= 0 || n > 32) { return 0; } SYSTEM_INFO info; thread_context* threads; int num_threads; GetSystemInfo(&info); num_threads = info.dwNumberOfProcessors; if(num_threads == 1) { // only one cpu found, use single thread version return solve_nqueen(n); } threads = new thread_context[num_threads]; int j; for(j = 0; j < num_threads; j++) { threads[j].stop = false; threads[j].ready = CreateEvent(0, FALSE, FALSE, 0); threads[j].complete = CreateEvent(0, FALSE, TRUE, 0); threads[j].thread = CreateThread(0, 0, solve_nqueen_proc, threads + j, 0, 0); threads[j].total = 0; } int thread_idx = 0; const unsigned int t_mask = (1 << n) - 1; long long total = 0; unsigned int index; unsigned int m_mask = 0; if(n % 2 == 1) { m_mask = 1 << ((n + 1) / 2 - 1); } for(j = 0; j < (n + 1) / 2; j++) { index = 1 << j; WaitForSingleObject(threads[thread_idx].complete, INFINITE); if(threads[thread_idx].mask != m_mask) { total += threads[thread_idx].total * 2; } else { total += threads[thread_idx].total; } threads[thread_idx].mask = index; threads[thread_idx].l_mask = index << 1; threads[thread_idx].r_mask = index >> 1; threads[thread_idx].t_mask = t_mask; threads[thread_idx].total = 0; threads[thread_idx].n = n - 1; SetEvent(threads[thread_idx].ready); thread_idx = (thread_idx + 1) % num_threads; } // collect all threads... HANDLE* events = new HANDLE[num_threads]; for(j = 0; j < num_threads; j++) { events[j] = threads[j].complete; } WaitForMultipleObjects(num_threads, events, TRUE, INFINITE); for(j = 0; j < num_threads; j++) { if(threads[j].mask != m_mask) { total += threads[j].total * 2; } else { total += threads[j].total; } threads[j].stop = true; SetEvent(threads[j].ready); events[j] = threads[j].thread; } WaitForMultipleObjects(num_threads, events, TRUE, INFINITE); for(j = 0; j < num_threads; j++) { CloseHandle(threads[j].thread); CloseHandle(threads[j].ready); CloseHandle(threads[j].complete); } delete[] threads; delete[] events; bunk = 3; return total; } */ /* -------------------------------------------------------------------------- * This is a non-recursive version of n-queen backtracking solver for CUDA. * It receives multiple initial conditions from a CPU iterator, and count * each conditions. 
* -------------------------------------------------------------------------- */ __global__ void solve_nqueen_cuda_kernel(int n, int mark, unsigned int* total_masks, unsigned int* total_l_masks, unsigned int* total_r_masks, unsigned int* results, int total_conditions) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int idx = bid * blockDim.x + tid; __shared__ unsigned int mask[THREAD_NUM][10]; __shared__ unsigned int l_mask[THREAD_NUM][10]; __shared__ unsigned int r_mask[THREAD_NUM][10]; __shared__ unsigned int m[THREAD_NUM][10]; __shared__ unsigned int sum[THREAD_NUM]; const unsigned int t_mask = (1 << n) - 1; int total = 0; int i = 0; unsigned int index; if(idx < total_conditions) { mask[tid][i] = total_masks[idx]; l_mask[tid][i] = total_l_masks[idx]; r_mask[tid][i] = total_r_masks[idx]; m[tid][i] = mask[tid][i] | l_mask[tid][i] | r_mask[tid][i]; while(i >= 0) { if((m[tid][i] & t_mask) == t_mask) { i--; } else { index = (m[tid][i] + 1) & ~m[tid][i]; m[tid][i] |= index; if((index & t_mask) != 0) { if(i + 1 == mark) { total++; i--; } else { mask[tid][i + 1] = mask[tid][i] | index; l_mask[tid][i + 1] = (l_mask[tid][i] | index) << 1; r_mask[tid][i + 1] = (r_mask[tid][i] | index) >> 1; m[tid][i + 1] = (mask[tid][i + 1] | l_mask[tid][i + 1] | r_mask[tid][i + 1]); i++; } } else { i --; } } } sum[tid] = total; } else { sum[tid] = 0; } __syncthreads(); // reduction if(tid < 64 && tid + 64 < THREAD_NUM) { sum[tid] += sum[tid + 64]; } __syncthreads(); if(tid < 32) { sum[tid] += sum[tid + 32]; } __syncthreads(); if(tid < 16) { sum[tid] += sum[tid + 16]; } __syncthreads(); if(tid < 8) { sum[tid] += sum[tid + 8]; } __syncthreads(); if(tid < 4) { sum[tid] += sum[tid + 4]; } __syncthreads(); if(tid < 2) { sum[tid] += sum[tid + 2]; } __syncthreads(); if(tid < 1) { sum[tid] += sum[tid + 1]; } __syncthreads(); if(tid == 0) { results[bid] = sum[0]; } } long long solve_nqueen_cuda(int n, int steps) { // generating start conditions unsigned int mask[32]; unsigned int l_mask[32]; unsigned int r_mask[32]; unsigned int m[32]; unsigned int index; if(n <= 0 || n > 32) { return 0; } unsigned int* total_masks = new unsigned int[steps]; unsigned int* total_l_masks = new unsigned int[steps]; unsigned int* total_r_masks = new unsigned int[steps]; unsigned int* results = new unsigned int[steps]; unsigned int* masks_cuda; unsigned int* l_masks_cuda; unsigned int* r_masks_cuda; unsigned int* results_cuda; cudaMalloc((void**) &masks_cuda, sizeof(int) * steps); cudaMalloc((void**) &l_masks_cuda, sizeof(int) * steps); cudaMalloc((void**) &r_masks_cuda, sizeof(int) * steps); cudaMalloc((void**) &results_cuda, sizeof(int) * steps / THREAD_NUM); const unsigned int t_mask = (1 << n) - 1; const unsigned int mark = n > 11 ? 
n - 10 : 2; long long total = 0; int total_conditions = 0; int i = 0, j; mask[0] = 0; l_mask[0] = 0; r_mask[0] = 0; m[0] = 0; bool computed = false; for(j = 0; j < n / 2; j++) { index = (1 << j); m[0] |= index; mask[1] = index; l_mask[1] = index << 1; r_mask[1] = index >> 1; m[1] = (mask[1] | l_mask[1] | r_mask[1]); i = 1; while(i > 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = (m[i] + 1) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; if(i == mark) { total_masks[total_conditions] = mask[i]; total_l_masks[total_conditions] = l_mask[i]; total_r_masks[total_conditions] = r_mask[i]; total_conditions++; if(total_conditions == steps) { if(computed) { cudaMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, cudaMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } // start computation cudaMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); printf("%d %d\n",steps/THREAD_NUM,THREAD_NUM); solve_nqueen_cuda_kernel<<<steps/THREAD_NUM, THREAD_NUM>>>(n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); computed = true; total_conditions = 0; } i--; } } else { i --; } } } } if(computed) { cudaMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, cudaMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } cudaMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); printf("%d %d\n",steps/THREAD_NUM,THREAD_NUM); solve_nqueen_cuda_kernel<<<steps/THREAD_NUM, THREAD_NUM>>>(n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); cudaMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, cudaMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } total *= 2; if(n % 2 == 1) { computed = false; total_conditions = 0; index = (1 << (n - 1) / 2); m[0] |= index; mask[1] = index; l_mask[1] = index << 1; r_mask[1] = index >> 1; m[1] = (mask[1] | l_mask[1] | r_mask[1]); i = 1; while(i > 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = (m[i] + 1) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; if(i == mark) { total_masks[total_conditions] = mask[i]; total_l_masks[total_conditions] = l_mask[i]; total_r_masks[total_conditions] = r_mask[i]; total_conditions++; if(total_conditions == steps) { if(computed) { cudaMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, cudaMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } // start computation cudaMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * 
total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); printf("%d %d\n",steps/THREAD_NUM,THREAD_NUM); solve_nqueen_cuda_kernel<<<steps/THREAD_NUM, THREAD_NUM>>>(n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); computed = true; total_conditions = 0; } i--; } } else { i --; } } } if(computed) { cudaMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, cudaMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } cudaMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); printf("%d %d\n",steps/THREAD_NUM,THREAD_NUM); solve_nqueen_cuda_kernel<<<steps/THREAD_NUM, THREAD_NUM>>>(n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); cudaMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, cudaMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } } cudaFree(masks_cuda); cudaFree(l_masks_cuda); cudaFree(r_masks_cuda); cudaFree(results_cuda); delete[] total_masks; delete[] total_l_masks; delete[] total_r_masks; delete[] results; bunk = 1; return total; } bool InitCUDA() { int count; cudaGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } int i; for(i = 0; i < count; i++) { cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } cudaSetDevice(i); return true; } int main(int argc, char** argv) { unsigned int hTimer; double gpuTime; // initialise card and timer int deviceCount; CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "There is no device.\n"); exit(EXIT_FAILURE); } int dev; for (dev = 0; dev < deviceCount; ++dev) { cudaDeviceProp deviceProp; CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceProperties(&deviceProp, dev)); if (deviceProp.major >= 1) break; } if (dev == deviceCount) { fprintf(stderr, "There is no device supporting CUDA.\n"); exit(EXIT_FAILURE); } else CUDA_SAFE_CALL(cudaSetDevice(dev)); CUT_SAFE_CALL( cutCreateTimer(&hTimer) ); int n = 4;//11; clock_t start, end; long long solution; bool cpu = true, gpu = true; int argstart = 1, steps = 24576; if(argc >= 2 && argv[1][0] == '-') { if(argv[1][1] == 'c' || argv[1][1] == 'C') { gpu = false; } else if(argv[1][1] == 'g' || argv[1][1] == 'G') { cpu = false; } argstart = 2; } if(argc < argstart + 1) { printf("Usage: %s [-c|-g] n steps\n", argv[0]); printf(" -c: CPU only\n"); printf(" -g: GPU only\n"); printf(" n: n-queen\n"); printf(" steps: step for GPU\n"); printf("Default to 8 queen\n"); } else { n = atoi(argv[argstart]); if(n <= 1 || n > 32) { printf("Invalid n, n should be > 1 and <= 32\n"); printf("Note: n > 18 will require a very very long time to compute!\n"); return 0; } if(argc >= argstart + 2) { steps = atoi(argv[argstart + 1]); if(steps <= THREAD_NUM || steps % THREAD_NUM != 0) { printf("Invalid step, step should be multiple of %d\n", THREAD_NUM); return 0; } } } if(gpu) { if(!InitCUDA()) { return 0; } printf("CUDA initialized.\n"); } if(cpu) { CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUT_SAFE_CALL( 
cutResetTimer(hTimer) ); CUT_SAFE_CALL( cutStartTimer(hTimer) ); //start = clock(); solution = solve_nqueen(n); //solve_nqueen_mcpu(n); //solution = solve_nqueen(n); //end = clock(); CUT_SAFE_CALL( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printf("CPU: %d queen = %lld time = %f msec\n", n, solution, gpuTime); } if(gpu) { //start = clock(); CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUT_SAFE_CALL( cutResetTimer(hTimer) ); CUT_SAFE_CALL( cutStartTimer(hTimer) ); solution = solve_nqueen_cuda(n, steps); //end = clock(); CUT_SAFE_CALL( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printf("GPU: %d queen = %lld time = %f msec\n", n, solution, gpuTime); } return 0; }
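
Both the CPU and GPU solvers above rest on the same bit trick: mask, l_mask and r_mask mark attacked columns, and (m + 1) & ~m picks the lowest free column in the current row. The short host-only sketch below is essentially the recursive version that is commented out at the top of the file, kept here as a compact illustration of that scheme (no half-board symmetry, no explicit stack, no GPU offload); for n = 8 it prints 92.

#include <cstdio>

// Minimal recursive bitmask N-queens counter (illustrative sketch of the trick
// used above; not part of the original file).
static long long count_queens(unsigned mask, unsigned l, unsigned r, unsigned all) {
  if (mask == all) return 1;      // every column occupied: one solution
  long long total = 0;
  unsigned m = mask | l | r;      // columns attacked in this row
  while ((m & all) != all) {
    unsigned bit = (m + 1) & ~m;  // lowest free column
    m |= bit;
    total += count_queens(mask | bit, (l | bit) << 1, (r | bit) >> 1, all);
  }
  return total;
}

int main() {
  int n = 8;
  printf("%d queens: %lld solutions\n", n, count_queens(0, 0, 0, (1u << n) - 1));
  return 0;
}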
db1e511b46406f531773ccab939684f5c9dadcd9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/optimizers/lars_momentum_op.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/phi/common/amp_type_traits.h" #include "paddle/phi/kernels/funcs/aligned_vector.h" #include "paddle/phi/kernels/funcs/math_cuda_utils.h" #if TORCH_HIP_VERSION >= 11000 #include <hip/hip_cooperative_groups.h> #endif #ifdef __HIPCC__ #define LARS_BLOCK_SIZE 256 #else #define LARS_BLOCK_SIZE 512 #endif #define LARS_MAX_MERGED_OPS 60 namespace paddle { namespace operators { template <typename T> using MultiPrecisionType = typename phi::dtype::MPTypeTrait<T>::Type; __device__ __forceinline__ float Sqrt(float x) { return sqrtf(x); } __device__ __forceinline__ double Sqrt(double x) { return sqrt(x); } __device__ __forceinline__ float Fma(float x, float y, float z) { return fmaf(x, y, z); } __device__ __forceinline__ double Fma(double x, double y, double z) { return fma(x, y, z); } template <typename T> class LarsThreadConfig { public: int grid_for_norm; int grid_for_lars; #if TORCH_HIP_VERSION >= 11000 private: int grid_stride; public: explicit LarsThreadConfig(int64_t numel, int sm_num, int num_blocks_per_sm) { int grid = (numel + LARS_BLOCK_SIZE - 1) / LARS_BLOCK_SIZE; grid_for_lars = ::min(::min(sm_num * num_blocks_per_sm, grid), LARS_BLOCK_SIZE); grid_stride = LARS_BLOCK_SIZE * grid_for_lars; } int GetRepeatTimes(int64_t numel) { return (numel + grid_stride - 1) / grid_stride - 1; } #else int repeat_times; explicit LarsThreadConfig(const int64_t numel) { int grid = (numel + LARS_BLOCK_SIZE - 1) / LARS_BLOCK_SIZE; grid_for_norm = ::min(grid, LARS_BLOCK_SIZE); const int grid_stride = grid_for_norm * LARS_BLOCK_SIZE; repeat_times = (numel + grid_stride - 1) / grid_stride - 1; // Determine to read 4 fp16 or float data once, but 2 double data once. grid_for_lars = std::is_same<double, T>::value ? 
(numel + (LARS_BLOCK_SIZE << 1) - 1) / (LARS_BLOCK_SIZE << 1) : (numel + (LARS_BLOCK_SIZE << 2) - 1) / (LARS_BLOCK_SIZE << 2); } #endif }; template <typename T, typename MT, int VecSize, bool IsAmp = false> __device__ inline void VectorizeLarsUpdate(const T* __restrict__ grad, const MT* param, const MT* velocity, T* param_out, MT* velocity_out, const MT mu, MT local_lr, const MT lars_weight_decay, const MT rescale_grad, const int tid, const int grid_stride, const int numel, MT* master_param_out = nullptr) { using VecType = phi::AlignedVector<T, VecSize>; using VecMType = phi::AlignedVector<MT, VecSize>; int main = numel >> (VecSize >> 1); int tail_offset = main * VecSize; const VecType* grad_vec = reinterpret_cast<const VecType*>(grad); const VecMType* param_vec = reinterpret_cast<const VecMType*>(param); const VecMType* velocity_vec = reinterpret_cast<const VecMType*>(velocity); VecType* param_out_vec = reinterpret_cast<VecType*>(param_out); VecMType* velocity_out_vec = reinterpret_cast<VecMType*>(velocity_out); VecMType* master_param_out_vec; if (IsAmp) { master_param_out_vec = reinterpret_cast<VecMType*>(master_param_out); } for (int i = tid; i < main; i += grid_stride) { VecType param_out_tmp; VecMType velocity_tmp, param_tmp; VecType grad_data = grad_vec[i]; VecMType param_data = param_vec[i]; VecMType velocity_data = velocity_vec[i]; #pragma unroll for (int j = 0; j < VecSize; ++j) { MT grad_val = static_cast<MT>(grad_data[j]) * rescale_grad; velocity_tmp[j] = Fma(velocity_data[j], mu, local_lr * Fma(lars_weight_decay, param_data[j], grad_val)); param_tmp[j] = param_data[j] - velocity_tmp[j]; param_out_tmp[j] = static_cast<T>(param_tmp[j]); } param_out_vec[i] = param_out_tmp; velocity_out_vec[i] = velocity_tmp; if (IsAmp) { master_param_out_vec[i] = param_tmp; } } for (int i = tid + tail_offset; i < numel; i += grid_stride) { MT grad_val = static_cast<MT>(grad[i]) * rescale_grad; MT param_val = param[i]; MT velocity_tmp = Fma(velocity[i], mu, local_lr * Fma(lars_weight_decay, param_val, grad_val)); MT param_tmp = param_val - velocity_tmp; param_out[i] = static_cast<T>(param_tmp); velocity_out[i] = velocity_tmp; if (IsAmp) { master_param_out[i] = param_tmp; } } } #if TORCH_HIP_VERSION >= 11000 /* Once TORCH_HIP_VERSION is beyond 11, cooperative_groups can be involved in without --rdc=true compile flag, then L2_norm kernel can be set with __device__ and cooperative_groups::grid_group also can be involved. Otherwise, adding this flag may affect much, L2_norm kernel shall be set with __global__.*/ // TODO(limingshu): declaration of cooperative_groups wapper is invalid in host. 
template <typename T, typename MT> __forceinline__ __device__ void L2NormKernel( const cooperative_groups::grid_group* cg, #else template <typename T, typename MT> __global__ void L2NormKernel( #endif const T* p_data, const T* __restrict__ g_data, MT* __restrict__ p_buffer, MT* __restrict__ g_buffer, const int64_t numel, const int repeat_times, const MT rescale_grad, const int thresh = 0, MT* __restrict__ p_n = nullptr, MT* __restrict__ g_n = nullptr) { __shared__ MT s_buffer[2]; int tid = threadIdx.x + blockDim.x * blockIdx.x; int grid_stride = LARS_BLOCK_SIZE * gridDim.x; MT p_tmp = static_cast<MT>(0); MT g_tmp = static_cast<MT>(0); while (tid < numel) { MT tmp0 = static_cast<MT>(p_data[tid]); MT tmp1 = static_cast<MT>(g_data[tid]); p_tmp += (tmp0 * tmp0); g_tmp += (tmp1 * tmp1); tid += grid_stride; } p_tmp = phi::funcs::BlockReduceSum<MT>(p_tmp, FINAL_MASK); g_tmp = phi::funcs::BlockReduceSum<MT>(g_tmp, FINAL_MASK); if (threadIdx.x == 0) { p_buffer[blockIdx.x] = p_tmp; g_buffer[blockIdx.x] = g_tmp; } #if TORCH_HIP_VERSION >= 11000 cg->sync(); // Grid sync for writring partial result to gloabl memory MT p_part_sum = threadIdx.x < gridDim.x ? p_buffer[threadIdx.x] : 0; MT g_part_sum = threadIdx.x < gridDim.x ? g_buffer[threadIdx.x] : 0; MT tmp0 = phi::funcs::BlockReduceSum<MT>(p_part_sum, FINAL_MASK); MT tmp1 = phi::funcs::BlockReduceSum<MT>(g_part_sum, FINAL_MASK); if (threadIdx.x == 0) { s_buffer[0] = tmp0; s_buffer[1] = tmp1; } __syncthreads(); *p_n = Sqrt(s_buffer[0]); *g_n = rescale_grad * Sqrt(s_buffer[1]); #endif } template <typename T, typename MT> __forceinline__ __device__ void MomentumUpdate( const T* param, const T* __restrict__ grad, const MT* velocity, T* param_out, MT* velocity_out, const MT* master_param, MT* master_param_out, const MT* __restrict__ learning_rate, const MT mu, const MT lars_weight_decay, const MT lars_coeff, const MT epsilon, const MT rescale_grad, const MT param_norm, const MT grad_norm, const int tid, const int grid_stride, const int64_t numel, const bool is_amp) { const MT lr = learning_rate[0]; MT local_lr = lr; if (param_norm > static_cast<MT>(0) && grad_norm > static_cast<MT>(0)) { local_lr = lr * lars_coeff * param_norm / (fma(lars_weight_decay, param_norm, grad_norm) + epsilon); } if (is_amp) { VectorizeLarsUpdate<T, MT, /*VecSize=*/4, /*IsAmp=*/true>(grad, master_param, velocity, param_out, velocity_out, mu, local_lr, lars_weight_decay, rescale_grad, tid, grid_stride, numel, master_param_out); } else { if (std::is_same<T, float>::value || std::is_same<T, paddle::platform::float16>::value) { /* TODO(limingshu): pointer cast may damage memory accessing for fp16 */ VectorizeLarsUpdate<T, MT, /*VecSize=*/4, /*IsAmp=*/false>( grad, reinterpret_cast<const MT*>(param), velocity, param_out, velocity_out, mu, local_lr, lars_weight_decay, rescale_grad, tid, grid_stride, numel); } else { VectorizeLarsUpdate<T, MT, /*VecSize=*/2, /*IsAmp=*/false>( grad, reinterpret_cast<const MT*>(param), velocity, param_out, velocity_out, mu, local_lr, lars_weight_decay, rescale_grad, tid, grid_stride, numel); } } } #if TORCH_HIP_VERSION >= 11000 template <typename T, typename MT> struct LarsParamWarpper { int64_t numel_arr[LARS_MAX_MERGED_OPS]; int repeat_arr[LARS_MAX_MERGED_OPS]; const T* __restrict__ g_arr[LARS_MAX_MERGED_OPS]; const MT* __restrict__ lr_arr[LARS_MAX_MERGED_OPS]; T* __restrict__ p_out_arr[LARS_MAX_MERGED_OPS]; MT* __restrict__ v_out_arr[LARS_MAX_MERGED_OPS]; MT* __restrict__ master_p_out_arr[LARS_MAX_MERGED_OPS]; MT weight_decay_arr[LARS_MAX_MERGED_OPS]; 
}; template <typename T, typename MT> __global__ void MergedMomentumLarsKernel(LarsParamWarpper<T, MT> lars_warpper, MT* __restrict__ p_buffer, MT* __restrict__ g_buffer, const int op_num, const MT mu, const MT lars_coeff, const MT epsilon, const MT rescale_grad, const bool is_amp) { int grid_stride = gridDim.x * LARS_BLOCK_SIZE; int tid = threadIdx.x + blockIdx.x * blockDim.x; const cooperative_groups::grid_group cg = cooperative_groups::this_grid(); for (int i = 0; i < op_num; ++i) { int numel = lars_warpper.numel_arr[i]; MT param_norm = static_cast<MT>(0); MT grad_norm = static_cast<MT>(0); L2NormKernel<T, MT>(&cg, lars_warpper.p_out_arr[i], lars_warpper.g_arr[i], p_buffer, g_buffer, numel, lars_warpper.repeat_arr[i], rescale_grad, 0, &param_norm, &grad_norm); MomentumUpdate<T, MT>(lars_warpper.p_out_arr[i], lars_warpper.g_arr[i], lars_warpper.v_out_arr[i], lars_warpper.p_out_arr[i], lars_warpper.v_out_arr[i], lars_warpper.master_p_out_arr[i], lars_warpper.master_p_out_arr[i], lars_warpper.lr_arr[i], mu, lars_warpper.weight_decay_arr[i], lars_coeff, epsilon, rescale_grad, param_norm, grad_norm, tid, grid_stride, numel, is_amp); } } #endif template <typename T, typename MT> __global__ void MomentumLarsKernel(const T* param, const T* __restrict__ grad, const MT* velocity, T* param_out, MT* velocity_out, const MT* master_param, MT* master_param_out, const MT* __restrict__ learning_rate, MT* __restrict__ p_buffer, MT* __restrict__ g_buffer, const MT mu, const MT lars_coeff, const MT lars_weight_decay, const MT epsilon, const MT rescale_grad, const int repeat_times, const int thresh, const int64_t numel, const bool is_amp) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int grid_stride = gridDim.x * LARS_BLOCK_SIZE; #if TORCH_HIP_VERSION >= 11000 const cooperative_groups::grid_group cg = cooperative_groups::this_grid(); MT param_norm = static_cast<MT>(0); MT grad_norm = static_cast<MT>(0); L2NormKernel<T, MT>(&cg, param, grad, p_buffer, g_buffer, numel, repeat_times, rescale_grad, gridDim.x, &param_norm, &grad_norm); #else const MT rescale_grad_pow = rescale_grad * rescale_grad; MT param_part_norm = threadIdx.x < thresh ? p_buffer[threadIdx.x] : 0; MT grad_part_norm = threadIdx.x < thresh ? 
g_buffer[threadIdx.x] : 0; __syncthreads(); MT param_norm = Sqrt(phi::funcs::BlockReduceSum<MT>(param_part_norm, FINAL_MASK)); MT grad_norm = Sqrt(rescale_grad_pow * phi::funcs::BlockReduceSum<MT>( grad_part_norm, FINAL_MASK)); #endif MomentumUpdate<T, MT>(param, grad, velocity, param_out, velocity_out, master_param, master_param_out, learning_rate, mu, lars_weight_decay, lars_coeff, epsilon, rescale_grad, param_norm, grad_norm, tid, grid_stride, numel, is_amp); } template <typename T, typename MT> inline void SeparatedLarsMomentumOpCUDAKernel(const phi::GPUContext& cuda_ctx, const T* param_data, T* param_out_data, const MT* velocity_data, MT* velocity_out_data, const T* grad_data, const MT* lr, MT* p_buffer, MT* g_buffer, const MT mu, const MT lars_coeff, const MT weight_decay, const MT epsilon, const MT rescale_grad, const int64_t numel, const MT* master_param_data, MT* master_out_data, const bool is_amp) { LarsThreadConfig<T> lars_thread_config(numel); hipLaunchKernelGGL(( L2NormKernel<T, MT>), dim3(lars_thread_config.grid_for_norm), dim3(LARS_BLOCK_SIZE), 0, cuda_ctx.stream(), param_data, grad_data, p_buffer, g_buffer, numel, lars_thread_config.repeat_times, rescale_grad); hipLaunchKernelGGL(( MomentumLarsKernel<T, MT>) , dim3(lars_thread_config.grid_for_lars), dim3(LARS_BLOCK_SIZE), 0, cuda_ctx.stream(), param_data, grad_data, velocity_data, param_out_data, velocity_out_data, master_param_data, master_out_data, lr, p_buffer, g_buffer, mu, lars_coeff, weight_decay, epsilon, rescale_grad, 0, lars_thread_config.grid_for_norm, numel, is_amp); } template <typename T, typename DeviceContext> class LarsMomentumOpCUDAKernel : public framework::OpKernel<T> { using MT = MultiPrecisionType<T>; public: void Compute(const framework::ExecutionContext& ctx) const override { int num_blocks_per_sm = 0; bool multi_precision = ctx.Attr<bool>("multi_precision"); auto& cuda_ctx = ctx.template device_context<phi::GPUContext>(); int sm_num = cuda_ctx.GetSMCount(); phi::DenseTensor tmp_buffer_t = ctx.AllocateTmpTensor<MT, phi::GPUContext>( {LARS_BLOCK_SIZE << 1}, cuda_ctx); auto* p_buffer = tmp_buffer_t.mutable_data<MT>(ctx.GetPlace()); auto* g_buffer = p_buffer + LARS_BLOCK_SIZE; MT mu = static_cast<MT>(ctx.Attr<float>("mu")); MT lars_coeff = static_cast<MT>(ctx.Attr<float>("lars_coeff")); MT epsilon = static_cast<MT>(ctx.Attr<float>("epsilon")); MT rescale_grad = static_cast<MT>(ctx.Attr<float>("rescale_grad")); auto weight_decay_arr = ctx.Attr<std::vector<float>>("lars_weight_decay"); auto grad = ctx.MultiInput<phi::DenseTensor>("Grad"); auto param = ctx.MultiInput<phi::DenseTensor>("Param"); auto velocity = ctx.MultiInput<phi::DenseTensor>("Velocity"); auto param_out = ctx.MultiOutput<phi::DenseTensor>("ParamOut"); auto velocity_out = ctx.MultiOutput<phi::DenseTensor>("VelocityOut"); auto learning_rate = ctx.MultiInput<phi::DenseTensor>("LearningRate"); auto master_param = ctx.MultiInput<phi::DenseTensor>("MasterParam"); auto master_param_out = ctx.MultiOutput<phi::DenseTensor>("MasterParamOut"); int op_num = grad.size(); #if TORCH_HIP_VERSION >= 11000 if (op_num > 1) { LarsParamWarpper<T, MT> lars_warpper; PADDLE_ENFORCE_LT( op_num, LARS_MAX_MERGED_OPS, platform::errors::InvalidArgument( "The maximum number of merged-ops supported is (%d), but" "lars op required for trainning this model is (%d)\n", LARS_MAX_MERGED_OPS, op_num)); /* Implementation of lars optimizer consists of following two steps: 1. Figure out the L2 norm statistic result of grad data and param data. 2. 
Update param and velocity with usage of L2 norm statistic result. Step1 and step2 can be merged with api provided by nvida hipLaunchCooperativeKernel: - The thread quantity shall less than pyhsical SM limited threads - Launche as thread-block can synchronizlly execute. */ hipOccupancyMaxActiveBlocksPerMultiprocessor( &num_blocks_per_sm, MergedMomentumLarsKernel<T, MT>, LARS_BLOCK_SIZE, sizeof(MT) << 1); size_t total_numel = 0; for (int i = 0; i < op_num; ++i) { size_t temp_numel = param[i]->numel(); total_numel += temp_numel; lars_warpper.numel_arr[i] = temp_numel; lars_warpper.g_arr[i] = grad[i]->data<T>(); lars_warpper.lr_arr[i] = learning_rate[i]->data<MT>(); lars_warpper.p_out_arr[i] = param_out[i]->mutable_data<T>(ctx.GetPlace()); lars_warpper.v_out_arr[i] = velocity_out[i]->mutable_data<MT>(ctx.GetPlace()); lars_warpper.weight_decay_arr[i] = static_cast<MT>(weight_decay_arr[i]); PADDLE_ENFORCE_EQ( param[i]->data<T>(), lars_warpper.p_out_arr[i], platform::errors::InvalidArgument( "Input(Param) and Output(ParamOut) must be the same Tensors.")); PADDLE_ENFORCE_EQ(velocity[i]->data<MT>(), lars_warpper.v_out_arr[i], platform::errors::InvalidArgument( "Input(Velocity) and Output(VelocityOut) must be " "the same Tensors.")); } int64_t avg_numel = total_numel / op_num; LarsThreadConfig<float> lars_thread_config( avg_numel, sm_num, num_blocks_per_sm); for (int i = 0; i < op_num; ++i) { lars_warpper.repeat_arr[i] = lars_thread_config.GetRepeatTimes(lars_warpper.numel_arr[i]); } if (multi_precision) { for (int i = 0; i < op_num; ++i) { lars_warpper.master_p_out_arr[i] = master_param_out[i]->mutable_data<MT>(ctx.GetPlace()); PADDLE_ENFORCE_EQ(master_param[i]->data<MT>(), lars_warpper.master_p_out_arr[i], platform::errors::InvalidArgument( "Input(MasterParam) and Output(MasterParamOut) " "must be the same Tensors.")); } } void* cuda_param[] = {reinterpret_cast<void*>(&lars_warpper), reinterpret_cast<void*>(&p_buffer), reinterpret_cast<void*>(&g_buffer), reinterpret_cast<void*>(&op_num), reinterpret_cast<void*>(&mu), reinterpret_cast<void*>(&lars_coeff), reinterpret_cast<void*>(&epsilon), reinterpret_cast<void*>(&rescale_grad), reinterpret_cast<void*>(&multi_precision)}; // Lanuch all sm theads, and thead of each block synchronizedly cooperate. hipLaunchCooperativeKernel( reinterpret_cast<void*>(MergedMomentumLarsKernel<T, MT>), lars_thread_config.grid_for_lars, LARS_BLOCK_SIZE, cuda_param, 0, cuda_ctx.stream()); } else { auto* param_data = param[0]->data<T>(); auto* grad_data = grad[0]->data<T>(); auto* velocity_data = velocity[0]->data<MT>(); auto* lr = learning_rate[0]->data<MT>(); auto* param_out_data = param_out[0]->mutable_data<T>(ctx.GetPlace()); auto* velocity_out_data = velocity_out[0]->mutable_data<MT>(ctx.GetPlace()); const MT* master_param_data = multi_precision ? master_param[0]->data<MT>() : nullptr; MT* master_param_out_data = multi_precision ? master_param_out[0]->mutable_data<MT>(ctx.GetPlace()) : nullptr; int64_t numel = param[0]->numel(); MT lars_weight_decay = weight_decay_arr[0]; // Figure out how many blocks can be active in each sm. 
hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks_per_sm, MomentumLarsKernel<T, MT>, LARS_BLOCK_SIZE, sizeof(MT) << 1); LarsThreadConfig<float> lars_thread_config( numel, sm_num, num_blocks_per_sm); int repeat_times = lars_thread_config.GetRepeatTimes(numel); int thresh = 0; void* cuda_param[] = { reinterpret_cast<void*>(&param_data), reinterpret_cast<void*>(&grad_data), reinterpret_cast<void*>(&velocity_data), reinterpret_cast<void*>(&param_out_data), reinterpret_cast<void*>(&velocity_out_data), reinterpret_cast<void*>(&master_param_data), reinterpret_cast<void*>(&master_param_out_data), reinterpret_cast<void*>(&lr), reinterpret_cast<void*>(&p_buffer), reinterpret_cast<void*>(&g_buffer), reinterpret_cast<void*>(&mu), reinterpret_cast<void*>(&lars_coeff), reinterpret_cast<void*>(&lars_weight_decay), reinterpret_cast<void*>(&epsilon), reinterpret_cast<void*>(&rescale_grad), reinterpret_cast<void*>(&repeat_times), reinterpret_cast<void*>(&thresh), // Just a placeholder reinterpret_cast<void*>(&numel), reinterpret_cast<void*>(&multi_precision)}; // Lanuch all sm theads. hipLaunchCooperativeKernel( reinterpret_cast<void*>(MomentumLarsKernel<T, MT>), lars_thread_config.grid_for_lars, LARS_BLOCK_SIZE, cuda_param, 0, cuda_ctx.stream()); } #else for (int i = 0; i < op_num; ++i) { const MT* master_param_data = multi_precision ? master_param[i]->data<MT>() : nullptr; MT* master_param_out_data = multi_precision ? master_param_out[i]->mutable_data<MT>(ctx.GetPlace()) : nullptr; SeparatedLarsMomentumOpCUDAKernel<T, MT>( cuda_ctx, param[i]->data<T>(), param_out[i]->mutable_data<T>(ctx.GetPlace()), velocity[i]->data<MT>(), velocity_out[i]->mutable_data<MT>(ctx.GetPlace()), grad[i]->data<T>(), learning_rate[i]->data<MT>(), p_buffer, g_buffer, mu, lars_coeff, weight_decay_arr[i], epsilon, rescale_grad, param[i]->numel(), master_param_data, master_param_out_data, multi_precision); } #endif } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; PD_REGISTER_STRUCT_KERNEL(lars_momentum, GPU, ALL_LAYOUT, ops::LarsMomentumOpCUDAKernel, float, double, plat::float16) {}
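// ---------------------------------------------------------------------------
// A minimal host-side sketch (not part of the operator above) of the LARS
// update that L2NormKernel + MomentumUpdate implement on the GPU. Function and
// variable names here are illustrative only. Per parameter tensor:
//   local_lr = lr * lars_coeff * ||p|| / (weight_decay * ||p|| + ||g|| + eps)
//   v        = mu * v + local_lr * (weight_decay * p + rescale_grad * g)
//   p        = p - v
#include <cmath>
#include <vector>

void lars_reference_update(std::vector<float>& p, const std::vector<float>& g,
                           std::vector<float>& v, float lr, float mu,
                           float lars_coeff, float weight_decay, float epsilon,
                           float rescale_grad) {
  double p_norm2 = 0.0, g_norm2 = 0.0;
  for (size_t i = 0; i < p.size(); ++i) {
    p_norm2 += static_cast<double>(p[i]) * p[i];
    double gi = static_cast<double>(g[i]) * rescale_grad;
    g_norm2 += gi * gi;
  }
  float p_norm = static_cast<float>(std::sqrt(p_norm2));
  float g_norm = static_cast<float>(std::sqrt(g_norm2));
  float local_lr = lr;
  if (p_norm > 0.f && g_norm > 0.f) {
    // Trust-ratio scaling of the base learning rate, as in MomentumUpdate.
    local_lr = lr * lars_coeff * p_norm / (weight_decay * p_norm + g_norm + epsilon);
  }
  for (size_t i = 0; i < p.size(); ++i) {
    float grad_val = g[i] * rescale_grad;
    v[i] = mu * v[i] + local_lr * (weight_decay * p[i] + grad_val);
    p[i] -= v[i];
  }
}
// ---------------------------------------------------------------------------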
db1e511b46406f531773ccab939684f5c9dadcd9.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/optimizers/lars_momentum_op.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/phi/common/amp_type_traits.h" #include "paddle/phi/kernels/funcs/aligned_vector.h" #include "paddle/phi/kernels/funcs/math_cuda_utils.h" #if CUDA_VERSION >= 11000 #include <cooperative_groups.h> #endif #ifdef __HIPCC__ #define LARS_BLOCK_SIZE 256 #else #define LARS_BLOCK_SIZE 512 #endif #define LARS_MAX_MERGED_OPS 60 namespace paddle { namespace operators { template <typename T> using MultiPrecisionType = typename phi::dtype::MPTypeTrait<T>::Type; __device__ __forceinline__ float Sqrt(float x) { return sqrtf(x); } __device__ __forceinline__ double Sqrt(double x) { return sqrt(x); } __device__ __forceinline__ float Fma(float x, float y, float z) { return fmaf(x, y, z); } __device__ __forceinline__ double Fma(double x, double y, double z) { return fma(x, y, z); } template <typename T> class LarsThreadConfig { public: int grid_for_norm; int grid_for_lars; #if CUDA_VERSION >= 11000 private: int grid_stride; public: explicit LarsThreadConfig(int64_t numel, int sm_num, int num_blocks_per_sm) { int grid = (numel + LARS_BLOCK_SIZE - 1) / LARS_BLOCK_SIZE; grid_for_lars = std::min(std::min(sm_num * num_blocks_per_sm, grid), LARS_BLOCK_SIZE); grid_stride = LARS_BLOCK_SIZE * grid_for_lars; } int GetRepeatTimes(int64_t numel) { return (numel + grid_stride - 1) / grid_stride - 1; } #else int repeat_times; explicit LarsThreadConfig(const int64_t numel) { int grid = (numel + LARS_BLOCK_SIZE - 1) / LARS_BLOCK_SIZE; grid_for_norm = std::min(grid, LARS_BLOCK_SIZE); const int grid_stride = grid_for_norm * LARS_BLOCK_SIZE; repeat_times = (numel + grid_stride - 1) / grid_stride - 1; // Determine to read 4 fp16 or float data once, but 2 double data once. grid_for_lars = std::is_same<double, T>::value ? 
(numel + (LARS_BLOCK_SIZE << 1) - 1) / (LARS_BLOCK_SIZE << 1) : (numel + (LARS_BLOCK_SIZE << 2) - 1) / (LARS_BLOCK_SIZE << 2); } #endif }; template <typename T, typename MT, int VecSize, bool IsAmp = false> __device__ inline void VectorizeLarsUpdate(const T* __restrict__ grad, const MT* param, const MT* velocity, T* param_out, MT* velocity_out, const MT mu, MT local_lr, const MT lars_weight_decay, const MT rescale_grad, const int tid, const int grid_stride, const int numel, MT* master_param_out = nullptr) { using VecType = phi::AlignedVector<T, VecSize>; using VecMType = phi::AlignedVector<MT, VecSize>; int main = numel >> (VecSize >> 1); int tail_offset = main * VecSize; const VecType* grad_vec = reinterpret_cast<const VecType*>(grad); const VecMType* param_vec = reinterpret_cast<const VecMType*>(param); const VecMType* velocity_vec = reinterpret_cast<const VecMType*>(velocity); VecType* param_out_vec = reinterpret_cast<VecType*>(param_out); VecMType* velocity_out_vec = reinterpret_cast<VecMType*>(velocity_out); VecMType* master_param_out_vec; if (IsAmp) { master_param_out_vec = reinterpret_cast<VecMType*>(master_param_out); } for (int i = tid; i < main; i += grid_stride) { VecType param_out_tmp; VecMType velocity_tmp, param_tmp; VecType grad_data = grad_vec[i]; VecMType param_data = param_vec[i]; VecMType velocity_data = velocity_vec[i]; #pragma unroll for (int j = 0; j < VecSize; ++j) { MT grad_val = static_cast<MT>(grad_data[j]) * rescale_grad; velocity_tmp[j] = Fma(velocity_data[j], mu, local_lr * Fma(lars_weight_decay, param_data[j], grad_val)); param_tmp[j] = param_data[j] - velocity_tmp[j]; param_out_tmp[j] = static_cast<T>(param_tmp[j]); } param_out_vec[i] = param_out_tmp; velocity_out_vec[i] = velocity_tmp; if (IsAmp) { master_param_out_vec[i] = param_tmp; } } for (int i = tid + tail_offset; i < numel; i += grid_stride) { MT grad_val = static_cast<MT>(grad[i]) * rescale_grad; MT param_val = param[i]; MT velocity_tmp = Fma(velocity[i], mu, local_lr * Fma(lars_weight_decay, param_val, grad_val)); MT param_tmp = param_val - velocity_tmp; param_out[i] = static_cast<T>(param_tmp); velocity_out[i] = velocity_tmp; if (IsAmp) { master_param_out[i] = param_tmp; } } } #if CUDA_VERSION >= 11000 /* Once CUDA_VERSION is beyond 11, cooperative_groups can be involved in without --rdc=true compile flag, then L2_norm kernel can be set with __device__ and cooperative_groups::grid_group also can be involved. Otherwise, adding this flag may affect much, L2_norm kernel shall be set with __global__.*/ // TODO(limingshu): declaration of cooperative_groups wapper is invalid in host. 
template <typename T, typename MT> __forceinline__ __device__ void L2NormKernel( const cooperative_groups::grid_group* cg, #else template <typename T, typename MT> __global__ void L2NormKernel( #endif const T* p_data, const T* __restrict__ g_data, MT* __restrict__ p_buffer, MT* __restrict__ g_buffer, const int64_t numel, const int repeat_times, const MT rescale_grad, const int thresh = 0, MT* __restrict__ p_n = nullptr, MT* __restrict__ g_n = nullptr) { __shared__ MT s_buffer[2]; int tid = threadIdx.x + blockDim.x * blockIdx.x; int grid_stride = LARS_BLOCK_SIZE * gridDim.x; MT p_tmp = static_cast<MT>(0); MT g_tmp = static_cast<MT>(0); while (tid < numel) { MT tmp0 = static_cast<MT>(p_data[tid]); MT tmp1 = static_cast<MT>(g_data[tid]); p_tmp += (tmp0 * tmp0); g_tmp += (tmp1 * tmp1); tid += grid_stride; } p_tmp = phi::funcs::BlockReduceSum<MT>(p_tmp, FINAL_MASK); g_tmp = phi::funcs::BlockReduceSum<MT>(g_tmp, FINAL_MASK); if (threadIdx.x == 0) { p_buffer[blockIdx.x] = p_tmp; g_buffer[blockIdx.x] = g_tmp; } #if CUDA_VERSION >= 11000 cg->sync(); // Grid sync for writring partial result to gloabl memory MT p_part_sum = threadIdx.x < gridDim.x ? p_buffer[threadIdx.x] : 0; MT g_part_sum = threadIdx.x < gridDim.x ? g_buffer[threadIdx.x] : 0; MT tmp0 = phi::funcs::BlockReduceSum<MT>(p_part_sum, FINAL_MASK); MT tmp1 = phi::funcs::BlockReduceSum<MT>(g_part_sum, FINAL_MASK); if (threadIdx.x == 0) { s_buffer[0] = tmp0; s_buffer[1] = tmp1; } __syncthreads(); *p_n = Sqrt(s_buffer[0]); *g_n = rescale_grad * Sqrt(s_buffer[1]); #endif } template <typename T, typename MT> __forceinline__ __device__ void MomentumUpdate( const T* param, const T* __restrict__ grad, const MT* velocity, T* param_out, MT* velocity_out, const MT* master_param, MT* master_param_out, const MT* __restrict__ learning_rate, const MT mu, const MT lars_weight_decay, const MT lars_coeff, const MT epsilon, const MT rescale_grad, const MT param_norm, const MT grad_norm, const int tid, const int grid_stride, const int64_t numel, const bool is_amp) { const MT lr = learning_rate[0]; MT local_lr = lr; if (param_norm > static_cast<MT>(0) && grad_norm > static_cast<MT>(0)) { local_lr = lr * lars_coeff * param_norm / (fma(lars_weight_decay, param_norm, grad_norm) + epsilon); } if (is_amp) { VectorizeLarsUpdate<T, MT, /*VecSize=*/4, /*IsAmp=*/true>(grad, master_param, velocity, param_out, velocity_out, mu, local_lr, lars_weight_decay, rescale_grad, tid, grid_stride, numel, master_param_out); } else { if (std::is_same<T, float>::value || std::is_same<T, paddle::platform::float16>::value) { /* TODO(limingshu): pointer cast may damage memory accessing for fp16 */ VectorizeLarsUpdate<T, MT, /*VecSize=*/4, /*IsAmp=*/false>( grad, reinterpret_cast<const MT*>(param), velocity, param_out, velocity_out, mu, local_lr, lars_weight_decay, rescale_grad, tid, grid_stride, numel); } else { VectorizeLarsUpdate<T, MT, /*VecSize=*/2, /*IsAmp=*/false>( grad, reinterpret_cast<const MT*>(param), velocity, param_out, velocity_out, mu, local_lr, lars_weight_decay, rescale_grad, tid, grid_stride, numel); } } } #if CUDA_VERSION >= 11000 template <typename T, typename MT> struct LarsParamWarpper { int64_t numel_arr[LARS_MAX_MERGED_OPS]; int repeat_arr[LARS_MAX_MERGED_OPS]; const T* __restrict__ g_arr[LARS_MAX_MERGED_OPS]; const MT* __restrict__ lr_arr[LARS_MAX_MERGED_OPS]; T* __restrict__ p_out_arr[LARS_MAX_MERGED_OPS]; MT* __restrict__ v_out_arr[LARS_MAX_MERGED_OPS]; MT* __restrict__ master_p_out_arr[LARS_MAX_MERGED_OPS]; MT weight_decay_arr[LARS_MAX_MERGED_OPS]; }; template 
<typename T, typename MT> __global__ void MergedMomentumLarsKernel(LarsParamWarpper<T, MT> lars_warpper, MT* __restrict__ p_buffer, MT* __restrict__ g_buffer, const int op_num, const MT mu, const MT lars_coeff, const MT epsilon, const MT rescale_grad, const bool is_amp) { int grid_stride = gridDim.x * LARS_BLOCK_SIZE; int tid = threadIdx.x + blockIdx.x * blockDim.x; const cooperative_groups::grid_group cg = cooperative_groups::this_grid(); for (int i = 0; i < op_num; ++i) { int numel = lars_warpper.numel_arr[i]; MT param_norm = static_cast<MT>(0); MT grad_norm = static_cast<MT>(0); L2NormKernel<T, MT>(&cg, lars_warpper.p_out_arr[i], lars_warpper.g_arr[i], p_buffer, g_buffer, numel, lars_warpper.repeat_arr[i], rescale_grad, 0, &param_norm, &grad_norm); MomentumUpdate<T, MT>(lars_warpper.p_out_arr[i], lars_warpper.g_arr[i], lars_warpper.v_out_arr[i], lars_warpper.p_out_arr[i], lars_warpper.v_out_arr[i], lars_warpper.master_p_out_arr[i], lars_warpper.master_p_out_arr[i], lars_warpper.lr_arr[i], mu, lars_warpper.weight_decay_arr[i], lars_coeff, epsilon, rescale_grad, param_norm, grad_norm, tid, grid_stride, numel, is_amp); } } #endif template <typename T, typename MT> __global__ void MomentumLarsKernel(const T* param, const T* __restrict__ grad, const MT* velocity, T* param_out, MT* velocity_out, const MT* master_param, MT* master_param_out, const MT* __restrict__ learning_rate, MT* __restrict__ p_buffer, MT* __restrict__ g_buffer, const MT mu, const MT lars_coeff, const MT lars_weight_decay, const MT epsilon, const MT rescale_grad, const int repeat_times, const int thresh, const int64_t numel, const bool is_amp) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int grid_stride = gridDim.x * LARS_BLOCK_SIZE; #if CUDA_VERSION >= 11000 const cooperative_groups::grid_group cg = cooperative_groups::this_grid(); MT param_norm = static_cast<MT>(0); MT grad_norm = static_cast<MT>(0); L2NormKernel<T, MT>(&cg, param, grad, p_buffer, g_buffer, numel, repeat_times, rescale_grad, gridDim.x, &param_norm, &grad_norm); #else const MT rescale_grad_pow = rescale_grad * rescale_grad; MT param_part_norm = threadIdx.x < thresh ? p_buffer[threadIdx.x] : 0; MT grad_part_norm = threadIdx.x < thresh ? 
g_buffer[threadIdx.x] : 0; __syncthreads(); MT param_norm = Sqrt(phi::funcs::BlockReduceSum<MT>(param_part_norm, FINAL_MASK)); MT grad_norm = Sqrt(rescale_grad_pow * phi::funcs::BlockReduceSum<MT>( grad_part_norm, FINAL_MASK)); #endif MomentumUpdate<T, MT>(param, grad, velocity, param_out, velocity_out, master_param, master_param_out, learning_rate, mu, lars_weight_decay, lars_coeff, epsilon, rescale_grad, param_norm, grad_norm, tid, grid_stride, numel, is_amp); } template <typename T, typename MT> inline void SeparatedLarsMomentumOpCUDAKernel(const phi::GPUContext& cuda_ctx, const T* param_data, T* param_out_data, const MT* velocity_data, MT* velocity_out_data, const T* grad_data, const MT* lr, MT* p_buffer, MT* g_buffer, const MT mu, const MT lars_coeff, const MT weight_decay, const MT epsilon, const MT rescale_grad, const int64_t numel, const MT* master_param_data, MT* master_out_data, const bool is_amp) { LarsThreadConfig<T> lars_thread_config(numel); L2NormKernel<T, MT><<<lars_thread_config.grid_for_norm, LARS_BLOCK_SIZE, 0, cuda_ctx.stream()>>>(param_data, grad_data, p_buffer, g_buffer, numel, lars_thread_config.repeat_times, rescale_grad); MomentumLarsKernel<T, MT> <<<lars_thread_config.grid_for_lars, LARS_BLOCK_SIZE, 0, cuda_ctx.stream()>>>(param_data, grad_data, velocity_data, param_out_data, velocity_out_data, master_param_data, master_out_data, lr, p_buffer, g_buffer, mu, lars_coeff, weight_decay, epsilon, rescale_grad, 0, lars_thread_config.grid_for_norm, numel, is_amp); } template <typename T, typename DeviceContext> class LarsMomentumOpCUDAKernel : public framework::OpKernel<T> { using MT = MultiPrecisionType<T>; public: void Compute(const framework::ExecutionContext& ctx) const override { int num_blocks_per_sm = 0; bool multi_precision = ctx.Attr<bool>("multi_precision"); auto& cuda_ctx = ctx.template device_context<phi::GPUContext>(); int sm_num = cuda_ctx.GetSMCount(); phi::DenseTensor tmp_buffer_t = ctx.AllocateTmpTensor<MT, phi::GPUContext>( {LARS_BLOCK_SIZE << 1}, cuda_ctx); auto* p_buffer = tmp_buffer_t.mutable_data<MT>(ctx.GetPlace()); auto* g_buffer = p_buffer + LARS_BLOCK_SIZE; MT mu = static_cast<MT>(ctx.Attr<float>("mu")); MT lars_coeff = static_cast<MT>(ctx.Attr<float>("lars_coeff")); MT epsilon = static_cast<MT>(ctx.Attr<float>("epsilon")); MT rescale_grad = static_cast<MT>(ctx.Attr<float>("rescale_grad")); auto weight_decay_arr = ctx.Attr<std::vector<float>>("lars_weight_decay"); auto grad = ctx.MultiInput<phi::DenseTensor>("Grad"); auto param = ctx.MultiInput<phi::DenseTensor>("Param"); auto velocity = ctx.MultiInput<phi::DenseTensor>("Velocity"); auto param_out = ctx.MultiOutput<phi::DenseTensor>("ParamOut"); auto velocity_out = ctx.MultiOutput<phi::DenseTensor>("VelocityOut"); auto learning_rate = ctx.MultiInput<phi::DenseTensor>("LearningRate"); auto master_param = ctx.MultiInput<phi::DenseTensor>("MasterParam"); auto master_param_out = ctx.MultiOutput<phi::DenseTensor>("MasterParamOut"); int op_num = grad.size(); #if CUDA_VERSION >= 11000 if (op_num > 1) { LarsParamWarpper<T, MT> lars_warpper; PADDLE_ENFORCE_LT( op_num, LARS_MAX_MERGED_OPS, platform::errors::InvalidArgument( "The maximum number of merged-ops supported is (%d), but" "lars op required for trainning this model is (%d)\n", LARS_MAX_MERGED_OPS, op_num)); /* Implementation of lars optimizer consists of following two steps: 1. Figure out the L2 norm statistic result of grad data and param data. 2. Update param and velocity with usage of L2 norm statistic result. 
Step1 and step2 can be merged with api provided by nvida cudaLaunchCooperativeKernel: - The thread quantity shall less than pyhsical SM limited threads - Launche as thread-block can synchronizlly execute. */ cudaOccupancyMaxActiveBlocksPerMultiprocessor( &num_blocks_per_sm, MergedMomentumLarsKernel<T, MT>, LARS_BLOCK_SIZE, sizeof(MT) << 1); size_t total_numel = 0; for (int i = 0; i < op_num; ++i) { size_t temp_numel = param[i]->numel(); total_numel += temp_numel; lars_warpper.numel_arr[i] = temp_numel; lars_warpper.g_arr[i] = grad[i]->data<T>(); lars_warpper.lr_arr[i] = learning_rate[i]->data<MT>(); lars_warpper.p_out_arr[i] = param_out[i]->mutable_data<T>(ctx.GetPlace()); lars_warpper.v_out_arr[i] = velocity_out[i]->mutable_data<MT>(ctx.GetPlace()); lars_warpper.weight_decay_arr[i] = static_cast<MT>(weight_decay_arr[i]); PADDLE_ENFORCE_EQ( param[i]->data<T>(), lars_warpper.p_out_arr[i], platform::errors::InvalidArgument( "Input(Param) and Output(ParamOut) must be the same Tensors.")); PADDLE_ENFORCE_EQ(velocity[i]->data<MT>(), lars_warpper.v_out_arr[i], platform::errors::InvalidArgument( "Input(Velocity) and Output(VelocityOut) must be " "the same Tensors.")); } int64_t avg_numel = total_numel / op_num; LarsThreadConfig<float> lars_thread_config( avg_numel, sm_num, num_blocks_per_sm); for (int i = 0; i < op_num; ++i) { lars_warpper.repeat_arr[i] = lars_thread_config.GetRepeatTimes(lars_warpper.numel_arr[i]); } if (multi_precision) { for (int i = 0; i < op_num; ++i) { lars_warpper.master_p_out_arr[i] = master_param_out[i]->mutable_data<MT>(ctx.GetPlace()); PADDLE_ENFORCE_EQ(master_param[i]->data<MT>(), lars_warpper.master_p_out_arr[i], platform::errors::InvalidArgument( "Input(MasterParam) and Output(MasterParamOut) " "must be the same Tensors.")); } } void* cuda_param[] = {reinterpret_cast<void*>(&lars_warpper), reinterpret_cast<void*>(&p_buffer), reinterpret_cast<void*>(&g_buffer), reinterpret_cast<void*>(&op_num), reinterpret_cast<void*>(&mu), reinterpret_cast<void*>(&lars_coeff), reinterpret_cast<void*>(&epsilon), reinterpret_cast<void*>(&rescale_grad), reinterpret_cast<void*>(&multi_precision)}; // Lanuch all sm theads, and thead of each block synchronizedly cooperate. cudaLaunchCooperativeKernel( reinterpret_cast<void*>(MergedMomentumLarsKernel<T, MT>), lars_thread_config.grid_for_lars, LARS_BLOCK_SIZE, cuda_param, 0, cuda_ctx.stream()); } else { auto* param_data = param[0]->data<T>(); auto* grad_data = grad[0]->data<T>(); auto* velocity_data = velocity[0]->data<MT>(); auto* lr = learning_rate[0]->data<MT>(); auto* param_out_data = param_out[0]->mutable_data<T>(ctx.GetPlace()); auto* velocity_out_data = velocity_out[0]->mutable_data<MT>(ctx.GetPlace()); const MT* master_param_data = multi_precision ? master_param[0]->data<MT>() : nullptr; MT* master_param_out_data = multi_precision ? master_param_out[0]->mutable_data<MT>(ctx.GetPlace()) : nullptr; int64_t numel = param[0]->numel(); MT lars_weight_decay = weight_decay_arr[0]; // Figure out how many blocks can be active in each sm. 
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks_per_sm, MomentumLarsKernel<T, MT>, LARS_BLOCK_SIZE, sizeof(MT) << 1); LarsThreadConfig<float> lars_thread_config( numel, sm_num, num_blocks_per_sm); int repeat_times = lars_thread_config.GetRepeatTimes(numel); int thresh = 0; void* cuda_param[] = { reinterpret_cast<void*>(&param_data), reinterpret_cast<void*>(&grad_data), reinterpret_cast<void*>(&velocity_data), reinterpret_cast<void*>(&param_out_data), reinterpret_cast<void*>(&velocity_out_data), reinterpret_cast<void*>(&master_param_data), reinterpret_cast<void*>(&master_param_out_data), reinterpret_cast<void*>(&lr), reinterpret_cast<void*>(&p_buffer), reinterpret_cast<void*>(&g_buffer), reinterpret_cast<void*>(&mu), reinterpret_cast<void*>(&lars_coeff), reinterpret_cast<void*>(&lars_weight_decay), reinterpret_cast<void*>(&epsilon), reinterpret_cast<void*>(&rescale_grad), reinterpret_cast<void*>(&repeat_times), reinterpret_cast<void*>(&thresh), // Just a placeholder reinterpret_cast<void*>(&numel), reinterpret_cast<void*>(&multi_precision)}; // Lanuch all sm theads. cudaLaunchCooperativeKernel( reinterpret_cast<void*>(MomentumLarsKernel<T, MT>), lars_thread_config.grid_for_lars, LARS_BLOCK_SIZE, cuda_param, 0, cuda_ctx.stream()); } #else for (int i = 0; i < op_num; ++i) { const MT* master_param_data = multi_precision ? master_param[i]->data<MT>() : nullptr; MT* master_param_out_data = multi_precision ? master_param_out[i]->mutable_data<MT>(ctx.GetPlace()) : nullptr; SeparatedLarsMomentumOpCUDAKernel<T, MT>( cuda_ctx, param[i]->data<T>(), param_out[i]->mutable_data<T>(ctx.GetPlace()), velocity[i]->data<MT>(), velocity_out[i]->mutable_data<MT>(ctx.GetPlace()), grad[i]->data<T>(), learning_rate[i]->data<MT>(), p_buffer, g_buffer, mu, lars_coeff, weight_decay_arr[i], epsilon, rescale_grad, param[i]->numel(), master_param_data, master_param_out_data, multi_precision); } #endif } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; PD_REGISTER_STRUCT_KERNEL(lars_momentum, GPU, ALL_LAYOUT, ops::LarsMomentumOpCUDAKernel, float, double, plat::float16) {}
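// ---------------------------------------------------------------------------
// Minimal sketch, assuming a CUDA 11+ toolchain, of the cooperative-launch
// pattern the merged LARS path above relies on: the grid is sized with the
// occupancy API so every block can be resident at once, and this_grid().sync()
// provides the grid-wide barrier between the norm pass and the update pass.
// Kernel and variable names below are illustrative, not part of the operator;
// `partial` is assumed to hold at least one float per launched block.
#include <cooperative_groups.h>
#include <cuda_runtime.h>

__global__ void two_phase_kernel(float* partial, float* out, int n) {
  namespace cg = cooperative_groups;
  cg::grid_group grid = cg::this_grid();
  // Phase 1: every block publishes a partial result.
  if (threadIdx.x == 0) partial[blockIdx.x] = 1.0f;
  grid.sync();  // all blocks of the grid see each other's partials
  // Phase 2: consume the partials.
  if (blockIdx.x == 0 && threadIdx.x == 0) {
    float s = 0.f;
    for (int i = 0; i < gridDim.x; ++i) s += partial[i];
    *out = s;
  }
  (void)n;
}

void launch_cooperatively(float* partial, float* out, int n, cudaStream_t stream) {
  int dev = 0, sm_count = 0, blocks_per_sm = 0;
  cudaGetDevice(&dev);
  cudaDeviceGetAttribute(&sm_count, cudaDevAttrMultiProcessorCount, dev);
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blocks_per_sm, two_phase_kernel, 256, 0);
  dim3 grid(sm_count * blocks_per_sm), block(256);
  void* args[] = {&partial, &out, &n};
  cudaLaunchCooperativeKernel(reinterpret_cast<void*>(two_phase_kernel),
                              grid, block, args, 0, stream);
}
// ---------------------------------------------------------------------------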
57d8ff4f84edac8a093814d07b7c69e006fe9515.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Monte Carlo sampling kernel: each thread draws NUM_POINTS_PER_THREAD random
// points in the unit square and counts how many fall inside the unit quarter
// circle, accumulating the hits in counts[index].
__global__ void kernel_generatePoints(hiprandState_t* globalState, int* counts, int totalNumThreads)
{
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    float x, y;
    if (index >= totalNumThreads) {
        return;
    }
    // Work on a register copy of the RNG state and write it back at the end.
    hiprandState_t localState = globalState[index];
    for (int i = 0; i < NUM_POINTS_PER_THREAD; i++) {
        x = hiprand_uniform(&localState);
        y = hiprand_uniform(&localState);
        if (x * x + y * y <= 1) {
            counts[index]++;
        }
    }
    globalState[index] = localState;
}
57d8ff4f84edac8a093814d07b7c69e006fe9515.cu
#include "includes.h" __global__ void kernel_generatePoints( curandState * globalState, int* counts, int totalNumThreads) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; float x,y; if(index >= totalNumThreads){ return; } curandState localState = globalState[index]; for(int i = 0; i < NUM_POINTS_PER_THREAD; i++) { x = curand_uniform( &localState); y = curand_uniform( &localState); if(x*x+y*y <=1){ counts[index]++; } } globalState[index] = localState; }
40e084b6270fddaa02c6ff11b5f78f831ceb7b5c.hip
// !!! This is a file automatically generated by hipify!!! /* The code here isn't in its working stage. Moreover I am limited with the size of image that I can work with, if I use the following technique. Need to work around the size of kernel too. FIXME: Refine this appreach before using it. */ #include "assert.h" #include "hip/hip_runtime.h" #include "api.h" #define KERNEL_RADIUS 8 #define KERNEL_WIDTH ( 2 * KERNEL_RADIUS + 1 ) // +1 is there because I need to keep in mind the current pixel being processed upon. __device__ __constant__ float kernel[KERNEL_WIDTH]; #define ROW_TILE_WIDTH 128 // Just for referenece ROW_TILE_HEIGHT = 1 #define COLUMN_TILE_WIDTH 16 #define COLUMN_TILE_HEIGHT 48 #define KERNEL_RADIUS_ALIGNED 16 #define MUL(a, b) __mul24(a, b) // It provides additional speedup for multiplications. /////////////////////////////////// Loop unroll templates /////////////////////////////////////// /////////////////////////// try and use #pragma unroll instead ////////////////////////////////// template<int i> __device__ float unrollRow(float *data) { return data[KERNEL_RADIUS - i] * kernel[i] + unrollRow<i - 1>(data); } template<> __device__ float unrollRow<-1>(float *data) { return 0; } template<int i> __device__ float unrollColumn(float *data) { return data[(KERNEL_RADIUS - i) * COLUMN_TILE_WIDTH] * kernel[i] + unrollColumn<i - 1>(data); } template<> __device__ float unrollColumn<-1>(float *data) { return 0; } ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////// Row convultion ///////////////////////////////////////////// __global__ void row_convolution( gpu_context *ctx, (float *)input, (float *)result ) { __shared__ float smem[KERNEL_RADIUS + ROW_TILE_WIDTH + KERNEL_RADIUS]; // Apron are the extra pixels that are needed to calculate pixel values for input pixels near the border. int tile_start = MUL( blockIdx.x, ROW_TILE_WIDTH); int tile_end = tile_start + ROW_TILE_WIDTH - 1; int apron_start = tile_start - KERNEL_RADIUS; int apron_start_aligned = tile_start - KERNEL_RADIUS_ALIGNED; int apron_end = tile_end + KERNEL_RADIUS; //There may be cases when the apron goes beyond the image borders i.e when consider pixels at the borders. //In those cases just clamp the pixels to the pixels at the border. int tile_end_clamped = max( tile_end, ctx->width - 1); int apron_start_clamped = min( apron_start, 0); int apron_end_clamped = max( apron_end, ctx->height - 1); // Calculating the x and y offset of the current block int yo = MUL( blockIdx.y, ctx->width); // FIXME: Verify this width paramemter. It can be faulty. int x_input = apron_start_aligned + threadIdx.x; // x offst in the input buffer // We need to have inactive threads at the start // (they are made just to align the kernel radius to half warp) if( x_input >= apron_start ) { int x_shared = x_input - apron_start; // The position inside the shared memory. This will eventually lead to x_shared = x - 8. Which is observable from the fact // that shared memory doesnt insclude kernel_radius_aligned. smem[x_shared] = ((x_input >= apron_start_clamped) && (x_input <= apron_end_clamped)) ? 
input[x_input + yo] : 0; } __syncthreads(); //x offset in the result buffer int x_result = tile_start + threadIdx.x; if( x_result < tile_end_clamped) { int x_shared2 = x_result - apron_start; float sum = 0; #ifdef UNROLL_INNER sum = unrollRow<2 * KERNEL_RADIUS>(smem + x_shared2); #else for(int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS ; k++) sum += smem[x_shared2 + k] * kernel[KERNEL_RADIUS - k]; #endif result[yo + x_result] = sum; // yo is the y offset to the input and result buffer. } } /////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////// Column Convolution ///////////////////////////////////////////// __global__ void column_convolution( gpu_context *ctx, (float *)input, (float *)result, int shared_stride, int global_stride) { __shared__ float smem[COLUMN_TILE_WIDTH * (KERNEL_RADIUS + COLUMN_TILE_HEIGHT + KERNEL_RADIUS)]; /////// Calculating the starting and ending indices //////// int tile_start = MUL( blockIdx.y, COLUMN_TILE_HEIGHT); int tile_end = tile_start + COLUMN_TILE_HEIGHT - 1; int apron_start = tile_start - KERNEL_RADIUS; int apron_end = tile_end + KERNEL_RADIUS; /////// Clamping the indices to image borders //////// int tile_end_clamped = min( tile_end, ctx->height); int apron_start_clamped = max( apron_start, 0); int apron_end_clamped = min( apron_end, ctx->height - 1); int xpos = MUL( blockIdx.x, COLUMN_TILE_WIDTH) + threadIdx.x; //// Calculating the corresponding global and shared memory position ///// int shared_pos = MUL( threadIdx.y, COLUMN_TILE_WIDTH) + threadIdx.x; int global_pos = MUL( apron_start + threadIdx.y, ctx->width) + xpos; //// Filling of the shared memory //// #pragma unroll for(int y = apron_start + threadIdx.y; y<= apron_end; y += blockDim.y) { smem[shared_pos] = ((y >= apron_start_clamped) && (y <= apron_end_clamped)) ? 
input[global_pos] : 0; shared_pos += shared_stride; global_pos += global_stride; } __syncthreads(); /////// The global and shared positions excluding the apron pixels /////// shared_pos = MUL(threadIdx.y + KERNEL_RADIUS, COLUMN_TILE_WIDTH) + threadIdx.x; global_pos = MUL(threadIdx.y + tile_start, ctx->width) + xpos; for(int y = threadIdx.y; y <= tile_end_clamped ; y += blockDim.y ) { float sum = 0; #ifdef UNROLL_INNER sum = unrollColumn<2 * KERNEL_RADIUS>(kernel + shared_pos); #else for(int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS ; k++) sum += smem[ shared_pos + MUL( k, COLUMN_TILE_WIDTH) ] * kernel[KERNEL_RADIUS - k]; #endif result[global_pos] = sum; shared_pos += shared_stride; global_pos += global_stride; } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////// Wrapper combinig the row and column convolution ///////////////////////////////// gpu_error_t gpu_blur(gpu_context *ctx) { float *input; float *result; float *h_Kernel; h_Kernel = (float *)malloc((KERNEL_WIDTH * sizeof(float))); hipMalloc( (void **)&input, (ctx->width * ctx->height * sizeof(float))); hipMalloc( (void **)&result, (ctx->width * ctx->height * sizeof(float))); float kernelSum = 0; for(i = 0; i < KERNEL_WIDTH; i++) { float dist = (float)(i - KERNEL_RADIUS) / (float)KERNEL_RADIUS; h_Kernel[i] = expf(- dist * dist / 2); kernelSum += h_Kernel[i]; } for(i = 0; i < KERNEL_WIDTH; i++) h_Kernel[i] /= kernelSum; hipMemcpyToSymbol( kernel, h_Kernel, (KERNEL_WIDTH * sizeof(float)) ); for(int i=0; i < ctx->width * ctx->height ;i++) { input[i] = (float)ctx->output_buffer_1[i]; } int temp1 = (ctx->width % ROW_TILE_WIDTH != 0) ? (ctx->width / ROW_TILE_WIDTH + 1) : (ctx->width / ROW_TILE_WIDTH) ; int temp2 = (ctx->width % COLUMN_TILE_WIDTH != 0) ? (ctx->width / COLUMN_TILE_WIDTH + 1) : (ctx->width / COLUMN_TILE_WIDTH); int temp3 = (ctx->height % COLUMN_TILE_HEIGHT != 0) ? (ctx->height / COLUMN_TILE_HEIGHT + 1) : (ctx->height / COLUMN_TILE_HEIGHT); dim3 row_threads(KERNEL_RADIUS_ALIGNED + ROW_TILE_WIDTH + KERNEL_RADIUS); dim3 row_blocks(temp1,ctx->height); dim3 column_threads(COLUMN_TILE_WIDTH, 8); dim3 column_blocks( temp2, temp3); hipDeviceSynchronize(); hipLaunchKernelGGL(( row_convolution), dim3(row_blocks),dim3(row_threads), 0, 0, ctx, input, result); hipLaunchKernelGGL(( column_convolution), dim3(column_blocks),dim3(column_threads), 0, 0, ctx, input, result, COLUMN_TILE_WIDTH * column_threads.y, ctx->width * column_threads.y); for(int i=0; i < ctx->width * ctx->height ;i++) { ctx->output_buffer_1[i] = (unsigned char)result[i]; } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////
40e084b6270fddaa02c6ff11b5f78f831ceb7b5c.cu
/* The code here isn't in its working stage. Moreover I am limited with the size of image that I can work with, if I use the following technique. Need to work around the size of kernel too. FIXME: Refine this appreach before using it. */ #include "assert.h" #include "cuda.h" #include "api.h" #define KERNEL_RADIUS 8 #define KERNEL_WIDTH ( 2 * KERNEL_RADIUS + 1 ) // +1 is there because I need to keep in mind the current pixel being processed upon. __device__ __constant__ float kernel[KERNEL_WIDTH]; #define ROW_TILE_WIDTH 128 // Just for referenece ROW_TILE_HEIGHT = 1 #define COLUMN_TILE_WIDTH 16 #define COLUMN_TILE_HEIGHT 48 #define KERNEL_RADIUS_ALIGNED 16 #define MUL(a, b) __mul24(a, b) // It provides additional speedup for multiplications. /////////////////////////////////// Loop unroll templates /////////////////////////////////////// /////////////////////////// try and use #pragma unroll instead ////////////////////////////////// template<int i> __device__ float unrollRow(float *data) { return data[KERNEL_RADIUS - i] * kernel[i] + unrollRow<i - 1>(data); } template<> __device__ float unrollRow<-1>(float *data) { return 0; } template<int i> __device__ float unrollColumn(float *data) { return data[(KERNEL_RADIUS - i) * COLUMN_TILE_WIDTH] * kernel[i] + unrollColumn<i - 1>(data); } template<> __device__ float unrollColumn<-1>(float *data) { return 0; } ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////// Row convultion ///////////////////////////////////////////// __global__ void row_convolution( gpu_context *ctx, (float *)input, (float *)result ) { __shared__ float smem[KERNEL_RADIUS + ROW_TILE_WIDTH + KERNEL_RADIUS]; // Apron are the extra pixels that are needed to calculate pixel values for input pixels near the border. int tile_start = MUL( blockIdx.x, ROW_TILE_WIDTH); int tile_end = tile_start + ROW_TILE_WIDTH - 1; int apron_start = tile_start - KERNEL_RADIUS; int apron_start_aligned = tile_start - KERNEL_RADIUS_ALIGNED; int apron_end = tile_end + KERNEL_RADIUS; //There may be cases when the apron goes beyond the image borders i.e when consider pixels at the borders. //In those cases just clamp the pixels to the pixels at the border. int tile_end_clamped = max( tile_end, ctx->width - 1); int apron_start_clamped = min( apron_start, 0); int apron_end_clamped = max( apron_end, ctx->height - 1); // Calculating the x and y offset of the current block int yo = MUL( blockIdx.y, ctx->width); // FIXME: Verify this width paramemter. It can be faulty. int x_input = apron_start_aligned + threadIdx.x; // x offst in the input buffer // We need to have inactive threads at the start // (they are made just to align the kernel radius to half warp) if( x_input >= apron_start ) { int x_shared = x_input - apron_start; // The position inside the shared memory. This will eventually lead to x_shared = x - 8. Which is observable from the fact // that shared memory doesnt insclude kernel_radius_aligned. smem[x_shared] = ((x_input >= apron_start_clamped) && (x_input <= apron_end_clamped)) ? 
input[x_input + yo] : 0; } __syncthreads(); //x offset in the result buffer int x_result = tile_start + threadIdx.x; if( x_result < tile_end_clamped) { int x_shared2 = x_result - apron_start; float sum = 0; #ifdef UNROLL_INNER sum = unrollRow<2 * KERNEL_RADIUS>(smem + x_shared2); #else for(int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS ; k++) sum += smem[x_shared2 + k] * kernel[KERNEL_RADIUS - k]; #endif result[yo + x_result] = sum; // yo is the y offset to the input and result buffer. } } /////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////// Column Convolution ///////////////////////////////////////////// __global__ void column_convolution( gpu_context *ctx, (float *)input, (float *)result, int shared_stride, int global_stride) { __shared__ float smem[COLUMN_TILE_WIDTH * (KERNEL_RADIUS + COLUMN_TILE_HEIGHT + KERNEL_RADIUS)]; /////// Calculating the starting and ending indices //////// int tile_start = MUL( blockIdx.y, COLUMN_TILE_HEIGHT); int tile_end = tile_start + COLUMN_TILE_HEIGHT - 1; int apron_start = tile_start - KERNEL_RADIUS; int apron_end = tile_end + KERNEL_RADIUS; /////// Clamping the indices to image borders //////// int tile_end_clamped = min( tile_end, ctx->height); int apron_start_clamped = max( apron_start, 0); int apron_end_clamped = min( apron_end, ctx->height - 1); int xpos = MUL( blockIdx.x, COLUMN_TILE_WIDTH) + threadIdx.x; //// Calculating the corresponding global and shared memory position ///// int shared_pos = MUL( threadIdx.y, COLUMN_TILE_WIDTH) + threadIdx.x; int global_pos = MUL( apron_start + threadIdx.y, ctx->width) + xpos; //// Filling of the shared memory //// #pragma unroll for(int y = apron_start + threadIdx.y; y<= apron_end; y += blockDim.y) { smem[shared_pos] = ((y >= apron_start_clamped) && (y <= apron_end_clamped)) ? 
input[global_pos] : 0; shared_pos += shared_stride; global_pos += global_stride; } __syncthreads(); /////// The global and shared positions excluding the apron pixels /////// shared_pos = MUL(threadIdx.y + KERNEL_RADIUS, COLUMN_TILE_WIDTH) + threadIdx.x; global_pos = MUL(threadIdx.y + tile_start, ctx->width) + xpos; for(int y = threadIdx.y; y <= tile_end_clamped ; y += blockDim.y ) { float sum = 0; #ifdef UNROLL_INNER sum = unrollColumn<2 * KERNEL_RADIUS>(kernel + shared_pos); #else for(int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS ; k++) sum += smem[ shared_pos + MUL( k, COLUMN_TILE_WIDTH) ] * kernel[KERNEL_RADIUS - k]; #endif result[global_pos] = sum; shared_pos += shared_stride; global_pos += global_stride; } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////// Wrapper combinig the row and column convolution ///////////////////////////////// gpu_error_t gpu_blur(gpu_context *ctx) { float *input; float *result; float *h_Kernel; h_Kernel = (float *)malloc((KERNEL_WIDTH * sizeof(float))); cudaMalloc( (void **)&input, (ctx->width * ctx->height * sizeof(float))); cudaMalloc( (void **)&result, (ctx->width * ctx->height * sizeof(float))); float kernelSum = 0; for(i = 0; i < KERNEL_WIDTH; i++) { float dist = (float)(i - KERNEL_RADIUS) / (float)KERNEL_RADIUS; h_Kernel[i] = expf(- dist * dist / 2); kernelSum += h_Kernel[i]; } for(i = 0; i < KERNEL_WIDTH; i++) h_Kernel[i] /= kernelSum; cudaMemcpyToSymbol( kernel, h_Kernel, (KERNEL_WIDTH * sizeof(float)) ); for(int i=0; i < ctx->width * ctx->height ;i++) { input[i] = (float)ctx->output_buffer_1[i]; } int temp1 = (ctx->width % ROW_TILE_WIDTH != 0) ? (ctx->width / ROW_TILE_WIDTH + 1) : (ctx->width / ROW_TILE_WIDTH) ; int temp2 = (ctx->width % COLUMN_TILE_WIDTH != 0) ? (ctx->width / COLUMN_TILE_WIDTH + 1) : (ctx->width / COLUMN_TILE_WIDTH); int temp3 = (ctx->height % COLUMN_TILE_HEIGHT != 0) ? (ctx->height / COLUMN_TILE_HEIGHT + 1) : (ctx->height / COLUMN_TILE_HEIGHT); dim3 row_threads(KERNEL_RADIUS_ALIGNED + ROW_TILE_WIDTH + KERNEL_RADIUS); dim3 row_blocks(temp1,ctx->height); dim3 column_threads(COLUMN_TILE_WIDTH, 8); dim3 column_blocks( temp2, temp3); cudaThreadSynchronize(); row_convolution<<<row_blocks,row_threads>>>( ctx, input, result); column_convolution<<<column_blocks,column_threads>>>(ctx, input, result, COLUMN_TILE_WIDTH * column_threads.y, ctx->width * column_threads.y); for(int i=0; i < ctx->width * ctx->height ;i++) { ctx->output_buffer_1[i] = (unsigned char)result[i]; } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////
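// ---------------------------------------------------------------------------
// Minimal single-threaded reference (an illustration only, not part of the GPU
// path above) of the separable convolution the row/column kernels implement:
// one horizontal pass with the 1-D Gaussian kernel, then one vertical pass
// over the intermediate image, with out-of-range ("apron") pixels treated as
// zero. Names and the zero-padding convention are assumptions for the sketch.
#include <vector>

void separable_blur_reference(const std::vector<float>& in, std::vector<float>& out,
                              int width, int height,
                              const std::vector<float>& k /* size 2*R+1 */) {
  const int R = static_cast<int>(k.size() / 2);
  std::vector<float> tmp(in.size(), 0.f);
  // Row pass: convolve each row with the 1-D kernel.
  for (int y = 0; y < height; ++y)
    for (int x = 0; x < width; ++x) {
      float sum = 0.f;
      for (int d = -R; d <= R; ++d) {
        int xs = x + d;
        if (xs >= 0 && xs < width) sum += in[y * width + xs] * k[R - d];
      }
      tmp[y * width + x] = sum;
    }
  // Column pass: convolve each column of the intermediate image.
  out.assign(in.size(), 0.f);
  for (int y = 0; y < height; ++y)
    for (int x = 0; x < width; ++x) {
      float sum = 0.f;
      for (int d = -R; d <= R; ++d) {
        int ys = y + d;
        if (ys >= 0 && ys < height) sum += tmp[ys * width + x] * k[R - d];
      }
      out[y * width + x] = sum;
    }
}
// ---------------------------------------------------------------------------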
dd01e666f3ed3ec2250f8e9376520a679c96468f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* simple.cu */
/****************************************************************************/
/*                                                                          */
/*  (C) 2010 Texas Advanced Computing Center.                               */
/*                                                                          */
/*  For information, contact Frank Willmore: [email protected]         */
/*                                                                          */
/*  Shareable in accordance with TACC and University of Texas policies.     */
/*                                                                          */
/****************************************************************************/

#include <stdio.h>

__global__ void kernel()
{
  int idx;
  int array[4];

  idx = threadIdx.x;
  array[0] = idx++;
  array[1] = idx++;
  array[2] = idx++;
  array[3] = idx++;
  idx++;
  idx++;
  idx++;
  idx++;
}

int main()
{
  dim3 dimGrid(1);
  dim3 dimBlock(64);

  hipLaunchKernelGGL(( kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, );
  hipDeviceSynchronize(); // block until the device has completed

  hipError_t err = hipGetLastError();
  if (err != hipSuccess) printf("%s\n", hipGetErrorString(err));
}
dd01e666f3ed3ec2250f8e9376520a679c96468f.cu
/* simple.cu */
/****************************************************************************/
/*                                                                          */
/*  (C) 2010 Texas Advanced Computing Center.                               */
/*                                                                          */
/*  For information, contact Frank Willmore: [email protected]         */
/*                                                                          */
/*  Shareable in accordance with TACC and University of Texas policies.     */
/*                                                                          */
/****************************************************************************/

#include <stdio.h>

__global__ void kernel()
{
  int idx;
  int array[4];

  idx = threadIdx.x;
  array[0] = idx++;
  array[1] = idx++;
  array[2] = idx++;
  array[3] = idx++;
  idx++;
  idx++;
  idx++;
  idx++;
}

int main()
{
  dim3 dimGrid(1);
  dim3 dimBlock(64);

  kernel<<< dimGrid, dimBlock>>>();
  cudaThreadSynchronize(); // block until the device has completed

  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) printf("%s\n", cudaGetErrorString(err));
}
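// ---------------------------------------------------------------------------
// Side note, a hedged sketch only: cudaThreadSynchronize() is a deprecated
// alias of cudaDeviceSynchronize() (the hipified copy above already maps it to
// hipDeviceSynchronize). A fuller check than the one in main() also inspects
// the status returned by the synchronize call, since launch-time and
// execution-time errors are reported separately. The helper name below is an
// assumption for illustration.
#include <cstdio>
#include <cuda_runtime.h>

inline void check_last_launch(const char* what) {
  cudaError_t launch_err = cudaGetLastError();       // errors from the launch itself
  cudaError_t sync_err   = cudaDeviceSynchronize();  // errors raised while the kernel ran
  if (launch_err != cudaSuccess)
    printf("%s launch failed: %s\n", what, cudaGetErrorString(launch_err));
  if (sync_err != cudaSuccess)
    printf("%s execution failed: %s\n", what, cudaGetErrorString(sync_err));
}
// ---------------------------------------------------------------------------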
4a4c993118cb90c71614f74592ec8f29cf90a5a2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/grid_sample_grad_kernel.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_info.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/gpu/grid_sample_utils.h" namespace phi { template <typename T> static __forceinline__ __device__ void AtomicAdd( T* data, int h, int w, int sH, int sW, int H, int W, T delta) { if (InBounds(h, w, H, W)) { paddle::platform::CudaAtomicAdd(data + h * sH + w * sW, delta); } } template <typename T> static __forceinline__ __device__ void AtomicAdd3D(T* data, int d, int h, int w, int sD, int sH, int sW, int D, int H, int W, T delta) { if (InBounds3D(d, h, w, D, H, W)) { paddle::platform::CudaAtomicAdd(data + d * sD + h * sH + w * sW, delta); } } template <typename T> static __forceinline__ __device__ T UnnormalizeWithMask(T coord, int size, bool align_corners, T* grad_in) { if (align_corners) { *grad_in = static_cast<T>(size - 1) / 2; return ((coord + 1.f) / 2) * (size - 1); } else { *grad_in = static_cast<T>(size) / 2; return ((coord + 1.f) * size - 1) / 2; } } template <typename T> static __forceinline__ __device__ T ClipIndexesWithMask(T in, int clip_limit, T* grad_in) { if (in <= static_cast<T>(0)) { *grad_in = static_cast<T>(0); return static_cast<T>(0); } else { T max = static_cast<T>(clip_limit - 1); if (in >= max) { *grad_in = static_cast<T>(0); return max; } else { *grad_in = static_cast<T>(1); return in; } } } template <typename T> static __forceinline__ __device__ T ReflectIndexesWithMask(T in, int twice_low, int twice_high, T* grad_in) { if (twice_low == twice_high) { *grad_in = static_cast<T>(0); return static_cast<T>(0); } int grad_in_mult_; T min = static_cast<T>(twice_low) / 2; T span = static_cast<T>(twice_high - twice_low) / 2; in = in - min; if (in < static_cast<T>(0)) { grad_in_mult_ = -1; in = -in; } else { grad_in_mult_ = 1; } T extra = fmod(in, span); int flips = static_cast<int>(floor(in / span)); if (flips % 2 == 0) { *grad_in = static_cast<T>(grad_in_mult_); return extra + min; } else { *grad_in = static_cast<T>(-grad_in_mult_); return span - extra + min; } } template <typename T> static __forceinline__ __device__ T ComputePositionsWithMask(T coord, int size, PaddingMode padding_mode, bool align_corners, T* grad_in) { T grad_clip, grad_refl; coord = UnnormalizeWithMask<T>(coord, size, align_corners, grad_in); if (padding_mode == PaddingMode::border) { coord = ClipIndexesWithMask(coord, size, &grad_clip); *grad_in = (*grad_in) * grad_clip; } else if (padding_mode == PaddingMode::reflect) { if (align_corners) { coord = ReflectIndexesWithMask(coord, 0, 2 * (size - 1), 
&grad_refl); } else { coord = ReflectIndexesWithMask(coord, -1, 2 * size - 1, &grad_refl); } coord = ClipIndexesWithMask(coord, size, &grad_clip); *grad_in = (*grad_in) * grad_refl * grad_clip; } return coord; } template <typename T> __global__ void GridSamplerCudaBackwardKernel(const int nthreads, const T* grad_output, const T* input, const T* grid, int n, int out_c, int out_h, int out_w, int in_h, int in_w, T* grad_input, T* grad_grid, const Mode mode, const PaddingMode padding_mode, bool align_corners) { int inp_sN = out_c * in_h * in_w; int inp_sC = in_h * in_w; int inp_sH = in_w; int inp_sW = 1; int grid_sN = out_h * out_w * 2; int grid_sH = out_w * 2; int grid_sW = 2; int grid_sCoor = 1; int gOut_sN = out_c * out_h * out_w; int gOut_sC = out_h * out_w; int gOut_sH = out_w; int gOut_sW = 1; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_w; const int h = (index / out_w) % out_h; const int n = index / (out_h * out_w); const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; T ix = grid[grid_offset]; T iy = grid[grid_offset + grid_sCoor]; T gix_mult, giy_mult; ix = ComputePositionsWithMask( ix, in_w, padding_mode, align_corners, &gix_mult); iy = ComputePositionsWithMask( iy, in_h, padding_mode, align_corners, &giy_mult); if (mode == Mode::bilinear) { int ix_nw = static_cast<int>(floor(ix)); int iy_nw = static_cast<int>(floor(iy)); int ix_ne = ix_nw + 1; int iy_ne = iy_nw; int ix_sw = ix_nw; int iy_sw = iy_nw + 1; int ix_se = ix_nw + 1; int iy_se = iy_nw + 1; T nw = (ix_se - ix) * (iy_se - iy); T ne = (ix - ix_sw) * (iy_sw - iy); T sw = (ix_ne - ix) * (iy - iy_ne); T se = (ix - ix_nw) * (iy - iy_nw); T gix = static_cast<T>(0), giy = static_cast<T>(0); int gOut_offset = n * gOut_sN + h * gOut_sH + w * gOut_sW; T* gInp_ptr_NC = grad_input + n * inp_sN; int inp_offset_NC = n * inp_sN; for (int c = 0; c < out_c; ++c, inp_offset_NC += inp_sC, gInp_ptr_NC += inp_sC, gOut_offset += gOut_sC) { T gOut = grad_output[gOut_offset]; AtomicAdd( gInp_ptr_NC, iy_nw, ix_nw, inp_sH, inp_sW, in_h, in_w, nw * gOut); AtomicAdd( gInp_ptr_NC, iy_ne, ix_ne, inp_sH, inp_sW, in_h, in_w, ne * gOut); AtomicAdd( gInp_ptr_NC, iy_sw, ix_sw, inp_sH, inp_sW, in_h, in_w, sw * gOut); AtomicAdd( gInp_ptr_NC, iy_se, ix_se, inp_sH, inp_sW, in_h, in_w, se * gOut); if (InBounds(iy_nw, ix_nw, in_h, in_w)) { T nw_val = input[inp_offset_NC + iy_nw * inp_sH + ix_nw * inp_sW]; gix -= nw_val * (iy_se - iy) * gOut; giy -= nw_val * (ix_se - ix) * gOut; } if (InBounds(iy_ne, ix_ne, in_h, in_w)) { T ne_val = input[inp_offset_NC + iy_ne * inp_sH + ix_ne * inp_sW]; gix += ne_val * (iy_sw - iy) * gOut; giy -= ne_val * (ix - ix_sw) * gOut; } if (InBounds(iy_sw, ix_sw, in_h, in_w)) { T sw_val = input[inp_offset_NC + iy_sw * inp_sH + ix_sw * inp_sW]; gix -= sw_val * (iy - iy_ne) * gOut; giy += sw_val * (ix_ne - ix) * gOut; } if (InBounds(iy_se, ix_se, in_h, in_w)) { T se_val = input[inp_offset_NC + iy_se * inp_sH + ix_se * inp_sW]; gix += se_val * (iy - iy_nw) * gOut; giy += se_val * (ix - ix_nw) * gOut; } } if (grad_grid != nullptr) { T* gGrid_ptr_NHW = grad_grid + index * grid_sW; gGrid_ptr_NHW[0] = gix_mult * gix; gGrid_ptr_NHW[1] = giy_mult * giy; } } else if (mode == Mode::nearest) { int ix_nearest = static_cast<int>(std::nearbyint(ix)); int iy_nearest = static_cast<int>(std::nearbyint(iy)); int gOut_offset = n * gOut_sN + h * gOut_sH + w * gOut_sW; T* gInp_ptr_NC = grad_input + n * inp_sN; for (int c = 0; c < out_c; ++c, gInp_ptr_NC += inp_sC, gOut_offset += gOut_sC) { AtomicAdd(gInp_ptr_NC, iy_nearest, ix_nearest, 
inp_sH, inp_sW, in_h, in_w, grad_output[gOut_offset]); } if (grad_grid != nullptr) { T* gGrid_ptr_NHW = grad_grid + index * grid_sW; gGrid_ptr_NHW[0] = static_cast<T>(0); gGrid_ptr_NHW[1] = static_cast<T>(0); } } } } template <typename T> __global__ void GridSampler3DCudaBackwardKernel(const int nthreads, const T* grad_output, const T* input, const T* grid, int out_c, int out_d, int out_h, int out_w, int in_d, int in_h, int in_w, T* grad_input, T* grad_grid, const Mode mode, const PaddingMode padding_mode, bool align_corners) { int inp_sW = 1; int inp_sH = in_w; int inp_sD = in_h * in_w; int inp_sC = in_d * inp_sD; int inp_sN = out_c * inp_sC; int grid_sCoor = 1; int grid_sW = 3; int grid_sH = out_w * grid_sW; int grid_sD = out_h * grid_sH; int grid_sN = out_d * grid_sD; int gOut_sW = 1; int gOut_sH = out_w; int gOut_sD = out_h * out_w; int gOut_sC = out_d * gOut_sD; int gOut_sN = out_c * gOut_sC; CUDA_KERNEL_LOOP_TYPE(index, nthreads, int) { const int w = index % out_w; const int h = (index / out_w) % out_h; const int d = (index / (out_h * out_w)) % out_d; const int n = index / (out_d * out_h * out_w); const auto grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid T ix = grid[grid_offset]; T iy = grid[grid_offset + grid_sCoor]; T iz = grid[grid_offset + 2 * grid_sCoor]; // multipliers for gradients on ix, iy, and iz T gix_mult, giy_mult, giz_mult; ix = ComputePositionsWithMask( ix, in_w, padding_mode, align_corners, &gix_mult); iy = ComputePositionsWithMask( iy, in_h, padding_mode, align_corners, &giy_mult); iz = ComputePositionsWithMask( iz, in_d, padding_mode, align_corners, &giz_mult); if (mode == Mode::bilinear) { // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom int ix_tnw = static_cast<int>(::floor(ix)); int iy_tnw = static_cast<int>(::floor(iy)); int iz_tnw = static_cast<int>(::floor(iz)); int ix_tne = ix_tnw + 1; int iy_tne = iy_tnw; int iz_tne = iz_tnw; int ix_tsw = ix_tnw; int iy_tsw = iy_tnw + 1; int iz_tsw = iz_tnw; int ix_tse = ix_tnw + 1; int iy_tse = iy_tnw + 1; int iz_tse = iz_tnw; int ix_bnw = ix_tnw; int iy_bnw = iy_tnw; int iz_bnw = iz_tnw + 1; int ix_bne = ix_tnw + 1; int iy_bne = iy_tnw; int iz_bne = iz_tnw + 1; int ix_bsw = ix_tnw; int iy_bsw = iy_tnw + 1; int iz_bsw = iz_tnw + 1; int ix_bse = ix_tnw + 1; int iy_bse = iy_tnw + 1; int iz_bse = iz_tnw + 1; // get surfaces to each neighbor: T tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); T tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); T tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); T tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); T bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); T bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); T bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); T bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); T gix = static_cast<T>(0), giy = static_cast<T>(0), giz = static_cast<T>(0); int gOut_offset = n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; int inp_offset_NC = n * inp_sN; T* gInp_ptr_NC = grad_input + n * inp_sN; for (int c = 0; c < out_c; ++c, gOut_offset += gOut_sC, gInp_ptr_NC += inp_sC, inp_offset_NC += inp_sC) { T gOut = grad_output[gOut_offset]; AtomicAdd3D(gInp_ptr_NC, iz_tnw, iy_tnw, ix_tnw, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, tnw * gOut); AtomicAdd3D(gInp_ptr_NC, iz_tne, iy_tne, ix_tne, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, tne * gOut); AtomicAdd3D(gInp_ptr_NC, iz_tsw, iy_tsw, ix_tsw, 
inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, tsw * gOut); AtomicAdd3D(gInp_ptr_NC, iz_tse, iy_tse, ix_tse, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, tse * gOut); AtomicAdd3D(gInp_ptr_NC, iz_bnw, iy_bnw, ix_bnw, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, bnw * gOut); AtomicAdd3D(gInp_ptr_NC, iz_bne, iy_bne, ix_bne, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, bne * gOut); AtomicAdd3D(gInp_ptr_NC, iz_bsw, iy_bsw, ix_bsw, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, bsw * gOut); AtomicAdd3D(gInp_ptr_NC, iz_bse, iy_bse, ix_bse, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, bse * gOut); // calculate grad_grid if (InBounds3D(iz_tnw, iy_tnw, ix_tnw, in_d, in_h, in_w)) { T tnw_val = input[inp_offset_NC + iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW]; gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut; giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut; giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut; } if (InBounds3D(iz_tne, iy_tne, ix_tne, in_d, in_h, in_w)) { T tne_val = input[inp_offset_NC + iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW]; gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut; giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut; giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut; } if (InBounds3D(iz_tsw, iy_tsw, ix_tsw, in_d, in_h, in_w)) { T tsw_val = input[inp_offset_NC + iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW]; gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut; giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut; giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut; } if (InBounds3D(iz_tse, iy_tse, ix_tse, in_d, in_h, in_w)) { T tse_val = input[inp_offset_NC + iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW]; gix += tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut; giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut; giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut; } if (InBounds3D(iz_bnw, iy_bnw, ix_bnw, in_d, in_h, in_w)) { T bnw_val = input[inp_offset_NC + iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW]; gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut; giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut; giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut; } if (InBounds3D(iz_bne, iy_bne, ix_bne, in_d, in_h, in_w)) { T bne_val = input[inp_offset_NC + iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW]; gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut; giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut; giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut; } if (InBounds3D(iz_bsw, iy_bsw, ix_bsw, in_d, in_h, in_w)) { T bsw_val = input[inp_offset_NC + iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW]; gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut; giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut; giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut; } if (InBounds3D(iz_bse, iy_bse, ix_bse, in_d, in_h, in_w)) { T bse_val = input[inp_offset_NC + iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW]; gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut; giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut; giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut; } } if (grad_grid != nullptr) { T* gGrid_ptr_NDHW = grad_grid + index * grid_sW; gGrid_ptr_NDHW[0] = gix_mult * gix; gGrid_ptr_NDHW[1] = giy_mult * giy; gGrid_ptr_NDHW[2] = giz_mult * giz; } } else if (mode == Mode::nearest) { auto ix_nearest = static_cast<int>(std::round(ix)); auto iy_nearest = static_cast<int>(std::round(iy)); auto iz_nearest = static_cast<int>(std::round(iz)); // assign nearest neighor pixel value to output pixel 
int gOut_offset = n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; T* gInp_ptr_NC = grad_input + n * inp_sN; for (int c = 0; c < out_c; ++c, gOut_offset += gOut_sC, gInp_ptr_NC += inp_sC) { AtomicAdd3D(gInp_ptr_NC, iz_nearest, iy_nearest, ix_nearest, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, grad_output[gOut_offset]); } if (grad_grid != nullptr) { T* gGrid_ptr_NDHW = grad_grid + index * grid_sW; gGrid_ptr_NDHW[0] = static_cast<T>(0); gGrid_ptr_NDHW[1] = static_cast<T>(0); gGrid_ptr_NDHW[2] = static_cast<T>(0); } } } } template <typename T, typename Context> void GridSampleGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& grid, const DenseTensor& out_grad, const std::string& mode, const std::string& padding_mode, bool align_corners, DenseTensor* x_grad, DenseTensor* grid_grad) { PaddingMode enum_padding_mode; Mode enum_mode; if (padding_mode == "border") { enum_padding_mode = PaddingMode::border; } else if (padding_mode == "reflection") { enum_padding_mode = PaddingMode::reflect; } else { enum_padding_mode = PaddingMode::zeros; } if (mode == "nearest") { enum_mode = Mode::nearest; } else { enum_mode = Mode::bilinear; } if (x.dims().size() == 4) { const int n = grid.dims()[0]; const int out_h = grid.dims()[1]; const int out_w = grid.dims()[2]; const int c = x.dims()[1]; const int in_h = x.dims()[2]; const int in_w = x.dims()[3]; dev_ctx.template Alloc<T>(x_grad); phi::funcs::SetConstant<Context, T>()(dev_ctx, x_grad, static_cast<T>(0)); T* grid_grad_data = nullptr; if (grid_grad != nullptr) { grid_grad_data = dev_ctx.template Alloc<T>(grid_grad); } int count = static_cast<int>(n * out_h * out_w); auto cu_stream = dev_ctx.stream(); backends::gpu::GpuLaunchConfig config = backends::gpu::GetGpuLaunchConfig1D(dev_ctx, count); hipLaunchKernelGGL(( GridSamplerCudaBackwardKernel<T>) , dim3(config.block_per_grid), dim3(config.thread_per_block), 0, cu_stream, count, out_grad.data<T>(), x.data<T>(), grid.data<T>(), n, c, out_h, out_w, in_h, in_w, x_grad->data<T>(), grid_grad_data, enum_mode, enum_padding_mode, align_corners); } else { const int out_d = grid.dims()[1]; const int out_h = grid.dims()[2]; const int out_w = grid.dims()[3]; const int n = x.dims()[0]; const int c = x.dims()[1]; const int in_d = x.dims()[2]; const int in_h = x.dims()[3]; const int in_w = x.dims()[4]; dev_ctx.template Alloc<T>(x_grad); phi::funcs::SetConstant<Context, T>()(dev_ctx, x_grad, static_cast<T>(0)); T* grid_grad_data = nullptr; if (grid_grad != nullptr) { grid_grad_data = dev_ctx.template Alloc<T>(grid_grad); } int count = static_cast<int>(n * out_d * out_h * out_w); auto cu_stream = dev_ctx.stream(); backends::gpu::GpuLaunchConfig config = backends::gpu::GetGpuLaunchConfig1D(dev_ctx, count); hipLaunchKernelGGL(( GridSampler3DCudaBackwardKernel<T>) , dim3(config.block_per_grid), dim3(config.thread_per_block), 0, cu_stream, count, out_grad.data<T>(), x.data<T>(), grid.data<T>(), c, out_d, out_h, out_w, in_d, in_h, in_w, x_grad->data<T>(), grid_grad_data, enum_mode, enum_padding_mode, align_corners); } } } // namespace phi PD_REGISTER_KERNEL(grid_sample_grad, GPU, ALL_LAYOUT, phi::GridSampleGradKernel, float, double) {}
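/* Illustrative sketch only -- not part of the Paddle kernel above. It shows the
   coordinate arithmetic the backward kernels depend on: how a normalized grid
   coordinate in [-1, 1] is unnormalized to input-pixel space (same formula as
   UnnormalizeWithMask, without the gradient multiplier) and how the four
   bilinear corner weights are formed. The helper names `unnormalize` and the
   chosen sample values are assumptions for demonstration. */
#include <cmath>
#include <cstdio>

static float unnormalize(float coord, int size, bool align_corners) {
  // align_corners maps -1/+1 onto the centers of the first/last pixel;
  // otherwise -1/+1 map onto the outer edges of the image.
  return align_corners ? ((coord + 1.f) / 2.f) * (size - 1)
                       : ((coord + 1.f) * size - 1.f) / 2.f;
}

int main() {
  const int in_w = 8, in_h = 6;
  const bool align_corners = false;
  float ix = unnormalize(0.25f, in_w, align_corners);  // x in input space
  float iy = unnormalize(-0.5f, in_h, align_corners);  // y in input space
  int ix_nw = static_cast<int>(std::floor(ix));
  int iy_nw = static_cast<int>(std::floor(iy));
  // North-west / north-east / south-west / south-east weights; they sum to 1.
  float nw = (ix_nw + 1 - ix) * (iy_nw + 1 - iy);
  float ne = (ix - ix_nw) * (iy_nw + 1 - iy);
  float sw = (ix_nw + 1 - ix) * (iy - iy_nw);
  float se = (ix - ix_nw) * (iy - iy_nw);
  printf("ix=%.3f iy=%.3f  nw=%.3f ne=%.3f sw=%.3f se=%.3f (sum=%.3f)\n",
         ix, iy, nw, ne, sw, se, nw + ne + sw + se);
  return 0;
}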
4a4c993118cb90c71614f74592ec8f29cf90a5a2.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/grid_sample_grad_kernel.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_info.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/gpu/grid_sample_utils.h" namespace phi { template <typename T> static __forceinline__ __device__ void AtomicAdd( T* data, int h, int w, int sH, int sW, int H, int W, T delta) { if (InBounds(h, w, H, W)) { paddle::platform::CudaAtomicAdd(data + h * sH + w * sW, delta); } } template <typename T> static __forceinline__ __device__ void AtomicAdd3D(T* data, int d, int h, int w, int sD, int sH, int sW, int D, int H, int W, T delta) { if (InBounds3D(d, h, w, D, H, W)) { paddle::platform::CudaAtomicAdd(data + d * sD + h * sH + w * sW, delta); } } template <typename T> static __forceinline__ __device__ T UnnormalizeWithMask(T coord, int size, bool align_corners, T* grad_in) { if (align_corners) { *grad_in = static_cast<T>(size - 1) / 2; return ((coord + 1.f) / 2) * (size - 1); } else { *grad_in = static_cast<T>(size) / 2; return ((coord + 1.f) * size - 1) / 2; } } template <typename T> static __forceinline__ __device__ T ClipIndexesWithMask(T in, int clip_limit, T* grad_in) { if (in <= static_cast<T>(0)) { *grad_in = static_cast<T>(0); return static_cast<T>(0); } else { T max = static_cast<T>(clip_limit - 1); if (in >= max) { *grad_in = static_cast<T>(0); return max; } else { *grad_in = static_cast<T>(1); return in; } } } template <typename T> static __forceinline__ __device__ T ReflectIndexesWithMask(T in, int twice_low, int twice_high, T* grad_in) { if (twice_low == twice_high) { *grad_in = static_cast<T>(0); return static_cast<T>(0); } int grad_in_mult_; T min = static_cast<T>(twice_low) / 2; T span = static_cast<T>(twice_high - twice_low) / 2; in = in - min; if (in < static_cast<T>(0)) { grad_in_mult_ = -1; in = -in; } else { grad_in_mult_ = 1; } T extra = fmod(in, span); int flips = static_cast<int>(floor(in / span)); if (flips % 2 == 0) { *grad_in = static_cast<T>(grad_in_mult_); return extra + min; } else { *grad_in = static_cast<T>(-grad_in_mult_); return span - extra + min; } } template <typename T> static __forceinline__ __device__ T ComputePositionsWithMask(T coord, int size, PaddingMode padding_mode, bool align_corners, T* grad_in) { T grad_clip, grad_refl; coord = UnnormalizeWithMask<T>(coord, size, align_corners, grad_in); if (padding_mode == PaddingMode::border) { coord = ClipIndexesWithMask(coord, size, &grad_clip); *grad_in = (*grad_in) * grad_clip; } else if (padding_mode == PaddingMode::reflect) { if (align_corners) { coord = ReflectIndexesWithMask(coord, 0, 2 * (size - 1), &grad_refl); } else { coord = ReflectIndexesWithMask(coord, -1, 2 * size - 1, &grad_refl); } coord 
= ClipIndexesWithMask(coord, size, &grad_clip); *grad_in = (*grad_in) * grad_refl * grad_clip; } return coord; } template <typename T> __global__ void GridSamplerCudaBackwardKernel(const int nthreads, const T* grad_output, const T* input, const T* grid, int n, int out_c, int out_h, int out_w, int in_h, int in_w, T* grad_input, T* grad_grid, const Mode mode, const PaddingMode padding_mode, bool align_corners) { int inp_sN = out_c * in_h * in_w; int inp_sC = in_h * in_w; int inp_sH = in_w; int inp_sW = 1; int grid_sN = out_h * out_w * 2; int grid_sH = out_w * 2; int grid_sW = 2; int grid_sCoor = 1; int gOut_sN = out_c * out_h * out_w; int gOut_sC = out_h * out_w; int gOut_sH = out_w; int gOut_sW = 1; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_w; const int h = (index / out_w) % out_h; const int n = index / (out_h * out_w); const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; T ix = grid[grid_offset]; T iy = grid[grid_offset + grid_sCoor]; T gix_mult, giy_mult; ix = ComputePositionsWithMask( ix, in_w, padding_mode, align_corners, &gix_mult); iy = ComputePositionsWithMask( iy, in_h, padding_mode, align_corners, &giy_mult); if (mode == Mode::bilinear) { int ix_nw = static_cast<int>(floor(ix)); int iy_nw = static_cast<int>(floor(iy)); int ix_ne = ix_nw + 1; int iy_ne = iy_nw; int ix_sw = ix_nw; int iy_sw = iy_nw + 1; int ix_se = ix_nw + 1; int iy_se = iy_nw + 1; T nw = (ix_se - ix) * (iy_se - iy); T ne = (ix - ix_sw) * (iy_sw - iy); T sw = (ix_ne - ix) * (iy - iy_ne); T se = (ix - ix_nw) * (iy - iy_nw); T gix = static_cast<T>(0), giy = static_cast<T>(0); int gOut_offset = n * gOut_sN + h * gOut_sH + w * gOut_sW; T* gInp_ptr_NC = grad_input + n * inp_sN; int inp_offset_NC = n * inp_sN; for (int c = 0; c < out_c; ++c, inp_offset_NC += inp_sC, gInp_ptr_NC += inp_sC, gOut_offset += gOut_sC) { T gOut = grad_output[gOut_offset]; AtomicAdd( gInp_ptr_NC, iy_nw, ix_nw, inp_sH, inp_sW, in_h, in_w, nw * gOut); AtomicAdd( gInp_ptr_NC, iy_ne, ix_ne, inp_sH, inp_sW, in_h, in_w, ne * gOut); AtomicAdd( gInp_ptr_NC, iy_sw, ix_sw, inp_sH, inp_sW, in_h, in_w, sw * gOut); AtomicAdd( gInp_ptr_NC, iy_se, ix_se, inp_sH, inp_sW, in_h, in_w, se * gOut); if (InBounds(iy_nw, ix_nw, in_h, in_w)) { T nw_val = input[inp_offset_NC + iy_nw * inp_sH + ix_nw * inp_sW]; gix -= nw_val * (iy_se - iy) * gOut; giy -= nw_val * (ix_se - ix) * gOut; } if (InBounds(iy_ne, ix_ne, in_h, in_w)) { T ne_val = input[inp_offset_NC + iy_ne * inp_sH + ix_ne * inp_sW]; gix += ne_val * (iy_sw - iy) * gOut; giy -= ne_val * (ix - ix_sw) * gOut; } if (InBounds(iy_sw, ix_sw, in_h, in_w)) { T sw_val = input[inp_offset_NC + iy_sw * inp_sH + ix_sw * inp_sW]; gix -= sw_val * (iy - iy_ne) * gOut; giy += sw_val * (ix_ne - ix) * gOut; } if (InBounds(iy_se, ix_se, in_h, in_w)) { T se_val = input[inp_offset_NC + iy_se * inp_sH + ix_se * inp_sW]; gix += se_val * (iy - iy_nw) * gOut; giy += se_val * (ix - ix_nw) * gOut; } } if (grad_grid != nullptr) { T* gGrid_ptr_NHW = grad_grid + index * grid_sW; gGrid_ptr_NHW[0] = gix_mult * gix; gGrid_ptr_NHW[1] = giy_mult * giy; } } else if (mode == Mode::nearest) { int ix_nearest = static_cast<int>(std::nearbyint(ix)); int iy_nearest = static_cast<int>(std::nearbyint(iy)); int gOut_offset = n * gOut_sN + h * gOut_sH + w * gOut_sW; T* gInp_ptr_NC = grad_input + n * inp_sN; for (int c = 0; c < out_c; ++c, gInp_ptr_NC += inp_sC, gOut_offset += gOut_sC) { AtomicAdd(gInp_ptr_NC, iy_nearest, ix_nearest, inp_sH, inp_sW, in_h, in_w, grad_output[gOut_offset]); } if (grad_grid != nullptr) { T* 
gGrid_ptr_NHW = grad_grid + index * grid_sW; gGrid_ptr_NHW[0] = static_cast<T>(0); gGrid_ptr_NHW[1] = static_cast<T>(0); } } } } template <typename T> __global__ void GridSampler3DCudaBackwardKernel(const int nthreads, const T* grad_output, const T* input, const T* grid, int out_c, int out_d, int out_h, int out_w, int in_d, int in_h, int in_w, T* grad_input, T* grad_grid, const Mode mode, const PaddingMode padding_mode, bool align_corners) { int inp_sW = 1; int inp_sH = in_w; int inp_sD = in_h * in_w; int inp_sC = in_d * inp_sD; int inp_sN = out_c * inp_sC; int grid_sCoor = 1; int grid_sW = 3; int grid_sH = out_w * grid_sW; int grid_sD = out_h * grid_sH; int grid_sN = out_d * grid_sD; int gOut_sW = 1; int gOut_sH = out_w; int gOut_sD = out_h * out_w; int gOut_sC = out_d * gOut_sD; int gOut_sN = out_c * gOut_sC; CUDA_KERNEL_LOOP_TYPE(index, nthreads, int) { const int w = index % out_w; const int h = (index / out_w) % out_h; const int d = (index / (out_h * out_w)) % out_d; const int n = index / (out_d * out_h * out_w); const auto grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid T ix = grid[grid_offset]; T iy = grid[grid_offset + grid_sCoor]; T iz = grid[grid_offset + 2 * grid_sCoor]; // multipliers for gradients on ix, iy, and iz T gix_mult, giy_mult, giz_mult; ix = ComputePositionsWithMask( ix, in_w, padding_mode, align_corners, &gix_mult); iy = ComputePositionsWithMask( iy, in_h, padding_mode, align_corners, &giy_mult); iz = ComputePositionsWithMask( iz, in_d, padding_mode, align_corners, &giz_mult); if (mode == Mode::bilinear) { // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom int ix_tnw = static_cast<int>(std::floor(ix)); int iy_tnw = static_cast<int>(std::floor(iy)); int iz_tnw = static_cast<int>(std::floor(iz)); int ix_tne = ix_tnw + 1; int iy_tne = iy_tnw; int iz_tne = iz_tnw; int ix_tsw = ix_tnw; int iy_tsw = iy_tnw + 1; int iz_tsw = iz_tnw; int ix_tse = ix_tnw + 1; int iy_tse = iy_tnw + 1; int iz_tse = iz_tnw; int ix_bnw = ix_tnw; int iy_bnw = iy_tnw; int iz_bnw = iz_tnw + 1; int ix_bne = ix_tnw + 1; int iy_bne = iy_tnw; int iz_bne = iz_tnw + 1; int ix_bsw = ix_tnw; int iy_bsw = iy_tnw + 1; int iz_bsw = iz_tnw + 1; int ix_bse = ix_tnw + 1; int iy_bse = iy_tnw + 1; int iz_bse = iz_tnw + 1; // get surfaces to each neighbor: T tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); T tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); T tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); T tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); T bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); T bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); T bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); T bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); T gix = static_cast<T>(0), giy = static_cast<T>(0), giz = static_cast<T>(0); int gOut_offset = n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; int inp_offset_NC = n * inp_sN; T* gInp_ptr_NC = grad_input + n * inp_sN; for (int c = 0; c < out_c; ++c, gOut_offset += gOut_sC, gInp_ptr_NC += inp_sC, inp_offset_NC += inp_sC) { T gOut = grad_output[gOut_offset]; AtomicAdd3D(gInp_ptr_NC, iz_tnw, iy_tnw, ix_tnw, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, tnw * gOut); AtomicAdd3D(gInp_ptr_NC, iz_tne, iy_tne, ix_tne, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, tne * gOut); AtomicAdd3D(gInp_ptr_NC, iz_tsw, iy_tsw, ix_tsw, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, tsw * gOut); 
AtomicAdd3D(gInp_ptr_NC, iz_tse, iy_tse, ix_tse, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, tse * gOut); AtomicAdd3D(gInp_ptr_NC, iz_bnw, iy_bnw, ix_bnw, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, bnw * gOut); AtomicAdd3D(gInp_ptr_NC, iz_bne, iy_bne, ix_bne, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, bne * gOut); AtomicAdd3D(gInp_ptr_NC, iz_bsw, iy_bsw, ix_bsw, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, bsw * gOut); AtomicAdd3D(gInp_ptr_NC, iz_bse, iy_bse, ix_bse, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, bse * gOut); // calculate grad_grid if (InBounds3D(iz_tnw, iy_tnw, ix_tnw, in_d, in_h, in_w)) { T tnw_val = input[inp_offset_NC + iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW]; gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut; giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut; giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut; } if (InBounds3D(iz_tne, iy_tne, ix_tne, in_d, in_h, in_w)) { T tne_val = input[inp_offset_NC + iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW]; gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut; giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut; giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut; } if (InBounds3D(iz_tsw, iy_tsw, ix_tsw, in_d, in_h, in_w)) { T tsw_val = input[inp_offset_NC + iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW]; gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut; giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut; giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut; } if (InBounds3D(iz_tse, iy_tse, ix_tse, in_d, in_h, in_w)) { T tse_val = input[inp_offset_NC + iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW]; gix += tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut; giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut; giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut; } if (InBounds3D(iz_bnw, iy_bnw, ix_bnw, in_d, in_h, in_w)) { T bnw_val = input[inp_offset_NC + iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW]; gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut; giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut; giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut; } if (InBounds3D(iz_bne, iy_bne, ix_bne, in_d, in_h, in_w)) { T bne_val = input[inp_offset_NC + iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW]; gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut; giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut; giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut; } if (InBounds3D(iz_bsw, iy_bsw, ix_bsw, in_d, in_h, in_w)) { T bsw_val = input[inp_offset_NC + iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW]; gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut; giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut; giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut; } if (InBounds3D(iz_bse, iy_bse, ix_bse, in_d, in_h, in_w)) { T bse_val = input[inp_offset_NC + iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW]; gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut; giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut; giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut; } } if (grad_grid != nullptr) { T* gGrid_ptr_NDHW = grad_grid + index * grid_sW; gGrid_ptr_NDHW[0] = gix_mult * gix; gGrid_ptr_NDHW[1] = giy_mult * giy; gGrid_ptr_NDHW[2] = giz_mult * giz; } } else if (mode == Mode::nearest) { auto ix_nearest = static_cast<int>(std::round(ix)); auto iy_nearest = static_cast<int>(std::round(iy)); auto iz_nearest = static_cast<int>(std::round(iz)); // assign nearest neighor pixel value to output pixel int gOut_offset = n * gOut_sN + d * gOut_sD + h * 
gOut_sH + w * gOut_sW; T* gInp_ptr_NC = grad_input + n * inp_sN; for (int c = 0; c < out_c; ++c, gOut_offset += gOut_sC, gInp_ptr_NC += inp_sC) { AtomicAdd3D(gInp_ptr_NC, iz_nearest, iy_nearest, ix_nearest, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, grad_output[gOut_offset]); } if (grad_grid != nullptr) { T* gGrid_ptr_NDHW = grad_grid + index * grid_sW; gGrid_ptr_NDHW[0] = static_cast<T>(0); gGrid_ptr_NDHW[1] = static_cast<T>(0); gGrid_ptr_NDHW[2] = static_cast<T>(0); } } } } template <typename T, typename Context> void GridSampleGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& grid, const DenseTensor& out_grad, const std::string& mode, const std::string& padding_mode, bool align_corners, DenseTensor* x_grad, DenseTensor* grid_grad) { PaddingMode enum_padding_mode; Mode enum_mode; if (padding_mode == "border") { enum_padding_mode = PaddingMode::border; } else if (padding_mode == "reflection") { enum_padding_mode = PaddingMode::reflect; } else { enum_padding_mode = PaddingMode::zeros; } if (mode == "nearest") { enum_mode = Mode::nearest; } else { enum_mode = Mode::bilinear; } if (x.dims().size() == 4) { const int n = grid.dims()[0]; const int out_h = grid.dims()[1]; const int out_w = grid.dims()[2]; const int c = x.dims()[1]; const int in_h = x.dims()[2]; const int in_w = x.dims()[3]; dev_ctx.template Alloc<T>(x_grad); phi::funcs::SetConstant<Context, T>()(dev_ctx, x_grad, static_cast<T>(0)); T* grid_grad_data = nullptr; if (grid_grad != nullptr) { grid_grad_data = dev_ctx.template Alloc<T>(grid_grad); } int count = static_cast<int>(n * out_h * out_w); auto cu_stream = dev_ctx.stream(); backends::gpu::GpuLaunchConfig config = backends::gpu::GetGpuLaunchConfig1D(dev_ctx, count); GridSamplerCudaBackwardKernel<T> <<<config.block_per_grid, config.thread_per_block, 0, cu_stream>>>( count, out_grad.data<T>(), x.data<T>(), grid.data<T>(), n, c, out_h, out_w, in_h, in_w, x_grad->data<T>(), grid_grad_data, enum_mode, enum_padding_mode, align_corners); } else { const int out_d = grid.dims()[1]; const int out_h = grid.dims()[2]; const int out_w = grid.dims()[3]; const int n = x.dims()[0]; const int c = x.dims()[1]; const int in_d = x.dims()[2]; const int in_h = x.dims()[3]; const int in_w = x.dims()[4]; dev_ctx.template Alloc<T>(x_grad); phi::funcs::SetConstant<Context, T>()(dev_ctx, x_grad, static_cast<T>(0)); T* grid_grad_data = nullptr; if (grid_grad != nullptr) { grid_grad_data = dev_ctx.template Alloc<T>(grid_grad); } int count = static_cast<int>(n * out_d * out_h * out_w); auto cu_stream = dev_ctx.stream(); backends::gpu::GpuLaunchConfig config = backends::gpu::GetGpuLaunchConfig1D(dev_ctx, count); GridSampler3DCudaBackwardKernel<T> <<<config.block_per_grid, config.thread_per_block, 0, cu_stream>>>( count, out_grad.data<T>(), x.data<T>(), grid.data<T>(), c, out_d, out_h, out_w, in_d, in_h, in_w, x_grad->data<T>(), grid_grad_data, enum_mode, enum_padding_mode, align_corners); } } } // namespace phi PD_REGISTER_KERNEL(grid_sample_grad, GPU, ALL_LAYOUT, phi::GridSampleGradKernel, float, double) {}
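/* Minimal CUDA sketch, separate from the Paddle file above: the launch-sizing
   and iteration pattern used there. CUDA_KERNEL_LOOP expands to a grid-stride
   for loop, and GetGpuLaunchConfig1D amounts to a round-up division of the
   element count by the block size; this stand-alone kernel reproduces both so
   every output element is covered even if the grid is smaller than the count.
   The kernel name and buffer are illustrative only. */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void GridStrideFill(int n, float* out) {
  // Start at the global thread id, stride by the total thread count.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    out[i] = static_cast<float>(i);
  }
}

int main() {
  const int count = 1 << 20;                     // e.g. n * out_h * out_w
  float* d_out = nullptr;
  cudaMalloc(&d_out, count * sizeof(float));
  const int block = 256;
  const int grid = (count + block - 1) / block;  // round-up division
  GridStrideFill<<<grid, block>>>(count, d_out);
  cudaDeviceSynchronize();
  float h_first = 0.f, h_last = 0.f;
  cudaMemcpy(&h_first, d_out, sizeof(float), cudaMemcpyDeviceToHost);
  cudaMemcpy(&h_last, d_out + count - 1, sizeof(float), cudaMemcpyDeviceToHost);
  printf("out[0]=%.0f out[last]=%.0f\n", h_first, h_last);
  cudaFree(d_out);
  return 0;
}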
b1bf0d0b86c03f44660b5f22fb9aad453ea94278.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2019 XGBoost contributors */ #include <thrust/copy.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/iterator/counting_iterator.h> #include <algorithm> #include <string> #include <set> #include "xgboost/logging.h" #include "xgboost/span.h" #include "constraints.cuh" #include "param.h" #include "../common/device_helpers.cuh" namespace xgboost { size_t FeatureInteractionConstraintDevice::Features() const { return d_sets_ptr_.size() - 1; } void FeatureInteractionConstraintDevice::Configure( tree::TrainParam const& param, int32_t const n_features) { has_constraint_ = true; if (param.interaction_constraints.length() == 0) { has_constraint_ = false; return; } // --- Parse interaction constraints // Interaction constraints parsed from string parameter. After // parsing, this looks like {{0, 1, 2}, {2, 3 ,4}}. std::vector<std::vector<bst_feature_t>> h_feature_constraints; try { ParseInteractionConstraint(param.interaction_constraints, &h_feature_constraints); } catch (dmlc::Error const& e) { LOG(FATAL) << "Failed to parse feature interaction constraint:\n" << param.interaction_constraints << "\n" << "With error:\n" << e.what(); } n_sets_ = h_feature_constraints.size(); size_t const n_feat_storage = LBitField64::ComputeStorageSize(n_features); if (n_feat_storage == 0 && n_features != 0) { LOG(FATAL) << "Wrong storage size, n_features: " << n_features; } // --- Initialize allowed features attached to nodes. int32_t n_nodes { param.MaxNodes() }; node_constraints_.resize(n_nodes); node_constraints_storage_.resize(n_nodes); for (auto& n : node_constraints_storage_) { n.resize(LBitField64::ComputeStorageSize(n_features)); } for (size_t i = 0; i < node_constraints_storage_.size(); ++i) { auto span = dh::ToSpan(node_constraints_storage_[i]); node_constraints_[i] = LBitField64(span); } s_node_constraints_ = common::Span<LBitField64>(node_constraints_.data(), node_constraints_.size()); // Represent constraints as CSR format, flatten is the value vector, // ptr is row_ptr vector in CSR. std::vector<uint32_t> h_feature_constraints_flatten; for (auto const& constraints : h_feature_constraints) { for (uint32_t c : constraints) { h_feature_constraints_flatten.emplace_back(c); } } std::vector<size_t> h_feature_constraints_ptr; size_t n_features_in_constraints = 0; h_feature_constraints_ptr.emplace_back(n_features_in_constraints); for (auto const& v : h_feature_constraints) { n_features_in_constraints += v.size(); h_feature_constraints_ptr.emplace_back(n_features_in_constraints); } // Copy the CSR to device. d_fconstraints_.resize(h_feature_constraints_flatten.size()); thrust::copy(h_feature_constraints_flatten.cbegin(), h_feature_constraints_flatten.cend(), d_fconstraints_.begin()); s_fconstraints_ = dh::ToSpan(d_fconstraints_); d_fconstraints_ptr_.resize(h_feature_constraints_ptr.size()); thrust::copy(h_feature_constraints_ptr.cbegin(), h_feature_constraints_ptr.cend(), d_fconstraints_ptr_.begin()); s_fconstraints_ptr_ = dh::ToSpan(d_fconstraints_ptr_); // --- Compute interaction sets attached to each feature. // Use a set to eliminate duplicated entries. std::vector<std::set<int32_t> > h_features_set(n_features); int32_t cid = 0; for (auto const& constraints : h_feature_constraints) { for (auto const& feat : constraints) { h_features_set.at(feat).insert(cid); } cid++; } // Compute device sets. 
std::vector<int32_t> h_sets; int32_t ptr = 0; std::vector<int32_t> h_sets_ptr {ptr}; for (auto const& feature : h_features_set) { for (auto constraint_id : feature) { h_sets.emplace_back(constraint_id); } // empty set is well defined here. ptr += feature.size(); h_sets_ptr.emplace_back(ptr); } d_sets_ = h_sets; d_sets_ptr_ = h_sets_ptr; s_sets_ = dh::ToSpan(d_sets_); s_sets_ptr_ = dh::ToSpan(d_sets_ptr_); d_feature_buffer_storage_.resize(LBitField64::ComputeStorageSize(n_features)); feature_buffer_ = LBitField64{dh::ToSpan(d_feature_buffer_storage_)}; // --- Initialize result buffers. output_buffer_bits_storage_.resize(LBitField64::ComputeStorageSize(n_features)); output_buffer_bits_ = LBitField64(dh::ToSpan(output_buffer_bits_storage_)); input_buffer_bits_storage_.resize(LBitField64::ComputeStorageSize(n_features)); input_buffer_bits_ = LBitField64(dh::ToSpan(input_buffer_bits_storage_)); result_buffer_.resize(n_features); s_result_buffer_ = dh::ToSpan(result_buffer_); } FeatureInteractionConstraintDevice::FeatureInteractionConstraintDevice( tree::TrainParam const& param, int32_t const n_features) : has_constraint_{true}, n_sets_{0} { this->Configure(param, n_features); } void FeatureInteractionConstraintDevice::Reset() { for (auto& node : node_constraints_storage_) { thrust::fill(node.begin(), node.end(), 0); } } __global__ void ClearBuffersKernel( LBitField64 result_buffer_output, LBitField64 result_buffer_input) { auto tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < result_buffer_output.Size()) { result_buffer_output.Clear(tid); } if (tid < result_buffer_input.Size()) { result_buffer_input.Clear(tid); } } void FeatureInteractionConstraintDevice::ClearBuffers() { CHECK_EQ(output_buffer_bits_.Size(), input_buffer_bits_.Size()); CHECK_LE(feature_buffer_.Size(), output_buffer_bits_.Size()); uint32_t constexpr kBlockThreads = 256; auto const n_grids = static_cast<uint32_t>( common::DivRoundUp(input_buffer_bits_.Size(), kBlockThreads)); dh::LaunchKernel {n_grids, kBlockThreads} ( ClearBuffersKernel, output_buffer_bits_, input_buffer_bits_); } common::Span<bst_feature_t> FeatureInteractionConstraintDevice::QueryNode(int32_t node_id) { if (!has_constraint_) { return {}; } CHECK_LT(node_id, s_node_constraints_.size()); ClearBuffers(); thrust::counting_iterator<int32_t> begin(0); thrust::counting_iterator<int32_t> end(result_buffer_.size()); auto p_result_buffer = result_buffer_.data(); LBitField64 node_constraints = s_node_constraints_[node_id]; thrust::device_ptr<bst_feature_t> const out_end = thrust::copy_if( thrust::device, begin, end, p_result_buffer, [=]__device__(int32_t pos) { bool res = node_constraints.Check(pos); return res; }); size_t const n_available = std::distance(result_buffer_.data(), out_end); return {s_result_buffer_.data(), s_result_buffer_.data() + n_available}; } __global__ void SetInputBufferKernel(common::Span<bst_feature_t> feature_list_input, LBitField64 result_buffer_input) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < feature_list_input.size()) { result_buffer_input.Set(feature_list_input[tid]); } } __global__ void QueryFeatureListKernel(LBitField64 node_constraints, LBitField64 result_buffer_input, LBitField64 result_buffer_output) { result_buffer_output |= node_constraints; result_buffer_output &= result_buffer_input; } common::Span<bst_feature_t> FeatureInteractionConstraintDevice::Query( common::Span<bst_feature_t> feature_list, int32_t nid) { if (!has_constraint_ || nid == 0) { return feature_list; } ClearBuffers(); LBitField64 
node_constraints = s_node_constraints_[nid]; CHECK_EQ(input_buffer_bits_.Size(), output_buffer_bits_.Size()); uint32_t constexpr kBlockThreads = 256; auto n_grids = static_cast<uint32_t>( common::DivRoundUp(output_buffer_bits_.Size(), kBlockThreads)); dh::LaunchKernel {n_grids, kBlockThreads} ( SetInputBufferKernel, feature_list, input_buffer_bits_); dh::LaunchKernel {n_grids, kBlockThreads} ( QueryFeatureListKernel, node_constraints, input_buffer_bits_, output_buffer_bits_); thrust::counting_iterator<int32_t> begin(0); thrust::counting_iterator<int32_t> end(result_buffer_.size()); LBitField64 local_result_buffer = output_buffer_bits_; thrust::device_ptr<bst_feature_t> const out_end = thrust::copy_if( thrust::device, begin, end, result_buffer_.data(), [=]__device__(int32_t pos) { bool res = local_result_buffer.Check(pos); return res; }); size_t const n_available = std::distance(result_buffer_.data(), out_end); common::Span<bst_feature_t> result = {s_result_buffer_.data(), s_result_buffer_.data() + n_available}; return result; } // Find interaction sets for each feature, then store all features in // those sets in a buffer. __global__ void RestoreFeatureListFromSetsKernel( LBitField64 feature_buffer, bst_feature_t fid, common::Span<bst_feature_t> feature_interactions, common::Span<size_t> feature_interactions_ptr, // of size n interaction set + 1 common::Span<bst_feature_t> interactions_list, common::Span<size_t> interactions_list_ptr) { auto const tid_x = threadIdx.x + blockIdx.x * blockDim.x; auto const tid_y = threadIdx.y + blockIdx.y * blockDim.y; // painful mapping: fid -> sets related to it -> features related to sets. auto const beg = interactions_list_ptr[fid]; auto const end = interactions_list_ptr[fid+1]; auto const n_sets = end - beg; if (tid_x < n_sets) { auto const set_id_pos = beg + tid_x; auto const set_id = interactions_list[set_id_pos]; auto const set_beg = feature_interactions_ptr[set_id]; auto const set_end = feature_interactions_ptr[set_id + 1]; auto const feature_pos = set_beg + tid_y; if (feature_pos < set_end) { feature_buffer.Set(feature_interactions[feature_pos]); } } } __global__ void InteractionConstraintSplitKernel(LBitField64 feature, int32_t feature_id, LBitField64 node, LBitField64 left, LBitField64 right) { auto tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid > node.Size()) { return; } // enable constraints from feature node |= feature; // clear the buffer after use if (tid < feature.Size()) { feature.Clear(tid); } // enable constraints from parent left |= node; right |= node; if (tid == feature_id) { // enable the split feature, set all of them at last instead of // setting it for parent to avoid race. 
node.Set(feature_id); left.Set(feature_id); right.Set(feature_id); } } void FeatureInteractionConstraintDevice::Split( bst_node_t node_id, bst_feature_t feature_id, bst_node_t left_id, bst_node_t right_id) { if (!has_constraint_) { return; } CHECK_NE(node_id, left_id) << " Split node: " << node_id << " and its left child: " << left_id << " cannot be the same."; CHECK_NE(node_id, right_id) << " Split node: " << node_id << " and its left child: " << right_id << " cannot be the same."; CHECK_LT(right_id, s_node_constraints_.size()); CHECK_NE(s_node_constraints_.size(), 0); LBitField64 node = s_node_constraints_[node_id]; LBitField64 left = s_node_constraints_[left_id]; LBitField64 right = s_node_constraints_[right_id]; dim3 const block3(16, 64, 1); dim3 const grid3(common::DivRoundUp(n_sets_, 16), common::DivRoundUp(s_fconstraints_.size(), 64)); dh::LaunchKernel {grid3, block3} ( RestoreFeatureListFromSetsKernel, feature_buffer_, feature_id, s_fconstraints_, s_fconstraints_ptr_, s_sets_, s_sets_ptr_); uint32_t constexpr kBlockThreads = 256; auto n_grids = static_cast<uint32_t>(common::DivRoundUp(node.Size(), kBlockThreads)); dh::LaunchKernel {n_grids, kBlockThreads} ( InteractionConstraintSplitKernel, feature_buffer_, feature_id, node, left, right); } } // namespace xgboost
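/* Host-side sketch, not part of the XGBoost source above: how a parsed
   constraint list such as {{0, 1, 2}, {2, 3, 4}} is flattened into the
   CSR-style value / row-pointer pair that Configure() copies to the device
   (the d_fconstraints_ / d_fconstraints_ptr_ buffers). Variable names here
   are illustrative. */
#include <cstdio>
#include <vector>

int main() {
  std::vector<std::vector<unsigned>> constraints = {{0, 1, 2}, {2, 3, 4}};
  std::vector<unsigned> flat;   // value vector (all features, set by set)
  std::vector<size_t> ptr{0};   // row pointer: one entry per set, plus one
  for (const auto& set : constraints) {
    flat.insert(flat.end(), set.begin(), set.end());
    ptr.push_back(flat.size());
  }
  // Features belonging to set i live in flat[ptr[i] .. ptr[i+1]).
  for (size_t i = 0; i + 1 < ptr.size(); ++i) {
    printf("set %zu:", i);
    for (size_t j = ptr[i]; j < ptr[i + 1]; ++j) printf(" %u", flat[j]);
    printf("\n");
  }
  return 0;
}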
b1bf0d0b86c03f44660b5f22fb9aad453ea94278.cu
/*! * Copyright 2019 XGBoost contributors */ #include <thrust/copy.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/iterator/counting_iterator.h> #include <algorithm> #include <string> #include <set> #include "xgboost/logging.h" #include "xgboost/span.h" #include "constraints.cuh" #include "param.h" #include "../common/device_helpers.cuh" namespace xgboost { size_t FeatureInteractionConstraintDevice::Features() const { return d_sets_ptr_.size() - 1; } void FeatureInteractionConstraintDevice::Configure( tree::TrainParam const& param, int32_t const n_features) { has_constraint_ = true; if (param.interaction_constraints.length() == 0) { has_constraint_ = false; return; } // --- Parse interaction constraints // Interaction constraints parsed from string parameter. After // parsing, this looks like {{0, 1, 2}, {2, 3 ,4}}. std::vector<std::vector<bst_feature_t>> h_feature_constraints; try { ParseInteractionConstraint(param.interaction_constraints, &h_feature_constraints); } catch (dmlc::Error const& e) { LOG(FATAL) << "Failed to parse feature interaction constraint:\n" << param.interaction_constraints << "\n" << "With error:\n" << e.what(); } n_sets_ = h_feature_constraints.size(); size_t const n_feat_storage = LBitField64::ComputeStorageSize(n_features); if (n_feat_storage == 0 && n_features != 0) { LOG(FATAL) << "Wrong storage size, n_features: " << n_features; } // --- Initialize allowed features attached to nodes. int32_t n_nodes { param.MaxNodes() }; node_constraints_.resize(n_nodes); node_constraints_storage_.resize(n_nodes); for (auto& n : node_constraints_storage_) { n.resize(LBitField64::ComputeStorageSize(n_features)); } for (size_t i = 0; i < node_constraints_storage_.size(); ++i) { auto span = dh::ToSpan(node_constraints_storage_[i]); node_constraints_[i] = LBitField64(span); } s_node_constraints_ = common::Span<LBitField64>(node_constraints_.data(), node_constraints_.size()); // Represent constraints as CSR format, flatten is the value vector, // ptr is row_ptr vector in CSR. std::vector<uint32_t> h_feature_constraints_flatten; for (auto const& constraints : h_feature_constraints) { for (uint32_t c : constraints) { h_feature_constraints_flatten.emplace_back(c); } } std::vector<size_t> h_feature_constraints_ptr; size_t n_features_in_constraints = 0; h_feature_constraints_ptr.emplace_back(n_features_in_constraints); for (auto const& v : h_feature_constraints) { n_features_in_constraints += v.size(); h_feature_constraints_ptr.emplace_back(n_features_in_constraints); } // Copy the CSR to device. d_fconstraints_.resize(h_feature_constraints_flatten.size()); thrust::copy(h_feature_constraints_flatten.cbegin(), h_feature_constraints_flatten.cend(), d_fconstraints_.begin()); s_fconstraints_ = dh::ToSpan(d_fconstraints_); d_fconstraints_ptr_.resize(h_feature_constraints_ptr.size()); thrust::copy(h_feature_constraints_ptr.cbegin(), h_feature_constraints_ptr.cend(), d_fconstraints_ptr_.begin()); s_fconstraints_ptr_ = dh::ToSpan(d_fconstraints_ptr_); // --- Compute interaction sets attached to each feature. // Use a set to eliminate duplicated entries. std::vector<std::set<int32_t> > h_features_set(n_features); int32_t cid = 0; for (auto const& constraints : h_feature_constraints) { for (auto const& feat : constraints) { h_features_set.at(feat).insert(cid); } cid++; } // Compute device sets. 
std::vector<int32_t> h_sets; int32_t ptr = 0; std::vector<int32_t> h_sets_ptr {ptr}; for (auto const& feature : h_features_set) { for (auto constraint_id : feature) { h_sets.emplace_back(constraint_id); } // empty set is well defined here. ptr += feature.size(); h_sets_ptr.emplace_back(ptr); } d_sets_ = h_sets; d_sets_ptr_ = h_sets_ptr; s_sets_ = dh::ToSpan(d_sets_); s_sets_ptr_ = dh::ToSpan(d_sets_ptr_); d_feature_buffer_storage_.resize(LBitField64::ComputeStorageSize(n_features)); feature_buffer_ = LBitField64{dh::ToSpan(d_feature_buffer_storage_)}; // --- Initialize result buffers. output_buffer_bits_storage_.resize(LBitField64::ComputeStorageSize(n_features)); output_buffer_bits_ = LBitField64(dh::ToSpan(output_buffer_bits_storage_)); input_buffer_bits_storage_.resize(LBitField64::ComputeStorageSize(n_features)); input_buffer_bits_ = LBitField64(dh::ToSpan(input_buffer_bits_storage_)); result_buffer_.resize(n_features); s_result_buffer_ = dh::ToSpan(result_buffer_); } FeatureInteractionConstraintDevice::FeatureInteractionConstraintDevice( tree::TrainParam const& param, int32_t const n_features) : has_constraint_{true}, n_sets_{0} { this->Configure(param, n_features); } void FeatureInteractionConstraintDevice::Reset() { for (auto& node : node_constraints_storage_) { thrust::fill(node.begin(), node.end(), 0); } } __global__ void ClearBuffersKernel( LBitField64 result_buffer_output, LBitField64 result_buffer_input) { auto tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < result_buffer_output.Size()) { result_buffer_output.Clear(tid); } if (tid < result_buffer_input.Size()) { result_buffer_input.Clear(tid); } } void FeatureInteractionConstraintDevice::ClearBuffers() { CHECK_EQ(output_buffer_bits_.Size(), input_buffer_bits_.Size()); CHECK_LE(feature_buffer_.Size(), output_buffer_bits_.Size()); uint32_t constexpr kBlockThreads = 256; auto const n_grids = static_cast<uint32_t>( common::DivRoundUp(input_buffer_bits_.Size(), kBlockThreads)); dh::LaunchKernel {n_grids, kBlockThreads} ( ClearBuffersKernel, output_buffer_bits_, input_buffer_bits_); } common::Span<bst_feature_t> FeatureInteractionConstraintDevice::QueryNode(int32_t node_id) { if (!has_constraint_) { return {}; } CHECK_LT(node_id, s_node_constraints_.size()); ClearBuffers(); thrust::counting_iterator<int32_t> begin(0); thrust::counting_iterator<int32_t> end(result_buffer_.size()); auto p_result_buffer = result_buffer_.data(); LBitField64 node_constraints = s_node_constraints_[node_id]; thrust::device_ptr<bst_feature_t> const out_end = thrust::copy_if( thrust::device, begin, end, p_result_buffer, [=]__device__(int32_t pos) { bool res = node_constraints.Check(pos); return res; }); size_t const n_available = std::distance(result_buffer_.data(), out_end); return {s_result_buffer_.data(), s_result_buffer_.data() + n_available}; } __global__ void SetInputBufferKernel(common::Span<bst_feature_t> feature_list_input, LBitField64 result_buffer_input) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < feature_list_input.size()) { result_buffer_input.Set(feature_list_input[tid]); } } __global__ void QueryFeatureListKernel(LBitField64 node_constraints, LBitField64 result_buffer_input, LBitField64 result_buffer_output) { result_buffer_output |= node_constraints; result_buffer_output &= result_buffer_input; } common::Span<bst_feature_t> FeatureInteractionConstraintDevice::Query( common::Span<bst_feature_t> feature_list, int32_t nid) { if (!has_constraint_ || nid == 0) { return feature_list; } ClearBuffers(); LBitField64 
node_constraints = s_node_constraints_[nid]; CHECK_EQ(input_buffer_bits_.Size(), output_buffer_bits_.Size()); uint32_t constexpr kBlockThreads = 256; auto n_grids = static_cast<uint32_t>( common::DivRoundUp(output_buffer_bits_.Size(), kBlockThreads)); dh::LaunchKernel {n_grids, kBlockThreads} ( SetInputBufferKernel, feature_list, input_buffer_bits_); dh::LaunchKernel {n_grids, kBlockThreads} ( QueryFeatureListKernel, node_constraints, input_buffer_bits_, output_buffer_bits_); thrust::counting_iterator<int32_t> begin(0); thrust::counting_iterator<int32_t> end(result_buffer_.size()); LBitField64 local_result_buffer = output_buffer_bits_; thrust::device_ptr<bst_feature_t> const out_end = thrust::copy_if( thrust::device, begin, end, result_buffer_.data(), [=]__device__(int32_t pos) { bool res = local_result_buffer.Check(pos); return res; }); size_t const n_available = std::distance(result_buffer_.data(), out_end); common::Span<bst_feature_t> result = {s_result_buffer_.data(), s_result_buffer_.data() + n_available}; return result; } // Find interaction sets for each feature, then store all features in // those sets in a buffer. __global__ void RestoreFeatureListFromSetsKernel( LBitField64 feature_buffer, bst_feature_t fid, common::Span<bst_feature_t> feature_interactions, common::Span<size_t> feature_interactions_ptr, // of size n interaction set + 1 common::Span<bst_feature_t> interactions_list, common::Span<size_t> interactions_list_ptr) { auto const tid_x = threadIdx.x + blockIdx.x * blockDim.x; auto const tid_y = threadIdx.y + blockIdx.y * blockDim.y; // painful mapping: fid -> sets related to it -> features related to sets. auto const beg = interactions_list_ptr[fid]; auto const end = interactions_list_ptr[fid+1]; auto const n_sets = end - beg; if (tid_x < n_sets) { auto const set_id_pos = beg + tid_x; auto const set_id = interactions_list[set_id_pos]; auto const set_beg = feature_interactions_ptr[set_id]; auto const set_end = feature_interactions_ptr[set_id + 1]; auto const feature_pos = set_beg + tid_y; if (feature_pos < set_end) { feature_buffer.Set(feature_interactions[feature_pos]); } } } __global__ void InteractionConstraintSplitKernel(LBitField64 feature, int32_t feature_id, LBitField64 node, LBitField64 left, LBitField64 right) { auto tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid > node.Size()) { return; } // enable constraints from feature node |= feature; // clear the buffer after use if (tid < feature.Size()) { feature.Clear(tid); } // enable constraints from parent left |= node; right |= node; if (tid == feature_id) { // enable the split feature, set all of them at last instead of // setting it for parent to avoid race. 
node.Set(feature_id); left.Set(feature_id); right.Set(feature_id); } } void FeatureInteractionConstraintDevice::Split( bst_node_t node_id, bst_feature_t feature_id, bst_node_t left_id, bst_node_t right_id) { if (!has_constraint_) { return; } CHECK_NE(node_id, left_id) << " Split node: " << node_id << " and its left child: " << left_id << " cannot be the same."; CHECK_NE(node_id, right_id) << " Split node: " << node_id << " and its left child: " << right_id << " cannot be the same."; CHECK_LT(right_id, s_node_constraints_.size()); CHECK_NE(s_node_constraints_.size(), 0); LBitField64 node = s_node_constraints_[node_id]; LBitField64 left = s_node_constraints_[left_id]; LBitField64 right = s_node_constraints_[right_id]; dim3 const block3(16, 64, 1); dim3 const grid3(common::DivRoundUp(n_sets_, 16), common::DivRoundUp(s_fconstraints_.size(), 64)); dh::LaunchKernel {grid3, block3} ( RestoreFeatureListFromSetsKernel, feature_buffer_, feature_id, s_fconstraints_, s_fconstraints_ptr_, s_sets_, s_sets_ptr_); uint32_t constexpr kBlockThreads = 256; auto n_grids = static_cast<uint32_t>(common::DivRoundUp(node.Size(), kBlockThreads)); dh::LaunchKernel {n_grids, kBlockThreads} ( InteractionConstraintSplitKernel, feature_buffer_, feature_id, node, left, right); } } // namespace xgboost
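/* Assumption-labelled stand-in, not XGBoost's actual LBitField64 (which lives
   in device_helpers.cuh and runs on the device): a tiny host-side bitset that
   only illustrates the per-feature set / check / or-merge semantics the Query
   and Split kernels above rely on, e.g. `left |= node` to inherit the parent's
   allowed features and `node_constraints.Check(pos)` to test one feature. */
#include <cstdint>
#include <cstdio>
#include <vector>

struct TinyBitField {
  std::vector<uint64_t> words;
  explicit TinyBitField(size_t n_bits) : words((n_bits + 63) / 64, 0) {}
  void Set(size_t i) { words[i / 64] |= (uint64_t{1} << (i % 64)); }
  bool Check(size_t i) const { return (words[i / 64] >> (i % 64)) & 1u; }
  void OrWith(const TinyBitField& o) {  // analogue of the |= merge
    for (size_t w = 0; w < words.size(); ++w) words[w] |= o.words[w];
  }
};

int main() {
  TinyBitField node(128), left(128);
  node.Set(3);        // feature 3 allowed at the parent node
  left.OrWith(node);  // child inherits the parent's constraints
  left.Set(7);        // plus the feature the node was split on
  printf("left allows 3:%d 7:%d 9:%d\n",
         static_cast<int>(left.Check(3)), static_cast<int>(left.Check(7)),
         static_cast<int>(left.Check(9)));
  return 0;
}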
23ff134cf05fc96cb882686236409a176d53ff5c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "svt_utils.h" __device__ int svtsim_whichFit_full_GPU(int layerMask, int lcMask) { switch (layerMask & 0x1f) { case 0x0f: /* 0123 */ return 0; case 0x17: /* 0124 */ return 1; case 0x1b: /* 0134 */ return 2; case 0x1d: /* 0234 */ return 3; case 0x1e: /* 1234 */ return 4; case 0x1f: /* 01234 - this is the fun one to be careful with */ if(lcMask == 0) return 2; /* use 0134 if we have no LC */ else if (lcMask == 0x1) return 4; else if (lcMask == 0x2) return 3; else if (lcMask == 0x3) return 3; else if (lcMask == 0x4) return 2; else if (lcMask == 0x5) return 2; else if (lcMask == 0x6) return 2; else if (lcMask == 0x7) return 2; else if (lcMask == 0x8) return 1; else if (lcMask == 0x9) return 1; else if (lcMask == 0xa) return 1; else if (lcMask == 0xb) return 1; else if (lcMask == 0xc) return 2; else if (lcMask == 0xd) return 2; else if (lcMask == 0xe) return 2; else if (lcMask == 0xf) return 2; else /* If we have LC on outer layer just use 0123 */ return 0; default: return 0; } } __device__ int svtsim_whichFit_GPU(struct extra_data* edata_dev, int zin, int layerMask, int lcMask) { int which0 = 0, which = 0; if (zin<0 || zin>=SVTSIM_NBAR) zin = 0; which0 = svtsim_whichFit_full_GPU(layerMask, lcMask); which = edata_dev->whichFit[zin][which0]; return which; } __device__ int svtsim_get_gfMkAddr_GPU(struct extra_data* edata_dev, int *d, int nd, int d0) { /* d0 = iaddr */ int j; int md = 0x4000; int iz, lcl, hit; if (d0+nd>md) nd = md-d0; for (j = 0; j<nd; j++) { int i = j+d0; int word = 0xffff, intcp = 0, coeff = 0; int which; iz = i&7, lcl = i>>3 & 0x1f, hit = i>>8 & 0x3f; which = svtsim_whichFit_GPU(edata_dev, iz, hit, lcl); coeff = iz + which*6; /* poor choice for illegal iz=6,7, but compatible */ intcp = which; word = coeff<<3 | intcp; d[j] = word; } return nd; } __device__ int gf_mkaddr_GPU(struct extra_data* edata_dev, int hitmap, int lclmap, int zmap, int *coe_addr, int *int_addr, int *addr, int *err) { int iaddr; unsigned int datum = 0; if ((hitmap<0) || (hitmap > gf_mask_GPU( NSVX_PLANE + 1 )) || /* + XFT_LYR */ (lclmap<0) || (lclmap > gf_mask_GPU( NSVX_PLANE )) || (zmap<0) || (zmap > gf_mask_GPU( GF_ZID_WIDTH ))) *err |= ( 1 << SVTSIM_GF_MKADDR_INVALID ); iaddr = ((zmap & gf_mask_GPU(GF_SUBZ_WIDTH)) + (lclmap<<MADDR_NCLS_LSB) + (hitmap<<MADDR_HITM_LSB)); #define MAXMKA 8192 if ((iaddr < 0) || (iaddr >= MAXMKA)) return SVTSIM_GF_ERR; int ldat = 0; svtsim_get_gfMkAddr_GPU(edata_dev, &ldat, 1, iaddr); datum = ldat; *int_addr = datum & gf_mask_GPU(OFF_SUBA_WIDTH); *coe_addr = (datum >> OFF_SUBA_WIDTH) & gf_mask_GPU(PAR_ADDR_WIDTH); *addr = iaddr; return SVTSIM_GF_OK; } __device__ int gf_fit_proc_GPU(int hit[], int sign_crv, long long int coeff[], long long int intcp, long long int *result, int *err) { long long int temp = 0; int i = 0; *result = 0; *err = 0; for (i = 0; i < SVTNHITS; i++) { if (i < NSVX_PLANE) { temp += hit[i] * coeff[i]; } else if (i == HIT_PHI) { /* XFT phi */ hit[i] = (hit[i]&0x400) ? -((~hit[i]&0x3ff)+1) : (hit[i]&0x3ff); temp += hit[i] * coeff[i]; } else if (i == HIT_CRV) { /* XFT curvature (curv already with sign in fep ) */ if (sign_crv == 1) { /* if negative bit is set */ temp -= hit[i] * coeff[i]; } else { temp += hit[i] * coeff[i]; } } } *result = *result + temp + intcp; *result = *result<0 ? 
-((-*result)>>17) : *result>>17; if (*result > 0) *result &= gf_mask3_GPU(FIT_DWIDTH); else *result = -(abs(*result)&gf_mask3_GPU(FIT_DWIDTH)); return SVTSIM_GF_OK; } __device__ int gf_chi2_GPU(long long int chi[], int* trk_err, long long int *chi2) { long long int temp = 0; long long int chi2memdata = 0; *chi2 = 0; for (int i=0; i<NCHI; i++) { temp = abs(chi[i]); if (chi[i] < 0) temp++; chi2memdata = temp*temp; *chi2 += chi2memdata; } *chi2 = (*chi2 >> 2); if ((*chi2 >> 2) > gf_mask_GPU(CHI_DWIDTH)) { *chi2 = 0x7ff; *trk_err |= (1 << OFLOW_CHI_BIT); } return SVTSIM_GF_OK; } __device__ int gf_getq_GPU(int lyr_config) { int q = 0; switch (lyr_config) { case 0x01e : /* lcmap = 00000, hitmap = 11110 */ q = 3; break; case 0x01d : /* lcmap = 00000, hitmap = 11101 */ q = 2; break; case 0x01b : /* lcmap = 00000, hitmap = 11011 */ q = 1; break; case 0x017 : /* lcmap = 00000, hitmap = 10111 */ q = 2; break; case 0x00f : /* lcmap = 00000, hitmap = 01111 */ q = 2; break; case 0x03e : /* lcmap = 00001, hitmap = 11110 */ q = 2; break; case 0x03d : /* lcmap = 00001, hitmap = 11101 */ q = 1; break; case 0x03b : /* lcmap = 00001, hitmap = 11011 */ q = 1; break; case 0x037 : /* lcmap = 00001, hitmap = 10111 */ q = 1; break; case 0x02f : /* lcmap = 00001, hitmap = 01111 */ q = 1; break; case 0x05e : /* lcmap = 00010, hitmap = 11110 */ q = 7; break; case 0x05d : /* lcmap = 00010, hitmap = 11101 */ q = 1; break; case 0x05b : /* lcmap = 00010, hitmap = 11011 */ q = 2; break; case 0x057 : /* lcmap = 00010, hitmap = 10111 */ q = 2; break; case 0x04f : /* lcmap = 00010, hitmap = 01111 */ q = 2; break; case 0x09e : /* lcmap = 00100, hitmap = 11110 */ q = 7; break; case 0x09d : /* lcmap = 00100, hitmap = 11101 */ q = 2; break; case 0x09b : /* lcmap = 00100, hitmap = 11011 */ q = 1; break; case 0x097 : /* lcmap = 00100, hitmap = 10111 */ q = 2; break; case 0x08f : /* lcmap = 00100, hitmap = 01111 */ q = 3; break; case 0x11e : /* lcmap = 01000, hitmap = 11110 */ q = 7; break; case 0x11d : /* lcmap = 01000, hitmap = 11101 */ q = 2; break; case 0x11b : /* lcmap = 01000, hitmap = 11011 */ q = 2; break; case 0x117 : /* lcmap = 01000, hitmap = 10111 */ q = 1; break; case 0x10f : /* lcmap = 01000, hitmap = 01111 */ q = 3; break; case 0x21e : /* lcmap = 10000, hitmap = 11110 */ q = 7; break; case 0x21d : /* lcmap = 10000, hitmap = 11101 */ q = 2; break; case 0x21b : /* lcmap = 10000, hitmap = 11011 */ q = 2; break; case 0x217 : /* lcmap = 10000, hitmap = 10111 */ q = 2; break; case 0x20f : /* lcmap = 10000, hitmap = 01111 */ q = 1; break; case 0x0de : /* lcmap = 00110, hitmap = 11110 */ q = 7; break; case 0x0dd : /* lcmap = 00110, hitmap = 11101 */ q = 1; break; case 0x0db : /* lcmap = 00110, hitmap = 11011 */ q = 2; break; case 0x0d7 : /* lcmap = 00110, hitmap = 10111 */ q = 3; break; case 0x0cf : /* lcmap = 00110, hitmap = 01111 */ q = 4; break; case 0x19e : /* lcmap = 01100, hitmap = 11110 */ q = 7; break; case 0x19d : /* lcmap = 01100, hitmap = 11101 */ q = 2; break; case 0x19b : /* lcmap = 01100, hitmap = 11011 */ q = 1; break; case 0x197 : /* lcmap = 01100, hitmap = 10111 */ q = 1; break; case 0x18f : /* lcmap = 01100, hitmap = 01111 */ q = 3; break; case 0x31e : /* lcmap = 11000, hitmap = 11110 */ q = 7; break; case 0x31d : /* lcmap = 11000, hitmap = 11101 */ q = 3; break; case 0x31b : /* lcmap = 11000, hitmap = 11011 */ q = 3; break; case 0x317 : /* lcmap = 11000, hitmap = 10111 */ q = 1; break; case 0x30f : /* lcmap = 11000, hitmap = 01111 */ q = 2; break; case 0x15e : /* lcmap = 01010, hitmap = 11110 */ q = 7; 
break; case 0x15d : /* lcmap = 01010, hitmap = 11101 */ q = 1; break; case 0x15b : /* lcmap = 01010, hitmap = 11011 */ q = 3; q = 3; break; case 0x157 : /* lcmap = 01010, hitmap = 10111 */ q = 2; break; case 0x14f : /* lcmap = 01010, hitmap = 01111 */ q = 4; break; case 0x25e : /* lcmap = 10010, hitmap = 11110 */ q = 7; break; case 0x25d : /* lcmap = 10010, hitmap = 11101 */ q = 1; break; case 0x25b : /* lcmap = 10010, hitmap = 11011 */ q = 2; break; case 0x257 : /* lcmap = 10010, hitmap = 10111 */ q = 2; break; case 0x24f : /* lcmap = 10010, hitmap = 01111 */ q = 1; break; case 0x29e : /* lcmap = 10100, hitmap = 11110 */ q = 7; break; case 0x29d : /* lcmap = 10100, hitmap = 11101 */ q = 2; break; case 0x29b : /* lcmap = 10100, hitmap = 11011 */ q = 1; break; case 0x297 : /* lcmap = 10100, hitmap = 10111 */ q = 2; break; case 0x28f : /* lcmap = 10100, hitmap = 01111 */ q = 1; break; default: q = 7; break; } return q; } __device__ int gf_gfunc_GPU(int ncomb5h, int icomb5h, int hitmap, int lcmap, int chi2) { int lyr_config; int gvalue; int newhitmap; int newlcmap; int q = 0; if (ncomb5h == 1) { newhitmap = hitmap; newlcmap = lcmap; } else if (ncomb5h == 5) { switch (icomb5h) { case 0 : /* 11110 */ newhitmap = 0x1e; newlcmap = (lcmap & 0x1e); break; case 1 : /* 11101 */ newhitmap = 0x1d; newlcmap = lcmap & 0x1d; break; case 2 : /* 11011 */ newhitmap = 0x1b; newlcmap = lcmap & 0x1b; break; case 3 : /* 10111 */ newhitmap = 0x17; newlcmap = lcmap & 0x17; break; case 4 : /* 01111 */ newhitmap = 0x0f; newlcmap = lcmap & 0x0f; break; } } lyr_config = newhitmap + (newlcmap << 5); q = gf_getq_GPU(lyr_config); gvalue = (q << 4) + ((chi2 & 0x3ff) >> 6); return gvalue; } __device__ int gf_stword_GPU(int id, int err) { /* Compose the GF status word in the 7th word from the GF INPUT : err; error summary OUTPUT : return the gf_stword NOTE: Currently this code does not support the parity error and FIFO error. */ int word; word = id; if ((err>>OFLOW_HIT_BIT)&gf_mask_GPU(1)) word |= (1<<GFS_OFL_HIT); if ((err>>OFLOW_CHI_BIT)&gf_mask_GPU(1)) word |= (1<<GFS_OFL_CHI); if (((err>>UFLOW_HIT_BIT)&gf_mask_GPU(1)) || ((err>>OUTORDER_BIT)&gf_mask_GPU(1))) word |= (1<<GFS_INV_DATA); return word; } __device__ int cal_parity_GPU(int word) { int par = 0; for (int i=0; i<SVT_WORD_WIDTH; i++) par ^= ((word>>i) & gf_mask_GPU(1)); return par; } __device__ int gf_formatter_err_GPU(int err, int cdfmsk, int svtmsk, int eoemsk, int *eoe, int *cdf, int *svt) { /* Simulate the board error conditions (CDF-ERR, SVT-ERR and EOE-ERR) INPUT: err; error summary. cdfmsk; Mask for the CDF-ERR. svtmsk; Mask for the SVT-ERR. eoemsk; Mask for the EOE-ERR. 
OUTPUT: *eoe; EOE error *cdf; CDF error *svt; SVT error */ /* --------- Executable starts here ------------ */ *cdf = 0; /* never turned ON except for the FIFO overflow */ *svt = 0; *eoe = 0; for (int i=0; i<= FIT_RESULT_OFLOW_BIT; i++) { if ((err>>i)&gf_mask_GPU(1)) { if (((svtmsk>>i)&gf_mask_GPU(1)) == 0) *svt = 1; if (i == 0) { if (((eoemsk >> PARITY_ERR_BIT) & gf_mask_GPU(1)) == 0) { *eoe |= (1<<PARITY_ERR_BIT); } } else if ((i==2) || (i==3)) { if (((eoemsk>>INV_DATA_BIT)&gf_mask_GPU(1)) == 0) { *eoe |= (1<<INV_DATA_BIT); } } else { if (((eoemsk>>INT_OFLOW_BIT)&gf_mask_GPU(1)) == 0) { *eoe |= (1<<INT_OFLOW_BIT); } } } /* if ((err>>i)&gf_mask_GPU(1)) */ } /* for (i=0; i<= FIT_RESULT_OFLOW_BIT; i++) */ return SVTSIM_GF_OK; } __device__ int gf_formatter_GPU(int ie, int ir, int ic, int ich, int chi2, struct fep_arrays* fep_dev, struct fit_arrays* fit_dev, struct evt_arrays* evt_dev, struct fout_arrays* fout_dev) { int it, err; int hit_form[NSVX_PLANE]; int z = 0; /* z should be 6 bits large */ int gf_stat = 0; // atomicAdd returns the old value it = atomicAdd(&fout_dev->fout_ntrks[ie], 1); err = (fep_dev->fep_err[ie][ir] | fit_dev->fit_err[ie][ir][ic][ich]); for (int i=0; i<NSVX_PLANE; i++) { /* Hit coordinate */ if (fep_dev->fep_ncomb5h[ie][ir][ic] == 5) { if (i != ich) { hit_form[i] = fep_dev->fep_hit[ie][ir][ic][i]&gf_mask_GPU(GF_HIT_WIDTH); /* Long Cluster bit */ hit_form[i] += (((fep_dev->fep_hit[ie][ir][ic][i] & 0x4000) ? 1 : 0) << GF_HIT_WIDTH); /* Hit existence bit */ hit_form[i] += (((fep_dev->fep_hitmap[ie][ir][ic]>>i)&gf_mask_GPU(1))<<(GF_HIT_WIDTH+1)); hit_form[i] = (hit_form[i]&gf_mask_GPU(GF_HIT_WIDTH+2)); } else hit_form[i] = 0; } else { hit_form[i] = fep_dev->fep_hit[ie][ir][ic][i]&gf_mask_GPU(GF_HIT_WIDTH); /* Long Cluster bit */ hit_form[i] += (((fep_dev->fep_hit[ie][ir][ic][i] & 0x4000) ? 
1 : 0) << GF_HIT_WIDTH); /* Hit existence bit */ hit_form[i] += (((fep_dev->fep_hitmap[ie][ir][ic]>>i)&gf_mask_GPU(1))<<(GF_HIT_WIDTH+1)); hit_form[i] = (hit_form[i]&gf_mask_GPU(GF_HIT_WIDTH+2)); } } if (1) { int presentmask; int newhitmap; if (fep_dev->fep_ncomb5h[ie][ir][ic] == 1) { presentmask = fep_dev->fep_hitmap[ie][ir][ic]; } else if (fep_dev->fep_ncomb5h[ie][ir][ic] == 5) { switch (ich) { case 0 : /* 11110 */ newhitmap = 0x1e; break; case 1 : /* 11101 */ newhitmap = 0x1d; break; case 2 : /* 11011 */ newhitmap = 0x1b; break; case 3 : /* 10111 */ newhitmap = 0x17; break; case 4 : /* 01111 */ newhitmap = 0x0f; break; } presentmask = newhitmap; } { int longmask = presentmask & fep_dev->fep_lcl[ie][ir][ic]; int goodmask = presentmask & ~longmask; int badmask = 0x1f & ~goodmask; int badmap[] = { 0x0, /* 00000: all layers good */ 0x5, /* 10000: layer 0 bad */ 0x4, /* 01000: layer 1 bad */ 0xe, /* 11000: layers 0,1 bad (changed from f to e) */ 0x3, /* 00100: layer 2 bad */ 0xe, /* 10100: layers 0,2 bad */ 0xb, /* 01100: layers 1,2 bad */ 0xf, /* 11100: >2 layers bad */ 0x2, /* 00010: layer 3 bad */ 0xd, /* 10010: layers 0,3 bad */ 0xa, /* 01010: layers 1,3 bad */ 0xf, /* 11010: >2 layers bad */ 0x8, /* 00110: layers 2,3 bad */ 0xf, /* 10110: >2 layers bad */ 0xf, /* 01110: >2 layers bad */ 0xf, /* 11110: >2 layers bad */ 0x1, /* 00001: layer 4 bad */ 0xc, /* 10001: layers 0,4 bad */ 0x8, /* 01001: layers 1,4 bad (oops: doc says 0x9 not 0x8) */ 0xf, /* 11001: >2 layers bad */ 0x7, /* 00101: layers 2,4 bad */ 0xf, /* 10101: >2 layers bad */ 0xf, /* 01101: >2 layers bad */ 0xf, /* 11101: >2 layers bad */ 0x6, /* 00011: layers 3,4 bad */ 0xf, /* 10011: >2 layers bad */ 0xf, /* 01011: >2 layers bad */ 0xf, /* 11011: >2 layers bad */ 0xf, /* 00111: >2 layers bad */ 0xf, /* 10111: >2 layers bad */ 0xf, /* 01111: >2 layers bad */ 0xf /* 11111: all layers bad! 
*/ }; gf_stat = badmap[badmask]; } } gf_stat = gf_stword_GPU(gf_stat, err); /* output word (25 bits) (from CDFnote 5026) 4-3-2-1-0-9-8-7-6-5-4-3-2-1-0-9-8-7-6-5-4-3-2-1-0 */ /* 1st word 24-23-22-21- 20- 19- 18-17-16-15-14-13- 12-11-10-9-8-7-6-5-4-3-2-1-0 -------- 1 - z phi */ /* phi is already formatted by the fitter (13 bits) */ if (fep_dev->fep_ncomb5h[ie][ir][ic] == 1) { z = fep_dev->fep_zid[ie][ir]; } else if (fep_dev->fep_ncomb5h[ie][ir][ic] == 5) { if (ich == 0){ z = ((fep_dev->fep_hitZ[ie][ir][ic][4]&gf_mask_GPU(GF_SUBZ_WIDTH))<<GF_SUBZ_WIDTH) + (fep_dev->fep_hitZ[ie][ir][ic][1]&gf_mask_GPU(GF_SUBZ_WIDTH)); } else if (ich == 4){ z = ((fep_dev->fep_hitZ[ie][ir][ic][3]&gf_mask_GPU(GF_SUBZ_WIDTH))<<GF_SUBZ_WIDTH) + (fep_dev->fep_hitZ[ie][ir][ic][0]&gf_mask_GPU(GF_SUBZ_WIDTH)); } else { z = ((fep_dev->fep_hitZ[ie][ir][ic][4]&gf_mask_GPU(GF_SUBZ_WIDTH))<<GF_SUBZ_WIDTH) + (fep_dev->fep_hitZ[ie][ir][ic][0]&gf_mask_GPU(GF_SUBZ_WIDTH)); } } fout_dev->fout_gfword[ie][it][0] = (fit_dev->fit_fit[ie][0][ir][ic][ich] & gf_mask_GPU(OPHI_WIDTH)) + ((z & gf_mask_GPU(GF_ZID_WIDTH)) << OPHI_WIDTH) + (0 << OBP_ERR_BIT) // we follow the word structure in http://www-cdf.fnal.gov/internal/upgrades/daq_trig/trigger/svt/BoardDocs/data_words/tracks_bits.html + (1<<(OBP_ID_BIT)); /* 2nd word 4-3-2-1-0-9-8 -7-6-5-4-3-2-1-0 -9 -8-7-6-5-4-3-2-1-0 24-23-22-21- 20- 19- 18- 17-16-15-14-13- 12-11- 10-9-8-7-6-5-4-3-2-1-0 ------------ rID sign c d 17mo bit di roadID -> 19 18mo -> 20 */ fout_dev->fout_gfword[ie][it][1] = fit_dev->fit_fit[ie][1][ir][ic][ich] + (fit_dev->fit_fit[ie][2][ir][ic][ich] << OCVR_LSB) + ((evt_dev->evt_road[ie][ir] & 0x60000) << 2); /* 3rd word 4-3-2-1-0-9-8-7 -6-5-4-3-2-1-0-9-8-7-6-5-4-3-2-1-0 --------sector AM road id (17 LSB) */ fout_dev->fout_gfword[ie][it][2] = (evt_dev->evt_road[ie][ir] & gf_mask_GPU(OAMROAD_WIDTH)) + (( fep_dev->fep_cable_sect[ie][ir] & gf_mask_GPU(SVT_SECT_WIDTH)) << OSEC_LSB); /* 4th word 4-3-2-1-0-9-8-7-6-5-4-3-2-1-0 -9-8-7-6-5-4-3-2-1-0 ----------x1 x0 bit 21 = bit 19 del roadID hit = 8 bassi e 2 alti */ fout_dev->fout_gfword[ie][it][3] = hit_form[0] + (hit_form[1]<<OX1_LSB) + ((evt_dev->evt_road[ie][ir] & 0x80000) << 1); /* 5th word 4-3-2-1-0-9-8-7-6-5-4-3-2-1-0 -9-8-7-6-5-4-3-2-1-0 ----------x3 x2 bit 21 = road ID 20 */ fout_dev->fout_gfword[ie][it][4] = hit_form[2] + (hit_form[3]<<OX3_LSB) + ((evt_dev->evt_road[ie][ir] & 0x100000)); /* 6th word 4-3-2-1-0-9-8-7-6-5-4-3-2-1-0 -9-8-7-6-5-4-3-2-1-0 ----------chisq x4 */ fout_dev->fout_gfword[ie][it][5] = hit_form[4] + ((chi2 & gf_mask_GPU(CHI2SUM_WIDTH)) << OCHI2_LSB); /* 7th word 4-3-2-1 -0-9-8-7-6-5-4-3-2-1-0-9 -8-7-6-5-4-3-2-1-0 ------0 TrackFitter status Track Number Track Num = identificativo della traccia XFT phi - 3 bit meno significativi del phi della traccia XFT */ fout_dev->fout_gfword[ie][it][6] = ((fep_dev->fep_phi[ie][ir][ic] >> SVT_TRKID_LSB) &gf_mask_GPU(SVT_TRKID_WIDTH)) + ((gf_stat & gf_mask_GPU(GF_STAT_WIDTH))<<OSTAT_LSB) + (1<<SVT_EP_BIT); for (int i=0; i<NTFWORDS; i++) atomicXor(&fout_dev->fout_parity[ie], cal_parity_GPU(fout_dev->fout_gfword[ie][it][i])); return SVTSIM_GF_OK; } __global__ void gf_fit_format_GPU (struct fep_arrays* fep_dev, struct fit_arrays* fit_dev, int maxEvt ) { int ie, ir, ic, ich; long long int temp = 0; ie = blockIdx.x; // events index ir = blockIdx.y; // roads index // combination indexes ic = threadIdx.x; ich = threadIdx.y; if ( ( ie < maxEvt ) && ( ir < fep_dev->fep_nroads[ie] ) && ( ic < fep_dev->fep_ncmb[ie][ir] ) && ( ich < fep_dev->fep_ncomb5h[ie][ir][ic] ) ) { /* phi 
*/ temp = fit_dev->fit_fit[ie][0][ir][ic][ich]; if ( temp > 0) { temp++; temp = temp >> 1; } else { temp--; temp = -((-temp) >> 1); } if (abs(temp) > gf_mask_GPU(OPHI_WIDTH)) { fit_dev->fit_err[ie][ir][ic][ich] |= (1<<FIT_RESULT_OFLOW_BIT); } temp = (temp < 0 ? -temp : temp) & gf_mask_GPU(OPHI_WIDTH); fit_dev->fit_fit[ie][0][ir][ic][ich] = temp; /* impact parameter */ temp = fit_dev->fit_fit[ie][1][ir][ic][ich]; if ( temp > 0) { temp++; temp = temp >> 1; } else { temp--; temp = -((-temp) >> 1); } /*overflow check */ if (abs(temp) > gf_mask_GPU(OIMP_WIDTH)) { fit_dev->fit_err[ie][ir][ic][ich] |= (1<< FIT_RESULT_OFLOW_BIT); } temp = (temp < 0 ? -temp : temp) & gf_mask_GPU(OIMP_WIDTH); /* now add a bit for the sign */ if ( fit_dev->fit_fit[ie][1][ir][ic][ich] < 0) { temp += (1<<OIMP_SIGN); } fit_dev->fit_fit[ie][1][ir][ic][ich] = temp; /* curvature */ temp = fit_dev->fit_fit[ie][2][ir][ic][ich]; if (temp > 0) { temp++; temp = temp >> 1; } else { temp--; temp = -((-temp) >> 1); } /*overflow check */ if (abs(temp) > gf_mask_GPU(OCVR_WIDTH)) { fit_dev->fit_err[ie][ir][ic][ich] |= (1<<FIT_RESULT_OFLOW_BIT); } temp = (temp < 0 ? -temp : temp) & gf_mask_GPU(OCVR_WIDTH); /* now add a bit for the sign */ if (fit_dev->fit_fit[ie][2][ir][ic][ich] < 0) { temp += (1<<OCVR_SIGN); } fit_dev->fit_fit[ie][2][ir][ic][ich] = temp; } // end if } __global__ void kFit(struct fep_arrays* fep_dev, struct extra_data* edata_dev, struct fit_arrays* fit_dev, int maxEvt) { int ir, ic, ip, ih, il; int hit[SVTNHITS]; long long int coeff[NFITTER][SVTNHITS]; int coe_addr, int_addr; /* Address for coefficients and intercept */ int mka_addr; /* Address for MKADDR memory */ long long int theintcp = 0; int sign_crv = 0; int which, lwhich; int iz; int ie; int newhitmap; int map[7][7] = { { 0, 1, 2, 3, -1, 4, 5 }, /* 01235 */ { 0, 1, 2, -1, 3, 4, 5 }, /* 01245 */ { 0, 1, -1, 2, 3, 4, 5 }, /* 01345 */ { 0, -1, 1, 2, 3, 4, 5 }, /* 02345 */ { -1, 0, 1, 2, 3, 4, 5 }, /* 12345 */ { 0, 1, 2, 3, -1, 4, 5 }, /* (??) */ { 0, 1, 2, 3, -1, 4, 5 } /* (??) */ }; ie = blockIdx.x; // event index ir = blockIdx.y; // road index ic = threadIdx.x; // combination index ip = threadIdx.y; // fitter index fit_dev->fit_err_sum[ie] = fep_dev->fep_err_sum[ie]; if ( ( ie < maxEvt ) && ( ir < fep_dev->fep_nroads[ie] ) && ( ic < fep_dev->fep_ncmb[ie][ir] ) ) { if ( fep_dev->fep_hitmap[ie][ir][ic] != 0x1f ) { gf_mkaddr_GPU(edata_dev, fep_dev->fep_hitmap[ie][ir][ic], fep_dev->fep_lcl[ie][ir][ic], fep_dev->fep_zid[ie][ir], &coe_addr, &int_addr, &mka_addr, fit_dev->fit_err_sum); int_addr = (int_addr<<OFF_SUBA_LSB) + fep_dev->fep_road[ie][ir]; iz = fep_dev->fep_zid[ie][ir]&7; which = coe_addr/6; lwhich = which; which = edata_dev->whichFit[iz][which]; for (ih = 0; ih < SVTNHITS; ih++) { coeff[ip][ih] = map[lwhich][ih] < 0 ? 
0 : (edata_dev->lfitparfcon[ip][map[lwhich][ih]][iz][which]); if ( ih<NSVX_PLANE ) { hit[ih] = ((fep_dev->fep_hit[ie][ir][ic][ih] << 1) + 1) & gf_mask_GPU(15); } else if (ih == HIT_PHI) { hit[ih] = fep_dev->fep_phi[ie][ir][ic]; hit[ih] -= edata_dev->wedge[ie]*SVTSIM_XFTPHIBINS/SVTSIM_NWEDGE; hit[ih] = ((hit[ih] << 3) + (1 << 2)) & gf_mask_GPU(15); } else if (ih == HIT_CRV) { sign_crv = fep_dev->fep_crv_sign[ie][ir][ic]; hit[ih] = ((fep_dev->fep_crv[ie][ir][ic] << 8) + (1 << 7)) & gf_mask_GPU(15); } } /* end for(ih = 0; ih < SVTNHITS; ih++) */ theintcp = edata_dev->lfitparfcon[ip][6][iz][which] << 18; gf_fit_proc_GPU(hit, sign_crv, coeff[ip], theintcp, &(fit_dev->fit_fit[ie][ip][ir][ic][0]), &(fit_dev->fit_err[ie][ir][ic][0])); } else { /* 5/5 track transformed in 5 4/5 tracks*/ for (ih = 0; ih < NSVX_PLANE; ih++) { for (il = 0; il < NSVX_PLANE; il++) { /* one call to gf_fit_proc for each ih value */ /* let's calculate the new hitmap */ if (il != ih) { switch (ih) { case 0 : /* 11110 */ newhitmap = 0x1e; break; case 1 : /* 11101 */ newhitmap = 0x1d; break; case 2 : /* 11011 */ newhitmap = 0x1b; break; case 3 : /* 10111 */ newhitmap = 0x17; break; case 4 : /* 01111 */ newhitmap = 0x0f; break; } gf_mkaddr_GPU(edata_dev, newhitmap, fep_dev->fep_lcl[ie][ir][ic], fep_dev->fep_zid[ie][ir], &coe_addr, &int_addr, &mka_addr, fit_dev->fit_err_sum); if (ih == 0){ iz = fep_dev->fep_hitZ[ie][ir][ic][1];; } else { iz = fep_dev->fep_zid[ie][ir]&7; } which = coe_addr/6; lwhich = which; which = edata_dev->whichFit[iz][which]; coeff[ip][il] = map[lwhich][il] < 0 ? 0 : (edata_dev->lfitparfcon[ip][map[lwhich][il]][iz][which]); hit[il] = ((fep_dev->fep_hit[ie][ir][ic][il] << 1) + 1) & gf_mask_GPU(15); } else { // il == ih hit[il] = 0 ; coeff[ip][il]= 1; } } /* end for(il = 0; il < NSVX_PLANE; il++) */ hit[HIT_PHI] = fep_dev->fep_phi[ie][ir][ic]; hit[HIT_PHI] -= edata_dev->wedge[ie]*SVTSIM_XFTPHIBINS/SVTSIM_NWEDGE; hit[HIT_PHI] = ((hit[HIT_PHI] << 3) + (1 << 2)) & gf_mask_GPU(15); coeff[ip][HIT_PHI] = map[lwhich][HIT_PHI] < 0 ? 0 : (edata_dev->lfitparfcon[ip][map[lwhich][HIT_PHI]][iz][which]); sign_crv = fep_dev->fep_crv_sign[ie][ir][ic]; hit[HIT_CRV] = ((fep_dev->fep_crv[ie][ir][ic] << 8) + (1 << 7)) & gf_mask_GPU(15); coeff[ip][HIT_CRV] = map[lwhich][HIT_CRV] < 0 ? 
0 : (edata_dev->lfitparfcon[ip][map[lwhich][HIT_CRV]][iz][which]); /* INTERCEPT */ theintcp = edata_dev->lfitparfcon[ip][6][iz][which] << 18; gf_fit_proc_GPU(hit, sign_crv, coeff[ip], theintcp, &(fit_dev->fit_fit[ie][ip][ir][ic][ih]), &(fit_dev->fit_err[ie][ir][ic][ih])); fit_dev->fit_err_sum[ie] |= fit_dev->fit_err[ie][ir][ic][ih]; } /* end for(ih = 0; ih < NSVX_PLANE; ih++) */ } /* end if(tf->fep_hitmap[ie][ir][ic] != 0x1f) */ } /* enf if on indexes */ } __global__ void gf_comparator_GPU(struct fep_arrays* fep_dev, struct evt_arrays* evt_dev, struct fit_arrays* fit_dev, struct fout_arrays* fout_dev, int maxEvt) { int ie, ir, ic; int ChiSqCut, gvalue, gvalue_best; int ich = 0; int ind_best = 0; int chi2_best = 0; int gvalue_cut = 0x70; int bestTrackFound = 0; long long int chi[3], chi2; ie = blockIdx.x; ir = blockIdx.y; ic = threadIdx.x; if ( ( ie < maxEvt ) && ( ir < fep_dev->fep_nroads[ie] ) && ( ic < fep_dev->fep_ncmb[ie][ir] )) { ChiSqCut = 0x40; gvalue_best = 0x70; if (fep_dev->fep_ncomb5h[ie][ir][ic] == 1) { for (int i=0; i<NCHI; i++) chi[i] = fit_dev->fit_fit[ie][i+3][ir][ic][0]; gf_chi2_GPU(chi, &fit_dev->fit_err[ie][ir][ic][0], &chi2); if (chi2 <= ChiSqCut) { chi2 = chi2 >> 2; gvalue = gf_gfunc_GPU(fep_dev->fep_ncomb5h[ie][ir][ic], ich, fep_dev->fep_hitmap[ie][ir][ic], fep_dev->fep_lcl[ie][ir][ic], (chi2 & gf_mask_GPU(CHI2SUM_WIDTH))); if (gvalue < gvalue_cut) gf_formatter_GPU(ie, ir, ic, 0, chi2, fep_dev, fit_dev, evt_dev, fout_dev); } } else if (fep_dev->fep_ncomb5h[ie][ir][ic] == 5) { bestTrackFound = 0; gvalue_best = 999; ind_best = 999; chi2_best = 999; for (ich = 0; ich < fep_dev->fep_ncomb5h[ie][ir][ic]; ich++) { for (int i=0; i<NCHI; i++) chi[i] = fit_dev->fit_fit[ie][i+3][ir][ic][ich]; /* calculate chisq */ gf_chi2_GPU(chi, &fit_dev->fit_err[ie][ir][ic][ich], &chi2); /* check chiSq */ if (chi2 <= ChiSqCut) { chi2 = chi2 >> 2; /* FC - hack .. 
see matching shift in gf_chi2 */ gvalue = gf_gfunc_GPU(fep_dev->fep_ncomb5h[ie][ir][ic], ich, fep_dev->fep_hitmap[ie][ir][ic], fep_dev->fep_lcl[ie][ir][ic], (chi2 & gf_mask_GPU(CHI2SUM_WIDTH))); if ((gvalue < gvalue_cut) && (gvalue < gvalue_best)) { gvalue_best = gvalue; ind_best = ich; chi2_best = chi2; bestTrackFound = 1; } } /* end if(chi2 <= ChiSqCut) */ } /* end for(ich = 0; ich < gf->fep->ncomb5h[ir][ic]; ich++) */ if (bestTrackFound) gf_formatter_GPU(ie, ir, ic, ind_best, chi2_best, fep_dev, fit_dev, evt_dev, fout_dev); } /* end if(gf->fep->ncomb5h[ir][ic] == 1) */ } /* end if on indexes */ } __global__ void gf_compute_eeword_GPU( struct fep_arrays* fep_dev, struct fit_arrays* fit_dev, struct fout_arrays* fout_dev, int maxEvt) { int eoe_err; int ie = blockIdx.x * blockDim.x + threadIdx.x; if ( ie < maxEvt ) { fout_dev->fout_err_sum[ie] = (fep_dev->fep_err_sum[ie] | fit_dev->fit_err_sum[ie]); gf_formatter_err_GPU(fout_dev->fout_err_sum[ie], GF_ERRMASK_CDF, GF_ERRMASK_SVT, GF_ERRMASK_EOE, &eoe_err, &fout_dev->fout_cdferr[ie], &fout_dev->fout_svterr[ie]); fout_dev->fout_ee_word[ie] = (fep_dev->fep_ee_word[ie] & (gf_mask_GPU(SVT_WORD_WIDTH) & ~(1<<SVT_PAR_BIT))); fout_dev->fout_ee_word[ie] |= (eoe_err<<SVT_ERR_LSB); fout_dev->fout_ee_word[ie] |= (fout_dev->fout_parity[ie]<<SVT_PAR_BIT); } } __global__ void gf_setout(struct fout_arrays* fout_dev, int maxEvt, unsigned int* cable_dev, int* ndata_dev) { *ndata_dev = 0; for (int ie=0; ie < maxEvt; ie++) { for (int nt=0; nt < fout_dev->fout_ntrks[ie]; nt++) { memcpy(cable_dev + *ndata_dev, fout_dev->fout_gfword[ie][nt], NTFWORDS*sizeof(unsigned int)); (*ndata_dev) += NTFWORDS; } memcpy(cable_dev + *ndata_dev, &fout_dev->fout_ee_word[ie], sizeof(unsigned int)); (*ndata_dev)++; } } void gf_fit_GPU(struct fep_arrays* fep_dev, struct evt_arrays* evt_dev, struct extra_data* edata_dev, struct fit_arrays* fit_dev, struct fout_arrays* fout_dev, int maxEvt, unsigned int* cable_dev, int* ndata_dev) { dim3 blocks(NEVTS,MAXROAD); hipLaunchKernelGGL(( kFit), dim3(blocks), dim3(dim3(MAXCOMB,NFITTER)), 0, 0, fep_dev, edata_dev, fit_dev, maxEvt); hipLaunchKernelGGL(( gf_fit_format_GPU), dim3(blocks), dim3(dim3(MAXCOMB, MAXCOMB5H)), 0, 0, fep_dev, fit_dev, maxEvt); hipLaunchKernelGGL(( gf_comparator_GPU), dim3(blocks), dim3(dim3(MAXCOMB)), 0, 0, fep_dev, evt_dev, fit_dev, fout_dev, maxEvt); hipLaunchKernelGGL(( gf_compute_eeword_GPU), dim3((NEVTS+255)/256), dim3(256), 0, 0, fep_dev, fit_dev, fout_dev, maxEvt); hipLaunchKernelGGL(( gf_setout), dim3(1),dim3(1), 0, 0, fout_dev, maxEvt, cable_dev, ndata_dev); }
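// Illustrative aside (not part of the original file pair): the .cu file that follows is
// the CUDA original of the code above, and the systematic difference between the two is
// the kernel-launch syntax that hipify rewrites, e.g.
//   CUDA:  gf_setout<<<1,1>>>(fout_dev, maxEvt, cable_dev, ndata_dev);
//   HIP:   hipLaunchKernelGGL(( gf_setout), dim3(1),dim3(1), 0, 0, fout_dev, maxEvt, cable_dev, ndata_dev);
// A minimal self-contained sketch of that mapping, using a made-up kernel "scale":

#include <hip/hip_runtime.h>

__global__ void scale(float *data, int n, float s) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;   // one thread per element
  if (i < n) data[i] *= s;
}

void launch_scale(float *d_data, int n, hipStream_t stream) {
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
  // grid, block, dynamic shared memory and stream become the 2nd..5th macro arguments;
  // the kernel's own arguments follow them.
  hipLaunchKernelGGL((scale), grid, block, 0, stream, d_data, n, 1.5f);
}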
23ff134cf05fc96cb882686236409a176d53ff5c.cu
#include "svt_utils.h" __device__ int svtsim_whichFit_full_GPU(int layerMask, int lcMask) { switch (layerMask & 0x1f) { case 0x0f: /* 0123 */ return 0; case 0x17: /* 0124 */ return 1; case 0x1b: /* 0134 */ return 2; case 0x1d: /* 0234 */ return 3; case 0x1e: /* 1234 */ return 4; case 0x1f: /* 01234 - this is the fun one to be careful with */ if(lcMask == 0) return 2; /* use 0134 if we have no LC */ else if (lcMask == 0x1) return 4; else if (lcMask == 0x2) return 3; else if (lcMask == 0x3) return 3; else if (lcMask == 0x4) return 2; else if (lcMask == 0x5) return 2; else if (lcMask == 0x6) return 2; else if (lcMask == 0x7) return 2; else if (lcMask == 0x8) return 1; else if (lcMask == 0x9) return 1; else if (lcMask == 0xa) return 1; else if (lcMask == 0xb) return 1; else if (lcMask == 0xc) return 2; else if (lcMask == 0xd) return 2; else if (lcMask == 0xe) return 2; else if (lcMask == 0xf) return 2; else /* If we have LC on outer layer just use 0123 */ return 0; default: return 0; } } __device__ int svtsim_whichFit_GPU(struct extra_data* edata_dev, int zin, int layerMask, int lcMask) { int which0 = 0, which = 0; if (zin<0 || zin>=SVTSIM_NBAR) zin = 0; which0 = svtsim_whichFit_full_GPU(layerMask, lcMask); which = edata_dev->whichFit[zin][which0]; return which; } __device__ int svtsim_get_gfMkAddr_GPU(struct extra_data* edata_dev, int *d, int nd, int d0) { /* d0 = iaddr */ int j; int md = 0x4000; int iz, lcl, hit; if (d0+nd>md) nd = md-d0; for (j = 0; j<nd; j++) { int i = j+d0; int word = 0xffff, intcp = 0, coeff = 0; int which; iz = i&7, lcl = i>>3 & 0x1f, hit = i>>8 & 0x3f; which = svtsim_whichFit_GPU(edata_dev, iz, hit, lcl); coeff = iz + which*6; /* poor choice for illegal iz=6,7, but compatible */ intcp = which; word = coeff<<3 | intcp; d[j] = word; } return nd; } __device__ int gf_mkaddr_GPU(struct extra_data* edata_dev, int hitmap, int lclmap, int zmap, int *coe_addr, int *int_addr, int *addr, int *err) { int iaddr; unsigned int datum = 0; if ((hitmap<0) || (hitmap > gf_mask_GPU( NSVX_PLANE + 1 )) || /* + XFT_LYR */ (lclmap<0) || (lclmap > gf_mask_GPU( NSVX_PLANE )) || (zmap<0) || (zmap > gf_mask_GPU( GF_ZID_WIDTH ))) *err |= ( 1 << SVTSIM_GF_MKADDR_INVALID ); iaddr = ((zmap & gf_mask_GPU(GF_SUBZ_WIDTH)) + (lclmap<<MADDR_NCLS_LSB) + (hitmap<<MADDR_HITM_LSB)); #define MAXMKA 8192 if ((iaddr < 0) || (iaddr >= MAXMKA)) return SVTSIM_GF_ERR; int ldat = 0; svtsim_get_gfMkAddr_GPU(edata_dev, &ldat, 1, iaddr); datum = ldat; *int_addr = datum & gf_mask_GPU(OFF_SUBA_WIDTH); *coe_addr = (datum >> OFF_SUBA_WIDTH) & gf_mask_GPU(PAR_ADDR_WIDTH); *addr = iaddr; return SVTSIM_GF_OK; } __device__ int gf_fit_proc_GPU(int hit[], int sign_crv, long long int coeff[], long long int intcp, long long int *result, int *err) { long long int temp = 0; int i = 0; *result = 0; *err = 0; for (i = 0; i < SVTNHITS; i++) { if (i < NSVX_PLANE) { temp += hit[i] * coeff[i]; } else if (i == HIT_PHI) { /* XFT phi */ hit[i] = (hit[i]&0x400) ? -((~hit[i]&0x3ff)+1) : (hit[i]&0x3ff); temp += hit[i] * coeff[i]; } else if (i == HIT_CRV) { /* XFT curvature (curv already with sign in fep ) */ if (sign_crv == 1) { /* if negative bit is set */ temp -= hit[i] * coeff[i]; } else { temp += hit[i] * coeff[i]; } } } *result = *result + temp + intcp; *result = *result<0 ? 
-((-*result)>>17) : *result>>17; if (*result > 0) *result &= gf_mask3_GPU(FIT_DWIDTH); else *result = -(abs(*result)&gf_mask3_GPU(FIT_DWIDTH)); return SVTSIM_GF_OK; } __device__ int gf_chi2_GPU(long long int chi[], int* trk_err, long long int *chi2) { long long int temp = 0; long long int chi2memdata = 0; *chi2 = 0; for (int i=0; i<NCHI; i++) { temp = abs(chi[i]); if (chi[i] < 0) temp++; chi2memdata = temp*temp; *chi2 += chi2memdata; } *chi2 = (*chi2 >> 2); if ((*chi2 >> 2) > gf_mask_GPU(CHI_DWIDTH)) { *chi2 = 0x7ff; *trk_err |= (1 << OFLOW_CHI_BIT); } return SVTSIM_GF_OK; } __device__ int gf_getq_GPU(int lyr_config) { int q = 0; switch (lyr_config) { case 0x01e : /* lcmap = 00000, hitmap = 11110 */ q = 3; break; case 0x01d : /* lcmap = 00000, hitmap = 11101 */ q = 2; break; case 0x01b : /* lcmap = 00000, hitmap = 11011 */ q = 1; break; case 0x017 : /* lcmap = 00000, hitmap = 10111 */ q = 2; break; case 0x00f : /* lcmap = 00000, hitmap = 01111 */ q = 2; break; case 0x03e : /* lcmap = 00001, hitmap = 11110 */ q = 2; break; case 0x03d : /* lcmap = 00001, hitmap = 11101 */ q = 1; break; case 0x03b : /* lcmap = 00001, hitmap = 11011 */ q = 1; break; case 0x037 : /* lcmap = 00001, hitmap = 10111 */ q = 1; break; case 0x02f : /* lcmap = 00001, hitmap = 01111 */ q = 1; break; case 0x05e : /* lcmap = 00010, hitmap = 11110 */ q = 7; break; case 0x05d : /* lcmap = 00010, hitmap = 11101 */ q = 1; break; case 0x05b : /* lcmap = 00010, hitmap = 11011 */ q = 2; break; case 0x057 : /* lcmap = 00010, hitmap = 10111 */ q = 2; break; case 0x04f : /* lcmap = 00010, hitmap = 01111 */ q = 2; break; case 0x09e : /* lcmap = 00100, hitmap = 11110 */ q = 7; break; case 0x09d : /* lcmap = 00100, hitmap = 11101 */ q = 2; break; case 0x09b : /* lcmap = 00100, hitmap = 11011 */ q = 1; break; case 0x097 : /* lcmap = 00100, hitmap = 10111 */ q = 2; break; case 0x08f : /* lcmap = 00100, hitmap = 01111 */ q = 3; break; case 0x11e : /* lcmap = 01000, hitmap = 11110 */ q = 7; break; case 0x11d : /* lcmap = 01000, hitmap = 11101 */ q = 2; break; case 0x11b : /* lcmap = 01000, hitmap = 11011 */ q = 2; break; case 0x117 : /* lcmap = 01000, hitmap = 10111 */ q = 1; break; case 0x10f : /* lcmap = 01000, hitmap = 01111 */ q = 3; break; case 0x21e : /* lcmap = 10000, hitmap = 11110 */ q = 7; break; case 0x21d : /* lcmap = 10000, hitmap = 11101 */ q = 2; break; case 0x21b : /* lcmap = 10000, hitmap = 11011 */ q = 2; break; case 0x217 : /* lcmap = 10000, hitmap = 10111 */ q = 2; break; case 0x20f : /* lcmap = 10000, hitmap = 01111 */ q = 1; break; case 0x0de : /* lcmap = 00110, hitmap = 11110 */ q = 7; break; case 0x0dd : /* lcmap = 00110, hitmap = 11101 */ q = 1; break; case 0x0db : /* lcmap = 00110, hitmap = 11011 */ q = 2; break; case 0x0d7 : /* lcmap = 00110, hitmap = 10111 */ q = 3; break; case 0x0cf : /* lcmap = 00110, hitmap = 01111 */ q = 4; break; case 0x19e : /* lcmap = 01100, hitmap = 11110 */ q = 7; break; case 0x19d : /* lcmap = 01100, hitmap = 11101 */ q = 2; break; case 0x19b : /* lcmap = 01100, hitmap = 11011 */ q = 1; break; case 0x197 : /* lcmap = 01100, hitmap = 10111 */ q = 1; break; case 0x18f : /* lcmap = 01100, hitmap = 01111 */ q = 3; break; case 0x31e : /* lcmap = 11000, hitmap = 11110 */ q = 7; break; case 0x31d : /* lcmap = 11000, hitmap = 11101 */ q = 3; break; case 0x31b : /* lcmap = 11000, hitmap = 11011 */ q = 3; break; case 0x317 : /* lcmap = 11000, hitmap = 10111 */ q = 1; break; case 0x30f : /* lcmap = 11000, hitmap = 01111 */ q = 2; break; case 0x15e : /* lcmap = 01010, hitmap = 11110 */ q = 7; 
break; case 0x15d : /* lcmap = 01010, hitmap = 11101 */ q = 1; break; case 0x15b : /* lcmap = 01010, hitmap = 11011 */ q = 3; q = 3; break; case 0x157 : /* lcmap = 01010, hitmap = 10111 */ q = 2; break; case 0x14f : /* lcmap = 01010, hitmap = 01111 */ q = 4; break; case 0x25e : /* lcmap = 10010, hitmap = 11110 */ q = 7; break; case 0x25d : /* lcmap = 10010, hitmap = 11101 */ q = 1; break; case 0x25b : /* lcmap = 10010, hitmap = 11011 */ q = 2; break; case 0x257 : /* lcmap = 10010, hitmap = 10111 */ q = 2; break; case 0x24f : /* lcmap = 10010, hitmap = 01111 */ q = 1; break; case 0x29e : /* lcmap = 10100, hitmap = 11110 */ q = 7; break; case 0x29d : /* lcmap = 10100, hitmap = 11101 */ q = 2; break; case 0x29b : /* lcmap = 10100, hitmap = 11011 */ q = 1; break; case 0x297 : /* lcmap = 10100, hitmap = 10111 */ q = 2; break; case 0x28f : /* lcmap = 10100, hitmap = 01111 */ q = 1; break; default: q = 7; break; } return q; } __device__ int gf_gfunc_GPU(int ncomb5h, int icomb5h, int hitmap, int lcmap, int chi2) { int lyr_config; int gvalue; int newhitmap; int newlcmap; int q = 0; if (ncomb5h == 1) { newhitmap = hitmap; newlcmap = lcmap; } else if (ncomb5h == 5) { switch (icomb5h) { case 0 : /* 11110 */ newhitmap = 0x1e; newlcmap = (lcmap & 0x1e); break; case 1 : /* 11101 */ newhitmap = 0x1d; newlcmap = lcmap & 0x1d; break; case 2 : /* 11011 */ newhitmap = 0x1b; newlcmap = lcmap & 0x1b; break; case 3 : /* 10111 */ newhitmap = 0x17; newlcmap = lcmap & 0x17; break; case 4 : /* 01111 */ newhitmap = 0x0f; newlcmap = lcmap & 0x0f; break; } } lyr_config = newhitmap + (newlcmap << 5); q = gf_getq_GPU(lyr_config); gvalue = (q << 4) + ((chi2 & 0x3ff) >> 6); return gvalue; } __device__ int gf_stword_GPU(int id, int err) { /* Compose the GF status word in the 7th word from the GF INPUT : err; error summary OUTPUT : return the gf_stword NOTE: Currently this code does not support the parity error and FIFO error. */ int word; word = id; if ((err>>OFLOW_HIT_BIT)&gf_mask_GPU(1)) word |= (1<<GFS_OFL_HIT); if ((err>>OFLOW_CHI_BIT)&gf_mask_GPU(1)) word |= (1<<GFS_OFL_CHI); if (((err>>UFLOW_HIT_BIT)&gf_mask_GPU(1)) || ((err>>OUTORDER_BIT)&gf_mask_GPU(1))) word |= (1<<GFS_INV_DATA); return word; } __device__ int cal_parity_GPU(int word) { int par = 0; for (int i=0; i<SVT_WORD_WIDTH; i++) par ^= ((word>>i) & gf_mask_GPU(1)); return par; } __device__ int gf_formatter_err_GPU(int err, int cdfmsk, int svtmsk, int eoemsk, int *eoe, int *cdf, int *svt) { /* Simulate the board error conditions (CDF-ERR, SVT-ERR and EOE-ERR) INPUT: err; error summary. cdfmsk; Mask for the CDF-ERR. svtmsk; Mask for the SVT-ERR. eoemsk; Mask for the EOE-ERR. 
OUTPUT: *eoe; EOE error *cdf; CDF error *svt; SVT error */ /* --------- Executable starts here ------------ */ *cdf = 0; /* never turned ON except for the FIFO overflow */ *svt = 0; *eoe = 0; for (int i=0; i<= FIT_RESULT_OFLOW_BIT; i++) { if ((err>>i)&gf_mask_GPU(1)) { if (((svtmsk>>i)&gf_mask_GPU(1)) == 0) *svt = 1; if (i == 0) { if (((eoemsk >> PARITY_ERR_BIT) & gf_mask_GPU(1)) == 0) { *eoe |= (1<<PARITY_ERR_BIT); } } else if ((i==2) || (i==3)) { if (((eoemsk>>INV_DATA_BIT)&gf_mask_GPU(1)) == 0) { *eoe |= (1<<INV_DATA_BIT); } } else { if (((eoemsk>>INT_OFLOW_BIT)&gf_mask_GPU(1)) == 0) { *eoe |= (1<<INT_OFLOW_BIT); } } } /* if ((err>>i)&gf_mask_GPU(1)) */ } /* for (i=0; i<= FIT_RESULT_OFLOW_BIT; i++) */ return SVTSIM_GF_OK; } __device__ int gf_formatter_GPU(int ie, int ir, int ic, int ich, int chi2, struct fep_arrays* fep_dev, struct fit_arrays* fit_dev, struct evt_arrays* evt_dev, struct fout_arrays* fout_dev) { int it, err; int hit_form[NSVX_PLANE]; int z = 0; /* z should be 6 bits large */ int gf_stat = 0; // atomicAdd returns the old value it = atomicAdd(&fout_dev->fout_ntrks[ie], 1); err = (fep_dev->fep_err[ie][ir] | fit_dev->fit_err[ie][ir][ic][ich]); for (int i=0; i<NSVX_PLANE; i++) { /* Hit coordinate */ if (fep_dev->fep_ncomb5h[ie][ir][ic] == 5) { if (i != ich) { hit_form[i] = fep_dev->fep_hit[ie][ir][ic][i]&gf_mask_GPU(GF_HIT_WIDTH); /* Long Cluster bit */ hit_form[i] += (((fep_dev->fep_hit[ie][ir][ic][i] & 0x4000) ? 1 : 0) << GF_HIT_WIDTH); /* Hit existence bit */ hit_form[i] += (((fep_dev->fep_hitmap[ie][ir][ic]>>i)&gf_mask_GPU(1))<<(GF_HIT_WIDTH+1)); hit_form[i] = (hit_form[i]&gf_mask_GPU(GF_HIT_WIDTH+2)); } else hit_form[i] = 0; } else { hit_form[i] = fep_dev->fep_hit[ie][ir][ic][i]&gf_mask_GPU(GF_HIT_WIDTH); /* Long Cluster bit */ hit_form[i] += (((fep_dev->fep_hit[ie][ir][ic][i] & 0x4000) ? 
1 : 0) << GF_HIT_WIDTH); /* Hit existence bit */ hit_form[i] += (((fep_dev->fep_hitmap[ie][ir][ic]>>i)&gf_mask_GPU(1))<<(GF_HIT_WIDTH+1)); hit_form[i] = (hit_form[i]&gf_mask_GPU(GF_HIT_WIDTH+2)); } } if (1) { int presentmask; int newhitmap; if (fep_dev->fep_ncomb5h[ie][ir][ic] == 1) { presentmask = fep_dev->fep_hitmap[ie][ir][ic]; } else if (fep_dev->fep_ncomb5h[ie][ir][ic] == 5) { switch (ich) { case 0 : /* 11110 */ newhitmap = 0x1e; break; case 1 : /* 11101 */ newhitmap = 0x1d; break; case 2 : /* 11011 */ newhitmap = 0x1b; break; case 3 : /* 10111 */ newhitmap = 0x17; break; case 4 : /* 01111 */ newhitmap = 0x0f; break; } presentmask = newhitmap; } { int longmask = presentmask & fep_dev->fep_lcl[ie][ir][ic]; int goodmask = presentmask & ~longmask; int badmask = 0x1f & ~goodmask; int badmap[] = { 0x0, /* 00000: all layers good */ 0x5, /* 10000: layer 0 bad */ 0x4, /* 01000: layer 1 bad */ 0xe, /* 11000: layers 0,1 bad (changed from f to e) */ 0x3, /* 00100: layer 2 bad */ 0xe, /* 10100: layers 0,2 bad */ 0xb, /* 01100: layers 1,2 bad */ 0xf, /* 11100: >2 layers bad */ 0x2, /* 00010: layer 3 bad */ 0xd, /* 10010: layers 0,3 bad */ 0xa, /* 01010: layers 1,3 bad */ 0xf, /* 11010: >2 layers bad */ 0x8, /* 00110: layers 2,3 bad */ 0xf, /* 10110: >2 layers bad */ 0xf, /* 01110: >2 layers bad */ 0xf, /* 11110: >2 layers bad */ 0x1, /* 00001: layer 4 bad */ 0xc, /* 10001: layers 0,4 bad */ 0x8, /* 01001: layers 1,4 bad (oops: doc says 0x9 not 0x8) */ 0xf, /* 11001: >2 layers bad */ 0x7, /* 00101: layers 2,4 bad */ 0xf, /* 10101: >2 layers bad */ 0xf, /* 01101: >2 layers bad */ 0xf, /* 11101: >2 layers bad */ 0x6, /* 00011: layers 3,4 bad */ 0xf, /* 10011: >2 layers bad */ 0xf, /* 01011: >2 layers bad */ 0xf, /* 11011: >2 layers bad */ 0xf, /* 00111: >2 layers bad */ 0xf, /* 10111: >2 layers bad */ 0xf, /* 01111: >2 layers bad */ 0xf /* 11111: all layers bad! 
*/ }; gf_stat = badmap[badmask]; } } gf_stat = gf_stword_GPU(gf_stat, err); /* output word (25 bits) (from CDFnote 5026) 4-3-2-1-0-9-8-7-6-5-4-3-2-1-0-9-8-7-6-5-4-3-2-1-0 */ /* 1st word 24-23-22-21- 20- 19- 18-17-16-15-14-13- 12-11-10-9-8-7-6-5-4-3-2-1-0 -------- 1 - z phi */ /* phi is already formatted by the fitter (13 bits) */ if (fep_dev->fep_ncomb5h[ie][ir][ic] == 1) { z = fep_dev->fep_zid[ie][ir]; } else if (fep_dev->fep_ncomb5h[ie][ir][ic] == 5) { if (ich == 0){ z = ((fep_dev->fep_hitZ[ie][ir][ic][4]&gf_mask_GPU(GF_SUBZ_WIDTH))<<GF_SUBZ_WIDTH) + (fep_dev->fep_hitZ[ie][ir][ic][1]&gf_mask_GPU(GF_SUBZ_WIDTH)); } else if (ich == 4){ z = ((fep_dev->fep_hitZ[ie][ir][ic][3]&gf_mask_GPU(GF_SUBZ_WIDTH))<<GF_SUBZ_WIDTH) + (fep_dev->fep_hitZ[ie][ir][ic][0]&gf_mask_GPU(GF_SUBZ_WIDTH)); } else { z = ((fep_dev->fep_hitZ[ie][ir][ic][4]&gf_mask_GPU(GF_SUBZ_WIDTH))<<GF_SUBZ_WIDTH) + (fep_dev->fep_hitZ[ie][ir][ic][0]&gf_mask_GPU(GF_SUBZ_WIDTH)); } } fout_dev->fout_gfword[ie][it][0] = (fit_dev->fit_fit[ie][0][ir][ic][ich] & gf_mask_GPU(OPHI_WIDTH)) + ((z & gf_mask_GPU(GF_ZID_WIDTH)) << OPHI_WIDTH) + (0 << OBP_ERR_BIT) // we follow the word structure in http://www-cdf.fnal.gov/internal/upgrades/daq_trig/trigger/svt/BoardDocs/data_words/tracks_bits.html + (1<<(OBP_ID_BIT)); /* 2nd word 4-3-2-1-0-9-8 -7-6-5-4-3-2-1-0 -9 -8-7-6-5-4-3-2-1-0 24-23-22-21- 20- 19- 18- 17-16-15-14-13- 12-11- 10-9-8-7-6-5-4-3-2-1-0 ------------ rID sign c d 17mo bit di roadID -> 19 18mo -> 20 */ fout_dev->fout_gfword[ie][it][1] = fit_dev->fit_fit[ie][1][ir][ic][ich] + (fit_dev->fit_fit[ie][2][ir][ic][ich] << OCVR_LSB) + ((evt_dev->evt_road[ie][ir] & 0x60000) << 2); /* 3rd word 4-3-2-1-0-9-8-7 -6-5-4-3-2-1-0-9-8-7-6-5-4-3-2-1-0 --------sector AM road id (17 LSB) */ fout_dev->fout_gfword[ie][it][2] = (evt_dev->evt_road[ie][ir] & gf_mask_GPU(OAMROAD_WIDTH)) + (( fep_dev->fep_cable_sect[ie][ir] & gf_mask_GPU(SVT_SECT_WIDTH)) << OSEC_LSB); /* 4th word 4-3-2-1-0-9-8-7-6-5-4-3-2-1-0 -9-8-7-6-5-4-3-2-1-0 ----------x1 x0 bit 21 = bit 19 del roadID hit = 8 bassi e 2 alti */ fout_dev->fout_gfword[ie][it][3] = hit_form[0] + (hit_form[1]<<OX1_LSB) + ((evt_dev->evt_road[ie][ir] & 0x80000) << 1); /* 5th word 4-3-2-1-0-9-8-7-6-5-4-3-2-1-0 -9-8-7-6-5-4-3-2-1-0 ----------x3 x2 bit 21 = road ID 20 */ fout_dev->fout_gfword[ie][it][4] = hit_form[2] + (hit_form[3]<<OX3_LSB) + ((evt_dev->evt_road[ie][ir] & 0x100000)); /* 6th word 4-3-2-1-0-9-8-7-6-5-4-3-2-1-0 -9-8-7-6-5-4-3-2-1-0 ----------chisq x4 */ fout_dev->fout_gfword[ie][it][5] = hit_form[4] + ((chi2 & gf_mask_GPU(CHI2SUM_WIDTH)) << OCHI2_LSB); /* 7th word 4-3-2-1 -0-9-8-7-6-5-4-3-2-1-0-9 -8-7-6-5-4-3-2-1-0 ------0 TrackFitter status Track Number Track Num = identificativo della traccia XFT phi - 3 bit meno significativi del phi della traccia XFT */ fout_dev->fout_gfword[ie][it][6] = ((fep_dev->fep_phi[ie][ir][ic] >> SVT_TRKID_LSB) &gf_mask_GPU(SVT_TRKID_WIDTH)) + ((gf_stat & gf_mask_GPU(GF_STAT_WIDTH))<<OSTAT_LSB) + (1<<SVT_EP_BIT); for (int i=0; i<NTFWORDS; i++) atomicXor(&fout_dev->fout_parity[ie], cal_parity_GPU(fout_dev->fout_gfword[ie][it][i])); return SVTSIM_GF_OK; } __global__ void gf_fit_format_GPU (struct fep_arrays* fep_dev, struct fit_arrays* fit_dev, int maxEvt ) { int ie, ir, ic, ich; long long int temp = 0; ie = blockIdx.x; // events index ir = blockIdx.y; // roads index // combination indexes ic = threadIdx.x; ich = threadIdx.y; if ( ( ie < maxEvt ) && ( ir < fep_dev->fep_nroads[ie] ) && ( ic < fep_dev->fep_ncmb[ie][ir] ) && ( ich < fep_dev->fep_ncomb5h[ie][ir][ic] ) ) { /* phi 
*/ temp = fit_dev->fit_fit[ie][0][ir][ic][ich]; if ( temp > 0) { temp++; temp = temp >> 1; } else { temp--; temp = -((-temp) >> 1); } if (abs(temp) > gf_mask_GPU(OPHI_WIDTH)) { fit_dev->fit_err[ie][ir][ic][ich] |= (1<<FIT_RESULT_OFLOW_BIT); } temp = (temp < 0 ? -temp : temp) & gf_mask_GPU(OPHI_WIDTH); fit_dev->fit_fit[ie][0][ir][ic][ich] = temp; /* impact parameter */ temp = fit_dev->fit_fit[ie][1][ir][ic][ich]; if ( temp > 0) { temp++; temp = temp >> 1; } else { temp--; temp = -((-temp) >> 1); } /*overflow check */ if (abs(temp) > gf_mask_GPU(OIMP_WIDTH)) { fit_dev->fit_err[ie][ir][ic][ich] |= (1<< FIT_RESULT_OFLOW_BIT); } temp = (temp < 0 ? -temp : temp) & gf_mask_GPU(OIMP_WIDTH); /* now add a bit for the sign */ if ( fit_dev->fit_fit[ie][1][ir][ic][ich] < 0) { temp += (1<<OIMP_SIGN); } fit_dev->fit_fit[ie][1][ir][ic][ich] = temp; /* curvature */ temp = fit_dev->fit_fit[ie][2][ir][ic][ich]; if (temp > 0) { temp++; temp = temp >> 1; } else { temp--; temp = -((-temp) >> 1); } /*overflow check */ if (abs(temp) > gf_mask_GPU(OCVR_WIDTH)) { fit_dev->fit_err[ie][ir][ic][ich] |= (1<<FIT_RESULT_OFLOW_BIT); } temp = (temp < 0 ? -temp : temp) & gf_mask_GPU(OCVR_WIDTH); /* now add a bit for the sign */ if (fit_dev->fit_fit[ie][2][ir][ic][ich] < 0) { temp += (1<<OCVR_SIGN); } fit_dev->fit_fit[ie][2][ir][ic][ich] = temp; } // end if } __global__ void kFit(struct fep_arrays* fep_dev, struct extra_data* edata_dev, struct fit_arrays* fit_dev, int maxEvt) { int ir, ic, ip, ih, il; int hit[SVTNHITS]; long long int coeff[NFITTER][SVTNHITS]; int coe_addr, int_addr; /* Address for coefficients and intercept */ int mka_addr; /* Address for MKADDR memory */ long long int theintcp = 0; int sign_crv = 0; int which, lwhich; int iz; int ie; int newhitmap; int map[7][7] = { { 0, 1, 2, 3, -1, 4, 5 }, /* 01235 */ { 0, 1, 2, -1, 3, 4, 5 }, /* 01245 */ { 0, 1, -1, 2, 3, 4, 5 }, /* 01345 */ { 0, -1, 1, 2, 3, 4, 5 }, /* 02345 */ { -1, 0, 1, 2, 3, 4, 5 }, /* 12345 */ { 0, 1, 2, 3, -1, 4, 5 }, /* (??) */ { 0, 1, 2, 3, -1, 4, 5 } /* (??) */ }; ie = blockIdx.x; // event index ir = blockIdx.y; // road index ic = threadIdx.x; // combination index ip = threadIdx.y; // fitter index fit_dev->fit_err_sum[ie] = fep_dev->fep_err_sum[ie]; if ( ( ie < maxEvt ) && ( ir < fep_dev->fep_nroads[ie] ) && ( ic < fep_dev->fep_ncmb[ie][ir] ) ) { if ( fep_dev->fep_hitmap[ie][ir][ic] != 0x1f ) { gf_mkaddr_GPU(edata_dev, fep_dev->fep_hitmap[ie][ir][ic], fep_dev->fep_lcl[ie][ir][ic], fep_dev->fep_zid[ie][ir], &coe_addr, &int_addr, &mka_addr, fit_dev->fit_err_sum); int_addr = (int_addr<<OFF_SUBA_LSB) + fep_dev->fep_road[ie][ir]; iz = fep_dev->fep_zid[ie][ir]&7; which = coe_addr/6; lwhich = which; which = edata_dev->whichFit[iz][which]; for (ih = 0; ih < SVTNHITS; ih++) { coeff[ip][ih] = map[lwhich][ih] < 0 ? 
0 : (edata_dev->lfitparfcon[ip][map[lwhich][ih]][iz][which]); if ( ih<NSVX_PLANE ) { hit[ih] = ((fep_dev->fep_hit[ie][ir][ic][ih] << 1) + 1) & gf_mask_GPU(15); } else if (ih == HIT_PHI) { hit[ih] = fep_dev->fep_phi[ie][ir][ic]; hit[ih] -= edata_dev->wedge[ie]*SVTSIM_XFTPHIBINS/SVTSIM_NWEDGE; hit[ih] = ((hit[ih] << 3) + (1 << 2)) & gf_mask_GPU(15); } else if (ih == HIT_CRV) { sign_crv = fep_dev->fep_crv_sign[ie][ir][ic]; hit[ih] = ((fep_dev->fep_crv[ie][ir][ic] << 8) + (1 << 7)) & gf_mask_GPU(15); } } /* end for(ih = 0; ih < SVTNHITS; ih++) */ theintcp = edata_dev->lfitparfcon[ip][6][iz][which] << 18; gf_fit_proc_GPU(hit, sign_crv, coeff[ip], theintcp, &(fit_dev->fit_fit[ie][ip][ir][ic][0]), &(fit_dev->fit_err[ie][ir][ic][0])); } else { /* 5/5 track transformed in 5 4/5 tracks*/ for (ih = 0; ih < NSVX_PLANE; ih++) { for (il = 0; il < NSVX_PLANE; il++) { /* one call to gf_fit_proc for each ih value */ /* let's calculate the new hitmap */ if (il != ih) { switch (ih) { case 0 : /* 11110 */ newhitmap = 0x1e; break; case 1 : /* 11101 */ newhitmap = 0x1d; break; case 2 : /* 11011 */ newhitmap = 0x1b; break; case 3 : /* 10111 */ newhitmap = 0x17; break; case 4 : /* 01111 */ newhitmap = 0x0f; break; } gf_mkaddr_GPU(edata_dev, newhitmap, fep_dev->fep_lcl[ie][ir][ic], fep_dev->fep_zid[ie][ir], &coe_addr, &int_addr, &mka_addr, fit_dev->fit_err_sum); if (ih == 0){ iz = fep_dev->fep_hitZ[ie][ir][ic][1];; } else { iz = fep_dev->fep_zid[ie][ir]&7; } which = coe_addr/6; lwhich = which; which = edata_dev->whichFit[iz][which]; coeff[ip][il] = map[lwhich][il] < 0 ? 0 : (edata_dev->lfitparfcon[ip][map[lwhich][il]][iz][which]); hit[il] = ((fep_dev->fep_hit[ie][ir][ic][il] << 1) + 1) & gf_mask_GPU(15); } else { // il == ih hit[il] = 0 ; coeff[ip][il]= 1; } } /* end for(il = 0; il < NSVX_PLANE; il++) */ hit[HIT_PHI] = fep_dev->fep_phi[ie][ir][ic]; hit[HIT_PHI] -= edata_dev->wedge[ie]*SVTSIM_XFTPHIBINS/SVTSIM_NWEDGE; hit[HIT_PHI] = ((hit[HIT_PHI] << 3) + (1 << 2)) & gf_mask_GPU(15); coeff[ip][HIT_PHI] = map[lwhich][HIT_PHI] < 0 ? 0 : (edata_dev->lfitparfcon[ip][map[lwhich][HIT_PHI]][iz][which]); sign_crv = fep_dev->fep_crv_sign[ie][ir][ic]; hit[HIT_CRV] = ((fep_dev->fep_crv[ie][ir][ic] << 8) + (1 << 7)) & gf_mask_GPU(15); coeff[ip][HIT_CRV] = map[lwhich][HIT_CRV] < 0 ? 
0 : (edata_dev->lfitparfcon[ip][map[lwhich][HIT_CRV]][iz][which]); /* INTERCEPT */ theintcp = edata_dev->lfitparfcon[ip][6][iz][which] << 18; gf_fit_proc_GPU(hit, sign_crv, coeff[ip], theintcp, &(fit_dev->fit_fit[ie][ip][ir][ic][ih]), &(fit_dev->fit_err[ie][ir][ic][ih])); fit_dev->fit_err_sum[ie] |= fit_dev->fit_err[ie][ir][ic][ih]; } /* end for(ih = 0; ih < NSVX_PLANE; ih++) */ } /* end if(tf->fep_hitmap[ie][ir][ic] != 0x1f) */ } /* enf if on indexes */ } __global__ void gf_comparator_GPU(struct fep_arrays* fep_dev, struct evt_arrays* evt_dev, struct fit_arrays* fit_dev, struct fout_arrays* fout_dev, int maxEvt) { int ie, ir, ic; int ChiSqCut, gvalue, gvalue_best; int ich = 0; int ind_best = 0; int chi2_best = 0; int gvalue_cut = 0x70; int bestTrackFound = 0; long long int chi[3], chi2; ie = blockIdx.x; ir = blockIdx.y; ic = threadIdx.x; if ( ( ie < maxEvt ) && ( ir < fep_dev->fep_nroads[ie] ) && ( ic < fep_dev->fep_ncmb[ie][ir] )) { ChiSqCut = 0x40; gvalue_best = 0x70; if (fep_dev->fep_ncomb5h[ie][ir][ic] == 1) { for (int i=0; i<NCHI; i++) chi[i] = fit_dev->fit_fit[ie][i+3][ir][ic][0]; gf_chi2_GPU(chi, &fit_dev->fit_err[ie][ir][ic][0], &chi2); if (chi2 <= ChiSqCut) { chi2 = chi2 >> 2; gvalue = gf_gfunc_GPU(fep_dev->fep_ncomb5h[ie][ir][ic], ich, fep_dev->fep_hitmap[ie][ir][ic], fep_dev->fep_lcl[ie][ir][ic], (chi2 & gf_mask_GPU(CHI2SUM_WIDTH))); if (gvalue < gvalue_cut) gf_formatter_GPU(ie, ir, ic, 0, chi2, fep_dev, fit_dev, evt_dev, fout_dev); } } else if (fep_dev->fep_ncomb5h[ie][ir][ic] == 5) { bestTrackFound = 0; gvalue_best = 999; ind_best = 999; chi2_best = 999; for (ich = 0; ich < fep_dev->fep_ncomb5h[ie][ir][ic]; ich++) { for (int i=0; i<NCHI; i++) chi[i] = fit_dev->fit_fit[ie][i+3][ir][ic][ich]; /* calculate chisq */ gf_chi2_GPU(chi, &fit_dev->fit_err[ie][ir][ic][ich], &chi2); /* check chiSq */ if (chi2 <= ChiSqCut) { chi2 = chi2 >> 2; /* FC - hack .. 
see matching shift in gf_chi2 */ gvalue = gf_gfunc_GPU(fep_dev->fep_ncomb5h[ie][ir][ic], ich, fep_dev->fep_hitmap[ie][ir][ic], fep_dev->fep_lcl[ie][ir][ic], (chi2 & gf_mask_GPU(CHI2SUM_WIDTH))); if ((gvalue < gvalue_cut) && (gvalue < gvalue_best)) { gvalue_best = gvalue; ind_best = ich; chi2_best = chi2; bestTrackFound = 1; } } /* end if(chi2 <= ChiSqCut) */ } /* end for(ich = 0; ich < gf->fep->ncomb5h[ir][ic]; ich++) */ if (bestTrackFound) gf_formatter_GPU(ie, ir, ic, ind_best, chi2_best, fep_dev, fit_dev, evt_dev, fout_dev); } /* end if(gf->fep->ncomb5h[ir][ic] == 1) */ } /* end if on indexes */ } __global__ void gf_compute_eeword_GPU( struct fep_arrays* fep_dev, struct fit_arrays* fit_dev, struct fout_arrays* fout_dev, int maxEvt) { int eoe_err; int ie = blockIdx.x * blockDim.x + threadIdx.x; if ( ie < maxEvt ) { fout_dev->fout_err_sum[ie] = (fep_dev->fep_err_sum[ie] | fit_dev->fit_err_sum[ie]); gf_formatter_err_GPU(fout_dev->fout_err_sum[ie], GF_ERRMASK_CDF, GF_ERRMASK_SVT, GF_ERRMASK_EOE, &eoe_err, &fout_dev->fout_cdferr[ie], &fout_dev->fout_svterr[ie]); fout_dev->fout_ee_word[ie] = (fep_dev->fep_ee_word[ie] & (gf_mask_GPU(SVT_WORD_WIDTH) & ~(1<<SVT_PAR_BIT))); fout_dev->fout_ee_word[ie] |= (eoe_err<<SVT_ERR_LSB); fout_dev->fout_ee_word[ie] |= (fout_dev->fout_parity[ie]<<SVT_PAR_BIT); } } __global__ void gf_setout(struct fout_arrays* fout_dev, int maxEvt, unsigned int* cable_dev, int* ndata_dev) { *ndata_dev = 0; for (int ie=0; ie < maxEvt; ie++) { for (int nt=0; nt < fout_dev->fout_ntrks[ie]; nt++) { memcpy(cable_dev + *ndata_dev, fout_dev->fout_gfword[ie][nt], NTFWORDS*sizeof(unsigned int)); (*ndata_dev) += NTFWORDS; } memcpy(cable_dev + *ndata_dev, &fout_dev->fout_ee_word[ie], sizeof(unsigned int)); (*ndata_dev)++; } } void gf_fit_GPU(struct fep_arrays* fep_dev, struct evt_arrays* evt_dev, struct extra_data* edata_dev, struct fit_arrays* fit_dev, struct fout_arrays* fout_dev, int maxEvt, unsigned int* cable_dev, int* ndata_dev) { dim3 blocks(NEVTS,MAXROAD); kFit<<<blocks, dim3(MAXCOMB,NFITTER)>>>(fep_dev, edata_dev, fit_dev, maxEvt); gf_fit_format_GPU<<<blocks, dim3(MAXCOMB, MAXCOMB5H)>>>(fep_dev, fit_dev, maxEvt); gf_comparator_GPU<<<blocks, dim3(MAXCOMB)>>>(fep_dev, evt_dev, fit_dev, fout_dev, maxEvt); gf_compute_eeword_GPU<<<(NEVTS+255)/256, 256>>>(fep_dev, fit_dev, fout_dev, maxEvt); gf_setout<<<1,1>>>(fout_dev, maxEvt, cable_dev, ndata_dev); }
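// Illustrative aside (not part of the original file pair): gf_fit_GPU above queues five
// kernel launches (kFit, gf_fit_format_GPU, gf_comparator_GPU, gf_compute_eeword_GPU,
// gf_setout) on the default stream and returns without synchronizing or checking for
// errors. One possible calling pattern is sketched below; the wrapper name
// run_gigafitter and the CUDA_CHECK macro are illustrative only, and svt_utils.h is
// assumed to provide the *_arrays struct definitions, as it does for the file above.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include "svt_utils.h"

#define CUDA_CHECK(call)                                                  \
  do {                                                                    \
    cudaError_t err_ = (call);                                            \
    if (err_ != cudaSuccess) {                                            \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                         \
              cudaGetErrorString(err_), __FILE__, __LINE__);              \
      exit(EXIT_FAILURE);                                                 \
    }                                                                     \
  } while (0)

void gf_fit_GPU(struct fep_arrays*, struct evt_arrays*, struct extra_data*,
                struct fit_arrays*, struct fout_arrays*, int,
                unsigned int*, int*);   // defined above

void run_gigafitter(struct fep_arrays *fep_dev, struct evt_arrays *evt_dev,
                    struct extra_data *edata_dev, struct fit_arrays *fit_dev,
                    struct fout_arrays *fout_dev, int maxEvt,
                    unsigned int *cable_dev, int *ndata_dev,
                    unsigned int *cable_host, int *ndata_host) {
  gf_fit_GPU(fep_dev, evt_dev, edata_dev, fit_dev, fout_dev,
             maxEvt, cable_dev, ndata_dev);
  CUDA_CHECK(cudaGetLastError());        // catch bad launch configurations
  CUDA_CHECK(cudaDeviceSynchronize());   // wait for gf_setout to finish
  CUDA_CHECK(cudaMemcpy(ndata_host, ndata_dev, sizeof(int),
                        cudaMemcpyDeviceToHost));
  CUDA_CHECK(cudaMemcpy(cable_host, cable_dev,
                        (*ndata_host) * sizeof(unsigned int),
                        cudaMemcpyDeviceToHost));
}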
38c3f5e1909a17dc57e8ec1b3755a57dbfcd2d98.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/lookup_table_op.h" #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/float16.h" #include "paddle/phi/backends/gpu/gpu_primitives.h" namespace paddle { namespace operators { template <typename T, int BlockDimX, int BlockDimY, int GridDimX, bool PaddingFlag> __global__ void LookupTable(T *output, const T *table, const int64_t *ids, const int64_t N, const int64_t K, const int64_t D, const int64_t padding_idx) { int idx = threadIdx.x; int idy = blockIdx.x + threadIdx.y * GridDimX; while (idy < K) { int64_t id = ids[idy]; PADDLE_ENFORCE( id >= 0, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. Please check input value.", N, id); PADDLE_ENFORCE( id < N, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. Please check input value.", N, id); T *out = output + idy * D; const T *tab = table + id * D; for (int i = idx; i < D; i += BlockDimX) { if (PaddingFlag) { if (id == padding_idx) out[i] = static_cast<T>(0); else out[i] = tab[i]; } else { out[i] = tab[i]; } } idy += BlockDimY * GridDimX; } } template <typename T, int BlockDimX, int BlockDimY, int GridDimX> __global__ void LookupTableGrad(T *table, const T *output, const int64_t *ids, const int64_t N, const int64_t K, const int64_t D) { int idx = threadIdx.x; int idy = blockIdx.x + threadIdx.y * GridDimX; while (idy < K) { int64_t id = ids[idy]; PADDLE_ENFORCE( id >= 0, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. Please check input value.", N, id); PADDLE_ENFORCE( id < N, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. 
Please check input value.", N, id); const T *out = output + idy * D; T *tab = table + id * D; for (int i = idx; i < D; i += BlockDimX) { phi::CudaAtomicAdd(&tab[i], out[i]); } idy += BlockDimY * GridDimX; } } template <typename T> class LookupTableCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *table_t = context.Input<phi::DenseTensor>("W"); auto *ids_t = context.Input<phi::DenseTensor>("Ids"); auto *output_t = context.Output<phi::DenseTensor>("Out"); int64_t padding_idx = context.Attr<int64_t>("padding_idx"); auto id_name = context.InputNames("Ids").front(); auto out_name = context.OutputNames("Out").front(); size_t N = table_t->dims()[0]; size_t D = table_t->dims()[1]; size_t K = ids_t->numel(); auto *ids = ids_t->data<int64_t>(); auto *table = table_t->data<T>(); auto *output = output_t->mutable_data<T>(context.GetPlace()); #ifdef PADDLE_WITH_HIP dim3 threads(64, 4); #else dim3 threads(128, 8); #endif // PADDLE_WITH_HIP dim3 grids(8, 1); #ifdef PADDLE_WITH_HIP if (padding_idx == -1) hipLaunchKernelGGL(( LookupTable<T, 64, 4, 8, false>) , dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(), output, table, ids, N, K, D, padding_idx); else hipLaunchKernelGGL(( LookupTable<T, 64, 4, 8, true>) , dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(), output, table, ids, N, K, D, padding_idx); #else if (padding_idx == -1) hipLaunchKernelGGL(( LookupTable<T, 128, 8, 8, false>) , dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(), output, table, ids, N, K, D, padding_idx); else hipLaunchKernelGGL(( LookupTable<T, 128, 8, 8, true>) , dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(), output, table, ids, N, K, D, padding_idx); #endif // PADDLE_WITH_HIP } }; template <typename T> class LookupTableGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto &dev_ctx = context.template device_context<phi::GPUContext>(); bool is_sparse = context.Attr<bool>("is_sparse"); // Since paddings are not trainable and fixed in forward, the gradient of // paddings makes no sense and we don't deal with it in backward. if (is_sparse) { auto *ids = context.Input<phi::DenseTensor>("Ids"); auto *table = context.Input<phi::DenseTensor>("W"); auto *d_output = context.Input<phi::DenseTensor>(framework::GradVarName("Out")); auto *d_table = context.Output<phi::SelectedRows>(framework::GradVarName("W")); auto *ids_data = ids->data<int64_t>(); int64_t ids_num = ids->numel(); auto stream = dev_ctx.stream(); // copy GPU memory to CPU pinned memory phi::Vector<int64_t> new_rows; new_rows.resize(ids_num); auto gpu_place = context.GetPlace(); // TODO(yuyang18): Strange code here. 
phi::MixVector<int64_t> mixv_new_rows(&new_rows); memory::Copy(gpu_place, mixv_new_rows.CUDAMutableData(context.GetPlace()), gpu_place, ids_data, ids_num * sizeof(int64_t), stream); mixv_new_rows.CopyToCPU(); d_table->set_rows(new_rows); auto *d_table_value = d_table->mutable_value(); d_table_value->Resize({ids_num, table->dims()[1]}); d_table_value->mutable_data<T>(context.GetPlace()); auto *d_table_data = d_table_value->data<T>(); auto *d_output_data = d_output->data<T>(); auto d_output_dims = d_output->dims(); auto d_output_dims_2d = phi::flatten_to_2d(d_output_dims, d_output_dims.size() - 1); PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d, platform::errors::InvalidArgument( "ShapeError: The shape of lookup_table@Grad and " "output@Grad should be same. " "But received lookup_table@Grad's shape = [%s], " "output@Grad's shape = [%s].", d_table_value->dims(), d_output_dims_2d)); memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data, d_output->numel() * sizeof(T), stream); } else { auto ids_t = context.Input<phi::DenseTensor>("Ids"); auto d_output_t = context.Input<phi::DenseTensor>(framework::GradVarName("Out")); auto d_table_t = context.Output<phi::DenseTensor>(framework::GradVarName("W")); int N = d_table_t->dims()[0]; int D = d_table_t->dims()[1]; int K = ids_t->numel(); const int64_t *ids = ids_t->data<int64_t>(); const T *d_output = d_output_t->data<T>(); T *d_table = d_table_t->mutable_data<T>(context.GetPlace()); auto t = framework::EigenVector<T>::Flatten(*d_table_t); t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0)); #ifdef PADDLE_WITH_HIP dim3 threads(64, 4); #else dim3 threads(128, 8); #endif // PADDLE_WITH_HIP dim3 grids(8, 1); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(( LookupTableGrad<T, 64, 4, 8>), dim3(grids), dim3(threads), 0, dev_ctx.stream(), d_table, d_output, ids, N, K, D); #else hipLaunchKernelGGL(( LookupTableGrad<T, 128, 8, 8>), dim3(grids), dim3(threads), 0, dev_ctx.stream(), d_table, d_output, ids, N, K, D); #endif // PADDLE_WITH_HIP } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(lookup_table, ops::LookupTableCUDAKernel<float>, ops::LookupTableCUDAKernel<double>, ops::LookupTableCUDAKernel<plat::float16>, ops::LookupTableCUDAKernel<int8_t>, ops::LookupTableCUDAKernel<int16_t>); REGISTER_OP_CUDA_KERNEL(lookup_table_grad, ops::LookupTableGradCUDAKernel<float>, ops::LookupTableGradCUDAKernel<double>, ops::LookupTableGradCUDAKernel<plat::float16>);
38c3f5e1909a17dc57e8ec1b3755a57dbfcd2d98.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/lookup_table_op.h" #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/float16.h" #include "paddle/phi/backends/gpu/gpu_primitives.h" namespace paddle { namespace operators { template <typename T, int BlockDimX, int BlockDimY, int GridDimX, bool PaddingFlag> __global__ void LookupTable(T *output, const T *table, const int64_t *ids, const int64_t N, const int64_t K, const int64_t D, const int64_t padding_idx) { int idx = threadIdx.x; int idy = blockIdx.x + threadIdx.y * GridDimX; while (idy < K) { int64_t id = ids[idy]; PADDLE_ENFORCE( id >= 0, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. Please check input value.", N, id); PADDLE_ENFORCE( id < N, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. Please check input value.", N, id); T *out = output + idy * D; const T *tab = table + id * D; for (int i = idx; i < D; i += BlockDimX) { if (PaddingFlag) { if (id == padding_idx) out[i] = static_cast<T>(0); else out[i] = tab[i]; } else { out[i] = tab[i]; } } idy += BlockDimY * GridDimX; } } template <typename T, int BlockDimX, int BlockDimY, int GridDimX> __global__ void LookupTableGrad(T *table, const T *output, const int64_t *ids, const int64_t N, const int64_t K, const int64_t D) { int idx = threadIdx.x; int idy = blockIdx.x + threadIdx.y * GridDimX; while (idy < K) { int64_t id = ids[idy]; PADDLE_ENFORCE( id >= 0, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. Please check input value.", N, id); PADDLE_ENFORCE( id < N, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. 
Please check input value.", N, id); const T *out = output + idy * D; T *tab = table + id * D; for (int i = idx; i < D; i += BlockDimX) { phi::CudaAtomicAdd(&tab[i], out[i]); } idy += BlockDimY * GridDimX; } } template <typename T> class LookupTableCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *table_t = context.Input<phi::DenseTensor>("W"); auto *ids_t = context.Input<phi::DenseTensor>("Ids"); auto *output_t = context.Output<phi::DenseTensor>("Out"); int64_t padding_idx = context.Attr<int64_t>("padding_idx"); auto id_name = context.InputNames("Ids").front(); auto out_name = context.OutputNames("Out").front(); size_t N = table_t->dims()[0]; size_t D = table_t->dims()[1]; size_t K = ids_t->numel(); auto *ids = ids_t->data<int64_t>(); auto *table = table_t->data<T>(); auto *output = output_t->mutable_data<T>(context.GetPlace()); #ifdef PADDLE_WITH_HIP dim3 threads(64, 4); #else dim3 threads(128, 8); #endif // PADDLE_WITH_HIP dim3 grids(8, 1); #ifdef PADDLE_WITH_HIP if (padding_idx == -1) LookupTable<T, 64, 4, 8, false> <<<grids, threads, 0, context.cuda_device_context().stream()>>>( output, table, ids, N, K, D, padding_idx); else LookupTable<T, 64, 4, 8, true> <<<grids, threads, 0, context.cuda_device_context().stream()>>>( output, table, ids, N, K, D, padding_idx); #else if (padding_idx == -1) LookupTable<T, 128, 8, 8, false> <<<grids, threads, 0, context.cuda_device_context().stream()>>>( output, table, ids, N, K, D, padding_idx); else LookupTable<T, 128, 8, 8, true> <<<grids, threads, 0, context.cuda_device_context().stream()>>>( output, table, ids, N, K, D, padding_idx); #endif // PADDLE_WITH_HIP } }; template <typename T> class LookupTableGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto &dev_ctx = context.template device_context<phi::GPUContext>(); bool is_sparse = context.Attr<bool>("is_sparse"); // Since paddings are not trainable and fixed in forward, the gradient of // paddings makes no sense and we don't deal with it in backward. if (is_sparse) { auto *ids = context.Input<phi::DenseTensor>("Ids"); auto *table = context.Input<phi::DenseTensor>("W"); auto *d_output = context.Input<phi::DenseTensor>(framework::GradVarName("Out")); auto *d_table = context.Output<phi::SelectedRows>(framework::GradVarName("W")); auto *ids_data = ids->data<int64_t>(); int64_t ids_num = ids->numel(); auto stream = dev_ctx.stream(); // copy GPU memory to CPU pinned memory phi::Vector<int64_t> new_rows; new_rows.resize(ids_num); auto gpu_place = context.GetPlace(); // TODO(yuyang18): Strange code here. phi::MixVector<int64_t> mixv_new_rows(&new_rows); memory::Copy(gpu_place, mixv_new_rows.CUDAMutableData(context.GetPlace()), gpu_place, ids_data, ids_num * sizeof(int64_t), stream); mixv_new_rows.CopyToCPU(); d_table->set_rows(new_rows); auto *d_table_value = d_table->mutable_value(); d_table_value->Resize({ids_num, table->dims()[1]}); d_table_value->mutable_data<T>(context.GetPlace()); auto *d_table_data = d_table_value->data<T>(); auto *d_output_data = d_output->data<T>(); auto d_output_dims = d_output->dims(); auto d_output_dims_2d = phi::flatten_to_2d(d_output_dims, d_output_dims.size() - 1); PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d, platform::errors::InvalidArgument( "ShapeError: The shape of lookup_table@Grad and " "output@Grad should be same. 
" "But received lookup_table@Grad's shape = [%s], " "output@Grad's shape = [%s].", d_table_value->dims(), d_output_dims_2d)); memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data, d_output->numel() * sizeof(T), stream); } else { auto ids_t = context.Input<phi::DenseTensor>("Ids"); auto d_output_t = context.Input<phi::DenseTensor>(framework::GradVarName("Out")); auto d_table_t = context.Output<phi::DenseTensor>(framework::GradVarName("W")); int N = d_table_t->dims()[0]; int D = d_table_t->dims()[1]; int K = ids_t->numel(); const int64_t *ids = ids_t->data<int64_t>(); const T *d_output = d_output_t->data<T>(); T *d_table = d_table_t->mutable_data<T>(context.GetPlace()); auto t = framework::EigenVector<T>::Flatten(*d_table_t); t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0)); #ifdef PADDLE_WITH_HIP dim3 threads(64, 4); #else dim3 threads(128, 8); #endif // PADDLE_WITH_HIP dim3 grids(8, 1); #ifdef PADDLE_WITH_HIP LookupTableGrad<T, 64, 4, 8><<<grids, threads, 0, dev_ctx.stream()>>>( d_table, d_output, ids, N, K, D); #else LookupTableGrad<T, 128, 8, 8><<<grids, threads, 0, dev_ctx.stream()>>>( d_table, d_output, ids, N, K, D); #endif // PADDLE_WITH_HIP } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(lookup_table, ops::LookupTableCUDAKernel<float>, ops::LookupTableCUDAKernel<double>, ops::LookupTableCUDAKernel<plat::float16>, ops::LookupTableCUDAKernel<int8_t>, ops::LookupTableCUDAKernel<int16_t>); REGISTER_OP_CUDA_KERNEL(lookup_table_grad, ops::LookupTableGradCUDAKernel<float>, ops::LookupTableGradCUDAKernel<double>, ops::LookupTableGradCUDAKernel<plat::float16>);
f575706f8613b453598e9aa116040f2e6f9d9cfe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> #include <fstream> using namespace std; // includes, project // includes, kernels #include "vector_reduction_kernel.cuh" //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); int ReadFile(float*, char* file_name); float computeOnDevice(float* h_data, int array_mem_size); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// //! Run test //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { int num_elements = NUM_ELEMENTS; int errorM = 0; const unsigned int array_mem_size = sizeof( float) * num_elements; // allocate host memory to store the input data float* h_data = (float*) malloc( array_mem_size); // * No arguments: Randomly generate input data and compare against the // host's result. // * One argument: Read the input data array from the given file. 
switch(argc-1) { case 1: // One Argument errorM = ReadFile(h_data, argv[1]); if(errorM != 1) { printf("Error reading input file!\n"); exit(1); } break; default: // No Arguments or one argument // initialize the input data on the host to be integer values // between 0 and 1000 for( unsigned int i = 0; i < num_elements; ++i) { h_data[i] = floorf(1000*(rand()/(float)RAND_MAX)); } break; } // compute reference solution float reference = 0.0f; computeGold(&reference , h_data, num_elements); // **===-------- Modify the body of this function -----------===** float result = computeOnDevice(h_data, num_elements); // **===-----------------------------------------------------------===** // Run accuracy test float epsilon = 0.0001f; unsigned int result_regtest = (abs(result - reference) <= epsilon); printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED"); printf( "device: %f host: %f\n", result, reference); // cleanup memory free( h_data); } int ReadFile(float* M, char* file_name) { unsigned int data_read = NUM_ELEMENTS; std::ifstream ifile(file_name); for(unsigned int i = 0; i < data_read; i++){ ifile>>M[i]; } ifile.close(); return data_read; } // **===----------------- Modify this function ---------------------===** // Take h_data from host, copies it to device, setup grid and thread // dimensions, excutes kernel function, and copy result of scan back // to h_data. // Note: float* h_data is both the input and the output of this function. float computeOnDevice(float* h_data, int num_elements) { // placeholder int tpb; size_t size = num_elements * sizeof(float); float* d_data; hipMalloc(&d_data, size); hipMemcpy(d_data,h_data,size, hipMemcpyHostToDevice); tpb = 1024; // bpg = (num_elements + tpb-1)/tpb; hipLaunchKernelGGL(( reduction), dim3(1),dim3(tpb), 0, 0, d_data,num_elements); hipMemcpy(h_data,d_data,size,hipMemcpyDeviceToHost); return h_data[0]; }
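// Illustrative aside (not part of the original file pair): vector_reduction_kernel.cuh is
// not reproduced in this dump. computeOnDevice above launches a single 1024-thread block
// and then reads the sum back from element 0, so the kernel it expects is presumably a
// single-block shared-memory tree reduction along these lines (a sketch that, like the
// launch above, assumes num_elements <= 1024):

__global__ void reduction_sketch(float *g_data, int n) {
  __shared__ float partial[1024];
  int tid = threadIdx.x;
  partial[tid] = (tid < n) ? g_data[tid] : 0.0f;   // pad the tail with zeros
  __syncthreads();
  for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
    if (tid < stride) partial[tid] += partial[tid + stride];
    __syncthreads();
  }
  if (tid == 0) g_data[0] = partial[0];            // host reads h_data[0] as the total
}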
f575706f8613b453598e9aa116040f2e6f9d9cfe.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> #include <fstream> using namespace std; // includes, project // includes, kernels #include "vector_reduction_kernel.cuh" //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); int ReadFile(float*, char* file_name); float computeOnDevice(float* h_data, int array_mem_size); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// //! Run test //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { int num_elements = NUM_ELEMENTS; int errorM = 0; const unsigned int array_mem_size = sizeof( float) * num_elements; // allocate host memory to store the input data float* h_data = (float*) malloc( array_mem_size); // * No arguments: Randomly generate input data and compare against the // host's result. // * One argument: Read the input data array from the given file. 
switch(argc-1)
{
    case 1:  // One argument: read the input data from the given file
        errorM = ReadFile(h_data, argv[1]);
        if(errorM != num_elements)   // ReadFile returns the number of values read
        {
            printf("Error reading input file!\n");
            exit(1);
        }
        break;

    default: // No arguments (or any other count): generate random input
        // initialize the input data on the host to be integer values
        // between 0 and 1000
        for( unsigned int i = 0; i < num_elements; ++i)
        {
            h_data[i] = floorf(1000*(rand()/(float)RAND_MAX));
        }
        break;
}

// compute reference solution
float reference = 0.0f;
computeGold(&reference, h_data, num_elements);

// **===-------- Modify the body of this function -----------===**
float result = computeOnDevice(h_data, num_elements);
// **===-----------------------------------------------------------===**

// Run accuracy test
float epsilon = 0.0001f;
unsigned int result_regtest = (abs(result - reference) <= epsilon);
printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
printf( "device: %f  host: %f\n", result, reference);

// cleanup memory
free( h_data);
}

int ReadFile(float* M, char* file_name)
{
    unsigned int data_read = NUM_ELEMENTS;
    std::ifstream ifile(file_name);

    for(unsigned int i = 0; i < data_read; i++){
        ifile >> M[i];
    }
    ifile.close();
    return data_read;
}

// **===----------------- Modify this function ---------------------===**
// Takes h_data from the host, copies it to the device, sets up grid and thread
// dimensions, executes the kernel, and copies the reduction result back to h_data.
// Note: float* h_data is both the input and the output of this function.
float computeOnDevice(float* h_data, int num_elements)
{
    int tpb;
    size_t size = num_elements * sizeof(float);
    float* d_data;
    cudaMalloc(&d_data, size);
    cudaMemcpy(d_data, h_data, size, cudaMemcpyHostToDevice);

    tpb = 1024;
    // bpg = (num_elements + tpb-1)/tpb;
    reduction<<<1, tpb>>>(d_data, num_elements);

    cudaMemcpy(h_data, d_data, size, cudaMemcpyDeviceToHost);
    cudaFree(d_data);   // release the device buffer before returning

    return h_data[0];
}
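// Reader's sketch (not part of the assignment): computeOnDevice above launches a single
// block and leaves the bpg computation commented out. One common way to use multiple
// blocks is to have each block emit a partial sum and then re-launch on the partials
// until a single value remains. block_sum and reduce_large are illustrative names and
// error checking is omitted for brevity.
#include <cuda_runtime.h>

__global__ void block_sum(const float* in, float* out, int n)
{
    extern __shared__ float cache[];                 // blockDim.x floats, passed at launch
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;

    cache[threadIdx.x] = (gid < n) ? in[gid] : 0.0f; // zero-pad the last block
    __syncthreads();

    // Tree reduction; blockDim.x must be a power of two.
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (threadIdx.x < stride)
            cache[threadIdx.x] += cache[threadIdx.x + stride];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        out[blockIdx.x] = cache[0];                  // one partial sum per block
}

float reduce_large(float* d_in, int n)
{
    const int tpb = 1024;
    int blocks = (n + tpb - 1) / tpb;

    float* d_scratch = NULL;                         // holds the per-block partial sums
    cudaMalloc(&d_scratch, blocks * sizeof(float));

    float* src = d_in;
    float* dst = d_scratch;
    while (n > 1)
    {
        blocks = (n + tpb - 1) / tpb;
        block_sum<<<blocks, tpb, tpb * sizeof(float)>>>(src, dst, n);
        n = blocks;                                  // the partials become the next pass's input
        // Ping-pong the buffers; later passes overwrite d_in, just as the
        // in-place computeOnDevice above overwrites its input.
        float* t = src; src = dst; dst = t;
    }

    float result;
    cudaMemcpy(&result, src, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_scratch);
    return result;
}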
3e54363d45d18ac8dfafe1698764744a8b0b4053.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"

__global__ void hello()
{
    printf("Hello world from GPU\n");
}

int main()
{
    // Launch one block of 5 threads: prints "Hello world from GPU" five times.
    hipLaunchKernelGGL(hello, dim3(1), dim3(5), 0, 0);
    // hipDeviceReset() releases all device resources held by this process and, in doing
    // so, waits for the kernel and flushes its printf output. Without it the host exits
    // before the GPU output is flushed and nothing is printed.
    hipDeviceReset();
    return 0;
}
3e54363d45d18ac8dfafe1698764744a8b0b4053.cu
#include "cuda.h" #include "stdio.h" __global__ void hello() { printf("Hello world from GPU\n"); } int main() { // will print 5 hello world. hello<<<1, 5>>>(); // reset all the resources in GPU for this process. // If no cudaDeviceReset(), no output will print, the program in CPU will just // quit without waiting for GPU response. cudaDeviceReset(); return 0; }
8572c95c21730080d003cf664996e3452821697e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" #include "assert.h" #include "stdio.h" #define MAX_THREADS_PER_BLOCK 1024 #define SQRT_MAX_THREADS_PER_BLOCK 32 #define TEMP_PRINT(a) //#define TEMP_PRINT(a) if(xOffset == 4 && yOffset == 4) { printf(#a);} #define MAX(a, b) (a) > (b) ? (a) : (b) #define MIN(a, b) (a) < (b) ? (a) : (b) #define CLAMP(val, min, max) MIN(MAX((val), (min)), (max)) __global__ void gaussian_blurShared(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth, int colsPerThread, int rowsPerThread) { int row, col, sharedCol, sharedRow; extern __shared__ char sharedData[]; // declare shared memory caches unsigned char *sharedInputChannel = (unsigned char *)(sharedData); int sharedInputChannel_size = (blockDim.x + filterWidth - 1) * (blockDim.y + filterWidth - 1); float *sharedFilter = (float *)(sharedData + sharedInputChannel_size); int tIdx = threadIdx.x + threadIdx.y * blockDim.x; // populate filter shared memory cache if (tIdx < filterWidth * filterWidth) sharedFilter[tIdx] = filter[tIdx]; for (int i = 0; i < colsPerThread; i++) { // populate input channel shared memory cache sharedCol = colsPerThread * threadIdx.x + i; col = blockIdx.x*blockDim.x - filterWidth/2 + sharedCol; col = CLAMP(col, 0, numCols - 1); for (int j = 0; j < rowsPerThread; j++) { sharedRow = rowsPerThread*threadIdx.y + j; row = blockIdx.y*blockDim.y - filterWidth/2 + sharedRow; row = CLAMP(row, 0, numRows - 1); if (sharedCol < blockDim.x + filterWidth - 1 && sharedRow < blockDim.y + filterWidth - 1) sharedInputChannel[sharedCol + sharedRow*(blockDim.x + filterWidth - 1)] = inputChannel[col + row * numCols]; } } __syncthreads(); // done setting up shared memory caches col = blockIdx.x*blockDim.x + threadIdx.x; row = blockIdx.y*blockDim.y + threadIdx.y; if (row < numRows && col < numCols) { // compute sum of filter placed over input channel float total = 0.0; for (int filterRow = 0; filterRow < filterWidth; filterRow++) for (int filterCol = 0; filterCol < filterWidth; filterCol++) { sharedCol = threadIdx.x + filterCol; sharedRow = threadIdx.y + filterRow; float imageVal = static_cast<float>(sharedInputChannel[sharedRow*(blockDim.x + filterWidth - 1) + sharedCol]); float filterVal = sharedFilter[filterRow * filterWidth + filterCol]; total += imageVal*filterVal; } outputChannel[row*numCols + col] = total; } } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const 
redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO unsigned int col = blockIdx.x*blockDim.x + threadIdx.x; unsigned int row = blockIdx.y*blockDim.y + threadIdx.y; if (col < numCols && row < numRows) { int idx = row*numCols + col; uchar4 pixel = inputImageRGBA[idx]; redChannel[idx] = pixel.x; greenChannel[idx] = pixel.y; blueChannel[idx] = pixel.z; } } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc //float* d_filter; int filterSize = filterWidth * filterWidth * sizeof(float); checkCudaErrors(hipMalloc(&d_filter, filterSize)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(hipMemcpy(d_filter, h_filter, filterSize, hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { int channelSize = sizeof(unsigned char) * numRows * numCols; //TODO: Set reasonable block size (i.e., number of threads per block) int length = numRows * numCols; const dim3 blockSize(SQRT_MAX_THREADS_PER_BLOCK, SQRT_MAX_THREADS_PER_BLOCK/2, 1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. 
const dim3 gridSize(numCols/blockSize.x + 1, numRows/blockSize.y + 1, 1); //TODO: Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. const size_t sharedInputChannelSize = (blockSize.x + filterWidth) * (blockSize.y + filterWidth) * sizeof(unsigned char); const size_t sharedFilterSize = filterWidth * filterWidth * sizeof(float); const size_t sharedSize = sharedInputChannelSize + sharedFilterSize; int colsPerThread = (blockSize.x + filterWidth - 1) / blockSize.x + 1; int rowsPerThread = (blockSize.y + filterWidth - 1) / blockSize.y + 1; hipLaunchKernelGGL(( gaussian_blurShared), dim3(gridSize), dim3(blockSize), sharedSize, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth, colsPerThread, rowsPerThread); hipLaunchKernelGGL(( gaussian_blurShared), dim3(gridSize), dim3(blockSize), sharedSize, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth, colsPerThread, rowsPerThread); hipLaunchKernelGGL(( gaussian_blurShared), dim3(gridSize), dim3(blockSize), sharedSize, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth, colsPerThread, rowsPerThread); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); }
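// Reader's note (not part of the assignment): the dynamic shared memory that
// gaussian_blurShared actually indexes per block is an input tile of
// (blockDim.x + filterWidth - 1) x (blockDim.y + filterWidth - 1) unsigned chars plus a
// filterWidth * filterWidth float copy of the filter. The launch above rounds the tile up
// to (blockSize + filterWidth) in each dimension, which is safe but reserves one extra row
// and column per block. blurSharedBytes below is an illustrative helper, not existing code.
#include <cstddef>

size_t blurSharedBytes(unsigned int blockX, unsigned int blockY, int filterWidth)
{
    const size_t tileW = blockX + filterWidth - 1;   // halo of filterWidth/2 pixels on each side
    const size_t tileH = blockY + filterWidth - 1;
    return tileW * tileH * sizeof(unsigned char)
         + (size_t)filterWidth * filterWidth * sizeof(float);
}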
8572c95c21730080d003cf664996e3452821697e.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" #include "assert.h" #include "stdio.h" #define MAX_THREADS_PER_BLOCK 1024 #define SQRT_MAX_THREADS_PER_BLOCK 32 #define TEMP_PRINT(a) //#define TEMP_PRINT(a) if(xOffset == 4 && yOffset == 4) { printf(#a);} #define MAX(a, b) (a) > (b) ? (a) : (b) #define MIN(a, b) (a) < (b) ? (a) : (b) #define CLAMP(val, min, max) MIN(MAX((val), (min)), (max)) __global__ void gaussian_blurShared(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth, int colsPerThread, int rowsPerThread) { int row, col, sharedCol, sharedRow; extern __shared__ char sharedData[]; // declare shared memory caches unsigned char *sharedInputChannel = (unsigned char *)(sharedData); int sharedInputChannel_size = (blockDim.x + filterWidth - 1) * (blockDim.y + filterWidth - 1); float *sharedFilter = (float *)(sharedData + sharedInputChannel_size); int tIdx = threadIdx.x + threadIdx.y * blockDim.x; // populate filter shared memory cache if (tIdx < filterWidth * filterWidth) sharedFilter[tIdx] = filter[tIdx]; for (int i = 0; i < colsPerThread; i++) { // populate input channel shared memory cache sharedCol = colsPerThread * threadIdx.x + i; col = blockIdx.x*blockDim.x - filterWidth/2 + sharedCol; col = CLAMP(col, 0, numCols - 1); for (int j = 0; j < rowsPerThread; j++) { sharedRow = rowsPerThread*threadIdx.y + j; row = blockIdx.y*blockDim.y - filterWidth/2 + sharedRow; row = CLAMP(row, 0, numRows - 1); if (sharedCol < blockDim.x + filterWidth - 1 && sharedRow < blockDim.y + filterWidth - 1) sharedInputChannel[sharedCol + sharedRow*(blockDim.x + filterWidth - 1)] = inputChannel[col + row * numCols]; } } __syncthreads(); // done setting up shared memory caches col = blockIdx.x*blockDim.x + threadIdx.x; row = blockIdx.y*blockDim.y + threadIdx.y; if (row < numRows && col < numCols) { // compute sum of filter placed over input channel float total = 0.0; for (int filterRow = 0; filterRow < filterWidth; filterRow++) for (int filterCol = 0; filterCol < filterWidth; filterCol++) { sharedCol = threadIdx.x + filterCol; sharedRow = threadIdx.y + filterRow; float imageVal = static_cast<float>(sharedInputChannel[sharedRow*(blockDim.x + filterWidth - 1) + sharedCol]); float filterVal = sharedFilter[filterRow * filterWidth + filterCol]; total += imageVal*filterVal; } outputChannel[row*numCols + col] = total; } } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO unsigned int col = 
blockIdx.x*blockDim.x + threadIdx.x; unsigned int row = blockIdx.y*blockDim.y + threadIdx.y; if (col < numCols && row < numRows) { int idx = row*numCols + col; uchar4 pixel = inputImageRGBA[idx]; redChannel[idx] = pixel.x; greenChannel[idx] = pixel.y; blueChannel[idx] = pixel.z; } } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc //float* d_filter; int filterSize = filterWidth * filterWidth * sizeof(float); checkCudaErrors(cudaMalloc(&d_filter, filterSize)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(cudaMemcpy(d_filter, h_filter, filterSize, cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { int channelSize = sizeof(unsigned char) * numRows * numCols; //TODO: Set reasonable block size (i.e., number of threads per block) int length = numRows * numCols; const dim3 blockSize(SQRT_MAX_THREADS_PER_BLOCK, SQRT_MAX_THREADS_PER_BLOCK/2, 1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. 
const dim3 gridSize(numCols/blockSize.x + 1, numRows/blockSize.y + 1, 1); //TODO: Launch a kernel for separating the RGBA image into different color channels separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. const size_t sharedInputChannelSize = (blockSize.x + filterWidth) * (blockSize.y + filterWidth) * sizeof(unsigned char); const size_t sharedFilterSize = filterWidth * filterWidth * sizeof(float); const size_t sharedSize = sharedInputChannelSize + sharedFilterSize; int colsPerThread = (blockSize.x + filterWidth - 1) / blockSize.x + 1; int rowsPerThread = (blockSize.y + filterWidth - 1) / blockSize.y + 1; gaussian_blurShared<<<gridSize, blockSize, sharedSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth, colsPerThread, rowsPerThread); gaussian_blurShared<<<gridSize, blockSize, sharedSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth, colsPerThread, rowsPerThread); gaussian_blurShared<<<gridSize, blockSize, sharedSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth, colsPerThread, rowsPerThread); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); }
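// Reader's sketch (not part of the assignment): gridSize above is computed as
// numCols/blockSize.x + 1, which launches one extra, fully idle block column or row
// whenever the image dimension is an exact multiple of the block dimension. The kernels
// bounds-check, so this is harmless but wasteful; the usual ceil-division idiom launches
// exactly enough blocks. blurGridSize is an illustrative name, not existing code.
#include <cstddef>
#include <cuda_runtime.h>   // dim3

dim3 blurGridSize(size_t numRows, size_t numCols, dim3 blockSize)
{
    return dim3((unsigned int)((numCols + blockSize.x - 1) / blockSize.x),
                (unsigned int)((numRows + blockSize.y - 1) / blockSize.y),
                1);
}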
0103a89d8fb8a7a2554b41c97e52c1ebb27867ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "lty_paralle_common.cuh" //#include "common_hip.cuh" #include <stdio.h> #include<iostream> #include<time.h> #include"hip/device_functions.h" #include "device_launch_parameters.h" #include <math.h> #include "sm_20_atomic_functions.hpp" using namespace std; extern int Ne,int nxx,int nzz,int kongxue,int tail,int enlarge; extern float dr,float dz,float ca_posz,float KTe; //extern float *d_ex1,float *d_iex1,float *d_ey1,float *d_iey1,float *d_ez1,float *d_iez1, // float *d_hx1,float *d_ihx1,float *d_hy1,float *d_ihy1,float *d_hz1,float *d_ihz1, // float *d_sigmaz1,float *d_sigmaz,float *sigmaz1,float *sigmaz; extern Paticle *pat_elc; __device__ float d_ex1[51 * 101], d_iex1[51 * 101], d_ey1[51 * 101], d_iey1[51 * 101], d_ez1[51 * 101], d_iez1[51 * 101], d_hx1[51 * 101], d_ihx1[51 * 101], d_hy1[51 * 101], d_ihy1[51 * 101], d_hz1[51 * 101], d_ihz1[51 * 101]; //extern float *d_sigmaz1 = NULL, *d_sigmaz = NULL; __constant__ float D_parameter[12]={1.0e-003,1.0e-003,5.0e-002,1.0e-001,1.66782e-12,-5.5594e-15,5.5594e-17,0.005,0.03, 4.095e-16,2.4033e-20,9.1e-31}; //0dr 1dz 2R 3L 4dt 5qe 6qi 7ca_posr 8ca_posz 9KTe 10KTi 11Me /*3.2044e-19*/ __global__ void device_initialchang(Grid* device_g,Grid* device_gn)//x y { int x=51,y=101; int tid=blockIdx.x*blockDim.x+threadIdx.x; while(tid<x*y) { (device_g+tid)->ex=0; (device_g+tid)->ey=0; (device_g+tid)->ez=0; (device_g+tid)->hx=0; (device_g+tid)->hy=0; (device_g+tid)->hz=0; (device_g+tid)->ave_ex=0; (device_g+tid)->ave_ey=0; (device_g+tid)->ave_ez=0; (device_g+tid)->ave_hx=0; (device_g+tid)->ave_hy=0; (device_g+tid)->ave_hz=0; (device_g+tid)->ne[0]=0; (device_g+tid)->ne[1]=0; (device_g+tid)->jr=0.0; (device_g+tid)->jz=0.0; (device_g+tid)->jy=0.0; (device_g+tid)->jr_ion=0.0; (device_g+tid)->jz_ion=0.0; (device_g+tid)->jy_ion=0.0; (device_g+tid)->Pmax=0.0; (device_gn+tid)->ex=0; (device_gn+tid)->ey=0; (device_gn+tid)->ez=0; (device_gn+tid)->hx=0; (device_gn+tid)->hy=0; (device_gn+tid)->hz=0; (device_gn+tid)->ne[2]=0; tid+=gridDim.x*blockDim.x; } //printf("success,device_initial_chang"); } //void device_initial_always(float *device_rds,float *device_rds1,Paticle *p_pat_elc,Grid *device_G,float ca_posz,int Ne,int nx,int nz,float dr,float dz,float KTe) __global__ void initial_always(Paticle *pat_elc,int n,float *S_number,int tmp)//nstep*30 { int tid=blockIdx.x*blockDim.x+threadIdx.x; while(tid<n) { int i=tid%tmp; float afa=0;//rds5*(pi/12); float cita=0;//rds6*pi/2; float vv = 1e6; pat_elc[tid].pz=D_parameter[8]; pat_elc[tid].pr =0.04*S_number[i]+0.001;// pat_elc[tid].py=2*pi*S_number[i]; pat_elc[tid].vr = vv*sin(afa)*cos(cita); pat_elc[tid].vy = vv*sin(afa)*sin(cita); pat_elc[tid].vz=vv; //pat_elc[tid].blei = pat_elc[tid].pr/D_parameter[0]; // pat_elc[tid].blek = pat_elc[tid].pz/D_parameter[1]; tid+=gridDim.x*blockDim.x; } } __global__ void device_ave_field(int x,int y,Grid* device_g,Grid* device_gn) //x y { int u=0,v=0; int tid=blockIdx.x*blockDim.x+threadIdx.x; while(tid<x*(y)) { //bool bv=tid%y;//v; //bool bu=tid/(y);//u; /*G[u][v].ave_ex=(bv*G[u][v].ex+bu*G[u-1+1-bu][v].ex+(1-bv)*(1-bu)*G[u][v].ex+(1-bv)*bu*G[u][v].ex)/(bv+bu+(1-bv)*(1-bu)+(1-bv)*bu); G[u][v].ave_ez=(G[u][v].ez+bv*G[u][v-1+1-bv].ez)/(2-(1-bv)); G[u][v].ave_hy=(G[u][v].hy+bu*G[u-1+1-bu][v].hy+bv*G[u][v-1+1-bv].hy+bu*bv*G[u-1+1-bu][v-1+1-bv].hy +Gn[u][v].hy+bu*Gn[u-1+1-bu][v].hy+bv*Gn[u][v-1+1-bv].hy+bu*bv*Gn[u-1+1-bu][v-1+1-bv].hy)/(8/(3-bu-bv+(1-bu)*(1-bv))); G[u][v].ave_ey=G[u][v].ey; 
G[u][v].ave_hx=(G[u][v].hx+Gn[u][v].hx+bv*G[u][v-1+1-bv].hx+bv*Gn[u][v-1+1-bv].hx)/(2+2*bv); G[u][v].ave_hz=(G[u][v].hz+Gn[u][v].hz+bu*G[u-1+1-bu][v].hz+bu*Gn[u-1+1-bu][v].hz)/(2+2*bu);*/ /*device_g[tid].ave_ex=(bv*device_g[tid].ex+bu*device_g[tid-bu*y].ex+(1-bv)*(1-bu)*device_g[tid].ex+(1-bv)*bu*device_g[tid].ex)/(bv+bu+(1-bv)*(1-bu)+(1-bv)*bu); device_g[tid].ave_ez=(device_g[tid].ez+bv*device_g[tid-bv].ez)/(2-(1-bv)); device_g[tid].ave_hy=(device_g[tid].hy+bu*device_g[tid-bu*y].hy+bv*device_g[tid-bv].hy+bu*bv*device_g[tid-bu*y-bv].hy +device_gn[tid].hy+bu*device_gn[tid-bu*y].hy+bv*device_gn[tid-bv].hy+bu*bv*device_gn[tid-bu*y-bv].hy)/(8/(3-bu-bv+(1-bu)*(1-bv))); device_g[tid].ave_ey=device_g[tid].ey; device_g[tid].ave_hx=(device_g[tid].hx+device_gn[tid].hx+bv*device_g[tid-bv].hx+bv*device_gn[tid-bv].hx)/(2+2*bv); device_g[tid].ave_hz=(device_g[tid].hz+device_gn[tid].hz+bu*device_g[tid-bu*y].hz+bu*device_gn[tid-bu*y].hz)/(2+2*bu);*/ u=tid/(y); v=tid%y; int tid_temp=u*(y)+v; if(u==0&&v!=0) { (*(device_g+tid_temp)).ave_ex=((*(device_g+tid_temp)).ex); (*(device_g+tid_temp)).ave_ez=((*(device_g+tid_temp)).ez+(*(device_g+tid_temp-1)).ez)/2; (*(device_g+tid_temp)).ave_hy=((*(device_g+tid_temp)).hy+(*(device_g+tid_temp-1)).hy+(*(device_gn+tid_temp)).hy+(*(device_gn+tid_temp-1)).hy)/4; (*(device_g+tid_temp)).ave_ey=(*(device_g+tid_temp)).ey; (*(device_g+tid_temp)).ave_hx=((*(device_g+tid_temp)).hx+(*(device_gn+tid_temp)).hx+(*(device_g+tid_temp-1)).hx+(*(device_gn+tid_temp-1)).hx)/4; (*(device_g+tid_temp)).ave_hz=((*(device_g+tid_temp)).hz+(*(device_gn+tid_temp)).hz)/2; } else if(v==0&&u!=0) { (*(device_g+tid_temp)).ave_ex=((*(device_g+tid_temp)).ex+(*(device_g+tid_temp-y)).ex)/2; (*(device_g+tid_temp)).ave_ez=((*(device_g+tid_temp)).ez); (*(device_g+tid_temp)).ave_hy=((*(device_g+tid_temp)).hy+(*(device_g+tid_temp-y)).hy+(*(device_gn+tid_temp)).hy+(*(device_gn+tid_temp-y)).hy)/4; (*(device_g+tid_temp)).ave_ey=(*(device_g+tid_temp)).ey; (*(device_g+tid_temp)).ave_hx=((*(device_g+tid_temp)).hx+(*(device_gn+tid_temp)).hx)/2; (*(device_g+tid_temp)).ave_hz=((*(device_g+tid_temp)).hz+(*(device_gn+tid_temp)).hz+(*(device_g+tid_temp-y)).hz+(*(device_gn+tid_temp-y)).hz)/4; } else if(v==0&&u==0) { (*(device_g+tid_temp)).ave_ex=(*(device_g+tid_temp)).ex; (*(device_g+tid_temp)).ave_ez=(*(device_g+tid_temp)).ez; (*(device_g+tid_temp)).ave_hy=((*(device_g+tid_temp)).hy+(*(device_gn+tid_temp)).hy)/2; (*(device_g+tid_temp)).ave_ey=(*(device_g+tid_temp)).ey; (*(device_g+tid_temp)).ave_hx=((*(device_g+tid_temp)).hx+(*(device_gn+tid_temp)).hx)/2; (*(device_g+tid_temp)).ave_hz=((*(device_g+tid_temp)).hz+(*(device_gn+tid_temp)).hz)/2; } else{ (*(device_g+tid_temp)).ave_ex=((*(device_g+tid_temp)).ex+(*(device_g+tid_temp-y)).ex)/2; (*(device_g+tid_temp)).ave_ez=((*(device_g+tid_temp)).ez+(*(device_g+tid_temp-1)).ez)/2; (*(device_g+tid_temp)).ave_hy=((*(device_g+tid_temp)).hy+(*(device_g+tid_temp-y)).hy+(*(device_g+tid_temp-1)).hy+(*(device_g+tid_temp-y-1)).hy +(*(device_gn+tid_temp)).hy+(*(device_gn+tid_temp-y)).hy+(*(device_gn+tid_temp-1)).hy+(*(device_gn+tid_temp-y-1)).hy)/8; (*(device_g+tid_temp)).ave_ey=(*(device_g+tid_temp)).ey; (*(device_g+tid_temp)).ave_hx=((*(device_g+tid_temp)).hx+(*(device_gn+tid_temp)).hx+(*(device_g+tid_temp-1)).hx+(*(device_gn+tid_temp-1)).hx)/4; (*(device_g+tid_temp)).ave_hz=((*(device_g+tid_temp)).hz+(*(device_gn+tid_temp)).hz+(*(device_g+tid_temp-y)).hz+(*(device_gn+tid_temp-y)).hz)/4; } tid+=gridDim.x*blockDim.x; // printf("success,device_initial_chang"); } } __device__ float 
d_min_1(float x,float y) { return(x<y?x:y); } __device__ float d_max_1(float x,float y) { return(x>y?x:y); } __global__ void device_update_last(float *device_static_magneticX, float *device_static_magneticZ, float *device_static_electricX, float *device_static_electricZ, Paticle *p_pat_elc, Pre_Paticle *d_pre_elc, Grid *device_G, float *S_number, int tail) { //printf("%d",tail); float Qm_ion = 7.33945e+5; float Qm = -1.7588e+11; float mur = 4.0*pi*1.0e-7; float E[3] = { 0, 0, 0 }, B[3] = { 0, 0, 0 }; int tid = blockDim.x*blockIdx.x + threadIdx.x; int threadtid = threadIdx.x; //Paticle p_prepat_elc; __shared__ float elcVr[thread]; __shared__ float elcVy[thread]; __shared__ float elcVz[thread]; __shared__ float elcPr[thread]; __shared__ float elcPy[thread]; __shared__ float elcPz[thread]; __shared__ float PrelcPr[thread]; __shared__ float PrelcPz[thread]; while (tid<tail) { elcVr[threadIdx.x] = p_pat_elc[tid].vr; elcVy[threadIdx.x] = p_pat_elc[tid].vy; elcVz[threadIdx.x] = p_pat_elc[tid].vz; elcPr[threadIdx.x] = p_pat_elc[tid].pr; elcPy[threadIdx.x] = p_pat_elc[tid].py; elcPz[threadIdx.x] = p_pat_elc[tid].pz; d_pre_elc[tid].pr = elcPr[threadIdx.x];// d_pre_elc[tid].pz = elcPz[threadIdx.x];// int ii = (int)(elcPr[threadIdx.x] / D_parameter[0]); // int kk = (int)(elcPz[threadIdx.x] / D_parameter[1]); float wr = (elcPr[threadIdx.x] / D_parameter[0]) - ii; float wz = (elcPz[threadIdx.x] / D_parameter[1]) - kk; float s1 = (1 - wr)*(1 - wz); float s2 = (1 - wr)*wz; float s3 = wr*(1 - wz); float s4 = wr*wz; int grid_temp1 = ii*(nz + 1) + kk;//ii kk int grid_temp2 = ii*(nz + 1) + kk + 1;//ii kk+1 int grid_temp3 = (ii + 1)*(nz + 1) + kk;//ii+1 kk int grid_temp4 = (ii + 1)*(nz + 1) + kk + 1;//ii+1 kk+1 //printf("u:%d\t", tid); E[0] = ((*(device_G + grid_temp1)).ave_ex*s1 + (*(device_G + grid_temp3)).ave_ex*s3 + (*(device_G + grid_temp2)).ave_ex*s2 + (*(device_G + grid_temp4)).ave_ex*s4) +((*(device_static_electricX+grid_temp1))*s1+(*(device_static_electricX+grid_temp3))*s3+(*(device_static_electricX+grid_temp2))*s2+(*(device_static_electricX+grid_temp4))*s4);//+stac_ex[ii][kk]; //printf("u:%d\tE0=%f\n", tid,E[0]); E[2] = ((*(device_G + grid_temp1)).ave_ez*s1 + (*(device_G + grid_temp3)).ave_ez*s3 + (*(device_G + grid_temp2)).ave_ez*s2 + (*(device_G + grid_temp4)).ave_ez*s4) +((*(device_static_electricZ+grid_temp1))*s1+(*(device_static_electricZ+grid_temp3))*s3+(*(device_static_electricZ+grid_temp2))*s2+(*(device_static_electricZ+grid_temp4))*s4);//+(*(device_static_electricZ+grid_temp1)); //printf("u:%d\tE2=%f\n", tid,E[2]); B[1] = (((*(device_G + grid_temp1)).ave_hy*s1 + (*(device_G + grid_temp3)).ave_hy*s3 + (*(device_G + grid_temp2)).ave_hy*s2 + (*(device_G + grid_temp4)).ave_hy*s4)*mur); //printf("u:%d\tB1=%f\n", tid, B[1]); E[1] = ((*(device_G + grid_temp1)).ave_ey*s1 + (*(device_G + grid_temp3)).ave_ey*s3 + (*(device_G + grid_temp2)).ave_ey*s2 + (*(device_G + grid_temp4)).ave_ey*s4); //printf("u:%d\tE1=%f\n", tid,E[1]); B[0] = (((*(device_G + grid_temp1)).ave_hx*s1 + (*(device_G + grid_temp3)).ave_hx*s3 + (*(device_G + grid_temp2)).ave_hx*s2 + (*(device_G + grid_temp4)).ave_hx*s4)*mur) +(*(device_static_magneticX+grid_temp1))*s1+(*(device_static_magneticX+grid_temp3))*s3+((*(device_static_magneticX+grid_temp2))*s2+(*(device_static_magneticX+grid_temp4))*s4);//+(*(device_static_magneticX+grid_temp3)); //B[0]=((G[ii][kk].ave_hx*s1+G[ii+1][kk].ave_hx*s3+G[ii][kk+1].ave_hx*s2+G[ii+1][kk+1].ave_hx*s4)*mur) //+(stac_Bx[ii][kk] * s1 + stac_Bx[ii + 1][kk] * s3 + stac_Bx[ii][kk + 1] * s2 + stac_Bx[ii + 
1][kk + 1] * s4); //printf("u:%d\tB0=%f\n", tid, B[0]); B[2] = (((*(device_G + grid_temp1)).ave_hz*s1 + (*(device_G + grid_temp3)).ave_hz*s3 + (*(device_G + grid_temp2)).ave_hz*s2 + (*(device_G + grid_temp4)).ave_hz*s4)*mur) +((*(device_static_magneticZ+grid_temp1))*s1+(*(device_static_magneticZ+grid_temp3))*s3+(*(device_static_magneticZ+grid_temp2))*s2+(*(device_static_magneticZ+grid_temp4))*s4);//+(*(device_static_magneticZ+grid_temp3)); //printf("B2=%f\n", B[2]); float u1[3] = { 0 }, u2[3] = { 0 }, u3[3] = { 0 };//u_n-1/2,u-,,u+,u_n+1/2 // electron float t[3] = { 0 }, s[3] = { 0 }; float pp[3][3] = { 0 }; u1[0] = elcVr[threadIdx.x] + (D_parameter[4] / 2)*Qm*E[0]; u1[1] = elcVy[threadIdx.x] + (D_parameter[4] / 2)*Qm*E[1]; u1[2] = elcVz[threadIdx.x] + (D_parameter[4] / 2)*Qm*E[2]; for (int m = 0; m<3; m++) { t[m] = (B[m] * Qm*D_parameter[4]) / 2; // t s[m] = (2 * t[m]) / (1 + t[m] * t[m]); //s } pp[0][0] = 1 - s[2] * t[2] - s[1] * t[1]; //3*3 pp[0][1] = s[1] * t[0] + s[2]; pp[0][2] = s[2] * t[0] - s[1]; pp[1][0] = s[0] * t[1] - s[2]; pp[1][1] = 1 - s[2] * t[2] - s[0] * t[0]; pp[1][2] = s[0] + s[2] * t[1]; pp[2][0] = s[0] * t[2] + s[1]; pp[2][1] = s[1] * t[2] - s[0]; pp[2][2] = 1 - s[1] * t[1] - s[0] * t[0]; u2[0] = pp[0][0] * u1[0] + pp[0][1] * u1[1] + pp[0][2] * u1[2]; u2[1] = pp[1][0] * u1[0] + pp[1][1] * u1[1] + pp[1][2] * u1[2]; u2[2] = pp[2][0] * u1[0] + pp[2][1] * u1[1] + pp[2][2] * u1[2]; for (int m = 0; m<3; m++) u3[m] = u2[m] + (D_parameter[4] / 2)*Qm*E[m]; elcVr[threadIdx.x] = u3[0]; elcVy[threadIdx.x] = u3[1]; elcVz[threadIdx.x] = u3[2]; float cit1a = 0; if ((elcPr[threadIdx.x]) == 0) cit1a = 0; else cit1a = atan((elcVy[threadIdx.x] * D_parameter[4]) / (elcPr[threadIdx.x] + elcVr[threadIdx.x] * D_parameter[4])); float temp_x = elcPr[threadIdx.x] + elcVr[threadIdx.x] * D_parameter[4]; if (temp_x >= 0) elcPr[threadIdx.x] = sqrt((elcPr[threadIdx.x] + elcVr[threadIdx.x] * D_parameter[4])*(elcPr[threadIdx.x] + elcVr[threadIdx.x] * D_parameter[4]) + elcVy[threadIdx.x] * D_parameter[4] * elcVy[threadIdx.x] * D_parameter[4]); else { elcPr[threadIdx.x] = -temp_x; elcVr[threadIdx.x] = -elcVr[threadIdx.x]; } elcPz[threadIdx.x] = elcPz[threadIdx.x] + elcVz[threadIdx.x] * D_parameter[4]; elcPy[threadIdx.x] = elcPy[threadIdx.x] + cit1a; if ((elcPr[threadIdx.x]) == 0) { elcVr[threadIdx.x] = elcVr[threadIdx.x]; elcVy[threadIdx.x] = elcVy[threadIdx.x]; } elcVr[threadIdx.x] = cos(cit1a)*elcVr[threadIdx.x] + sin(cit1a)*elcVy[threadIdx.x]; elcVy[threadIdx.x] = -sin(cit1a)*elcVr[threadIdx.x] + cos(cit1a)*elcVy[threadIdx.x]; p_pat_elc[tid].vr = elcVr[threadIdx.x]; p_pat_elc[tid].vy = elcVy[threadIdx.x]; p_pat_elc[tid].vz = elcVz[threadIdx.x]; p_pat_elc[tid].pr = elcPr[threadIdx.x]; p_pat_elc[tid].py = elcPy[threadIdx.x]; p_pat_elc[tid].pz = elcPz[threadIdx.x]; tid += gridDim.x*blockDim.x; } } __global__ void device_update_ion(float *device_static_magneticX, float *device_static_magneticZ, float *device_static_electricX, float *device_static_electricZ, Paticle *p_pat_elc, Pre_Paticle *d_pre_elc, Grid *device_G, float *S_number, int tail) { //printf("%d",tail); float Qm_ion = 7.33945e+5; float Qm = -1.7588e+11; float mur = 4.0*pi*1.0e-7; float E[3] = { 0, 0, 0 }, B[3] = { 0, 0, 0 }; int tid = blockDim.x*blockIdx.x + threadIdx.x; int threadtid = threadIdx.x; //Paticle p_prepat_elc; __shared__ float elcVr[thread]; __shared__ float elcVy[thread]; __shared__ float elcVz[thread]; __shared__ float elcPr[thread]; __shared__ float elcPy[thread]; __shared__ float elcPz[thread]; __shared__ float 
PrelcPr[thread]; __shared__ float PrelcPz[thread]; while (tid<tail) { elcVr[threadIdx.x] = p_pat_elc[tid].vr; elcVy[threadIdx.x] = p_pat_elc[tid].vy; elcVz[threadIdx.x] = p_pat_elc[tid].vz; elcPr[threadIdx.x] = p_pat_elc[tid].pr; elcPy[threadIdx.x] = p_pat_elc[tid].py; elcPz[threadIdx.x] = p_pat_elc[tid].pz; d_pre_elc[tid].pr = elcPr[threadIdx.x];// d_pre_elc[tid].pz = elcPz[threadIdx.x];// int ii = (int)(elcPr[threadIdx.x] / D_parameter[0]); // int kk = (int)(elcPz[threadIdx.x] / D_parameter[1]); float wr = (elcPr[threadIdx.x] / D_parameter[0]) - ii; float wz = (elcPz[threadIdx.x] / D_parameter[1]) - kk; float s1 = (1 - wr)*(1 - wz); float s2 = (1 - wr)*wz; float s3 = wr*(1 - wz); float s4 = wr*wz; int grid_temp1 = ii*(nz + 1) + kk;//ii kk int grid_temp2 = ii*(nz + 1) + kk + 1;//ii kk+1 int grid_temp3 = (ii + 1)*(nz + 1) + kk;//ii+1 kk int grid_temp4 = (ii + 1)*(nz + 1) + kk + 1;//ii+1 kk+1 //printf("u:%d\t", tid); E[0] = ((*(device_G + grid_temp1)).ave_ex*s1 + (*(device_G + grid_temp3)).ave_ex*s3 + (*(device_G + grid_temp2)).ave_ex*s2 + (*(device_G + grid_temp4)).ave_ex*s4) +((*(device_static_electricX+grid_temp1))*s1+(*(device_static_electricX+grid_temp3))*s3+(*(device_static_electricX+grid_temp2))*s2+(*(device_static_electricX+grid_temp4))*s4);//+stac_ex[ii][kk]; //printf("u:%d\tE0=%f\n", tid,E[0]); E[2] = ((*(device_G + grid_temp1)).ave_ez*s1 + (*(device_G + grid_temp3)).ave_ez*s3 + (*(device_G + grid_temp2)).ave_ez*s2 + (*(device_G + grid_temp4)).ave_ez*s4) +((*(device_static_electricZ+grid_temp1))*s1+(*(device_static_electricZ+grid_temp3))*s3+(*(device_static_electricZ+grid_temp2))*s2+(*(device_static_electricZ+grid_temp4))*s4);//+(*(device_static_electricZ+grid_temp1)); //printf("u:%d\tE2=%f\n", tid,E[2]); B[1] = (((*(device_G + grid_temp1)).ave_hy*s1 + (*(device_G + grid_temp3)).ave_hy*s3 + (*(device_G + grid_temp2)).ave_hy*s2 + (*(device_G + grid_temp4)).ave_hy*s4)*mur); //printf("u:%d\tB1=%f\n", tid, B[1]); E[1] = ((*(device_G + grid_temp1)).ave_ey*s1 + (*(device_G + grid_temp3)).ave_ey*s3 + (*(device_G + grid_temp2)).ave_ey*s2 + (*(device_G + grid_temp4)).ave_ey*s4); //printf("u:%d\tE1=%f\n", tid,E[1]); B[0] = (((*(device_G + grid_temp1)).ave_hx*s1 + (*(device_G + grid_temp3)).ave_hx*s3 + (*(device_G + grid_temp2)).ave_hx*s2 + (*(device_G + grid_temp4)).ave_hx*s4)*mur) +(*(device_static_magneticX+grid_temp1))*s1+(*(device_static_magneticX+grid_temp3))*s3+((*(device_static_magneticX+grid_temp2))*s2+(*(device_static_magneticX+grid_temp4))*s4);//+(*(device_static_magneticX+grid_temp3)); //B[0]=((G[ii][kk].ave_hx*s1+G[ii+1][kk].ave_hx*s3+G[ii][kk+1].ave_hx*s2+G[ii+1][kk+1].ave_hx*s4)*mur) //+(stac_Bx[ii][kk] * s1 + stac_Bx[ii + 1][kk] * s3 + stac_Bx[ii][kk + 1] * s2 + stac_Bx[ii + 1][kk + 1] * s4); //printf("u:%d\tB0=%f\n", tid, B[0]); B[2] = (((*(device_G + grid_temp1)).ave_hz*s1 + (*(device_G + grid_temp3)).ave_hz*s3 + (*(device_G + grid_temp2)).ave_hz*s2 + (*(device_G + grid_temp4)).ave_hz*s4)*mur) +((*(device_static_magneticZ+grid_temp1))*s1+(*(device_static_magneticZ+grid_temp3))*s3+(*(device_static_magneticZ+grid_temp2))*s2+(*(device_static_magneticZ+grid_temp4))*s4);//+(*(device_static_magneticZ+grid_temp3)); //printf("B2=%f\n", B[2]); float u1[3] = { 0 }, u2[3] = { 0 }, u3[3] = { 0 };//u_n-1/2,u-,,u+,u_n+1/2 // electron float t[3] = { 0 }, s[3] = { 0 }; float pp[3][3] = { 0 }; u1[0] = elcVr[threadIdx.x] + (D_parameter[4] / 2)*Qm_ion*E[0]; u1[1] = elcVy[threadIdx.x] + (D_parameter[4] / 2)*Qm_ion*E[1]; u1[2] = elcVz[threadIdx.x] + (D_parameter[4] / 2)*Qm_ion*E[2]; for (int m 
= 0; m<3; m++) { t[m] = (B[m] * Qm_ion*D_parameter[4]) / 2; // t s[m] = (2 * t[m]) / (1 + t[m] * t[m]); //s } pp[0][0] = 1 - s[2] * t[2] - s[1] * t[1]; //3*3 pp[0][1] = s[1] * t[0] + s[2]; pp[0][2] = s[2] * t[0] - s[1]; pp[1][0] = s[0] * t[1] - s[2]; pp[1][1] = 1 - s[2] * t[2] - s[0] * t[0]; pp[1][2] = s[0] + s[2] * t[1]; pp[2][0] = s[0] * t[2] + s[1]; pp[2][1] = s[1] * t[2] - s[0]; pp[2][2] = 1 - s[1] * t[1] - s[0] * t[0]; u2[0] = pp[0][0] * u1[0] + pp[0][1] * u1[1] + pp[0][2] * u1[2]; u2[1] = pp[1][0] * u1[0] + pp[1][1] * u1[1] + pp[1][2] * u1[2]; u2[2] = pp[2][0] * u1[0] + pp[2][1] * u1[1] + pp[2][2] * u1[2]; for (int m = 0; m<3; m++) u3[m] = u2[m] + (D_parameter[4] / 2)*Qm_ion*E[m]; elcVr[threadIdx.x] = u3[0]; elcVy[threadIdx.x] = u3[1]; elcVz[threadIdx.x] = u3[2]; float cit1a = 0; if ((elcPr[threadIdx.x]) == 0) cit1a = 0; else cit1a = atan((elcVy[threadIdx.x] * D_parameter[4]) / (elcPr[threadIdx.x] + elcVr[threadIdx.x] * D_parameter[4])); float temp_x = elcPr[threadIdx.x] + elcVr[threadIdx.x] * D_parameter[4]; if (temp_x >= 0) elcPr[threadIdx.x] = sqrt((elcPr[threadIdx.x] + elcVr[threadIdx.x] * D_parameter[4])*(elcPr[threadIdx.x] + elcVr[threadIdx.x] * D_parameter[4]) + elcVy[threadIdx.x] * D_parameter[4] * elcVy[threadIdx.x] * D_parameter[4]); else { elcPr[threadIdx.x] = -temp_x; elcVr[threadIdx.x] = -elcVr[threadIdx.x]; } elcPz[threadIdx.x] = elcPz[threadIdx.x] + elcVz[threadIdx.x] * D_parameter[4]; elcPy[threadIdx.x] = elcPy[threadIdx.x] + cit1a; if ((elcPr[threadIdx.x]) == 0) { elcVr[threadIdx.x] = elcVr[threadIdx.x]; elcVy[threadIdx.x] = elcVy[threadIdx.x]; } elcVr[threadIdx.x] = cos(cit1a)*elcVr[threadIdx.x] + sin(cit1a)*elcVy[threadIdx.x]; elcVy[threadIdx.x] = -sin(cit1a)*elcVr[threadIdx.x] + cos(cit1a)*elcVy[threadIdx.x]; p_pat_elc[tid].vr = elcVr[threadIdx.x]; p_pat_elc[tid].vy = elcVy[threadIdx.x]; p_pat_elc[tid].vz = elcVz[threadIdx.x]; p_pat_elc[tid].pr = elcPr[threadIdx.x]; p_pat_elc[tid].py = elcPy[threadIdx.x]; p_pat_elc[tid].pz = elcPz[threadIdx.x]; tid += gridDim.x*blockDim.x; } } __global__ void current_ion(Paticle *p_pat_elc, Pre_Paticle *d_pre_elc, Grid *device_G, float *S_number, int tail, int t) { __shared__ float elcVr[thread]; __shared__ float elcVy[thread]; __shared__ float elcVz[thread]; __shared__ float elcPr[thread]; __shared__ float elcPy[thread]; __shared__ float elcPz[thread]; __shared__ float PrelcPr[thread]; __shared__ float PrelcPz[thread]; int tid = blockDim.x*blockIdx.x + threadIdx.x; while (tid < tail) { elcVy[threadIdx.x] = p_pat_elc[tid].vy; elcPr[threadIdx.x] = p_pat_elc[tid].pr; elcPz[threadIdx.x] = p_pat_elc[tid].pz; PrelcPr[threadIdx.x] = d_pre_elc[tid].pr;// PrelcPz[threadIdx.x] = d_pre_elc[tid].pz;// if ((elcPz[threadIdx.x] >= D_parameter[3]) || (elcPz[threadIdx.x] < 0) || (elcPr[threadIdx.x] >= D_parameter[2])) { //int i = tid %30; //float afa = 0;//rds5*(pi/12); //float cita = 0;//rds6*pi/2; //float vv = sqrt(D_parameter[9] * 2 / D_parameter[11]); //p_pat_elc[tid].pz = D_parameter[8]; //p_pat_elc[tid].pr = 0.005*S_number[i]; //p_pat_elc[tid].py = 2 * pi*S_number[i]; //p_pat_elc[tid].vr = vv*sin(afa)*cos(cita); //p_pat_elc[tid].vy = vv*sin(afa)*sin(cita); //p_pat_elc[tid].vz = 3e7; //p_pat_elc[tid].blei = p_pat_elc[tid].pr / D_parameter[0]; //p_pat_elc[tid].blek = p_pat_elc[tid].pz / D_parameter[1]; } else { int i = (int)(elcPr[threadIdx.x] / D_parameter[0]); int j = (int)(elcPz[threadIdx.x] / D_parameter[1]); int ii = (int)(PrelcPr[threadIdx.x] / D_parameter[0]); // int kk = (int)(PrelcPz[threadIdx.x] / D_parameter[1]); float wrr = 
PrelcPr[threadIdx.x] / D_parameter[0] - ii; float wzz = PrelcPz[threadIdx.x] / D_parameter[1] - kk; float newwrr = elcPr[threadIdx.x] / D_parameter[0] - i; float newwzz = elcPz[threadIdx.x] / D_parameter[1] - j; float V = abs(pi*((ii + 1)*D_parameter[0] * (ii + 1)*D_parameter[0] - ii*D_parameter[0] * ii*D_parameter[0])*D_parameter[1]); float V1 = abs(pi*((i + 1)*D_parameter[0] * (i + 1)*D_parameter[0] - i*D_parameter[0] * i*D_parameter[0])*D_parameter[1]); int grid_1 = i*(nz + 1) + j; int grid_2 = i*(nz + 1) + j + 1; int grid_3 = (i + 1)*(nz + 1) + j; int grid_4 = (i + 1)*(nz + 1) + j + 1; int grid_temp1 = ii*(nz + 1) + kk;//ii kk int grid_temp2 = ii*(nz + 1) + kk + 1;//ii kk+1 int grid_temp3 = (ii + 1)*(nz + 1) + kk;//ii+1 kk int grid_temp4 = (ii + 1)*(nz + 1) + kk + 1;//ii+1 kk+1 /*(*(device_G + grid_1)).Q += D_parameter[6] * (1 - wrr)*(1 - wzz); (*(device_G + grid_2)).Q += D_parameter[6] * (1 - wrr)*wzz; (*(device_G + grid_3)).Q += D_parameter[6] * wrr*(1 - wzz); (*(device_G + grid_4)).Q += D_parameter[6] * wrr*wzz;*/ //float area = pi*((ii*D_parameter[0] + D_parameter[0])*(ii*D_parameter[0] + D_parameter[0]) - ii*D_parameter[0] * ii*D_parameter[0])*D_parameter[1]; /*(*(device_G + grid_1)).den = (*(device_G + grid_1)).Q / area; (*(device_G + grid_2)).den = (*(device_G + grid_2)).Q / area; (*(device_G + grid_3)).den = (*(device_G + grid_3)).Q / area; (*(device_G + grid_4)).den = (*(device_G + grid_4)).Q / area;*/ // float xp = d_min_1(d_min_1(ii*D_parameter[0], i*D_parameter[0]) + D_parameter[0], d_max_1(d_max_1(ii*D_parameter[0], i*D_parameter[0]), (elcPr[threadIdx.x] + PrelcPr[threadIdx.x]) / 2)); float zp = d_min_1(d_min_1(kk*D_parameter[1], j*D_parameter[1]) + D_parameter[1], d_max_1(d_max_1(kk*D_parameter[1], j*D_parameter[1]), (elcPz[threadIdx.x] + PrelcPz[threadIdx.x]) / 2)); float fr1 = D_parameter[6] * (xp - PrelcPr[threadIdx.x]) / D_parameter[4]; float fz1 = D_parameter[6] * (zp - PrelcPz[threadIdx.x]) / D_parameter[4]; float fr2 = D_parameter[6] * (elcPr[threadIdx.x] - xp) / D_parameter[4]; float fz2 = D_parameter[6] * (elcPz[threadIdx.x] - zp) / D_parameter[4]; float wr1 = (xp + PrelcPr[threadIdx.x]) / 2 / D_parameter[1] - ii; float wz1 = (zp + PrelcPz[threadIdx.x]) / 2 / D_parameter[1] - kk; float wr2 = (xp + elcPr[threadIdx.x]) / 2 / D_parameter[0] - i; float wz2 = (zp + elcPz[threadIdx.x]) / 2 / D_parameter[1] - j; //printf("%d,%e,%e\n",tail, wr2, wz2); /*float da = (*(device_G + grid_1)).den*p_pat_elc[tid].vz; float da1 = (*(device_G + grid_3)).den*p_pat_elc[tid].vz;*/ /////////////////////////////////////////////////// 1 //int logic = ii*i / ((ii - 0.001)*(i - 0.001));// //float logicV = logic*V1 + (1 - logic)*V; if (ii == 0 || i == 0) { atomicAdd(&((*(device_G + grid_temp1)).jr_ion), (fr1*(1 - wz1) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_temp2)).jr_ion), (fr1*(wz1) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_temp1)).jz_ion), (fz1*(1 - wr1) / V));//i __syncthreads(); atomicAdd(&((*(device_G + grid_temp3)).jz_ion), (fz1*(wr1) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_1)).jr_ion), (fr2*(1 - wz2) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_2)).jr_ion), ((fr2*wz2) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_1)).jz_ion), (fz2*(1 - wr2) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_3)).jz_ion), (fz2*wr2 / V)); __syncthreads(); } else{ atomicAdd(&((*(device_G + grid_temp1)).jr_ion), (fr1*(1 - wz1) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_temp2)).jr_ion), (fr1*(wz1) / V1)); __syncthreads(); 
atomicAdd(&((*(device_G + grid_temp1)).jz_ion), (fz1*(1 - wr1) / V1));//i __syncthreads(); atomicAdd(&((*(device_G + grid_temp3)).jz_ion), (fz1*(wr1) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_1)).jr_ion), (fr2*(1 - wz2) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_2)).jr_ion), ((fr2*wz2) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_1)).jz_ion), (fz2*(1 - wr2) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_3)).jz_ion), (fz2*wr2 / V1)); __syncthreads(); } float qedens = D_parameter[6] / V; //Jy float Jc = qedens*elcVy[threadIdx.x]; float mid1, mid2; mid1 = xp / D_parameter[0] - i; mid2 = zp / D_parameter[1] - j; float A1 = (1 - wrr)*(1 - wzz); float A3 = wrr*(1 - wzz); float A2 = (1 - wrr)*wzz; float A4 = wrr*wzz; float M1 = (1 - mid1)*(1 - mid2); float M3 = mid1*(1 - mid2); float M2 = (1 - mid1)*mid2; float M4 = mid1*mid2; float B1 = (1 - newwrr)*(1 - newwzz); float B3 = newwrr*(1 - newwzz); float B2 = (1 - newwrr)*newwzz; float B4 = newwrr*newwzz; atomicAdd(&(*(device_G + grid_temp1)).jy_ion, (Jc*(A1 + M1)) / 4 - 0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_temp2)).jy_ion, (Jc*(A2 + M2)) / 4 - 0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_temp3)).jy_ion, (Jc*(A3 + M3)) / 4 - 0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_temp4)).jy_ion, (Jc*(A4 + M4)) / 4 - 0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_1)).jy_ion, (Jc*(B1 + M1)) / 4 - 0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_2)).jy_ion, (Jc*(B2 + M2)) / 4 - 0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_3)).jy_ion, (Jc*(B3 + M3)) / 4 - 0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_4)).jy_ion, (Jc*(B4 + M4)) / 4 - 0.0); __syncthreads(); } tid += gridDim.x*blockDim.x; } } __global__ void current(Paticle *p_pat_elc, Pre_Paticle *d_pre_elc, Grid *device_G, float *S_number, int tail,int t) { __shared__ float elcVr[thread]; __shared__ float elcVy[thread]; __shared__ float elcVz[thread]; __shared__ float elcPr[thread]; __shared__ float elcPy[thread]; __shared__ float elcPz[thread]; __shared__ float PrelcPr[thread];__shared__ float PrelcPz[thread]; int tid = blockDim.x*blockIdx.x + threadIdx.x; while (tid < tail) { elcVy[threadIdx.x] = p_pat_elc[tid].vy; elcPr[threadIdx.x] = p_pat_elc[tid].pr; elcPz[threadIdx.x] = p_pat_elc[tid].pz; PrelcPr[threadIdx.x] = d_pre_elc[tid].pr;// PrelcPz[threadIdx.x] = d_pre_elc[tid].pz;// if ((elcPz[threadIdx.x] >= D_parameter[3]) || (elcPz[threadIdx.x] < 0) || (elcPr[threadIdx.x] >= D_parameter[2])) { //int i = tid %30; //float afa = 0;//rds5*(pi/12); //float cita = 0;//rds6*pi/2; //float vv = sqrt(D_parameter[9] * 2 / D_parameter[11]); //p_pat_elc[tid].pz = D_parameter[8]; //p_pat_elc[tid].pr = 0.005*S_number[i]; //p_pat_elc[tid].py = 2 * pi*S_number[i]; //p_pat_elc[tid].vr = vv*sin(afa)*cos(cita); //p_pat_elc[tid].vy = vv*sin(afa)*sin(cita); //p_pat_elc[tid].vz = 3e7; //p_pat_elc[tid].blei = p_pat_elc[tid].pr / D_parameter[0]; //p_pat_elc[tid].blek = p_pat_elc[tid].pz / D_parameter[1]; } else { int i = (int)(elcPr[threadIdx.x] / D_parameter[0]); int j = (int)(elcPz[threadIdx.x] / D_parameter[1]); int ii = (int)(PrelcPr[threadIdx.x] / D_parameter[0]); // int kk = (int)(PrelcPz[threadIdx.x] / D_parameter[1]); float wrr = PrelcPr[threadIdx.x] / D_parameter[0] - ii; float wzz = PrelcPz[threadIdx.x] / D_parameter[1] - kk; float newwrr = elcPr[threadIdx.x] / D_parameter[0] - i; float newwzz = elcPz[threadIdx.x] / D_parameter[1] - j; float V = abs(pi*((ii + 1)*D_parameter[0] * (ii + 1)*D_parameter[0] - ii*D_parameter[0] * 
ii*D_parameter[0])*D_parameter[1]); float V1 = abs(pi*((i + 1)*D_parameter[0] * (i + 1)*D_parameter[0] - i*D_parameter[0] * i*D_parameter[0])*D_parameter[1]); int grid_1 = i*(nz + 1) + j; int grid_2 = i*(nz + 1) + j + 1; int grid_3 = (i + 1)*(nz + 1) + j; int grid_4 = (i + 1)*(nz + 1) + j + 1; int grid_temp1 = ii*(nz + 1) + kk;//ii kk int grid_temp2 = ii*(nz + 1) + kk + 1;//ii kk+1 int grid_temp3 = (ii + 1)*(nz + 1) + kk;//ii+1 kk int grid_temp4 = (ii + 1)*(nz + 1) + kk + 1;//ii+1 kk+1 /*(*(device_G + grid_1)).Q += D_parameter[5] * (1 - wrr)*(1 - wzz); (*(device_G + grid_2)).Q += D_parameter[5] * (1 - wrr)*wzz; (*(device_G + grid_3)).Q += D_parameter[5] * wrr*(1 - wzz); (*(device_G + grid_4)).Q += D_parameter[5] * wrr*wzz;*/ //float area = pi*((ii*D_parameter[0] + D_parameter[0])*(ii*D_parameter[0] + D_parameter[0]) - ii*D_parameter[0] * ii*D_parameter[0])*D_parameter[1]; /*(*(device_G + grid_1)).den = (*(device_G + grid_1)).Q / area; (*(device_G + grid_2)).den = (*(device_G + grid_2)).Q / area; (*(device_G + grid_3)).den = (*(device_G + grid_3)).Q / area; (*(device_G + grid_4)).den = (*(device_G + grid_4)).Q / area;*/ // float xp = d_min_1(d_min_1(ii*D_parameter[0], i*D_parameter[0]) + D_parameter[0], d_max_1(d_max_1(ii*D_parameter[0], i*D_parameter[0]), (elcPr[threadIdx.x] + PrelcPr[threadIdx.x]) / 2)); float zp = d_min_1(d_min_1(kk*D_parameter[1], j*D_parameter[1]) + D_parameter[1], d_max_1(d_max_1(kk*D_parameter[1], j*D_parameter[1]), (elcPz[threadIdx.x] + PrelcPz[threadIdx.x]) / 2)); float fr1 = D_parameter[5] * (xp - PrelcPr[threadIdx.x]) / D_parameter[4]; float fz1 = D_parameter[5] * (zp - PrelcPz[threadIdx.x]) / D_parameter[4]; float fr2 = D_parameter[5] * (elcPr[threadIdx.x] - xp) / D_parameter[4]; float fz2 = D_parameter[5] * (elcPz[threadIdx.x] - zp) / D_parameter[4]; float wr1 = (xp + PrelcPr[threadIdx.x]) / 2 / D_parameter[1] - ii; float wz1 = (zp + PrelcPz[threadIdx.x]) / 2 / D_parameter[1] - kk; float wr2 = (xp + elcPr[threadIdx.x]) / 2 / D_parameter[0] - i; float wz2 = (zp + elcPz[threadIdx.x]) / 2 / D_parameter[1] - j; //printf("%d,%e,%e\n",tail, wr2, wz2); /*float da = (*(device_G + grid_1)).den*p_pat_elc[tid].vz; float da1 = (*(device_G + grid_3)).den*p_pat_elc[tid].vz;*/ /////////////////////////////////////////////////// 1 //int logic = ii*i / ((ii - 0.001)*(i - 0.001));// //float logicV = logic*V1 + (1 - logic)*V; if (ii == 0||i==0 ) { atomicAdd(&((*(device_G + grid_temp1)).jr), (fr1*(1 - wz1) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_temp2)).jr), (fr1*(wz1) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_temp1)).jz), (fz1*(1 - wr1) / V));//i __syncthreads(); atomicAdd(&((*(device_G + grid_temp3)).jz), (fz1*(wr1) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_1)).jr), (fr2*(1 - wz2) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_2)).jr), ((fr2*wz2) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_1)).jz), (fz2*(1 - wr2) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_3)).jz), (fz2*wr2 / V)); __syncthreads(); } else{ atomicAdd(&((*(device_G + grid_temp1)).jr), (fr1*(1 - wz1) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_temp2)).jr), (fr1*(wz1) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_temp1)).jz), (fz1*(1 - wr1) / V1));//i __syncthreads(); atomicAdd(&((*(device_G + grid_temp3)).jz), (fz1*(wr1) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_1)).jr), (fr2*(1 - wz2) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_2)).jr), ((fr2*wz2) / V1)); __syncthreads(); atomicAdd(&((*(device_G + 
grid_1)).jz), (fz2*(1 - wr2) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_3)).jz), (fz2*wr2 / V1)); __syncthreads(); } float qedens = D_parameter[5] / V; //Jy float Jc = qedens*elcVy[threadIdx.x]; float mid1, mid2; mid1 = xp / D_parameter[0] - i; mid2 = zp / D_parameter[1] - j; float A1 = (1 - wrr)*(1 - wzz); float A3 = wrr*(1 - wzz); float A2 = (1 - wrr)*wzz; float A4 = wrr*wzz; float M1 = (1 - mid1)*(1 - mid2); float M3 = mid1*(1 - mid2); float M2 = (1 - mid1)*mid2; float M4 = mid1*mid2; float B1 = (1 - newwrr)*(1 - newwzz); float B3 = newwrr*(1 - newwzz); float B2 = (1 - newwrr)*newwzz; float B4 = newwrr*newwzz; atomicAdd(&(*(device_G + grid_temp1)).jy, (Jc*(A1 + M1)) / 4-0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_temp2)).jy, (Jc*(A2 + M2)) / 4-0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_temp3)).jy, (Jc*(A3 + M3)) / 4-0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_temp4)).jy, (Jc*(A4 + M4)) / 4-0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_1)).jy, (Jc*(B1 + M1)) / 4-0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_2)).jy, (Jc*(B2 + M2)) / 4-0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_3)).jy, (Jc*(B3 + M3)) / 4-0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_4)).jy, (Jc*(B4 + M4)) / 4-0.0); __syncthreads(); } tid += gridDim.x*blockDim.x; } } __global__ void device_define_G(int n, int m, Grid *device_gn, Grid *device_g) { int tid=blockIdx.x*blockDim.x+threadIdx.x; while(tid<n*m) { /*int i=tid/m; int j=tid%m; int tid_temp=i*(m+1)+j;*/ (device_gn+tid)->ey=(device_g+tid)->ey; (device_gn+tid)->hx=(device_g+tid)->hx; (device_gn+tid)->hz=(device_g+tid)->hz; (device_gn+tid)->ex=(device_g+tid)->ex; (device_gn+tid)->hy=(device_g+tid)->hy; (device_gn+tid)->ez=(device_g+tid)->ez; (device_g+tid)->ne[2]=0; (device_g+tid)->jr=0.0; (device_g+tid)->jz=0.0; (device_g+tid)->jy=0.0; (device_g+tid)->Q=0.0; (device_g+tid)->den=0.0; (device_g+tid)->jr_ion=0.0; (device_g+tid)->jz_ion=0.0; (device_g+tid)->jy_ion=0.0; (device_g+tid)->Q_ion=0.0; (device_g+tid)->den_ion=0.0; tid+=gridDim.x*blockDim.x; } /* printf("success,device_define_G");*/ } __device__ void L_InitialPML(int tid) { d_ex1[tid] = 0; d_iex1[tid] = 0; d_ey1[tid] = 0; d_iey1[tid] = 0; d_ez1[tid] = 0; d_iez1[tid] = 0; d_hx1[tid] = 0; d_ihx1[tid] = 0; d_hy1[tid]=0; d_ihy1[tid]=0; d_hz1[tid]=0; d_ihz1[tid]=0; } __global__ void kernel_L_InitialPML(int nxx, int nzz) { int tid = blockDim.x*blockIdx.x + threadIdx.x; while (tid<nxx*nzz) { L_InitialPML(tid); tid += gridDim.x*blockDim.x; } } __global__ void cacuchang_hx(Grid *device_G,Grid *device_Gn,float *d_sigmaz1,float *d_sigmaz,float dt,float dr,float dz,int nxx,int nzz) { float epsl=(8.854e-12); float mur=4.0*pi*1.0e-7; //float t0=120*dt,T=100*dt; float /*ca=0.0,*//*cb=0.0,*/ca1=0.0,cb1=0.0/*,ca2=0,cb2=0*/; float /*da=0.0,db=0.0,*/da1=0.0,db1=0.0,da2=0,db2=0; int tid=blockDim.x*blockIdx.x+threadIdx.x; /////******************** TE01 ******************///// while (tid<nxx*nzz) { int i=tid/nzz; int k=tid%nzz; if (i != nx&&k != nz) { da2 = 1;//(1+dt*sigmaz1[k]/epsl/2); db2 = 1;//(1-dt*sigmaz1[k]/epsl/2); da1 = (2 * epsl - dt*d_sigmaz[k]) / (2 * epsl + dt*d_sigmaz[k]); db1 = (2 * epsl) / (2 * epsl + d_sigmaz[k] * dt); d_hx1[tid] = da1*d_hx1[tid] + db1*dt*((*(device_Gn + tid+1)).ey - (*(device_Gn + tid)).ey) / dz; (*(device_G + tid)).hx = (*(device_Gn + tid)).hx + (d_hx1[tid] - d_ihx1[tid]) / mur; d_ihx1[tid] = d_hx1[tid]; } tid+=gridDim.x*blockDim.x; } } __global__ void cacuchang_hy(Grid *device_G, Grid *device_Gn, float *d_sigmaz1, float *d_sigmaz, 
float dt, float dr, float dz, int nxx, int nzz) { float epsl = (8.854e-12); float mur = 4.0*pi*1.0e-7; //float t0=120*dt,T=100*dt; float /*ca=0.0,*//*cb=0.0,*/ca1 = 0.0, cb1 = 0.0/*,ca2=0,cb2=0*/; float /*da=0.0,db=0.0,*/da1 = 0.0, db1 = 0.0, da2 = 0, db2 = 0; int tid = blockDim.x*blockIdx.x + threadIdx.x; /////******************** TE01 ******************///// while (tid<nxx*nzz) { int i = tid / nzz; int k = tid%nzz; if (i != nx&&k != nz) { da2 = 1;//(1+dt*sigmaz1[k]/epsl/2); db2 = 1;//(1-dt*sigmaz1[k]/epsl/2); ca1 = (2 * epsl - dt*d_sigmaz[k]) / (2 * epsl + dt*d_sigmaz[k]); cb1 = 2 * epsl / (2 * epsl + dt*d_sigmaz[k]); d_hy1[tid] = d_hy1[tid] + dt*(((*(device_Gn + tid+nzz)).ez - (*(device_Gn + tid)).ez) / dr - ((*(device_Gn + tid+1)).ex - (*(device_Gn + tid)).ex) / dz); (*(device_G + tid)).hy = ca1*(*(device_Gn + tid)).hy + cb1*(d_hy1[tid] - d_ihy1[tid]) / mur; d_ihy1[tid] = d_hy1[tid]; } tid += gridDim.x*blockDim.x; } } __global__ void cacuchang_hz(Grid *device_G, Grid *device_Gn, float *d_sigmaz1, float *d_sigmaz, float dt, float dr, float dz, int nxx, int nzz) { float epsl = (8.854e-12); float mur = 4.0*pi*1.0e-7; //float t0=120*dt,T=100*dt; float /*ca=0.0,*//*cb=0.0,*/ca1 = 0.0, cb1 = 0.0/*,ca2=0,cb2=0*/; float /*da=0.0,db=0.0,*/da1 = 0.0, db1 = 0.0, da2 = 0, db2 = 0; int tid = blockDim.x*blockIdx.x + threadIdx.x; /////******************** TE01 ******************///// while (tid<nxx*nzz) { int i = tid / nzz; int k = tid%nzz; if (i != nx&&k != nz) { da2 = 1;//(1+dt*sigmaz1[k]/epsl/2); db2 = 1;//(1-dt*sigmaz1[k]/epsl/2); d_hz1[tid] = d_hz1[tid] - dt*((*(device_Gn + tid+nzz)).ey - (*(device_Gn + tid)).ey) / dr - dt*((*(device_Gn + tid+nzz)).ey + (*(device_Gn + tid)).ey) / (2 * (i + 0.5)*dr); (*(device_G + tid)).hz = (*(device_Gn + tid)).hz + (da2*d_hz1[tid] - db2*d_ihz1[tid]) / mur; d_ihz1[tid] = d_hz1[tid]; } tid += gridDim.x*blockDim.x; } } __global__ void cacuchang_ex(Grid *device_G,Grid *device_Gn,float *d_sigmaz1,float *d_sigmaz,float dt,float dr,float dz,int nxx,int nzz) { float epsl=(8.854e-12); const float cgm=0.0; float mur=4.0*pi*1.0e-7; float A,B,C,D,v,a6=0.0; //float bate=0; float omega=0,freq=0; float t0=120*dt,T=100*dt; float ca=0.0,cb=0.0,ca1=0.0,cb1=0.0,ca2=0,cb2=0; float da=0.0,db=0.0,da1=0.0,db1=0.0,da2=0,db2=0; v=1/sqrt(mur*epsl); A=(2*epsl-cgm*dt)/(2*epsl-cgm*dt); B=(2*dt)/(2*epsl+cgm*dt); C=1; D=dt/mur; freq=1e8; int tid = blockDim.x*blockIdx.x + threadIdx.x; while (tid < nxx*nzz) { int i = tid / nzz; int k = tid%nzz; if (i != nx && k != nz) { if (k == 0) { float v = 1 / sqrt(mur*epsl); float coff = (v*dt - dz) / (v*dt + dz); device_G[tid].ex = device_Gn[tid + 1].ex + coff*(device_Gn[tid + 1].ex - device_Gn[tid].ex); } else { ca = (2 * epsl - dt*d_sigmaz1[k]) / (2 * epsl + dt*d_sigmaz1[k]); cb = 2 * dt / (2 * epsl + dt*d_sigmaz1[k]); d_ex1[tid] = ca*d_ex1[tid] - cb*(device_G[tid].hy - device_G[tid - 1].hy) / dz; device_G[tid].ex = device_Gn[tid].ex + (d_ex1[tid] - d_iex1[tid]) - B*(device_G[tid].jr + device_G[tid].jr_ion); d_iex1[tid] = d_ex1[tid]; } } tid += gridDim.x*blockDim.x; } } __global__ void cacuchang_ey(Grid *device_G,Grid *device_Gn,float *d_sigmaz1,float *d_sigmaz,float dt,float dr,float dz,int nxx,int nzz) { float epsl=(8.854e-12); const float cgm=0.0; float mur=4.0*pi*1.0e-7; float A,B,C,D,v,a6=0.0; float omega=0,freq=0; float t0=120*dt,T=100*dt; float ca=0.0,cb=0.0,ca1=0.0,cb1=0.0,ca2=0,cb2=0; float da=0.0,db=0.0,da1=0.0,db1=0.0,da2=0,db2=0; v=1/sqrt(mur*epsl); A=(2*epsl-cgm*dt)/(2*epsl-cgm*dt); B=(2*dt)/(2*epsl+cgm*dt); C=1; D=dt/mur; freq=1e8; int 
tid = blockDim.x*blockIdx.x + threadIdx.x; while(tid<nxx*nzz) { int i=tid/nz; int k=tid%nz; if (i != nx&&k != nz) { int tid1 = i*nzz + k; int tid2 = i*nzz + k + 1; int tid3 = (i + 1)*nzz + k; int tid4 = (i + 1)*nzz + k + 1; if (i == 0) { device_G[tid].ey = 0;//te01 } else if (k == 0) { float v = 1 / sqrt(mur*epsl); float coff = (v*dt - dz) / (v*dt + dz); device_G[tid].ey = device_Gn[tid + 1].ey + coff*(device_Gn[tid + 1].ey - device_Gn[tid].ey);//TE01 } else { da = (2 * epsl - dt*d_sigmaz1[k]) / (2 * epsl + dt*d_sigmaz1[k]); //TE01 db = 2 / (2 * epsl + dt*d_sigmaz1[k]); d_ey1[tid] = d_ey1[tid] + dt*((device_G[tid].hx - device_G[tid - 1].hx) / dz - (device_G[tid].hz - device_G[tid-nzz].hz) / dr);// device_G[tid].ey = da*device_Gn[tid].ey + db*(d_ey1[tid] - d_iey1[tid]) - B*(device_G[tid].jy + device_G[tid].jy_ion) * 10; d_iey1[tid] = d_ey1[tid]; } } tid+=gridDim.x*blockDim.x; } } __global__ void cacuchang_ez(Grid *device_G,Grid *device_Gn,float *d_sigmaz1,float *d_sigmaz,float dt,float dr,float dz,int nxx,int nzz) { float epsl=(8.854e-12); const float cgm=0.0; float mur=4.0*pi*1.0e-7; float A,B,C,D,v,a6=0.0; //float bate=0; float omega=0,freq=0; float t0=120*dt,T=100*dt; float ca=0.0,cb=0.0,ca1=0.0,cb1=0.0,ca2=0,cb2=0; float da=0.0,db=0.0,da1=0.0,db1=0.0,da2=0,db2=0; v=1/sqrt(mur*epsl); A=(2*epsl-cgm*dt)/(2*epsl-cgm*dt); B=(2*dt)/(2*epsl+cgm*dt); //printf("%e\n",B); C=1; D=dt/mur; freq=1e8; int tid = blockDim.x*blockIdx.x + threadIdx.x; while(tid<nxx*nzz) { int i=tid/nzz; int k=tid%nzz; if (i != nx&&k != nz) { if (i == 0) { ca2 = 1;//(2*epsl+dt*sigmaz[k])/(2*epsl); cb2 = 1;//(2*epsl-dt*sigmaz[k])/(2*epsl); d_ez1[tid] = d_ez1[tid] + dt * 4 * (*(device_G + tid)).hy / dr / epsl; (*(device_G + tid)).ez = (*(device_Gn + tid)).ez + (ca2*d_ez1[tid] - cb2*d_iez1[tid]) - B*((*(device_G + tid)).jz + (*(device_G + tid)).jz_ion); d_iez1[tid] = d_ez1[tid]; } else { ca2 = 1;//(2*epsl+dt*sigmaz[k])/(2*epsl);//TM01 cb2 = 1;//(2*epsl-dt*sigmaz[k])/(2*epsl); d_ez1[tid] = d_ez1[tid] + dt*((1 / (2 * i*dr) + 1 / dr)*(*(device_G + tid)).hy + ((1 / (2 * i*dr)) - 1 / dr)*(*(device_G + tid - nzz)).hy); (*(device_G + tid)).ez = (*(device_Gn + tid)).ez + (ca2*d_ez1[tid] - cb2*d_iez1[tid]) / epsl - B*((*(device_G + tid)).jz + (*(device_G + tid)).jz_ion); d_iez1[tid] = d_ez1[tid]; } } tid+=gridDim.x*blockDim.x; } }
0103a89d8fb8a7a2554b41c97e52c1ebb27867ff.cu
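// File overview: CUDA kernels for a 2-D axisymmetric (r,z) electromagnetic particle-in-cell simulation:
// field initialization (device_initialchang), particle seeding (initial_always), field averaging to grid
// nodes (device_ave_field), Boris-rotation pushes for electrons and ions (device_update_last,
// device_update_ion), current deposition including cell-crossing handling (current, current_ion),
// per-step field buffering and reset (device_define_G, kernel_L_InitialPML), and the FDTD H/E field
// updates with a sigma_z conductivity (PML-type) profile (cacuchang_hx/hy/hz/ex/ey/ez).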
#include "lty_paralle_common.cuh" //#include "common.cuh" #include <stdio.h> #include<iostream> #include<time.h> #include"device_functions.h" #include "device_launch_parameters.h" #include <math.h> #include "sm_20_atomic_functions.hpp" using namespace std; extern int Ne,int nxx,int nzz,int kongxue,int tail,int enlarge; extern float dr,float dz,float ca_posz,float KTe; //extern float *d_ex1,float *d_iex1,float *d_ey1,float *d_iey1,float *d_ez1,float *d_iez1, // float *d_hx1,float *d_ihx1,float *d_hy1,float *d_ihy1,float *d_hz1,float *d_ihz1, // float *d_sigmaz1,float *d_sigmaz,float *sigmaz1,float *sigmaz; extern Paticle *pat_elc; __device__ float d_ex1[51 * 101], d_iex1[51 * 101], d_ey1[51 * 101], d_iey1[51 * 101], d_ez1[51 * 101], d_iez1[51 * 101], d_hx1[51 * 101], d_ihx1[51 * 101], d_hy1[51 * 101], d_ihy1[51 * 101], d_hz1[51 * 101], d_ihz1[51 * 101]; //extern float *d_sigmaz1 = NULL, *d_sigmaz = NULL; __constant__ float D_parameter[12]={1.0e-003,1.0e-003,5.0e-002,1.0e-001,1.66782e-12,-5.5594e-15,5.5594e-17,0.005,0.03, 4.095e-16,2.4033e-20,9.1e-31}; //0dr 1dz 2R 3L 4dt 5qe 6qi 7ca_posr 8ca_posz 9KTe 10KTi 11Me /*3.2044e-19*/ __global__ void device_initialchang(Grid* device_g,Grid* device_gn)//x是行 y是列 { int x=51,y=101; int tid=blockIdx.x*blockDim.x+threadIdx.x; while(tid<x*y) { (device_g+tid)->ex=0; (device_g+tid)->ey=0; (device_g+tid)->ez=0; (device_g+tid)->hx=0; (device_g+tid)->hy=0; (device_g+tid)->hz=0; (device_g+tid)->ave_ex=0; (device_g+tid)->ave_ey=0; (device_g+tid)->ave_ez=0; (device_g+tid)->ave_hx=0; (device_g+tid)->ave_hy=0; (device_g+tid)->ave_hz=0; (device_g+tid)->ne[0]=0; (device_g+tid)->ne[1]=0; (device_g+tid)->jr=0.0; (device_g+tid)->jz=0.0; (device_g+tid)->jy=0.0; (device_g+tid)->jr_ion=0.0; (device_g+tid)->jz_ion=0.0; (device_g+tid)->jy_ion=0.0; (device_g+tid)->Pmax=0.0; (device_gn+tid)->ex=0; (device_gn+tid)->ey=0; (device_gn+tid)->ez=0; (device_gn+tid)->hx=0; (device_gn+tid)->hy=0; (device_gn+tid)->hz=0; (device_gn+tid)->ne[2]=0; tid+=gridDim.x*blockDim.x; } //printf("success,device_initial_chang"); } //void device_initial_always(float *device_rds,float *device_rds1,Paticle *p_pat_elc,Grid *device_G,float ca_posz,int Ne,int nx,int nz,float dr,float dz,float KTe) __global__ void initial_always(Paticle *pat_elc,int n,float *S_number,int tmp)//n是step*30 { int tid=blockIdx.x*blockDim.x+threadIdx.x; while(tid<n) { int i=tid%tmp; float afa=0;//rds5*(pi/12); float cita=0;//rds6*pi/2; float vv = 1e6; pat_elc[tid].pz=D_parameter[8]; pat_elc[tid].pr =0.04*S_number[i]+0.001;//发射位置垫高 pat_elc[tid].py=2*pi*S_number[i]; pat_elc[tid].vr = vv*sin(afa)*cos(cita); pat_elc[tid].vy = vv*sin(afa)*sin(cita); pat_elc[tid].vz=vv; //pat_elc[tid].blei = pat_elc[tid].pr/D_parameter[0]; // pat_elc[tid].blek = pat_elc[tid].pz/D_parameter[1]; tid+=gridDim.x*blockDim.x; } } __global__ void device_ave_field(int x,int y,Grid* device_g,Grid* device_gn) //x是行 y是列 { int u=0,v=0; int tid=blockIdx.x*blockDim.x+threadIdx.x; while(tid<x*(y)) { //bool bv=tid%y;//v; //bool bu=tid/(y);//u; /*G[u][v].ave_ex=(bv*G[u][v].ex+bu*G[u-1+1-bu][v].ex+(1-bv)*(1-bu)*G[u][v].ex+(1-bv)*bu*G[u][v].ex)/(bv+bu+(1-bv)*(1-bu)+(1-bv)*bu); G[u][v].ave_ez=(G[u][v].ez+bv*G[u][v-1+1-bv].ez)/(2-(1-bv)); G[u][v].ave_hy=(G[u][v].hy+bu*G[u-1+1-bu][v].hy+bv*G[u][v-1+1-bv].hy+bu*bv*G[u-1+1-bu][v-1+1-bv].hy +Gn[u][v].hy+bu*Gn[u-1+1-bu][v].hy+bv*Gn[u][v-1+1-bv].hy+bu*bv*Gn[u-1+1-bu][v-1+1-bv].hy)/(8/(3-bu-bv+(1-bu)*(1-bv))); G[u][v].ave_ey=G[u][v].ey; G[u][v].ave_hx=(G[u][v].hx+Gn[u][v].hx+bv*G[u][v-1+1-bv].hx+bv*Gn[u][v-1+1-bv].hx)/(2+2*bv); 
G[u][v].ave_hz=(G[u][v].hz+Gn[u][v].hz+bu*G[u-1+1-bu][v].hz+bu*Gn[u-1+1-bu][v].hz)/(2+2*bu);*/ /*device_g[tid].ave_ex=(bv*device_g[tid].ex+bu*device_g[tid-bu*y].ex+(1-bv)*(1-bu)*device_g[tid].ex+(1-bv)*bu*device_g[tid].ex)/(bv+bu+(1-bv)*(1-bu)+(1-bv)*bu); device_g[tid].ave_ez=(device_g[tid].ez+bv*device_g[tid-bv].ez)/(2-(1-bv)); device_g[tid].ave_hy=(device_g[tid].hy+bu*device_g[tid-bu*y].hy+bv*device_g[tid-bv].hy+bu*bv*device_g[tid-bu*y-bv].hy +device_gn[tid].hy+bu*device_gn[tid-bu*y].hy+bv*device_gn[tid-bv].hy+bu*bv*device_gn[tid-bu*y-bv].hy)/(8/(3-bu-bv+(1-bu)*(1-bv))); device_g[tid].ave_ey=device_g[tid].ey; device_g[tid].ave_hx=(device_g[tid].hx+device_gn[tid].hx+bv*device_g[tid-bv].hx+bv*device_gn[tid-bv].hx)/(2+2*bv); device_g[tid].ave_hz=(device_g[tid].hz+device_gn[tid].hz+bu*device_g[tid-bu*y].hz+bu*device_gn[tid-bu*y].hz)/(2+2*bu);*/ u=tid/(y); v=tid%y; int tid_temp=u*(y)+v; if(u==0&&v!=0) { (*(device_g+tid_temp)).ave_ex=((*(device_g+tid_temp)).ex); (*(device_g+tid_temp)).ave_ez=((*(device_g+tid_temp)).ez+(*(device_g+tid_temp-1)).ez)/2; (*(device_g+tid_temp)).ave_hy=((*(device_g+tid_temp)).hy+(*(device_g+tid_temp-1)).hy+(*(device_gn+tid_temp)).hy+(*(device_gn+tid_temp-1)).hy)/4; (*(device_g+tid_temp)).ave_ey=(*(device_g+tid_temp)).ey; (*(device_g+tid_temp)).ave_hx=((*(device_g+tid_temp)).hx+(*(device_gn+tid_temp)).hx+(*(device_g+tid_temp-1)).hx+(*(device_gn+tid_temp-1)).hx)/4; (*(device_g+tid_temp)).ave_hz=((*(device_g+tid_temp)).hz+(*(device_gn+tid_temp)).hz)/2; } else if(v==0&&u!=0) { (*(device_g+tid_temp)).ave_ex=((*(device_g+tid_temp)).ex+(*(device_g+tid_temp-y)).ex)/2; (*(device_g+tid_temp)).ave_ez=((*(device_g+tid_temp)).ez); (*(device_g+tid_temp)).ave_hy=((*(device_g+tid_temp)).hy+(*(device_g+tid_temp-y)).hy+(*(device_gn+tid_temp)).hy+(*(device_gn+tid_temp-y)).hy)/4; (*(device_g+tid_temp)).ave_ey=(*(device_g+tid_temp)).ey; (*(device_g+tid_temp)).ave_hx=((*(device_g+tid_temp)).hx+(*(device_gn+tid_temp)).hx)/2; (*(device_g+tid_temp)).ave_hz=((*(device_g+tid_temp)).hz+(*(device_gn+tid_temp)).hz+(*(device_g+tid_temp-y)).hz+(*(device_gn+tid_temp-y)).hz)/4; } else if(v==0&&u==0) { (*(device_g+tid_temp)).ave_ex=(*(device_g+tid_temp)).ex; (*(device_g+tid_temp)).ave_ez=(*(device_g+tid_temp)).ez; (*(device_g+tid_temp)).ave_hy=((*(device_g+tid_temp)).hy+(*(device_gn+tid_temp)).hy)/2; (*(device_g+tid_temp)).ave_ey=(*(device_g+tid_temp)).ey; (*(device_g+tid_temp)).ave_hx=((*(device_g+tid_temp)).hx+(*(device_gn+tid_temp)).hx)/2; (*(device_g+tid_temp)).ave_hz=((*(device_g+tid_temp)).hz+(*(device_gn+tid_temp)).hz)/2; } else{ (*(device_g+tid_temp)).ave_ex=((*(device_g+tid_temp)).ex+(*(device_g+tid_temp-y)).ex)/2; (*(device_g+tid_temp)).ave_ez=((*(device_g+tid_temp)).ez+(*(device_g+tid_temp-1)).ez)/2; (*(device_g+tid_temp)).ave_hy=((*(device_g+tid_temp)).hy+(*(device_g+tid_temp-y)).hy+(*(device_g+tid_temp-1)).hy+(*(device_g+tid_temp-y-1)).hy +(*(device_gn+tid_temp)).hy+(*(device_gn+tid_temp-y)).hy+(*(device_gn+tid_temp-1)).hy+(*(device_gn+tid_temp-y-1)).hy)/8; (*(device_g+tid_temp)).ave_ey=(*(device_g+tid_temp)).ey; (*(device_g+tid_temp)).ave_hx=((*(device_g+tid_temp)).hx+(*(device_gn+tid_temp)).hx+(*(device_g+tid_temp-1)).hx+(*(device_gn+tid_temp-1)).hx)/4; (*(device_g+tid_temp)).ave_hz=((*(device_g+tid_temp)).hz+(*(device_gn+tid_temp)).hz+(*(device_g+tid_temp-y)).hz+(*(device_gn+tid_temp-y)).hz)/4; } tid+=gridDim.x*blockDim.x; // printf("success,device_initial_chang"); } } __device__ float d_min_1(float x,float y) { return(x<y?x:y); } __device__ float d_max_1(float x,float y) { 
return(x>y?x:y); } __global__ void device_update_last(float *device_static_magneticX, float *device_static_magneticZ, float *device_static_electricX, float *device_static_electricZ, Paticle *p_pat_elc, Pre_Paticle *d_pre_elc, Grid *device_G, float *S_number, int tail) { //printf("%d",tail); float Qm_ion = 7.33945e+5; float Qm = -1.7588e+11; float mur = 4.0*pi*1.0e-7; float E[3] = { 0, 0, 0 }, B[3] = { 0, 0, 0 }; int tid = blockDim.x*blockIdx.x + threadIdx.x; int threadtid = threadIdx.x; //Paticle p_prepat_elc; __shared__ float elcVr[thread]; __shared__ float elcVy[thread]; __shared__ float elcVz[thread]; __shared__ float elcPr[thread]; __shared__ float elcPy[thread]; __shared__ float elcPz[thread]; __shared__ float PrelcPr[thread]; __shared__ float PrelcPz[thread]; while (tid<tail) { elcVr[threadIdx.x] = p_pat_elc[tid].vr; elcVy[threadIdx.x] = p_pat_elc[tid].vy; elcVz[threadIdx.x] = p_pat_elc[tid].vz; elcPr[threadIdx.x] = p_pat_elc[tid].pr; elcPy[threadIdx.x] = p_pat_elc[tid].py; elcPz[threadIdx.x] = p_pat_elc[tid].pz; d_pre_elc[tid].pr = elcPr[threadIdx.x];// d_pre_elc[tid].pz = elcPz[threadIdx.x];// int ii = (int)(elcPr[threadIdx.x] / D_parameter[0]); //云方程 int kk = (int)(elcPz[threadIdx.x] / D_parameter[1]); float wr = (elcPr[threadIdx.x] / D_parameter[0]) - ii; float wz = (elcPz[threadIdx.x] / D_parameter[1]) - kk; float s1 = (1 - wr)*(1 - wz); float s2 = (1 - wr)*wz; float s3 = wr*(1 - wz); float s4 = wr*wz; int grid_temp1 = ii*(nz + 1) + kk;//ii kk int grid_temp2 = ii*(nz + 1) + kk + 1;//ii kk+1 int grid_temp3 = (ii + 1)*(nz + 1) + kk;//ii+1 kk int grid_temp4 = (ii + 1)*(nz + 1) + kk + 1;//ii+1 kk+1 //printf("u:%d\t", tid); E[0] = ((*(device_G + grid_temp1)).ave_ex*s1 + (*(device_G + grid_temp3)).ave_ex*s3 + (*(device_G + grid_temp2)).ave_ex*s2 + (*(device_G + grid_temp4)).ave_ex*s4) +((*(device_static_electricX+grid_temp1))*s1+(*(device_static_electricX+grid_temp3))*s3+(*(device_static_electricX+grid_temp2))*s2+(*(device_static_electricX+grid_temp4))*s4);//+stac_ex[ii][kk]; //printf("u:%d\tE0=%f\n", tid,E[0]); E[2] = ((*(device_G + grid_temp1)).ave_ez*s1 + (*(device_G + grid_temp3)).ave_ez*s3 + (*(device_G + grid_temp2)).ave_ez*s2 + (*(device_G + grid_temp4)).ave_ez*s4) +((*(device_static_electricZ+grid_temp1))*s1+(*(device_static_electricZ+grid_temp3))*s3+(*(device_static_electricZ+grid_temp2))*s2+(*(device_static_electricZ+grid_temp4))*s4);//+(*(device_static_electricZ+grid_temp1)); //printf("u:%d\tE2=%f\n", tid,E[2]); B[1] = (((*(device_G + grid_temp1)).ave_hy*s1 + (*(device_G + grid_temp3)).ave_hy*s3 + (*(device_G + grid_temp2)).ave_hy*s2 + (*(device_G + grid_temp4)).ave_hy*s4)*mur); //printf("u:%d\tB1=%f\n", tid, B[1]); E[1] = ((*(device_G + grid_temp1)).ave_ey*s1 + (*(device_G + grid_temp3)).ave_ey*s3 + (*(device_G + grid_temp2)).ave_ey*s2 + (*(device_G + grid_temp4)).ave_ey*s4); //printf("u:%d\tE1=%f\n", tid,E[1]); B[0] = (((*(device_G + grid_temp1)).ave_hx*s1 + (*(device_G + grid_temp3)).ave_hx*s3 + (*(device_G + grid_temp2)).ave_hx*s2 + (*(device_G + grid_temp4)).ave_hx*s4)*mur) +(*(device_static_magneticX+grid_temp1))*s1+(*(device_static_magneticX+grid_temp3))*s3+((*(device_static_magneticX+grid_temp2))*s2+(*(device_static_magneticX+grid_temp4))*s4);//+(*(device_static_magneticX+grid_temp3)); //B[0]=((G[ii][kk].ave_hx*s1+G[ii+1][kk].ave_hx*s3+G[ii][kk+1].ave_hx*s2+G[ii+1][kk+1].ave_hx*s4)*mur) //+(stac_Bx[ii][kk] * s1 + stac_Bx[ii + 1][kk] * s3 + stac_Bx[ii][kk + 1] * s2 + stac_Bx[ii + 1][kk + 1] * s4); //printf("u:%d\tB0=%f\n", tid, B[0]); B[2] = (((*(device_G + 
grid_temp1)).ave_hz*s1 + (*(device_G + grid_temp3)).ave_hz*s3 + (*(device_G + grid_temp2)).ave_hz*s2 + (*(device_G + grid_temp4)).ave_hz*s4)*mur) +((*(device_static_magneticZ+grid_temp1))*s1+(*(device_static_magneticZ+grid_temp3))*s3+(*(device_static_magneticZ+grid_temp2))*s2+(*(device_static_magneticZ+grid_temp4))*s4);//+(*(device_static_magneticZ+grid_temp3)); //printf("B2=%f\n", B[2]); float u1[3] = { 0 }, u2[3] = { 0 }, u3[3] = { 0 };//u_n-1/2,u-,,u+,u_n+1/2 // electron push, solved in sub-steps float t[3] = { 0 }, s[3] = { 0 }; float pp[3][3] = { 0 }; u1[0] = elcVr[threadIdx.x] + (D_parameter[4] / 2)*Qm*E[0]; u1[1] = elcVy[threadIdx.x] + (D_parameter[4] / 2)*Qm*E[1]; u1[2] = elcVz[threadIdx.x] + (D_parameter[4] / 2)*Qm*E[2]; for (int m = 0; m<3; m++) { t[m] = (B[m] * Qm*D_parameter[4]) / 2; // Boris rotation, compute t s[m] = (2 * t[m]) / (1 + t[m] * t[m]); //compute s } pp[0][0] = 1 - s[2] * t[2] - s[1] * t[1]; //3*3 matrix elements pp[0][1] = s[1] * t[0] + s[2]; pp[0][2] = s[2] * t[0] - s[1]; pp[1][0] = s[0] * t[1] - s[2]; pp[1][1] = 1 - s[2] * t[2] - s[0] * t[0]; pp[1][2] = s[0] + s[2] * t[1]; pp[2][0] = s[0] * t[2] + s[1]; pp[2][1] = s[1] * t[2] - s[0]; pp[2][2] = 1 - s[1] * t[1] - s[0] * t[0]; u2[0] = pp[0][0] * u1[0] + pp[0][1] * u1[1] + pp[0][2] * u1[2]; u2[1] = pp[1][0] * u1[0] + pp[1][1] * u1[1] + pp[1][2] * u1[2]; u2[2] = pp[2][0] * u1[0] + pp[2][1] * u1[1] + pp[2][2] * u1[2]; for (int m = 0; m<3; m++) u3[m] = u2[m] + (D_parameter[4] / 2)*Qm*E[m]; elcVr[threadIdx.x] = u3[0]; elcVy[threadIdx.x] = u3[1]; elcVz[threadIdx.x] = u3[2]; float cit1a = 0; if ((elcPr[threadIdx.x]) == 0) cit1a = 0; else cit1a = atan((elcVy[threadIdx.x] * D_parameter[4]) / (elcPr[threadIdx.x] + elcVr[threadIdx.x] * D_parameter[4])); float temp_x = elcPr[threadIdx.x] + elcVr[threadIdx.x] * D_parameter[4]; if (temp_x >= 0) elcPr[threadIdx.x] = sqrt((elcPr[threadIdx.x] + elcVr[threadIdx.x] * D_parameter[4])*(elcPr[threadIdx.x] + elcVr[threadIdx.x] * D_parameter[4]) + elcVy[threadIdx.x] * D_parameter[4] * elcVy[threadIdx.x] * D_parameter[4]); else { elcPr[threadIdx.x] = -temp_x; elcVr[threadIdx.x] = -elcVr[threadIdx.x]; } elcPz[threadIdx.x] = elcPz[threadIdx.x] + elcVz[threadIdx.x] * D_parameter[4]; elcPy[threadIdx.x] = elcPy[threadIdx.x] + cit1a; if ((elcPr[threadIdx.x]) == 0) { elcVr[threadIdx.x] = elcVr[threadIdx.x]; elcVy[threadIdx.x] = elcVy[threadIdx.x]; } elcVr[threadIdx.x] = cos(cit1a)*elcVr[threadIdx.x] + sin(cit1a)*elcVy[threadIdx.x]; elcVy[threadIdx.x] = -sin(cit1a)*elcVr[threadIdx.x] + cos(cit1a)*elcVy[threadIdx.x]; p_pat_elc[tid].vr = elcVr[threadIdx.x]; p_pat_elc[tid].vy = elcVy[threadIdx.x]; p_pat_elc[tid].vz = elcVz[threadIdx.x]; p_pat_elc[tid].pr = elcPr[threadIdx.x]; p_pat_elc[tid].py = elcPy[threadIdx.x]; p_pat_elc[tid].pz = elcPz[threadIdx.x]; tid += gridDim.x*blockDim.x; } } __global__ void device_update_ion(float *device_static_magneticX, float *device_static_magneticZ, float *device_static_electricX, float *device_static_electricZ, Paticle *p_pat_elc, Pre_Paticle *d_pre_elc, Grid *device_G, float *S_number, int tail) { //printf("%d",tail); float Qm_ion = 7.33945e+5; float Qm = -1.7588e+11; float mur = 4.0*pi*1.0e-7; float E[3] = { 0, 0, 0 }, B[3] = { 0, 0, 0 }; int tid = blockDim.x*blockIdx.x + threadIdx.x; int threadtid = threadIdx.x; //Paticle p_prepat_elc; __shared__ float elcVr[thread]; __shared__ float elcVy[thread]; __shared__ float elcVz[thread]; __shared__ float elcPr[thread]; __shared__ float elcPy[thread]; __shared__ float elcPz[thread]; __shared__ float PrelcPr[thread]; __shared__ float PrelcPz[thread]; while (tid<tail) { 
elcVr[threadIdx.x] = p_pat_elc[tid].vr; elcVy[threadIdx.x] = p_pat_elc[tid].vy; elcVz[threadIdx.x] = p_pat_elc[tid].vz; elcPr[threadIdx.x] = p_pat_elc[tid].pr; elcPy[threadIdx.x] = p_pat_elc[tid].py; elcPz[threadIdx.x] = p_pat_elc[tid].pz; d_pre_elc[tid].pr = elcPr[threadIdx.x];// d_pre_elc[tid].pz = elcPz[threadIdx.x];// int ii = (int)(elcPr[threadIdx.x] / D_parameter[0]); //云方程 int kk = (int)(elcPz[threadIdx.x] / D_parameter[1]); float wr = (elcPr[threadIdx.x] / D_parameter[0]) - ii; float wz = (elcPz[threadIdx.x] / D_parameter[1]) - kk; float s1 = (1 - wr)*(1 - wz); float s2 = (1 - wr)*wz; float s3 = wr*(1 - wz); float s4 = wr*wz; int grid_temp1 = ii*(nz + 1) + kk;//ii kk int grid_temp2 = ii*(nz + 1) + kk + 1;//ii kk+1 int grid_temp3 = (ii + 1)*(nz + 1) + kk;//ii+1 kk int grid_temp4 = (ii + 1)*(nz + 1) + kk + 1;//ii+1 kk+1 //printf("u:%d\t", tid); E[0] = ((*(device_G + grid_temp1)).ave_ex*s1 + (*(device_G + grid_temp3)).ave_ex*s3 + (*(device_G + grid_temp2)).ave_ex*s2 + (*(device_G + grid_temp4)).ave_ex*s4) +((*(device_static_electricX+grid_temp1))*s1+(*(device_static_electricX+grid_temp3))*s3+(*(device_static_electricX+grid_temp2))*s2+(*(device_static_electricX+grid_temp4))*s4);//+stac_ex[ii][kk]; //printf("u:%d\tE0=%f\n", tid,E[0]); E[2] = ((*(device_G + grid_temp1)).ave_ez*s1 + (*(device_G + grid_temp3)).ave_ez*s3 + (*(device_G + grid_temp2)).ave_ez*s2 + (*(device_G + grid_temp4)).ave_ez*s4) +((*(device_static_electricZ+grid_temp1))*s1+(*(device_static_electricZ+grid_temp3))*s3+(*(device_static_electricZ+grid_temp2))*s2+(*(device_static_electricZ+grid_temp4))*s4);//+(*(device_static_electricZ+grid_temp1)); //printf("u:%d\tE2=%f\n", tid,E[2]); B[1] = (((*(device_G + grid_temp1)).ave_hy*s1 + (*(device_G + grid_temp3)).ave_hy*s3 + (*(device_G + grid_temp2)).ave_hy*s2 + (*(device_G + grid_temp4)).ave_hy*s4)*mur); //printf("u:%d\tB1=%f\n", tid, B[1]); E[1] = ((*(device_G + grid_temp1)).ave_ey*s1 + (*(device_G + grid_temp3)).ave_ey*s3 + (*(device_G + grid_temp2)).ave_ey*s2 + (*(device_G + grid_temp4)).ave_ey*s4); //printf("u:%d\tE1=%f\n", tid,E[1]); B[0] = (((*(device_G + grid_temp1)).ave_hx*s1 + (*(device_G + grid_temp3)).ave_hx*s3 + (*(device_G + grid_temp2)).ave_hx*s2 + (*(device_G + grid_temp4)).ave_hx*s4)*mur) +(*(device_static_magneticX+grid_temp1))*s1+(*(device_static_magneticX+grid_temp3))*s3+((*(device_static_magneticX+grid_temp2))*s2+(*(device_static_magneticX+grid_temp4))*s4);//+(*(device_static_magneticX+grid_temp3)); //B[0]=((G[ii][kk].ave_hx*s1+G[ii+1][kk].ave_hx*s3+G[ii][kk+1].ave_hx*s2+G[ii+1][kk+1].ave_hx*s4)*mur) //+(stac_Bx[ii][kk] * s1 + stac_Bx[ii + 1][kk] * s3 + stac_Bx[ii][kk + 1] * s2 + stac_Bx[ii + 1][kk + 1] * s4); //printf("u:%d\tB0=%f\n", tid, B[0]); B[2] = (((*(device_G + grid_temp1)).ave_hz*s1 + (*(device_G + grid_temp3)).ave_hz*s3 + (*(device_G + grid_temp2)).ave_hz*s2 + (*(device_G + grid_temp4)).ave_hz*s4)*mur) +((*(device_static_magneticZ+grid_temp1))*s1+(*(device_static_magneticZ+grid_temp3))*s3+(*(device_static_magneticZ+grid_temp2))*s2+(*(device_static_magneticZ+grid_temp4))*s4);//+(*(device_static_magneticZ+grid_temp3)); //printf("B2=%f\n", B[2]); float u1[3] = { 0 }, u2[3] = { 0 }, u3[3] = { 0 };//u_n-1/2,u-,,u+,u_n+1/2 // electron 分步求解 float t[3] = { 0 }, s[3] = { 0 }; float pp[3][3] = { 0 }; u1[0] = elcVr[threadIdx.x] + (D_parameter[4] / 2)*Qm_ion*E[0]; u1[1] = elcVy[threadIdx.x] + (D_parameter[4] / 2)*Qm_ion*E[1]; u1[2] = elcVz[threadIdx.x] + (D_parameter[4] / 2)*Qm_ion*E[2]; for (int m = 0; m<3; m++) { t[m] = (B[m] * Qm_ion*D_parameter[4]) / 2; // 
鲍尔斯旋转,求t s[m] = (2 * t[m]) / (1 + t[m] * t[m]); //求s } pp[0][0] = 1 - s[2] * t[2] - s[1] * t[1]; //3*3矩阵元素 pp[0][1] = s[1] * t[0] + s[2]; pp[0][2] = s[2] * t[0] - s[1]; pp[1][0] = s[0] * t[1] - s[2]; pp[1][1] = 1 - s[2] * t[2] - s[0] * t[0]; pp[1][2] = s[0] + s[2] * t[1]; pp[2][0] = s[0] * t[2] + s[1]; pp[2][1] = s[1] * t[2] - s[0]; pp[2][2] = 1 - s[1] * t[1] - s[0] * t[0]; u2[0] = pp[0][0] * u1[0] + pp[0][1] * u1[1] + pp[0][2] * u1[2]; u2[1] = pp[1][0] * u1[0] + pp[1][1] * u1[1] + pp[1][2] * u1[2]; u2[2] = pp[2][0] * u1[0] + pp[2][1] * u1[1] + pp[2][2] * u1[2]; for (int m = 0; m<3; m++) u3[m] = u2[m] + (D_parameter[4] / 2)*Qm_ion*E[m]; elcVr[threadIdx.x] = u3[0]; elcVy[threadIdx.x] = u3[1]; elcVz[threadIdx.x] = u3[2]; float cit1a = 0; if ((elcPr[threadIdx.x]) == 0) cit1a = 0; else cit1a = atan((elcVy[threadIdx.x] * D_parameter[4]) / (elcPr[threadIdx.x] + elcVr[threadIdx.x] * D_parameter[4])); float temp_x = elcPr[threadIdx.x] + elcVr[threadIdx.x] * D_parameter[4]; if (temp_x >= 0) elcPr[threadIdx.x] = sqrt((elcPr[threadIdx.x] + elcVr[threadIdx.x] * D_parameter[4])*(elcPr[threadIdx.x] + elcVr[threadIdx.x] * D_parameter[4]) + elcVy[threadIdx.x] * D_parameter[4] * elcVy[threadIdx.x] * D_parameter[4]); else { elcPr[threadIdx.x] = -temp_x; elcVr[threadIdx.x] = -elcVr[threadIdx.x]; } elcPz[threadIdx.x] = elcPz[threadIdx.x] + elcVz[threadIdx.x] * D_parameter[4]; elcPy[threadIdx.x] = elcPy[threadIdx.x] + cit1a; if ((elcPr[threadIdx.x]) == 0) { elcVr[threadIdx.x] = elcVr[threadIdx.x]; elcVy[threadIdx.x] = elcVy[threadIdx.x]; } elcVr[threadIdx.x] = cos(cit1a)*elcVr[threadIdx.x] + sin(cit1a)*elcVy[threadIdx.x]; elcVy[threadIdx.x] = -sin(cit1a)*elcVr[threadIdx.x] + cos(cit1a)*elcVy[threadIdx.x]; p_pat_elc[tid].vr = elcVr[threadIdx.x]; p_pat_elc[tid].vy = elcVy[threadIdx.x]; p_pat_elc[tid].vz = elcVz[threadIdx.x]; p_pat_elc[tid].pr = elcPr[threadIdx.x]; p_pat_elc[tid].py = elcPy[threadIdx.x]; p_pat_elc[tid].pz = elcPz[threadIdx.x]; tid += gridDim.x*blockDim.x; } } __global__ void current_ion(Paticle *p_pat_elc, Pre_Paticle *d_pre_elc, Grid *device_G, float *S_number, int tail, int t) { __shared__ float elcVr[thread]; __shared__ float elcVy[thread]; __shared__ float elcVz[thread]; __shared__ float elcPr[thread]; __shared__ float elcPy[thread]; __shared__ float elcPz[thread]; __shared__ float PrelcPr[thread]; __shared__ float PrelcPz[thread]; int tid = blockDim.x*blockIdx.x + threadIdx.x; while (tid < tail) { elcVy[threadIdx.x] = p_pat_elc[tid].vy; elcPr[threadIdx.x] = p_pat_elc[tid].pr; elcPz[threadIdx.x] = p_pat_elc[tid].pz; PrelcPr[threadIdx.x] = d_pre_elc[tid].pr;// PrelcPz[threadIdx.x] = d_pre_elc[tid].pz;// if ((elcPz[threadIdx.x] >= D_parameter[3]) || (elcPz[threadIdx.x] < 0) || (elcPr[threadIdx.x] >= D_parameter[2])) { //int i = tid %30; //float afa = 0;//rds5*(pi/12); //float cita = 0;//rds6*pi/2; //float vv = sqrt(D_parameter[9] * 2 / D_parameter[11]); //p_pat_elc[tid].pz = D_parameter[8]; //p_pat_elc[tid].pr = 0.005*S_number[i]; //p_pat_elc[tid].py = 2 * pi*S_number[i]; //p_pat_elc[tid].vr = vv*sin(afa)*cos(cita); //p_pat_elc[tid].vy = vv*sin(afa)*sin(cita); //p_pat_elc[tid].vz = 3e7; //p_pat_elc[tid].blei = p_pat_elc[tid].pr / D_parameter[0]; //p_pat_elc[tid].blek = p_pat_elc[tid].pz / D_parameter[1]; } else { int i = (int)(elcPr[threadIdx.x] / D_parameter[0]); int j = (int)(elcPz[threadIdx.x] / D_parameter[1]); int ii = (int)(PrelcPr[threadIdx.x] / D_parameter[0]); //云方程 int kk = (int)(PrelcPz[threadIdx.x] / D_parameter[1]); float wrr = PrelcPr[threadIdx.x] / D_parameter[0] - ii; float wzz 
= PrelcPz[threadIdx.x] / D_parameter[1] - kk; float newwrr = elcPr[threadIdx.x] / D_parameter[0] - i; float newwzz = elcPz[threadIdx.x] / D_parameter[1] - j; float V = abs(pi*((ii + 1)*D_parameter[0] * (ii + 1)*D_parameter[0] - ii*D_parameter[0] * ii*D_parameter[0])*D_parameter[1]); float V1 = abs(pi*((i + 1)*D_parameter[0] * (i + 1)*D_parameter[0] - i*D_parameter[0] * i*D_parameter[0])*D_parameter[1]); int grid_1 = i*(nz + 1) + j; int grid_2 = i*(nz + 1) + j + 1; int grid_3 = (i + 1)*(nz + 1) + j; int grid_4 = (i + 1)*(nz + 1) + j + 1; int grid_temp1 = ii*(nz + 1) + kk;//ii kk int grid_temp2 = ii*(nz + 1) + kk + 1;//ii kk+1 int grid_temp3 = (ii + 1)*(nz + 1) + kk;//ii+1 kk int grid_temp4 = (ii + 1)*(nz + 1) + kk + 1;//ii+1 kk+1 /*(*(device_G + grid_1)).Q += D_parameter[6] * (1 - wrr)*(1 - wzz); (*(device_G + grid_2)).Q += D_parameter[6] * (1 - wrr)*wzz; (*(device_G + grid_3)).Q += D_parameter[6] * wrr*(1 - wzz); (*(device_G + grid_4)).Q += D_parameter[6] * wrr*wzz;*/ //float area = pi*((ii*D_parameter[0] + D_parameter[0])*(ii*D_parameter[0] + D_parameter[0]) - ii*D_parameter[0] * ii*D_parameter[0])*D_parameter[1]; /*(*(device_G + grid_1)).den = (*(device_G + grid_1)).Q / area; (*(device_G + grid_2)).den = (*(device_G + grid_2)).Q / area; (*(device_G + grid_3)).den = (*(device_G + grid_3)).Q / area; (*(device_G + grid_4)).den = (*(device_G + grid_4)).Q / area;*/ //粒子穿越网格,四种情况 float xp = d_min_1(d_min_1(ii*D_parameter[0], i*D_parameter[0]) + D_parameter[0], d_max_1(d_max_1(ii*D_parameter[0], i*D_parameter[0]), (elcPr[threadIdx.x] + PrelcPr[threadIdx.x]) / 2)); float zp = d_min_1(d_min_1(kk*D_parameter[1], j*D_parameter[1]) + D_parameter[1], d_max_1(d_max_1(kk*D_parameter[1], j*D_parameter[1]), (elcPz[threadIdx.x] + PrelcPz[threadIdx.x]) / 2)); float fr1 = D_parameter[6] * (xp - PrelcPr[threadIdx.x]) / D_parameter[4]; float fz1 = D_parameter[6] * (zp - PrelcPz[threadIdx.x]) / D_parameter[4]; float fr2 = D_parameter[6] * (elcPr[threadIdx.x] - xp) / D_parameter[4]; float fz2 = D_parameter[6] * (elcPz[threadIdx.x] - zp) / D_parameter[4]; float wr1 = (xp + PrelcPr[threadIdx.x]) / 2 / D_parameter[1] - ii; float wz1 = (zp + PrelcPz[threadIdx.x]) / 2 / D_parameter[1] - kk; float wr2 = (xp + elcPr[threadIdx.x]) / 2 / D_parameter[0] - i; float wz2 = (zp + elcPz[threadIdx.x]) / 2 / D_parameter[1] - j; //printf("%d,%e,%e\n",tail, wr2, wz2); /*float da = (*(device_G + grid_1)).den*p_pat_elc[tid].vz; float da1 = (*(device_G + grid_3)).den*p_pat_elc[tid].vz;*/ ///////////////////////////////////////////////////电子电流密度 1 //int logic = ii*i / ((ii - 0.001)*(i - 0.001));//逻辑体积 分支优化 //float logicV = logic*V1 + (1 - logic)*V; if (ii == 0 || i == 0) { atomicAdd(&((*(device_G + grid_temp1)).jr_ion), (fr1*(1 - wz1) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_temp2)).jr_ion), (fr1*(wz1) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_temp1)).jz_ion), (fz1*(1 - wr1) / V));//注意是否除以i __syncthreads(); atomicAdd(&((*(device_G + grid_temp3)).jz_ion), (fz1*(wr1) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_1)).jr_ion), (fr2*(1 - wz2) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_2)).jr_ion), ((fr2*wz2) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_1)).jz_ion), (fz2*(1 - wr2) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_3)).jz_ion), (fz2*wr2 / V)); __syncthreads(); } else{ atomicAdd(&((*(device_G + grid_temp1)).jr_ion), (fr1*(1 - wz1) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_temp2)).jr_ion), (fr1*(wz1) / V1)); __syncthreads(); atomicAdd(&((*(device_G + 
grid_temp1)).jz_ion), (fz1*(1 - wr1) / V1));//注意是否除以i __syncthreads(); atomicAdd(&((*(device_G + grid_temp3)).jz_ion), (fz1*(wr1) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_1)).jr_ion), (fr2*(1 - wz2) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_2)).jr_ion), ((fr2*wz2) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_1)).jz_ion), (fz2*(1 - wr2) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_3)).jz_ion), (fz2*wr2 / V1)); __syncthreads(); } float qedens = D_parameter[6] / V; //电流密度Jy方向 float Jc = qedens*elcVy[threadIdx.x]; float mid1, mid2; mid1 = xp / D_parameter[0] - i; mid2 = zp / D_parameter[1] - j; float A1 = (1 - wrr)*(1 - wzz); float A3 = wrr*(1 - wzz); float A2 = (1 - wrr)*wzz; float A4 = wrr*wzz; float M1 = (1 - mid1)*(1 - mid2); float M3 = mid1*(1 - mid2); float M2 = (1 - mid1)*mid2; float M4 = mid1*mid2; float B1 = (1 - newwrr)*(1 - newwzz); float B3 = newwrr*(1 - newwzz); float B2 = (1 - newwrr)*newwzz; float B4 = newwrr*newwzz; atomicAdd(&(*(device_G + grid_temp1)).jy_ion, (Jc*(A1 + M1)) / 4 - 0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_temp2)).jy_ion, (Jc*(A2 + M2)) / 4 - 0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_temp3)).jy_ion, (Jc*(A3 + M3)) / 4 - 0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_temp4)).jy_ion, (Jc*(A4 + M4)) / 4 - 0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_1)).jy_ion, (Jc*(B1 + M1)) / 4 - 0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_2)).jy_ion, (Jc*(B2 + M2)) / 4 - 0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_3)).jy_ion, (Jc*(B3 + M3)) / 4 - 0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_4)).jy_ion, (Jc*(B4 + M4)) / 4 - 0.0); __syncthreads(); } tid += gridDim.x*blockDim.x; } } __global__ void current(Paticle *p_pat_elc, Pre_Paticle *d_pre_elc, Grid *device_G, float *S_number, int tail,int t) { __shared__ float elcVr[thread]; __shared__ float elcVy[thread]; __shared__ float elcVz[thread]; __shared__ float elcPr[thread]; __shared__ float elcPy[thread]; __shared__ float elcPz[thread]; __shared__ float PrelcPr[thread];__shared__ float PrelcPz[thread]; int tid = blockDim.x*blockIdx.x + threadIdx.x; while (tid < tail) { elcVy[threadIdx.x] = p_pat_elc[tid].vy; elcPr[threadIdx.x] = p_pat_elc[tid].pr; elcPz[threadIdx.x] = p_pat_elc[tid].pz; PrelcPr[threadIdx.x] = d_pre_elc[tid].pr;// PrelcPz[threadIdx.x] = d_pre_elc[tid].pz;// if ((elcPz[threadIdx.x] >= D_parameter[3]) || (elcPz[threadIdx.x] < 0) || (elcPr[threadIdx.x] >= D_parameter[2])) { //int i = tid %30; //float afa = 0;//rds5*(pi/12); //float cita = 0;//rds6*pi/2; //float vv = sqrt(D_parameter[9] * 2 / D_parameter[11]); //p_pat_elc[tid].pz = D_parameter[8]; //p_pat_elc[tid].pr = 0.005*S_number[i]; //p_pat_elc[tid].py = 2 * pi*S_number[i]; //p_pat_elc[tid].vr = vv*sin(afa)*cos(cita); //p_pat_elc[tid].vy = vv*sin(afa)*sin(cita); //p_pat_elc[tid].vz = 3e7; //p_pat_elc[tid].blei = p_pat_elc[tid].pr / D_parameter[0]; //p_pat_elc[tid].blek = p_pat_elc[tid].pz / D_parameter[1]; } else { int i = (int)(elcPr[threadIdx.x] / D_parameter[0]); int j = (int)(elcPz[threadIdx.x] / D_parameter[1]); int ii = (int)(PrelcPr[threadIdx.x] / D_parameter[0]); //云方程 int kk = (int)(PrelcPz[threadIdx.x] / D_parameter[1]); float wrr = PrelcPr[threadIdx.x] / D_parameter[0] - ii; float wzz = PrelcPz[threadIdx.x] / D_parameter[1] - kk; float newwrr = elcPr[threadIdx.x] / D_parameter[0] - i; float newwzz = elcPz[threadIdx.x] / D_parameter[1] - j; float V = abs(pi*((ii + 1)*D_parameter[0] * (ii + 1)*D_parameter[0] - ii*D_parameter[0] * 
ii*D_parameter[0])*D_parameter[1]); float V1 = abs(pi*((i + 1)*D_parameter[0] * (i + 1)*D_parameter[0] - i*D_parameter[0] * i*D_parameter[0])*D_parameter[1]); int grid_1 = i*(nz + 1) + j; int grid_2 = i*(nz + 1) + j + 1; int grid_3 = (i + 1)*(nz + 1) + j; int grid_4 = (i + 1)*(nz + 1) + j + 1; int grid_temp1 = ii*(nz + 1) + kk;//ii kk int grid_temp2 = ii*(nz + 1) + kk + 1;//ii kk+1 int grid_temp3 = (ii + 1)*(nz + 1) + kk;//ii+1 kk int grid_temp4 = (ii + 1)*(nz + 1) + kk + 1;//ii+1 kk+1 /*(*(device_G + grid_1)).Q += D_parameter[5] * (1 - wrr)*(1 - wzz); (*(device_G + grid_2)).Q += D_parameter[5] * (1 - wrr)*wzz; (*(device_G + grid_3)).Q += D_parameter[5] * wrr*(1 - wzz); (*(device_G + grid_4)).Q += D_parameter[5] * wrr*wzz;*/ //float area = pi*((ii*D_parameter[0] + D_parameter[0])*(ii*D_parameter[0] + D_parameter[0]) - ii*D_parameter[0] * ii*D_parameter[0])*D_parameter[1]; /*(*(device_G + grid_1)).den = (*(device_G + grid_1)).Q / area; (*(device_G + grid_2)).den = (*(device_G + grid_2)).Q / area; (*(device_G + grid_3)).den = (*(device_G + grid_3)).Q / area; (*(device_G + grid_4)).den = (*(device_G + grid_4)).Q / area;*/ //particle crosses a cell boundary: four cases float xp = d_min_1(d_min_1(ii*D_parameter[0], i*D_parameter[0]) + D_parameter[0], d_max_1(d_max_1(ii*D_parameter[0], i*D_parameter[0]), (elcPr[threadIdx.x] + PrelcPr[threadIdx.x]) / 2)); float zp = d_min_1(d_min_1(kk*D_parameter[1], j*D_parameter[1]) + D_parameter[1], d_max_1(d_max_1(kk*D_parameter[1], j*D_parameter[1]), (elcPz[threadIdx.x] + PrelcPz[threadIdx.x]) / 2)); float fr1 = D_parameter[5] * (xp - PrelcPr[threadIdx.x]) / D_parameter[4]; float fz1 = D_parameter[5] * (zp - PrelcPz[threadIdx.x]) / D_parameter[4]; float fr2 = D_parameter[5] * (elcPr[threadIdx.x] - xp) / D_parameter[4]; float fz2 = D_parameter[5] * (elcPz[threadIdx.x] - zp) / D_parameter[4]; float wr1 = (xp + PrelcPr[threadIdx.x]) / 2 / D_parameter[1] - ii; float wz1 = (zp + PrelcPz[threadIdx.x]) / 2 / D_parameter[1] - kk; float wr2 = (xp + elcPr[threadIdx.x]) / 2 / D_parameter[0] - i; float wz2 = (zp + elcPz[threadIdx.x]) / 2 / D_parameter[1] - j; //printf("%d,%e,%e\n",tail, wr2, wz2); /*float da = (*(device_G + grid_1)).den*p_pat_elc[tid].vz; float da1 = (*(device_G + grid_3)).den*p_pat_elc[tid].vz;*/ ///////////////////////////////////////////////////electron current density 1 //int logic = ii*i / ((ii - 0.001)*(i - 0.001));//logical volume, branch optimization //float logicV = logic*V1 + (1 - logic)*V; if (ii == 0||i==0 ) { atomicAdd(&((*(device_G + grid_temp1)).jr), (fr1*(1 - wz1) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_temp2)).jr), (fr1*(wz1) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_temp1)).jz), (fz1*(1 - wr1) / V));//check whether to divide by i __syncthreads(); atomicAdd(&((*(device_G + grid_temp3)).jz), (fz1*(wr1) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_1)).jr), (fr2*(1 - wz2) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_2)).jr), ((fr2*wz2) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_1)).jz), (fz2*(1 - wr2) / V)); __syncthreads(); atomicAdd(&((*(device_G + grid_3)).jz), (fz2*wr2 / V)); __syncthreads(); } else{ atomicAdd(&((*(device_G + grid_temp1)).jr), (fr1*(1 - wz1) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_temp2)).jr), (fr1*(wz1) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_temp1)).jz), (fz1*(1 - wr1) / V1));//check whether to divide by i __syncthreads(); atomicAdd(&((*(device_G + grid_temp3)).jz), (fz1*(wr1) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_1)).jr), (fr2*(1 - wz2) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_2)).jr), ((fr2*wz2) / V1)); 
__syncthreads(); atomicAdd(&((*(device_G + grid_1)).jz), (fz2*(1 - wr2) / V1)); __syncthreads(); atomicAdd(&((*(device_G + grid_3)).jz), (fz2*wr2 / V1)); __syncthreads(); } float qedens = D_parameter[5] / V; //电流密度Jy方向 float Jc = qedens*elcVy[threadIdx.x]; float mid1, mid2; mid1 = xp / D_parameter[0] - i; mid2 = zp / D_parameter[1] - j; float A1 = (1 - wrr)*(1 - wzz); float A3 = wrr*(1 - wzz); float A2 = (1 - wrr)*wzz; float A4 = wrr*wzz; float M1 = (1 - mid1)*(1 - mid2); float M3 = mid1*(1 - mid2); float M2 = (1 - mid1)*mid2; float M4 = mid1*mid2; float B1 = (1 - newwrr)*(1 - newwzz); float B3 = newwrr*(1 - newwzz); float B2 = (1 - newwrr)*newwzz; float B4 = newwrr*newwzz; atomicAdd(&(*(device_G + grid_temp1)).jy, (Jc*(A1 + M1)) / 4-0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_temp2)).jy, (Jc*(A2 + M2)) / 4-0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_temp3)).jy, (Jc*(A3 + M3)) / 4-0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_temp4)).jy, (Jc*(A4 + M4)) / 4-0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_1)).jy, (Jc*(B1 + M1)) / 4-0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_2)).jy, (Jc*(B2 + M2)) / 4-0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_3)).jy, (Jc*(B3 + M3)) / 4-0.0); __syncthreads(); atomicAdd(&(*(device_G + grid_4)).jy, (Jc*(B4 + M4)) / 4-0.0); __syncthreads(); } tid += gridDim.x*blockDim.x; } } __global__ void device_define_G(int n, int m, Grid *device_gn, Grid *device_g) { int tid=blockIdx.x*blockDim.x+threadIdx.x; while(tid<n*m) { /*int i=tid/m; int j=tid%m; int tid_temp=i*(m+1)+j;*/ (device_gn+tid)->ey=(device_g+tid)->ey; (device_gn+tid)->hx=(device_g+tid)->hx; (device_gn+tid)->hz=(device_g+tid)->hz; (device_gn+tid)->ex=(device_g+tid)->ex; (device_gn+tid)->hy=(device_g+tid)->hy; (device_gn+tid)->ez=(device_g+tid)->ez; (device_g+tid)->ne[2]=0; (device_g+tid)->jr=0.0; (device_g+tid)->jz=0.0; (device_g+tid)->jy=0.0; (device_g+tid)->Q=0.0; (device_g+tid)->den=0.0; (device_g+tid)->jr_ion=0.0; (device_g+tid)->jz_ion=0.0; (device_g+tid)->jy_ion=0.0; (device_g+tid)->Q_ion=0.0; (device_g+tid)->den_ion=0.0; tid+=gridDim.x*blockDim.x; } /* printf("success,device_define_G");*/ } __device__ void L_InitialPML(int tid) { d_ex1[tid] = 0; d_iex1[tid] = 0; d_ey1[tid] = 0; d_iey1[tid] = 0; d_ez1[tid] = 0; d_iez1[tid] = 0; d_hx1[tid] = 0; d_ihx1[tid] = 0; d_hy1[tid]=0; d_ihy1[tid]=0; d_hz1[tid]=0; d_ihz1[tid]=0; } __global__ void kernel_L_InitialPML(int nxx, int nzz) { int tid = blockDim.x*blockIdx.x + threadIdx.x; while (tid<nxx*nzz) { L_InitialPML(tid); tid += gridDim.x*blockDim.x; } } __global__ void cacuchang_hx(Grid *device_G,Grid *device_Gn,float *d_sigmaz1,float *d_sigmaz,float dt,float dr,float dz,int nxx,int nzz) { float epsl=(8.854e-12); float mur=4.0*pi*1.0e-7; //float t0=120*dt,T=100*dt; float /*ca=0.0,*//*cb=0.0,*/ca1=0.0,cb1=0.0/*,ca2=0,cb2=0*/; float /*da=0.0,db=0.0,*/da1=0.0,db1=0.0,da2=0,db2=0; int tid=blockDim.x*blockIdx.x+threadIdx.x; /////******************** TE01 ******************///// while (tid<nxx*nzz) { int i=tid/nzz; int k=tid%nzz; if (i != nx&&k != nz) { da2 = 1;//(1+dt*sigmaz1[k]/epsl/2); db2 = 1;//(1-dt*sigmaz1[k]/epsl/2); da1 = (2 * epsl - dt*d_sigmaz[k]) / (2 * epsl + dt*d_sigmaz[k]); db1 = (2 * epsl) / (2 * epsl + d_sigmaz[k] * dt); d_hx1[tid] = da1*d_hx1[tid] + db1*dt*((*(device_Gn + tid+1)).ey - (*(device_Gn + tid)).ey) / dz; (*(device_G + tid)).hx = (*(device_Gn + tid)).hx + (d_hx1[tid] - d_ihx1[tid]) / mur; d_ihx1[tid] = d_hx1[tid]; } tid+=gridDim.x*blockDim.x; } } __global__ void cacuchang_hy(Grid *device_G, 
Grid *device_Gn, float *d_sigmaz1, float *d_sigmaz, float dt, float dr, float dz, int nxx, int nzz) { float epsl = (8.854e-12); float mur = 4.0*pi*1.0e-7; //float t0=120*dt,T=100*dt; float /*ca=0.0,*//*cb=0.0,*/ca1 = 0.0, cb1 = 0.0/*,ca2=0,cb2=0*/; float /*da=0.0,db=0.0,*/da1 = 0.0, db1 = 0.0, da2 = 0, db2 = 0; int tid = blockDim.x*blockIdx.x + threadIdx.x; /////******************** TE01 ******************///// while (tid<nxx*nzz) { int i = tid / nzz; int k = tid%nzz; if (i != nx&&k != nz) { da2 = 1;//(1+dt*sigmaz1[k]/epsl/2); db2 = 1;//(1-dt*sigmaz1[k]/epsl/2); ca1 = (2 * epsl - dt*d_sigmaz[k]) / (2 * epsl + dt*d_sigmaz[k]); cb1 = 2 * epsl / (2 * epsl + dt*d_sigmaz[k]); d_hy1[tid] = d_hy1[tid] + dt*(((*(device_Gn + tid+nzz)).ez - (*(device_Gn + tid)).ez) / dr - ((*(device_Gn + tid+1)).ex - (*(device_Gn + tid)).ex) / dz); (*(device_G + tid)).hy = ca1*(*(device_Gn + tid)).hy + cb1*(d_hy1[tid] - d_ihy1[tid]) / mur; d_ihy1[tid] = d_hy1[tid]; } tid += gridDim.x*blockDim.x; } } __global__ void cacuchang_hz(Grid *device_G, Grid *device_Gn, float *d_sigmaz1, float *d_sigmaz, float dt, float dr, float dz, int nxx, int nzz) { float epsl = (8.854e-12); float mur = 4.0*pi*1.0e-7; //float t0=120*dt,T=100*dt; float /*ca=0.0,*//*cb=0.0,*/ca1 = 0.0, cb1 = 0.0/*,ca2=0,cb2=0*/; float /*da=0.0,db=0.0,*/da1 = 0.0, db1 = 0.0, da2 = 0, db2 = 0; int tid = blockDim.x*blockIdx.x + threadIdx.x; /////******************** TE01 ******************///// while (tid<nxx*nzz) { int i = tid / nzz; int k = tid%nzz; if (i != nx&&k != nz) { da2 = 1;//(1+dt*sigmaz1[k]/epsl/2); db2 = 1;//(1-dt*sigmaz1[k]/epsl/2); d_hz1[tid] = d_hz1[tid] - dt*((*(device_Gn + tid+nzz)).ey - (*(device_Gn + tid)).ey) / dr - dt*((*(device_Gn + tid+nzz)).ey + (*(device_Gn + tid)).ey) / (2 * (i + 0.5)*dr); (*(device_G + tid)).hz = (*(device_Gn + tid)).hz + (da2*d_hz1[tid] - db2*d_ihz1[tid]) / mur; d_ihz1[tid] = d_hz1[tid]; } tid += gridDim.x*blockDim.x; } } __global__ void cacuchang_ex(Grid *device_G,Grid *device_Gn,float *d_sigmaz1,float *d_sigmaz,float dt,float dr,float dz,int nxx,int nzz) { float epsl=(8.854e-12); const float cgm=0.0; float mur=4.0*pi*1.0e-7; float A,B,C,D,v,a6=0.0; //float bate=0; float omega=0,freq=0; float t0=120*dt,T=100*dt; float ca=0.0,cb=0.0,ca1=0.0,cb1=0.0,ca2=0,cb2=0; float da=0.0,db=0.0,da1=0.0,db1=0.0,da2=0,db2=0; v=1/sqrt(mur*epsl); A=(2*epsl-cgm*dt)/(2*epsl-cgm*dt); B=(2*dt)/(2*epsl+cgm*dt); C=1; D=dt/mur; freq=1e8; int tid = blockDim.x*blockIdx.x + threadIdx.x; while (tid < nxx*nzz) { int i = tid / nzz; int k = tid%nzz; if (i != nx && k != nz) { if (k == 0) { float v = 1 / sqrt(mur*epsl); float coff = (v*dt - dz) / (v*dt + dz); device_G[tid].ex = device_Gn[tid + 1].ex + coff*(device_Gn[tid + 1].ex - device_Gn[tid].ex); } else { ca = (2 * epsl - dt*d_sigmaz1[k]) / (2 * epsl + dt*d_sigmaz1[k]); cb = 2 * dt / (2 * epsl + dt*d_sigmaz1[k]); d_ex1[tid] = ca*d_ex1[tid] - cb*(device_G[tid].hy - device_G[tid - 1].hy) / dz; device_G[tid].ex = device_Gn[tid].ex + (d_ex1[tid] - d_iex1[tid]) - B*(device_G[tid].jr + device_G[tid].jr_ion); d_iex1[tid] = d_ex1[tid]; } } tid += gridDim.x*blockDim.x; } } __global__ void cacuchang_ey(Grid *device_G,Grid *device_Gn,float *d_sigmaz1,float *d_sigmaz,float dt,float dr,float dz,int nxx,int nzz) { float epsl=(8.854e-12); const float cgm=0.0; float mur=4.0*pi*1.0e-7; float A,B,C,D,v,a6=0.0; float omega=0,freq=0; float t0=120*dt,T=100*dt; float ca=0.0,cb=0.0,ca1=0.0,cb1=0.0,ca2=0,cb2=0; float da=0.0,db=0.0,da1=0.0,db1=0.0,da2=0,db2=0; v=1/sqrt(mur*epsl); A=(2*epsl-cgm*dt)/(2*epsl-cgm*dt); 
B=(2*dt)/(2*epsl+cgm*dt); C=1; D=dt/mur; freq=1e8; int tid = blockDim.x*blockIdx.x + threadIdx.x; while(tid<nxx*nzz) { int i=tid/nz; int k=tid%nz; if (i != nx&&k != nz) { int tid1 = i*nzz + k; int tid2 = i*nzz + k + 1; int tid3 = (i + 1)*nzz + k; int tid4 = (i + 1)*nzz + k + 1; if (i == 0) { device_G[tid].ey = 0;//te01 } else if (k == 0) { float v = 1 / sqrt(mur*epsl); float coff = (v*dt - dz) / (v*dt + dz); device_G[tid].ey = device_Gn[tid + 1].ey + coff*(device_Gn[tid + 1].ey - device_Gn[tid].ey);//TE01 } else { da = (2 * epsl - dt*d_sigmaz1[k]) / (2 * epsl + dt*d_sigmaz1[k]); //TE01 db = 2 / (2 * epsl + dt*d_sigmaz1[k]); d_ey1[tid] = d_ey1[tid] + dt*((device_G[tid].hx - device_G[tid - 1].hx) / dz - (device_G[tid].hz - device_G[tid-nzz].hz) / dr);//此处严重错误 device_G[tid].ey = da*device_Gn[tid].ey + db*(d_ey1[tid] - d_iey1[tid]) - B*(device_G[tid].jy + device_G[tid].jy_ion) * 10; d_iey1[tid] = d_ey1[tid]; } } tid+=gridDim.x*blockDim.x; } } __global__ void cacuchang_ez(Grid *device_G,Grid *device_Gn,float *d_sigmaz1,float *d_sigmaz,float dt,float dr,float dz,int nxx,int nzz) { float epsl=(8.854e-12); const float cgm=0.0; float mur=4.0*pi*1.0e-7; float A,B,C,D,v,a6=0.0; //float bate=0; float omega=0,freq=0; float t0=120*dt,T=100*dt; float ca=0.0,cb=0.0,ca1=0.0,cb1=0.0,ca2=0,cb2=0; float da=0.0,db=0.0,da1=0.0,db1=0.0,da2=0,db2=0; v=1/sqrt(mur*epsl); A=(2*epsl-cgm*dt)/(2*epsl-cgm*dt); B=(2*dt)/(2*epsl+cgm*dt); //printf("%e\n",B); C=1; D=dt/mur; freq=1e8; int tid = blockDim.x*blockIdx.x + threadIdx.x; while(tid<nxx*nzz) { int i=tid/nzz; int k=tid%nzz; if (i != nx&&k != nz) { if (i == 0) { ca2 = 1;//(2*epsl+dt*sigmaz[k])/(2*epsl); cb2 = 1;//(2*epsl-dt*sigmaz[k])/(2*epsl); d_ez1[tid] = d_ez1[tid] + dt * 4 * (*(device_G + tid)).hy / dr / epsl; (*(device_G + tid)).ez = (*(device_Gn + tid)).ez + (ca2*d_ez1[tid] - cb2*d_iez1[tid]) - B*((*(device_G + tid)).jz + (*(device_G + tid)).jz_ion); d_iez1[tid] = d_ez1[tid]; } else { ca2 = 1;//(2*epsl+dt*sigmaz[k])/(2*epsl);//TM01 cb2 = 1;//(2*epsl-dt*sigmaz[k])/(2*epsl); d_ez1[tid] = d_ez1[tid] + dt*((1 / (2 * i*dr) + 1 / dr)*(*(device_G + tid)).hy + ((1 / (2 * i*dr)) - 1 / dr)*(*(device_G + tid - nzz)).hy); (*(device_G + tid)).ez = (*(device_Gn + tid)).ez + (ca2*d_ez1[tid] - cb2*d_iez1[tid]) / epsl - B*((*(device_G + tid)).jz + (*(device_G + tid)).jz_ion); d_iez1[tid] = d_ez1[tid]; } } tid+=gridDim.x*blockDim.x; } }
09c76457c6452d897a3dce140f13b5239f7db9ec.hip
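// File overview: HIP port of a D3Q19 lattice Boltzmann solver on an XDIM x YDIM x ZDIM grid, with BGK
// (bgk_collide) and MRT (mrt_collide) collision operators, a boundary-type map (ImageFcn), texture-backed
// distribution storage (texRef_f0A..texRef_f18B), and a host-side wall-clock timer (timeval_subtract).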
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> //#include <cutil.h> #include <iostream> #include <ostream> #include <fstream> //#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h" using namespace std; #define BLOCKSIZEX 256 #define BLOCKSIZEY 1 #define BLOCKSIZEZ 1 #define XDIM 256 #define YDIM 128 #define ZDIM 32 #define TMAX 1000 #define RE 100.f//100.f; #define UMAX 0.08f #define METHOD "SHARED" //SINGLE,HYB,TEXT,SHARED //#define CHARLENGTH = XDIM-2.f; //#define BLOCKSIZE 16; //int const XDIM = 32; //int const YDIM = 32; #include <sys/time.h> #include <time.h> inline __device__ int ImageFcn(int x, int y, int z){ if(y == 0 || z == 0 || x == XDIM-1 || y == YDIM-1 || z == ZDIM-1) return 1; else if(x == 0) return 3; else return 0; } texture<float,2,hipReadModeElementType> texRef_f0A; texture<float,2,hipReadModeElementType> texRef_f1A; texture<float,2,hipReadModeElementType> texRef_f2A; texture<float,2,hipReadModeElementType> texRef_f3A; texture<float,2,hipReadModeElementType> texRef_f4A; texture<float,2,hipReadModeElementType> texRef_f5A; texture<float,2,hipReadModeElementType> texRef_f6A; texture<float,2,hipReadModeElementType> texRef_f7A; texture<float,2,hipReadModeElementType> texRef_f8A; texture<float,2,hipReadModeElementType> texRef_f9A; texture<float,2,hipReadModeElementType> texRef_f10A; texture<float,2,hipReadModeElementType> texRef_f11A; texture<float,2,hipReadModeElementType> texRef_f12A; texture<float,2,hipReadModeElementType> texRef_f13A; texture<float,2,hipReadModeElementType> texRef_f14A; texture<float,2,hipReadModeElementType> texRef_f15A; texture<float,2,hipReadModeElementType> texRef_f16A; texture<float,2,hipReadModeElementType> texRef_f17A; texture<float,2,hipReadModeElementType> texRef_f18A; texture<float,2,hipReadModeElementType> texRef_f0B; texture<float,2,hipReadModeElementType> texRef_f1B; texture<float,2,hipReadModeElementType> texRef_f2B; texture<float,2,hipReadModeElementType> texRef_f3B; texture<float,2,hipReadModeElementType> texRef_f4B; texture<float,2,hipReadModeElementType> texRef_f5B; texture<float,2,hipReadModeElementType> texRef_f6B; texture<float,2,hipReadModeElementType> texRef_f7B; texture<float,2,hipReadModeElementType> texRef_f8B; texture<float,2,hipReadModeElementType> texRef_f9B; texture<float,2,hipReadModeElementType> texRef_f10B; texture<float,2,hipReadModeElementType> texRef_f11B; texture<float,2,hipReadModeElementType> texRef_f12B; texture<float,2,hipReadModeElementType> texRef_f13B; texture<float,2,hipReadModeElementType> texRef_f14B; texture<float,2,hipReadModeElementType> texRef_f15B; texture<float,2,hipReadModeElementType> texRef_f16B; texture<float,2,hipReadModeElementType> texRef_f17B; texture<float,2,hipReadModeElementType> texRef_f18B; int timeval_subtract (double *result, struct timeval *x, struct timeval *y) { struct timeval result0; /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (y->tv_usec - x->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. tv_usec is certainly positive. */ result0.tv_sec = x->tv_sec - y->tv_sec; result0.tv_usec = x->tv_usec - y->tv_usec; *result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } inline __device__ void bgk_collide(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float omega) { float rho,u,v,w; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+ f10+f11+f12+f13+f14+f15+f16+f17+f18; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; // float usqr = u*u+v*v+w*w; float usqr = fma(u,u,fma(v,v,w*w)); // f0 -= omega*fma(-0.3333333333f,(fma(-1.5f,usqr,rho)),f0);//(f0 -0.3333333333f*(fma(-1.5f,usqr,rho)));//rho-1.5f*usqr)); // f1 -= omega*fma(-0.0555555556f,fma(3.0f, u ,rho)+fma(4.5f,u*u,-1.5f*usqr),f1);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f2 -= omega*fma(-0.0555555556f,fma(3.0f, v ,rho)+fma(4.5f,v*v,-1.5f*usqr),f2);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f3 -= omega*fma(-0.0555555556f,fma(3.0f, u ,rho)+fma(4.5f,u*u,-1.5f*usqr),f3);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f4 -= omega*fma(-0.0555555556f,fma(3.0f, v ,rho)+fma(4.5f,v*v,-1.5f*usqr),f4);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f5 -= omega*fma(-0.0555555556f,fma(3.0f,( u+v),rho)+fma(4.5f,( u+v)*( u+v),-1.5f*usqr),f5 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f6 -= omega*fma(-0.0555555556f,fma(3.0f,(-u+v),rho)+fma(4.5f,(-u+v)*(-u+v),-1.5f*usqr),f6 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f7 -= omega*fma(-0.0555555556f,fma(3.0f,(-u-v),rho)+fma(4.5f,(-u-v)*(-u-v),-1.5f*usqr),f7 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f8 -= omega*fma(-0.0555555556f,fma(3.0f,( u-v),rho)+fma(4.5f,( u-v)*( u-v),-1.5f*usqr),f8 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f9 -= omega*fma(-0.0555555556f,fma(3.0f,( w),rho)+fma(4.5f,( w)*( w),-1.5f*usqr),f9 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f10-= omega*fma(-0.0277777778f,fma(3.0f,( u+w),rho)+fma(4.5f,( u+w)*( u+w),-1.5f*usqr),f10);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f11-= omega*fma(-0.0277777778f,fma(3.0f,( v+w),rho)+fma(4.5f,( v+w)*( v+w),-1.5f*usqr),f11);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f12-= omega*fma(-0.0277777778f,fma(3.0f,(-u+w),rho)+fma(4.5f,(-u+w)*(-u+w),-1.5f*usqr),f12);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f13-= omega*fma(-0.0277777778f,fma(3.0f,(-v+w),rho)+fma(4.5f,(-v+w)*(-v+w),-1.5f*usqr),f13);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f14-= omega*fma(-0.0555555556f,fma(3.0f,( -w),rho)+fma(4.5f,( -w)*( -w),-1.5f*usqr),f14);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f15-= omega*fma(-0.0277777778f,fma(3.0f,( u-w),rho)+fma(4.5f,( u-w)*( u-w),-1.5f*usqr),f15);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f16-= omega*fma(-0.0277777778f,fma(3.0f,( v-w),rho)+fma(4.5f,( v-w)*( v-w),-1.5f*usqr),f16);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f17-= omega*fma(-0.0277777778f,fma(3.0f,(-u-w),rho)+fma(4.5f,(-u-w)*(-u-w),-1.5f*usqr),f17);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f18-= omega*fma(-0.0277777778f,fma(3.0f,(-v-w),rho)+fma(4.5f,(-v-w)*(-v-w),-1.5f*usqr),f18);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); f0 = f0 -omega*(f0 -0.3333333333f*(rho-1.5f*usqr)); f1 = f1 -omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); f2 = f2 -omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr)); f3 = f3 -omega*(f3 
-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr)); f4 = f4 -omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr)); f5 = f5 -omega*(f5 -0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)); f6 = f6 -omega*(f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr)); f7 = f7 -omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr)); f8 = f8 -omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)); f9 = f9 -omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr)); f10= f10-omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)); f11= f11-omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr)); f12= f12-omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr)); f13= f13-omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr)); f14= f14-omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr)); f15= f15-omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)); f16= f16-omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr)); f17= f17-omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr)); f18= f18-omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr)); } __device__ void mrt_collide(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float omega) { float rho,u,v,w; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+ f10+f11+f12+f13+f14+f15+f16+f17+f18; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18; m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18; m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ 1.f*f8+ -4.f*f9+ f10+ 1.f*f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18; m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ; m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18; m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18; m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18; m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18; m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ; m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ; m13 = f5+ - f6+ f7+ - f8 ; m14 = f11 + - f13 + - f16 + f18; m15 = f10 + - f12 + - f15 + f17 ; m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ; m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18; m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18; m1 -= -11.f*rho+19.f*(u*u+v*v+w*w); m2 -= -7.53968254f*(u*u+v*v+w*w); m4 -= -0.66666667f*u;//qx_eq m6 -= -0.66666667f*v;//qx_eq m8 -= -0.66666667f*w;//qx_eq m9 -= (2.f*u*u-(v*v+w*w));//(2.f*.f*.f-(u1*u1+u2*u2));///3.f;//pxx_eq m11-= (v*v-w*w);//pww_eq m13-= u*v;//pxy_eq m14-= v*w;//pyz_eq m15-= u*w;//pxz_eq f0 -= - 0.012531328f*(m1)+ 0.047619048f*(m2); f1 -= -0.0045948204f*(m1)+ -0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*(m9)*omega + -0.055555556f*(m10); f2 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + -0.1f*(m6) + -0.027777778f*(m9)*omega + 0.027777778f*(m10); f3 -= 
-0.0045948204f*(m1)+ -0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*(m9)*omega + -0.055555556f*(m10); f4 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + 0.1f*(m6) + -0.027777778f*(m9)*omega + 0.027777778f*(m10); f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4)+ 0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10); f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4)+ 0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10); f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4)+ -0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10); f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4)+ -0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10); f9 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + -0.1f*(m8)+ -0.027777778f*(m9)*omega + 0.027777778f*(m10); f10 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4) + 0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10); f11 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6)+ 0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10); f12 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4) + 0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10); f13 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6)+ 0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10); f14 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + 0.1f*(m8)+ -0.027777778f*(m9)*omega + 0.027777778f*(m10); f15 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4) + -0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10); f16 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6)+ -0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10); f17 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4) + -0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10); f18 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6)+ -0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10); f2 -= 0.083333333f*(m11)*omega + -0.083333333f*(m12); f4 -= 0.083333333f*(m11)*omega + -0.083333333f*(m12); f5 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ ( 0.25f*(m13) )*omega; f6 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ (-0.25f*(m13) )*omega; f7 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ ( 0.25f*(m13) )*omega; f8 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ (-0.25f*(m13) )*omega; f9 -= -0.083333333f*(m11)*omega + 0.083333333f*(m12); f10 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + 0.25f*(m15))*omega ; f11 -= +( 0.25f*(m14) )*omega ; f12 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + -0.25f*(m15))*omega ; f13 -= +( -0.25f*(m14) )*omega ; f14 -= -0.083333333f*(m11)*omega + 0.083333333f*(m12); f15 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + -0.25f*(m15))*omega ; f16 -= +( -0.25f*(m14) )*omega ; f17 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + 0.25f*(m15))*omega ; f18 -= +( 0.25f*(m14) )*omega ; f5 -= 0.125f*(m16)+ -0.125f*(m17); f6 -= -0.125f*(m16)+ -0.125f*(m17); f7 -= -0.125f*(m16)+ 0.125f*(m17); f8 -= 0.125f*(m16)+ 0.125f*(m17); f10 -= -0.125f*(m16) + 0.125f*(m18); f11 -= + 0.125f*(m17)+ -0.125f*(m18); f12 -= 0.125f*(m16) + 0.125f*(m18); f13 -= + -0.125f*(m17)+ -0.125f*(m18); f15 -= -0.125f*(m16) + -0.125f*(m18); f16 -= + 0.125f*(m17)+ 0.125f*(m18); f17 -= 0.125f*(m16) + -0.125f*(m18); f18 -= + -0.125f*(m17)+ 0.125f*(m18); } inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch) { // if (x<0 || x>pitch || y<0 || y>YDIM || z<0 || z>ZDIM) return 0; // else return (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*ZDIM; } __device__ int dmin(int a, int b) { if (a<b) return a; else 
return b-1; } __device__ int dmax(int a) { if (a>-1) return a; else return 0; } __global__ void simple_copy(float* fA, float* fB, int *image, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) // fB[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(2 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(2 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(3 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(3 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(4 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(4 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(5 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(5 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(6 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(6 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(7 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(7 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(8 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(8 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(9 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(9 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(10,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(10,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(11,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(11,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(12,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(12,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(13,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(13,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(14,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(14,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(15,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(15,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(16,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(16,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(17,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(17,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(18,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(18,x,y,z,pitch,YDIM,ZDIM)]; // float f0;//,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // float f0 = fA[j+pitch*YDIM*ZDIM]; // float f0 = fA[f_mem(0 ,x,y,z,pitch,YDIM,ZDIM)]; // f0 = tex2D(texRef_f0A ,x,y+YDIM*z); // f1 = tex2D(texRef_f1A ,x,y+YDIM*z); // f2 = tex2D(texRef_f2A ,x,y+YDIM*z); // f3 = tex2D(texRef_f3A ,x,y+YDIM*z); // f4 = tex2D(texRef_f4A ,x,y+YDIM*z); // f5 = tex2D(texRef_f5A ,x,y+YDIM*z); // f6 = tex2D(texRef_f6A ,x,y+YDIM*z); // f7 = tex2D(texRef_f7A ,x,y+YDIM*z); // f8 = tex2D(texRef_f8A ,x,y+YDIM*z); // f9 = tex2D(texRef_f9A ,x,y+YDIM*z); // f10 = tex2D(texRef_f10A,x,y+YDIM*z); // f11 = tex2D(texRef_f11A,x,y+YDIM*z); // f12 = tex2D(texRef_f12A,x,y+YDIM*z); // f13 = tex2D(texRef_f13A,x,y+YDIM*z); // f14 = tex2D(texRef_f14A,x,y+YDIM*z); // f15 = tex2D(texRef_f15A,x,y+YDIM*z); // f16 = tex2D(texRef_f16A,x,y+YDIM*z); // f17 = tex2D(texRef_f17A,x,y+YDIM*z); // f18 = tex2D(texRef_f18A,x,y+YDIM*z); // float f1 = fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f1 = fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f2 = fA[f_mem(2 ,x,y,z,pitch,YDIM,ZDIM)]; // f3 = fA[f_mem(3 ,x,y,z,pitch,YDIM,ZDIM)]; // f4 = fA[f_mem(4 ,x,y,z,pitch,YDIM,ZDIM)]; // f5 = fA[f_mem(5 ,x,y,z,pitch,YDIM,ZDIM)]; // f6 = fA[f_mem(6 ,x,y,z,pitch,YDIM,ZDIM)]; // f7 = fA[f_mem(7 ,x,y,z,pitch,YDIM,ZDIM)]; // f8 = fA[f_mem(8 ,x,y,z,pitch,YDIM,ZDIM)]; // f9 = fA[f_mem(9 ,x,y,z,pitch,YDIM,ZDIM)]; // f10 = fA[f_mem(10,x,y,z,pitch,YDIM,ZDIM)]; // f11 = fA[f_mem(11,x,y,z,pitch,YDIM,ZDIM)]; // f12 = fA[f_mem(12,x,y,z,pitch,YDIM,ZDIM)]; // f13 = fA[f_mem(13,x,y,z,pitch,YDIM,ZDIM)]; // f14 = fA[f_mem(14,x,y,z,pitch,YDIM,ZDIM)]; // f15 = fA[f_mem(15,x,y,z,pitch,YDIM,ZDIM)]; // f16 = fA[f_mem(16,x,y,z,pitch,YDIM,ZDIM)]; // f17 = 
fA[f_mem(17,x,y,z,pitch,YDIM,ZDIM)]; // f18 = fA[f_mem(18,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(0 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(0 ,x,y,z,pitch,YDIM,ZDIM)];//+0.01f; fB[j] = fA[j];//+0.01f; // fB[j+pitch*YDIM*ZDIM+pitch*YDIM*ZDIM] = f2; // fB[(x+y*pitch+z*YDIM*pitch)+pitch*YDIM*ZDIM] = f1 ;//+0.01f; // fB[f_mem(0 ,x,y,z,pitch,YDIM,ZDIM)] = f0 ; // fB[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)] = f1;//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(2 ,x,y,z,pitch,YDIM,ZDIM)] = f2 ;//+0.01f; // fB[f_mem(3 ,x,y,z,pitch,YDIM,ZDIM)] = f3 ;//+0.01f; // fB[f_mem(4 ,x,y,z,pitch,YDIM,ZDIM)] = f4 ;//+0.01f; // fB[f_mem(5 ,x,y,z,pitch,YDIM,ZDIM)] = f5 ;//+0.01f; // fB[f_mem(6 ,x,y,z,pitch,YDIM,ZDIM)] = f6 ;//+0.01f; // fB[f_mem(7 ,x,y,z,pitch,YDIM,ZDIM)] = f7 ;//+0.01f; // fB[f_mem(8 ,x,y,z,pitch,YDIM,ZDIM)] = f8 ;//+0.01f; // fB[f_mem(9 ,x,y,z,pitch,YDIM,ZDIM)] = f9 ;//+0.01f; // fB[f_mem(10,x,y,z,pitch,YDIM,ZDIM)] = f10;//+0.01f; // fB[f_mem(11,x,y,z,pitch,YDIM,ZDIM)] = f11;//+0.01f; // fB[f_mem(12,x,y,z,pitch,YDIM,ZDIM)] = f12;//+0.01f; // fB[f_mem(13,x,y,z,pitch,YDIM,ZDIM)] = f13;//+0.01f; // fB[f_mem(14,x,y,z,pitch,YDIM,ZDIM)] = f14;//+0.01f; // fB[f_mem(15,x,y,z,pitch,YDIM,ZDIM)] = f15;//+0.01f; // fB[f_mem(16,x,y,z,pitch,YDIM,ZDIM)] = f16;//+0.01f; // fB[f_mem(17,x,y,z,pitch,YDIM,ZDIM)] = f17;//+0.01f; // fB[f_mem(18,x,y,z,pitch,YDIM,ZDIM)] = f18;//+0.01f; } //int const blockx = 192; //int const blocky = 1; __global__ void mrt_d_hybAB(float* fin, float* fout, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; // int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) // f1out[j] = tex2D(texRef_f2A,x,y+h*z); // int i = x+y*blockDim.x*gridDim.x; //float u,v,w,rho;//,usqr; int im = ImageFcn(x,y,z); if(im == 1){//BB float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // f1 = fin[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)]; // f3 = fin[f_mem(1 ,dmax(x-1) ,y ,z ,pitch,YDIM,ZDIM)]; // f5 = fin[f_mem(7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)]; // f7 = fin[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)]; // f6 = fin[f_mem(8 ,dmax(x-1) ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)]; // f8 = fin[f_mem(6 ,dmin(x+1,XDIM),dmax(y-1) ,z ,pitch,YDIM,ZDIM)]; // f10= fin[f_mem(17,dmin(x+1,XDIM),y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)]; // f12= fin[f_mem(15,dmax(x-1) ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)]; // f15= fin[f_mem(12,dmin(x+1,XDIM),y ,dmax(z-1) ,pitch,YDIM,ZDIM)]; // f17= fin[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)]; f2 = fin[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch)]; f4 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch)]; f9 = fin[f_mem(14,x ,y ,dmin(z+1,ZDIM),pitch)]; f11= fin[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM) ,pitch)]; f13= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM),pitch)]; f14= fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch)]; f16= fin[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch)]; f18= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch)]; f3 = tex2D(texRef_f1A ,x-1,(y )+YDIM*(z)); f1 = tex2D(texRef_f3A ,x+1,(y )+YDIM*(z)); f5 = tex2D(texRef_f7A ,x+1,(y+1)+YDIM*(z)); f6 = tex2D(texRef_f8A ,x-1,(y+1)+YDIM*(z)); f7 = tex2D(texRef_f5A ,x-1,(y-1)+YDIM*(z)); f8 = tex2D(texRef_f6A ,x+1,(y-1)+YDIM*(z)); f10= tex2D(texRef_f17A,x+1,(y )+YDIM*(z+1)); f12= tex2D(texRef_f15A,x-1,(y )+YDIM*(z+1)); f17= tex2D(texRef_f10A,x-1,(y )+YDIM*(z-1)); f15= tex2D(texRef_f12A,x+1,(y )+YDIM*(z-1)); 
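	// Hybrid streaming for a bounce-back (solid) node: populations whose lattice
	// direction has no x component are pulled from global memory with f_mem(),
	// while those with an x offset come through the A-bound textures. Each value
	// is read from the neighbour in its own direction but from the *opposite*
	// distribution (e.g. f1 takes texRef_f3A at x+1), so storing it under its own
	// index below reverses the velocity at the wall. The raw store offsets used
	// next are equivalent to f_mem(n,x,y,z,pitch), i.e.
	//   (x + y*pitch + z*YDIM*pitch) + n*pitch*YDIM*ZDIM.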
fout[j+pitch*YDIM*ZDIM*1 ] = f1 ; fout[j+pitch*YDIM*ZDIM*2 ] = f2 ; fout[j+pitch*YDIM*ZDIM*3 ] = f3 ; fout[j+pitch*YDIM*ZDIM*4 ] = f4 ; fout[j+pitch*YDIM*ZDIM*5 ] = f5 ; fout[j+pitch*YDIM*ZDIM*6 ] = f6 ; fout[j+pitch*YDIM*ZDIM*7 ] = f7 ; fout[j+pitch*YDIM*ZDIM*8 ] = f8 ; fout[j+pitch*YDIM*ZDIM*9 ] = f9 ; fout[j+pitch*YDIM*ZDIM*10] = f10; fout[j+pitch*YDIM*ZDIM*11] = f11; fout[j+pitch*YDIM*ZDIM*12] = f12; fout[j+pitch*YDIM*ZDIM*13] = f13; fout[j+pitch*YDIM*ZDIM*14] = f14; fout[j+pitch*YDIM*ZDIM*15] = f15; fout[j+pitch*YDIM*ZDIM*16] = f16; fout[j+pitch*YDIM*ZDIM*17] = f17; fout[j+pitch*YDIM*ZDIM*18] = f18; } else{ float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // f1 = fin[f_mem(1 ,x-1,y ,z ,pitch,YDIM,ZDIM)]; // f3 = fin[f_mem(3 ,x+1,y ,z ,pitch,YDIM,ZDIM)]; // f5 = fin[f_mem(5 ,x-1,y-1,z ,pitch,YDIM,ZDIM)]; // f6 = fin[f_mem(6 ,x+1,y-1,z ,pitch,YDIM,ZDIM)]; // f7 = fin[f_mem(7 ,x+1,y+1,z ,pitch,YDIM,ZDIM)]; // f8 = fin[f_mem(8 ,x-1,y+1,z ,pitch,YDIM,ZDIM)]; // f10= fin[f_mem(10,x-1,y ,z-1,pitch,YDIM,ZDIM)]; // f12= fin[f_mem(12,x+1,y ,z-1,pitch,YDIM,ZDIM)]; // f15= fin[f_mem(15,x-1,y ,z+1,pitch,YDIM,ZDIM)]; // f17= fin[f_mem(17,x+1,y ,z+1,pitch,YDIM,ZDIM)]; f0 = fin[j]; f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)]; f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)]; f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)]; f11= fin[f_mem(11,x ,y-1,z-1,pitch)]; f13= fin[f_mem(13,x ,y+1,z-1,pitch)]; f14= fin[f_mem(14,x ,y ,z+1,pitch)]; f16= fin[f_mem(16,x ,y-1,z+1,pitch)]; f18= fin[f_mem(18,x ,y+1,z+1,pitch)]; f1 = tex2D(texRef_f1A ,x-1,y +YDIM*(z)); f3 = tex2D(texRef_f3A ,x+1,y +YDIM*(z)); f5 = tex2D(texRef_f5A ,x-1,y-1+YDIM*(z)); f6 = tex2D(texRef_f6A ,x+1,y-1+YDIM*(z)); f7 = tex2D(texRef_f7A ,x+1,y+1+YDIM*(z)); f8 = tex2D(texRef_f8A ,x-1,y+1+YDIM*(z)); f15= tex2D(texRef_f15A,x-1,y +YDIM*(z+1)); f17= tex2D(texRef_f17A,x+1,y +YDIM*(z+1)); f10= tex2D(texRef_f10A,x-1,y +YDIM*(z-1)); f12= tex2D(texRef_f12A,x+1,y +YDIM*(z-1)); if(im == 3)//DirichletWest { if(y == 0){ // f2 = f4; f6 = f7; // f11 = f13; // f16 = f18; } else if(y == YDIM-1){ // f4 = f2; f7 = f6; // f13 = f11; // f18 = f16; } if(z == 0){ // f9 = f14; // f10 = f15; // f11 = f16; f12 = f17; // f13 = f18; } if(z == ZDIM-1){ // f14 = f9; // f15 = f10; // f16 = f11; f17 = f12; // f18 = f13; } // float fInt1,fInt2;//,fDiff; float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; // fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18; // fInt2 = f3+f6+f7+f12+f17; // rho = u+(fInt1+2.0f*fInt2); //D2Q9i f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); // rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i // // float usqr = u*u+v*v+w*w; // f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; // f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); // f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); // f10= 
0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); // f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } //mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 ,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void mrt_d_hybBA(float* fin, float* fout, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; // int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,z); if(im == 1){//BB float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f2 = fin[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch)]; f4 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch)]; f9 = fin[f_mem(14,x ,y ,dmin(z+1,ZDIM),pitch)]; f11= fin[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM) ,pitch)]; f13= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM),pitch)]; f14= fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch)]; f16= fin[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch)]; f18= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch)]; f3 = tex2D(texRef_f1B ,x-1,(y )+YDIM*(z)); f1 = tex2D(texRef_f3B ,x+1,(y )+YDIM*(z)); f5 = tex2D(texRef_f7B ,x+1,(y+1)+YDIM*(z)); f6 = tex2D(texRef_f8B ,x-1,(y+1)+YDIM*(z)); f7 = tex2D(texRef_f5B ,x-1,(y-1)+YDIM*(z)); f8 = tex2D(texRef_f6B ,x+1,(y-1)+YDIM*(z)); f10= tex2D(texRef_f17B,x+1,(y )+YDIM*(z+1)); f12= tex2D(texRef_f15B,x-1,(y )+YDIM*(z+1)); f17= tex2D(texRef_f10B,x-1,(y )+YDIM*(z-1)); f15= tex2D(texRef_f12B,x+1,(y )+YDIM*(z-1)); fout[j+pitch*YDIM*ZDIM*1 ] = f1 ; fout[j+pitch*YDIM*ZDIM*2 ] = f2 ; fout[j+pitch*YDIM*ZDIM*3 ] = f3 ; fout[j+pitch*YDIM*ZDIM*4 ] = f4 ; fout[j+pitch*YDIM*ZDIM*5 ] = f5 ; fout[j+pitch*YDIM*ZDIM*6 ] = f6 ; fout[j+pitch*YDIM*ZDIM*7 ] = f7 ; fout[j+pitch*YDIM*ZDIM*8 ] = f8 ; fout[j+pitch*YDIM*ZDIM*9 ] = f9 ; fout[j+pitch*YDIM*ZDIM*10] = f10; fout[j+pitch*YDIM*ZDIM*11] = f11; fout[j+pitch*YDIM*ZDIM*12] = f12; fout[j+pitch*YDIM*ZDIM*13] = f13; fout[j+pitch*YDIM*ZDIM*14] = f14; fout[j+pitch*YDIM*ZDIM*15] = f15; fout[j+pitch*YDIM*ZDIM*16] = f16; fout[j+pitch*YDIM*ZDIM*17] = f17; fout[j+pitch*YDIM*ZDIM*18] = f18; } else{ float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fin[j]; f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)]; f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)]; f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)]; f11= fin[f_mem(11,x ,y-1,z-1,pitch)]; f13= fin[f_mem(13,x ,y+1,z-1,pitch)]; f14= fin[f_mem(14,x ,y ,z+1,pitch)]; f16= fin[f_mem(16,x ,y-1,z+1,pitch)]; f18= fin[f_mem(18,x ,y+1,z+1,pitch)]; f1 = tex2D(texRef_f1B ,x-1,y +YDIM*(z)); f3 = tex2D(texRef_f3B ,x+1,y +YDIM*(z)); f5 = tex2D(texRef_f5B 
,x-1,y-1+YDIM*(z)); f6 = tex2D(texRef_f6B ,x+1,y-1+YDIM*(z)); f7 = tex2D(texRef_f7B ,x+1,y+1+YDIM*(z)); f8 = tex2D(texRef_f8B ,x-1,y+1+YDIM*(z)); f15= tex2D(texRef_f15B,x-1,y +YDIM*(z+1)); f17= tex2D(texRef_f17B,x+1,y +YDIM*(z+1)); f10= tex2D(texRef_f10B,x-1,y +YDIM*(z-1)); f12= tex2D(texRef_f12B,x+1,y +YDIM*(z-1)); if(im == 3)//DirichletWest { if(y == 0){ f2 = f4; f6 = f7; f11 = f13; f16 = f18; } else if(y == YDIM-1){ f4 = f2; f7 = f6; f13 = f11; f18 = f16; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f12 = f17; f13 = f18; } if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f17 = f12; f18 = f13; } // float fInt1,fInt2;//,fDiff; float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; // fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18; // fInt2 = f3+f6+f7+f12+f17; // rho = u+(fInt1+2.0f*fInt2); //D2Q9i f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); // rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i // // float usqr = u*u+v*v+w*w; // f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; // f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); // f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); // f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); // f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } //mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 ,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void mrt_d_textAB(float* fin, float* fout, int *image, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) int im = image[i]; if(im == 1){//BB float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f2 = tex2D(texRef_f4A ,x ,(y+1)+YDIM*(z )); f4 = tex2D(texRef_f2A ,x ,(y-1)+YDIM*(z )); f9 = tex2D(texRef_f14A,x 
,(y )+YDIM*(z+1)); f14= tex2D(texRef_f9A ,x ,(y )+YDIM*(z-1)); f11= tex2D(texRef_f18A,x ,(y+1)+YDIM*(z+1)); f18= tex2D(texRef_f11A,x ,(y-1)+YDIM*(z-1)); f16= tex2D(texRef_f13A,x ,(y+1)+YDIM*(z-1)); f13= tex2D(texRef_f16A,x ,(y-1)+YDIM*(z+1)); // f2 = fin[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)]; // f4 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)]; // f9 = fin[f_mem(14,x ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)]; // f11= fin[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)]; // f13= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)]; // f14= fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)]; // f16= fin[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch,YDIM,ZDIM)]; // f18= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,YDIM,ZDIM)]; f3 = tex2D(texRef_f1A ,x-1,(y )+YDIM*(z)); f1 = tex2D(texRef_f3A ,x+1,(y )+YDIM*(z)); f5 = tex2D(texRef_f7A ,x+1,(y+1)+YDIM*(z)); f6 = tex2D(texRef_f8A ,x-1,(y+1)+YDIM*(z)); f7 = tex2D(texRef_f5A ,x-1,(y-1)+YDIM*(z)); f8 = tex2D(texRef_f6A ,x+1,(y-1)+YDIM*(z)); f10= tex2D(texRef_f17A,x+1,(y )+YDIM*(z+1)); f12= tex2D(texRef_f15A,x-1,(y )+YDIM*(z+1)); f17= tex2D(texRef_f10A,x-1,(y )+YDIM*(z-1)); f15= tex2D(texRef_f12A,x+1,(y )+YDIM*(z-1)); fout[j+pitch*YDIM*ZDIM*1 ] = f1 ; fout[j+pitch*YDIM*ZDIM*2 ] = f2 ; fout[j+pitch*YDIM*ZDIM*3 ] = f3 ; fout[j+pitch*YDIM*ZDIM*4 ] = f4 ; fout[j+pitch*YDIM*ZDIM*5 ] = f5 ; fout[j+pitch*YDIM*ZDIM*6 ] = f6 ; fout[j+pitch*YDIM*ZDIM*7 ] = f7 ; fout[j+pitch*YDIM*ZDIM*8 ] = f8 ; fout[j+pitch*YDIM*ZDIM*9 ] = f9 ; fout[j+pitch*YDIM*ZDIM*10] = f10; fout[j+pitch*YDIM*ZDIM*11] = f11; fout[j+pitch*YDIM*ZDIM*12] = f12; fout[j+pitch*YDIM*ZDIM*13] = f13; fout[j+pitch*YDIM*ZDIM*14] = f14; fout[j+pitch*YDIM*ZDIM*15] = f15; fout[j+pitch*YDIM*ZDIM*16] = f16; fout[j+pitch*YDIM*ZDIM*17] = f17; fout[j+pitch*YDIM*ZDIM*18] = f18; } else{ float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fin[j]; f2 = tex2D(texRef_f2A ,x ,y-1+YDIM*(z)); f4 = tex2D(texRef_f4A ,x ,y+1+YDIM*(z)); f9 = tex2D(texRef_f9A ,x ,y+1+YDIM*(z-1)); f11= tex2D(texRef_f11A,x ,y-1+YDIM*(z-1)); f13= tex2D(texRef_f13A,x ,y+1+YDIM*(z-1)); f14= tex2D(texRef_f14A,x ,y +YDIM*(z+1)); f16= tex2D(texRef_f16A,x ,y-1+YDIM*(z+1)); f18= tex2D(texRef_f18A,x ,y+1+YDIM*(z+1)); // f2 = fin[f_mem(2 ,x ,y-1,z ,pitch,YDIM,ZDIM)]; // f4 = fin[f_mem(4 ,x ,y+1,z ,pitch,YDIM,ZDIM)]; // f9 = fin[f_mem(9 ,x ,y ,z-1,pitch,YDIM,ZDIM)]; // f11= fin[f_mem(11,x ,y-1,z-1,pitch,YDIM,ZDIM)]; // f13= fin[f_mem(13,x ,y+1,z-1,pitch,YDIM,ZDIM)]; // f14= fin[f_mem(14,x ,y ,z+1,pitch,YDIM,ZDIM)]; // f16= fin[f_mem(16,x ,y-1,z+1,pitch,YDIM,ZDIM)]; // f18= fin[f_mem(18,x ,y+1,z+1,pitch,YDIM,ZDIM)]; f1 = tex2D(texRef_f1A ,x-1,y +YDIM*(z)); f3 = tex2D(texRef_f3A ,x+1,y +YDIM*(z)); f5 = tex2D(texRef_f5A ,x-1,y-1+YDIM*(z)); f6 = tex2D(texRef_f6A ,x+1,y-1+YDIM*(z)); f7 = tex2D(texRef_f7A ,x+1,y+1+YDIM*(z)); f8 = tex2D(texRef_f8A ,x-1,y+1+YDIM*(z)); f15= tex2D(texRef_f15A,x-1,y +YDIM*(z+1)); f17= tex2D(texRef_f17A,x+1,y +YDIM*(z+1)); f10= tex2D(texRef_f10A,x-1,y +YDIM*(z-1)); f12= tex2D(texRef_f12A,x+1,y +YDIM*(z-1)); if(im == 3)//DirichletWest { if(y == 0){ f2 = f4; f6 = f7; f11 = f13; f16 = f18; } else if(y == YDIM-1){ f4 = f2; f7 = f6; f13 = f11; f18 = f16; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f12 = f17; f13 = f18; } if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f17 = f12; f18 = f13; } // float fInt1,fInt2;//,fDiff; float u,v,w,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; // fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18; // fInt2 = f3+f6+f7+f12+f17; // rho = 
u+(fInt1+2.0f*fInt2); //D2Q9i rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i float usqr = u*u+v*v+w*w; f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } //mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 ,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void mrt_d_textBA(float* fin, float* fout, int *image, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) int im = image[i]; if(im == 1){//BB float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f2 = tex2D(texRef_f4B ,x ,(y+1)+YDIM*(z )); f4 = tex2D(texRef_f2B ,x ,(y-1)+YDIM*(z )); f9 = tex2D(texRef_f14B,x ,(y )+YDIM*(z+1)); f14= tex2D(texRef_f9B ,x ,(y )+YDIM*(z-1)); f11= tex2D(texRef_f18B,x ,(y+1)+YDIM*(z+1)); f18= tex2D(texRef_f11B,x ,(y-1)+YDIM*(z-1)); f16= tex2D(texRef_f13B,x ,(y+1)+YDIM*(z-1)); f13= tex2D(texRef_f16B,x ,(y-1)+YDIM*(z+1)); // f2 = fin[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)]; // f4 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)]; // f9 = fin[f_mem(14,x ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)]; // f11= fin[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)]; // f13= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)]; // f14= fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)]; // f16= fin[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch,YDIM,ZDIM)]; // f18= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,YDIM,ZDIM)]; f3 = tex2D(texRef_f1B ,x-1,(y )+YDIM*(z)); f1 = tex2D(texRef_f3B ,x+1,(y )+YDIM*(z)); f5 = tex2D(texRef_f7B ,x+1,(y+1)+YDIM*(z)); f6 = tex2D(texRef_f8B ,x-1,(y+1)+YDIM*(z)); f7 = tex2D(texRef_f5B ,x-1,(y-1)+YDIM*(z)); f8 = tex2D(texRef_f6B ,x+1,(y-1)+YDIM*(z)); f10= tex2D(texRef_f17B,x+1,(y )+YDIM*(z+1)); f12= tex2D(texRef_f15B,x-1,(y )+YDIM*(z+1)); f17= tex2D(texRef_f10B,x-1,(y )+YDIM*(z-1)); f15= tex2D(texRef_f12B,x+1,(y )+YDIM*(z-1)); fout[j+pitch*YDIM*ZDIM*1 ] = f1 ; fout[j+pitch*YDIM*ZDIM*2 ] = f2 ; fout[j+pitch*YDIM*ZDIM*3 ] = f3 ; fout[j+pitch*YDIM*ZDIM*4 ] = f4 ; 
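	// Same bounce-back exchange as in the hybrid kernels, but here every
	// neighbour population is fetched through the B-bound textures: the ...BA
	// kernels mirror the ...AB kernels for the alternate (ping-pong) buffer.
	// The remaining reversed populations f5..f18 are stored below.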
fout[j+pitch*YDIM*ZDIM*5 ] = f5 ; fout[j+pitch*YDIM*ZDIM*6 ] = f6 ; fout[j+pitch*YDIM*ZDIM*7 ] = f7 ; fout[j+pitch*YDIM*ZDIM*8 ] = f8 ; fout[j+pitch*YDIM*ZDIM*9 ] = f9 ; fout[j+pitch*YDIM*ZDIM*10] = f10; fout[j+pitch*YDIM*ZDIM*11] = f11; fout[j+pitch*YDIM*ZDIM*12] = f12; fout[j+pitch*YDIM*ZDIM*13] = f13; fout[j+pitch*YDIM*ZDIM*14] = f14; fout[j+pitch*YDIM*ZDIM*15] = f15; fout[j+pitch*YDIM*ZDIM*16] = f16; fout[j+pitch*YDIM*ZDIM*17] = f17; fout[j+pitch*YDIM*ZDIM*18] = f18; } else{ float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fin[j]; f2 = tex2D(texRef_f2B ,x ,y-1+YDIM*(z)); f4 = tex2D(texRef_f4B ,x ,y+1+YDIM*(z)); f9 = tex2D(texRef_f9B ,x ,y+1+YDIM*(z-1)); f11= tex2D(texRef_f11B,x ,y-1+YDIM*(z-1)); f13= tex2D(texRef_f13B,x ,y+1+YDIM*(z-1)); f14= tex2D(texRef_f14B,x ,y +YDIM*(z+1)); f16= tex2D(texRef_f16B,x ,y-1+YDIM*(z+1)); f18= tex2D(texRef_f18B,x ,y+1+YDIM*(z+1)); // f2 = fin[f_mem(2 ,x ,y-1,z ,pitch,YDIM,ZDIM)]; // f4 = fin[f_mem(4 ,x ,y+1,z ,pitch,YDIM,ZDIM)]; // f9 = fin[f_mem(9 ,x ,y ,z-1,pitch,YDIM,ZDIM)]; // f11= fin[f_mem(11,x ,y-1,z-1,pitch,YDIM,ZDIM)]; // f13= fin[f_mem(13,x ,y+1,z-1,pitch,YDIM,ZDIM)]; // f14= fin[f_mem(14,x ,y ,z+1,pitch,YDIM,ZDIM)]; // f16= fin[f_mem(16,x ,y-1,z+1,pitch,YDIM,ZDIM)]; // f18= fin[f_mem(18,x ,y+1,z+1,pitch,YDIM,ZDIM)]; f1 = tex2D(texRef_f1B ,x-1,y +YDIM*(z)); f3 = tex2D(texRef_f3B ,x+1,y +YDIM*(z)); f5 = tex2D(texRef_f5B ,x-1,y-1+YDIM*(z)); f6 = tex2D(texRef_f6B ,x+1,y-1+YDIM*(z)); f7 = tex2D(texRef_f7B ,x+1,y+1+YDIM*(z)); f8 = tex2D(texRef_f8B ,x-1,y+1+YDIM*(z)); f15= tex2D(texRef_f15B,x-1,y +YDIM*(z+1)); f17= tex2D(texRef_f17B,x+1,y +YDIM*(z+1)); f10= tex2D(texRef_f10B,x-1,y +YDIM*(z-1)); f12= tex2D(texRef_f12B,x+1,y +YDIM*(z-1)); if(im == 3)//DirichletWest { if(y == 0){ f2 = f4; f6 = f7; f11 = f13; f16 = f18; } else if(y == YDIM-1){ f4 = f2; f7 = f6; f13 = f11; f18 = f16; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f12 = f17; f13 = f18; } if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f17 = f12; f18 = f13; } // float fInt1,fInt2;//,fDiff; float u,v,w,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; // fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18; // fInt2 = f3+f6+f7+f12+f17; // rho = u+(fInt1+2.0f*fInt2); //D2Q9i rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i float usqr = u*u+v*v+w*w; f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } //mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 ,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = 
f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void mrt_d_shared(float* fA, float* fB, float omega, size_t pitch)//pitch in elements // int *image, float omega, float UMAX, // int XDIM, int YDIM, int ZDIM, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; // int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) // f1out[j] = tex2D(texRef_f2A,x,y+h*z); // int i = x+y*blockDim.x*gridDim.x; //float u,v,w,rho;//,usqr; // int im = image[i]; // if(x == 0 || y == 0 || z == 0 || x == XDIM-1 || y == YDIM-1 || z == ZDIM-1) im = 1; int im = ImageFcn(x,y,z); __shared__ float f1_s[BLOCKSIZEX]; __shared__ float f3_s[BLOCKSIZEX]; __shared__ float f5_s[BLOCKSIZEX]; __shared__ float f7_s[BLOCKSIZEX]; __shared__ float f6_s[BLOCKSIZEX]; __shared__ float f8_s[BLOCKSIZEX]; __shared__ float f10_s[BLOCKSIZEX]; __shared__ float f12_s[BLOCKSIZEX]; __shared__ float f15_s[BLOCKSIZEX]; __shared__ float f17_s[BLOCKSIZEX]; f1_s[threadIdx.x] = fA[f_mem(1 ,x ,y ,z ,pitch)];//dmax(x-1) f3_s[threadIdx.x] = fA[f_mem(3 ,x ,y ,z ,pitch)];//dmin(x+1,XDIM) // if(y != 0){ f5_s[threadIdx.x] = fA[f_mem(5 ,x ,y-1,z ,pitch)];//dmax(x-1) f8_s[threadIdx.x] = fA[f_mem(8 ,x ,y-1,z ,pitch)];//dmax(x-1) // } // else if(y != YDIM){ f7_s[threadIdx.x] = fA[f_mem(7 ,x ,y+1,z ,pitch)];//dmin(x+1,XDIM) f6_s[threadIdx.x] = fA[f_mem(6 ,x ,y+1,z ,pitch)];//dmin(x+1,XDIM) // } // if(z != 0){ f10_s[threadIdx.x] = fA[f_mem(10,x ,y ,z-1,pitch)];//dmax(x-1) f12_s[threadIdx.x] = fA[f_mem(12,x ,y ,z-1,pitch)];//dmin(x+1,XDIM) // } // else if(z != ZDIM-1){ f15_s[threadIdx.x] = fA[f_mem(15,x ,y ,z+1,pitch)];//dmax(x-1) f17_s[threadIdx.x] = fA[f_mem(17,x ,y ,z+1,pitch)];//dmin(x+1,XDIM) // } // f1_s[threadIdx.x] = fA[f_mem(1 ,x ,y ,z ,pitch,YDIM,ZDIM)];//dmax(x-1) // f3_s[threadIdx.x] = fA[f_mem(3 ,x ,y ,z ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) // f5_s[threadIdx.x] = fA[f_mem(5 ,x ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//dmax(x-1) // f7_s[threadIdx.x] = fA[f_mem(7 ,x ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) // f6_s[threadIdx.x] = fA[f_mem(6 ,x ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) // f8_s[threadIdx.x] = fA[f_mem(8 ,x ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//dmax(x-1) // f10_s[threadIdx.x] = fA[f_mem(10,x ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//dmax(x-1) // f12_s[threadIdx.x] = fA[f_mem(12,x ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) // f15_s[threadIdx.x] = fA[f_mem(15,x ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//dmax(x-1) // f17_s[threadIdx.x] = fA[f_mem(17,x ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) // __syncthreads(); if(im == 1){//BB float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // if(y != 0){ f4 = fA[f_mem(2 ,x ,y-1 ,z ,pitch)];//fA[f_mem(4 ,x,y,z,pitch,YDIM,ZDIM)]; // } // else if(y != YDIM-1){ f2 = fA[f_mem(4 ,x ,y+1 ,z ,pitch)];//fA[f_mem(2 ,x,y,z,pitch,YDIM,ZDIM)]; // } // if(z != ZDIM-1){ f9 = fA[f_mem(14,x ,y ,z+1,pitch)];//fA[f_mem(9 ,x,y,z,pitch,YDIM,ZDIM)]; f11= fA[f_mem(18,x ,y+1 ,z+1,pitch)];//fA[f_mem(11,x,y,z,pitch,YDIM,ZDIM)]; f13= fA[f_mem(16,x ,y-1 ,z+1,pitch)];//fA[f_mem(13,x,y,z,pitch,YDIM,ZDIM)]; // } // else if(z != 0){ f14= fA[f_mem(9 ,x ,y 
,z-1,pitch)];//fA[f_mem(14,x,y,z,pitch,YDIM,ZDIM)]; f16= fA[f_mem(13,x ,y+1 ,z-1,pitch)];//fA[f_mem(16,x,y,z,pitch,YDIM,ZDIM)]; f18= fA[f_mem(11,x ,y-1 ,z-1,pitch)];//fA[f_mem(18,x,y,z,pitch,YDIM,ZDIM)]; // } // f2 = fA[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//fA[f_mem(2 ,x,y,z,pitch,YDIM,ZDIM)]; // f4 = fA[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(4 ,x,y,z,pitch,YDIM,ZDIM)]; // f9 = fA[f_mem(14,x ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(9 ,x,y,z,pitch,YDIM,ZDIM)]; // f11= fA[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(11,x,y,z,pitch,YDIM,ZDIM)]; // f13= fA[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(13,x,y,z,pitch,YDIM,ZDIM)]; // f14= fA[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(14,x,y,z,pitch,YDIM,ZDIM)]; // f16= fA[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(16,x,y,z,pitch,YDIM,ZDIM)]; // f18= fA[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(18,x,y,z,pitch,YDIM,ZDIM)]; // f1 = fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f3 = fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(3 ,x,y,z,pitch,YDIM,ZDIM)]; // f5 = fA[f_mem(7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//fA[f_mem(5 ,x,y,z,pitch,YDIM,ZDIM)]; // f7 = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(7 ,x,y,z,pitch,YDIM,ZDIM)]; // f6 = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//fA[f_mem(6 ,x,y,z,pitch,YDIM,ZDIM)]; // f8 = fA[f_mem(6 ,dmin(x+1,XDIM),dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(8 ,x,y,z,pitch,YDIM,ZDIM)]; // f10= fA[f_mem(17,dmin(x+1,XDIM),y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(10,x,y,z,pitch,YDIM,ZDIM)]; // f12= fA[f_mem(15,dmax(x-1) ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(12,x,y,z,pitch,YDIM,ZDIM)]; // f15= fA[f_mem(12,dmin(x+1,XDIM),y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(15,x,y,z,pitch,YDIM,ZDIM)]; // f17= fA[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(17,x,y,z,pitch,YDIM,ZDIM)]; if(threadIdx.x != XDIM-1){ f1 = f3_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; f5 = f7_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; f8 = f6_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; f10=f17_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; f15=f12_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; } if(threadIdx.x != 0){ f3 = f1_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; f7 = f5_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; f6 = f8_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; f17=f10_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; f12=f15_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; } // f1 = f3_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f3 = f1_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f5 = 
f7_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f7 = f5_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f6 = f8_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f8 = f6_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f10=f17_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f17=f10_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f12=f15_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f15=f12_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; fB[f_mem(1 ,x,y,z,pitch)] = f1 ; fB[f_mem(2 ,x,y,z,pitch)] = f2 ; fB[f_mem(3 ,x,y,z,pitch)] = f3 ; fB[f_mem(4 ,x,y,z,pitch)] = f4 ; fB[f_mem(5 ,x,y,z,pitch)] = f5 ; fB[f_mem(6 ,x,y,z,pitch)] = f6 ; fB[f_mem(7 ,x,y,z,pitch)] = f7 ; fB[f_mem(8 ,x,y,z,pitch)] = f8 ; fB[f_mem(9 ,x,y,z,pitch)] = f9 ; fB[f_mem(10,x,y,z,pitch)] = f10; fB[f_mem(11,x,y,z,pitch)] = f11; fB[f_mem(12,x,y,z,pitch)] = f12; fB[f_mem(13,x,y,z,pitch)] = f13; fB[f_mem(14,x,y,z,pitch)] = f14; fB[f_mem(15,x,y,z,pitch)] = f15; fB[f_mem(16,x,y,z,pitch)] = f16; fB[f_mem(17,x,y,z,pitch)] = f17; fB[f_mem(18,x,y,z,pitch)] = f18; // fB[j+pitch*YDIM*ZDIM*1 ] = f1 ; // fB[j+pitch*YDIM*ZDIM*2 ] = f2 ; // fB[j+pitch*YDIM*ZDIM*3 ] = f3 ; // fB[j+pitch*YDIM*ZDIM*4 ] = f4 ; // fB[j+pitch*YDIM*ZDIM*5 ] = f5 ; // fB[j+pitch*YDIM*ZDIM*6 ] = f6 ; // fB[j+pitch*YDIM*ZDIM*7 ] = f7 ; // fB[j+pitch*YDIM*ZDIM*8 ] = f8 ; // fB[j+pitch*YDIM*ZDIM*9 ] = f9 ; // fB[j+pitch*YDIM*ZDIM*10] = f10; // fB[j+pitch*YDIM*ZDIM*11] = f11; // fB[j+pitch*YDIM*ZDIM*12] = f12; // fB[j+pitch*YDIM*ZDIM*13] = f13; // fB[j+pitch*YDIM*ZDIM*14] = f14; // fB[j+pitch*YDIM*ZDIM*15] = f15; // fB[j+pitch*YDIM*ZDIM*16] = f16; // fB[j+pitch*YDIM*ZDIM*17] = f17; // fB[j+pitch*YDIM*ZDIM*18] = f18; } else{ float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fA[j]; //f0 = fA[f_mem(0 ,x ,y ,z ,pitch)]; // if(y != 0){ f2 = fA[f_mem(2 ,x ,y-1,z ,pitch)]; // } // else if(y != YDIM-1){ f4 = fA[f_mem(4 ,x ,y+1,z ,pitch)]; // } f14= fA[f_mem(14,x ,y ,z+1,pitch)]; f16= fA[f_mem(16,x ,y-1,z+1,pitch)]; if(z != ZDIM-1){ f18= fA[f_mem(18,x ,y+1,z+1,pitch)]; } // else if(z != 0){ f9 = fA[f_mem(9 ,x ,y ,z-1,pitch)]; f11= fA[f_mem(11,x ,y-1,z-1,pitch)]; f13= fA[f_mem(13,x ,y+1,z-1,pitch)]; // } if(threadIdx.x != XDIM-1){ f3 = f3_s[threadIdx.x+1];//fA[f_mem(3 ,x+1,y ,z ,pitch,YDIM,ZDIM)]; f6 = f6_s[threadIdx.x+1];//fA[f_mem(6 ,x+1,y-1,z ,pitch,YDIM,ZDIM)]; f7 = f7_s[threadIdx.x+1];//fA[f_mem(7 ,x+1,y+1,z ,pitch,YDIM,ZDIM)]; f12=f12_s[threadIdx.x+1];//fA[f_mem(12,x+1,y ,z-1,pitch,YDIM,ZDIM)]; f17=f17_s[threadIdx.x+1];//fA[f_mem(17,x+1,y ,z+1,pitch,YDIM,ZDIM)]; } if(threadIdx.x != 0){ f1 = f1_s[threadIdx.x-1];//fA[f_mem(1 ,x-1,y ,z ,pitch,YDIM,ZDIM)]; f5 = f5_s[threadIdx.x-1];//fA[f_mem(5 ,x-1,y-1,z ,pitch,YDIM,ZDIM)]; f8 = f8_s[threadIdx.x-1];//fA[f_mem(8 ,x-1,y+1,z ,pitch,YDIM,ZDIM)]; f10=f10_s[threadIdx.x-1];//fA[f_mem(10,x-1,y ,z-1,pitch,YDIM,ZDIM)]; f15=f15_s[threadIdx.x-1];//fA[f_mem(15,x-1,y ,z+1,pitch,YDIM,ZDIM)]; } // f0 = fA[j]; // f2 = fA[f_mem(2 ,x ,y-1,z 
,pitch,YDIM,ZDIM)]; // f4 = fA[f_mem(4 ,x ,y+1,z ,pitch,YDIM,ZDIM)]; // f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,YDIM,ZDIM)]; // f11= fA[f_mem(11,x ,y-1,z-1,pitch,YDIM,ZDIM)]; // f13= fA[f_mem(13,x ,y+1,z-1,pitch,YDIM,ZDIM)]; // f14= fA[f_mem(14,x ,y ,z+1,pitch,YDIM,ZDIM)]; // f16= fA[f_mem(16,x ,y-1,z+1,pitch,YDIM,ZDIM)]; // f18= fA[f_mem(18,x ,y+1,z+1,pitch,YDIM,ZDIM)]; // // f1 = f1_s[dmax(threadIdx.x-1 )];//fA[f_mem(1 ,x-1,y ,z ,pitch,YDIM,ZDIM)]; // f3 = f3_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,x+1,y ,z ,pitch,YDIM,ZDIM)]; // f5 = f5_s[dmax(threadIdx.x-1 )];//fA[f_mem(5 ,x-1,y-1,z ,pitch,YDIM,ZDIM)]; // f6 = f6_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(6 ,x+1,y-1,z ,pitch,YDIM,ZDIM)]; // f7 = f7_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(7 ,x+1,y+1,z ,pitch,YDIM,ZDIM)]; // f8 = f8_s[dmax(threadIdx.x-1 )];//fA[f_mem(8 ,x-1,y+1,z ,pitch,YDIM,ZDIM)]; // f10=f10_s[dmax(threadIdx.x-1 )];//fA[f_mem(10,x-1,y ,z-1,pitch,YDIM,ZDIM)]; // f12=f12_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(12,x+1,y ,z-1,pitch,YDIM,ZDIM)]; // f15=f15_s[dmax(threadIdx.x-1 )];//fA[f_mem(15,x-1,y ,z+1,pitch,YDIM,ZDIM)]; // f17=f17_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(17,x+1,y ,z+1,pitch,YDIM,ZDIM)]; if(im == 3)//DirichletWest { if(y == 0){ f2 = f4; f6 = f7; f11 = f13; f16 = f18; } else if(y == YDIM-1){ f4 = f2; f7 = f6; f13 = f11; f18 = f16; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f12 = f17; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f17 = f12; f18 = f13; } // float fInt1,fInt2;//,fDiff; float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; // fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18; // fInt2 = f3+f6+f7+f12+f17; // rho = u+(fInt1+2.0f*fInt2); //D2Q9i // rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i // float usqr = u*u+v*v+w*w; f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); // f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; // f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); // f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); // f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); // f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } //mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fB[f_mem(0 ,x,y,z,pitch)] = f0 ; fB[f_mem(1 ,x,y,z,pitch)] = f1 ; fB[f_mem(2 ,x,y,z,pitch)] = f2 ; fB[f_mem(3 ,x,y,z,pitch)] = f3 ; fB[f_mem(4 ,x,y,z,pitch)] = f4 ; fB[f_mem(5 ,x,y,z,pitch)] = f5 ; fB[f_mem(6 ,x,y,z,pitch)] = f6 ; fB[f_mem(7 ,x,y,z,pitch)] = f7 ; fB[f_mem(8 ,x,y,z,pitch)] = f8 ; fB[f_mem(9 ,x,y,z,pitch)] = f9 ; fB[f_mem(10,x,y,z,pitch)] = f10; fB[f_mem(11,x,y,z,pitch)] = f11; 
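	// Post-collision write-back for the shared-memory kernel: only the x-neighbour
	// populations were exchanged through the f*_s shared arrays above; the y/z
	// neighbours were read straight from global memory. f_mem() addresses a
	// structure-of-arrays layout with one pitch*YDIM*ZDIM slab per direction; the
	// remaining directions f12..f18 are written just below.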
fB[f_mem(12,x,y,z,pitch)] = f12; fB[f_mem(13,x,y,z,pitch)] = f13; fB[f_mem(14,x,y,z,pitch)] = f14; fB[f_mem(15,x,y,z,pitch)] = f15; fB[f_mem(16,x,y,z,pitch)] = f16; fB[f_mem(17,x,y,z,pitch)] = f17; fB[f_mem(18,x,y,z,pitch)] = f18; } } //{ // int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem // int y = threadIdx.y+blockIdx.y*blockDim.y; // int z = threadIdx.z+blockIdx.z*blockDim.z; //// int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem // int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) //// f1out[j] = tex2D(texRef_f2A,x,y+h*z); // //// int i = x+y*blockDim.x*gridDim.x; // //float u,v,w,rho;//,usqr; //// int im = image[i]; //// if(x == 0 || y == 0 || z == 0 || x == XDIM-1 || y == YDIM-1 || z == ZDIM-1) im = 1; // int im = ImageFcn(x,y,z); // // __shared__ float f1_s[BLOCKSIZEX]; // __shared__ float f3_s[BLOCKSIZEX]; // __shared__ float f5_s[BLOCKSIZEX]; // __shared__ float f7_s[BLOCKSIZEX]; // __shared__ float f6_s[BLOCKSIZEX]; // __shared__ float f8_s[BLOCKSIZEX]; // __shared__ float f10_s[BLOCKSIZEX]; // __shared__ float f12_s[BLOCKSIZEX]; // __shared__ float f15_s[BLOCKSIZEX]; // __shared__ float f17_s[BLOCKSIZEX]; // // f1_s[threadIdx.x] = fA[f_mem(1 ,x ,y ,z ,pitch)];//dmax(x-1) // f3_s[threadIdx.x] = fA[f_mem(3 ,x ,y ,z ,pitch)];//dmin(x+1,XDIM) //// if(y != 0){ // f5_s[threadIdx.x] = fA[f_mem(5 ,x ,y-1,z ,pitch)];//dmax(x-1) // f8_s[threadIdx.x] = fA[f_mem(8 ,x ,y-1,z ,pitch)];//dmax(x-1) //// } //// else if(y != YDIM){ // f7_s[threadIdx.x] = fA[f_mem(7 ,x ,y+1,z ,pitch)];//dmin(x+1,XDIM) // f6_s[threadIdx.x] = fA[f_mem(6 ,x ,y+1,z ,pitch)];//dmin(x+1,XDIM) //// } //// if(z != 0){ // f10_s[threadIdx.x] = fA[f_mem(10,x ,y ,z-1,pitch)];//dmax(x-1) // f12_s[threadIdx.x] = fA[f_mem(12,x ,y ,z-1,pitch)];//dmin(x+1,XDIM) //// } //// else if(z != ZDIM-1){ // f15_s[threadIdx.x] = fA[f_mem(15,x ,y ,z+1,pitch)];//dmax(x-1) // f17_s[threadIdx.x] = fA[f_mem(17,x ,y ,z+1,pitch)];//dmin(x+1,XDIM) //// } // //// f1_s[threadIdx.x] = fA[f_mem(1 ,x ,y ,z ,pitch,YDIM,ZDIM)];//dmax(x-1) //// f3_s[threadIdx.x] = fA[f_mem(3 ,x ,y ,z ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) //// f5_s[threadIdx.x] = fA[f_mem(5 ,x ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//dmax(x-1) //// f7_s[threadIdx.x] = fA[f_mem(7 ,x ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) //// f6_s[threadIdx.x] = fA[f_mem(6 ,x ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) //// f8_s[threadIdx.x] = fA[f_mem(8 ,x ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//dmax(x-1) //// f10_s[threadIdx.x] = fA[f_mem(10,x ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//dmax(x-1) //// f12_s[threadIdx.x] = fA[f_mem(12,x ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) //// f15_s[threadIdx.x] = fA[f_mem(15,x ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//dmax(x-1) //// f17_s[threadIdx.x] = fA[f_mem(17,x ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) //// // __syncthreads(); // // if(im == 1){//BB // float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // if(y != 0){ // f4 = fA[f_mem(2 ,x ,y-1 ,z ,pitch)];//fA[f_mem(4 ,x,y,z,pitch,YDIM,ZDIM)]; // } // else if(y != YDIM){ // f2 = fA[f_mem(4 ,x ,y+1 ,z ,pitch)];//fA[f_mem(2 ,x,y,z,pitch,YDIM,ZDIM)]; // } // if(z != ZDIM-1){ // f9 = fA[f_mem(14,x ,y ,z+1,pitch)];//fA[f_mem(9 ,x,y,z,pitch,YDIM,ZDIM)]; // f11= fA[f_mem(18,x ,y+1 ,z+1,pitch)];//fA[f_mem(11,x,y,z,pitch,YDIM,ZDIM)]; // f13= fA[f_mem(16,x ,y-1 ,z+1,pitch)];//fA[f_mem(13,x,y,z,pitch,YDIM,ZDIM)]; // } // else if(z != 0){ // f14= fA[f_mem(9 ,x ,y ,z-1,pitch)];//fA[f_mem(14,x,y,z,pitch,YDIM,ZDIM)]; // f16= fA[f_mem(13,x ,y+1 
,z-1,pitch)];//fA[f_mem(16,x,y,z,pitch,YDIM,ZDIM)]; // f18= fA[f_mem(11,x ,y-1 ,z-1,pitch)];//fA[f_mem(18,x,y,z,pitch,YDIM,ZDIM)]; // } // //// f2 = fA[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//fA[f_mem(2 ,x,y,z,pitch,YDIM,ZDIM)]; //// f4 = fA[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(4 ,x,y,z,pitch,YDIM,ZDIM)]; //// f9 = fA[f_mem(14,x ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(9 ,x,y,z,pitch,YDIM,ZDIM)]; //// f11= fA[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(11,x,y,z,pitch,YDIM,ZDIM)]; //// f13= fA[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(13,x,y,z,pitch,YDIM,ZDIM)]; //// f14= fA[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(14,x,y,z,pitch,YDIM,ZDIM)]; //// f16= fA[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(16,x,y,z,pitch,YDIM,ZDIM)]; //// f18= fA[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(18,x,y,z,pitch,YDIM,ZDIM)]; // //// f1 = fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; //// f3 = fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(3 ,x,y,z,pitch,YDIM,ZDIM)]; //// f5 = fA[f_mem(7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//fA[f_mem(5 ,x,y,z,pitch,YDIM,ZDIM)]; //// f7 = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(7 ,x,y,z,pitch,YDIM,ZDIM)]; //// f6 = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//fA[f_mem(6 ,x,y,z,pitch,YDIM,ZDIM)]; //// f8 = fA[f_mem(6 ,dmin(x+1,XDIM),dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(8 ,x,y,z,pitch,YDIM,ZDIM)]; //// f10= fA[f_mem(17,dmin(x+1,XDIM),y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(10,x,y,z,pitch,YDIM,ZDIM)]; //// f12= fA[f_mem(15,dmax(x-1) ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(12,x,y,z,pitch,YDIM,ZDIM)]; //// f15= fA[f_mem(12,dmin(x+1,XDIM),y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(15,x,y,z,pitch,YDIM,ZDIM)]; //// f17= fA[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(17,x,y,z,pitch,YDIM,ZDIM)]; // // if(threadIdx.x != XDIM-1){ // f1 = f3_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f5 = f7_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f8 = f6_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f10=f17_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f15=f12_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // } // else if(threadIdx.x != 0){ // f3 = f1_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f7 = f5_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f6 = f8_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f17=f10_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f12=f15_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // } // // //// f1 = f3_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; //// f3 = f1_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 
,x,y,z,pitch,YDIM,ZDIM)]; //// f5 = f7_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; //// f7 = f5_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; //// f6 = f8_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; //// f8 = f6_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; //// f10=f17_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; //// f17=f10_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; //// f12=f15_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; //// f15=f12_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // // fB[f_mem(1 ,x,y,z,pitch)] = f1 ; // fB[f_mem(2 ,x,y,z,pitch)] = f2 ; // fB[f_mem(3 ,x,y,z,pitch)] = f3 ; // fB[f_mem(4 ,x,y,z,pitch)] = f4 ; // fB[f_mem(5 ,x,y,z,pitch)] = f5 ; // fB[f_mem(6 ,x,y,z,pitch)] = f6 ; // fB[f_mem(7 ,x,y,z,pitch)] = f7 ; // fB[f_mem(8 ,x,y,z,pitch)] = f8 ; // fB[f_mem(9 ,x,y,z,pitch)] = f9 ; // fB[f_mem(10,x,y,z,pitch)] = f10; // fB[f_mem(11,x,y,z,pitch)] = f11; // fB[f_mem(12,x,y,z,pitch)] = f12; // fB[f_mem(13,x,y,z,pitch)] = f13; // fB[f_mem(14,x,y,z,pitch)] = f14; // fB[f_mem(15,x,y,z,pitch)] = f15; // fB[f_mem(16,x,y,z,pitch)] = f16; // fB[f_mem(17,x,y,z,pitch)] = f17; // fB[f_mem(18,x,y,z,pitch)] = f18; // //// fB[j+pitch*YDIM*ZDIM*1 ] = f1 ; //// fB[j+pitch*YDIM*ZDIM*2 ] = f2 ; //// fB[j+pitch*YDIM*ZDIM*3 ] = f3 ; //// fB[j+pitch*YDIM*ZDIM*4 ] = f4 ; //// fB[j+pitch*YDIM*ZDIM*5 ] = f5 ; //// fB[j+pitch*YDIM*ZDIM*6 ] = f6 ; //// fB[j+pitch*YDIM*ZDIM*7 ] = f7 ; //// fB[j+pitch*YDIM*ZDIM*8 ] = f8 ; //// fB[j+pitch*YDIM*ZDIM*9 ] = f9 ; //// fB[j+pitch*YDIM*ZDIM*10] = f10; //// fB[j+pitch*YDIM*ZDIM*11] = f11; //// fB[j+pitch*YDIM*ZDIM*12] = f12; //// fB[j+pitch*YDIM*ZDIM*13] = f13; //// fB[j+pitch*YDIM*ZDIM*14] = f14; //// fB[j+pitch*YDIM*ZDIM*15] = f15; //// fB[j+pitch*YDIM*ZDIM*16] = f16; //// fB[j+pitch*YDIM*ZDIM*17] = f17; //// fB[j+pitch*YDIM*ZDIM*18] = f18; // // } // else{ // // // float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // // f0 = fA[j]; // //f0 = fA[f_mem(0 ,x ,y ,z ,pitch)]; // if(y != 0){ // f2 = fA[f_mem(2 ,x ,y-1,z ,pitch)]; // } // else if(y != YDIM-1){ // f4 = fA[f_mem(4 ,x ,y+1,z ,pitch)]; // } // if(z != ZDIM-1){ // f14= fA[f_mem(14,x ,y ,z+1,pitch)]; // f16= fA[f_mem(16,x ,y-1,z+1,pitch)]; // f18= fA[f_mem(18,x ,y+1,z+1,pitch)]; // } // else if(z != 0){ // f9 = fA[f_mem(9 ,x ,y ,z-1,pitch)]; // f11= fA[f_mem(11,x ,y-1,z-1,pitch)]; // f13= fA[f_mem(13,x ,y+1,z-1,pitch)]; // } // // if(threadIdx.x != XDIM-1){ // f3 = f3_s[threadIdx.x+1];//fA[f_mem(3 ,x+1,y ,z ,pitch,YDIM,ZDIM)]; // f6 = f6_s[threadIdx.x+1];//fA[f_mem(6 ,x+1,y-1,z ,pitch,YDIM,ZDIM)]; // f7 = f7_s[threadIdx.x+1];//fA[f_mem(7 ,x+1,y+1,z ,pitch,YDIM,ZDIM)]; // f12=f12_s[threadIdx.x+1];//fA[f_mem(12,x+1,y ,z-1,pitch,YDIM,ZDIM)]; // f17=f17_s[threadIdx.x+1];//fA[f_mem(17,x+1,y ,z+1,pitch,YDIM,ZDIM)]; // } // else if(threadIdx.x != 0){ // f1 = f1_s[threadIdx.x-1];//fA[f_mem(1 ,x-1,y ,z ,pitch,YDIM,ZDIM)]; // f5 = f5_s[threadIdx.x-1];//fA[f_mem(5 ,x-1,y-1,z ,pitch,YDIM,ZDIM)]; // f8 = 
f8_s[threadIdx.x-1];//fA[f_mem(8 ,x-1,y+1,z ,pitch,YDIM,ZDIM)]; // f10=f10_s[threadIdx.x-1];//fA[f_mem(10,x-1,y ,z-1,pitch,YDIM,ZDIM)]; // f15=f15_s[threadIdx.x-1];//fA[f_mem(15,x-1,y ,z+1,pitch,YDIM,ZDIM)]; // } // // //// f0 = fA[j]; //// f2 = fA[f_mem(2 ,x ,y-1,z ,pitch,YDIM,ZDIM)]; //// f4 = fA[f_mem(4 ,x ,y+1,z ,pitch,YDIM,ZDIM)]; //// f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,YDIM,ZDIM)]; //// f11= fA[f_mem(11,x ,y-1,z-1,pitch,YDIM,ZDIM)]; //// f13= fA[f_mem(13,x ,y+1,z-1,pitch,YDIM,ZDIM)]; //// f14= fA[f_mem(14,x ,y ,z+1,pitch,YDIM,ZDIM)]; //// f16= fA[f_mem(16,x ,y-1,z+1,pitch,YDIM,ZDIM)]; //// f18= fA[f_mem(18,x ,y+1,z+1,pitch,YDIM,ZDIM)]; //// //// f1 = f1_s[dmax(threadIdx.x-1 )];//fA[f_mem(1 ,x-1,y ,z ,pitch,YDIM,ZDIM)]; //// f3 = f3_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,x+1,y ,z ,pitch,YDIM,ZDIM)]; //// f5 = f5_s[dmax(threadIdx.x-1 )];//fA[f_mem(5 ,x-1,y-1,z ,pitch,YDIM,ZDIM)]; //// f6 = f6_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(6 ,x+1,y-1,z ,pitch,YDIM,ZDIM)]; //// f7 = f7_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(7 ,x+1,y+1,z ,pitch,YDIM,ZDIM)]; //// f8 = f8_s[dmax(threadIdx.x-1 )];//fA[f_mem(8 ,x-1,y+1,z ,pitch,YDIM,ZDIM)]; //// f10=f10_s[dmax(threadIdx.x-1 )];//fA[f_mem(10,x-1,y ,z-1,pitch,YDIM,ZDIM)]; //// f12=f12_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(12,x+1,y ,z-1,pitch,YDIM,ZDIM)]; //// f15=f15_s[dmax(threadIdx.x-1 )];//fA[f_mem(15,x-1,y ,z+1,pitch,YDIM,ZDIM)]; //// f17=f17_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(17,x+1,y ,z+1,pitch,YDIM,ZDIM)]; // // if(im == 3)//DirichletWest // { // if(y == 0){ // f2 = f4; // f6 = f7; // f11 = f13; // f16 = f18; // } // else if(y == YDIM-1){ // f4 = f2; // f7 = f6; // f13 = f11; // f18 = f16; // } // if(z == 0){ // f9 = f14; // f10 = f15; // f11 = f16; // f12 = f17; // f13 = f18; // } // else if(z == ZDIM-1){ // f14 = f9; // f15 = f10; // f16 = f11; // f17 = f12; // f18 = f13; // } //// float fInt1,fInt2;//,fDiff; // float u,v,w;//,rho; // u = 0.0f;//*PoisProf(zcoord)*1.5; // v = UMAX;//0.0; // w = 0.0f; // //// fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18; //// fInt2 = f3+f6+f7+f12+f17; //// rho = u+(fInt1+2.0f*fInt2); //D2Q9i // //// rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i // //// float usqr = u*u+v*v+w*w; // f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; // f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); // f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); // f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); // f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); // //// f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; //// f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); //// f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); //// f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); //// f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); // } // // //mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); // 
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); // // // fB[f_mem(0 ,x,y,z,pitch)] = f0 ; // fB[f_mem(1 ,x,y,z,pitch)] = f1 ; // fB[f_mem(2 ,x,y,z,pitch)] = f2 ; // fB[f_mem(3 ,x,y,z,pitch)] = f3 ; // fB[f_mem(4 ,x,y,z,pitch)] = f4 ; // fB[f_mem(5 ,x,y,z,pitch)] = f5 ; // fB[f_mem(6 ,x,y,z,pitch)] = f6 ; // fB[f_mem(7 ,x,y,z,pitch)] = f7 ; // fB[f_mem(8 ,x,y,z,pitch)] = f8 ; // fB[f_mem(9 ,x,y,z,pitch)] = f9 ; // fB[f_mem(10,x,y,z,pitch)] = f10; // fB[f_mem(11,x,y,z,pitch)] = f11; // fB[f_mem(12,x,y,z,pitch)] = f12; // fB[f_mem(13,x,y,z,pitch)] = f13; // fB[f_mem(14,x,y,z,pitch)] = f14; // fB[f_mem(15,x,y,z,pitch)] = f15; // fB[f_mem(16,x,y,z,pitch)] = f16; // fB[f_mem(17,x,y,z,pitch)] = f17; // fB[f_mem(18,x,y,z,pitch)] = f18; // } //} __global__ void mrt_d_single(float* fA, float* fB, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; // int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) // f1out[j] = tex2D(texRef_f2A,x,y+h*z); // int i = x+y*blockDim.x*gridDim.x; //float u,v,w,rho;//,usqr; int im = ImageFcn(x,y,z); if(im == 1){//BB float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f1 = fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; f2 = fA[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch)];//fA[f_mem(2 ,x,y,z,pitch,YDIM,ZDIM)]; f3 = fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch)];//fA[f_mem(3 ,x,y,z,pitch,YDIM,ZDIM)]; f4 = fA[f_mem(2 ,x ,dmax(y-1) ,z ,pitch)];//fA[f_mem(4 ,x,y,z,pitch,YDIM,ZDIM)]; f5 = fA[f_mem(7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),z ,pitch)];//fA[f_mem(5 ,x,y,z,pitch,YDIM,ZDIM)]; f7 = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch)];//fA[f_mem(7 ,x,y,z,pitch,YDIM,ZDIM)]; f6 = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,YDIM),z ,pitch)];//fA[f_mem(6 ,x,y,z,pitch,YDIM,ZDIM)]; f8 = fA[f_mem(6 ,dmin(x+1,XDIM),dmax(y-1) ,z ,pitch)];//fA[f_mem(8 ,x,y,z,pitch,YDIM,ZDIM)]; f9 = fA[f_mem(14,x ,y ,dmin(z+1,ZDIM),pitch)];//fA[f_mem(9 ,x,y,z,pitch,YDIM,ZDIM)]; f10= fA[f_mem(17,dmin(x+1,XDIM),y ,dmin(z+1,ZDIM) ,pitch)];//fA[f_mem(10,x,y,z,pitch,YDIM,ZDIM)]; f11= fA[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM) ,pitch)];//fA[f_mem(11,x,y,z,pitch,YDIM,ZDIM)]; f12= fA[f_mem(15,dmax(x-1) ,y ,dmin(z+1,ZDIM),pitch)];//fA[f_mem(12,x,y,z,pitch,YDIM,ZDIM)]; f13= fA[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM),pitch)];//fA[f_mem(13,x,y,z,pitch,YDIM,ZDIM)]; f14= fA[f_mem(9 ,x ,y ,dmax(z-1) ,pitch)];//fA[f_mem(14,x,y,z,pitch,YDIM,ZDIM)]; f15= fA[f_mem(12,dmin(x+1,XDIM),y ,dmax(z-1) ,pitch)];//fA[f_mem(15,x,y,z,pitch,YDIM,ZDIM)]; f16= fA[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch)];//fA[f_mem(16,x,y,z,pitch,YDIM,ZDIM)]; f17= fA[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch)];//fA[f_mem(17,x,y,z,pitch,YDIM,ZDIM)]; f18= fA[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch)];//fA[f_mem(18,x,y,z,pitch,YDIM,ZDIM)]; fB[f_mem(1 ,x,y,z,pitch)] = f1 ; fB[f_mem(2 ,x,y,z,pitch)] = f2 ; fB[f_mem(3 ,x,y,z,pitch)] = f3 ; fB[f_mem(4 ,x,y,z,pitch)] = f4 ; fB[f_mem(5 ,x,y,z,pitch)] = f5 ; fB[f_mem(6 ,x,y,z,pitch)] = f6 ; fB[f_mem(7 ,x,y,z,pitch)] = f7 ; fB[f_mem(8 ,x,y,z,pitch)] = f8 ; fB[f_mem(9 ,x,y,z,pitch)] = f9 ; fB[f_mem(10,x,y,z,pitch)] = f10; fB[f_mem(11,x,y,z,pitch)] = f11; fB[f_mem(12,x,y,z,pitch)] = f12; fB[f_mem(13,x,y,z,pitch)] = f13; fB[f_mem(14,x,y,z,pitch)] = f14; fB[f_mem(15,x,y,z,pitch)] = f15; fB[f_mem(16,x,y,z,pitch)] = f16; fB[f_mem(17,x,y,z,pitch)] = 
f17; fB[f_mem(18,x,y,z,pitch)] = f18; // fB[j+pitch*YDIM*ZDIM*1 ] = f1 ; // fB[j+pitch*YDIM*ZDIM*2 ] = f2 ; // fB[j+pitch*YDIM*ZDIM*3 ] = f3 ; // fB[j+pitch*YDIM*ZDIM*4 ] = f4 ; // fB[j+pitch*YDIM*ZDIM*5 ] = f5 ; // fB[j+pitch*YDIM*ZDIM*6 ] = f6 ; // fB[j+pitch*YDIM*ZDIM*7 ] = f7 ; // fB[j+pitch*YDIM*ZDIM*8 ] = f8 ; // fB[j+pitch*YDIM*ZDIM*9 ] = f9 ; // fB[j+pitch*YDIM*ZDIM*10] = f10; // fB[j+pitch*YDIM*ZDIM*11] = f11; // fB[j+pitch*YDIM*ZDIM*12] = f12; // fB[j+pitch*YDIM*ZDIM*13] = f13; // fB[j+pitch*YDIM*ZDIM*14] = f14; // fB[j+pitch*YDIM*ZDIM*15] = f15; // fB[j+pitch*YDIM*ZDIM*16] = f16; // fB[j+pitch*YDIM*ZDIM*17] = f17; // fB[j+pitch*YDIM*ZDIM*18] = f18; } else{ float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fA[j]; f1 = fA[f_mem(1 ,x-1,y ,z ,pitch)]; f2 = fA[f_mem(2 ,x ,y-1,z ,pitch)]; f3 = fA[f_mem(3 ,x+1,y ,z ,pitch)]; f4 = fA[f_mem(4 ,x ,y+1,z ,pitch)]; f5 = fA[f_mem(5 ,x-1,y-1,z ,pitch)]; f6 = fA[f_mem(6 ,x+1,y-1,z ,pitch)]; f7 = fA[f_mem(7 ,x+1,y+1,z ,pitch)]; f8 = fA[f_mem(8 ,x-1,y+1,z ,pitch)]; f9 = fA[f_mem(9 ,x ,y ,z-1,pitch)]; f10= fA[f_mem(10,x-1,y ,z-1,pitch)]; f11= fA[f_mem(11,x ,y-1,z-1,pitch)]; f12= fA[f_mem(12,x+1,y ,z-1,pitch)]; f13= fA[f_mem(13,x ,y+1,z-1,pitch)]; f14= fA[f_mem(14,x ,y ,z+1,pitch)]; f15= fA[f_mem(15,x-1,y ,z+1,pitch)]; f16= fA[f_mem(16,x ,y-1,z+1,pitch)]; f17= fA[f_mem(17,x+1,y ,z+1,pitch)]; f18= fA[f_mem(18,x ,y+1,z+1,pitch)]; if(im == 3)//DirichletWest { if(y == 0){ f2 = f4; f6 = f7; f11 = f13; f16 = f18; } else if(y == YDIM-1){ f4 = f2; f7 = f6; f13 = f11; f18 = f16; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f12 = f17; f13 = f18; } if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f17 = f12; f18 = f13; } // float fInt1,fInt2;//,fDiff; float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; // fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18; // fInt2 = f3+f6+f7+f12+f17; // rho = u+(fInt1+2.0f*fInt2); //D2Q9i // rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i // float usqr = u*u+v*v+w*w; f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); // f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; // f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); // f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); // f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); // f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } //mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fB[f_mem(0 ,x,y,z,pitch)] = f0 ; fB[f_mem(1 ,x,y,z,pitch)] = f1 ; fB[f_mem(2 ,x,y,z,pitch)] = f2 ; fB[f_mem(3 ,x,y,z,pitch)] = f3 ; fB[f_mem(4 ,x,y,z,pitch)] = f4 ; fB[f_mem(5 
,x,y,z,pitch)] = f5 ; fB[f_mem(6 ,x,y,z,pitch)] = f6 ; fB[f_mem(7 ,x,y,z,pitch)] = f7 ; fB[f_mem(8 ,x,y,z,pitch)] = f8 ; fB[f_mem(9 ,x,y,z,pitch)] = f9 ; fB[f_mem(10,x,y,z,pitch)] = f10; fB[f_mem(11,x,y,z,pitch)] = f11; fB[f_mem(12,x,y,z,pitch)] = f12; fB[f_mem(13,x,y,z,pitch)] = f13; fB[f_mem(14,x,y,z,pitch)] = f14; fB[f_mem(15,x,y,z,pitch)] = f15; fB[f_mem(16,x,y,z,pitch)] = f16; fB[f_mem(17,x,y,z,pitch)] = f17; fB[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void initialize_single(float *f, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) float u,v,w,rho,usqr; rho = 1.f; u = 0.0f; v = 0.0f; w = 0.0f; //if(x == 3 ) u = 0.1f; usqr = u*u+v*v+w*w; f[j+0 *pitch*YDIM*ZDIM]= 1.0f/3.0f*(rho-1.5f*usqr); f[j+1 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); f[j+2 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); f[j+3 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); f[j+4 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); f[j+5 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr); f[j+6 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f[j+7 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f[j+8 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr); f[j+9 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); f[j+10*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr); f[j+11*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr); f[j+12*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); f[j+13*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr); f[j+14*pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); f[j+15*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr); f[j+16*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr); f[j+17*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f[j+18*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); } __global__ void initialize(float* f0, float* f1, float* f2, float* f3, float* f4, float* f5, float* f6, float* f7, float* f8, float* f9, float* f10, float* f11, float* f12, float* f13, float* f14, float* f15, float* f16, float* f17, float* f18, size_t pitch)//pitch in elements //__global__ void initialize(void** f0in, void** f1in, // int w, int h, int pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; // int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) // f1out[j] = tex2D(texRef_f2A,x,y+h*z); float u,v,w,rho,feq,usqr; rho = 1.0f; u = 0.0f; v = 0.0f; w = 0.0f; //if(x == 3 ) u = 0.1f; usqr = u*u+v*v+w*w; feq = 1.0f/3.0f*(rho-1.5f*usqr); f0[j] = feq; feq = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); f1[j] = feq; feq = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); f2[j] = feq; feq = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); f3[j] = feq; feq = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); f4[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr); f5[j] = feq; feq = 
1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f6[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f7[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr); f8[j] = feq; feq = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); f9[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr); f10[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr); f11[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); f12[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr); f13[j] = feq; feq = 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); f14[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr); f15[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr); f16[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f17[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); f18[j] = feq; } int main(int argc, char *argv[]) { // float *f0_h, *f1_h, *f2_h, *f3_h, *f4_h, *f5_h, *f6_h, *f7_h, *f8_h, *f9_h; // float *f10_h, *f11_h, *f12_h, *f13_h, *f14_h, *f15_h, *f16_h, *f17_h, *f18_h; // float *f0_dA, *f1_dA, *f2_dA, *f3_dA, *f4_dA, *f5_dA, *f6_dA, *f7_dA, *f8_dA, *f9_dA; // float *f10_dA, *f11_dA, *f12_dA, *f13_dA, *f14_dA, *f15_dA, *f16_dA, *f17_dA, *f18_dA; // float *f0_dB, *f1_dB, *f2_dB, *f3_dB, *f4_dB, *f5_dB, *f6_dB, *f7_dB, *f8_dB, *f9_dB; // float *f10_dB, *f11_dB, *f12_dB, *f13_dB, *f14_dB, *f15_dB, *f16_dB, *f17_dB, *f18_dB; int *image_d, *image_h; //hipPitchedPtr f0_d; ofstream output; output.open ("LBM1_out.dat"); size_t memsize, memsize_int; size_t pitch; int i, n, nBlocks; float omega, CharLength; CharLength = XDIM-2.f; omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f); cout<<"omega: "<<omega<<endl; cout<<"blocksize: "<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl; cout<<"grid: "<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl; cout<<"TMAX: "<<TMAX<<endl; nBlocks = (XDIM/BLOCKSIZEX+XDIM%BLOCKSIZEX)*(YDIM/BLOCKSIZEY+YDIM%BLOCKSIZEY) *(ZDIM/BLOCKSIZEZ+ZDIM%BLOCKSIZEZ); int B = BLOCKSIZEX*BLOCKSIZEY*BLOCKSIZEZ; n = nBlocks*B;//block*dimx*dimy cout<<"nBlocks:"<<nBlocks<<endl; dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ); dim3 grid(XDIM/BLOCKSIZEX,YDIM/BLOCKSIZEY,ZDIM/BLOCKSIZEZ); memsize = n*sizeof(float); memsize_int = n*sizeof(int); hipExtent extent = make_hipExtent(XDIM*sizeof(float),YDIM,ZDIM); image_h = (int *)malloc(memsize_int); float *fA_h,*fA_d,*fB_d; fA_h = (float *)malloc(memsize*19); hipMallocPitch((void **) &fA_d, &pitch, XDIM*sizeof(float), YDIM*ZDIM*19); hipMallocPitch((void **) &fB_d, &pitch, XDIM*sizeof(float), YDIM*ZDIM*19); hipMalloc((void **) &image_d, memsize_int); cout<<pitch<<endl; size_t pitch_elements = pitch/sizeof(float); hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); for (i = 0; i < n*19; i++) { fA_h[i] = i; } for (i = 0; i < n; i++) { int x = i%XDIM; int y = (i/XDIM)%YDIM; int z = (i/XDIM)/YDIM; fA_h[i] = 0; image_h[i] = 0; if(x < 1) image_h[i] = 1;//DirichletWest if(x > XDIM-2) image_h[i] = 1;//BB if(y < 1) image_h[i] = 1;//BB if(y > YDIM-2) image_h[i] = 1;//BB if(z < 1) image_h[i] = 1;//DirichletWest if(z > ZDIM-2) image_h[i] = 1;//BB } hipMemcpy(image_d, image_h, memsize_int, hipMemcpyHostToDevice); if(true)//texture settings { texRef_f0B.normalized = false; texRef_f1B.normalized = false; texRef_f2B.normalized = false; texRef_f3B.normalized = false; texRef_f4B.normalized = false; texRef_f5B.normalized = false; texRef_f6B.normalized = false; texRef_f7B.normalized = false; 
texRef_f8B.normalized = false; texRef_f9B.normalized = false; texRef_f10B.normalized = false; texRef_f11B.normalized = false; texRef_f12B.normalized = false; texRef_f13B.normalized = false; texRef_f14B.normalized = false; texRef_f15B.normalized = false; texRef_f16B.normalized = false; texRef_f17B.normalized = false; texRef_f18B.normalized = false; texRef_f0B.filterMode = hipFilterModePoint; texRef_f1B.filterMode = hipFilterModePoint; texRef_f2B.filterMode = hipFilterModePoint; texRef_f3B.filterMode = hipFilterModePoint; texRef_f4B.filterMode = hipFilterModePoint; texRef_f5B.filterMode = hipFilterModePoint; texRef_f6B.filterMode = hipFilterModePoint; texRef_f7B.filterMode = hipFilterModePoint; texRef_f8B.filterMode = hipFilterModePoint; texRef_f9B.filterMode = hipFilterModePoint; texRef_f10B.filterMode = hipFilterModePoint; texRef_f11B.filterMode = hipFilterModePoint; texRef_f12B.filterMode = hipFilterModePoint; texRef_f13B.filterMode = hipFilterModePoint; texRef_f14B.filterMode = hipFilterModePoint; texRef_f15B.filterMode = hipFilterModePoint; texRef_f16B.filterMode = hipFilterModePoint; texRef_f17B.filterMode = hipFilterModePoint; texRef_f18B.filterMode = hipFilterModePoint; texRef_f0A.normalized = false; texRef_f1A.normalized = false; texRef_f2A.normalized = false; texRef_f3A.normalized = false; texRef_f4A.normalized = false; texRef_f5A.normalized = false; texRef_f6A.normalized = false; texRef_f7A.normalized = false; texRef_f8A.normalized = false; texRef_f9A.normalized = false; texRef_f10A.normalized = false; texRef_f11A.normalized = false; texRef_f12A.normalized = false; texRef_f13A.normalized = false; texRef_f14A.normalized = false; texRef_f15A.normalized = false; texRef_f16A.normalized = false; texRef_f17A.normalized = false; texRef_f18A.normalized = false; texRef_f0A.filterMode = hipFilterModePoint; texRef_f1A.filterMode = hipFilterModePoint; texRef_f2A.filterMode = hipFilterModePoint; texRef_f3A.filterMode = hipFilterModePoint; texRef_f4A.filterMode = hipFilterModePoint; texRef_f5A.filterMode = hipFilterModePoint; texRef_f6A.filterMode = hipFilterModePoint; texRef_f7A.filterMode = hipFilterModePoint; texRef_f8A.filterMode = hipFilterModePoint; texRef_f9A.filterMode = hipFilterModePoint; texRef_f10A.filterMode = hipFilterModePoint; texRef_f11A.filterMode = hipFilterModePoint; texRef_f12A.filterMode = hipFilterModePoint; texRef_f13A.filterMode = hipFilterModePoint; texRef_f14A.filterMode = hipFilterModePoint; texRef_f15A.filterMode = hipFilterModePoint; texRef_f16A.filterMode = hipFilterModePoint; texRef_f17A.filterMode = hipFilterModePoint; texRef_f18A.filterMode = hipFilterModePoint; } hipMemcpy2D(fA_d ,pitch,fA_h ,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM*19,hipMemcpyHostToDevice); hipMemcpy2D(fB_d ,pitch,fA_h ,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM*19,hipMemcpyHostToDevice); for (i = 0; i < n*19; i++) { fA_h[i] = 0; } if(true)//bind texture { hipBindTexture2D(0,&texRef_f0A, fA_d ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f1A, fA_d+pitch_elements*YDIM*ZDIM ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f2A, fA_d+pitch_elements*YDIM*ZDIM*2 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f3A, fA_d+pitch_elements*YDIM*ZDIM*3 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f4A, fA_d+pitch_elements*YDIM*ZDIM*4 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f5A, fA_d+pitch_elements*YDIM*ZDIM*5 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f6A, fA_d+pitch_elements*YDIM*ZDIM*6 ,&desc,XDIM,YDIM*ZDIM,pitch); 
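// Layout note (descriptive, not in the original comments): each of the 19 distributions
// occupies YDIM*ZDIM pitched rows of the fA_d/fB_d allocations, so distribution n of array A
// starts at fA_d + pitch_elements*YDIM*ZDIM*n. The 2D textures are bound over an
// XDIM x (YDIM*ZDIM) region, which lets the kernels fetch the value at node (x,y,z) with
// tex2D(texRef_fnA, x, y + YDIM*z). The width/height arguments of hipBindTexture2D are in
// elements, while the final pitch argument is the row pitch in bytes returned by hipMallocPitch.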
hipBindTexture2D(0,&texRef_f7A, fA_d+pitch_elements*YDIM*ZDIM*7 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f8A, fA_d+pitch_elements*YDIM*ZDIM*8 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f9A, fA_d+pitch_elements*YDIM*ZDIM*9 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f10A,fA_d+pitch_elements*YDIM*ZDIM*10,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f11A,fA_d+pitch_elements*YDIM*ZDIM*11,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f12A,fA_d+pitch_elements*YDIM*ZDIM*12,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f13A,fA_d+pitch_elements*YDIM*ZDIM*13,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f14A,fA_d+pitch_elements*YDIM*ZDIM*14,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f15A,fA_d+pitch_elements*YDIM*ZDIM*15,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f16A,fA_d+pitch_elements*YDIM*ZDIM*16,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f17A,fA_d+pitch_elements*YDIM*ZDIM*17,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f18A,fA_d+pitch_elements*YDIM*ZDIM*18,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f0B, fB_d ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f1B, fB_d+pitch_elements*YDIM*ZDIM ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f2B, fB_d+pitch_elements*YDIM*ZDIM*2 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f3B, fB_d+pitch_elements*YDIM*ZDIM*3 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f4B, fB_d+pitch_elements*YDIM*ZDIM*4 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f5B, fB_d+pitch_elements*YDIM*ZDIM*5 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f6B, fB_d+pitch_elements*YDIM*ZDIM*6 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f7B, fB_d+pitch_elements*YDIM*ZDIM*7 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f8B, fB_d+pitch_elements*YDIM*ZDIM*8 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f9B, fB_d+pitch_elements*YDIM*ZDIM*9 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f10B,fB_d+pitch_elements*YDIM*ZDIM*10,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f11B,fB_d+pitch_elements*YDIM*ZDIM*11,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f12B,fB_d+pitch_elements*YDIM*ZDIM*12,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f13B,fB_d+pitch_elements*YDIM*ZDIM*13,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f14B,fB_d+pitch_elements*YDIM*ZDIM*14,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f15B,fB_d+pitch_elements*YDIM*ZDIM*15,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f16B,fB_d+pitch_elements*YDIM*ZDIM*16,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f17B,fB_d+pitch_elements*YDIM*ZDIM*17,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f18B,fB_d+pitch_elements*YDIM*ZDIM*18,&desc,XDIM,YDIM*ZDIM,pitch); } // initialize<<<grid, threads>>>(f0_dA.ptr, f1_dA.ptr, f2_dA.ptr, f3_dA.ptr, f4_dA.ptr, f5_dA.ptr, f6_dA.ptr, f7_dA.ptr, f8_dA.ptr, f9_dA.ptr, // f10_dA.ptr, f11_dA.ptr, f12_dA.ptr, f13_dA.ptr, f14_dA.ptr, f15_dA.ptr, f16_dA.ptr, f17_dA.ptr, f18_dA.ptr, // XDIM,YDIM,pitch); // initialize<<<grid, threads>>>(f0_dA, f1_dA, f2_dA, f3_dA, f4_dA, f5_dA, f6_dA, f7_dA, f8_dA, f9_dA, // f10_dA, f11_dA, f12_dA, f13_dA, f14_dA, f15_dA, f16_dA, f17_dA, f18_dA, // XDIM,YDIM,pitch_elements); hipLaunchKernelGGL(( initialize_single), dim3(grid), dim3(threads), 0, 0, fA_d,pitch_elements); // hipFuncSetCacheConfig(mrt_d_single,hipFuncCachePreferL1); struct timeval tdr0,tdr1; 
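// Sketch (not in the original code, assuming the standard HIP runtime error API): check the
// initialize_single launch before entering the timed loop. hipGetLastError() returns and clears
// the last launch error; the hipDeviceSynchronize() issued just below surfaces asynchronous faults.
hipError_t init_err = hipGetLastError();
if (init_err != hipSuccess)
    cout << "initialize_single launch failed: " << hipGetErrorString(init_err) << endl;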
double restime; hipDeviceSynchronize(); gettimeofday (&tdr0,NULL); for(int t = 0; t<TMAX; t=t+2){ //for(int t = 0; t<TMAX; t=t+1){ if(METHOD == "SINGLE"){ hipLaunchKernelGGL(( mrt_d_single), dim3(grid), dim3(threads), 0, 0, fA_d,fB_d,omega,pitch_elements); hipLaunchKernelGGL(( mrt_d_single), dim3(grid), dim3(threads), 0, 0, fB_d,fA_d,omega,pitch_elements); } if(METHOD == "HYB"){ hipLaunchKernelGGL(( mrt_d_hybAB), dim3(grid), dim3(threads), 0, 0, fA_d,fB_d,omega,pitch_elements); hipLaunchKernelGGL(( mrt_d_hybBA), dim3(grid), dim3(threads), 0, 0, fB_d,fA_d,omega,pitch_elements); } if(METHOD == "TEXT"){ hipLaunchKernelGGL(( mrt_d_textAB), dim3(grid), dim3(threads), 0, 0, fA_d,fB_d,image_d,omega,pitch_elements); hipLaunchKernelGGL(( mrt_d_textBA), dim3(grid), dim3(threads), 0, 0, fB_d,fA_d,image_d,omega,pitch_elements); } if(METHOD == "SHARED"){ hipLaunchKernelGGL(( mrt_d_shared), dim3(grid), dim3(threads), 0, 0, fA_d,fB_d,omega,pitch_elements); hipLaunchKernelGGL(( mrt_d_shared), dim3(grid), dim3(threads), 0, 0, fB_d,fA_d,omega,pitch_elements); } // simple_copy<<<grid, threads>>>(fA_d,fB_d,image_d,omega,UMAX,XDIM,YDIM,ZDIM,pitch_elements); // simple_copy<<<grid, threads>>>(fB_d,fA_d,image_d,omega,UMAX,XDIM,YDIM,ZDIM,pitch_elements); if(t%1000 == 0 && t>0) cout<<"finished "<<t<<" timesteps\n"; } hipDeviceSynchronize(); gettimeofday (&tdr1,NULL); timeval_subtract (&restime, &tdr1, &tdr0); cout<<"Time taken for main kernel: "<<restime<<" (" <<double(XDIM*YDIM*ZDIM*double(TMAX/1000000.f))/restime<<"MLUPS)"<<endl; cout<<XDIM<<","<<YDIM<<","<<ZDIM<<","<<TMAX<<","<<restime<<endl; // copytest<<<grid, threads>>>(f10_dA,test_d,XDIM,YDIM,ZDIM); //copytest<<<grid, threads>>>(test_d); //copytest<<<grid, threads>>>(image_d); hipUnbindTexture(texRef_f0A); hipUnbindTexture(texRef_f1A); hipUnbindTexture(texRef_f2A); hipUnbindTexture(texRef_f3A); hipUnbindTexture(texRef_f4A); hipUnbindTexture(texRef_f5A); hipUnbindTexture(texRef_f6A); hipUnbindTexture(texRef_f7A); hipUnbindTexture(texRef_f8A); hipUnbindTexture(texRef_f9A); hipUnbindTexture(texRef_f10A); hipUnbindTexture(texRef_f11A); hipUnbindTexture(texRef_f12A); hipUnbindTexture(texRef_f13A); hipUnbindTexture(texRef_f14A); hipUnbindTexture(texRef_f15A); hipUnbindTexture(texRef_f16A); hipUnbindTexture(texRef_f17A); hipUnbindTexture(texRef_f18A); hipUnbindTexture(texRef_f0B); hipUnbindTexture(texRef_f1B); hipUnbindTexture(texRef_f2B); hipUnbindTexture(texRef_f3B); hipUnbindTexture(texRef_f4B); hipUnbindTexture(texRef_f5B); hipUnbindTexture(texRef_f6B); hipUnbindTexture(texRef_f7B); hipUnbindTexture(texRef_f8B); hipUnbindTexture(texRef_f9B); hipUnbindTexture(texRef_f10B); hipUnbindTexture(texRef_f11B); hipUnbindTexture(texRef_f12B); hipUnbindTexture(texRef_f13B); hipUnbindTexture(texRef_f14B); hipUnbindTexture(texRef_f15B); hipUnbindTexture(texRef_f16B); hipUnbindTexture(texRef_f17B); hipUnbindTexture(texRef_f18B); // hipMemcpy2D(f0_h,XDIM*sizeof(float) , f0_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f1_h,XDIM*sizeof(float) , f1_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f2_h,XDIM*sizeof(float) , f2_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f3_h,XDIM*sizeof(float) , f3_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f4_h,XDIM*sizeof(float) , f4_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f5_h,XDIM*sizeof(float) , f5_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // 
hipMemcpy2D(f6_h,XDIM*sizeof(float) , f6_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f7_h,XDIM*sizeof(float) , f7_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f8_h,XDIM*sizeof(float) , f8_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f9_h,XDIM*sizeof(float) , f9_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f10_h,XDIM*sizeof(float),f10_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f11_h,XDIM*sizeof(float),f11_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f12_h,XDIM*sizeof(float),f12_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f13_h,XDIM*sizeof(float),f13_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f14_h,XDIM*sizeof(float),f14_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f15_h,XDIM*sizeof(float),f15_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f16_h,XDIM*sizeof(float),f16_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f17_h,XDIM*sizeof(float),f17_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f18_h,XDIM*sizeof(float),f18_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); hipMemcpy2D(fA_h,XDIM*sizeof(float),fA_d,pitch,XDIM*sizeof(float),YDIM*ZDIM*19,hipMemcpyDeviceToHost); // cout<<"f1_h is "<<f1_h[0]<<endl; //hipMemcpy(f0_h, f0_d.ptr, memsize, hipMemcpyDeviceToHost); hipMemcpy(image_h, image_d, memsize_int, hipMemcpyDeviceToHost); // cout<<image_h[0]<<endl; // cout<<"test_d: "<<test_h[0]<<endl; // for(i = 0; i<n; i++){ // cout<<f0_h[i]<<","; // } output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\"\n"; output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM<<"\n"; int row = 0; int col = 0; int dep = 0; i = 0; float rho, u, v, w; int j; for(dep = 0; dep<ZDIM; dep++){ for(row = 0; row<YDIM; row++){ for(col = 0; col<XDIM; col++){ i = dep*XDIM*YDIM+row*XDIM+col; // rho = 0; rho = fA_h[i]; for(j = 1; j<19; j++) rho+=fA_h[i+XDIM*YDIM*ZDIM*j]; // rho = f0_h[i]+f1_h[i]+f2_h[i]+f3_h[i]+f4_h[i]+f5_h[i]+f6_h[i]+f7_h[i]+f8_h[i]+f9_h[i]+ // f10_h[i]+f11_h[i]+f12_h[i]+f13_h[i]+f14_h[i]+f15_h[i]+f16_h[i]+f17_h[i]+f18_h[i]; u = fA_h[i+XDIM*YDIM*ZDIM*1]-fA_h[i+XDIM*YDIM*ZDIM*3]+fA_h[i+XDIM*YDIM*ZDIM*5]-fA_h[i+XDIM*YDIM*ZDIM*6]- fA_h[i+XDIM*YDIM*ZDIM*7]+fA_h[i+XDIM*YDIM*ZDIM*8]+fA_h[i+XDIM*YDIM*ZDIM*10]-fA_h[i+XDIM*YDIM*ZDIM*12] +fA_h[i+XDIM*YDIM*ZDIM*15]-fA_h[i+XDIM*YDIM*ZDIM*17]; v = fA_h[i+XDIM*YDIM*ZDIM*2]-fA_h[i+XDIM*YDIM*ZDIM*4]+fA_h[i+XDIM*YDIM*ZDIM*5]+fA_h[i+XDIM*YDIM*ZDIM*6]-fA_h[i+XDIM*YDIM*ZDIM*7]-fA_h[i+XDIM*YDIM*ZDIM*8]+fA_h[i+XDIM*YDIM*ZDIM*11]-fA_h[i+XDIM*YDIM*ZDIM*13]+fA_h[i+XDIM*YDIM*ZDIM*16]-fA_h[i+XDIM*YDIM*ZDIM*18]; w = fA_h[i+XDIM*YDIM*ZDIM*9]+fA_h[i+XDIM*YDIM*ZDIM*10]+fA_h[i+XDIM*YDIM*ZDIM*11]+fA_h[i+XDIM*YDIM*ZDIM*12]+fA_h[i+XDIM*YDIM*ZDIM*13]-fA_h[i+XDIM*YDIM*ZDIM*14]-fA_h[i+XDIM*YDIM*ZDIM*15]-fA_h[i+XDIM*YDIM*ZDIM*16]-fA_h[i+XDIM*YDIM*ZDIM*17]-fA_h[i+XDIM*YDIM*ZDIM*18]; output<<col<<", "<<row<<", "<<dep<<", "<<u<<","<<v<<","<<w<<","<<rho<<endl; // output<<col<<", "<<row<<", "<<dep<<", "<<u<<","<<v<<","<<fA_h[i+XDIM*YDIM*ZDIM*1]<<","<<rho<<endl; } } } output.close(); hipFree(image_d); // hipFree(f0_dA); // hipFree(f1_dA); // hipFree(f2_dA); // hipFree(f3_dA); // hipFree(f4_dA); // hipFree(f5_dA); // hipFree(f6_dA); // hipFree(f7_dA); // hipFree(f8_dA); // hipFree(f9_dA); // 
hipFree(f10_dA); // hipFree(f11_dA); // hipFree(f12_dA); // hipFree(f13_dA); // hipFree(f14_dA); // hipFree(f15_dA); // hipFree(f16_dA); // hipFree(f17_dA); // hipFree(f18_dA); // hipFree(f0_dB); // hipFree(f1_dB); // hipFree(f2_dB); // hipFree(f3_dB); // hipFree(f4_dB); // hipFree(f5_dB); // hipFree(f6_dB); // hipFree(f7_dB); // hipFree(f8_dB); // hipFree(f9_dB); // hipFree(f10_dB); // hipFree(f11_dB); // hipFree(f12_dB); // hipFree(f13_dB); // hipFree(f14_dB); // hipFree(f15_dB); // hipFree(f16_dB); // hipFree(f17_dB); // hipFree(f18_dB); hipFree(fA_d); hipFree(fB_d); return(0); }
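// Sketch (not part of the original file): the D3Q19 equilibrium that initialize,
// initialize_single and bgk_collide expand by hand, written once with the standard weight and
// lattice-velocity tables in the same direction ordering used above
// (0: rest, 1-4: +x,+y,-x,-y, 5-8: xy diagonals, 9-13: +z group, 14-18: -z group).
// Note that the projection e_i.u is squared for every direction:
//   feq_i = w_i*(rho + 3*(e_i.u) + 4.5*(e_i.u)^2 - 1.5*|u|^2)
inline __host__ __device__ float feq_d3q19(int i, float rho, float u, float v, float w)
{
    const float wt[19] = { 1.f/3.f,
                           1.f/18.f, 1.f/18.f, 1.f/18.f, 1.f/18.f,
                           1.f/36.f, 1.f/36.f, 1.f/36.f, 1.f/36.f,
                           1.f/18.f, 1.f/36.f, 1.f/36.f, 1.f/36.f, 1.f/36.f,
                           1.f/18.f, 1.f/36.f, 1.f/36.f, 1.f/36.f, 1.f/36.f };
    const int ex[19] = { 0, 1, 0,-1, 0, 1,-1,-1, 1, 0, 1, 0,-1, 0, 0, 1, 0,-1, 0 };
    const int ey[19] = { 0, 0, 1, 0,-1, 1, 1,-1,-1, 0, 0, 1, 0,-1, 0, 0, 1, 0,-1 };
    const int ez[19] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 };
    float eu   = ex[i]*u + ey[i]*v + ez[i]*w;   // lattice-velocity projection e_i.u
    float usqr = u*u + v*v + w*w;
    return wt[i]*(rho + 3.0f*eu + 4.5f*eu*eu - 1.5f*usqr);
}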
09c76457c6452d897a3dce140f13b5239f7db9ec.cu
#include <cuda.h> //#include <cutil.h> #include <iostream> #include <ostream> #include <fstream> //#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h" using namespace std; #define BLOCKSIZEX 256 #define BLOCKSIZEY 1 #define BLOCKSIZEZ 1 #define XDIM 256 #define YDIM 128 #define ZDIM 32 #define TMAX 1000 #define RE 100.f//100.f; #define UMAX 0.08f #define METHOD "SHARED" //SINGLE,HYB,TEXT,SHARED //#define CHARLENGTH = XDIM-2.f; //#define BLOCKSIZE 16; //int const XDIM = 32; //int const YDIM = 32; #include <sys/time.h> #include <time.h> inline __device__ int ImageFcn(int x, int y, int z){ if(y == 0 || z == 0 || x == XDIM-1 || y == YDIM-1 || z == ZDIM-1) return 1; else if(x == 0) return 3; else return 0; } texture<float,2,cudaReadModeElementType> texRef_f0A; texture<float,2,cudaReadModeElementType> texRef_f1A; texture<float,2,cudaReadModeElementType> texRef_f2A; texture<float,2,cudaReadModeElementType> texRef_f3A; texture<float,2,cudaReadModeElementType> texRef_f4A; texture<float,2,cudaReadModeElementType> texRef_f5A; texture<float,2,cudaReadModeElementType> texRef_f6A; texture<float,2,cudaReadModeElementType> texRef_f7A; texture<float,2,cudaReadModeElementType> texRef_f8A; texture<float,2,cudaReadModeElementType> texRef_f9A; texture<float,2,cudaReadModeElementType> texRef_f10A; texture<float,2,cudaReadModeElementType> texRef_f11A; texture<float,2,cudaReadModeElementType> texRef_f12A; texture<float,2,cudaReadModeElementType> texRef_f13A; texture<float,2,cudaReadModeElementType> texRef_f14A; texture<float,2,cudaReadModeElementType> texRef_f15A; texture<float,2,cudaReadModeElementType> texRef_f16A; texture<float,2,cudaReadModeElementType> texRef_f17A; texture<float,2,cudaReadModeElementType> texRef_f18A; texture<float,2,cudaReadModeElementType> texRef_f0B; texture<float,2,cudaReadModeElementType> texRef_f1B; texture<float,2,cudaReadModeElementType> texRef_f2B; texture<float,2,cudaReadModeElementType> texRef_f3B; texture<float,2,cudaReadModeElementType> texRef_f4B; texture<float,2,cudaReadModeElementType> texRef_f5B; texture<float,2,cudaReadModeElementType> texRef_f6B; texture<float,2,cudaReadModeElementType> texRef_f7B; texture<float,2,cudaReadModeElementType> texRef_f8B; texture<float,2,cudaReadModeElementType> texRef_f9B; texture<float,2,cudaReadModeElementType> texRef_f10B; texture<float,2,cudaReadModeElementType> texRef_f11B; texture<float,2,cudaReadModeElementType> texRef_f12B; texture<float,2,cudaReadModeElementType> texRef_f13B; texture<float,2,cudaReadModeElementType> texRef_f14B; texture<float,2,cudaReadModeElementType> texRef_f15B; texture<float,2,cudaReadModeElementType> texRef_f16B; texture<float,2,cudaReadModeElementType> texRef_f17B; texture<float,2,cudaReadModeElementType> texRef_f18B; int timeval_subtract (double *result, struct timeval *x, struct timeval *y) { struct timeval result0; /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (y->tv_usec - x->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. tv_usec is certainly positive. */ result0.tv_sec = x->tv_sec - y->tv_sec; result0.tv_usec = x->tv_usec - y->tv_usec; *result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } inline __device__ void bgk_collide(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float omega) { float rho,u,v,w; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+ f10+f11+f12+f13+f14+f15+f16+f17+f18; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; // float usqr = u*u+v*v+w*w; float usqr = fma(u,u,fma(v,v,w*w)); // f0 -= omega*fma(-0.3333333333f,(fma(-1.5f,usqr,rho)),f0);//(f0 -0.3333333333f*(fma(-1.5f,usqr,rho)));//rho-1.5f*usqr)); // f1 -= omega*fma(-0.0555555556f,fma(3.0f, u ,rho)+fma(4.5f,u*u,-1.5f*usqr),f1);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f2 -= omega*fma(-0.0555555556f,fma(3.0f, v ,rho)+fma(4.5f,v*v,-1.5f*usqr),f2);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f3 -= omega*fma(-0.0555555556f,fma(3.0f, u ,rho)+fma(4.5f,u*u,-1.5f*usqr),f3);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f4 -= omega*fma(-0.0555555556f,fma(3.0f, v ,rho)+fma(4.5f,v*v,-1.5f*usqr),f4);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f5 -= omega*fma(-0.0555555556f,fma(3.0f,( u+v),rho)+fma(4.5f,( u+v)*( u+v),-1.5f*usqr),f5 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f6 -= omega*fma(-0.0555555556f,fma(3.0f,(-u+v),rho)+fma(4.5f,(-u+v)*(-u+v),-1.5f*usqr),f6 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f7 -= omega*fma(-0.0555555556f,fma(3.0f,(-u-v),rho)+fma(4.5f,(-u-v)*(-u-v),-1.5f*usqr),f7 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f8 -= omega*fma(-0.0555555556f,fma(3.0f,( u-v),rho)+fma(4.5f,( u-v)*( u-v),-1.5f*usqr),f8 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f9 -= omega*fma(-0.0555555556f,fma(3.0f,( w),rho)+fma(4.5f,( w)*( w),-1.5f*usqr),f9 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f10-= omega*fma(-0.0277777778f,fma(3.0f,( u+w),rho)+fma(4.5f,( u+w)*( u+w),-1.5f*usqr),f10);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f11-= omega*fma(-0.0277777778f,fma(3.0f,( v+w),rho)+fma(4.5f,( v+w)*( v+w),-1.5f*usqr),f11);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f12-= omega*fma(-0.0277777778f,fma(3.0f,(-u+w),rho)+fma(4.5f,(-u+w)*(-u+w),-1.5f*usqr),f12);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f13-= omega*fma(-0.0277777778f,fma(3.0f,(-v+w),rho)+fma(4.5f,(-v+w)*(-v+w),-1.5f*usqr),f13);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f14-= omega*fma(-0.0555555556f,fma(3.0f,( -w),rho)+fma(4.5f,( -w)*( -w),-1.5f*usqr),f14);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f15-= omega*fma(-0.0277777778f,fma(3.0f,( u-w),rho)+fma(4.5f,( u-w)*( u-w),-1.5f*usqr),f15);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f16-= omega*fma(-0.0277777778f,fma(3.0f,( v-w),rho)+fma(4.5f,( v-w)*( v-w),-1.5f*usqr),f16);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f17-= omega*fma(-0.0277777778f,fma(3.0f,(-u-w),rho)+fma(4.5f,(-u-w)*(-u-w),-1.5f*usqr),f17);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f18-= omega*fma(-0.0277777778f,fma(3.0f,(-v-w),rho)+fma(4.5f,(-v-w)*(-v-w),-1.5f*usqr),f18);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); f0 = f0 -omega*(f0 -0.3333333333f*(rho-1.5f*usqr)); f1 = f1 -omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); f2 = f2 -omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr)); f3 = f3 -omega*(f3 
-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr)); f4 = f4 -omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr)); f5 = f5 -omega*(f5 -0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)); f6 = f6 -omega*(f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr)); f7 = f7 -omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr)); f8 = f8 -omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)); f9 = f9 -omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr)); f10= f10-omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)); f11= f11-omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr)); f12= f12-omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr)); f13= f13-omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr)); f14= f14-omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr)); f15= f15-omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)); f16= f16-omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr)); f17= f17-omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr)); f18= f18-omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr)); } __device__ void mrt_collide(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float omega) { float rho,u,v,w; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+ f10+f11+f12+f13+f14+f15+f16+f17+f18; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18; m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18; m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ 1.f*f8+ -4.f*f9+ f10+ 1.f*f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18; m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ; m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18; m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18; m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18; m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18; m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ; m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ; m13 = f5+ - f6+ f7+ - f8 ; m14 = f11 + - f13 + - f16 + f18; m15 = f10 + - f12 + - f15 + f17 ; m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ; m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18; m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18; m1 -= -11.f*rho+19.f*(u*u+v*v+w*w); m2 -= -7.53968254f*(u*u+v*v+w*w); m4 -= -0.66666667f*u;//qx_eq m6 -= -0.66666667f*v;//qx_eq m8 -= -0.66666667f*w;//qx_eq m9 -= (2.f*u*u-(v*v+w*w));//(2.f*.f*.f-(u1*u1+u2*u2));///3.f;//pxx_eq m11-= (v*v-w*w);//pww_eq m13-= u*v;//pxy_eq m14-= v*w;//pyz_eq m15-= u*w;//pxz_eq f0 -= - 0.012531328f*(m1)+ 0.047619048f*(m2); f1 -= -0.0045948204f*(m1)+ -0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*(m9)*omega + -0.055555556f*(m10); f2 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + -0.1f*(m6) + -0.027777778f*(m9)*omega + 0.027777778f*(m10); f3 -= 
-0.0045948204f*(m1)+ -0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*(m9)*omega + -0.055555556f*(m10); f4 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + 0.1f*(m6) + -0.027777778f*(m9)*omega + 0.027777778f*(m10); f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4)+ 0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10); f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4)+ 0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10); f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4)+ -0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10); f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4)+ -0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10); f9 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + -0.1f*(m8)+ -0.027777778f*(m9)*omega + 0.027777778f*(m10); f10 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4) + 0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10); f11 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6)+ 0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10); f12 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4) + 0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10); f13 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6)+ 0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10); f14 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + 0.1f*(m8)+ -0.027777778f*(m9)*omega + 0.027777778f*(m10); f15 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4) + -0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10); f16 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6)+ -0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10); f17 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4) + -0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10); f18 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6)+ -0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10); f2 -= 0.083333333f*(m11)*omega + -0.083333333f*(m12); f4 -= 0.083333333f*(m11)*omega + -0.083333333f*(m12); f5 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ ( 0.25f*(m13) )*omega; f6 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ (-0.25f*(m13) )*omega; f7 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ ( 0.25f*(m13) )*omega; f8 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ (-0.25f*(m13) )*omega; f9 -= -0.083333333f*(m11)*omega + 0.083333333f*(m12); f10 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + 0.25f*(m15))*omega ; f11 -= +( 0.25f*(m14) )*omega ; f12 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + -0.25f*(m15))*omega ; f13 -= +( -0.25f*(m14) )*omega ; f14 -= -0.083333333f*(m11)*omega + 0.083333333f*(m12); f15 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + -0.25f*(m15))*omega ; f16 -= +( -0.25f*(m14) )*omega ; f17 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + 0.25f*(m15))*omega ; f18 -= +( 0.25f*(m14) )*omega ; f5 -= 0.125f*(m16)+ -0.125f*(m17); f6 -= -0.125f*(m16)+ -0.125f*(m17); f7 -= -0.125f*(m16)+ 0.125f*(m17); f8 -= 0.125f*(m16)+ 0.125f*(m17); f10 -= -0.125f*(m16) + 0.125f*(m18); f11 -= + 0.125f*(m17)+ -0.125f*(m18); f12 -= 0.125f*(m16) + 0.125f*(m18); f13 -= + -0.125f*(m17)+ -0.125f*(m18); f15 -= -0.125f*(m16) + -0.125f*(m18); f16 -= + 0.125f*(m17)+ 0.125f*(m18); f17 -= 0.125f*(m16) + -0.125f*(m18); f18 -= + -0.125f*(m17)+ 0.125f*(m18); } inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch) { // if (x<0 || x>pitch || y<0 || y>YDIM || z<0 || z>ZDIM) return 0; // else return (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*ZDIM; } __device__ int dmin(int a, int b) { if (a<b) return a; else 
return b-1; } __device__ int dmax(int a) { if (a>-1) return a; else return 0; } __global__ void simple_copy(float* fA, float* fB, int *image, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) // fB[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(2 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(2 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(3 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(3 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(4 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(4 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(5 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(5 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(6 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(6 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(7 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(7 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(8 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(8 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(9 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(9 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(10,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(10,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(11,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(11,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(12,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(12,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(13,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(13,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(14,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(14,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(15,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(15,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(16,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(16,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(17,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(17,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(18,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(18,x,y,z,pitch,YDIM,ZDIM)]; // float f0;//,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // float f0 = fA[j+pitch*YDIM*ZDIM]; // float f0 = fA[f_mem(0 ,x,y,z,pitch,YDIM,ZDIM)]; // f0 = tex2D(texRef_f0A ,x,y+YDIM*z); // f1 = tex2D(texRef_f1A ,x,y+YDIM*z); // f2 = tex2D(texRef_f2A ,x,y+YDIM*z); // f3 = tex2D(texRef_f3A ,x,y+YDIM*z); // f4 = tex2D(texRef_f4A ,x,y+YDIM*z); // f5 = tex2D(texRef_f5A ,x,y+YDIM*z); // f6 = tex2D(texRef_f6A ,x,y+YDIM*z); // f7 = tex2D(texRef_f7A ,x,y+YDIM*z); // f8 = tex2D(texRef_f8A ,x,y+YDIM*z); // f9 = tex2D(texRef_f9A ,x,y+YDIM*z); // f10 = tex2D(texRef_f10A,x,y+YDIM*z); // f11 = tex2D(texRef_f11A,x,y+YDIM*z); // f12 = tex2D(texRef_f12A,x,y+YDIM*z); // f13 = tex2D(texRef_f13A,x,y+YDIM*z); // f14 = tex2D(texRef_f14A,x,y+YDIM*z); // f15 = tex2D(texRef_f15A,x,y+YDIM*z); // f16 = tex2D(texRef_f16A,x,y+YDIM*z); // f17 = tex2D(texRef_f17A,x,y+YDIM*z); // f18 = tex2D(texRef_f18A,x,y+YDIM*z); // float f1 = fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f1 = fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f2 = fA[f_mem(2 ,x,y,z,pitch,YDIM,ZDIM)]; // f3 = fA[f_mem(3 ,x,y,z,pitch,YDIM,ZDIM)]; // f4 = fA[f_mem(4 ,x,y,z,pitch,YDIM,ZDIM)]; // f5 = fA[f_mem(5 ,x,y,z,pitch,YDIM,ZDIM)]; // f6 = fA[f_mem(6 ,x,y,z,pitch,YDIM,ZDIM)]; // f7 = fA[f_mem(7 ,x,y,z,pitch,YDIM,ZDIM)]; // f8 = fA[f_mem(8 ,x,y,z,pitch,YDIM,ZDIM)]; // f9 = fA[f_mem(9 ,x,y,z,pitch,YDIM,ZDIM)]; // f10 = fA[f_mem(10,x,y,z,pitch,YDIM,ZDIM)]; // f11 = fA[f_mem(11,x,y,z,pitch,YDIM,ZDIM)]; // f12 = fA[f_mem(12,x,y,z,pitch,YDIM,ZDIM)]; // f13 = fA[f_mem(13,x,y,z,pitch,YDIM,ZDIM)]; // f14 = fA[f_mem(14,x,y,z,pitch,YDIM,ZDIM)]; // f15 = fA[f_mem(15,x,y,z,pitch,YDIM,ZDIM)]; // f16 = fA[f_mem(16,x,y,z,pitch,YDIM,ZDIM)]; // f17 = 
fA[f_mem(17,x,y,z,pitch,YDIM,ZDIM)]; // f18 = fA[f_mem(18,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(0 ,x,y,z,pitch,YDIM,ZDIM)] = fA[f_mem(0 ,x,y,z,pitch,YDIM,ZDIM)];//+0.01f; fB[j] = fA[j];//+0.01f; // fB[j+pitch*YDIM*ZDIM+pitch*YDIM*ZDIM] = f2; // fB[(x+y*pitch+z*YDIM*pitch)+pitch*YDIM*ZDIM] = f1 ;//+0.01f; // fB[f_mem(0 ,x,y,z,pitch,YDIM,ZDIM)] = f0 ; // fB[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)] = f1;//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // fB[f_mem(2 ,x,y,z,pitch,YDIM,ZDIM)] = f2 ;//+0.01f; // fB[f_mem(3 ,x,y,z,pitch,YDIM,ZDIM)] = f3 ;//+0.01f; // fB[f_mem(4 ,x,y,z,pitch,YDIM,ZDIM)] = f4 ;//+0.01f; // fB[f_mem(5 ,x,y,z,pitch,YDIM,ZDIM)] = f5 ;//+0.01f; // fB[f_mem(6 ,x,y,z,pitch,YDIM,ZDIM)] = f6 ;//+0.01f; // fB[f_mem(7 ,x,y,z,pitch,YDIM,ZDIM)] = f7 ;//+0.01f; // fB[f_mem(8 ,x,y,z,pitch,YDIM,ZDIM)] = f8 ;//+0.01f; // fB[f_mem(9 ,x,y,z,pitch,YDIM,ZDIM)] = f9 ;//+0.01f; // fB[f_mem(10,x,y,z,pitch,YDIM,ZDIM)] = f10;//+0.01f; // fB[f_mem(11,x,y,z,pitch,YDIM,ZDIM)] = f11;//+0.01f; // fB[f_mem(12,x,y,z,pitch,YDIM,ZDIM)] = f12;//+0.01f; // fB[f_mem(13,x,y,z,pitch,YDIM,ZDIM)] = f13;//+0.01f; // fB[f_mem(14,x,y,z,pitch,YDIM,ZDIM)] = f14;//+0.01f; // fB[f_mem(15,x,y,z,pitch,YDIM,ZDIM)] = f15;//+0.01f; // fB[f_mem(16,x,y,z,pitch,YDIM,ZDIM)] = f16;//+0.01f; // fB[f_mem(17,x,y,z,pitch,YDIM,ZDIM)] = f17;//+0.01f; // fB[f_mem(18,x,y,z,pitch,YDIM,ZDIM)] = f18;//+0.01f; } //int const blockx = 192; //int const blocky = 1; __global__ void mrt_d_hybAB(float* fin, float* fout, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; // int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) // f1out[j] = tex2D(texRef_f2A,x,y+h*z); // int i = x+y*blockDim.x*gridDim.x; //float u,v,w,rho;//,usqr; int im = ImageFcn(x,y,z); if(im == 1){//BB float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // f1 = fin[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)]; // f3 = fin[f_mem(1 ,dmax(x-1) ,y ,z ,pitch,YDIM,ZDIM)]; // f5 = fin[f_mem(7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)]; // f7 = fin[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)]; // f6 = fin[f_mem(8 ,dmax(x-1) ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)]; // f8 = fin[f_mem(6 ,dmin(x+1,XDIM),dmax(y-1) ,z ,pitch,YDIM,ZDIM)]; // f10= fin[f_mem(17,dmin(x+1,XDIM),y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)]; // f12= fin[f_mem(15,dmax(x-1) ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)]; // f15= fin[f_mem(12,dmin(x+1,XDIM),y ,dmax(z-1) ,pitch,YDIM,ZDIM)]; // f17= fin[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)]; f2 = fin[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch)]; f4 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch)]; f9 = fin[f_mem(14,x ,y ,dmin(z+1,ZDIM),pitch)]; f11= fin[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM) ,pitch)]; f13= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM),pitch)]; f14= fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch)]; f16= fin[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch)]; f18= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch)]; f3 = tex2D(texRef_f1A ,x-1,(y )+YDIM*(z)); f1 = tex2D(texRef_f3A ,x+1,(y )+YDIM*(z)); f5 = tex2D(texRef_f7A ,x+1,(y+1)+YDIM*(z)); f6 = tex2D(texRef_f8A ,x-1,(y+1)+YDIM*(z)); f7 = tex2D(texRef_f5A ,x-1,(y-1)+YDIM*(z)); f8 = tex2D(texRef_f6A ,x+1,(y-1)+YDIM*(z)); f10= tex2D(texRef_f17A,x+1,(y )+YDIM*(z+1)); f12= tex2D(texRef_f15A,x-1,(y )+YDIM*(z+1)); f17= tex2D(texRef_f10A,x-1,(y )+YDIM*(z-1)); f15= tex2D(texRef_f12A,x+1,(y )+YDIM*(z-1)); 
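// The reads above implement bounce-back for solid nodes (im == 1): each distribution is pulled
// as the opposite-direction population of the neighbour it would have streamed from, e.g. f2 (+y)
// takes f4 from (x,y+1,z) and f10 (+x,+z) takes f17 from (x+1,y,z+1). The y/z neighbours come
// from global memory through f_mem(), while the x-shifted reads use the 2D textures bound to
// array A; dmin/dmax clamp the neighbour index at the domain faces.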
fout[j+pitch*YDIM*ZDIM*1 ] = f1 ; fout[j+pitch*YDIM*ZDIM*2 ] = f2 ; fout[j+pitch*YDIM*ZDIM*3 ] = f3 ; fout[j+pitch*YDIM*ZDIM*4 ] = f4 ; fout[j+pitch*YDIM*ZDIM*5 ] = f5 ; fout[j+pitch*YDIM*ZDIM*6 ] = f6 ; fout[j+pitch*YDIM*ZDIM*7 ] = f7 ; fout[j+pitch*YDIM*ZDIM*8 ] = f8 ; fout[j+pitch*YDIM*ZDIM*9 ] = f9 ; fout[j+pitch*YDIM*ZDIM*10] = f10; fout[j+pitch*YDIM*ZDIM*11] = f11; fout[j+pitch*YDIM*ZDIM*12] = f12; fout[j+pitch*YDIM*ZDIM*13] = f13; fout[j+pitch*YDIM*ZDIM*14] = f14; fout[j+pitch*YDIM*ZDIM*15] = f15; fout[j+pitch*YDIM*ZDIM*16] = f16; fout[j+pitch*YDIM*ZDIM*17] = f17; fout[j+pitch*YDIM*ZDIM*18] = f18; } else{ float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // f1 = fin[f_mem(1 ,x-1,y ,z ,pitch,YDIM,ZDIM)]; // f3 = fin[f_mem(3 ,x+1,y ,z ,pitch,YDIM,ZDIM)]; // f5 = fin[f_mem(5 ,x-1,y-1,z ,pitch,YDIM,ZDIM)]; // f6 = fin[f_mem(6 ,x+1,y-1,z ,pitch,YDIM,ZDIM)]; // f7 = fin[f_mem(7 ,x+1,y+1,z ,pitch,YDIM,ZDIM)]; // f8 = fin[f_mem(8 ,x-1,y+1,z ,pitch,YDIM,ZDIM)]; // f10= fin[f_mem(10,x-1,y ,z-1,pitch,YDIM,ZDIM)]; // f12= fin[f_mem(12,x+1,y ,z-1,pitch,YDIM,ZDIM)]; // f15= fin[f_mem(15,x-1,y ,z+1,pitch,YDIM,ZDIM)]; // f17= fin[f_mem(17,x+1,y ,z+1,pitch,YDIM,ZDIM)]; f0 = fin[j]; f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)]; f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)]; f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)]; f11= fin[f_mem(11,x ,y-1,z-1,pitch)]; f13= fin[f_mem(13,x ,y+1,z-1,pitch)]; f14= fin[f_mem(14,x ,y ,z+1,pitch)]; f16= fin[f_mem(16,x ,y-1,z+1,pitch)]; f18= fin[f_mem(18,x ,y+1,z+1,pitch)]; f1 = tex2D(texRef_f1A ,x-1,y +YDIM*(z)); f3 = tex2D(texRef_f3A ,x+1,y +YDIM*(z)); f5 = tex2D(texRef_f5A ,x-1,y-1+YDIM*(z)); f6 = tex2D(texRef_f6A ,x+1,y-1+YDIM*(z)); f7 = tex2D(texRef_f7A ,x+1,y+1+YDIM*(z)); f8 = tex2D(texRef_f8A ,x-1,y+1+YDIM*(z)); f15= tex2D(texRef_f15A,x-1,y +YDIM*(z+1)); f17= tex2D(texRef_f17A,x+1,y +YDIM*(z+1)); f10= tex2D(texRef_f10A,x-1,y +YDIM*(z-1)); f12= tex2D(texRef_f12A,x+1,y +YDIM*(z-1)); if(im == 3)//DirichletWest { if(y == 0){ // f2 = f4; f6 = f7; // f11 = f13; // f16 = f18; } else if(y == YDIM-1){ // f4 = f2; f7 = f6; // f13 = f11; // f18 = f16; } if(z == 0){ // f9 = f14; // f10 = f15; // f11 = f16; f12 = f17; // f13 = f18; } if(z == ZDIM-1){ // f14 = f9; // f15 = f10; // f16 = f11; f17 = f12; // f18 = f13; } // float fInt1,fInt2;//,fDiff; float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; // fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18; // fInt2 = f3+f6+f7+f12+f17; // rho = u+(fInt1+2.0f*fInt2); //D2Q9i f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); // rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i // // float usqr = u*u+v*v+w*w; // f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; // f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); // f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); // f10= 
0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); // f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } //mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 ,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void mrt_d_hybBA(float* fin, float* fout, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; // int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,z); if(im == 1){//BB float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f2 = fin[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch)]; f4 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch)]; f9 = fin[f_mem(14,x ,y ,dmin(z+1,ZDIM),pitch)]; f11= fin[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM) ,pitch)]; f13= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM),pitch)]; f14= fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch)]; f16= fin[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch)]; f18= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch)]; f3 = tex2D(texRef_f1B ,x-1,(y )+YDIM*(z)); f1 = tex2D(texRef_f3B ,x+1,(y )+YDIM*(z)); f5 = tex2D(texRef_f7B ,x+1,(y+1)+YDIM*(z)); f6 = tex2D(texRef_f8B ,x-1,(y+1)+YDIM*(z)); f7 = tex2D(texRef_f5B ,x-1,(y-1)+YDIM*(z)); f8 = tex2D(texRef_f6B ,x+1,(y-1)+YDIM*(z)); f10= tex2D(texRef_f17B,x+1,(y )+YDIM*(z+1)); f12= tex2D(texRef_f15B,x-1,(y )+YDIM*(z+1)); f17= tex2D(texRef_f10B,x-1,(y )+YDIM*(z-1)); f15= tex2D(texRef_f12B,x+1,(y )+YDIM*(z-1)); fout[j+pitch*YDIM*ZDIM*1 ] = f1 ; fout[j+pitch*YDIM*ZDIM*2 ] = f2 ; fout[j+pitch*YDIM*ZDIM*3 ] = f3 ; fout[j+pitch*YDIM*ZDIM*4 ] = f4 ; fout[j+pitch*YDIM*ZDIM*5 ] = f5 ; fout[j+pitch*YDIM*ZDIM*6 ] = f6 ; fout[j+pitch*YDIM*ZDIM*7 ] = f7 ; fout[j+pitch*YDIM*ZDIM*8 ] = f8 ; fout[j+pitch*YDIM*ZDIM*9 ] = f9 ; fout[j+pitch*YDIM*ZDIM*10] = f10; fout[j+pitch*YDIM*ZDIM*11] = f11; fout[j+pitch*YDIM*ZDIM*12] = f12; fout[j+pitch*YDIM*ZDIM*13] = f13; fout[j+pitch*YDIM*ZDIM*14] = f14; fout[j+pitch*YDIM*ZDIM*15] = f15; fout[j+pitch*YDIM*ZDIM*16] = f16; fout[j+pitch*YDIM*ZDIM*17] = f17; fout[j+pitch*YDIM*ZDIM*18] = f18; } else{ float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fin[j]; f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)]; f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)]; f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)]; f11= fin[f_mem(11,x ,y-1,z-1,pitch)]; f13= fin[f_mem(13,x ,y+1,z-1,pitch)]; f14= fin[f_mem(14,x ,y ,z+1,pitch)]; f16= fin[f_mem(16,x ,y-1,z+1,pitch)]; f18= fin[f_mem(18,x ,y+1,z+1,pitch)]; f1 = tex2D(texRef_f1B ,x-1,y +YDIM*(z)); f3 = tex2D(texRef_f3B ,x+1,y +YDIM*(z)); f5 = tex2D(texRef_f5B 
,x-1,y-1+YDIM*(z)); f6 = tex2D(texRef_f6B ,x+1,y-1+YDIM*(z)); f7 = tex2D(texRef_f7B ,x+1,y+1+YDIM*(z)); f8 = tex2D(texRef_f8B ,x-1,y+1+YDIM*(z)); f15= tex2D(texRef_f15B,x-1,y +YDIM*(z+1)); f17= tex2D(texRef_f17B,x+1,y +YDIM*(z+1)); f10= tex2D(texRef_f10B,x-1,y +YDIM*(z-1)); f12= tex2D(texRef_f12B,x+1,y +YDIM*(z-1)); if(im == 3)//DirichletWest { if(y == 0){ f2 = f4; f6 = f7; f11 = f13; f16 = f18; } else if(y == YDIM-1){ f4 = f2; f7 = f6; f13 = f11; f18 = f16; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f12 = f17; f13 = f18; } if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f17 = f12; f18 = f13; } // float fInt1,fInt2;//,fDiff; float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; // fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18; // fInt2 = f3+f6+f7+f12+f17; // rho = u+(fInt1+2.0f*fInt2); //D2Q9i f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); // rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i // // float usqr = u*u+v*v+w*w; // f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; // f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); // f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); // f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); // f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } //mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 ,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void mrt_d_textAB(float* fin, float* fout, int *image, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) int im = image[i]; if(im == 1){//BB float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f2 = tex2D(texRef_f4A ,x ,(y+1)+YDIM*(z )); f4 = tex2D(texRef_f2A ,x ,(y-1)+YDIM*(z )); f9 = tex2D(texRef_f14A,x 
,(y )+YDIM*(z+1)); f14= tex2D(texRef_f9A ,x ,(y )+YDIM*(z-1)); f11= tex2D(texRef_f18A,x ,(y+1)+YDIM*(z+1)); f18= tex2D(texRef_f11A,x ,(y-1)+YDIM*(z-1)); f16= tex2D(texRef_f13A,x ,(y+1)+YDIM*(z-1)); f13= tex2D(texRef_f16A,x ,(y-1)+YDIM*(z+1)); // f2 = fin[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)]; // f4 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)]; // f9 = fin[f_mem(14,x ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)]; // f11= fin[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)]; // f13= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)]; // f14= fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)]; // f16= fin[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch,YDIM,ZDIM)]; // f18= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,YDIM,ZDIM)]; f3 = tex2D(texRef_f1A ,x-1,(y )+YDIM*(z)); f1 = tex2D(texRef_f3A ,x+1,(y )+YDIM*(z)); f5 = tex2D(texRef_f7A ,x+1,(y+1)+YDIM*(z)); f6 = tex2D(texRef_f8A ,x-1,(y+1)+YDIM*(z)); f7 = tex2D(texRef_f5A ,x-1,(y-1)+YDIM*(z)); f8 = tex2D(texRef_f6A ,x+1,(y-1)+YDIM*(z)); f10= tex2D(texRef_f17A,x+1,(y )+YDIM*(z+1)); f12= tex2D(texRef_f15A,x-1,(y )+YDIM*(z+1)); f17= tex2D(texRef_f10A,x-1,(y )+YDIM*(z-1)); f15= tex2D(texRef_f12A,x+1,(y )+YDIM*(z-1)); fout[j+pitch*YDIM*ZDIM*1 ] = f1 ; fout[j+pitch*YDIM*ZDIM*2 ] = f2 ; fout[j+pitch*YDIM*ZDIM*3 ] = f3 ; fout[j+pitch*YDIM*ZDIM*4 ] = f4 ; fout[j+pitch*YDIM*ZDIM*5 ] = f5 ; fout[j+pitch*YDIM*ZDIM*6 ] = f6 ; fout[j+pitch*YDIM*ZDIM*7 ] = f7 ; fout[j+pitch*YDIM*ZDIM*8 ] = f8 ; fout[j+pitch*YDIM*ZDIM*9 ] = f9 ; fout[j+pitch*YDIM*ZDIM*10] = f10; fout[j+pitch*YDIM*ZDIM*11] = f11; fout[j+pitch*YDIM*ZDIM*12] = f12; fout[j+pitch*YDIM*ZDIM*13] = f13; fout[j+pitch*YDIM*ZDIM*14] = f14; fout[j+pitch*YDIM*ZDIM*15] = f15; fout[j+pitch*YDIM*ZDIM*16] = f16; fout[j+pitch*YDIM*ZDIM*17] = f17; fout[j+pitch*YDIM*ZDIM*18] = f18; } else{ float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fin[j]; f2 = tex2D(texRef_f2A ,x ,y-1+YDIM*(z)); f4 = tex2D(texRef_f4A ,x ,y+1+YDIM*(z)); f9 = tex2D(texRef_f9A ,x ,y+1+YDIM*(z-1)); f11= tex2D(texRef_f11A,x ,y-1+YDIM*(z-1)); f13= tex2D(texRef_f13A,x ,y+1+YDIM*(z-1)); f14= tex2D(texRef_f14A,x ,y +YDIM*(z+1)); f16= tex2D(texRef_f16A,x ,y-1+YDIM*(z+1)); f18= tex2D(texRef_f18A,x ,y+1+YDIM*(z+1)); // f2 = fin[f_mem(2 ,x ,y-1,z ,pitch,YDIM,ZDIM)]; // f4 = fin[f_mem(4 ,x ,y+1,z ,pitch,YDIM,ZDIM)]; // f9 = fin[f_mem(9 ,x ,y ,z-1,pitch,YDIM,ZDIM)]; // f11= fin[f_mem(11,x ,y-1,z-1,pitch,YDIM,ZDIM)]; // f13= fin[f_mem(13,x ,y+1,z-1,pitch,YDIM,ZDIM)]; // f14= fin[f_mem(14,x ,y ,z+1,pitch,YDIM,ZDIM)]; // f16= fin[f_mem(16,x ,y-1,z+1,pitch,YDIM,ZDIM)]; // f18= fin[f_mem(18,x ,y+1,z+1,pitch,YDIM,ZDIM)]; f1 = tex2D(texRef_f1A ,x-1,y +YDIM*(z)); f3 = tex2D(texRef_f3A ,x+1,y +YDIM*(z)); f5 = tex2D(texRef_f5A ,x-1,y-1+YDIM*(z)); f6 = tex2D(texRef_f6A ,x+1,y-1+YDIM*(z)); f7 = tex2D(texRef_f7A ,x+1,y+1+YDIM*(z)); f8 = tex2D(texRef_f8A ,x-1,y+1+YDIM*(z)); f15= tex2D(texRef_f15A,x-1,y +YDIM*(z+1)); f17= tex2D(texRef_f17A,x+1,y +YDIM*(z+1)); f10= tex2D(texRef_f10A,x-1,y +YDIM*(z-1)); f12= tex2D(texRef_f12A,x+1,y +YDIM*(z-1)); if(im == 3)//DirichletWest { if(y == 0){ f2 = f4; f6 = f7; f11 = f13; f16 = f18; } else if(y == YDIM-1){ f4 = f2; f7 = f6; f13 = f11; f18 = f16; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f12 = f17; f13 = f18; } if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f17 = f12; f18 = f13; } // float fInt1,fInt2;//,fDiff; float u,v,w,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; // fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18; // fInt2 = f3+f6+f7+f12+f17; // rho = 
u+(fInt1+2.0f*fInt2); //D2Q9i rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i float usqr = u*u+v*v+w*w; f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } //mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 ,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void mrt_d_textBA(float* fin, float* fout, int *image, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) int im = image[i]; if(im == 1){//BB float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f2 = tex2D(texRef_f4B ,x ,(y+1)+YDIM*(z )); f4 = tex2D(texRef_f2B ,x ,(y-1)+YDIM*(z )); f9 = tex2D(texRef_f14B,x ,(y )+YDIM*(z+1)); f14= tex2D(texRef_f9B ,x ,(y )+YDIM*(z-1)); f11= tex2D(texRef_f18B,x ,(y+1)+YDIM*(z+1)); f18= tex2D(texRef_f11B,x ,(y-1)+YDIM*(z-1)); f16= tex2D(texRef_f13B,x ,(y+1)+YDIM*(z-1)); f13= tex2D(texRef_f16B,x ,(y-1)+YDIM*(z+1)); // f2 = fin[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)]; // f4 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)]; // f9 = fin[f_mem(14,x ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)]; // f11= fin[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)]; // f13= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)]; // f14= fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)]; // f16= fin[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch,YDIM,ZDIM)]; // f18= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,YDIM,ZDIM)]; f3 = tex2D(texRef_f1B ,x-1,(y )+YDIM*(z)); f1 = tex2D(texRef_f3B ,x+1,(y )+YDIM*(z)); f5 = tex2D(texRef_f7B ,x+1,(y+1)+YDIM*(z)); f6 = tex2D(texRef_f8B ,x-1,(y+1)+YDIM*(z)); f7 = tex2D(texRef_f5B ,x-1,(y-1)+YDIM*(z)); f8 = tex2D(texRef_f6B ,x+1,(y-1)+YDIM*(z)); f10= tex2D(texRef_f17B,x+1,(y )+YDIM*(z+1)); f12= tex2D(texRef_f15B,x-1,(y )+YDIM*(z+1)); f17= tex2D(texRef_f10B,x-1,(y )+YDIM*(z-1)); f15= tex2D(texRef_f12B,x+1,(y )+YDIM*(z-1)); fout[j+pitch*YDIM*ZDIM*1 ] = f1 ; fout[j+pitch*YDIM*ZDIM*2 ] = f2 ; fout[j+pitch*YDIM*ZDIM*3 ] = f3 ; fout[j+pitch*YDIM*ZDIM*4 ] = f4 ; 
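	/* Remaining bounced-back populations. The distributions are stored in a
	   structure-of-arrays layout on the pitched allocation: component q of node j
	   lives at fout[j + q*pitch*YDIM*ZDIM], which is what the explicit offsets below
	   (and, it is assumed, the f_mem() indexing used elsewhere) encode. */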
fout[j+pitch*YDIM*ZDIM*5 ] = f5 ; fout[j+pitch*YDIM*ZDIM*6 ] = f6 ; fout[j+pitch*YDIM*ZDIM*7 ] = f7 ; fout[j+pitch*YDIM*ZDIM*8 ] = f8 ; fout[j+pitch*YDIM*ZDIM*9 ] = f9 ; fout[j+pitch*YDIM*ZDIM*10] = f10; fout[j+pitch*YDIM*ZDIM*11] = f11; fout[j+pitch*YDIM*ZDIM*12] = f12; fout[j+pitch*YDIM*ZDIM*13] = f13; fout[j+pitch*YDIM*ZDIM*14] = f14; fout[j+pitch*YDIM*ZDIM*15] = f15; fout[j+pitch*YDIM*ZDIM*16] = f16; fout[j+pitch*YDIM*ZDIM*17] = f17; fout[j+pitch*YDIM*ZDIM*18] = f18; } else{ float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fin[j]; f2 = tex2D(texRef_f2B ,x ,y-1+YDIM*(z)); f4 = tex2D(texRef_f4B ,x ,y+1+YDIM*(z)); f9 = tex2D(texRef_f9B ,x ,y+1+YDIM*(z-1)); f11= tex2D(texRef_f11B,x ,y-1+YDIM*(z-1)); f13= tex2D(texRef_f13B,x ,y+1+YDIM*(z-1)); f14= tex2D(texRef_f14B,x ,y +YDIM*(z+1)); f16= tex2D(texRef_f16B,x ,y-1+YDIM*(z+1)); f18= tex2D(texRef_f18B,x ,y+1+YDIM*(z+1)); // f2 = fin[f_mem(2 ,x ,y-1,z ,pitch,YDIM,ZDIM)]; // f4 = fin[f_mem(4 ,x ,y+1,z ,pitch,YDIM,ZDIM)]; // f9 = fin[f_mem(9 ,x ,y ,z-1,pitch,YDIM,ZDIM)]; // f11= fin[f_mem(11,x ,y-1,z-1,pitch,YDIM,ZDIM)]; // f13= fin[f_mem(13,x ,y+1,z-1,pitch,YDIM,ZDIM)]; // f14= fin[f_mem(14,x ,y ,z+1,pitch,YDIM,ZDIM)]; // f16= fin[f_mem(16,x ,y-1,z+1,pitch,YDIM,ZDIM)]; // f18= fin[f_mem(18,x ,y+1,z+1,pitch,YDIM,ZDIM)]; f1 = tex2D(texRef_f1B ,x-1,y +YDIM*(z)); f3 = tex2D(texRef_f3B ,x+1,y +YDIM*(z)); f5 = tex2D(texRef_f5B ,x-1,y-1+YDIM*(z)); f6 = tex2D(texRef_f6B ,x+1,y-1+YDIM*(z)); f7 = tex2D(texRef_f7B ,x+1,y+1+YDIM*(z)); f8 = tex2D(texRef_f8B ,x-1,y+1+YDIM*(z)); f15= tex2D(texRef_f15B,x-1,y +YDIM*(z+1)); f17= tex2D(texRef_f17B,x+1,y +YDIM*(z+1)); f10= tex2D(texRef_f10B,x-1,y +YDIM*(z-1)); f12= tex2D(texRef_f12B,x+1,y +YDIM*(z-1)); if(im == 3)//DirichletWest { if(y == 0){ f2 = f4; f6 = f7; f11 = f13; f16 = f18; } else if(y == YDIM-1){ f4 = f2; f7 = f6; f13 = f11; f18 = f16; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f12 = f17; f13 = f18; } if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f17 = f12; f18 = f13; } // float fInt1,fInt2;//,fDiff; float u,v,w,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; // fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18; // fInt2 = f3+f6+f7+f12+f17; // rho = u+(fInt1+2.0f*fInt2); //D2Q9i rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i float usqr = u*u+v*v+w*w; f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } //mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 ,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = 
f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void mrt_d_shared(float* fA, float* fB, float omega, size_t pitch)//pitch in elements // int *image, float omega, float UMAX, // int XDIM, int YDIM, int ZDIM, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; // int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) // f1out[j] = tex2D(texRef_f2A,x,y+h*z); // int i = x+y*blockDim.x*gridDim.x; //float u,v,w,rho;//,usqr; // int im = image[i]; // if(x == 0 || y == 0 || z == 0 || x == XDIM-1 || y == YDIM-1 || z == ZDIM-1) im = 1; int im = ImageFcn(x,y,z); __shared__ float f1_s[BLOCKSIZEX]; __shared__ float f3_s[BLOCKSIZEX]; __shared__ float f5_s[BLOCKSIZEX]; __shared__ float f7_s[BLOCKSIZEX]; __shared__ float f6_s[BLOCKSIZEX]; __shared__ float f8_s[BLOCKSIZEX]; __shared__ float f10_s[BLOCKSIZEX]; __shared__ float f12_s[BLOCKSIZEX]; __shared__ float f15_s[BLOCKSIZEX]; __shared__ float f17_s[BLOCKSIZEX]; f1_s[threadIdx.x] = fA[f_mem(1 ,x ,y ,z ,pitch)];//dmax(x-1) f3_s[threadIdx.x] = fA[f_mem(3 ,x ,y ,z ,pitch)];//dmin(x+1,XDIM) // if(y != 0){ f5_s[threadIdx.x] = fA[f_mem(5 ,x ,y-1,z ,pitch)];//dmax(x-1) f8_s[threadIdx.x] = fA[f_mem(8 ,x ,y-1,z ,pitch)];//dmax(x-1) // } // else if(y != YDIM){ f7_s[threadIdx.x] = fA[f_mem(7 ,x ,y+1,z ,pitch)];//dmin(x+1,XDIM) f6_s[threadIdx.x] = fA[f_mem(6 ,x ,y+1,z ,pitch)];//dmin(x+1,XDIM) // } // if(z != 0){ f10_s[threadIdx.x] = fA[f_mem(10,x ,y ,z-1,pitch)];//dmax(x-1) f12_s[threadIdx.x] = fA[f_mem(12,x ,y ,z-1,pitch)];//dmin(x+1,XDIM) // } // else if(z != ZDIM-1){ f15_s[threadIdx.x] = fA[f_mem(15,x ,y ,z+1,pitch)];//dmax(x-1) f17_s[threadIdx.x] = fA[f_mem(17,x ,y ,z+1,pitch)];//dmin(x+1,XDIM) // } // f1_s[threadIdx.x] = fA[f_mem(1 ,x ,y ,z ,pitch,YDIM,ZDIM)];//dmax(x-1) // f3_s[threadIdx.x] = fA[f_mem(3 ,x ,y ,z ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) // f5_s[threadIdx.x] = fA[f_mem(5 ,x ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//dmax(x-1) // f7_s[threadIdx.x] = fA[f_mem(7 ,x ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) // f6_s[threadIdx.x] = fA[f_mem(6 ,x ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) // f8_s[threadIdx.x] = fA[f_mem(8 ,x ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//dmax(x-1) // f10_s[threadIdx.x] = fA[f_mem(10,x ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//dmax(x-1) // f12_s[threadIdx.x] = fA[f_mem(12,x ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) // f15_s[threadIdx.x] = fA[f_mem(15,x ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//dmax(x-1) // f17_s[threadIdx.x] = fA[f_mem(17,x ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) // __syncthreads(); if(im == 1){//BB float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // if(y != 0){ f4 = fA[f_mem(2 ,x ,y-1 ,z ,pitch)];//fA[f_mem(4 ,x,y,z,pitch,YDIM,ZDIM)]; // } // else if(y != YDIM-1){ f2 = fA[f_mem(4 ,x ,y+1 ,z ,pitch)];//fA[f_mem(2 ,x,y,z,pitch,YDIM,ZDIM)]; // } // if(z != ZDIM-1){ f9 = fA[f_mem(14,x ,y ,z+1,pitch)];//fA[f_mem(9 ,x,y,z,pitch,YDIM,ZDIM)]; f11= fA[f_mem(18,x ,y+1 ,z+1,pitch)];//fA[f_mem(11,x,y,z,pitch,YDIM,ZDIM)]; f13= fA[f_mem(16,x ,y-1 ,z+1,pitch)];//fA[f_mem(13,x,y,z,pitch,YDIM,ZDIM)]; // } // else if(z != 0){ f14= fA[f_mem(9 ,x ,y 
,z-1,pitch)];//fA[f_mem(14,x,y,z,pitch,YDIM,ZDIM)]; f16= fA[f_mem(13,x ,y+1 ,z-1,pitch)];//fA[f_mem(16,x,y,z,pitch,YDIM,ZDIM)]; f18= fA[f_mem(11,x ,y-1 ,z-1,pitch)];//fA[f_mem(18,x,y,z,pitch,YDIM,ZDIM)]; // } // f2 = fA[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//fA[f_mem(2 ,x,y,z,pitch,YDIM,ZDIM)]; // f4 = fA[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(4 ,x,y,z,pitch,YDIM,ZDIM)]; // f9 = fA[f_mem(14,x ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(9 ,x,y,z,pitch,YDIM,ZDIM)]; // f11= fA[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(11,x,y,z,pitch,YDIM,ZDIM)]; // f13= fA[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(13,x,y,z,pitch,YDIM,ZDIM)]; // f14= fA[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(14,x,y,z,pitch,YDIM,ZDIM)]; // f16= fA[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(16,x,y,z,pitch,YDIM,ZDIM)]; // f18= fA[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(18,x,y,z,pitch,YDIM,ZDIM)]; // f1 = fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f3 = fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(3 ,x,y,z,pitch,YDIM,ZDIM)]; // f5 = fA[f_mem(7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//fA[f_mem(5 ,x,y,z,pitch,YDIM,ZDIM)]; // f7 = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(7 ,x,y,z,pitch,YDIM,ZDIM)]; // f6 = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//fA[f_mem(6 ,x,y,z,pitch,YDIM,ZDIM)]; // f8 = fA[f_mem(6 ,dmin(x+1,XDIM),dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(8 ,x,y,z,pitch,YDIM,ZDIM)]; // f10= fA[f_mem(17,dmin(x+1,XDIM),y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(10,x,y,z,pitch,YDIM,ZDIM)]; // f12= fA[f_mem(15,dmax(x-1) ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(12,x,y,z,pitch,YDIM,ZDIM)]; // f15= fA[f_mem(12,dmin(x+1,XDIM),y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(15,x,y,z,pitch,YDIM,ZDIM)]; // f17= fA[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(17,x,y,z,pitch,YDIM,ZDIM)]; if(threadIdx.x != XDIM-1){ f1 = f3_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; f5 = f7_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; f8 = f6_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; f10=f17_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; f15=f12_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; } if(threadIdx.x != 0){ f3 = f1_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; f7 = f5_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; f6 = f8_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; f17=f10_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; f12=f15_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; } // f1 = f3_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f3 = f1_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f5 = 
f7_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f7 = f5_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f6 = f8_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f8 = f6_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f10=f17_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f17=f10_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f12=f15_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f15=f12_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; fB[f_mem(1 ,x,y,z,pitch)] = f1 ; fB[f_mem(2 ,x,y,z,pitch)] = f2 ; fB[f_mem(3 ,x,y,z,pitch)] = f3 ; fB[f_mem(4 ,x,y,z,pitch)] = f4 ; fB[f_mem(5 ,x,y,z,pitch)] = f5 ; fB[f_mem(6 ,x,y,z,pitch)] = f6 ; fB[f_mem(7 ,x,y,z,pitch)] = f7 ; fB[f_mem(8 ,x,y,z,pitch)] = f8 ; fB[f_mem(9 ,x,y,z,pitch)] = f9 ; fB[f_mem(10,x,y,z,pitch)] = f10; fB[f_mem(11,x,y,z,pitch)] = f11; fB[f_mem(12,x,y,z,pitch)] = f12; fB[f_mem(13,x,y,z,pitch)] = f13; fB[f_mem(14,x,y,z,pitch)] = f14; fB[f_mem(15,x,y,z,pitch)] = f15; fB[f_mem(16,x,y,z,pitch)] = f16; fB[f_mem(17,x,y,z,pitch)] = f17; fB[f_mem(18,x,y,z,pitch)] = f18; // fB[j+pitch*YDIM*ZDIM*1 ] = f1 ; // fB[j+pitch*YDIM*ZDIM*2 ] = f2 ; // fB[j+pitch*YDIM*ZDIM*3 ] = f3 ; // fB[j+pitch*YDIM*ZDIM*4 ] = f4 ; // fB[j+pitch*YDIM*ZDIM*5 ] = f5 ; // fB[j+pitch*YDIM*ZDIM*6 ] = f6 ; // fB[j+pitch*YDIM*ZDIM*7 ] = f7 ; // fB[j+pitch*YDIM*ZDIM*8 ] = f8 ; // fB[j+pitch*YDIM*ZDIM*9 ] = f9 ; // fB[j+pitch*YDIM*ZDIM*10] = f10; // fB[j+pitch*YDIM*ZDIM*11] = f11; // fB[j+pitch*YDIM*ZDIM*12] = f12; // fB[j+pitch*YDIM*ZDIM*13] = f13; // fB[j+pitch*YDIM*ZDIM*14] = f14; // fB[j+pitch*YDIM*ZDIM*15] = f15; // fB[j+pitch*YDIM*ZDIM*16] = f16; // fB[j+pitch*YDIM*ZDIM*17] = f17; // fB[j+pitch*YDIM*ZDIM*18] = f18; } else{ float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fA[j]; //f0 = fA[f_mem(0 ,x ,y ,z ,pitch)]; // if(y != 0){ f2 = fA[f_mem(2 ,x ,y-1,z ,pitch)]; // } // else if(y != YDIM-1){ f4 = fA[f_mem(4 ,x ,y+1,z ,pitch)]; // } f14= fA[f_mem(14,x ,y ,z+1,pitch)]; f16= fA[f_mem(16,x ,y-1,z+1,pitch)]; if(z != ZDIM-1){ f18= fA[f_mem(18,x ,y+1,z+1,pitch)]; } // else if(z != 0){ f9 = fA[f_mem(9 ,x ,y ,z-1,pitch)]; f11= fA[f_mem(11,x ,y-1,z-1,pitch)]; f13= fA[f_mem(13,x ,y+1,z-1,pitch)]; // } if(threadIdx.x != XDIM-1){ f3 = f3_s[threadIdx.x+1];//fA[f_mem(3 ,x+1,y ,z ,pitch,YDIM,ZDIM)]; f6 = f6_s[threadIdx.x+1];//fA[f_mem(6 ,x+1,y-1,z ,pitch,YDIM,ZDIM)]; f7 = f7_s[threadIdx.x+1];//fA[f_mem(7 ,x+1,y+1,z ,pitch,YDIM,ZDIM)]; f12=f12_s[threadIdx.x+1];//fA[f_mem(12,x+1,y ,z-1,pitch,YDIM,ZDIM)]; f17=f17_s[threadIdx.x+1];//fA[f_mem(17,x+1,y ,z+1,pitch,YDIM,ZDIM)]; } if(threadIdx.x != 0){ f1 = f1_s[threadIdx.x-1];//fA[f_mem(1 ,x-1,y ,z ,pitch,YDIM,ZDIM)]; f5 = f5_s[threadIdx.x-1];//fA[f_mem(5 ,x-1,y-1,z ,pitch,YDIM,ZDIM)]; f8 = f8_s[threadIdx.x-1];//fA[f_mem(8 ,x-1,y+1,z ,pitch,YDIM,ZDIM)]; f10=f10_s[threadIdx.x-1];//fA[f_mem(10,x-1,y ,z-1,pitch,YDIM,ZDIM)]; f15=f15_s[threadIdx.x-1];//fA[f_mem(15,x-1,y ,z+1,pitch,YDIM,ZDIM)]; } // f0 = fA[j]; // f2 = fA[f_mem(2 ,x ,y-1,z 
,pitch,YDIM,ZDIM)]; // f4 = fA[f_mem(4 ,x ,y+1,z ,pitch,YDIM,ZDIM)]; // f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,YDIM,ZDIM)]; // f11= fA[f_mem(11,x ,y-1,z-1,pitch,YDIM,ZDIM)]; // f13= fA[f_mem(13,x ,y+1,z-1,pitch,YDIM,ZDIM)]; // f14= fA[f_mem(14,x ,y ,z+1,pitch,YDIM,ZDIM)]; // f16= fA[f_mem(16,x ,y-1,z+1,pitch,YDIM,ZDIM)]; // f18= fA[f_mem(18,x ,y+1,z+1,pitch,YDIM,ZDIM)]; // // f1 = f1_s[dmax(threadIdx.x-1 )];//fA[f_mem(1 ,x-1,y ,z ,pitch,YDIM,ZDIM)]; // f3 = f3_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,x+1,y ,z ,pitch,YDIM,ZDIM)]; // f5 = f5_s[dmax(threadIdx.x-1 )];//fA[f_mem(5 ,x-1,y-1,z ,pitch,YDIM,ZDIM)]; // f6 = f6_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(6 ,x+1,y-1,z ,pitch,YDIM,ZDIM)]; // f7 = f7_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(7 ,x+1,y+1,z ,pitch,YDIM,ZDIM)]; // f8 = f8_s[dmax(threadIdx.x-1 )];//fA[f_mem(8 ,x-1,y+1,z ,pitch,YDIM,ZDIM)]; // f10=f10_s[dmax(threadIdx.x-1 )];//fA[f_mem(10,x-1,y ,z-1,pitch,YDIM,ZDIM)]; // f12=f12_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(12,x+1,y ,z-1,pitch,YDIM,ZDIM)]; // f15=f15_s[dmax(threadIdx.x-1 )];//fA[f_mem(15,x-1,y ,z+1,pitch,YDIM,ZDIM)]; // f17=f17_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(17,x+1,y ,z+1,pitch,YDIM,ZDIM)]; if(im == 3)//DirichletWest { if(y == 0){ f2 = f4; f6 = f7; f11 = f13; f16 = f18; } else if(y == YDIM-1){ f4 = f2; f7 = f6; f13 = f11; f18 = f16; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f12 = f17; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f17 = f12; f18 = f13; } // float fInt1,fInt2;//,fDiff; float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; // fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18; // fInt2 = f3+f6+f7+f12+f17; // rho = u+(fInt1+2.0f*fInt2); //D2Q9i // rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i // float usqr = u*u+v*v+w*w; f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); // f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; // f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); // f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); // f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); // f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } //mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fB[f_mem(0 ,x,y,z,pitch)] = f0 ; fB[f_mem(1 ,x,y,z,pitch)] = f1 ; fB[f_mem(2 ,x,y,z,pitch)] = f2 ; fB[f_mem(3 ,x,y,z,pitch)] = f3 ; fB[f_mem(4 ,x,y,z,pitch)] = f4 ; fB[f_mem(5 ,x,y,z,pitch)] = f5 ; fB[f_mem(6 ,x,y,z,pitch)] = f6 ; fB[f_mem(7 ,x,y,z,pitch)] = f7 ; fB[f_mem(8 ,x,y,z,pitch)] = f8 ; fB[f_mem(9 ,x,y,z,pitch)] = f9 ; fB[f_mem(10,x,y,z,pitch)] = f10; fB[f_mem(11,x,y,z,pitch)] = f11; 
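	/* Remaining post-collision populations for the shared-memory variant. Only the
	   x-shifted directions (f1,f3,f5,f6,f7,f8,f10,f12,f15,f17) are staged through the
	   per-block shared arrays above, guarded by threadIdx.x checks against 0 and
	   XDIM-1; that guard is only meaningful when BLOCKSIZEX equals XDIM, and it
	   presumably relies on ImageFcn() tagging those boundary columns as bounce-back
	   so this fluid branch never reads the unstaged values. */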
fB[f_mem(12,x,y,z,pitch)] = f12; fB[f_mem(13,x,y,z,pitch)] = f13; fB[f_mem(14,x,y,z,pitch)] = f14; fB[f_mem(15,x,y,z,pitch)] = f15; fB[f_mem(16,x,y,z,pitch)] = f16; fB[f_mem(17,x,y,z,pitch)] = f17; fB[f_mem(18,x,y,z,pitch)] = f18; } } //{ // int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem // int y = threadIdx.y+blockIdx.y*blockDim.y; // int z = threadIdx.z+blockIdx.z*blockDim.z; //// int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem // int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) //// f1out[j] = tex2D(texRef_f2A,x,y+h*z); // //// int i = x+y*blockDim.x*gridDim.x; // //float u,v,w,rho;//,usqr; //// int im = image[i]; //// if(x == 0 || y == 0 || z == 0 || x == XDIM-1 || y == YDIM-1 || z == ZDIM-1) im = 1; // int im = ImageFcn(x,y,z); // // __shared__ float f1_s[BLOCKSIZEX]; // __shared__ float f3_s[BLOCKSIZEX]; // __shared__ float f5_s[BLOCKSIZEX]; // __shared__ float f7_s[BLOCKSIZEX]; // __shared__ float f6_s[BLOCKSIZEX]; // __shared__ float f8_s[BLOCKSIZEX]; // __shared__ float f10_s[BLOCKSIZEX]; // __shared__ float f12_s[BLOCKSIZEX]; // __shared__ float f15_s[BLOCKSIZEX]; // __shared__ float f17_s[BLOCKSIZEX]; // // f1_s[threadIdx.x] = fA[f_mem(1 ,x ,y ,z ,pitch)];//dmax(x-1) // f3_s[threadIdx.x] = fA[f_mem(3 ,x ,y ,z ,pitch)];//dmin(x+1,XDIM) //// if(y != 0){ // f5_s[threadIdx.x] = fA[f_mem(5 ,x ,y-1,z ,pitch)];//dmax(x-1) // f8_s[threadIdx.x] = fA[f_mem(8 ,x ,y-1,z ,pitch)];//dmax(x-1) //// } //// else if(y != YDIM){ // f7_s[threadIdx.x] = fA[f_mem(7 ,x ,y+1,z ,pitch)];//dmin(x+1,XDIM) // f6_s[threadIdx.x] = fA[f_mem(6 ,x ,y+1,z ,pitch)];//dmin(x+1,XDIM) //// } //// if(z != 0){ // f10_s[threadIdx.x] = fA[f_mem(10,x ,y ,z-1,pitch)];//dmax(x-1) // f12_s[threadIdx.x] = fA[f_mem(12,x ,y ,z-1,pitch)];//dmin(x+1,XDIM) //// } //// else if(z != ZDIM-1){ // f15_s[threadIdx.x] = fA[f_mem(15,x ,y ,z+1,pitch)];//dmax(x-1) // f17_s[threadIdx.x] = fA[f_mem(17,x ,y ,z+1,pitch)];//dmin(x+1,XDIM) //// } // //// f1_s[threadIdx.x] = fA[f_mem(1 ,x ,y ,z ,pitch,YDIM,ZDIM)];//dmax(x-1) //// f3_s[threadIdx.x] = fA[f_mem(3 ,x ,y ,z ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) //// f5_s[threadIdx.x] = fA[f_mem(5 ,x ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//dmax(x-1) //// f7_s[threadIdx.x] = fA[f_mem(7 ,x ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) //// f6_s[threadIdx.x] = fA[f_mem(6 ,x ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) //// f8_s[threadIdx.x] = fA[f_mem(8 ,x ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//dmax(x-1) //// f10_s[threadIdx.x] = fA[f_mem(10,x ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//dmax(x-1) //// f12_s[threadIdx.x] = fA[f_mem(12,x ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) //// f15_s[threadIdx.x] = fA[f_mem(15,x ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//dmax(x-1) //// f17_s[threadIdx.x] = fA[f_mem(17,x ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//dmin(x+1,XDIM) //// // __syncthreads(); // // if(im == 1){//BB // float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // if(y != 0){ // f4 = fA[f_mem(2 ,x ,y-1 ,z ,pitch)];//fA[f_mem(4 ,x,y,z,pitch,YDIM,ZDIM)]; // } // else if(y != YDIM){ // f2 = fA[f_mem(4 ,x ,y+1 ,z ,pitch)];//fA[f_mem(2 ,x,y,z,pitch,YDIM,ZDIM)]; // } // if(z != ZDIM-1){ // f9 = fA[f_mem(14,x ,y ,z+1,pitch)];//fA[f_mem(9 ,x,y,z,pitch,YDIM,ZDIM)]; // f11= fA[f_mem(18,x ,y+1 ,z+1,pitch)];//fA[f_mem(11,x,y,z,pitch,YDIM,ZDIM)]; // f13= fA[f_mem(16,x ,y-1 ,z+1,pitch)];//fA[f_mem(13,x,y,z,pitch,YDIM,ZDIM)]; // } // else if(z != 0){ // f14= fA[f_mem(9 ,x ,y ,z-1,pitch)];//fA[f_mem(14,x,y,z,pitch,YDIM,ZDIM)]; // f16= fA[f_mem(13,x ,y+1 
,z-1,pitch)];//fA[f_mem(16,x,y,z,pitch,YDIM,ZDIM)]; // f18= fA[f_mem(11,x ,y-1 ,z-1,pitch)];//fA[f_mem(18,x,y,z,pitch,YDIM,ZDIM)]; // } // //// f2 = fA[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//fA[f_mem(2 ,x,y,z,pitch,YDIM,ZDIM)]; //// f4 = fA[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(4 ,x,y,z,pitch,YDIM,ZDIM)]; //// f9 = fA[f_mem(14,x ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(9 ,x,y,z,pitch,YDIM,ZDIM)]; //// f11= fA[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(11,x,y,z,pitch,YDIM,ZDIM)]; //// f13= fA[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(13,x,y,z,pitch,YDIM,ZDIM)]; //// f14= fA[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(14,x,y,z,pitch,YDIM,ZDIM)]; //// f16= fA[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(16,x,y,z,pitch,YDIM,ZDIM)]; //// f18= fA[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(18,x,y,z,pitch,YDIM,ZDIM)]; // //// f1 = fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; //// f3 = fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(3 ,x,y,z,pitch,YDIM,ZDIM)]; //// f5 = fA[f_mem(7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//fA[f_mem(5 ,x,y,z,pitch,YDIM,ZDIM)]; //// f7 = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(7 ,x,y,z,pitch,YDIM,ZDIM)]; //// f6 = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,YDIM),z ,pitch,YDIM,ZDIM)];//fA[f_mem(6 ,x,y,z,pitch,YDIM,ZDIM)]; //// f8 = fA[f_mem(6 ,dmin(x+1,XDIM),dmax(y-1) ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(8 ,x,y,z,pitch,YDIM,ZDIM)]; //// f10= fA[f_mem(17,dmin(x+1,XDIM),y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(10,x,y,z,pitch,YDIM,ZDIM)]; //// f12= fA[f_mem(15,dmax(x-1) ,y ,dmin(z+1,ZDIM) ,pitch,YDIM,ZDIM)];//fA[f_mem(12,x,y,z,pitch,YDIM,ZDIM)]; //// f15= fA[f_mem(12,dmin(x+1,XDIM),y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(15,x,y,z,pitch,YDIM,ZDIM)]; //// f17= fA[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch,YDIM,ZDIM)];//fA[f_mem(17,x,y,z,pitch,YDIM,ZDIM)]; // // if(threadIdx.x != XDIM-1){ // f1 = f3_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f5 = f7_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f8 = f6_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f10=f17_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f15=f12_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // } // else if(threadIdx.x != 0){ // f3 = f1_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f7 = f5_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f6 = f8_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f17=f10_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // f12=f15_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // } // // //// f1 = f3_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; //// f3 = f1_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 
,x,y,z,pitch,YDIM,ZDIM)]; //// f5 = f7_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; //// f7 = f5_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; //// f6 = f8_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; //// f8 = f6_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; //// f10=f17_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; //// f17=f10_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; //// f12=f15_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; //// f15=f12_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch,YDIM,ZDIM)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; // // fB[f_mem(1 ,x,y,z,pitch)] = f1 ; // fB[f_mem(2 ,x,y,z,pitch)] = f2 ; // fB[f_mem(3 ,x,y,z,pitch)] = f3 ; // fB[f_mem(4 ,x,y,z,pitch)] = f4 ; // fB[f_mem(5 ,x,y,z,pitch)] = f5 ; // fB[f_mem(6 ,x,y,z,pitch)] = f6 ; // fB[f_mem(7 ,x,y,z,pitch)] = f7 ; // fB[f_mem(8 ,x,y,z,pitch)] = f8 ; // fB[f_mem(9 ,x,y,z,pitch)] = f9 ; // fB[f_mem(10,x,y,z,pitch)] = f10; // fB[f_mem(11,x,y,z,pitch)] = f11; // fB[f_mem(12,x,y,z,pitch)] = f12; // fB[f_mem(13,x,y,z,pitch)] = f13; // fB[f_mem(14,x,y,z,pitch)] = f14; // fB[f_mem(15,x,y,z,pitch)] = f15; // fB[f_mem(16,x,y,z,pitch)] = f16; // fB[f_mem(17,x,y,z,pitch)] = f17; // fB[f_mem(18,x,y,z,pitch)] = f18; // //// fB[j+pitch*YDIM*ZDIM*1 ] = f1 ; //// fB[j+pitch*YDIM*ZDIM*2 ] = f2 ; //// fB[j+pitch*YDIM*ZDIM*3 ] = f3 ; //// fB[j+pitch*YDIM*ZDIM*4 ] = f4 ; //// fB[j+pitch*YDIM*ZDIM*5 ] = f5 ; //// fB[j+pitch*YDIM*ZDIM*6 ] = f6 ; //// fB[j+pitch*YDIM*ZDIM*7 ] = f7 ; //// fB[j+pitch*YDIM*ZDIM*8 ] = f8 ; //// fB[j+pitch*YDIM*ZDIM*9 ] = f9 ; //// fB[j+pitch*YDIM*ZDIM*10] = f10; //// fB[j+pitch*YDIM*ZDIM*11] = f11; //// fB[j+pitch*YDIM*ZDIM*12] = f12; //// fB[j+pitch*YDIM*ZDIM*13] = f13; //// fB[j+pitch*YDIM*ZDIM*14] = f14; //// fB[j+pitch*YDIM*ZDIM*15] = f15; //// fB[j+pitch*YDIM*ZDIM*16] = f16; //// fB[j+pitch*YDIM*ZDIM*17] = f17; //// fB[j+pitch*YDIM*ZDIM*18] = f18; // // } // else{ // // // float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // // f0 = fA[j]; // //f0 = fA[f_mem(0 ,x ,y ,z ,pitch)]; // if(y != 0){ // f2 = fA[f_mem(2 ,x ,y-1,z ,pitch)]; // } // else if(y != YDIM-1){ // f4 = fA[f_mem(4 ,x ,y+1,z ,pitch)]; // } // if(z != ZDIM-1){ // f14= fA[f_mem(14,x ,y ,z+1,pitch)]; // f16= fA[f_mem(16,x ,y-1,z+1,pitch)]; // f18= fA[f_mem(18,x ,y+1,z+1,pitch)]; // } // else if(z != 0){ // f9 = fA[f_mem(9 ,x ,y ,z-1,pitch)]; // f11= fA[f_mem(11,x ,y-1,z-1,pitch)]; // f13= fA[f_mem(13,x ,y+1,z-1,pitch)]; // } // // if(threadIdx.x != XDIM-1){ // f3 = f3_s[threadIdx.x+1];//fA[f_mem(3 ,x+1,y ,z ,pitch,YDIM,ZDIM)]; // f6 = f6_s[threadIdx.x+1];//fA[f_mem(6 ,x+1,y-1,z ,pitch,YDIM,ZDIM)]; // f7 = f7_s[threadIdx.x+1];//fA[f_mem(7 ,x+1,y+1,z ,pitch,YDIM,ZDIM)]; // f12=f12_s[threadIdx.x+1];//fA[f_mem(12,x+1,y ,z-1,pitch,YDIM,ZDIM)]; // f17=f17_s[threadIdx.x+1];//fA[f_mem(17,x+1,y ,z+1,pitch,YDIM,ZDIM)]; // } // else if(threadIdx.x != 0){ // f1 = f1_s[threadIdx.x-1];//fA[f_mem(1 ,x-1,y ,z ,pitch,YDIM,ZDIM)]; // f5 = f5_s[threadIdx.x-1];//fA[f_mem(5 ,x-1,y-1,z ,pitch,YDIM,ZDIM)]; // f8 = 
f8_s[threadIdx.x-1];//fA[f_mem(8 ,x-1,y+1,z ,pitch,YDIM,ZDIM)]; // f10=f10_s[threadIdx.x-1];//fA[f_mem(10,x-1,y ,z-1,pitch,YDIM,ZDIM)]; // f15=f15_s[threadIdx.x-1];//fA[f_mem(15,x-1,y ,z+1,pitch,YDIM,ZDIM)]; // } // // //// f0 = fA[j]; //// f2 = fA[f_mem(2 ,x ,y-1,z ,pitch,YDIM,ZDIM)]; //// f4 = fA[f_mem(4 ,x ,y+1,z ,pitch,YDIM,ZDIM)]; //// f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,YDIM,ZDIM)]; //// f11= fA[f_mem(11,x ,y-1,z-1,pitch,YDIM,ZDIM)]; //// f13= fA[f_mem(13,x ,y+1,z-1,pitch,YDIM,ZDIM)]; //// f14= fA[f_mem(14,x ,y ,z+1,pitch,YDIM,ZDIM)]; //// f16= fA[f_mem(16,x ,y-1,z+1,pitch,YDIM,ZDIM)]; //// f18= fA[f_mem(18,x ,y+1,z+1,pitch,YDIM,ZDIM)]; //// //// f1 = f1_s[dmax(threadIdx.x-1 )];//fA[f_mem(1 ,x-1,y ,z ,pitch,YDIM,ZDIM)]; //// f3 = f3_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(3 ,x+1,y ,z ,pitch,YDIM,ZDIM)]; //// f5 = f5_s[dmax(threadIdx.x-1 )];//fA[f_mem(5 ,x-1,y-1,z ,pitch,YDIM,ZDIM)]; //// f6 = f6_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(6 ,x+1,y-1,z ,pitch,YDIM,ZDIM)]; //// f7 = f7_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(7 ,x+1,y+1,z ,pitch,YDIM,ZDIM)]; //// f8 = f8_s[dmax(threadIdx.x-1 )];//fA[f_mem(8 ,x-1,y+1,z ,pitch,YDIM,ZDIM)]; //// f10=f10_s[dmax(threadIdx.x-1 )];//fA[f_mem(10,x-1,y ,z-1,pitch,YDIM,ZDIM)]; //// f12=f12_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(12,x+1,y ,z-1,pitch,YDIM,ZDIM)]; //// f15=f15_s[dmax(threadIdx.x-1 )];//fA[f_mem(15,x-1,y ,z+1,pitch,YDIM,ZDIM)]; //// f17=f17_s[dmin(threadIdx.x+1,XDIM)];//fA[f_mem(17,x+1,y ,z+1,pitch,YDIM,ZDIM)]; // // if(im == 3)//DirichletWest // { // if(y == 0){ // f2 = f4; // f6 = f7; // f11 = f13; // f16 = f18; // } // else if(y == YDIM-1){ // f4 = f2; // f7 = f6; // f13 = f11; // f18 = f16; // } // if(z == 0){ // f9 = f14; // f10 = f15; // f11 = f16; // f12 = f17; // f13 = f18; // } // else if(z == ZDIM-1){ // f14 = f9; // f15 = f10; // f16 = f11; // f17 = f12; // f18 = f13; // } //// float fInt1,fInt2;//,fDiff; // float u,v,w;//,rho; // u = 0.0f;//*PoisProf(zcoord)*1.5; // v = UMAX;//0.0; // w = 0.0f; // //// fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18; //// fInt2 = f3+f6+f7+f12+f17; //// rho = u+(fInt1+2.0f*fInt2); //D2Q9i // //// rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i // //// float usqr = u*u+v*v+w*w; // f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; // f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); // f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); // f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); // f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); // //// f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; //// f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); //// f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); //// f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); //// f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); // } // // //mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); // 
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); // // // fB[f_mem(0 ,x,y,z,pitch)] = f0 ; // fB[f_mem(1 ,x,y,z,pitch)] = f1 ; // fB[f_mem(2 ,x,y,z,pitch)] = f2 ; // fB[f_mem(3 ,x,y,z,pitch)] = f3 ; // fB[f_mem(4 ,x,y,z,pitch)] = f4 ; // fB[f_mem(5 ,x,y,z,pitch)] = f5 ; // fB[f_mem(6 ,x,y,z,pitch)] = f6 ; // fB[f_mem(7 ,x,y,z,pitch)] = f7 ; // fB[f_mem(8 ,x,y,z,pitch)] = f8 ; // fB[f_mem(9 ,x,y,z,pitch)] = f9 ; // fB[f_mem(10,x,y,z,pitch)] = f10; // fB[f_mem(11,x,y,z,pitch)] = f11; // fB[f_mem(12,x,y,z,pitch)] = f12; // fB[f_mem(13,x,y,z,pitch)] = f13; // fB[f_mem(14,x,y,z,pitch)] = f14; // fB[f_mem(15,x,y,z,pitch)] = f15; // fB[f_mem(16,x,y,z,pitch)] = f16; // fB[f_mem(17,x,y,z,pitch)] = f17; // fB[f_mem(18,x,y,z,pitch)] = f18; // } //} __global__ void mrt_d_single(float* fA, float* fB, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; // int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) // f1out[j] = tex2D(texRef_f2A,x,y+h*z); // int i = x+y*blockDim.x*gridDim.x; //float u,v,w,rho;//,usqr; int im = ImageFcn(x,y,z); if(im == 1){//BB float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f1 = fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch)];//fA[f_mem(1 ,x,y,z,pitch,YDIM,ZDIM)]; f2 = fA[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch)];//fA[f_mem(2 ,x,y,z,pitch,YDIM,ZDIM)]; f3 = fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch)];//fA[f_mem(3 ,x,y,z,pitch,YDIM,ZDIM)]; f4 = fA[f_mem(2 ,x ,dmax(y-1) ,z ,pitch)];//fA[f_mem(4 ,x,y,z,pitch,YDIM,ZDIM)]; f5 = fA[f_mem(7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),z ,pitch)];//fA[f_mem(5 ,x,y,z,pitch,YDIM,ZDIM)]; f7 = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch)];//fA[f_mem(7 ,x,y,z,pitch,YDIM,ZDIM)]; f6 = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,YDIM),z ,pitch)];//fA[f_mem(6 ,x,y,z,pitch,YDIM,ZDIM)]; f8 = fA[f_mem(6 ,dmin(x+1,XDIM),dmax(y-1) ,z ,pitch)];//fA[f_mem(8 ,x,y,z,pitch,YDIM,ZDIM)]; f9 = fA[f_mem(14,x ,y ,dmin(z+1,ZDIM),pitch)];//fA[f_mem(9 ,x,y,z,pitch,YDIM,ZDIM)]; f10= fA[f_mem(17,dmin(x+1,XDIM),y ,dmin(z+1,ZDIM) ,pitch)];//fA[f_mem(10,x,y,z,pitch,YDIM,ZDIM)]; f11= fA[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM) ,pitch)];//fA[f_mem(11,x,y,z,pitch,YDIM,ZDIM)]; f12= fA[f_mem(15,dmax(x-1) ,y ,dmin(z+1,ZDIM),pitch)];//fA[f_mem(12,x,y,z,pitch,YDIM,ZDIM)]; f13= fA[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM),pitch)];//fA[f_mem(13,x,y,z,pitch,YDIM,ZDIM)]; f14= fA[f_mem(9 ,x ,y ,dmax(z-1) ,pitch)];//fA[f_mem(14,x,y,z,pitch,YDIM,ZDIM)]; f15= fA[f_mem(12,dmin(x+1,XDIM),y ,dmax(z-1) ,pitch)];//fA[f_mem(15,x,y,z,pitch,YDIM,ZDIM)]; f16= fA[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch)];//fA[f_mem(16,x,y,z,pitch,YDIM,ZDIM)]; f17= fA[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch)];//fA[f_mem(17,x,y,z,pitch,YDIM,ZDIM)]; f18= fA[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch)];//fA[f_mem(18,x,y,z,pitch,YDIM,ZDIM)]; fB[f_mem(1 ,x,y,z,pitch)] = f1 ; fB[f_mem(2 ,x,y,z,pitch)] = f2 ; fB[f_mem(3 ,x,y,z,pitch)] = f3 ; fB[f_mem(4 ,x,y,z,pitch)] = f4 ; fB[f_mem(5 ,x,y,z,pitch)] = f5 ; fB[f_mem(6 ,x,y,z,pitch)] = f6 ; fB[f_mem(7 ,x,y,z,pitch)] = f7 ; fB[f_mem(8 ,x,y,z,pitch)] = f8 ; fB[f_mem(9 ,x,y,z,pitch)] = f9 ; fB[f_mem(10,x,y,z,pitch)] = f10; fB[f_mem(11,x,y,z,pitch)] = f11; fB[f_mem(12,x,y,z,pitch)] = f12; fB[f_mem(13,x,y,z,pitch)] = f13; fB[f_mem(14,x,y,z,pitch)] = f14; fB[f_mem(15,x,y,z,pitch)] = f15; fB[f_mem(16,x,y,z,pitch)] = f16; fB[f_mem(17,x,y,z,pitch)] = 
f17; fB[f_mem(18,x,y,z,pitch)] = f18; // fB[j+pitch*YDIM*ZDIM*1 ] = f1 ; // fB[j+pitch*YDIM*ZDIM*2 ] = f2 ; // fB[j+pitch*YDIM*ZDIM*3 ] = f3 ; // fB[j+pitch*YDIM*ZDIM*4 ] = f4 ; // fB[j+pitch*YDIM*ZDIM*5 ] = f5 ; // fB[j+pitch*YDIM*ZDIM*6 ] = f6 ; // fB[j+pitch*YDIM*ZDIM*7 ] = f7 ; // fB[j+pitch*YDIM*ZDIM*8 ] = f8 ; // fB[j+pitch*YDIM*ZDIM*9 ] = f9 ; // fB[j+pitch*YDIM*ZDIM*10] = f10; // fB[j+pitch*YDIM*ZDIM*11] = f11; // fB[j+pitch*YDIM*ZDIM*12] = f12; // fB[j+pitch*YDIM*ZDIM*13] = f13; // fB[j+pitch*YDIM*ZDIM*14] = f14; // fB[j+pitch*YDIM*ZDIM*15] = f15; // fB[j+pitch*YDIM*ZDIM*16] = f16; // fB[j+pitch*YDIM*ZDIM*17] = f17; // fB[j+pitch*YDIM*ZDIM*18] = f18; } else{ float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fA[j]; f1 = fA[f_mem(1 ,x-1,y ,z ,pitch)]; f2 = fA[f_mem(2 ,x ,y-1,z ,pitch)]; f3 = fA[f_mem(3 ,x+1,y ,z ,pitch)]; f4 = fA[f_mem(4 ,x ,y+1,z ,pitch)]; f5 = fA[f_mem(5 ,x-1,y-1,z ,pitch)]; f6 = fA[f_mem(6 ,x+1,y-1,z ,pitch)]; f7 = fA[f_mem(7 ,x+1,y+1,z ,pitch)]; f8 = fA[f_mem(8 ,x-1,y+1,z ,pitch)]; f9 = fA[f_mem(9 ,x ,y ,z-1,pitch)]; f10= fA[f_mem(10,x-1,y ,z-1,pitch)]; f11= fA[f_mem(11,x ,y-1,z-1,pitch)]; f12= fA[f_mem(12,x+1,y ,z-1,pitch)]; f13= fA[f_mem(13,x ,y+1,z-1,pitch)]; f14= fA[f_mem(14,x ,y ,z+1,pitch)]; f15= fA[f_mem(15,x-1,y ,z+1,pitch)]; f16= fA[f_mem(16,x ,y-1,z+1,pitch)]; f17= fA[f_mem(17,x+1,y ,z+1,pitch)]; f18= fA[f_mem(18,x ,y+1,z+1,pitch)]; if(im == 3)//DirichletWest { if(y == 0){ f2 = f4; f6 = f7; f11 = f13; f16 = f18; } else if(y == YDIM-1){ f4 = f2; f7 = f6; f13 = f11; f18 = f16; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f12 = f17; f13 = f18; } if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f17 = f12; f18 = f13; } // float fInt1,fInt2;//,fDiff; float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; // fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18; // fInt2 = f3+f6+f7+f12+f17; // rho = u+(fInt1+2.0f*fInt2); //D2Q9i // rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i // float usqr = u*u+v*v+w*w; f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); // f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; // f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); // f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); // f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); // f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } //mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fB[f_mem(0 ,x,y,z,pitch)] = f0 ; fB[f_mem(1 ,x,y,z,pitch)] = f1 ; fB[f_mem(2 ,x,y,z,pitch)] = f2 ; fB[f_mem(3 ,x,y,z,pitch)] = f3 ; fB[f_mem(4 ,x,y,z,pitch)] = f4 ; fB[f_mem(5 
,x,y,z,pitch)] = f5 ; fB[f_mem(6 ,x,y,z,pitch)] = f6 ; fB[f_mem(7 ,x,y,z,pitch)] = f7 ; fB[f_mem(8 ,x,y,z,pitch)] = f8 ; fB[f_mem(9 ,x,y,z,pitch)] = f9 ; fB[f_mem(10,x,y,z,pitch)] = f10; fB[f_mem(11,x,y,z,pitch)] = f11; fB[f_mem(12,x,y,z,pitch)] = f12; fB[f_mem(13,x,y,z,pitch)] = f13; fB[f_mem(14,x,y,z,pitch)] = f14; fB[f_mem(15,x,y,z,pitch)] = f15; fB[f_mem(16,x,y,z,pitch)] = f16; fB[f_mem(17,x,y,z,pitch)] = f17; fB[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void initialize_single(float *f, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) float u,v,w,rho,usqr; rho = 1.f; u = 0.0f; v = 0.0f; w = 0.0f; //if(x == 3 ) u = 0.1f; usqr = u*u+v*v+w*w; f[j+0 *pitch*YDIM*ZDIM]= 1.0f/3.0f*(rho-1.5f*usqr); f[j+1 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); f[j+2 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); f[j+3 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); f[j+4 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); f[j+5 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr); f[j+6 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f[j+7 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f[j+8 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr); f[j+9 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); f[j+10*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr); f[j+11*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr); f[j+12*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); f[j+13*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr); f[j+14*pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); f[j+15*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr); f[j+16*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr); f[j+17*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f[j+18*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); } __global__ void initialize(float* f0, float* f1, float* f2, float* f3, float* f4, float* f5, float* f6, float* f7, float* f8, float* f9, float* f10, float* f11, float* f12, float* f13, float* f14, float* f15, float* f16, float* f17, float* f18, size_t pitch)//pitch in elements //__global__ void initialize(void** f0in, void** f1in, // int w, int h, int pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; // int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) // f1out[j] = tex2D(texRef_f2A,x,y+h*z); float u,v,w,rho,feq,usqr; rho = 1.0f; u = 0.0f; v = 0.0f; w = 0.0f; //if(x == 3 ) u = 0.1f; usqr = u*u+v*v+w*w; feq = 1.0f/3.0f*(rho-1.5f*usqr); f0[j] = feq; feq = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); f1[j] = feq; feq = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); f2[j] = feq; feq = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); f3[j] = feq; feq = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); f4[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr); f5[j] = feq; feq = 
1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f6[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f7[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr); f8[j] = feq; feq = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); f9[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr); f10[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr); f11[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); f12[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr); f13[j] = feq; feq = 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); f14[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr); f15[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr); f16[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f17[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); f18[j] = feq; } int main(int argc, char *argv[]) { // float *f0_h, *f1_h, *f2_h, *f3_h, *f4_h, *f5_h, *f6_h, *f7_h, *f8_h, *f9_h; // float *f10_h, *f11_h, *f12_h, *f13_h, *f14_h, *f15_h, *f16_h, *f17_h, *f18_h; // float *f0_dA, *f1_dA, *f2_dA, *f3_dA, *f4_dA, *f5_dA, *f6_dA, *f7_dA, *f8_dA, *f9_dA; // float *f10_dA, *f11_dA, *f12_dA, *f13_dA, *f14_dA, *f15_dA, *f16_dA, *f17_dA, *f18_dA; // float *f0_dB, *f1_dB, *f2_dB, *f3_dB, *f4_dB, *f5_dB, *f6_dB, *f7_dB, *f8_dB, *f9_dB; // float *f10_dB, *f11_dB, *f12_dB, *f13_dB, *f14_dB, *f15_dB, *f16_dB, *f17_dB, *f18_dB; int *image_d, *image_h; //cudaPitchedPtr f0_d; ofstream output; output.open ("LBM1_out.dat"); size_t memsize, memsize_int; size_t pitch; int i, n, nBlocks; float omega, CharLength; CharLength = XDIM-2.f; omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f); cout<<"omega: "<<omega<<endl; cout<<"blocksize: "<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl; cout<<"grid: "<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl; cout<<"TMAX: "<<TMAX<<endl; nBlocks = (XDIM/BLOCKSIZEX+XDIM%BLOCKSIZEX)*(YDIM/BLOCKSIZEY+YDIM%BLOCKSIZEY) *(ZDIM/BLOCKSIZEZ+ZDIM%BLOCKSIZEZ); int B = BLOCKSIZEX*BLOCKSIZEY*BLOCKSIZEZ; n = nBlocks*B;//block*dimx*dimy cout<<"nBlocks:"<<nBlocks<<endl; dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ); dim3 grid(XDIM/BLOCKSIZEX,YDIM/BLOCKSIZEY,ZDIM/BLOCKSIZEZ); memsize = n*sizeof(float); memsize_int = n*sizeof(int); cudaExtent extent = make_cudaExtent(XDIM*sizeof(float),YDIM,ZDIM); image_h = (int *)malloc(memsize_int); float *fA_h,*fA_d,*fB_d; fA_h = (float *)malloc(memsize*19); cudaMallocPitch((void **) &fA_d, &pitch, XDIM*sizeof(float), YDIM*ZDIM*19); cudaMallocPitch((void **) &fB_d, &pitch, XDIM*sizeof(float), YDIM*ZDIM*19); cudaMalloc((void **) &image_d, memsize_int); cout<<pitch<<endl; size_t pitch_elements = pitch/sizeof(float); cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); for (i = 0; i < n*19; i++) { fA_h[i] = i; } for (i = 0; i < n; i++) { int x = i%XDIM; int y = (i/XDIM)%YDIM; int z = (i/XDIM)/YDIM; fA_h[i] = 0; image_h[i] = 0; if(x < 1) image_h[i] = 1;//DirichletWest if(x > XDIM-2) image_h[i] = 1;//BB if(y < 1) image_h[i] = 1;//BB if(y > YDIM-2) image_h[i] = 1;//BB if(z < 1) image_h[i] = 1;//DirichletWest if(z > ZDIM-2) image_h[i] = 1;//BB } cudaMemcpy(image_d, image_h, memsize_int, cudaMemcpyHostToDevice); if(true)//texture settings { texRef_f0B.normalized = false; texRef_f1B.normalized = false; texRef_f2B.normalized = false; texRef_f3B.normalized = false; texRef_f4B.normalized = false; texRef_f5B.normalized = false; texRef_f6B.normalized = false; texRef_f7B.normalized = 
false; texRef_f8B.normalized = false; texRef_f9B.normalized = false; texRef_f10B.normalized = false; texRef_f11B.normalized = false; texRef_f12B.normalized = false; texRef_f13B.normalized = false; texRef_f14B.normalized = false; texRef_f15B.normalized = false; texRef_f16B.normalized = false; texRef_f17B.normalized = false; texRef_f18B.normalized = false; texRef_f0B.filterMode = cudaFilterModePoint; texRef_f1B.filterMode = cudaFilterModePoint; texRef_f2B.filterMode = cudaFilterModePoint; texRef_f3B.filterMode = cudaFilterModePoint; texRef_f4B.filterMode = cudaFilterModePoint; texRef_f5B.filterMode = cudaFilterModePoint; texRef_f6B.filterMode = cudaFilterModePoint; texRef_f7B.filterMode = cudaFilterModePoint; texRef_f8B.filterMode = cudaFilterModePoint; texRef_f9B.filterMode = cudaFilterModePoint; texRef_f10B.filterMode = cudaFilterModePoint; texRef_f11B.filterMode = cudaFilterModePoint; texRef_f12B.filterMode = cudaFilterModePoint; texRef_f13B.filterMode = cudaFilterModePoint; texRef_f14B.filterMode = cudaFilterModePoint; texRef_f15B.filterMode = cudaFilterModePoint; texRef_f16B.filterMode = cudaFilterModePoint; texRef_f17B.filterMode = cudaFilterModePoint; texRef_f18B.filterMode = cudaFilterModePoint; texRef_f0A.normalized = false; texRef_f1A.normalized = false; texRef_f2A.normalized = false; texRef_f3A.normalized = false; texRef_f4A.normalized = false; texRef_f5A.normalized = false; texRef_f6A.normalized = false; texRef_f7A.normalized = false; texRef_f8A.normalized = false; texRef_f9A.normalized = false; texRef_f10A.normalized = false; texRef_f11A.normalized = false; texRef_f12A.normalized = false; texRef_f13A.normalized = false; texRef_f14A.normalized = false; texRef_f15A.normalized = false; texRef_f16A.normalized = false; texRef_f17A.normalized = false; texRef_f18A.normalized = false; texRef_f0A.filterMode = cudaFilterModePoint; texRef_f1A.filterMode = cudaFilterModePoint; texRef_f2A.filterMode = cudaFilterModePoint; texRef_f3A.filterMode = cudaFilterModePoint; texRef_f4A.filterMode = cudaFilterModePoint; texRef_f5A.filterMode = cudaFilterModePoint; texRef_f6A.filterMode = cudaFilterModePoint; texRef_f7A.filterMode = cudaFilterModePoint; texRef_f8A.filterMode = cudaFilterModePoint; texRef_f9A.filterMode = cudaFilterModePoint; texRef_f10A.filterMode = cudaFilterModePoint; texRef_f11A.filterMode = cudaFilterModePoint; texRef_f12A.filterMode = cudaFilterModePoint; texRef_f13A.filterMode = cudaFilterModePoint; texRef_f14A.filterMode = cudaFilterModePoint; texRef_f15A.filterMode = cudaFilterModePoint; texRef_f16A.filterMode = cudaFilterModePoint; texRef_f17A.filterMode = cudaFilterModePoint; texRef_f18A.filterMode = cudaFilterModePoint; } cudaMemcpy2D(fA_d ,pitch,fA_h ,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM*19,cudaMemcpyHostToDevice); cudaMemcpy2D(fB_d ,pitch,fA_h ,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM*19,cudaMemcpyHostToDevice); for (i = 0; i < n*19; i++) { fA_h[i] = 0; } if(true)//bind texture { cudaBindTexture2D(0,&texRef_f0A, fA_d ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f1A, fA_d+pitch_elements*YDIM*ZDIM ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f2A, fA_d+pitch_elements*YDIM*ZDIM*2 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f3A, fA_d+pitch_elements*YDIM*ZDIM*3 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f4A, fA_d+pitch_elements*YDIM*ZDIM*4 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f5A, fA_d+pitch_elements*YDIM*ZDIM*5 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f6A, 
fA_d+pitch_elements*YDIM*ZDIM*6 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f7A, fA_d+pitch_elements*YDIM*ZDIM*7 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f8A, fA_d+pitch_elements*YDIM*ZDIM*8 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f9A, fA_d+pitch_elements*YDIM*ZDIM*9 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f10A,fA_d+pitch_elements*YDIM*ZDIM*10,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f11A,fA_d+pitch_elements*YDIM*ZDIM*11,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f12A,fA_d+pitch_elements*YDIM*ZDIM*12,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f13A,fA_d+pitch_elements*YDIM*ZDIM*13,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f14A,fA_d+pitch_elements*YDIM*ZDIM*14,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f15A,fA_d+pitch_elements*YDIM*ZDIM*15,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f16A,fA_d+pitch_elements*YDIM*ZDIM*16,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f17A,fA_d+pitch_elements*YDIM*ZDIM*17,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f18A,fA_d+pitch_elements*YDIM*ZDIM*18,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f0B, fB_d ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f1B, fB_d+pitch_elements*YDIM*ZDIM ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f2B, fB_d+pitch_elements*YDIM*ZDIM*2 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f3B, fB_d+pitch_elements*YDIM*ZDIM*3 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f4B, fB_d+pitch_elements*YDIM*ZDIM*4 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f5B, fB_d+pitch_elements*YDIM*ZDIM*5 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f6B, fB_d+pitch_elements*YDIM*ZDIM*6 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f7B, fB_d+pitch_elements*YDIM*ZDIM*7 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f8B, fB_d+pitch_elements*YDIM*ZDIM*8 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f9B, fB_d+pitch_elements*YDIM*ZDIM*9 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f10B,fB_d+pitch_elements*YDIM*ZDIM*10,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f11B,fB_d+pitch_elements*YDIM*ZDIM*11,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f12B,fB_d+pitch_elements*YDIM*ZDIM*12,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f13B,fB_d+pitch_elements*YDIM*ZDIM*13,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f14B,fB_d+pitch_elements*YDIM*ZDIM*14,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f15B,fB_d+pitch_elements*YDIM*ZDIM*15,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f16B,fB_d+pitch_elements*YDIM*ZDIM*16,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f17B,fB_d+pitch_elements*YDIM*ZDIM*17,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f18B,fB_d+pitch_elements*YDIM*ZDIM*18,&desc,XDIM,YDIM*ZDIM,pitch); } // initialize<<<grid, threads>>>(f0_dA.ptr, f1_dA.ptr, f2_dA.ptr, f3_dA.ptr, f4_dA.ptr, f5_dA.ptr, f6_dA.ptr, f7_dA.ptr, f8_dA.ptr, f9_dA.ptr, // f10_dA.ptr, f11_dA.ptr, f12_dA.ptr, f13_dA.ptr, f14_dA.ptr, f15_dA.ptr, f16_dA.ptr, f17_dA.ptr, f18_dA.ptr, // XDIM,YDIM,pitch); // initialize<<<grid, threads>>>(f0_dA, f1_dA, f2_dA, f3_dA, f4_dA, f5_dA, f6_dA, f7_dA, f8_dA, f9_dA, // f10_dA, f11_dA, f12_dA, f13_dA, f14_dA, f15_dA, f16_dA, f17_dA, f18_dA, // XDIM,YDIM,pitch_elements); initialize_single<<<grid, threads>>>(fA_d,pitch_elements); // 
cudaFuncSetCacheConfig(mrt_d_single,cudaFuncCachePreferL1); struct timeval tdr0,tdr1; double restime; cudaDeviceSynchronize(); gettimeofday (&tdr0,NULL); for(int t = 0; t<TMAX; t=t+2){ //for(int t = 0; t<TMAX; t=t+1){ if(METHOD == "SINGLE"){ mrt_d_single<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements); mrt_d_single<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements); } if(METHOD == "HYB"){ mrt_d_hybAB<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements); mrt_d_hybBA<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements); } if(METHOD == "TEXT"){ mrt_d_textAB<<<grid, threads>>>(fA_d,fB_d,image_d,omega,pitch_elements); mrt_d_textBA<<<grid, threads>>>(fB_d,fA_d,image_d,omega,pitch_elements); } if(METHOD == "SHARED"){ mrt_d_shared<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements); mrt_d_shared<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements); } // simple_copy<<<grid, threads>>>(fA_d,fB_d,image_d,omega,UMAX,XDIM,YDIM,ZDIM,pitch_elements); // simple_copy<<<grid, threads>>>(fB_d,fA_d,image_d,omega,UMAX,XDIM,YDIM,ZDIM,pitch_elements); if(t%1000 == 0 && t>0) cout<<"finished "<<t<<" timesteps\n"; } cudaDeviceSynchronize(); gettimeofday (&tdr1,NULL); timeval_subtract (&restime, &tdr1, &tdr0); cout<<"Time taken for main kernel: "<<restime<<" (" <<double(XDIM*YDIM*ZDIM*double(TMAX/1000000.f))/restime<<"MLUPS)"<<endl; cout<<XDIM<<","<<YDIM<<","<<ZDIM<<","<<TMAX<<","<<restime<<endl; // copytest<<<grid, threads>>>(f10_dA,test_d,XDIM,YDIM,ZDIM); //copytest<<<grid, threads>>>(test_d); //copytest<<<grid, threads>>>(image_d); cudaUnbindTexture(texRef_f0A); cudaUnbindTexture(texRef_f1A); cudaUnbindTexture(texRef_f2A); cudaUnbindTexture(texRef_f3A); cudaUnbindTexture(texRef_f4A); cudaUnbindTexture(texRef_f5A); cudaUnbindTexture(texRef_f6A); cudaUnbindTexture(texRef_f7A); cudaUnbindTexture(texRef_f8A); cudaUnbindTexture(texRef_f9A); cudaUnbindTexture(texRef_f10A); cudaUnbindTexture(texRef_f11A); cudaUnbindTexture(texRef_f12A); cudaUnbindTexture(texRef_f13A); cudaUnbindTexture(texRef_f14A); cudaUnbindTexture(texRef_f15A); cudaUnbindTexture(texRef_f16A); cudaUnbindTexture(texRef_f17A); cudaUnbindTexture(texRef_f18A); cudaUnbindTexture(texRef_f0B); cudaUnbindTexture(texRef_f1B); cudaUnbindTexture(texRef_f2B); cudaUnbindTexture(texRef_f3B); cudaUnbindTexture(texRef_f4B); cudaUnbindTexture(texRef_f5B); cudaUnbindTexture(texRef_f6B); cudaUnbindTexture(texRef_f7B); cudaUnbindTexture(texRef_f8B); cudaUnbindTexture(texRef_f9B); cudaUnbindTexture(texRef_f10B); cudaUnbindTexture(texRef_f11B); cudaUnbindTexture(texRef_f12B); cudaUnbindTexture(texRef_f13B); cudaUnbindTexture(texRef_f14B); cudaUnbindTexture(texRef_f15B); cudaUnbindTexture(texRef_f16B); cudaUnbindTexture(texRef_f17B); cudaUnbindTexture(texRef_f18B); // cudaMemcpy2D(f0_h,XDIM*sizeof(float) , f0_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); // cudaMemcpy2D(f1_h,XDIM*sizeof(float) , f1_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); // cudaMemcpy2D(f2_h,XDIM*sizeof(float) , f2_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); // cudaMemcpy2D(f3_h,XDIM*sizeof(float) , f3_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); // cudaMemcpy2D(f4_h,XDIM*sizeof(float) , f4_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); // cudaMemcpy2D(f5_h,XDIM*sizeof(float) , f5_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); // cudaMemcpy2D(f6_h,XDIM*sizeof(float) , f6_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); // cudaMemcpy2D(f7_h,XDIM*sizeof(float) , 
f7_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); // cudaMemcpy2D(f8_h,XDIM*sizeof(float) , f8_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); // cudaMemcpy2D(f9_h,XDIM*sizeof(float) , f9_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); // cudaMemcpy2D(f10_h,XDIM*sizeof(float),f10_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); // cudaMemcpy2D(f11_h,XDIM*sizeof(float),f11_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); // cudaMemcpy2D(f12_h,XDIM*sizeof(float),f12_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); // cudaMemcpy2D(f13_h,XDIM*sizeof(float),f13_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); // cudaMemcpy2D(f14_h,XDIM*sizeof(float),f14_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); // cudaMemcpy2D(f15_h,XDIM*sizeof(float),f15_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); // cudaMemcpy2D(f16_h,XDIM*sizeof(float),f16_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); // cudaMemcpy2D(f17_h,XDIM*sizeof(float),f17_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); // cudaMemcpy2D(f18_h,XDIM*sizeof(float),f18_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); cudaMemcpy2D(fA_h,XDIM*sizeof(float),fA_d,pitch,XDIM*sizeof(float),YDIM*ZDIM*19,cudaMemcpyDeviceToHost); // cout<<"f1_h is "<<f1_h[0]<<endl; //cudaMemcpy(f0_h, f0_d.ptr, memsize, cudaMemcpyDeviceToHost); cudaMemcpy(image_h, image_d, memsize_int, cudaMemcpyDeviceToHost); // cout<<image_h[0]<<endl; // cout<<"test_d: "<<test_h[0]<<endl; // for(i = 0; i<n; i++){ // cout<<f0_h[i]<<","; // } output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\"\n"; output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM<<"\n"; int row = 0; int col = 0; int dep = 0; i = 0; float rho, u, v, w; int j; for(dep = 0; dep<ZDIM; dep++){ for(row = 0; row<YDIM; row++){ for(col = 0; col<XDIM; col++){ i = dep*XDIM*YDIM+row*XDIM+col; // rho = 0; rho = fA_h[i]; for(j = 1; j<19; j++) rho+=fA_h[i+XDIM*YDIM*ZDIM*j]; // rho = f0_h[i]+f1_h[i]+f2_h[i]+f3_h[i]+f4_h[i]+f5_h[i]+f6_h[i]+f7_h[i]+f8_h[i]+f9_h[i]+ // f10_h[i]+f11_h[i]+f12_h[i]+f13_h[i]+f14_h[i]+f15_h[i]+f16_h[i]+f17_h[i]+f18_h[i]; u = fA_h[i+XDIM*YDIM*ZDIM*1]-fA_h[i+XDIM*YDIM*ZDIM*3]+fA_h[i+XDIM*YDIM*ZDIM*5]-fA_h[i+XDIM*YDIM*ZDIM*6]- fA_h[i+XDIM*YDIM*ZDIM*7]+fA_h[i+XDIM*YDIM*ZDIM*8]+fA_h[i+XDIM*YDIM*ZDIM*10]-fA_h[i+XDIM*YDIM*ZDIM*12] +fA_h[i+XDIM*YDIM*ZDIM*15]-fA_h[i+XDIM*YDIM*ZDIM*17]; v = fA_h[i+XDIM*YDIM*ZDIM*2]-fA_h[i+XDIM*YDIM*ZDIM*4]+fA_h[i+XDIM*YDIM*ZDIM*5]+fA_h[i+XDIM*YDIM*ZDIM*6]-fA_h[i+XDIM*YDIM*ZDIM*7]-fA_h[i+XDIM*YDIM*ZDIM*8]+fA_h[i+XDIM*YDIM*ZDIM*11]-fA_h[i+XDIM*YDIM*ZDIM*13]+fA_h[i+XDIM*YDIM*ZDIM*16]-fA_h[i+XDIM*YDIM*ZDIM*18]; w = fA_h[i+XDIM*YDIM*ZDIM*9]+fA_h[i+XDIM*YDIM*ZDIM*10]+fA_h[i+XDIM*YDIM*ZDIM*11]+fA_h[i+XDIM*YDIM*ZDIM*12]+fA_h[i+XDIM*YDIM*ZDIM*13]-fA_h[i+XDIM*YDIM*ZDIM*14]-fA_h[i+XDIM*YDIM*ZDIM*15]-fA_h[i+XDIM*YDIM*ZDIM*16]-fA_h[i+XDIM*YDIM*ZDIM*17]-fA_h[i+XDIM*YDIM*ZDIM*18]; output<<col<<", "<<row<<", "<<dep<<", "<<u<<","<<v<<","<<w<<","<<rho<<endl; // output<<col<<", "<<row<<", "<<dep<<", "<<u<<","<<v<<","<<fA_h[i+XDIM*YDIM*ZDIM*1]<<","<<rho<<endl; } } } output.close(); cudaFree(image_d); // cudaFree(f0_dA); // cudaFree(f1_dA); // cudaFree(f2_dA); // cudaFree(f3_dA); // cudaFree(f4_dA); // cudaFree(f5_dA); // cudaFree(f6_dA); // cudaFree(f7_dA); // cudaFree(f8_dA); // cudaFree(f9_dA); // cudaFree(f10_dA); // cudaFree(f11_dA); // cudaFree(f12_dA); // cudaFree(f13_dA); // cudaFree(f14_dA); // 
cudaFree(f15_dA); // cudaFree(f16_dA); // cudaFree(f17_dA); // cudaFree(f18_dA); // cudaFree(f0_dB); // cudaFree(f1_dB); // cudaFree(f2_dB); // cudaFree(f3_dB); // cudaFree(f4_dB); // cudaFree(f5_dB); // cudaFree(f6_dB); // cudaFree(f7_dB); // cudaFree(f8_dB); // cudaFree(f9_dB); // cudaFree(f10_dB); // cudaFree(f11_dB); // cudaFree(f12_dB); // cudaFree(f13_dB); // cudaFree(f14_dB); // cudaFree(f15_dB); // cudaFree(f16_dB); // cudaFree(f17_dB); // cudaFree(f18_dB); cudaFree(fA_d); cudaFree(fB_d); return(0); }
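The two initialization kernels above expand the D3Q19 equilibrium distribution term by term, which makes slips easy to miss: the f11 and f13 lines in both kernels pair (v+w) and (-v+w) with (u+w) inside the quadratic term, whereas the usual equilibrium squares the same projection e_i·u that appears in the linear term. The helper below is a hedged sketch, not code from the file, restating that standard form so the pattern is explicit.

#include <cuda_runtime.h>

// Hypothetical helper (not part of the file above): D3Q19 equilibrium for one
// direction with weight wi and lattice velocity (ex, ey, ez).
__host__ __device__ inline float d3q19_feq(float wi, float ex, float ey, float ez,
                                           float rho, float u, float v, float w)
{
    float eu   = ex * u + ey * v + ez * w;  // e_i . u  -- the projection that gets squared
    float usqr = u * u + v * v + w * w;     // |u|^2
    return wi * (rho + 3.0f * eu + 4.5f * eu * eu - 1.5f * usqr);
}

// Example: direction 11 in the ordering used above is e = (0,+1,+1) with weight 1/36,
// so its quadratic term is 4.5f*(v+w)*(v+w), matching the linear term 3.0f*(v+w).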
de20b8aa3147151ea8e5057745ee4495ea9ac9f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * \file magnetic_update_tests.cu * \author Robert 'Bob' Caddy ([email protected]) * \brief Tests for the magnetic update code * */ // STL Includes #include <iostream> #include <numeric> #include <string> #include <vector> // External Includes #include <gtest/gtest.h> // Include GoogleTest and related libraries/headers // Local Includes #include "../mhd/magnetic_update.h" #include "../utils/cuda_utilities.h" #include "../utils/testing_utilities.h" #ifdef MHD // ============================================================================= /*! * \brief Test fixture for tMHDUpdateMagneticField3D test suite * */ class tMHDUpdateMagneticField3D : public ::testing::Test { public: /*! * \brief Initialize and allocate all the various required variables and * arrays * */ tMHDUpdateMagneticField3D() : n_cells(nx * ny * nz), sourceGrid(n_cells * (grid_enum::num_fields)), destinationGrid(n_cells * (grid_enum::num_fields), -999.), ctElectricFields(n_cells * 3), fiducialData(n_cells * (grid_enum::num_fields), -999.), dimGrid((n_cells + TPB - 1) / TPB, 1, 1), dimBlock(TPB, 1, 1) { // Allocate device arrays CudaSafeCall(hipMalloc(&dev_sourceGrid, sourceGrid.size() * sizeof(double))); CudaSafeCall(hipMalloc(&dev_destinationGrid, destinationGrid.size() * sizeof(double))); CudaSafeCall(hipMalloc(&dev_ctElectricFields, ctElectricFields.size() * sizeof(double))); // Populate the grids with values where vector.at(i) = double(i). The // values chosen aren't that important, just that every cell has a unique // value std::iota(std::begin(sourceGrid), std::end(sourceGrid), 0.); std::iota(std::begin(ctElectricFields), std::end(ctElectricFields), sourceGrid.back() + 1); } ~tMHDUpdateMagneticField3D() = default; protected: // Initialize the test grid and other state variables size_t const nx = 3, ny = nx, nz = nx; size_t const n_cells; Real const dt = 3.2, dx = 2.5, dy = dx, dz = dx; // Launch Parameters dim3 const dimGrid; // How many blocks in the grid dim3 const dimBlock; // How many threads per block // Make sure the vector is large enough that the locations where the // magnetic field would be in the real grid are filled std::vector<double> sourceGrid; std::vector<double> destinationGrid; std::vector<double> ctElectricFields; std::vector<double> fiducialData; // device pointers double *dev_sourceGrid, *dev_destinationGrid, *dev_ctElectricFields, *dev_fiducialData; /*! 
* \brief Launch the kernel and check results * */ void runTest() { // Copy values to GPU CudaSafeCall( hipMemcpy(dev_sourceGrid, sourceGrid.data(), sourceGrid.size() * sizeof(Real), hipMemcpyHostToDevice)); CudaSafeCall(hipMemcpy(dev_destinationGrid, destinationGrid.data(), destinationGrid.size() * sizeof(Real), hipMemcpyHostToDevice)); CudaSafeCall(hipMemcpy(dev_ctElectricFields, ctElectricFields.data(), ctElectricFields.size() * sizeof(Real), hipMemcpyHostToDevice)); // Call the kernel to test hipLaunchKernelGGL(mhd::Update_Magnetic_Field_3D, dimGrid, dimBlock, 0, 0, dev_sourceGrid, dev_destinationGrid, dev_ctElectricFields, nx, ny, nz, n_cells, dt, dx, dy, dz); CudaCheckError(); // Copy test data back CudaSafeCall(hipMemcpy(destinationGrid.data(), dev_destinationGrid, destinationGrid.size() * sizeof(Real), hipMemcpyDeviceToHost)); hipDeviceSynchronize(); // Check the results for (size_t i = 0; i < fiducialData.size(); i++) { int xid, yid, zid; cuda_utilities::compute3DIndices(i, nx, ny, xid, yid, zid); testingUtilities::checkResults(fiducialData.at(i), destinationGrid.at(i), "value at i = " + std::to_string(i) + ", xid = " + std::to_string(xid) + ", yid = " + std::to_string(yid) + ", zid = " + std::to_string(zid)); } } }; // ============================================================================= // ============================================================================= TEST_F(tMHDUpdateMagneticField3D, CorrectInputExpectCorrectOutput) { // Fiducial values fiducialData.at(148) = 155.68000000000001; fiducialData.at(175) = 164.75999999999999; fiducialData.at(202) = 204.56; // Launch kernel and check results runTest(); } // ============================================================================= #endif // MHD
de20b8aa3147151ea8e5057745ee4495ea9ac9f6.cu
/*! * \file magnetic_update_tests.cu * \author Robert 'Bob' Caddy ([email protected]) * \brief Tests for the magnetic update code * */ // STL Includes #include <iostream> #include <numeric> #include <string> #include <vector> // External Includes #include <gtest/gtest.h> // Include GoogleTest and related libraries/headers // Local Includes #include "../mhd/magnetic_update.h" #include "../utils/cuda_utilities.h" #include "../utils/testing_utilities.h" #ifdef MHD // ============================================================================= /*! * \brief Test fixture for tMHDUpdateMagneticField3D test suite * */ class tMHDUpdateMagneticField3D : public ::testing::Test { public: /*! * \brief Initialize and allocate all the various required variables and * arrays * */ tMHDUpdateMagneticField3D() : n_cells(nx * ny * nz), sourceGrid(n_cells * (grid_enum::num_fields)), destinationGrid(n_cells * (grid_enum::num_fields), -999.), ctElectricFields(n_cells * 3), fiducialData(n_cells * (grid_enum::num_fields), -999.), dimGrid((n_cells + TPB - 1) / TPB, 1, 1), dimBlock(TPB, 1, 1) { // Allocate device arrays CudaSafeCall(cudaMalloc(&dev_sourceGrid, sourceGrid.size() * sizeof(double))); CudaSafeCall(cudaMalloc(&dev_destinationGrid, destinationGrid.size() * sizeof(double))); CudaSafeCall(cudaMalloc(&dev_ctElectricFields, ctElectricFields.size() * sizeof(double))); // Populate the grids with values where vector.at(i) = double(i). The // values chosen aren't that important, just that every cell has a unique // value std::iota(std::begin(sourceGrid), std::end(sourceGrid), 0.); std::iota(std::begin(ctElectricFields), std::end(ctElectricFields), sourceGrid.back() + 1); } ~tMHDUpdateMagneticField3D() = default; protected: // Initialize the test grid and other state variables size_t const nx = 3, ny = nx, nz = nx; size_t const n_cells; Real const dt = 3.2, dx = 2.5, dy = dx, dz = dx; // Launch Parameters dim3 const dimGrid; // How many blocks in the grid dim3 const dimBlock; // How many threads per block // Make sure the vector is large enough that the locations where the // magnetic field would be in the real grid are filled std::vector<double> sourceGrid; std::vector<double> destinationGrid; std::vector<double> ctElectricFields; std::vector<double> fiducialData; // device pointers double *dev_sourceGrid, *dev_destinationGrid, *dev_ctElectricFields, *dev_fiducialData; /*! 
* \brief Launch the kernel and check results * */ void runTest() { // Copy values to GPU CudaSafeCall( cudaMemcpy(dev_sourceGrid, sourceGrid.data(), sourceGrid.size() * sizeof(Real), cudaMemcpyHostToDevice)); CudaSafeCall(cudaMemcpy(dev_destinationGrid, destinationGrid.data(), destinationGrid.size() * sizeof(Real), cudaMemcpyHostToDevice)); CudaSafeCall(cudaMemcpy(dev_ctElectricFields, ctElectricFields.data(), ctElectricFields.size() * sizeof(Real), cudaMemcpyHostToDevice)); // Call the kernel to test hipLaunchKernelGGL(mhd::Update_Magnetic_Field_3D, dimGrid, dimBlock, 0, 0, dev_sourceGrid, dev_destinationGrid, dev_ctElectricFields, nx, ny, nz, n_cells, dt, dx, dy, dz); CudaCheckError(); // Copy test data back CudaSafeCall(cudaMemcpy(destinationGrid.data(), dev_destinationGrid, destinationGrid.size() * sizeof(Real), cudaMemcpyDeviceToHost)); cudaDeviceSynchronize(); // Check the results for (size_t i = 0; i < fiducialData.size(); i++) { int xid, yid, zid; cuda_utilities::compute3DIndices(i, nx, ny, xid, yid, zid); testingUtilities::checkResults(fiducialData.at(i), destinationGrid.at(i), "value at i = " + std::to_string(i) + ", xid = " + std::to_string(xid) + ", yid = " + std::to_string(yid) + ", zid = " + std::to_string(zid)); } } }; // ============================================================================= // ============================================================================= TEST_F(tMHDUpdateMagneticField3D, CorrectInputExpectCorrectOutput) { // Fiducial values fiducialData.at(148) = 155.68000000000001; fiducialData.at(175) = 164.75999999999999; fiducialData.at(202) = 204.56; // Launch kernel and check results runTest(); } // ============================================================================= #endif // MHD
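The result check above recovers (xid, yid, zid) from the flat loop index with cuda_utilities::compute3DIndices, which is declared in ../utils/cuda_utilities.h but not shown in this file. The sketch below is consistent with the x-fastest, row-major layout the test assumes; the real helper may differ in detail.

// Sketch only: with id = xid + nx*(yid + ny*zid), the inverse is division and modulo.
inline __host__ __device__ void compute3DIndices_sketch(int id, int nx, int ny,
                                                        int& xid, int& yid, int& zid)
{
    zid = id / (nx * ny);           // slowest-varying coordinate
    yid = (id % (nx * ny)) / nx;
    xid = id % nx;                  // fastest-varying coordinate
}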
2194bd2a4f9a2e0e75a67589574e9a3a8432211d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/Dispatch.h> #include <ATen/AccumulateType.h> #include <ATen/OpMathType.h> #include <ATen/hip/DeviceUtils.cuh> #include <ATen/native/ForeachUtils.h> #include <ATen/native/hip/block_reduce.cuh> #include <ATen/native/hip/ForeachFunctors.cuh> #include <ATen/native/hip/MultiTensorApply.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/_foreach_norm_native.h> #include <ATen/ops/zeros.h> #include <ATen/ops/empty.h> #endif namespace at::native { template<typename T, int NormType, int depth=1, int r_args_depth=1, int res_arg_index=0> struct LpNormFunctor { static_assert(NormType == 1 || NormType == 2, "foreach_norm supports only L1 and L2 norm"); using opmath_t = typename at::opmath_type<T>; __device__ __forceinline__ void operator() ( int chunk_size, TensorListMetadata<depth>& tl, opmath_t* output_per_tensor, const int max_chunks_per_tensor ) { int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.numel_for_tensor[tensor_loc]; T* x = (T*)tl.addresses[0][tensor_loc]; x += chunk_idx * chunk_size; n -= chunk_idx * chunk_size; __shared__ opmath_t s_vals[512]; opmath_t vals[kILP]; T r_x[kILP]; for (int i = 0; i < kILP; i++) { vals[i] = opmath_t(0); r_x[i] = T(0); } if (n % kILP == 0 && (chunk_size & kILP) == 0 && is_aligned(x)) { for (int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { // load load_store(r_x, x, 0, i_start); #pragma unroll for (int ii = 0; ii < kILP; ii++) { opmath_t next = static_cast<opmath_t>(r_x[ii]); vals[ii] += NormType == 1 ? ::abs(next) : next * next; } } } else { for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { #pragma unroll for (int ii = 0; ii < kILP; ii++) { int i = i_start + threadIdx.x + ii * blockDim.x; if (i < n && i < chunk_size) { opmath_t next = static_cast<opmath_t>(x[i]); vals[ii] += NormType == 1 ? ::abs(next) : next * next; } } } } auto val = opmath_t(0); for (int i = 0; i < kILP; i++) { val += vals[i]; } auto final = at::native::cuda_utils::BlockReduceSum(val, s_vals); if (threadIdx.x == 0) { output_per_tensor[(tl.start_tensor_this_launch + tensor_loc) * max_chunks_per_tensor + chunk_idx] = final; } } }; template<typename T, int NormType, typename opmath_t = at::opmath_type<T>> __global__ void lpnorm_cleanup( const opmath_t* output_per_tensor, T* ret_per_tensor, int max_chunks_per_tensor) { __shared__ opmath_t vals[512]; const opmath_t* output_this_tensor = output_per_tensor + blockIdx.x*max_chunks_per_tensor; opmath_t val = 0; for (int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x) { val += output_this_tensor[i]; } opmath_t final = at::native::cuda_utils::BlockReduceSum<opmath_t>(val, vals); if(threadIdx.x == 0) { ret_per_tensor[blockIdx.x] = NormType == 1 ? final : ::sqrt(final); } } // note(mkozuki): Why excluding Int and Complex from fast path // - Int: at::norm does not support. // - Complex: __shfl_down_sync does not support complex and foreach does not support functions whose inputs dtypes and output dtype are different. 
std::vector<Tensor> foreach_tensor_norm_cuda(TensorList tensors, const Scalar& ord) { double p; if (ord.isIntegral(false)) { p = ord.to<int64_t>(); } else if (ord.isFloatingPoint()) { p = ord.to<double>(); } else { AT_ERROR("foreach_tensor_norm_cuda expects ord to be integer or float"); } check_foreach_api_restrictions(tensors); const bool has_int_or_complex = std::any_of(tensors.begin(), tensors.end(), [](const auto & t) { const auto scalar_type = t.scalar_type(); return at::isIntegralType(scalar_type, /*includeBool*/true) || at::isComplexType(scalar_type); }); if (!can_use_fast_route(tensors) || has_int_or_complex || !(p == static_cast<double>(1) || p == static_cast<double>(2))) { return foreach_tensor_norm_slow(tensors, ord); } const int ntensors = tensors.size(); int max_chunks_per_tensor = -1; for (int t = 0; t < ntensors; t++) { int max_chunks_this_tensor = (tensors[t].numel() + kChunkSize - 1) / kChunkSize; if(max_chunks_this_tensor > max_chunks_per_tensor) { max_chunks_per_tensor = max_chunks_this_tensor; } } const auto options = tensors[0].options(); auto output_per_tensor = at::zeros({ntensors*max_chunks_per_tensor}, options.dtype(toOpMathType(tensors[0].scalar_type()))); auto ret_per_tensor = at::empty({ntensors}, options); auto tensor_lists = std::vector<std::vector<Tensor>>{tensors.vec()}; if (p == static_cast<double>(1)) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, tensor_lists[0][0].scalar_type(), "foreach_tensor_norm_cuda", [&]() { using opmath_t = typename at::opmath_type<scalar_t>; multi_tensor_apply<1>( tensor_lists, LpNormFunctor<scalar_t, 1>(), output_per_tensor.mutable_data_ptr<opmath_t>(), max_chunks_per_tensor); C10_HIP_KERNEL_LAUNCH_CHECK(); const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(output_per_tensor)); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( lpnorm_cleanup<scalar_t, 1>), dim3(ntensors), dim3(512), 0, stream, output_per_tensor.const_data_ptr<opmath_t>(), ret_per_tensor.mutable_data_ptr<scalar_t>(), max_chunks_per_tensor); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } else if (p == static_cast<double>(2)) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, tensor_lists[0][0].scalar_type(), "foreach_tensor_norm_cuda", [&]() { using opmath_t = typename at::opmath_type<scalar_t>; multi_tensor_apply<1>( tensor_lists, LpNormFunctor<scalar_t, 2>(), output_per_tensor.mutable_data_ptr<opmath_t>(), max_chunks_per_tensor); C10_HIP_KERNEL_LAUNCH_CHECK(); const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(output_per_tensor)); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( lpnorm_cleanup<scalar_t, 2>), dim3(ntensors), dim3(512), 0, stream, output_per_tensor.const_data_ptr<opmath_t>(), ret_per_tensor.mutable_data_ptr<scalar_t>(), max_chunks_per_tensor); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } else { AT_ERROR("foreach_tensor_norm_cuda fast path got unexpected ord value: ", p); } std::vector<Tensor> result; result.reserve(ntensors); for (const auto& i : c10::irange(ntensors)) { result.emplace_back(ret_per_tensor[i]); } return result; } } // namespace at::native
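Both LpNormFunctor and lpnorm_cleanup above lean on at::native::cuda_utils::BlockReduceSum from block_reduce.cuh, which is not shown in this file. The sketch below is a generic block-wide sum in the same spirit (warp shuffles, then one partial per warp in shared memory); the ATen implementation may differ in detail. It assumes blockDim.x is a multiple of 32, which holds for the 512-thread launches above.

// Generic block-sum sketch, not ATen's exact BlockReduceSum. `shared` needs at least
// blockDim.x / 32 slots; the result is valid in thread 0, which is how the kernels
// above use it.
template <typename T>
__device__ T block_reduce_sum_sketch(T val, T* shared)
{
    const int lane = threadIdx.x % warpSize;
    const int wid  = threadIdx.x / warpSize;
    for (int offset = warpSize / 2; offset > 0; offset /= 2)   // reduce within each warp
        val += __shfl_down_sync(0xffffffff, val, offset);
    if (lane == 0) shared[wid] = val;                          // one partial per warp
    __syncthreads();
    const int nwarps = blockDim.x / warpSize;
    val = (threadIdx.x < nwarps) ? shared[lane] : T(0);
    if (wid == 0)                                              // warp 0 folds the partials
        for (int offset = warpSize / 2; offset > 0; offset /= 2)
            val += __shfl_down_sync(0xffffffff, val, offset);
    return val;
}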
2194bd2a4f9a2e0e75a67589574e9a3a8432211d.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/Dispatch.h> #include <ATen/AccumulateType.h> #include <ATen/OpMathType.h> #include <ATen/cuda/DeviceUtils.cuh> #include <ATen/native/ForeachUtils.h> #include <ATen/native/cuda/block_reduce.cuh> #include <ATen/native/cuda/ForeachFunctors.cuh> #include <ATen/native/cuda/MultiTensorApply.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/_foreach_norm_native.h> #include <ATen/ops/zeros.h> #include <ATen/ops/empty.h> #endif namespace at::native { template<typename T, int NormType, int depth=1, int r_args_depth=1, int res_arg_index=0> struct LpNormFunctor { static_assert(NormType == 1 || NormType == 2, "foreach_norm supports only L1 and L2 norm"); using opmath_t = typename at::opmath_type<T>; __device__ __forceinline__ void operator() ( int chunk_size, TensorListMetadata<depth>& tl, opmath_t* output_per_tensor, const int max_chunks_per_tensor ) { int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.numel_for_tensor[tensor_loc]; T* x = (T*)tl.addresses[0][tensor_loc]; x += chunk_idx * chunk_size; n -= chunk_idx * chunk_size; __shared__ opmath_t s_vals[512]; opmath_t vals[kILP]; T r_x[kILP]; for (int i = 0; i < kILP; i++) { vals[i] = opmath_t(0); r_x[i] = T(0); } if (n % kILP == 0 && (chunk_size & kILP) == 0 && is_aligned(x)) { for (int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { // load load_store(r_x, x, 0, i_start); #pragma unroll for (int ii = 0; ii < kILP; ii++) { opmath_t next = static_cast<opmath_t>(r_x[ii]); vals[ii] += NormType == 1 ? ::abs(next) : next * next; } } } else { for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { #pragma unroll for (int ii = 0; ii < kILP; ii++) { int i = i_start + threadIdx.x + ii * blockDim.x; if (i < n && i < chunk_size) { opmath_t next = static_cast<opmath_t>(x[i]); vals[ii] += NormType == 1 ? ::abs(next) : next * next; } } } } auto val = opmath_t(0); for (int i = 0; i < kILP; i++) { val += vals[i]; } auto final = at::native::cuda_utils::BlockReduceSum(val, s_vals); if (threadIdx.x == 0) { output_per_tensor[(tl.start_tensor_this_launch + tensor_loc) * max_chunks_per_tensor + chunk_idx] = final; } } }; template<typename T, int NormType, typename opmath_t = at::opmath_type<T>> __global__ void lpnorm_cleanup( const opmath_t* output_per_tensor, T* ret_per_tensor, int max_chunks_per_tensor) { __shared__ opmath_t vals[512]; const opmath_t* output_this_tensor = output_per_tensor + blockIdx.x*max_chunks_per_tensor; opmath_t val = 0; for (int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x) { val += output_this_tensor[i]; } opmath_t final = at::native::cuda_utils::BlockReduceSum<opmath_t>(val, vals); if(threadIdx.x == 0) { ret_per_tensor[blockIdx.x] = NormType == 1 ? final : ::sqrt(final); } } // note(mkozuki): Why excluding Int and Complex from fast path // - Int: at::norm does not support. // - Complex: __shfl_down_sync does not support complex and foreach does not support functions whose inputs dtypes and output dtype are different. 
std::vector<Tensor> foreach_tensor_norm_cuda(TensorList tensors, const Scalar& ord) { double p; if (ord.isIntegral(false)) { p = ord.to<int64_t>(); } else if (ord.isFloatingPoint()) { p = ord.to<double>(); } else { AT_ERROR("foreach_tensor_norm_cuda expects ord to be integer or float"); } check_foreach_api_restrictions(tensors); const bool has_int_or_complex = std::any_of(tensors.begin(), tensors.end(), [](const auto & t) { const auto scalar_type = t.scalar_type(); return at::isIntegralType(scalar_type, /*includeBool*/true) || at::isComplexType(scalar_type); }); if (!can_use_fast_route(tensors) || has_int_or_complex || !(p == static_cast<double>(1) || p == static_cast<double>(2))) { return foreach_tensor_norm_slow(tensors, ord); } const int ntensors = tensors.size(); int max_chunks_per_tensor = -1; for (int t = 0; t < ntensors; t++) { int max_chunks_this_tensor = (tensors[t].numel() + kChunkSize - 1) / kChunkSize; if(max_chunks_this_tensor > max_chunks_per_tensor) { max_chunks_per_tensor = max_chunks_this_tensor; } } const auto options = tensors[0].options(); auto output_per_tensor = at::zeros({ntensors*max_chunks_per_tensor}, options.dtype(toOpMathType(tensors[0].scalar_type()))); auto ret_per_tensor = at::empty({ntensors}, options); auto tensor_lists = std::vector<std::vector<Tensor>>{tensors.vec()}; if (p == static_cast<double>(1)) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, tensor_lists[0][0].scalar_type(), "foreach_tensor_norm_cuda", [&]() { using opmath_t = typename at::opmath_type<scalar_t>; multi_tensor_apply<1>( tensor_lists, LpNormFunctor<scalar_t, 1>(), output_per_tensor.mutable_data_ptr<opmath_t>(), max_chunks_per_tensor); C10_CUDA_KERNEL_LAUNCH_CHECK(); const at::cuda::OptionalCUDAGuard device_guard(device_of(output_per_tensor)); auto stream = at::cuda::getCurrentCUDAStream(); lpnorm_cleanup<scalar_t, 1><<<ntensors, 512, 0, stream>>>( output_per_tensor.const_data_ptr<opmath_t>(), ret_per_tensor.mutable_data_ptr<scalar_t>(), max_chunks_per_tensor); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } else if (p == static_cast<double>(2)) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, tensor_lists[0][0].scalar_type(), "foreach_tensor_norm_cuda", [&]() { using opmath_t = typename at::opmath_type<scalar_t>; multi_tensor_apply<1>( tensor_lists, LpNormFunctor<scalar_t, 2>(), output_per_tensor.mutable_data_ptr<opmath_t>(), max_chunks_per_tensor); C10_CUDA_KERNEL_LAUNCH_CHECK(); const at::cuda::OptionalCUDAGuard device_guard(device_of(output_per_tensor)); auto stream = at::cuda::getCurrentCUDAStream(); lpnorm_cleanup<scalar_t, 2><<<ntensors, 512, 0, stream>>>( output_per_tensor.const_data_ptr<opmath_t>(), ret_per_tensor.mutable_data_ptr<scalar_t>(), max_chunks_per_tensor); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } else { AT_ERROR("foreach_tensor_norm_cuda fast path got unexpected ord value: ", p); } std::vector<Tensor> result; result.reserve(ntensors); for (const auto& i : c10::irange(ntensors)) { result.emplace_back(ret_per_tensor[i]); } return result; } } // namespace at::native
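The fast path above is a two-pass reduction: pass one writes one partial sum per (tensor, chunk) into output_per_tensor, laid out as an ntensors-by-max_chunks_per_tensor matrix, and lpnorm_cleanup then runs one block per tensor to sum that tensor's row, applying a square root only for L2. Below is a self-contained sketch of the second pass, simplified to float with a classic shared-memory tree reduction; it assumes blockDim.x is a power of two no larger than 512, as in the 512-thread launches above.

__global__ void lpnorm_cleanup_sketch(const float* partials, // [ntensors][max_chunks]
                                      float* out,            // [ntensors]
                                      int max_chunks, bool l2)
{
    __shared__ float smem[512];
    const float* row = partials + (size_t)blockIdx.x * max_chunks; // this block's tensor
    float val = 0.f;
    for (int i = threadIdx.x; i < max_chunks; i += blockDim.x)
        val += row[i];                                 // strided per-thread accumulation
    smem[threadIdx.x] = val;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {     // tree reduction in shared memory
        if (threadIdx.x < s) smem[threadIdx.x] += smem[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        out[blockIdx.x] = l2 ? sqrtf(smem[0]) : smem[0];   // L2 takes the sqrt here
}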
18b4eef0fc260ae74af0325657e780092163b286.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdint.h> #include <ctype.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <iostream> // CUDA stuff: #include "hip/hip_runtime.h" #include "device_launch_parameters.h" // OpenCV stuff (note: C++ not C): #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> using namespace cv; // Convenience function for checking CUDA runtime API results can be wrapped // around any runtime API call. Source: // https://github.com/parallel-forall/code-samples inline hipError_t checkCuda(hipError_t result) { if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); // We should be free()ing CPU+GPU memory here, but we're relying on the OS // to do it for us. hipDeviceReset(); assert(result == hipSuccess); } return result; } #define EDGE 255 #define NOEDGE 0 hipError_t launch_helper(uchar *CPU_InputArray, uchar *CPU_OutputArray, float *GPURuntimes); long NumThreads; // Total number of threads working in parallel //int ThParam[MAXTHREADS]; // Thread parameters ... //double RotAngle; // rotation angle //void* (*RotateFunc)(void *arg); // Function pointer to rotate the image (multi-threaded) //int nframes; //int BOX_SIZE; //int version; int M; // number of rows in image int N; // number of columns in image int TotalSize; int TotalSize_2; int nStreams = 4; int levels = 8; int Thresh; int BOX_SIZE; // ThreadsPerBlock == BOX_SIZE*BOX_SIZE bool show_images; // whether we should pop up the I/O images or not uchar *CPU_InputArray; uchar *CPU_OutputArray; // Sobel kernels: float H[3][3] = { { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } }; float V[3][3] = { { -1, -2, -1 }, { 0, 0, 0 }, { 1, 2, 1 } }; __device__ double Gx[3][3] = { { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } }; __device__ double Gy[3][3] = { { -1, -2, -1 }, { 0, 0, 0 }, { 1, 2, 1 } }; __device__ double Gauss[5][5] = { { 2, 4, 5, 4, 2 }, { 4, 9, 12, 9, 4 }, { 5, 12, 15, 12, 5 }, { 4, 9, 12, 9, 4 }, { 2, 4, 5, 4, 2 } }; // Function that takes BWImage and calculates the Gaussian filtered version // Saves the result in the GaussFilter[][] array __global__ void GaussianFilter(uchar *GPU_i, uchar *GPU_o, int M, int N, int offsetx, int offsety) { //extern __shared__ uchar GPU_i_shared[]; //long tn; // My thread number (ID) is stored here int i,j; double G; // temp to calculate the Gaussian filtered version //__shared__ double Gauss[25]; /*Gauss = { 2, 4, 5, 4, 2 , 4, 9, 12, 9, 4 , 5, 12, 15, 12, 5 , 4, 9, 12, 9, 4 , 2, 4, 5, 4, 2 };*/ //tn = *((int *) tid); // Calculate my Thread ID //tn *= ip.Vpixels/NumThreads; int rt = blockIdx.x * blockDim.x + threadIdx.x+offsetx; // row of image int ct = blockIdx.y * blockDim.y + threadIdx.y+offsety; // column of image //int k; int idx = rt*N+ct; // which pixel in full 1D array int idy; //int idz = threadIdx.x*blockDim.x+threadIdx.y; if (rt<M&&ct<N) { //GPU_i_shared[idz] = GPU_i[idx]; //printf("IDX : %d*%d+%d = %d , IDZ : %d\n",rt,N,ct,idx,idz); //for(row=tn; row<tn+M/NumThreads; row++) //{ //if (rt>=M || ct>=N) return; //__syncthreads(); if((rt>1) && (rt<(M-2))) { //col=2; if(ct<(N-2)&&ct>1){ G=0.0; for(i=-2; i<=2; i++){ for(j=-2; j<=2; j++){ idy = (rt+i)*N+ct+j; //idy = (threadIdx.x+i)*blockDim.x+threadIdx.y+j; G+=GPU_i[idy]*Gauss[i+2][j+2]; //printf("Gauss: %10.4f, GPU_i: %d, G: %10.4f\n",Gauss[i+2][j+2],GPU_i[idy],G); } } GPU_o[idx]=G/159.000; //col++; //printf("GPU_o %d : %d\n",idx,GPU_o[idx]); } } } //else GPU_o[idx] = 0; //} //pthread_exit(NULL); } // 
Function that calculates the Gradient and Theta for each pixel // Takes the Gauss[][] array and creates the Gradient[][] and Theta[][] arrays __global__ void Sobel(uchar *GPU_i, uchar *Gradient, int M, int N, int offsetx, int offsety) { //long tn; // My thread number (ID) is stored here int i,j; double GX,GY; int rt = blockIdx.x * blockDim.x + threadIdx.x+offsetx; // row of image int ct = blockIdx.y * blockDim.y + threadIdx.y+offsety; // column of image //int k; int idx = rt*N+ct; // which pixel in full 1D array int idy; /*for(row=tn; row<tn+ip.Vpixels/NumThreads; row++) { if((row<1) || (row>(ip.Vpixels-2))) continue; col=1; while(col<=(N-2)){*/ if (rt<M&&ct<N) { if((rt>0) && (rt<(M-1))) { if(ct<=(N-2)&&ct>0){ // calculate Gx and Gy GX=0.0; GY=0.0; for(i=-1; i<=1; i++){ for(j=-1; j<=1; j++){ idy = (rt+i)*N+ct+j; GX+=GPU_i[idy]*Gx[i+1][j+1]; GY+=GPU_i[idy]*Gy[i+1][j+1]; } } //if (rt == 5) printf("G = %f\n",sqrt(GX*GX+GY*GY)); Gradient[idx]=sqrt(GX*GX+GY*GY); //Theta[idx]=atan(GX/GY)*180.0/PI; //col++; } } } else return; //pthread_exit(NULL); } __global__ void Threshold(uchar *GPU_i, uchar *GPU_o, int M, int N, int Thresh, int offsetx, int offsety) { //long tn; // My thread number (ID) is stored here //int row,col; unsigned char PIXVAL; double L,G; int rt = blockIdx.x * blockDim.x + threadIdx.x+offsetx; // row of image int ct = blockIdx.y * blockDim.y + threadIdx.y+offsety; // column of image //int k; int idx = rt*N+ct; // which pixel in full 1D array if (rt>M-1 || ct>N-1) { //GPU_o[idx] = NOEDGE; return; } //for(row=tn; row<tn+ip.Vpixels/NumThreads; row++) //{ //if((row<1) || (row>(M-2))) continue; //col=1; //if(ct>0 && ct<=(N-2)){ L=(double)Thresh; //H=(double)ThreshHi; G=GPU_i[idx]; PIXVAL=NOEDGE; if(G<=L){ // no edge PIXVAL=NOEDGE; } else { // edge PIXVAL=EDGE; } GPU_o[idx]=PIXVAL; //CopyImage[row][col*3+1]=PIXVAL; //CopyImage[row][col*3+2]=PIXVAL; //col++; //} } //pthread_exit(NULL); int main(int argc, char *argv[]) { float GPURuntimes[4]; float GPUexetime = 0.0; // Parse input args: if ( argc != 3 ) { printf("Usage: %s <input output> <image> \n", argv[0]); printf(" where 'show images' is 0 or 1\n"); exit(EXIT_FAILURE); } BOX_SIZE = 16; //show_images = atoi( argv[5] ); Thresh = 0; int j = 1; // where the GPU should copy the data from/to: /*if ((CPU_InputArray == NULL) || (CPU_OutputArray == NULL)) { fprintf(stderr, "OOPS. Can't create I/O array(s) using malloc() ...\n"); exit(EXIT_FAILURE); }*/ // Load input image: Mat image; // see http://docs.opencv.org/modules/core/doc/basic_structures.html#mat image = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); if(! image.data ) { fprintf(stderr, "Could not open or find the image.\n"); exit(EXIT_FAILURE); } printf("Loaded image '%s', size = %dx%d (dims = %d).\n", argv[1], image.rows, image.cols, image.dims); // Set up global variables based on image size: M = image.rows; N = image.cols; TotalSize = M * N * sizeof(uchar); // Display the input image: //show_image(image, "input image"); // Copy the image to the input array. We'll duplicate it nframes times. 
//int i; //for (i=0; i<nframes; i++) { checkCuda( hipHostMalloc( (void**)&CPU_InputArray, TotalSize ) ); memcpy(CPU_InputArray, image.data, TotalSize); // always the same image // Allocate the output while we're at it: checkCuda( hipHostMalloc( (void**)&CPU_OutputArray, TotalSize ) ); //} for (Thresh=0;Thresh<=128;Thresh+=8) { // Run it: checkCuda( launch_helper(CPU_InputArray, CPU_OutputArray, GPURuntimes) ); printf("-----------------------------------------------------------------\n"); printf("Tfr CPU->GPU = %5.2f ms ... \nExecution = %5.2f ms ... \nTfr GPU->CPU = %5.2f ms \n Total=%5.2f ms\n", GPURuntimes[1], GPURuntimes[2], GPURuntimes[3], GPURuntimes[0]); printf("-----------------------------------------------------------------\n"); GPUexetime += GPURuntimes[2]; // Display the (last) output image: Mat result = Mat(M, N, CV_8UC1, CPU_OutputArray); //show_image(result, "output image"); // and save it to disk: string output_filename = argv[2]; //printf("i : %d\n",i); char n0, n1; if (j>9) { n0 = '1'; n1 = (j-10)+'0'; output_filename.insert(output_filename.end()-4,n0); output_filename.insert(output_filename.end()-4,n1); } else { n0 = j+'0'; output_filename.insert(output_filename.end()-4,n0); } if (!imwrite(output_filename, result)) { fprintf(stderr, "couldn't write output to disk!\n"); exit(EXIT_FAILURE); } printf("Saved image '%s', size = %dx%d (dims = %d).\n", output_filename.c_str(), result.rows, result.cols, result.dims); j++; } // Clean up memory: hipHostFree(CPU_InputArray); hipHostFree(CPU_OutputArray); //free(CPU_InputArray); //free(CPU_OutputArray); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Parallel Nsight and Visual Profiler to show complete // traces. Don't call it before you're done using the pinned memory! checkCuda( hipDeviceReset() ); // Done. exit(EXIT_SUCCESS); } // Helper function for launching the CUDA kernel (including memcpy, etc.): hipError_t launch_helper(uchar *CPU_InputArray, uchar *CPU_OutputArray, float *Runtimes) { hipEvent_t time1, time2, time3, time4; // pointers to GPU data arrays: uchar *GPU_idata; uchar *GPU_odata; uchar *GaussImage; // uchar *Gradient; /*if ((GPU_idata == NULL) || (GPU_odata == NULL)) { fprintf(stderr, "OOPS. 
Can't create GPU I/O array(s) using malloc() ...\n"); return(hipErrorUnknown); // could do hipErrorMemoryAllocation, but we're // not really here due to a CUDA error }*/ // Number of blocks is ceil(M/threadsPerBlock), same for every block: dim3 threadsPerBlock; dim3 numBlocks; dim3 sharedBlocks; int shared_mem_size; dim3 streamSize; TotalSize_2 = (M/levels+4)*N*sizeof(uchar); threadsPerBlock = dim3(BOX_SIZE,BOX_SIZE); numBlocks = dim3(ceil((float)M / threadsPerBlock.x),ceil((float)N / threadsPerBlock.y)); sharedBlocks = dim3(ceil((float)numBlocks.x/levels),ceil((float)numBlocks.y/nStreams)); shared_mem_size = threadsPerBlock.x*threadsPerBlock.y; printf("NumThreads/Block: %d, NumBlocks: %d, %d, Shared Blocks: %d, %d\n",threadsPerBlock.x*threadsPerBlock.y,numBlocks.x,numBlocks.y,sharedBlocks.x,sharedBlocks.y); hipStream_t stream[nStreams+1]; //checkCuda( hipEventCreate(&startEvent) ); //checkCuda( hipEventCreate(&stopEvent) ); for (int i = 0; i < nStreams+1; ++i) { checkCuda( hipStreamCreate(&stream[i]) ); } hipEventCreate(&time1); hipEventCreate(&time2); hipEventCreate(&time3); hipEventCreate(&time4); //printf("TotalSize = %d\n",TotalSize); //printf("TotalSize_2 = %d\n",TotalSize_2); // Loop over frames: // Allocate GPU buffer for input and output: checkCuda( hipMalloc((void**)&GPU_idata, TotalSize) ); checkCuda( hipMalloc((void**)&GPU_odata, TotalSize) ); checkCuda( hipMalloc((void**)&GaussImage, TotalSize) ); // Copy this frame to the GPU: hipEventRecord(time1, 0); int offsetx, offsety; for (int i = 0; i < levels+1; i++) { if (i<levels) { if (i<levels-1) { //printf("\nCurrently on level: %d, Pinned memory offset: %d\n",i,TotalSize/levels*i); checkCuda( hipMemcpyAsync(&GPU_idata[TotalSize/levels*i], &CPU_InputArray[TotalSize/levels*i], TotalSize_2, hipMemcpyHostToDevice, stream[0]) ); } else if (i==levels-1) { checkCuda( hipMemcpyAsync(&GPU_idata[TotalSize/levels*i], &CPU_InputArray[TotalSize/levels*i], TotalSize/levels, hipMemcpyHostToDevice, stream[0]) ); } hipEventRecord(time2,0); // Launch kernel: offsetx = threadsPerBlock.x*sharedBlocks.x*i; for(int j = 0; j<nStreams; j++) { offsety = j*sharedBlocks.y*threadsPerBlock.y; hipLaunchKernelGGL(( GaussianFilter), dim3(sharedBlocks), dim3(threadsPerBlock), 0 , stream[j+1], GPU_idata, GPU_odata, M, N, offsetx, offsety); checkCuda( hipGetLastError() ); } } if (i>0) { offsetx = threadsPerBlock.x*sharedBlocks.x*(i-1); for(int j = 0; j<nStreams; j++) { offsety = j*sharedBlocks.y*threadsPerBlock.y; hipLaunchKernelGGL(( Sobel), dim3(sharedBlocks), dim3(threadsPerBlock), 0 , stream[j+1], GPU_odata, GaussImage, M, N, offsetx, offsety); checkCuda( hipGetLastError() ); } for(int j = 0; j<nStreams; j++) { offsety = j*sharedBlocks.y*threadsPerBlock.y; hipLaunchKernelGGL(( Threshold), dim3(sharedBlocks), dim3(threadsPerBlock), 0 , stream[j+1], GaussImage, GPU_odata, M, N, Thresh, offsetx, offsety); checkCuda( hipGetLastError() ); } hipEventRecord(time3, 0); // Copy result back to CPU: //checkCuda( hipMemcpyAsync(CPU_OutputArray, GPU_odata, TotalSize, // hipMemcpyDeviceToHost, stream[0]) ); checkCuda( hipMemcpyAsync(&CPU_OutputArray[TotalSize/levels*(i-1)], &GPU_odata[TotalSize/levels*(i-1)], TotalSize/levels, hipMemcpyDeviceToHost, stream[0]) ); hipEventRecord(time4, 0); } } // hipDeviceSynchronize waits for all preceding tasks to finish, and returns // an error if any of them failed: checkCuda( hipDeviceSynchronize() ); hipEventSynchronize(time1); hipEventSynchronize(time2); hipEventSynchronize(time3); hipEventSynchronize(time4); float totalTime, 
tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime; hipEventElapsedTime(&totalTime, time1, time4); hipEventElapsedTime(&tfrCPUtoGPU, time1, time2); hipEventElapsedTime(&kernelExecutionTime, time2, time3); hipEventElapsedTime(&tfrGPUtoCPU, time3, time4); Runtimes[0] = totalTime; Runtimes[1] = tfrCPUtoGPU; Runtimes[2] = kernelExecutionTime; Runtimes[3] = tfrGPUtoCPU; // Clean up memory: for (int i = 0; i < nStreams+1; ++i) { checkCuda( hipStreamDestroy(stream[i]) ); } hipFree(GPU_odata); hipFree(GPU_idata); hipFree(GaussImage); //free(GPU_odata); //free(GPU_idata); hipEventDestroy(time1); hipEventDestroy(time2); hipEventDestroy(time3); hipEventDestroy(time4); // Done. return hipSuccess; }
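GaussianFilter above divides the accumulated value by 159.000; that constant is just the sum of the 5x5 integer stencil, so the filter has unit DC gain and a flat image passes through unchanged. A small compile-time check (requires C++14 constexpr; the array duplicates the __device__ Gauss table above):

constexpr int kGauss[5][5] = { { 2,  4,  5,  4, 2 },
                               { 4,  9, 12,  9, 4 },
                               { 5, 12, 15, 12, 5 },
                               { 4,  9, 12,  9, 4 },
                               { 2,  4,  5,  4, 2 } };

constexpr int gauss_weight_sum()
{
    int s = 0;                        // row sums: 17 + 38 + 49 + 38 + 17
    for (int i = 0; i < 5; ++i)
        for (int j = 0; j < 5; ++j) s += kGauss[i][j];
    return s;
}
static_assert(gauss_weight_sum() == 159, "normalization constant used in GaussianFilter");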
18b4eef0fc260ae74af0325657e780092163b286.cu
#include <stdio.h> #include <stdint.h> #include <ctype.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <iostream> // CUDA stuff: #include "cuda_runtime.h" #include "device_launch_parameters.h" // OpenCV stuff (note: C++ not C): #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> using namespace cv; // Convenience function for checking CUDA runtime API results can be wrapped // around any runtime API call. Source: // https://github.com/parallel-forall/code-samples inline cudaError_t checkCuda(cudaError_t result) { if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); // We should be free()ing CPU+GPU memory here, but we're relying on the OS // to do it for us. cudaDeviceReset(); assert(result == cudaSuccess); } return result; } #define EDGE 255 #define NOEDGE 0 cudaError_t launch_helper(uchar *CPU_InputArray, uchar *CPU_OutputArray, float *GPURuntimes); long NumThreads; // Total number of threads working in parallel //int ThParam[MAXTHREADS]; // Thread parameters ... //double RotAngle; // rotation angle //void* (*RotateFunc)(void *arg); // Function pointer to rotate the image (multi-threaded) //int nframes; //int BOX_SIZE; //int version; int M; // number of rows in image int N; // number of columns in image int TotalSize; int TotalSize_2; int nStreams = 4; int levels = 8; int Thresh; int BOX_SIZE; // ThreadsPerBlock == BOX_SIZE*BOX_SIZE bool show_images; // whether we should pop up the I/O images or not uchar *CPU_InputArray; uchar *CPU_OutputArray; // Sobel kernels: float H[3][3] = { { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } }; float V[3][3] = { { -1, -2, -1 }, { 0, 0, 0 }, { 1, 2, 1 } }; __device__ double Gx[3][3] = { { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } }; __device__ double Gy[3][3] = { { -1, -2, -1 }, { 0, 0, 0 }, { 1, 2, 1 } }; __device__ double Gauss[5][5] = { { 2, 4, 5, 4, 2 }, { 4, 9, 12, 9, 4 }, { 5, 12, 15, 12, 5 }, { 4, 9, 12, 9, 4 }, { 2, 4, 5, 4, 2 } }; // Function that takes BWImage and calculates the Gaussian filtered version // Saves the result in the GaussFilter[][] array __global__ void GaussianFilter(uchar *GPU_i, uchar *GPU_o, int M, int N, int offsetx, int offsety) { //extern __shared__ uchar GPU_i_shared[]; //long tn; // My thread number (ID) is stored here int i,j; double G; // temp to calculate the Gaussian filtered version //__shared__ double Gauss[25]; /*Gauss = { 2, 4, 5, 4, 2 , 4, 9, 12, 9, 4 , 5, 12, 15, 12, 5 , 4, 9, 12, 9, 4 , 2, 4, 5, 4, 2 };*/ //tn = *((int *) tid); // Calculate my Thread ID //tn *= ip.Vpixels/NumThreads; int rt = blockIdx.x * blockDim.x + threadIdx.x+offsetx; // row of image int ct = blockIdx.y * blockDim.y + threadIdx.y+offsety; // column of image //int k; int idx = rt*N+ct; // which pixel in full 1D array int idy; //int idz = threadIdx.x*blockDim.x+threadIdx.y; if (rt<M&&ct<N) { //GPU_i_shared[idz] = GPU_i[idx]; //printf("IDX : %d*%d+%d = %d , IDZ : %d\n",rt,N,ct,idx,idz); //for(row=tn; row<tn+M/NumThreads; row++) //{ //if (rt>=M || ct>=N) return; //__syncthreads(); if((rt>1) && (rt<(M-2))) { //col=2; if(ct<(N-2)&&ct>1){ G=0.0; for(i=-2; i<=2; i++){ for(j=-2; j<=2; j++){ idy = (rt+i)*N+ct+j; //idy = (threadIdx.x+i)*blockDim.x+threadIdx.y+j; G+=GPU_i[idy]*Gauss[i+2][j+2]; //printf("Gauss: %10.4f, GPU_i: %d, G: %10.4f\n",Gauss[i+2][j+2],GPU_i[idy],G); } } GPU_o[idx]=G/159.000; //col++; //printf("GPU_o %d : %d\n",idx,GPU_o[idx]); } } } //else GPU_o[idx] = 0; //} //pthread_exit(NULL); } // Function that calculates the Gradient and Theta for each 
pixel // Takes the Gauss[][] array and creates the Gradient[][] and Theta[][] arrays __global__ void Sobel(uchar *GPU_i, uchar *Gradient, int M, int N, int offsetx, int offsety) { //long tn; // My thread number (ID) is stored here int i,j; double GX,GY; int rt = blockIdx.x * blockDim.x + threadIdx.x+offsetx; // row of image int ct = blockIdx.y * blockDim.y + threadIdx.y+offsety; // column of image //int k; int idx = rt*N+ct; // which pixel in full 1D array int idy; /*for(row=tn; row<tn+ip.Vpixels/NumThreads; row++) { if((row<1) || (row>(ip.Vpixels-2))) continue; col=1; while(col<=(N-2)){*/ if (rt<M&&ct<N) { if((rt>0) && (rt<(M-1))) { if(ct<=(N-2)&&ct>0){ // calculate Gx and Gy GX=0.0; GY=0.0; for(i=-1; i<=1; i++){ for(j=-1; j<=1; j++){ idy = (rt+i)*N+ct+j; GX+=GPU_i[idy]*Gx[i+1][j+1]; GY+=GPU_i[idy]*Gy[i+1][j+1]; } } //if (rt == 5) printf("G = %f\n",sqrt(GX*GX+GY*GY)); Gradient[idx]=sqrt(GX*GX+GY*GY); //Theta[idx]=atan(GX/GY)*180.0/PI; //col++; } } } else return; //pthread_exit(NULL); } __global__ void Threshold(uchar *GPU_i, uchar *GPU_o, int M, int N, int Thresh, int offsetx, int offsety) { //long tn; // My thread number (ID) is stored here //int row,col; unsigned char PIXVAL; double L,G; int rt = blockIdx.x * blockDim.x + threadIdx.x+offsetx; // row of image int ct = blockIdx.y * blockDim.y + threadIdx.y+offsety; // column of image //int k; int idx = rt*N+ct; // which pixel in full 1D array if (rt>M-1 || ct>N-1) { //GPU_o[idx] = NOEDGE; return; } //for(row=tn; row<tn+ip.Vpixels/NumThreads; row++) //{ //if((row<1) || (row>(M-2))) continue; //col=1; //if(ct>0 && ct<=(N-2)){ L=(double)Thresh; //H=(double)ThreshHi; G=GPU_i[idx]; PIXVAL=NOEDGE; if(G<=L){ // no edge PIXVAL=NOEDGE; } else { // edge PIXVAL=EDGE; } GPU_o[idx]=PIXVAL; //CopyImage[row][col*3+1]=PIXVAL; //CopyImage[row][col*3+2]=PIXVAL; //col++; //} } //pthread_exit(NULL); int main(int argc, char *argv[]) { float GPURuntimes[4]; float GPUexetime = 0.0; // Parse input args: if ( argc != 3 ) { printf("Usage: %s <input output> <image> \n", argv[0]); printf(" where 'show images' is 0 or 1\n"); exit(EXIT_FAILURE); } BOX_SIZE = 16; //show_images = atoi( argv[5] ); Thresh = 0; int j = 1; // where the GPU should copy the data from/to: /*if ((CPU_InputArray == NULL) || (CPU_OutputArray == NULL)) { fprintf(stderr, "OOPS. Can't create I/O array(s) using malloc() ...\n"); exit(EXIT_FAILURE); }*/ // Load input image: Mat image; // see http://docs.opencv.org/modules/core/doc/basic_structures.html#mat image = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); if(! image.data ) { fprintf(stderr, "Could not open or find the image.\n"); exit(EXIT_FAILURE); } printf("Loaded image '%s', size = %dx%d (dims = %d).\n", argv[1], image.rows, image.cols, image.dims); // Set up global variables based on image size: M = image.rows; N = image.cols; TotalSize = M * N * sizeof(uchar); // Display the input image: //show_image(image, "input image"); // Copy the image to the input array. We'll duplicate it nframes times. //int i; //for (i=0; i<nframes; i++) { checkCuda( cudaMallocHost( (void**)&CPU_InputArray, TotalSize ) ); memcpy(CPU_InputArray, image.data, TotalSize); // always the same image // Allocate the output while we're at it: checkCuda( cudaMallocHost( (void**)&CPU_OutputArray, TotalSize ) ); //} for (Thresh=0;Thresh<=128;Thresh+=8) { // Run it: checkCuda( launch_helper(CPU_InputArray, CPU_OutputArray, GPURuntimes) ); printf("-----------------------------------------------------------------\n"); printf("Tfr CPU->GPU = %5.2f ms ... \nExecution = %5.2f ms ... 
\nTfr GPU->CPU = %5.2f ms \n Total=%5.2f ms\n", GPURuntimes[1], GPURuntimes[2], GPURuntimes[3], GPURuntimes[0]); printf("-----------------------------------------------------------------\n"); GPUexetime += GPURuntimes[2]; // Display the (last) output image: Mat result = Mat(M, N, CV_8UC1, CPU_OutputArray); //show_image(result, "output image"); // and save it to disk: string output_filename = argv[2]; //printf("i : %d\n",i); char n0, n1; if (j>9) { n0 = '1'; n1 = (j-10)+'0'; output_filename.insert(output_filename.end()-4,n0); output_filename.insert(output_filename.end()-4,n1); } else { n0 = j+'0'; output_filename.insert(output_filename.end()-4,n0); } if (!imwrite(output_filename, result)) { fprintf(stderr, "couldn't write output to disk!\n"); exit(EXIT_FAILURE); } printf("Saved image '%s', size = %dx%d (dims = %d).\n", output_filename.c_str(), result.rows, result.cols, result.dims); j++; } // Clean up memory: cudaFreeHost(CPU_InputArray); cudaFreeHost(CPU_OutputArray); //free(CPU_InputArray); //free(CPU_OutputArray); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Parallel Nsight and Visual Profiler to show complete // traces. Don't call it before you're done using the pinned memory! checkCuda( cudaDeviceReset() ); // Done. exit(EXIT_SUCCESS); } // Helper function for launching the CUDA kernel (including memcpy, etc.): cudaError_t launch_helper(uchar *CPU_InputArray, uchar *CPU_OutputArray, float *Runtimes) { cudaEvent_t time1, time2, time3, time4; // pointers to GPU data arrays: uchar *GPU_idata; uchar *GPU_odata; uchar *GaussImage; // uchar *Gradient; /*if ((GPU_idata == NULL) || (GPU_odata == NULL)) { fprintf(stderr, "OOPS. Can't create GPU I/O array(s) using malloc() ...\n"); return(cudaErrorUnknown); // could do cudaErrorMemoryAllocation, but we're // not really here due to a CUDA error }*/ // Number of blocks is ceil(M/threadsPerBlock), same for every block: dim3 threadsPerBlock; dim3 numBlocks; dim3 sharedBlocks; int shared_mem_size; dim3 streamSize; TotalSize_2 = (M/levels+4)*N*sizeof(uchar); threadsPerBlock = dim3(BOX_SIZE,BOX_SIZE); numBlocks = dim3(ceil((float)M / threadsPerBlock.x),ceil((float)N / threadsPerBlock.y)); sharedBlocks = dim3(ceil((float)numBlocks.x/levels),ceil((float)numBlocks.y/nStreams)); shared_mem_size = threadsPerBlock.x*threadsPerBlock.y; printf("NumThreads/Block: %d, NumBlocks: %d, %d, Shared Blocks: %d, %d\n",threadsPerBlock.x*threadsPerBlock.y,numBlocks.x,numBlocks.y,sharedBlocks.x,sharedBlocks.y); cudaStream_t stream[nStreams+1]; //checkCuda( cudaEventCreate(&startEvent) ); //checkCuda( cudaEventCreate(&stopEvent) ); for (int i = 0; i < nStreams+1; ++i) { checkCuda( cudaStreamCreate(&stream[i]) ); } cudaEventCreate(&time1); cudaEventCreate(&time2); cudaEventCreate(&time3); cudaEventCreate(&time4); //printf("TotalSize = %d\n",TotalSize); //printf("TotalSize_2 = %d\n",TotalSize_2); // Loop over frames: // Allocate GPU buffer for input and output: checkCuda( cudaMalloc((void**)&GPU_idata, TotalSize) ); checkCuda( cudaMalloc((void**)&GPU_odata, TotalSize) ); checkCuda( cudaMalloc((void**)&GaussImage, TotalSize) ); // Copy this frame to the GPU: cudaEventRecord(time1, 0); int offsetx, offsety; for (int i = 0; i < levels+1; i++) { if (i<levels) { if (i<levels-1) { //printf("\nCurrently on level: %d, Pinned memory offset: %d\n",i,TotalSize/levels*i); checkCuda( cudaMemcpyAsync(&GPU_idata[TotalSize/levels*i], &CPU_InputArray[TotalSize/levels*i], TotalSize_2, cudaMemcpyHostToDevice, stream[0]) ); } else if 
(i==levels-1) { checkCuda( cudaMemcpyAsync(&GPU_idata[TotalSize/levels*i], &CPU_InputArray[TotalSize/levels*i], TotalSize/levels, cudaMemcpyHostToDevice, stream[0]) ); } cudaEventRecord(time2,0); // Launch kernel: offsetx = threadsPerBlock.x*sharedBlocks.x*i; for(int j = 0; j<nStreams; j++) { offsety = j*sharedBlocks.y*threadsPerBlock.y; GaussianFilter<<<sharedBlocks, threadsPerBlock, 0 , stream[j+1]>>>(GPU_idata, GPU_odata, M, N, offsetx, offsety); checkCuda( cudaGetLastError() ); } } if (i>0) { offsetx = threadsPerBlock.x*sharedBlocks.x*(i-1); for(int j = 0; j<nStreams; j++) { offsety = j*sharedBlocks.y*threadsPerBlock.y; Sobel<<<sharedBlocks, threadsPerBlock, 0 , stream[j+1]>>>(GPU_odata, GaussImage, M, N, offsetx, offsety); checkCuda( cudaGetLastError() ); } for(int j = 0; j<nStreams; j++) { offsety = j*sharedBlocks.y*threadsPerBlock.y; Threshold<<<sharedBlocks, threadsPerBlock, 0 , stream[j+1]>>>(GaussImage, GPU_odata, M, N, Thresh, offsetx, offsety); checkCuda( cudaGetLastError() ); } cudaEventRecord(time3, 0); // Copy result back to CPU: //checkCuda( cudaMemcpyAsync(CPU_OutputArray, GPU_odata, TotalSize, // cudaMemcpyDeviceToHost, stream[0]) ); checkCuda( cudaMemcpyAsync(&CPU_OutputArray[TotalSize/levels*(i-1)], &GPU_odata[TotalSize/levels*(i-1)], TotalSize/levels, cudaMemcpyDeviceToHost, stream[0]) ); cudaEventRecord(time4, 0); } } // cudaDeviceSynchronize waits for all preceding tasks to finish, and returns // an error if any of them failed: checkCuda( cudaDeviceSynchronize() ); cudaEventSynchronize(time1); cudaEventSynchronize(time2); cudaEventSynchronize(time3); cudaEventSynchronize(time4); float totalTime, tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime; cudaEventElapsedTime(&totalTime, time1, time4); cudaEventElapsedTime(&tfrCPUtoGPU, time1, time2); cudaEventElapsedTime(&kernelExecutionTime, time2, time3); cudaEventElapsedTime(&tfrGPUtoCPU, time3, time4); Runtimes[0] = totalTime; Runtimes[1] = tfrCPUtoGPU; Runtimes[2] = kernelExecutionTime; Runtimes[3] = tfrGPUtoCPU; // Clean up memory: for (int i = 0; i < nStreams+1; ++i) { checkCuda( cudaStreamDestroy(stream[i]) ); } cudaFree(GPU_odata); cudaFree(GPU_idata); cudaFree(GaussImage); //free(GPU_odata); //free(GPU_idata); cudaEventDestroy(time1); cudaEventDestroy(time2); cudaEventDestroy(time3); cudaEventDestroy(time4); // Done. return cudaSuccess; }
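// A minimal, self-contained sketch (not the original pipeline) of the offset-tiling pattern
// used by GaussianFilter/Sobel/Threshold above: each launch covers only a sub-grid of blocks
// ("sharedBlocks"), and (offsetx, offsety) shift those threads onto the right image tile.
// fill_tile and all sizes below are made-up illustration values, not part of the file above.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void fill_tile(unsigned char *img, int M, int N, int offsetx, int offsety, unsigned char v)
{
    int rt = blockIdx.x * blockDim.x + threadIdx.x + offsetx;   // row in the full image
    int ct = blockIdx.y * blockDim.y + threadIdx.y + offsety;   // column in the full image
    if (rt < M && ct < N) img[rt * N + ct] = v;                 // same 1D indexing as the kernels above
}

int main()
{
    const int M = 256, N = 256, levels = 4, nStreams = 2;       // example sizes only
    unsigned char *d_img;
    cudaMalloc(&d_img, (size_t)M * N);

    dim3 threads(16, 16);
    dim3 blocks((M + 15) / 16, (N + 15) / 16);
    dim3 tile((blocks.x + levels - 1) / levels, (blocks.y + nStreams - 1) / nStreams);

    // Each (i, j) pair launches one tile; in the original code the j loop goes to a different stream.
    for (int i = 0; i < levels; ++i)
        for (int j = 0; j < nStreams; ++j) {
            int offsetx = i * tile.x * threads.x;
            int offsety = j * tile.y * threads.y;
            fill_tile<<<tile, threads>>>(d_img, M, N, offsetx, offsety, (unsigned char)(i * nStreams + j));
        }

    cudaDeviceSynchronize();
    printf("last error: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d_img);
    return 0;
}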
5015c908b874cf285f84c03490f69fe51d5a1ca9.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>

int main()
{
    int dev;
    hipDeviceProp_t devprop;
    hipGetDevice(&dev);
    hipGetDeviceProperties(&devprop, dev);
    printf("name = %s\n"
           "total global mem = %.1fM\n"
           "shared mem per block = %.1fK\n"
           "regs per block = %d\n"
           "warp size = %d\n"
           "clock rate = %.1fGHz\n"
           "max threads per block = %d\n"
           "total const mem = %.1fK\n"
           "multiprocessor count = %d\n"
           "max threads per multiprocessor = %d\n"
           "l2 cache size = %.1fK\n",
           devprop.name,
           devprop.totalGlobalMem / (1024 * 1024.0),
           devprop.sharedMemPerBlock / 1024.0,
           devprop.regsPerBlock,
           devprop.warpSize,
           devprop.clockRate / (1000000.0),
           devprop.maxThreadsPerBlock,
           devprop.totalConstMem / 1024.0,
           devprop.multiProcessorCount,
           devprop.maxThreadsPerMultiProcessor,
           devprop.l2CacheSize / 1024.0);
    return 0;
}
5015c908b874cf285f84c03490f69fe51d5a1ca9.cu
#include <stdio.h>

int main()
{
    int dev;
    cudaDeviceProp devprop;
    cudaGetDevice(&dev);
    cudaGetDeviceProperties(&devprop, dev);
    printf("name = %s\n"
           "total global mem = %.1fM\n"
           "shared mem per block = %.1fK\n"
           "regs per block = %d\n"
           "warp size = %d\n"
           "clock rate = %.1fGHz\n"
           "max threads per block = %d\n"
           "total const mem = %.1fK\n"
           "multiprocessor count = %d\n"
           "max threads per multiprocessor = %d\n"
           "l2 cache size = %.1fK\n",
           devprop.name,
           devprop.totalGlobalMem / (1024 * 1024.0),
           devprop.sharedMemPerBlock / 1024.0,
           devprop.regsPerBlock,
           devprop.warpSize,
           devprop.clockRate / (1000000.0),
           devprop.maxThreadsPerBlock,
           devprop.totalConstMem / 1024.0,
           devprop.multiProcessorCount,
           devprop.maxThreadsPerMultiProcessor,
           devprop.l2CacheSize / 1024.0);
    return 0;
}
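// Companion sketch (not part of the pair above): error-checked device query plus one derived
// figure, the theoretical peak memory bandwidth in GB/s, computed from memoryClockRate (kHz,
// DDR hence the factor 2) and memoryBusWidth (bits). Device 0 and the formula layout are
// illustration choices, not taken from the file above.
#include <cstdio>
#include <cuda_runtime.h>

int main()
{
    int dev = 0;
    cudaDeviceProp prop;
    if (cudaGetDevice(&dev) != cudaSuccess ||
        cudaGetDeviceProperties(&prop, dev) != cudaSuccess) {
        fprintf(stderr, "no usable CUDA device\n");
        return 1;
    }
    double peak_gb_s = 2.0 * prop.memoryClockRate * 1e3 *   // DDR: two transfers per memory clock
                       (prop.memoryBusWidth / 8.0) / 1e9;   // bits -> bytes, B/s -> GB/s
    printf("device %d: %s, %.1f GB/s theoretical peak memory bandwidth\n", dev, prop.name, peak_gb_s);
    return 0;
}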
2281f234fda607f33cf853d020a81cd47f561a9a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __device__ double dnorm(float x, float mu, float sigma) { float std = (x - mu)/sigma; float e = exp( - 0.5 * std * std); return(e / ( sigma * sqrt(2 * 3.141592653589793))); } __global__ void dnorm_kernel(float *vals, int N, float mu, float sigma, float *out) { // Taken from geco.mines.edu/workshop/aug2010/slides/fri/cuda1.pd int myblock = blockIdx.x + blockIdx.y * gridDim.x; /* how big is each block within a grid */ int blocksize = blockDim.x * blockDim.y * blockDim.z; /* get thread within a block */ int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; int idx = myblock * blocksize + subthread; if(idx < N) { float std = (vals[idx] - mu)/sigma; float e = exp( - 0.5 * std * std); out[idx] = e / ( sigma * sqrt(2 * 3.141592653589793)); } }
2281f234fda607f33cf853d020a81cd47f561a9a.cu
#include "includes.h" __device__ double dnorm(float x, float mu, float sigma) { float std = (x - mu)/sigma; float e = exp( - 0.5 * std * std); return(e / ( sigma * sqrt(2 * 3.141592653589793))); } __global__ void dnorm_kernel(float *vals, int N, float mu, float sigma, float *out) { // Taken from geco.mines.edu/workshop/aug2010/slides/fri/cuda1.pd int myblock = blockIdx.x + blockIdx.y * gridDim.x; /* how big is each block within a grid */ int blocksize = blockDim.x * blockDim.y * blockDim.z; /* get thread within a block */ int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; int idx = myblock * blocksize + subthread; if(idx < N) { float std = (vals[idx] - mu)/sigma; float e = exp( - 0.5 * std * std); out[idx] = e / ( sigma * sqrt(2 * 3.141592653589793)); } }
0f6a3581b72e6ec8cc313c5e2b59fd09cc716692.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #pragma once #include "gpu_kernels.cuh" __global__ void subtract(int height, int width, float2 *initial_array, float *b, float2 *result_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) for (int i = 0; i < height; i++) result_array[i*width+idx].x = initial_array[i*width+idx].x - b[idx]; } __global__ void subtract(int height, int width, uint16_t *a, float *b, float2 *result_array) { int idx = blockDim.x*blockIdx.x+threadIdx.x; if (idx < width) for (int i = 0; i < height; i++) result_array[i*width+idx].x = (float)a[i*width+idx] - b[idx]; } __global__ void multiply(int height, int width, float *a, float *b, float *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; if ((w < width) && (h < height)) result_array[index] = a[index] * b[index]; } __global__ void divide(int height, int width, float *a, float b, float *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; if ((w < width) && (h < height)) result_array[index] = a[index] / b; } // This one should be removed after changing code in background to use above version. __global__ void divide(int height, int width, float *initial_array, float b, float2 *result_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) for (int i = 0; i < height; i++) result_array[i*width+idx].x = initial_array[i*width+idx] / b; } __global__ void reciprocal(int height, int width, float2 *input_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) for (int i = 0; i < height; i++) input_array[i*width+idx].x = 1.f/input_array[i*width+idx].x; } // multiply twice since the reciprocal will be passed in. 
__global__ void mult_divide(int height, int width, float *a, float2 *b, float2 *c, float2 *result_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) for (int i = 0; i < height; i++) result_array[i*width+idx].x = a[idx] * b[i*width+idx].x * c[idx].x; } __global__ void subt_divide(int height, int width, float *a, float *b, float c, float *result_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) for (int i = 0; i < height; i++) result_array[i*width+idx] = (a[i*width+idx] - b[idx])*c; } __global__ void phi_multiply(int height, int width, float2 *a, float2 *b, float2 *result_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) for (int i = 0; i < height; i++) { result_array[i*width+idx].x = a[idx].x * b[i*width+idx].x; result_array[i*width+idx].y = a[idx].y * b[i*width+idx].x; } } __global__ void d_log(int height, int width, float *initial_array, float *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; if ((w < width) && (h < height)) result_array[index] = logf(initial_array[index]); } __global__ void magnitude(int height, int width, float2 *initial_array, float *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; if ((w < width) && (h < height)) { result_array[index] = initial_array[index].x * initial_array[index].x + initial_array[index].y * initial_array[index].y; result_array[index] = sqrtf(result_array[index]); } } __global__ void magnitude_db(int height, int width, float2 *initial_array, float *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; if ((w < width) && (h < height)) { result_array[index] = initial_array[index].x * initial_array[index].x + initial_array[index].y * initial_array[index].y; result_array[index] = sqrtf(result_array[index]); result_array[index] = 20.0f * log10f(result_array[index]); } } __global__ void zero_pad(int height, int width, float2 *initial_array, float2 *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h*width*2 + w; int index_half = h*width + w; if ((w < ((width*0.5)-1)) && (h < height)) result_array[index] = initial_array[index_half]; } __global__ void scale_IFT(int height, int width, float scaler, float2 *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; if ((w < width) && (h < height)) { result_array[index].x *= scaler; result_array[index].y *= scaler; } } __global__ void scale_IFT_x(int height, int width, float scaler, float2 *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; if ((w < width) && (h < height)) result_array[index].x *= scaler; } __global__ void trim_width(int height, int width, int startPixel, int endPixel, float2 *initial_array, float2 *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; int index_trim = h * (endPixel - startPixel + 1) + w; if ((w < (endPixel-startPixel+1)) && (h < height)) result_array[index_trim] = initial_array[index+(startPixel-1)]; } __global__ void trim_height(int height, int width, int startPixel, int endPixel, float2 *initial_array, float2 *result_array) { int w = 
blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; int index_trim = (h - (startPixel - 1)) * width + w; if ((w < width) && (h > (startPixel - 2)) && (h < endPixel)) result_array[index_trim] = initial_array[index]; } __global__ void transpose(int height, int width, float *initial_array, float *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; int transposed_index = w * height + h; if ((w < width) && (h < height)) result_array[transposed_index] = initial_array[index]; } __global__ void multiframe_transpose(int height, int width, int simult_frames, float *initial_array, float *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; if ((w < width) && (h < height)) for (int j = 0; j < simult_frames; j++) result_array[j*width*height+w*height+h] = initial_array[j*width*height+h*width+w]; } __global__ void repmat(int height, int width, float *input_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) for (int i = 1; i < height; i++) input_array[i*width+idx] = input_array[idx]; } __global__ void repmat(int height, int width, float2 *input_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) for (int i = 1; i < height; i++) input_array[i*width+idx] = input_array[idx]; } __global__ void interp_repmat(int height, int width, float *input_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) for (int i = 1; i < height; i++) input_array[i*width+idx] = (float)i*width+input_array[idx]; } __global__ void linear_interp(int height, int width, float *query_points, float2 *initial_array, float2 *result_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) { int k = floorf(query_points[idx]) - 1; //minus one since calibration file is made for matlab, which is 1 indexed. 
for (int i = 0; i < height; i++) result_array[i*width+idx].x = initial_array[i*width+k].x + ((initial_array[i*width+k+1].x - initial_array[i*width+k].x) * (query_points[idx] - (k+1))); } } __global__ void d_sum_elements(int height, int width, float *initial_array, float *result_array) { extern __shared__ float cache[]; int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; int cacheIndex = threadIdx.y * blockDim.x + threadIdx.x; float temp = 0; if ((w < width) && (h < height)) temp += initial_array[index]; cache[cacheIndex] = temp; __syncthreads(); int i = (blockDim.x * blockDim.y) / 2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if (cacheIndex == 0) result_array[blockIdx.y * gridDim.x + blockIdx.x] = cache[0]; } __global__ void interp_1_1(int height, int width, float *HI, float *DD, float *RI, float2 *initial_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width - 2) { for (int i = 0; i < height; i++) { RI[i*(width-2)+idx] = (3.0f / HI[idx+1]) * (initial_array[i*width+idx+2].x - initial_array[i*width+idx+1].x) - (3.0f / HI[idx]) * (initial_array[i*width+idx+1].x - initial_array[i*width+idx].x); if (idx == 0) RI[i*(width-2)+idx] = RI[i*(width-2)+idx] / DD[idx]; } } } __global__ void interp_2_1(int height, int width, float *RI, float *C) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if ((idx > 0) && (idx < (width - 1))) for (int i = 0; i < height; i++) C[i*width+idx] = RI[i*(width-2)+idx-1]; } __global__ void interp_2_2(int height, int width, float *HI, float *C) { //confirm this section int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx == 0 || idx == (width - 1)) for (int i = 0; i < height; i++) { if (idx == 0) C[i*width+idx] = ((1.0f + (HI[idx] / HI[idx+1])) * C[i*width+idx+1]) - ((HI[idx] / HI[idx+1]) * C[i*width+idx+2]); if (idx == (width - 1)) C[i*width+idx] = ((1.0f + (HI[idx-1] / HI[idx-2])) * C[i*width+idx-1]) - ((HI[idx-1] / HI[idx-2]) * C[i*width+idx-2]); } } __global__ void interp_2_3 (int height, int width, float *HI, float *B, float *C, float *D, float2 *initial_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < (width - 1)) { for (int i = 0; i < height; i++) { D[i*width+idx] = (C[i*width+idx+1] - C[i*width+idx]) / (3.0f * HI[idx]); B[i*width+idx] = ((initial_array[i*width+idx+1].x - initial_array[i*width+idx].x) / HI[idx]) - ((HI[idx] * (C[i*width+idx+1] + 2.0f * C[i*width+idx])) / 3.0f); } } } __global__ void interp_3(int height, int width, float *X, float *B, float *C, float *D,float *query_points, float2 *initial_array, float2 *result_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; int piece = floor(query_points[idx]) - 1; float temp = query_points[idx] - X[piece]; if (idx < width) { //int piece = floor(query_points[idx]) - 1; //float temp = query_points[idx] - X[piece]; for (int i = 0; i < height; i++) { result_array[i*width+idx].x = initial_array[i*width+piece].x + (B[i*width+piece] * temp); result_array[i*width+idx].x = result_array[i*width+idx].x + (C[i*width+piece] * temp * temp); result_array[i*width+idx].x = result_array[i*width+idx].x + (D[i*width+piece] * temp * temp * temp); } } }
0f6a3581b72e6ec8cc313c5e2b59fd09cc716692.cu
#pragma once #include "gpu_kernels.cuh" __global__ void subtract(int height, int width, float2 *initial_array, float *b, float2 *result_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) for (int i = 0; i < height; i++) result_array[i*width+idx].x = initial_array[i*width+idx].x - b[idx]; } __global__ void subtract(int height, int width, uint16_t *a, float *b, float2 *result_array) { int idx = blockDim.x*blockIdx.x+threadIdx.x; if (idx < width) for (int i = 0; i < height; i++) result_array[i*width+idx].x = (float)a[i*width+idx] - b[idx]; } __global__ void multiply(int height, int width, float *a, float *b, float *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; if ((w < width) && (h < height)) result_array[index] = a[index] * b[index]; } __global__ void divide(int height, int width, float *a, float b, float *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; if ((w < width) && (h < height)) result_array[index] = a[index] / b; } // This one should be removed after changing code in background to use above version. __global__ void divide(int height, int width, float *initial_array, float b, float2 *result_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) for (int i = 0; i < height; i++) result_array[i*width+idx].x = initial_array[i*width+idx] / b; } __global__ void reciprocal(int height, int width, float2 *input_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) for (int i = 0; i < height; i++) input_array[i*width+idx].x = 1.f/input_array[i*width+idx].x; } // multiply twice since the reciprocal will be passed in. __global__ void mult_divide(int height, int width, float *a, float2 *b, float2 *c, float2 *result_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) for (int i = 0; i < height; i++) result_array[i*width+idx].x = a[idx] * b[i*width+idx].x * c[idx].x; } __global__ void subt_divide(int height, int width, float *a, float *b, float c, float *result_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) for (int i = 0; i < height; i++) result_array[i*width+idx] = (a[i*width+idx] - b[idx])*c; } __global__ void phi_multiply(int height, int width, float2 *a, float2 *b, float2 *result_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) for (int i = 0; i < height; i++) { result_array[i*width+idx].x = a[idx].x * b[i*width+idx].x; result_array[i*width+idx].y = a[idx].y * b[i*width+idx].x; } } __global__ void d_log(int height, int width, float *initial_array, float *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; if ((w < width) && (h < height)) result_array[index] = logf(initial_array[index]); } __global__ void magnitude(int height, int width, float2 *initial_array, float *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; if ((w < width) && (h < height)) { result_array[index] = initial_array[index].x * initial_array[index].x + initial_array[index].y * initial_array[index].y; result_array[index] = sqrtf(result_array[index]); } } __global__ void magnitude_db(int height, int width, float2 *initial_array, float *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = 
h * width + w; if ((w < width) && (h < height)) { result_array[index] = initial_array[index].x * initial_array[index].x + initial_array[index].y * initial_array[index].y; result_array[index] = sqrtf(result_array[index]); result_array[index] = 20.0f * log10f(result_array[index]); } } __global__ void zero_pad(int height, int width, float2 *initial_array, float2 *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h*width*2 + w; int index_half = h*width + w; if ((w < ((width*0.5)-1)) && (h < height)) result_array[index] = initial_array[index_half]; } __global__ void scale_IFT(int height, int width, float scaler, float2 *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; if ((w < width) && (h < height)) { result_array[index].x *= scaler; result_array[index].y *= scaler; } } __global__ void scale_IFT_x(int height, int width, float scaler, float2 *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; if ((w < width) && (h < height)) result_array[index].x *= scaler; } __global__ void trim_width(int height, int width, int startPixel, int endPixel, float2 *initial_array, float2 *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; int index_trim = h * (endPixel - startPixel + 1) + w; if ((w < (endPixel-startPixel+1)) && (h < height)) result_array[index_trim] = initial_array[index+(startPixel-1)]; } __global__ void trim_height(int height, int width, int startPixel, int endPixel, float2 *initial_array, float2 *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; int index_trim = (h - (startPixel - 1)) * width + w; if ((w < width) && (h > (startPixel - 2)) && (h < endPixel)) result_array[index_trim] = initial_array[index]; } __global__ void transpose(int height, int width, float *initial_array, float *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; int transposed_index = w * height + h; if ((w < width) && (h < height)) result_array[transposed_index] = initial_array[index]; } __global__ void multiframe_transpose(int height, int width, int simult_frames, float *initial_array, float *result_array) { int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; if ((w < width) && (h < height)) for (int j = 0; j < simult_frames; j++) result_array[j*width*height+w*height+h] = initial_array[j*width*height+h*width+w]; } __global__ void repmat(int height, int width, float *input_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) for (int i = 1; i < height; i++) input_array[i*width+idx] = input_array[idx]; } __global__ void repmat(int height, int width, float2 *input_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) for (int i = 1; i < height; i++) input_array[i*width+idx] = input_array[idx]; } __global__ void interp_repmat(int height, int width, float *input_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) for (int i = 1; i < height; i++) input_array[i*width+idx] = (float)i*width+input_array[idx]; } __global__ void linear_interp(int height, int width, float *query_points, float2 *initial_array, float2 *result_array) { int 
idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width) { int k = floorf(query_points[idx]) - 1; //minus one since calibration file is made for matlab, which is 1 indexed. for (int i = 0; i < height; i++) result_array[i*width+idx].x = initial_array[i*width+k].x + ((initial_array[i*width+k+1].x - initial_array[i*width+k].x) * (query_points[idx] - (k+1))); } } __global__ void d_sum_elements(int height, int width, float *initial_array, float *result_array) { extern __shared__ float cache[]; int w = blockIdx.x * blockDim.x + threadIdx.x; int h = blockIdx.y * blockDim.y + threadIdx.y; int index = h * width + w; int cacheIndex = threadIdx.y * blockDim.x + threadIdx.x; float temp = 0; if ((w < width) && (h < height)) temp += initial_array[index]; cache[cacheIndex] = temp; __syncthreads(); int i = (blockDim.x * blockDim.y) / 2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if (cacheIndex == 0) result_array[blockIdx.y * gridDim.x + blockIdx.x] = cache[0]; } __global__ void interp_1_1(int height, int width, float *HI, float *DD, float *RI, float2 *initial_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < width - 2) { for (int i = 0; i < height; i++) { RI[i*(width-2)+idx] = (3.0f / HI[idx+1]) * (initial_array[i*width+idx+2].x - initial_array[i*width+idx+1].x) - (3.0f / HI[idx]) * (initial_array[i*width+idx+1].x - initial_array[i*width+idx].x); if (idx == 0) RI[i*(width-2)+idx] = RI[i*(width-2)+idx] / DD[idx]; } } } __global__ void interp_2_1(int height, int width, float *RI, float *C) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if ((idx > 0) && (idx < (width - 1))) for (int i = 0; i < height; i++) C[i*width+idx] = RI[i*(width-2)+idx-1]; } __global__ void interp_2_2(int height, int width, float *HI, float *C) { //confirm this section int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx == 0 || idx == (width - 1)) for (int i = 0; i < height; i++) { if (idx == 0) C[i*width+idx] = ((1.0f + (HI[idx] / HI[idx+1])) * C[i*width+idx+1]) - ((HI[idx] / HI[idx+1]) * C[i*width+idx+2]); if (idx == (width - 1)) C[i*width+idx] = ((1.0f + (HI[idx-1] / HI[idx-2])) * C[i*width+idx-1]) - ((HI[idx-1] / HI[idx-2]) * C[i*width+idx-2]); } } __global__ void interp_2_3 (int height, int width, float *HI, float *B, float *C, float *D, float2 *initial_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx < (width - 1)) { for (int i = 0; i < height; i++) { D[i*width+idx] = (C[i*width+idx+1] - C[i*width+idx]) / (3.0f * HI[idx]); B[i*width+idx] = ((initial_array[i*width+idx+1].x - initial_array[i*width+idx].x) / HI[idx]) - ((HI[idx] * (C[i*width+idx+1] + 2.0f * C[i*width+idx])) / 3.0f); } } } __global__ void interp_3(int height, int width, float *X, float *B, float *C, float *D,float *query_points, float2 *initial_array, float2 *result_array) { int idx = blockDim.x*blockIdx.x + threadIdx.x; int piece = floor(query_points[idx]) - 1; float temp = query_points[idx] - X[piece]; if (idx < width) { //int piece = floor(query_points[idx]) - 1; //float temp = query_points[idx] - X[piece]; for (int i = 0; i < height; i++) { result_array[i*width+idx].x = initial_array[i*width+piece].x + (B[i*width+piece] * temp); result_array[i*width+idx].x = result_array[i*width+idx].x + (C[i*width+piece] * temp * temp); result_array[i*width+idx].x = result_array[i*width+idx].x + (D[i*width+piece] * temp * temp * temp); } } }
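// Launch-configuration sketch for the 1D kernels above (subtract, repmat, linear_interp, ...):
// they parallelize over width only, with each thread looping serially over height, so the grid
// must cover width columns rather than height*width elements. repmat is forward-declared here on
// the assumption that the definitions above live in the same .cu file; height/width are example
// values only.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void repmat(int height, int width, float *input_array);   // float overload defined above

int main()
{
    const int height = 1024, width = 2048;                 // example A-scan/B-scan dimensions
    float *d_a;
    cudaMalloc(&d_a, (size_t)height * width * sizeof(float));
    cudaMemset(d_a, 0, (size_t)height * width * sizeof(float));

    int threads = 256;
    int blocks  = (width + threads - 1) / threads;         // one thread per column
    repmat<<<blocks, threads>>>(height, width, d_a);       // copies row 0 into rows 1..height-1
    cudaDeviceSynchronize();
    printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaFree(d_a);
    return 0;
}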
21a9db6d4229d06932bb78cea749f11db7feaddb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void vecAdd(double *a, double *b, double *c, int n) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) c[id] = a[id] + b[id]; }
21a9db6d4229d06932bb78cea749f11db7feaddb.cu
#include "includes.h" __global__ void vecAdd(double *a, double *b, double *c, int n) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) c[id] = a[id] + b[id]; }
caed9f61b1922f2181f8d6d1d1cc7ffa1cb28f05.hip
// !!! This is a file automatically generated by hipify!!! #include <cupy/complex.cuh> #include <hipcub/hipcub.hpp> #include <cub/device/device_segmented_reduce.cuh> #include "cupy_cub.h" #include <stdexcept> using namespace cub; // Minimum boilerplate to support complex numbers in sum(), min(), and max(): // - This works only because all data fields in the *Traits struct are not // used in <hipcub/hipcub.hpp>. // - DO NOT USE THIS STUB for supporting CUB sorting!!!!!! // - The Max() and Lowest() below are chosen to comply with NumPy's lexical // ordering; note that std::numeric_limits<T> does not support complex // numbers as in general the comparison is ill defined. template <> struct FpLimits<complex<float>> { static __host__ __device__ __forceinline__ complex<float> Max() { return (complex<float>(FLT_MAX, FLT_MAX)); } static __host__ __device__ __forceinline__ complex<float> Lowest() { return (complex<float>(FLT_MAX * float(-1), FLT_MAX * float(-1))); } }; template <> struct FpLimits<complex<double>> { static __host__ __device__ __forceinline__ complex<double> Max() { return (complex<double>(DBL_MAX, DBL_MAX)); } static __host__ __device__ __forceinline__ complex<double> Lowest() { return (complex<double>(DBL_MAX * double(-1), DBL_MAX * double(-1))); } }; template <> struct NumericTraits<complex<float>> : BaseTraits<FLOATING_POINT, true, false, unsigned int, complex<float>> {}; template <> struct NumericTraits<complex<double>> : BaseTraits<FLOATING_POINT, true, false, unsigned long long, complex<double>> {}; // end of boilerplate // // **** dtype_dispatcher **** // // This is implemented with reference to the following implementation. // https://github.com/rapidsai/cudf/blob/branch-0.6/cpp/src/utilities/type_dispatcher.hpp // template <class functor_t, typename... Ts> void dtype_dispatcher(int dtype_id, functor_t f, Ts&&... 
args) { switch (dtype_id) { case CUPY_CUB_INT8: return f.template operator()<char>(std::forward<Ts>(args)...); case CUPY_CUB_INT16: return f.template operator()<short>(std::forward<Ts>(args)...); case CUPY_CUB_INT32: return f.template operator()<int>(std::forward<Ts>(args)...); case CUPY_CUB_INT64: return f.template operator()<long>(std::forward<Ts>(args)...); case CUPY_CUB_UINT8: return f.template operator()<unsigned char>(std::forward<Ts>(args)...); case CUPY_CUB_UINT16: return f.template operator()<unsigned short>(std::forward<Ts>(args)...); case CUPY_CUB_UINT32: return f.template operator()<unsigned int>(std::forward<Ts>(args)...); case CUPY_CUB_UINT64: return f.template operator()<unsigned long>(std::forward<Ts>(args)...); case CUPY_CUB_FLOAT32: return f.template operator()<float>(std::forward<Ts>(args)...); case CUPY_CUB_FLOAT64: return f.template operator()<double>(std::forward<Ts>(args)...); case CUPY_CUB_COMPLEX64: return f.template operator()<complex<float>>(std::forward<Ts>(args)...); case CUPY_CUB_COMPLEX128: return f.template operator()<complex<double>>(std::forward<Ts>(args)...); default: throw std::runtime_error("Unsupported dtype ID"); } } // // **** CUB Sum **** // struct _cub_reduce_sum { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, hipStream_t s) { DeviceReduce::Sum(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_items, s); } }; struct _cub_segmented_reduce_sum { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_segments, void* offset_start, void* offset_end, hipStream_t s) { DeviceSegmentedReduce::Sum(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_segments, static_cast<int*>(offset_start), static_cast<int*>(offset_end), s); } }; // // **** CUB Min **** // struct _cub_reduce_min { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, hipStream_t s) { DeviceReduce::Min(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_items, s); } }; struct _cub_segmented_reduce_min { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_segments, void* offset_start, void* offset_end, hipStream_t s) { DeviceSegmentedReduce::Min(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_segments, static_cast<int*>(offset_start), static_cast<int*>(offset_end), s); } }; // // **** CUB Max **** // struct _cub_reduce_max { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, hipStream_t s) { DeviceReduce::Max(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_items, s); } }; struct _cub_segmented_reduce_max { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_segments, void* offset_start, void* offset_end, hipStream_t s) { DeviceSegmentedReduce::Max(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_segments, static_cast<int*>(offset_start), static_cast<int*>(offset_end), s); } }; // // **** CUB ArgMin **** // struct _cub_reduce_argmin { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, hipStream_t s) { DeviceReduce::ArgMin(workspace, workspace_size, static_cast<T*>(x), static_cast<KeyValuePair<int, T>*>(y), num_items, s); } }; // TODO(leofang): add _cub_segmented_reduce_argmin 
// // **** CUB ArgMax **** // struct _cub_reduce_argmax { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, hipStream_t s) { DeviceReduce::ArgMax(workspace, workspace_size, static_cast<T*>(x), static_cast<KeyValuePair<int, T>*>(y), num_items, s); } }; // TODO(leofang): add _cub_segmented_reduce_argmax // // APIs exposed to CuPy // /* -------- device reduce -------- */ void cub_device_reduce(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, hipStream_t stream, int op, int dtype_id) { switch(op) { case CUPY_CUB_SUM: return dtype_dispatcher(dtype_id, _cub_reduce_sum(), workspace, workspace_size, x, y, num_items, stream); case CUPY_CUB_MIN: return dtype_dispatcher(dtype_id, _cub_reduce_min(), workspace, workspace_size, x, y, num_items, stream); case CUPY_CUB_MAX: return dtype_dispatcher(dtype_id, _cub_reduce_max(), workspace, workspace_size, x, y, num_items, stream); case CUPY_CUB_ARGMIN: return dtype_dispatcher(dtype_id, _cub_reduce_argmin(), workspace, workspace_size, x, y, num_items, stream); case CUPY_CUB_ARGMAX: return dtype_dispatcher(dtype_id, _cub_reduce_argmax(), workspace, workspace_size, x, y, num_items, stream); default: throw std::runtime_error("Unsupported operation"); } } size_t cub_device_reduce_get_workspace_size(void* x, void* y, int num_items, hipStream_t stream, int op, int dtype_id) { size_t workspace_size = 0; cub_device_reduce(NULL, workspace_size, x, y, num_items, stream, op, dtype_id); return workspace_size; } /* -------- device segmented reduce -------- */ void cub_device_segmented_reduce(void* workspace, size_t& workspace_size, void* x, void* y, int num_segments, void* offset_start, void* offset_end, hipStream_t stream, int op, int dtype_id) { switch(op) { case CUPY_CUB_SUM: return dtype_dispatcher(dtype_id, _cub_segmented_reduce_sum(), workspace, workspace_size, x, y, num_segments, offset_start, offset_end, stream); case CUPY_CUB_MIN: return dtype_dispatcher(dtype_id, _cub_segmented_reduce_min(), workspace, workspace_size, x, y, num_segments, offset_start, offset_end, stream); case CUPY_CUB_MAX: return dtype_dispatcher(dtype_id, _cub_segmented_reduce_max(), workspace, workspace_size, x, y, num_segments, offset_start, offset_end, stream); default: throw std::runtime_error("Unsupported operation"); } } size_t cub_device_segmented_reduce_get_workspace_size(void* x, void* y, int num_segments, void* offset_start, void* offset_end, hipStream_t stream, int op, int dtype_id) { size_t workspace_size = 0; cub_device_segmented_reduce(NULL, workspace_size, x, y, num_segments, offset_start, offset_end, stream, op, dtype_id); return workspace_size; }
caed9f61b1922f2181f8d6d1d1cc7ffa1cb28f05.cu
#include <cupy/complex.cuh> #include <cub/device/device_reduce.cuh> #include <cub/device/device_segmented_reduce.cuh> #include "cupy_cub.h" #include <stdexcept> using namespace cub; // Minimum boilerplate to support complex numbers in sum(), min(), and max(): // - This works only because all data fields in the *Traits struct are not // used in <cub/device/device_reduce.cuh>. // - DO NOT USE THIS STUB for supporting CUB sorting!!!!!! // - The Max() and Lowest() below are chosen to comply with NumPy's lexical // ordering; note that std::numeric_limits<T> does not support complex // numbers as in general the comparison is ill defined. template <> struct FpLimits<complex<float>> { static __host__ __device__ __forceinline__ complex<float> Max() { return (complex<float>(FLT_MAX, FLT_MAX)); } static __host__ __device__ __forceinline__ complex<float> Lowest() { return (complex<float>(FLT_MAX * float(-1), FLT_MAX * float(-1))); } }; template <> struct FpLimits<complex<double>> { static __host__ __device__ __forceinline__ complex<double> Max() { return (complex<double>(DBL_MAX, DBL_MAX)); } static __host__ __device__ __forceinline__ complex<double> Lowest() { return (complex<double>(DBL_MAX * double(-1), DBL_MAX * double(-1))); } }; template <> struct NumericTraits<complex<float>> : BaseTraits<FLOATING_POINT, true, false, unsigned int, complex<float>> {}; template <> struct NumericTraits<complex<double>> : BaseTraits<FLOATING_POINT, true, false, unsigned long long, complex<double>> {}; // end of boilerplate // // **** dtype_dispatcher **** // // This is implemented with reference to the following implementation. // https://github.com/rapidsai/cudf/blob/branch-0.6/cpp/src/utilities/type_dispatcher.hpp // template <class functor_t, typename... Ts> void dtype_dispatcher(int dtype_id, functor_t f, Ts&&... 
args) { switch (dtype_id) { case CUPY_CUB_INT8: return f.template operator()<char>(std::forward<Ts>(args)...); case CUPY_CUB_INT16: return f.template operator()<short>(std::forward<Ts>(args)...); case CUPY_CUB_INT32: return f.template operator()<int>(std::forward<Ts>(args)...); case CUPY_CUB_INT64: return f.template operator()<long>(std::forward<Ts>(args)...); case CUPY_CUB_UINT8: return f.template operator()<unsigned char>(std::forward<Ts>(args)...); case CUPY_CUB_UINT16: return f.template operator()<unsigned short>(std::forward<Ts>(args)...); case CUPY_CUB_UINT32: return f.template operator()<unsigned int>(std::forward<Ts>(args)...); case CUPY_CUB_UINT64: return f.template operator()<unsigned long>(std::forward<Ts>(args)...); case CUPY_CUB_FLOAT32: return f.template operator()<float>(std::forward<Ts>(args)...); case CUPY_CUB_FLOAT64: return f.template operator()<double>(std::forward<Ts>(args)...); case CUPY_CUB_COMPLEX64: return f.template operator()<complex<float>>(std::forward<Ts>(args)...); case CUPY_CUB_COMPLEX128: return f.template operator()<complex<double>>(std::forward<Ts>(args)...); default: throw std::runtime_error("Unsupported dtype ID"); } } // // **** CUB Sum **** // struct _cub_reduce_sum { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, cudaStream_t s) { DeviceReduce::Sum(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_items, s); } }; struct _cub_segmented_reduce_sum { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_segments, void* offset_start, void* offset_end, cudaStream_t s) { DeviceSegmentedReduce::Sum(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_segments, static_cast<int*>(offset_start), static_cast<int*>(offset_end), s); } }; // // **** CUB Min **** // struct _cub_reduce_min { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, cudaStream_t s) { DeviceReduce::Min(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_items, s); } }; struct _cub_segmented_reduce_min { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_segments, void* offset_start, void* offset_end, cudaStream_t s) { DeviceSegmentedReduce::Min(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_segments, static_cast<int*>(offset_start), static_cast<int*>(offset_end), s); } }; // // **** CUB Max **** // struct _cub_reduce_max { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, cudaStream_t s) { DeviceReduce::Max(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_items, s); } }; struct _cub_segmented_reduce_max { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_segments, void* offset_start, void* offset_end, cudaStream_t s) { DeviceSegmentedReduce::Max(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_segments, static_cast<int*>(offset_start), static_cast<int*>(offset_end), s); } }; // // **** CUB ArgMin **** // struct _cub_reduce_argmin { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, cudaStream_t s) { DeviceReduce::ArgMin(workspace, workspace_size, static_cast<T*>(x), static_cast<KeyValuePair<int, T>*>(y), num_items, s); } }; // TODO(leofang): add 
_cub_segmented_reduce_argmin // // **** CUB ArgMax **** // struct _cub_reduce_argmax { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, cudaStream_t s) { DeviceReduce::ArgMax(workspace, workspace_size, static_cast<T*>(x), static_cast<KeyValuePair<int, T>*>(y), num_items, s); } }; // TODO(leofang): add _cub_segmented_reduce_argmax // // APIs exposed to CuPy // /* -------- device reduce -------- */ void cub_device_reduce(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, cudaStream_t stream, int op, int dtype_id) { switch(op) { case CUPY_CUB_SUM: return dtype_dispatcher(dtype_id, _cub_reduce_sum(), workspace, workspace_size, x, y, num_items, stream); case CUPY_CUB_MIN: return dtype_dispatcher(dtype_id, _cub_reduce_min(), workspace, workspace_size, x, y, num_items, stream); case CUPY_CUB_MAX: return dtype_dispatcher(dtype_id, _cub_reduce_max(), workspace, workspace_size, x, y, num_items, stream); case CUPY_CUB_ARGMIN: return dtype_dispatcher(dtype_id, _cub_reduce_argmin(), workspace, workspace_size, x, y, num_items, stream); case CUPY_CUB_ARGMAX: return dtype_dispatcher(dtype_id, _cub_reduce_argmax(), workspace, workspace_size, x, y, num_items, stream); default: throw std::runtime_error("Unsupported operation"); } } size_t cub_device_reduce_get_workspace_size(void* x, void* y, int num_items, cudaStream_t stream, int op, int dtype_id) { size_t workspace_size = 0; cub_device_reduce(NULL, workspace_size, x, y, num_items, stream, op, dtype_id); return workspace_size; } /* -------- device segmented reduce -------- */ void cub_device_segmented_reduce(void* workspace, size_t& workspace_size, void* x, void* y, int num_segments, void* offset_start, void* offset_end, cudaStream_t stream, int op, int dtype_id) { switch(op) { case CUPY_CUB_SUM: return dtype_dispatcher(dtype_id, _cub_segmented_reduce_sum(), workspace, workspace_size, x, y, num_segments, offset_start, offset_end, stream); case CUPY_CUB_MIN: return dtype_dispatcher(dtype_id, _cub_segmented_reduce_min(), workspace, workspace_size, x, y, num_segments, offset_start, offset_end, stream); case CUPY_CUB_MAX: return dtype_dispatcher(dtype_id, _cub_segmented_reduce_max(), workspace, workspace_size, x, y, num_segments, offset_start, offset_end, stream); default: throw std::runtime_error("Unsupported operation"); } } size_t cub_device_segmented_reduce_get_workspace_size(void* x, void* y, int num_segments, void* offset_start, void* offset_end, cudaStream_t stream, int op, int dtype_id) { size_t workspace_size = 0; cub_device_segmented_reduce(NULL, workspace_size, x, y, num_segments, offset_start, offset_end, stream, op, dtype_id); return workspace_size; }
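// Sketch of the two-phase CUB workspace pattern that cub_device_reduce / 
// cub_device_reduce_get_workspace_size above wrap: call once with a NULL workspace to query the
// required temporary-storage size, allocate it, then call again to run the reduction. This uses
// cub::DeviceReduce::Sum directly on doubles; n and the zero-filled input are example choices.
#include <cstdio>
#include <cuda_runtime.h>
#include <cub/device/device_reduce.cuh>

int main()
{
    const int n = 1 << 20;
    double *d_in, *d_out;
    cudaMalloc(&d_in, n * sizeof(double));
    cudaMalloc(&d_out, sizeof(double));
    cudaMemset(d_in, 0, n * sizeof(double));                      // sum of zeros, just to have defined input

    void  *d_temp     = NULL;
    size_t temp_bytes = 0;
    cub::DeviceReduce::Sum(d_temp, temp_bytes, d_in, d_out, n);   // pass 1: size query only
    cudaMalloc(&d_temp, temp_bytes);
    cub::DeviceReduce::Sum(d_temp, temp_bytes, d_in, d_out, n);   // pass 2: actual reduction

    double h_out = -1.0;
    cudaMemcpy(&h_out, d_out, sizeof(double), cudaMemcpyDeviceToHost);
    printf("sum = %f, temp storage = %zu bytes\n", h_out, temp_bytes);

    cudaFree(d_temp); cudaFree(d_in); cudaFree(d_out);
    return 0;
}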
f0e0a2807a6d8405edb80faf830e671af3a98b4e.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <distributed/distributed_manager.h> #include <distributed/comms_mpi_gpudirect.h> #include <distributed/comms_mpi_hostbuffer_stream.h> #include <distributed/comms_visitors.h> #include <thrust/scan.h> #include <thrust/sort.h> #include <thrust/remove.h> #include <thrust/unique.h> #include <thrust/binary_search.h> #include <thrust/iterator/constant_iterator.h> #include <basic_types.h> #include <error.h> #include <util.h> #include <types.h> #include <iostream> #include <sstream> #include <fstream> #include <assert.h> #include "hip/hip_runtime.h" #include "reorder_partition.h" #include "amgx_types/util.h" #include <algorithm> #include <iostream> //debug only: struct is_my_part : public thrust::unary_function<int, bool> { const int _my_part; is_my_part(int my_part) : _my_part(my_part) { } __host__ __device__ bool operator()(const int part) { return (part == _my_part); } }; using namespace std; namespace amgx { static int insertDiagonals = 1; template <typename index_type> static __device__ __forceinline__ index_type internal_index(index_type i, index_type j, index_type k, index_type nx, index_type ny, index_type nz) { return k * (nx * ny) + j * nx + i; } template <typename index_type> static __device__ __forceinline__ int64_t get_global_offset(index_type p, index_type q, index_type r, index_type P, index_type Q, index_type R, index_type num_rows) { int rank_id = r * (P * Q) + q * P + p; return ((int64_t) rank_id) * ((int64_t) num_rows); } template <typename index_type> __global__ void poisson7pt_count_row_len(index_type *row_len, index_type nx, index_type ny, index_type nz, index_type p, index_type q, index_type r, index_type P, index_type Q, index_type R, index_type num_rows) { for (int tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < num_rows ; tidx += blockDim.x * gridDim.x) { /* compute p,q,r from P,Q,R and myid */ int i = tidx % nx; // Position in x direction int j = (( tidx - i) / nx) % ny; // Position in y int k = ( tidx - i 
- nx * j) / ( nx * ny ); // Position in z int substract = ((i == 0) && (p == 0)); substract += ((i == nx - 1) && (p == P - 1)); substract += ((j == 0) && (q == 0)); substract += ((j == ny - 1) && (q == Q - 1)); substract += ((k == 0) && (r == 0)); substract += ((k == nz - 1) && (r == R - 1)); // Store 7 in position (num_rows+1), such that row_len[num_rows+1] = 0 //substract = (tidx == num_rows+1) ? 7 : substract; row_len[tidx] = 7 - substract; } } template <typename index_type, typename mat_value_type> __global__ void poisson7pt_set_col_values(const index_type *__restrict__ row_offsets, index_type *__restrict__ col_indices, mat_value_type *__restrict__ values, index_type *__restrict__ diag, int64_t *__restrict__ local_to_global, index_type nx, index_type ny, index_type nz, index_type p, index_type q, index_type r, index_type P, index_type Q, index_type R, index_type num_rows) { for (int row = threadIdx.x + blockIdx.x * blockDim.x; row < num_rows ; row += blockDim.x * gridDim.x) { /* compute p,q,r from P,Q,R and myid */ int i = row % nx; // Position in x direction int j = (( row - i) / nx) % ny; // Position in y int k = ( row - i - nx * j) / ( nx * ny ); // Position in z int halo_offset = num_rows; int pos = row_offsets[row]; // Diagonal element diag[row] = pos; col_indices[pos] = row; values[pos++] = types::util<mat_value_type>::get_one() * 6.; // ---------------------------- // Neighbor at position i-1 // ---------------------------- if (i) { // Has a i-1 neighbor, which is an internal node at position (i-1,j,k) col_indices[pos] = internal_index(i - 1, j, k, nx, ny, nz); values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); } else if (p) { // Has a i-1 neighbor, which is a halo node int halo_index = halo_offset + k * ny + j; col_indices[pos] = halo_index; values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); int64_t global_offset = get_global_offset(p - 1, q, r, P, Q, R, num_rows); local_to_global[halo_index - num_rows] = global_offset + internal_index(nx - 1, j, k, nx, ny, nz); } if (p) { halo_offset += ny * nz; } // ---------------------------- // Neighbor at position i+1 // ---------------------------- if (i < nx - 1) { // Has i+1 neighbor, which is an internal node at position (i+1,j,k) col_indices[pos] = internal_index(i + 1, j, k, nx, ny, nz); values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); } else { if (p < P - 1) { // Has i+1 neighbor, which is a halo node int halo_index = halo_offset + k * ny + j; col_indices[pos] = halo_index; values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); int64_t global_offset = get_global_offset(p + 1, q, r, P, Q, R, num_rows); local_to_global[halo_index - num_rows] = global_offset + internal_index(0, j, k, nx, ny, nz); } } if (p < P - 1) { halo_offset += ny * nz; } // ---------------------------- // Neighbor at position j-1 // ---------------------------- if (j) { // Has a j-1 neighbor, which is an internal node at position (i,j-1,k) col_indices[pos] = internal_index(i, j - 1, k, nx, ny, nz); values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); } else if (q) { // Has a j-1 neighbor, which is a halo node int halo_index = halo_offset + k * nx + i; col_indices[pos] = halo_index; values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); int64_t global_offset = get_global_offset(p, q - 1, r, P, Q, R, num_rows); 
local_to_global[halo_index - num_rows] = global_offset + internal_index(i, ny - 1, k, nx, ny, nz); } if (q) { halo_offset += nx * nz; } // ---------------------------- // Neighbor at position j+1 // ---------------------------- if (j < ny - 1) { // Has a j+1 neighbor, which is an internal node at position (i,j+1,k) col_indices[pos] = internal_index(i, j + 1, k, nx, ny, nz); values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); } else { if (q < Q - 1) { // Has a j+1 neighbor, which is a halo node int halo_index = halo_offset + k * nx + i; col_indices[pos] = halo_index; values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); int64_t global_offset = get_global_offset(p, q + 1, r, P, Q, R, num_rows); local_to_global[halo_index - num_rows] = global_offset + internal_index(i, 0, k, nx, ny, nz); } } if (q < Q - 1) { halo_offset += nx * nz; } // ---------------------------- // Neighbor at position k-1 // ---------------------------- if (k) { // Has a k-1 neighbor, which is an internal node at position (i,j,k-1) col_indices[pos] = internal_index(i, j, k - 1, nx, ny, nz); values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); } else if (r) { // Has a k-1 neighbor, which is a halo node int halo_index = halo_offset + j * nx + i; col_indices[pos] = halo_index; values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); int64_t global_offset = get_global_offset(p, q, r - 1, P, Q, R, num_rows); local_to_global[halo_index - num_rows] = global_offset + internal_index(i, j, nz - 1, nx, ny, nz); } if (r) { halo_offset += nx * ny; } // ---------------------------- // Neighbor at position k+1 // ---------------------------- if (k < nz - 1) { // Has a k+1 neighbor, which is an internal node at position (i,j,k+1) col_indices[pos] = internal_index(i, j, k + 1, nx, ny, nz); values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); } else { if (r < R - 1) { // Has a k+1 neighbor, which is a halo node int halo_index = halo_offset + j * nx + i; col_indices[pos] = halo_index; values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); int64_t global_offset = get_global_offset(p, q, r + 1, P, Q, R, num_rows); local_to_global[halo_index - num_rows] = global_offset + internal_index(i, j, 0, nx, ny, nz); } } if (r < R - 1) { halo_offset += nx * ny; } } } template <typename mat_value_type> __global__ void set_halo_cols_values(int *row_offsets, int *col_indices, mat_value_type *values, int n, int total_rows, int bsize) { int tid = blockIdx.x * blockDim.x + threadIdx.x; while (tid < (total_rows - n) ) { int offset = row_offsets[n + tid]; col_indices[offset] = n + tid; #pragma unroll for (int i = 0; i < bsize; i++) { values[offset * bsize + i] = types::util<mat_value_type>::get_one(); // This is arbitrary } tid += gridDim.x * blockDim.x; } } template <typename mat_value_type> __global__ void zero_copy_row_lengths_ids_offsets(int *d_old_row_offsets, int *root_row_offsets, int *d_row_ids, int n, int total_num_halos, mat_value_type *diag) { int tid = blockIdx.x * blockDim.x + threadIdx.x; while (tid < n + total_num_halos) { int new_row_id = d_row_ids[tid]; if (tid < n) { int start = d_old_row_offsets[tid]; int row_length = d_old_row_offsets[tid + 1] - start; // zero-copy if (diag != NULL) // will insert the diagonal { row_length++; } root_row_offsets[new_row_id] = row_length; } tid += gridDim.x * blockDim.x; } } 
template< typename mat_value_type> __global__ void ipc_consolidation_upload_matrix(int num_rows, int *row_ids, const int *old_row_offsets, int *new_row_offsets, const int *h_old_col_indices, int *new_col_indices, const mat_value_type *h_old_values, mat_value_type *new_values, const mat_value_type *h_old_diag, int bsize) { int row = blockIdx.x * blockDim.x + threadIdx.x; while (row < num_rows) { int new_row = row_ids[row]; int src_base = old_row_offsets[row]; int dst_base = new_row_offsets[new_row]; // Insert the diagonal at the beginning of each row if (h_old_diag != NULL) { new_col_indices[dst_base] = new_row; #pragma unroll for (int j = 0; j < bsize; j++) { new_values[dst_base * bsize + j] = h_old_diag[row * bsize + j]; } // Increment dst_base by one dst_base++; } int end = old_row_offsets[row + 1] - src_base; for (int i = 0; i < end; i++) { int old_col = h_old_col_indices[src_base + i]; int new_col = row_ids[old_col]; new_col_indices[dst_base + i] = new_col; #pragma unroll for (int j = 0; j < bsize; j++) { new_values[ (dst_base + i)*bsize + j ] = h_old_values[ (src_base + i) * bsize + j ]; } } row += gridDim.x * blockDim.x; } } template< typename mat_value_type> __global__ void ipc_consolidation_replace_values(int num_rows, int *row_ids, const int *old_row_offsets, int *new_row_offsets, const mat_value_type *h_old_values, mat_value_type *new_values, const mat_value_type *h_old_diag, int bsize) { int row = blockIdx.x * blockDim.x + threadIdx.x; while (row < num_rows) { int new_row = row_ids[row]; int src_base = old_row_offsets[row]; int dst_base = new_row_offsets[new_row]; // Insert the diagonal at the beginning of each row if (h_old_diag != NULL) { #pragma unroll for (int j = 0; j < bsize; j++) { new_values[dst_base * bsize + j] = h_old_diag[row * bsize + j]; } // Increment dst_base by one dst_base++; } int end = old_row_offsets[row + 1] - src_base; for (int i = 0; i < end; i++) { #pragma unroll for (int j = 0; j < bsize; j++) { new_values[ (dst_base + i)*bsize + j ] = h_old_values[ (src_base + i) * bsize + j ]; } } row += gridDim.x * blockDim.x; } } __global__ void flag_halo_ids_kernel(INDEX_TYPE *flags, INDEX_TYPE *ids, INDEX_TYPE offset, INDEX_TYPE size, INDEX_TYPE upper) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { flags[ids[idx] - offset] = 1; idx += blockDim.x * gridDim.x; } } __global__ void read_halo_ids_kernel(INDEX_TYPE *flags, INDEX_TYPE *ids, INDEX_TYPE offset, INDEX_TYPE size) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { ids[idx] = flags[ids[idx] - offset]; idx += blockDim.x * gridDim.x; } } template<class T> __global__ void reorder_vector_values(T *dst, const T *src, const INDEX_TYPE *map, INDEX_TYPE blocksize, INDEX_TYPE num_rows) { int row = blockIdx.x * (blockDim.x / blocksize) + threadIdx.x / blocksize; //vectorised by block size int vec_id = threadIdx.x % blocksize; if (threadIdx.x >= (blockDim.x / blocksize)*blocksize ) { return; } while (row < num_rows) { dst[map[row]*blocksize + vec_id] = src[row * blocksize + vec_id]; row += gridDim.x * (blockDim.x / blocksize); } } template<class T> __global__ void inverse_reorder_vector_values(T *dst, T *src, INDEX_TYPE *map, INDEX_TYPE blocksize, INDEX_TYPE num_rows) { int row = blockIdx.x * (blockDim.x / blocksize) + threadIdx.x / blocksize; int vec_id = threadIdx.x % blocksize; if (threadIdx.x >= (blockDim.x / blocksize)*blocksize ) { return; } while (row < num_rows) { dst[row * blocksize + vec_id] = src[map[row] * blocksize + vec_id]; row += gridDim.x * (blockDim.x / 
blocksize); } } __global__ void remove_boundary_kernel(INDEX_TYPE *flags, INDEX_TYPE *maps, INDEX_TYPE size) { int element = blockIdx.x * blockDim.x + threadIdx.x; while (element < size) { flags[maps[element]] = 0; //this won't be a problem, because we are overwriting the same thing element += blockDim.x * gridDim.x; } } __global__ void get_unassigned_kernel(INDEX_TYPE *unassigned_flags, INDEX_TYPE *map, INDEX_TYPE *output, INDEX_TYPE part_size, INDEX_TYPE uf_size ) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < part_size) { if (map[idx] < uf_size) { if (unassigned_flags[map[idx]] == 0) { unassigned_flags[map[idx]] = 1; output[idx] = 1; } } idx += blockDim.x * gridDim.x; } } __global__ void set_unassigned_kernel(INDEX_TYPE *part_assigned_flags, INDEX_TYPE *part_num, INDEX_TYPE *map, INDEX_TYPE *renum, INDEX_TYPE part_size, INDEX_TYPE max_element, INDEX_TYPE renum_size /*, INDEX_TYPE rank*/) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < part_size) { if (map[idx] < renum_size) { if (part_assigned_flags[idx] == 1) { renum[map[idx]] = max_element + part_num[idx]; } //also update the B2L map map[idx] = renum[map[idx]]; } idx += blockDim.x * gridDim.x; } } __global__ void renumber_b2l_maps(INDEX_TYPE *map, INDEX_TYPE *renum, INDEX_TYPE part_size, INDEX_TYPE renum_size) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < part_size) { if (map[idx] < renum_size) { //update the B2L map map[idx] = renum[map[idx]]; idx += blockDim.x * gridDim.x; } } } __global__ void calc_inverse_renumbering(INDEX_TYPE *renum, INDEX_TYPE *irenum, INDEX_TYPE max_element) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < max_element) { if (renum[idx] < 0 || renum[idx] >= max_element) { printf("Renumbering error: %d %d\n", renum[idx], max_element); } irenum[renum[idx]] = idx; idx += blockDim.x * gridDim.x; } } __global__ void create_halo_mapping(INDEX_TYPE *mapping, INDEX_TYPE *node_list, int64_t base_index, INDEX_TYPE map_offset, INDEX_TYPE size) { int row = blockIdx.x * blockDim.x + threadIdx.x; while (row < size) { int idx = node_list[row] - base_index; mapping[idx] = map_offset + row; row += blockDim.x * gridDim.x; } } __global__ void apply_h2l2b_mapping(INDEX_TYPE *mapping, INDEX_TYPE *node_list, int64_t base_index, INDEX_TYPE *b2l_map, INDEX_TYPE size) { int row = blockIdx.x * blockDim.x + threadIdx.x; while (row < size) { int idx = node_list[row] - base_index; mapping[idx] = b2l_map[row]; row += blockDim.x * gridDim.x; } } template <int coop> __global__ void map_col_indices_and_count_rowlen(INDEX_TYPE *row_offsets, INDEX_TYPE *col_indices, INDEX_TYPE *row_length, INDEX_TYPE *mapping, INDEX_TYPE num_rows, INDEX_TYPE insert_diagonal) { extern __shared__ volatile int reduction[]; int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop; int coopIdx = threadIdx.x % coop; while (row < num_rows) { int valid = 0; for (int idx = row_offsets[row] + coopIdx; idx < row_offsets[row + 1]; idx += coop) //this may look horrible, but I expect low branch divergence, because col indices in a row usually belong to the same partition (or at most one more) { int colIdx = col_indices[idx]; int new_col_idx = mapping[colIdx]; if (new_col_idx >= 0) { valid++; col_indices[idx] = new_col_idx; } else { col_indices[idx] = -1; } } reduction[threadIdx.x] = valid; for (int s = 2; s > 0; s >>= 1) { if (coopIdx < s) { reduction[threadIdx.x] += reduction[threadIdx.x + s]; } __syncthreads(); } if (coopIdx == 0) { row_length[row] = reduction[threadIdx.x] + insert_diagonal; } row += 
gridDim.x * blockDim.x / coop; } } __global__ void renumber_P_col_indices(INDEX_TYPE *__restrict__ col_indices, const INDEX_TYPE *__restrict__ renum, INDEX_TYPE num_owned_coarse_pts, INDEX_TYPE num_owned_fine_pts) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < num_owned_fine_pts ) { INDEX_TYPE col_id = col_indices[idx]; if (col_id < num_owned_coarse_pts) { col_indices[idx] = renum[col_id]; } idx += blockDim.x * gridDim.x; } } template <int coop, class T> __global__ void reorder_R_matrix(const INDEX_TYPE *old_rows, const INDEX_TYPE *old_cols, const T *old_vals, const INDEX_TYPE *rows, INDEX_TYPE *cols, T *vals, const INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows, INDEX_TYPE num_owned_rows) { int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop; int coopIdx = threadIdx.x % coop; while (row < num_rows) { INDEX_TYPE src_base = old_rows[row]; INDEX_TYPE dst_base = row < num_owned_rows ? rows[renumbering[row]] : src_base; for (int i = coopIdx; i < old_rows[row + 1]*bsize - src_base * bsize; i += coop) { vals[dst_base * bsize + i] = old_vals[src_base * bsize + i]; } for (int i = coopIdx; i < old_rows[row + 1] - src_base; i += coop) { cols[dst_base + i] = old_cols[src_base + i]; } row += blockDim.x * gridDim.x / coop; } } template <int coop, class T> __global__ void reorder_whole_matrix(INDEX_TYPE *old_rows, INDEX_TYPE *old_cols, T *old_vals, INDEX_TYPE *rows, INDEX_TYPE *cols, T *vals, INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows, INDEX_TYPE insert_diagonal) { int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop; int coopIdx = threadIdx.x % coop; while (row < num_rows) { INDEX_TYPE src_base = old_rows[row]; INDEX_TYPE dst_base = rows[renumbering[row]]; if (insert_diagonal) { if (coopIdx == 0) { cols[dst_base] = renumbering[row]; } for (int i = coopIdx; i < bsize; i += coop) { vals[dst_base * bsize + i] = old_vals[(old_rows[num_rows] + row) * bsize + i]; } dst_base++; } for (int i = coopIdx; i < old_rows[row + 1]*bsize - src_base * bsize; i += coop) { vals[dst_base * bsize + i] = old_vals[src_base * bsize + i]; } for (int i = coopIdx; i < old_rows[row + 1] - src_base; i += coop) { cols[dst_base + i] = old_cols[src_base + i]; } row += blockDim.x * gridDim.x / coop; } } template <int coop, class T> __global__ void replace_values_matrix(const T *src_vals_h, const T *src_diag_h, const INDEX_TYPE *old_rows, const INDEX_TYPE *rows, T *vals, const INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows) { int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop; int coopIdx = threadIdx.x % coop; while (row < num_rows) { INDEX_TYPE src_base = old_rows[row]; INDEX_TYPE dst_base = rows[renumbering[row]]; for (int i = coopIdx; i < bsize; i += coop) { vals[dst_base * bsize + i] = src_diag_h[row * bsize + i]; } dst_base++; for (int i = coopIdx; i < old_rows[row + 1]*bsize - src_base * bsize; i += coop) { vals[dst_base * bsize + i] = src_vals_h[src_base * bsize + i]; } row += blockDim.x * gridDim.x / coop; } } template <int coop, class T> __global__ void replace_values_matrix(const T *src_vals_h, const INDEX_TYPE *old_rows, const INDEX_TYPE *rows, T *vals, const INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows) { int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop; int coopIdx = threadIdx.x % coop; while (row < num_rows) { INDEX_TYPE src_base = old_rows[row]; INDEX_TYPE dst_base = rows[renumbering[row]]; for (int i = coopIdx; i < old_rows[row + 1]*bsize - src_base * bsize; i += coop) { vals[dst_base * bsize 
+ i] = src_vals_h[src_base * bsize + i]; } row += blockDim.x * gridDim.x / coop; } } //TODO: optimize by vectorizing template <class T> __global__ void reorder_whole_halo_matrix(INDEX_TYPE *old_rows, INDEX_TYPE *old_cols, T *old_vals, INDEX_TYPE *rows, INDEX_TYPE *cols, T *vals, INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows, INDEX_TYPE insert_diagonal, INDEX_TYPE global_offset, INDEX_TYPE local_offset, INDEX_TYPE halo_rows) { int row = blockIdx.x * blockDim.x + threadIdx.x; while (row < num_rows) { INDEX_TYPE src_base = old_rows[row]; INDEX_TYPE dst = rows[row]; if (insert_diagonal) { cols[dst] = global_offset + row; for (int j = 0; j < bsize; j++) { vals[dst * bsize + j] = old_vals[(old_rows[halo_rows - local_offset] + local_offset + row) * bsize + j]; } dst++; } for (int i = 0; i < old_rows[row + 1] - src_base; i++) { INDEX_TYPE colIdx = old_cols[src_base + i]; if (colIdx >= 0) { cols[dst] = colIdx; for (int j = 0; j < bsize; j++) { vals[dst * bsize + j] = old_vals[(src_base + i) * bsize + j]; } dst++; } } row += blockDim.x * gridDim.x; } } __global__ void calc_rowlen_reorder(INDEX_TYPE *row_offsets, INDEX_TYPE *row_len, INDEX_TYPE *map, INDEX_TYPE size, INDEX_TYPE insert_diag) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { row_len[map[idx]] = row_offsets[idx + 1] - row_offsets[idx] + insert_diag; idx += blockDim.x * gridDim.x; } } template < class TConfig > void DistributedManagerBase<TConfig>::remove_boundary(IVector_d &flagArray, IVector_d &B2L_map, int size) { int num_blocks = min(4096, (size + 127) / 128); hipLaunchKernelGGL(( remove_boundary_kernel) , dim3(num_blocks), dim3(128), 0, 0, flagArray.raw(), B2L_map.raw(), size); cudaCheckError(); } template < class TConfig > void DistributedManagerBase<TConfig>::get_unassigned(IVector_d &flagArray, IVector_d &B2L_map, IVector_d &partition_flags, int size, int global_size /*, int rank*/) { int num_blocks = min(4096, (size + 191) / 192); hipLaunchKernelGGL(( get_unassigned_kernel) , dim3(num_blocks), dim3(192), 0, 0, flagArray.raw(), B2L_map.raw(), partition_flags.raw(), size, global_size /*, rank*/); cudaCheckError(); } template < class TConfig > void DistributedManagerBase<TConfig>::set_unassigned(IVector_d &partition_flags, IVector_d &partition_renum, IVector_d &B2L_map, IVector_d &renumbering, int size, int max_element, int global_size /*, int rank*/) { int num_blocks = min(4096, (size + 191) / 192); hipLaunchKernelGGL(( set_unassigned_kernel) , dim3(num_blocks), dim3(192), 0, 0, partition_flags.raw(), partition_renum.raw(), B2L_map.raw(), renumbering.raw(), size, max_element, global_size /*,rank*/); cudaCheckError(); } template <class TConfig > inline void DistributedManagerBase<TConfig>::set_initialized(IVector &row_offsets) { // For P and R sizes the sizes are fixed at creation if(m_fixed_view_size) { return; } if (neighbors.size() > 0) { //distributed: cache num_rows/num_nz for different views _num_rows_interior = _num_interior_nodes; _num_nz_interior = row_offsets[_num_rows_interior]; _num_rows_owned = _num_interior_nodes + _num_boundary_nodes; _num_nz_owned = row_offsets[_num_rows_owned]; _num_rows_full = halo_offsets[neighbors.size()]; if (_num_rows_full >= row_offsets.size()) { _num_nz_full = row_offsets[row_offsets.size() - 1]; } else { _num_nz_full = row_offsets[_num_rows_full]; } _num_rows_all = halo_offsets[halo_offsets.size() - 1]; _num_nz_all = _num_nz_full; } else { _num_rows_interior = _num_interior_nodes; _num_nz_interior = row_offsets[_num_rows_interior]; _num_rows_owned = 
_num_interior_nodes; _num_nz_owned = row_offsets[_num_rows_owned]; _num_rows_full = _num_rows_owned; _num_nz_full = _num_nz_owned; _num_rows_all = _num_rows_owned; _num_nz_all = _num_nz_owned; } } template <class TConfig > void DistributedManagerBase<TConfig>::createAggregatesRenumbering(IVector_h &renumbering, IVector_h_vector &B2L_maps, int size, int num_neighbors, int &num_interior_aggregates, int &num_boundary_aggregates, int num_rings) { createAggRenumbering(renumbering, B2L_maps, size, num_neighbors, num_interior_aggregates, num_boundary_aggregates, num_rings); } template <class TConfig > void DistributedManagerBase<TConfig>::createAggregatesRenumbering(IVector_d &renumbering, IVector_d_vector &B2L_maps, int size, int num_neighbors, int &num_interior_aggregates, int &num_boundary_aggregates, int num_rings) { createAggRenumbering(renumbering, B2L_maps, size, num_neighbors, num_interior_aggregates, num_boundary_aggregates, num_rings); } template <class TConfig > template <class IVector_hd> void DistributedManagerBase<TConfig>::createAggRenumbering(IVector_hd &renumbering, std::vector<IVector_hd> &B2L_maps, int size, int num_neighbors, int &num_interior_aggregates, int &num_boundary_aggregates, int num_rings) { if (num_rings != 1) { FatalError("num_rings > 1 not supported in consolidation", AMGX_ERR_NOT_IMPLEMENTED); } //int num_neighbors = this->neighbors.size(); if (num_neighbors == 0) { num_boundary_aggregates = 0; num_interior_aggregates = size; return; } //initial size to size+1 so we have the total size after a scan int global_size = size; renumbering.resize(size + 1); // // Step 1 - in the main matrix, separate interior and boundary nodes (1/0 in flagArray), renumber interior ones with an exclusive scan // IVector_hd flagArray(size + 1); thrust::fill(flagArray.begin(), flagArray.begin() + size + 1, 1); cudaCheckError(); //sets 1 for interior nodes, 0 for boundary node for (int i = 0; i < num_neighbors; i++ ) { int size = B2L_maps[i].size(); remove_boundary(flagArray, B2L_maps[i], size); } //gets the renumbering of interior nodes thrust::exclusive_scan(flagArray.begin(), flagArray.begin() + size + 1, renumbering.begin()); cudaCheckError(); // // Step 2 - Renumber nodes that are in the boundary, stepping through each B2L map, and renumbering ones that have not been renumbered yet // //what is the biggest B2L size INDEX_TYPE max_size = 0; for (int i = 0; i < num_neighbors; i++) { max_size = max_size > B2L_maps[i].size() ? 
max_size : B2L_maps[i].size(); } //allocate work vectors (should be pretty small) IVector_hd partition_flags(max_size); IVector_hd partition_renum(max_size); //the number of renumbered nodes so far int max_element = renumbering[size]; num_interior_aggregates = max_element; num_boundary_aggregates = size - max_element; renumbering.resize(size); for (int i = 0; i < num_neighbors; i++) { //find nodes that are part of the current boundary and they haven't been renumbered yet thrust::fill(partition_flags.begin(), partition_flags.begin() + max_size, 0); int size = B2L_maps[i].size(); get_unassigned(flagArray, B2L_maps[i], partition_flags, size, global_size/*,0*/); //calculate the local renumbering (within this boundary region) of these nodes thrust::exclusive_scan(partition_flags.begin(), partition_flags.begin() + max_size, partition_renum.begin()); //apply renumbering to the big numbering table set_unassigned(partition_flags, partition_renum, B2L_maps[i], renumbering, size, max_element, global_size/*,0*/); //update the number of renumbered nodes max_element += partition_renum[max_size - 1] + partition_flags[max_size - 1]; } cudaCheckError(); } template <class TConfig> inline DistributedManagerBase<TConfig>::DistributedManagerBase(Matrix<TConfig> &a) : m_fine_level_comms(NULL), A(&a), m_pinned_buffer_size(0), m_pinned_buffer(NULL), _num_interior_nodes(0), _num_boundary_nodes(0), _comms(NULL), has_B2L(false), neighbors(_neighbors), B2L_maps(_B2L_maps), L2H_maps(_L2H_maps), B2L_rings(_B2L_rings), halo_rows_ref_count(0), halo_btl_ref_count(0), halo_ranges(_halo_ranges), halo_ranges_h(_halo_ranges_h), part_offsets(_part_offsets), part_offsets_h(_part_offsets_h), halo_rows(NULL), halo_btl(NULL), m_is_root_partition(false), m_is_glued(false), m_is_fine_level_glued(false), m_is_fine_level_consolidated(false), m_is_fine_level_root_partition(false), m_use_cuda_ipc_consolidation(false), m_fixed_view_size(false) { hipEventCreate(&comm_event); hipStreamCreateWithFlags(&m_int_stream, hipStreamNonBlocking); hipStreamCreateWithFlags(&m_bdy_stream, hipStreamNonBlocking); this->createComms(A->getResources()); int my_id = this->getComms()->get_global_id(); int num_parts = this->getComms()->get_num_partitions(); this->set_global_id(my_id); this->set_num_partitions(num_parts); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::generatePoisson7pt(int nx, int ny, int nz, int P, int Q, int R) { int my_id = this->getComms()->get_global_id(); int p, q, r; if (nx < P || ny < Q || nz < R) { FatalError("(nx < P) or (ny < Q) or (nz < R) not supported\n", AMGX_ERR_NOT_IMPLEMENTED); } /* compute p,q,r from P,Q,R and myid */ p = my_id % P; // Position in x direction q = (( my_id - p) / P) % Q; // Position in y r = ( my_id - p - P * q) / ( P * Q ); // Position in z // Create A.row_indices, A.col_indices, A.values, A.diag int num_rows = nx * ny * nz; int num_nonzeros = num_rows * 7; // Ignoring any boundary, 7 nnz per row int num_substract = 0; if (p == 0) { num_substract += ny * nz; } if (p == P - 1) { num_substract += ny * nz; } if (q == 0) { num_substract += nx * nz; } if (q == Q - 1) { num_substract += nx * nz; } if (r == 0) { num_substract += nx * ny; } if (r == R - 1) { num_substract += nx * ny; } num_nonzeros -= num_substract; int num_halo_nodes = 2 * (ny * nz + nx * nz + nx * ny) - num_substract; this->local_to_global_map.resize(num_halo_nodes); this->A->set_initialized(0); 
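// Worked example (illustrative sizes): for a local block of nx = ny = nz = 4 on a 2x2x2 rank grid, the
// corner rank p = q = r = 0 touches three physical boundary faces, so num_substract = ny*nz + nx*nz + nx*ny = 48,
// num_nonzeros = 64*7 - 48 = 400, and num_halo_nodes = 2*(16 + 16 + 16) - 48 = 48, i.e. one halo layer
// on each of the three faces shared with a neighboring rank.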
this->A->resize(0, 0, 0, 1, 1, 1); this->A->addProps(CSR); this->A->resize(num_rows, num_rows + num_halo_nodes, num_nonzeros, 1, 1, 1); const int cta_size = 128; const int grid_size = ::min( 4096, (num_rows + cta_size - 1) / cta_size ); hipLaunchKernelGGL(( poisson7pt_count_row_len) , dim3(grid_size), dim3(cta_size), 0, 0, this->A->row_offsets.raw(), nx, ny, nz, p, q, r, P, Q, R, num_rows); thrust::exclusive_scan(this->A->row_offsets.begin(), this->A->row_offsets.end(), this->A->row_offsets.begin()); cudaCheckError(); // Now set nonzeros columns and values // TODO: vectorize this const int grid_size2 = ::min( 4096, (num_rows + cta_size - 1) / cta_size ); hipLaunchKernelGGL(( poisson7pt_set_col_values) , dim3(grid_size2), dim3(cta_size), 0, 0, this->A->row_offsets.raw(), this->A->col_indices.raw(), this->A->values.raw(), this->A->diag.raw(), this->local_to_global_map.raw(), nx, ny, nz, p, q, r, P, Q, R, num_rows); cudaCheckError(); // fill parts_offsets_h // All ranks have same number of nodes int num_ranks = P * Q * R; this->part_offsets_h.resize(num_ranks + 1); this->part_offsets_h[0] = (int64_t) 0; for (int i = 1; i < num_ranks + 1; i++) { this->part_offsets_h[i] = this->part_offsets_h[i - 1] + (int64_t) num_rows; } // Device to host copy this->part_offsets = this->part_offsets_h; this->num_rows_global = P * Q * R * nx * ny * nz; // this->A->set_initialized(1); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> template <typename t_colIndex> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributed_SetOffsets( int num_ranks, int num_rows_global, const t_colIndex* partition_offsets) { // fill part offsets internal data structures this->part_offsets_h.resize(num_ranks + 1); for (int i = 0; i <= num_ranks; i++) { this->part_offsets_h[i] = partition_offsets[i]; } // copy to device this->part_offsets = this->part_offsets_h; // set num of global rows this->num_rows_global = num_rows_global; cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> template <typename t_colIndex> map<t_colIndex, int> DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributed_LocalToGlobal(int num_rows, I64Vector_h &off_diag_cols) { // sort global column indices thrust::sort(off_diag_cols.begin(), off_diag_cols.end()); // find unique columns and set local <-> global mappings // 1) Removed unneeded vector 2) Create map on host first, upload later (less thrust calls) I64Vector_h local_to_global_h; map<t_colIndex, int> global_to_local; // temporary if (off_diag_cols.size() > 0) { global_to_local[off_diag_cols[0]] = num_rows; local_to_global_h.push_back(off_diag_cols[0]); } for (int i = 1; i < off_diag_cols.size(); i++) { if (off_diag_cols[i] != off_diag_cols[i - 1]) { global_to_local[off_diag_cols[i]] = num_rows + local_to_global_h.size(); local_to_global_h.push_back(off_diag_cols[i]); } } // Upload finished map in one piece this->local_to_global_map.resize(local_to_global_h.size()); thrust::copy(local_to_global_h.begin(), local_to_global_h.end(), this->local_to_global_map.begin()); return global_to_local; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributed_InitLocalMatrix( IVector_h local_col_indices, int num_rows, int num_nonzeros, const int block_dimx, const int block_dimy, 
const int *row_offsets, const mat_value_type *values, const void *diag) { // init local matrix this->A->set_initialized(0); this->A->resize(0, 0, 0, 1, 1, 1); this->A->addProps(CSR); if (diag) { this->A->addProps(DIAG); } this->A->resize(num_rows, num_rows + this->local_to_global_map.size(), num_nonzeros, block_dimx, block_dimy, 1); cudaCheckError(); // set local matrix thrust::copy(row_offsets, row_offsets + num_rows + 1, this->A->row_offsets.begin()); this->A->col_indices = local_col_indices; thrust::copy(values, values + num_nonzeros * block_dimx * block_dimy, this->A->values.begin()); cudaCheckError(); // setup diagonal if (diag) { hipMemcpy(this->A->values.raw() + this->A->diagOffset()*this->A->get_block_size(), diag, sizeof(mat_value_type) * num_rows * block_dimx * block_dimy, hipMemcpyDefault); } else { this->A->computeDiagonal(); } cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> template <typename t_colIndex> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributedMatrixPartitionVec( int num_rows, int num_nonzeros, const int block_dimx, const int block_dimy, const int *row_offsets, const t_colIndex *col_indices, const mat_value_type *values, int num_ranks, int num_rows_global, const void *diag, const int *partition) { // fetch my rank int my_id = this->getComms()->get_global_id(); // setup partition vector IVector_h partitionVec(num_rows_global); if (partition == NULL) { // initialize equal partitioning IVector_h scanPartSize(num_ranks + 1); for (int p = 0; p < num_ranks; p++) { scanPartSize[p] = p * num_rows_global / num_ranks; } scanPartSize[num_ranks] = num_rows_global; int p = 0; for (int i = 0; i < num_rows_global; i++) { if (i >= scanPartSize[p + 1]) { p++; } partitionVec[i] = p; } } else { // use existing partition info for (int i = 0; i < num_rows_global; i++) { partitionVec[i] = partition[i]; } } // compute partition offsets (based on number of elements per partition). Will be modified when calculating partition map. 
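// Illustrative example (hypothetical sizes): with num_rows_global = 6, num_ranks = 2 and
// partitionVec = [0 1 0 1 1 0], the counting pass followed by the inclusive scan yields
// partition_offsets = [0 3 6] (each rank owns 3 rows); the second pass then produces
// partition_map = [0 2 5 1 3 4], i.e. the global row ids listed partition by partition,
// and ipartition_map is simply the inverse of that permutation.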
t_colIndex *partition_offsets = (t_colIndex *)calloc(num_ranks + 1, sizeof(t_colIndex)); for (int i = 0; i < num_rows_global; i++) { int pvi = partitionVec[i]; partition_offsets[pvi + 1]++; } thrust::inclusive_scan(partition_offsets, partition_offsets + num_ranks + 1, partition_offsets); loadDistributed_SetOffsets(num_ranks, num_rows_global, partition_offsets); // compute partition map (which tells you how the global elements are mapped into the partitions) t_colIndex *partition_map = (t_colIndex *)calloc(num_rows_global, sizeof(t_colIndex)); for (int i = 0; i < num_rows_global; i++) { int pvi = partitionVec[i]; t_colIndex poi = partition_offsets[pvi]; partition_map[poi] = i; partition_offsets[pvi]++; } free(partition_offsets); // compute the inverse partition map t_colIndex *ipartition_map = (t_colIndex *)calloc(num_rows_global, sizeof(t_colIndex)); for (int i = 0; i < num_rows_global; i++) { ipartition_map[partition_map[i]] = i; } free(partition_map); int h_cidx_allocated = 0; const t_colIndex *h_col_indices_global = (const t_colIndex *)this->getHostPointerForData(col_indices, num_nonzeros * sizeof(t_colIndex), &h_cidx_allocated); // gather all off-diag columns I64Vector_h off_diag_cols; for (int i = 0; i < num_nonzeros; i++) { if (partitionVec[h_col_indices_global[i]] != my_id) { off_diag_cols.push_back(ipartition_map[h_col_indices_global[i]]); } } auto global_to_local = loadDistributed_LocalToGlobal<t_colIndex>(num_rows, off_diag_cols); // set 1, then scan to compute local row indices IVector_h my_indices(num_rows_global); for (int i = 0; i < num_nonzeros; i++) { if (partitionVec[h_col_indices_global[i]] == my_id) // find my local columns and set to 1 { my_indices[ipartition_map[h_col_indices_global[i]]] = 1; } } thrust::exclusive_scan(my_indices.begin(), my_indices.end(), my_indices.begin()); // remap colums to local IVector_h local_col_indices(num_nonzeros); for (int i = 0; i < num_nonzeros; i++) { if (partitionVec[h_col_indices_global[i]] != my_id) { // off-diag local_col_indices[i] = global_to_local[ipartition_map[h_col_indices_global[i]]]; } else { // diag local_col_indices[i] = my_indices[ipartition_map[h_col_indices_global[i]]]; } } free(ipartition_map); loadDistributed_InitLocalMatrix(local_col_indices, num_rows, num_nonzeros, block_dimx, block_dimy, row_offsets, values, diag); cudaCheckError(); // don't free possibly allocated pinned buffer, since it could be used later. 
if it would not - it would be deallocated automatically /*if (h_cidx_allocated) { free((void*)h_col_indices_global); }*/ } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> template <typename t_colIndex> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributedMatrixPartitionOffsets( int num_rows, int num_nonzeros, const int block_dimx, const int block_dimy, const int *row_offsets, const t_colIndex *col_indices, const mat_value_type *values, int num_ranks, int num_rows_global, const void *diag, const t_colIndex *partition_offsets) { // fetch my rank int my_id = this->getComms()->get_global_id(); // sanity check, cheap to perform, and helps prevent harder-to-debug errors later on if (!std::is_sorted(partition_offsets, partition_offsets + num_ranks + 1)) { FatalError("Partition offsets are not sorted.", AMGX_ERR_BAD_PARAMETERS); } loadDistributed_SetOffsets(num_ranks, num_rows_global, partition_offsets); // Create predicate to determine if a column is in the local diagonal block t_colIndex my_first_col = this->part_offsets_h[my_id]; t_colIndex one_past_my_last_col = this->part_offsets_h[my_id + 1]; auto in_local_diagonal_block = [my_first_col, one_past_my_last_col](const t_colIndex col_index) { return col_index >= my_first_col && col_index < one_past_my_last_col; }; int h_cidx_allocated = 0; const t_colIndex *h_col_indices_global = (const t_colIndex *)this->getHostPointerForData(col_indices, num_nonzeros * sizeof(t_colIndex), &h_cidx_allocated); // gather all off-diag columns I64Vector_h off_diag_cols; for (int i = 0; i < num_nonzeros; i++) { if (!in_local_diagonal_block(h_col_indices_global[i])) { off_diag_cols.push_back(h_col_indices_global[i]); } } auto global_to_local = loadDistributed_LocalToGlobal<t_colIndex>(num_rows, off_diag_cols); // set 1, then scan to compute local row indices // "coordinate-shift" columns so they lie in much smaller range of my diagonal indices int diagonal_size = this->part_offsets_h[my_id + 1] - this->part_offsets_h[my_id]; IVector_h my_indices(diagonal_size); for (int i = 0; i < num_nonzeros; i++) { t_colIndex col_index = h_col_indices_global[i]; if (in_local_diagonal_block(h_col_indices_global[i])) // find my local columns and set to 1 { // olumns that are on *my* diag partition cannot have an index from 0..num_rows_global // instead, part_offsets_h[my_id] <= col_index < part_offsets[my_id+1] col_index -= this->part_offsets_h[my_id]; my_indices[col_index] = 1; } } thrust::exclusive_scan(my_indices.begin(), my_indices.end(), my_indices.begin()); // remap colums to local IVector_h local_col_indices(num_nonzeros); for (int i = 0; i < num_nonzeros; i++) { t_colIndex col_index = h_col_indices_global[i]; if (!in_local_diagonal_block(col_index)) { // off-diag local_col_indices[i] = global_to_local[col_index]; } else { // diag col_index -= this->part_offsets_h[my_id]; local_col_indices[i] = my_indices[col_index]; } } loadDistributed_InitLocalMatrix(local_col_indices, num_rows, num_nonzeros, block_dimx, block_dimy, row_offsets, values, diag); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> template <typename t_colIndex> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributedMatrix( int num_rows, int num_nonzeros, const int block_dimx, const int block_dimy, const int *row_offsets, const t_colIndex *col_indices, const mat_value_type *values, int num_ranks, int num_rows_global, 
const void *diag, const MatrixDistribution &dist) { using PI = MatrixDistribution::PartitionInformation; switch (dist.getPartitionInformationStyle()) { case PI::PartitionVec: loadDistributedMatrixPartitionVec(num_rows, num_nonzeros, block_dimx, block_dimy, row_offsets, col_indices, values, num_ranks, num_rows_global, diag, (const int*) dist.getPartitionData()); break; case PI::PartitionOffsets: loadDistributedMatrixPartitionOffsets(num_rows, num_nonzeros, block_dimx, block_dimy, row_offsets, col_indices, values, num_ranks, num_rows_global, diag, (const t_colIndex*) dist.getPartitionData()); break; default: FatalError("Unsupported partitioning data format used with loadDistributedMatrix", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::renumberMatrixOneRing(int update_neighbours) { FatalError("Distributed classical AMG not implemented on host", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::renumberMatrixOneRing(int update_neighbours) { // Step 1: Using halo_ranges, flag neighbors and at the same time, flag halo_nodes (flag_halo_nodes_local) int my_id = this->global_id(); int num_parts = this->get_num_partitions(); this->set_base_index(this->part_offsets_h[my_id]); this->set_index_range(this->part_offsets_h[my_id + 1] - this->part_offsets_h[my_id]); DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>; // Create/update list of neighbors if (update_neighbours) { typedef typename TConfig::template setVecPrec<AMGX_vecInt64>::Type i64vec_value_type; typedef Vector<i64vec_value_type> I64Vector; typedef typename Matrix<TConfig>::MVector MVector; std::vector<IVector> halo_row_offsets(this->neighbors.size()); std::vector<I64Vector> halo_global_indices(this->neighbors.size()); std::vector<MVector> halo_values(this->neighbors.size()); prep->create_halo_rows_global_indices(*(this->A), halo_row_offsets, halo_global_indices, halo_values); prep->update_neighbors_list(*(this->A), this->neighbors, this->halo_ranges_h, this->halo_ranges, this->part_offsets_h, this->part_offsets, halo_row_offsets, halo_global_indices); } else { prep->create_neighbors_v2(*(this->A)); } this->getComms()->set_neighbors(this->neighbors.size()); // Create B2L_maps and L2H_maps prep->create_boundary_lists_v3(*(this->A)); // halo_offsets int neighbors = this->A->manager->num_neighbors(); int A_num_rows, offset; this->A->getOffsetAndSizeForView(OWNED, &offset, &A_num_rows); this->halo_offsets.resize(neighbors + 1, 0); this->halo_offsets[0] = A_num_rows; for (int i = 0; i < neighbors; i++) { this->halo_offsets[i + 1] = this->halo_offsets[i] + this->B2L_maps[i].size(); } this->getComms()->exchange_vectors(this->A->manager->B2L_maps, *(this->A), 0); // Initialize B2L_rings int num_neighbors = this->neighbors.size(); this->B2L_rings.resize(num_neighbors); for (int i = 0; i < num_neighbors; i++) { this->B2L_rings[i].resize(2); this->B2L_rings[i][0] = 0; this->B2L_rings[i][1] = this->B2L_maps[i].size(); } prep->initialize_B2L_maps_offsets(*(this->A), 1); delete prep; //Use the exchanged halo row matrices and the boundary/halo index lists to renumber and consolidate the matrix // Step 5: renumber all owned rows and columns this->reorder_matrix_owned(); // Step 6: renumber local_to_global_map 
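// For example (illustrative numbers): a rank with base_index 100 and 50 owned rows writes 100..149 into
// the owned part of global_col_indices below; after exchange_halo, the halo entries hold the neighbors'
// renumbered global indices, and copying them out yields the updated local_to_global_map.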
int num_owned_rows = this->A->manager->halo_offsets[0]; int size_one_ring; this->A->getOffsetAndSizeForView(FULL, &offset, &size_one_ring); I64Vector_d global_col_indices(size_one_ring); thrust::sequence(global_col_indices.begin(), global_col_indices.begin() + num_owned_rows, this->base_index() ); cudaCheckError(); global_col_indices.dirtybit = 1; this->exchange_halo(global_col_indices, global_col_indices.tag); thrust::copy(global_col_indices.begin() + num_owned_rows, global_col_indices.begin() + size_one_ring, this->local_to_global_map.begin()); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::renumber_P_R(Matrix_h &P, Matrix_h &R, Matrix_h &A_fine) { FatalError("Distributed classical AMG not implemented on host", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::renumber_P_R(Matrix_d &P, Matrix_d &R, Matrix_d &A_fine) { int cta_size = 256; int num_owned_fine_pts = A_fine.manager->halo_offsets[0]; int num_owned_coarse_pts, offset; // matrix Ac this->A->getOffsetAndSizeForView(OWNED, &offset, &num_owned_coarse_pts); // Renumber the owned col indices of P (not the halo columns ,since P.manager was created assunming some other numbering) int nnz_owned_fine_pts = P.row_offsets[num_owned_fine_pts]; int num_blocks_fine = min(4096, (nnz_owned_fine_pts + cta_size - 1) / cta_size); if (num_blocks_fine > 0) { hipLaunchKernelGGL(( renumber_P_col_indices) , dim3(num_blocks_fine), dim3(cta_size), 0, 0, P.col_indices.raw(), this->renumbering.raw(), num_owned_coarse_pts, nnz_owned_fine_pts); cudaCheckError(); } // Renumber the B2L_maps of P for (int i = 0; i < P.manager->neighbors.size(); i++) { thrust::copy(thrust::make_permutation_iterator(this->renumbering.begin(), P.manager->B2L_maps[i].begin()), thrust::make_permutation_iterator(this->renumbering.begin(), P.manager->B2L_maps[i].end()), P.manager->B2L_maps[i].begin()); } cudaCheckError(); // Don't renumber the L2H_maps or the halo // Renumber the local_to_global_map of matrix P (since neighbors renumbered their owned rows) // Swap owned rows of R IVector new_row_offsets(R.row_offsets.size()); int insert = 0; // Only renumber the owned rows int num_blocks_owned = min(4096, (num_owned_coarse_pts + cta_size - 1) / cta_size); if (num_blocks_owned > 0) { hipLaunchKernelGGL(( calc_rowlen_reorder) , dim3(num_blocks_owned), dim3(cta_size) , 0, 0, R.row_offsets.raw(), new_row_offsets.raw(), this->renumbering.raw(), num_owned_coarse_pts, insert); cudaCheckError(); } thrust::exclusive_scan(new_row_offsets.begin(), new_row_offsets.begin() + num_owned_coarse_pts + 1, new_row_offsets.begin()); cudaCheckError(); // Copy the row_offsets for halo rows thrust::copy(R.row_offsets.begin() + num_owned_coarse_pts, R.row_offsets.end(), new_row_offsets.begin() + num_owned_coarse_pts); cudaCheckError(); // Reorder the rows of R (no need to reorder the column indices) int new_nnz = new_row_offsets[new_row_offsets.size() - 1]; int halo_offset = new_row_offsets[num_owned_coarse_pts]; typedef typename MatPrecisionMap<t_matPrec>::Type ValueTypeA; VVector new_values(new_nnz * R.get_block_size(), types::util< ValueTypeA >::get_zero()); IVector new_col_indices(new_nnz, 0); int num_blocks_total = min(4096, (R.get_num_rows() + cta_size - 1) / cta_size); if 
(num_blocks_total > 0) { hipLaunchKernelGGL(( reorder_R_matrix <32>) , dim3(num_blocks_total), dim3(512), 0, 0, R.row_offsets.raw(), R.col_indices.raw(), R.values.raw(), new_row_offsets.raw(), new_col_indices.raw(), new_values.raw(), this->renumbering.raw(), R.get_block_size(), R.get_num_rows(), num_owned_coarse_pts); cudaCheckError(); } R.col_indices.swap(new_col_indices); R.row_offsets.swap(new_row_offsets); R.values.swap(new_values); // Renumber the local_to_global_map (since neighbors have changed their owned numbering) if (P.manager->neighbors.size() != 0) { int size_one_ring = P.manager->halo_offsets[P.manager->neighbors.size()]; I64Vector_d global_col_indices(size_one_ring); thrust::sequence(global_col_indices.begin(), global_col_indices.begin() + num_owned_coarse_pts, this->base_index()); cudaCheckError(); global_col_indices.dirtybit = 1; P.manager->exchange_halo(global_col_indices, global_col_indices.tag); thrust::copy(global_col_indices.begin() + num_owned_coarse_pts, global_col_indices.begin() + size_one_ring, P.manager->local_to_global_map.begin()); cudaCheckError(); } DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>; prep->initialize_B2L_maps_offsets(P, 1); delete prep; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::createOneRingB2Lmaps() { // Input: // a matrix with N rows, whose column indices are local indices from 0 to N+M-1, // where M is a number of 1-ring halo vertices // The matrix also contains array "local_to_global_map" of size M, which stores the global index of each halo index // Ex: assuming a column has index N+K, where 0 <= K < M, then it's global id is local_to_global_map[K] // The matrix also contains part_offsets_h and part_offsets array, which stores where each partition begins // Output: // This function creates all the necessary data to to 1-ring exchanges // i.e. list of 1-ring neighbors, B2L_maps for 1-ring, halo_offsets for 1-ring, // Also, the function reorders the halo indices, such that 1-ring indices are in the order // of neighbors, and therefore, exchange_halo doesn't have to be changed (i.e. 
L2H = identity) // What it does: // Based on the global indices of its halo vertices, count the number of neighbors // For each neighbor, receive the halo indices that will be needed by neighbor // From those, create B2L_maps[0], i.e. the first-ring boundary lists, for all neighbors // This function assumes that: // part_offset is defined // B2L_maps int my_id = this->global_id(); int num_parts = this->get_num_partitions(); this->set_base_index(this->part_offsets_h[my_id]); this->set_index_range(this->part_offsets_h[my_id + 1] - this->part_offsets_h[my_id]); DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>; // This function creates the array neighbors, which contains a list of partitions to which data // needs to be sent and/or received prep->create_neighbors_v2(*(this->A)); // Here change the manager if some partitions have no neighbors this->getComms()->set_neighbors(this->neighbors.size()); prep->create_B2L_one_ring(*(this->A)); delete prep; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::createOneRingHaloRows() { // Input: // A matrix with 1-ring B2L_maps, 1-ring halo_offsets // Outputs: // A matrix with: 1-ring rows, // 2-ring B2L_maps, // 2-ring halo_offsets // 2-ring neighbors // Implement here: // Look at function create_B2L_from_maps, which calls create_rings, create_halo_btl, create_halo_rows and comms->exchange_matrix_halo DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>; prep->create_one_ring_halo_rows(*(this->A)); // I believe this can be removed since we don't use masked SpMV anymore prep->createRowsLists(*(this->A), false); delete prep; // this is not necessary anymore because we don't use latency hiding // however in future we might want to get back to this in case we want to use latency hiding //this->reorder_matrix(); } template <class TConfig> inline DistributedManagerBase<TConfig>::DistributedManagerBase( Matrix<TConfig> &a, INDEX_TYPE allocated_halo_depth, INDEX_TYPE num_import_rings, int num_neighbors, const VecInt_t *neighbors_) : m_fine_level_comms(NULL), A(&a), m_pinned_buffer_size(0), m_pinned_buffer(NULL), _num_interior_nodes(0), _num_boundary_nodes(0), _comms(NULL), has_B2L(false), neighbors(_neighbors), halo_rows_ref_count(0), halo_rows(NULL), halo_btl_ref_count(0), halo_btl(NULL), halo_ranges(_halo_ranges), halo_ranges_h(_halo_ranges_h), part_offsets(_part_offsets), part_offsets_h(_part_offsets_h), B2L_maps(_B2L_maps), L2H_maps(_L2H_maps), B2L_rings(_B2L_rings), m_is_root_partition(false), m_is_glued(false), m_is_fine_level_glued(false), m_is_fine_level_consolidated(false), m_is_fine_level_root_partition(false), m_use_cuda_ipc_consolidation(false), m_fixed_view_size(false) { hipStreamCreateWithFlags(&m_int_stream, hipStreamNonBlocking); hipStreamCreateWithFlags(&m_bdy_stream, hipStreamNonBlocking); if (num_import_rings != 1) { FatalError("num_rings > 1 not supported in fine_level consolidation", AMGX_ERR_NOT_IMPLEMENTED); } if (allocated_halo_depth != 1) { FatalError("allocated_halo_depth > 1 not supported in fine_level consolidation", AMGX_ERR_NOT_IMPLEMENTED); } this->set_num_halo_rings(num_import_rings); neighbors.resize(num_neighbors); hipMemcpy(neighbors.raw(), neighbors_, num_neighbors * sizeof(VecInt_t), hipMemcpyDefault); cudaCheckError(); } template <class TConfig> inline void DistributedManagerBase<TConfig>::cacheMaps(const VecInt_t *b2l_maps, const VecInt_t *b2l_ptrs, const VecInt_t
*l2h_maps, const VecInt_t *l2h_ptrs) { int num_neighbors = this->neighbors.size(); this->cached_B2L_maps.resize(num_neighbors); this->cached_L2H_maps.resize(num_neighbors); for (int i = 0; i < num_neighbors; i++) { int size = b2l_ptrs[i + 1] - b2l_ptrs[i]; this->cached_B2L_maps[i].resize(size); int count = 0; for (int j = b2l_ptrs[i]; j < b2l_ptrs[i + 1]; j++) { this->cached_B2L_maps[i][count] = b2l_maps[j]; count++; } size = l2h_ptrs[i + 1] - l2h_ptrs[i]; this->cached_L2H_maps[i].resize(size); count = 0; for (int j = l2h_ptrs[i]; j < l2h_ptrs[i + 1]; j++) { this->cached_L2H_maps[i][count] = l2h_maps[j]; count++; } } } template <class TConfig> inline void DistributedManagerBase<TConfig>::cacheMapsOneRing() { int num_neighbors = this->neighbors.size(); this->cached_B2L_maps.resize(num_neighbors); this->cached_L2H_maps.resize(num_neighbors); for (int i = 0; i < num_neighbors; i++) { this->cached_B2L_maps[i] = this->B2L_maps[i]; this->cached_L2H_maps[i] = this->L2H_maps[i]; } } template <class TConfig> inline void DistributedManagerBase<TConfig>::cacheMapsOneRing(const VecInt_t **b2l_maps, const VecInt_t *b2l_sizes, const VecInt_t **l2h_maps, const VecInt_t *l2h_sizes) { int num_neighbors = this->neighbors.size(); this->cached_B2L_maps.resize(num_neighbors); this->cached_L2H_maps.resize(num_neighbors); // buffering in the case of GPU data. This shouldn't much affect performance std::vector<VecInt_t *> b2l_buffer, l2h_buffer; std::vector<VecInt_t> b2l_sizes_buffer, l2h_sizes_buffer; b2l_buffer.resize(num_neighbors); l2h_buffer.resize(num_neighbors); b2l_sizes_buffer.resize(num_neighbors); l2h_sizes_buffer.resize(num_neighbors); hipMemcpy(&(b2l_sizes_buffer[0]), b2l_sizes, sizeof(VecInt_t) * num_neighbors, hipMemcpyDefault); hipMemcpy(&(l2h_sizes_buffer[0]), l2h_sizes, sizeof(VecInt_t) * num_neighbors, hipMemcpyDefault); hipMemcpy(&(b2l_buffer[0]), b2l_maps, sizeof(VecInt_t *) * num_neighbors, hipMemcpyDefault); hipMemcpy(&(l2h_buffer[0]), l2h_maps, sizeof(VecInt_t *) * num_neighbors, hipMemcpyDefault); // caching all of the maps for (int i = 0; i < num_neighbors; i++) { int size = b2l_sizes_buffer[i]; this->cached_B2L_maps[i].resize(size); hipMemcpy(&(this->cached_B2L_maps[i][0]), b2l_buffer[i], sizeof(VecInt_t) * size, hipMemcpyDefault); cudaCheckError(); size = l2h_sizes_buffer[i]; this->cached_L2H_maps[i].resize(size); hipMemcpy(&(this->cached_L2H_maps[i][0]), l2h_buffer[i], sizeof(VecInt_t) * size, hipMemcpyDefault); cudaCheckError(); } } template <class TConfig> void DistributedManagerBase<TConfig>::setAConsolidationFlags( Matrix<TConfig> &in_A) { this->A = &in_A; AMG_Config *rsrc_cfg = this->A->getResources()->getResourcesConfig(); std::string scope; int consolidate_flag, cuda_ipc_flag; rsrc_cfg->getParameter<int>("fine_level_consolidation", consolidate_flag, "default", scope); rsrc_cfg->getParameter<int>("use_cuda_ipc_consolidation", cuda_ipc_flag, "default", scope); this->m_is_fine_level_consolidated = (consolidate_flag != 0); this->m_use_cuda_ipc_consolidation = (cuda_ipc_flag != 0); } template <class TConfig> void DistributedManagerBase<TConfig>::uploadMatrix(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> &in_A) { this->setAConsolidationFlags(in_A); if (this->m_is_fine_level_consolidated) { this->A->manager->consolidateAndUploadAll(n, nnz, block_dimx, block_dimy, row_ptrs, col_indices, data, diag, *(this->A)); } else { this->A->manager->initializeUploadReorderAll(n, nnz, block_dimx, 
block_dimy, row_ptrs, col_indices, data, diag, *(this->A)); } } template <class TConfig> void DistributedManagerBase<TConfig>::checkPinnedBuffer(size_t size) { if ((m_pinned_buffer_size < size) && (m_pinned_buffer != NULL)) { hipHostFree(m_pinned_buffer); m_pinned_buffer = NULL; m_pinned_buffer_size = 0; } if (m_pinned_buffer == NULL) { m_pinned_buffer_size = (size_t)(size * 1.1); hipHostMalloc(&m_pinned_buffer, m_pinned_buffer_size); } } template <class TConfig> DistributedManagerBase<TConfig>::~DistributedManagerBase() { if (m_pinned_buffer != NULL) { hipHostFree(m_pinned_buffer); } destroyComms(); // from childrens: hipStreamDestroy(this->m_int_stream); hipStreamDestroy(this->m_bdy_stream); if (!this->halo_rows_ref_count && this->halo_rows != NULL) { delete this->halo_rows; this->halo_rows = NULL; } if (!this->halo_btl_ref_count && this->halo_btl != NULL) { delete this->halo_btl; this->halo_btl = NULL; } } // if pointer is host pointer - returns data. If it is device pointer - copies it to the m_pinned_buffer and returns pointer to m_pinned_buffer template <class TConfig> void *DistributedManagerBase<TConfig>::getHostPointerForData(void *ptr, size_t size, int *allocated) { hipError_t rc; hipPointerAttribute_t att; void *ptr_h; cudaCheckError(); /* WARNING: We may accept the following types of allocation for ptr: 1. malloc [host memory] 2. hipMalloc [device memory] 3. malloc + hipHostRegister [AMGX_pin_memory/AMGX_unpin_memory host memory] 4. hipHostMalloc [pinned host memory form the beginning] The correct way to conver these cases is the following: hipPointerAttribute_t att; hipError_t st = hipPointerGetAttributes(&att, ptr); if (st == hipSuccess) { //you are in case 2, 3 or 4. } else{ //you are in case 1. } The following pattern of checks should be implemented hipPointerAttribute_t att; hipError_t st = hipPointerGetAttributes(&att, ptr); if (st == hipSuccess) { //you are in case 2 or 4. } else{ st = hipHostGetDevicePointer(ptr_on_device, ptr, 0); if (st == hipSuccess){ //you are in case 3. } else{ //you are in case 1. } } The above pattern will be used whenever we need to process input data. Obs.: parameter size is in bytes and parameter allocated indicates whether memory was allocated and needs to be release later on. */ /* // original implementation hipPointerGetAttributes(&att, ptr); if (att.hostPointer == NULL) { checkPinnedBuffer(size); hipMemcpy(m_pinned_buffer, ptr, size, hipMemcpyDefault); return m_pinned_buffer; } else { return ptr; } */ *allocated = 0; // get pointer to values on the device rc = hipPointerGetAttributes(&att, ptr); if (rc == hipSuccess) { //you are in case 2 or 4 from the above comment. if (att.hostPointer == NULL) { //you are in case 2 checkPinnedBuffer(size); rc = hipMemcpy(m_pinned_buffer, ptr, size, hipMemcpyDefault); if (rc != hipSuccess) { FatalError("Could not copy into the temporary (host) storage. Try pinning the memory to avoid the hipMemcpy.", AMGX_ERR_BAD_PARAMETERS); } ptr_h = m_pinned_buffer; *allocated = 1; } else { //you are in case 4 ptr_h = ptr; } } else { //you are in case 1 or 3 from the above comment ptr_h = ptr; } hipGetLastError(); //to reset last error /* check for null pointers */ if (ptr_h == NULL) { FatalError("Result of (host) allocation of required temporary storage is NULL. Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS); } return ptr_h; } // if pointer is host pointer - returns data. 
If it is device pointer - copies it to the m_pinned_buffer and returns pointer to m_pinned_buffer template <class TConfig> const void *DistributedManagerBase<TConfig>::getHostPointerForData(const void *ptr, size_t size, int *allocated) { hipError_t rc; hipPointerAttribute_t att; void *ptr_h; cudaCheckError(); /* WARNING: We may accept the following types of allocation for ptr: 1. malloc [host memory] 2. hipMalloc [device memory] 3. malloc + hipHostRegister [AMGX_pin_memory/AMGX_unpin_memory host memory] 4. hipHostMalloc [pinned host memory form the beginning] The correct way to conver these cases is the following: hipPointerAttribute_t att; hipError_t st = hipPointerGetAttributes(&att, ptr); if (st == hipSuccess) { //you are in case 2, 3 or 4. } else{ //you are in case 1. } The following pattern of checks should be implemented hipPointerAttribute_t att; hipError_t st = hipPointerGetAttributes(&att, ptr); if (st == hipSuccess) { //you are in case 2 or 4. } else{ st = hipHostGetDevicePointer(ptr_on_device, ptr, 0); if (st == hipSuccess){ //you are in case 3. } else{ //you are in case 1. } } The above pattern will be used whenever we need to process input data. Obs.: parameter size is in bytes and parameter allocated indicates whether memory was allocated and needs to be release later on. */ *allocated = 0; // get pointer to values on the device rc = hipPointerGetAttributes(&att, ptr); if (rc == hipSuccess) { //you are in case 2 or 4 from the above comment. if (att.hostPointer == NULL) { //you are in case 2 checkPinnedBuffer(size); rc = hipMemcpy(m_pinned_buffer, ptr, size, hipMemcpyDefault); if (rc != hipSuccess) { FatalError("Could not copy into the temporary (host) storage. Try pinning the memory to avoid the hipMemcpy.", AMGX_ERR_BAD_PARAMETERS); } ptr_h = m_pinned_buffer; *allocated = 1; hipGetLastError(); //to reset last error return ptr_h; } else { //you are in case 4 hipGetLastError(); //to reset last error return ptr; } } else { hipGetLastError(); //to reset last error //you are in case 1 or 3 from the above comment return ptr; } } template <class TConfig> void *DistributedManagerBase<TConfig>::getDevicePointerForData(void *ptr, size_t size, int *allocated) { hipError_t rc; hipPointerAttribute_t att; void *ptr_d; cudaCheckError(); /* WARNING: We may accept the following types of allocation for ptr: 1. malloc [host memory] 2. hipMalloc [device memory] 3. malloc + hipHostRegister [AMGX_pin_memory/AMGX_unpin_memory host memory] 4. hipHostMalloc [pinned host memory form the beginning] The correct way to conver these cases is the following: hipPointerAttribute_t att; hipError_t st = hipPointerGetAttributes(&att, ptr); if (st == hipSuccess) { //you are in case 2, 3 or 4. } else{ //you are in case 1. } The following pattern of checks should be implemented hipPointerAttribute_t att; hipError_t st = hipPointerGetAttributes(&att, ptr); if (st == hipSuccess) { //you are in case 2 or 4. } else{ st = hipHostGetDevicePointer(ptr_on_device, ptr, 0); if (st == hipSuccess){ //you are in case 3. } else{ //you are in case 1. } } The above pattern will be used whenever we need to process input data. Obs.: parameter size is in bytes and parameter allocated indicates whether memory was allocated and needs to be release later on. */ *allocated = 0; // get pointer to values on the device rc = hipPointerGetAttributes(&att, ptr); if (rc == hipSuccess) { //you are in case 2 or 4 from the above comment. 
ptr_d = (void *)att.devicePointer; } else { //you are in case 1 or 3 from the above comment rc = hipHostGetDevicePointer(&ptr_d, ptr, 0); if (rc != hipSuccess) { //you are in case 1 rc = hipMalloc(&ptr_d, size); if (rc != hipSuccess) { FatalError("Could not allocate required temporary storage. Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS); } rc = hipMemcpy(ptr_d, ptr, size, hipMemcpyDefault); if (rc != hipSuccess) { FatalError("Could not copy into the temporary storage. Try pinning the memory to avoid the hipMemcpy.", AMGX_ERR_BAD_PARAMETERS); } *allocated = 1; } } /* check for null pointers */ if (ptr_d == NULL) { FatalError("Result of allocation of required temporary storage is NULL. Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS); } hipGetLastError(); //to reset last error return ptr_d; } template <class TConfig> const void *DistributedManagerBase<TConfig>::getDevicePointerForData(const void *ptr, size_t size, int *allocated) { hipError_t rc; hipPointerAttribute_t att; void *ptr_d; cudaCheckError(); /* WARNING: We may accept the following types of allocation for ptr: 1. malloc [host memory] 2. hipMalloc [device memory] 3. malloc + hipHostRegister [AMGX_pin_memory/AMGX_unpin_memory host memory] 4. hipHostMalloc [pinned host memory form the beginning] The correct way to conver these cases is the following: hipPointerAttribute_t att; hipError_t st = hipPointerGetAttributes(&att, ptr); if (st == hipSuccess) { //you are in case 2, 3 or 4. } else{ //you are in case 1. } The following pattern of checks should be implemented hipPointerAttribute_t att; hipError_t st = hipPointerGetAttributes(&att, ptr); if (st == hipSuccess) { //you are in case 2 or 4. } else{ st = hipHostGetDevicePointer(ptr_on_device, ptr, 0); if (st == hipSuccess){ //you are in case 3. } else{ //you are in case 1. } } The above pattern will be used whenever we need to process input data. Obs.: parameter size is in bytes and parameter allocated indicates whether memory was allocated and needs to be release later on. */ *allocated = 0; // get pointer to values on the device rc = hipPointerGetAttributes(&att, ptr); if (rc == hipSuccess) { //you are in case 2 or 4 from the above comment. hipGetLastError(); //to reset last error return (const void *)att.devicePointer; } else { //you are in case 1 or 3 from the above comment rc = hipHostGetDevicePointer(&ptr_d, (void *)ptr, 0); if (rc != hipSuccess) { //you are in case 1 rc = hipMalloc(&ptr_d, size); if (rc != hipSuccess) { FatalError("Could not allocate required temporary storage. Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS); } rc = hipMemcpy(ptr_d, ptr, size, hipMemcpyDefault); if (rc != hipSuccess) { FatalError("Could not copy into the temporary storage. Try pinning the memory to avoid the hipMemcpy.", AMGX_ERR_BAD_PARAMETERS); } *allocated = 1; hipGetLastError(); //to reset last error return (const void *)ptr_d; } } /* check for null pointers */ if (ptr_d == NULL) { FatalError("Result of allocation of required temporary storage is NULL. 
Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS); } // shouldn't get there hipGetLastError(); //to reset last error return NULL; } template <class TConfig> void initializeMatrixCopyAll(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> *A) { typedef typename TConfig::MatPrec mat_value_type; A->resize( n, n, nnz, block_dimx, block_dimy ); //Upload the entire matrix hipMemcpy( A->row_offsets.raw(), row_ptrs, (n + 1) * sizeof(int), hipMemcpyDefault ); cudaCheckError(); hipMemcpy( A->col_indices.raw(), col_indices, (nnz) * sizeof(int), hipMemcpyDefault ); cudaCheckError(); hipMemcpy( A->values.raw(), (mat_value_type *)data, (nnz * block_dimx * block_dimy) * sizeof(mat_value_type), hipMemcpyDefault ); cudaCheckError(); if (diag) { hipMemcpy( A->values.raw() + A->diagOffset()*A->get_block_size(), (mat_value_type *)diag, (n * block_dimx * block_dimy) * sizeof(mat_value_type), hipMemcpyDefault ); } else { A->computeDiagonal(); } cudaCheckError(); } template <class TConfig> void DistributedManagerBase<TConfig>::updateMapsReorder() { int my_id = this->getComms()->get_global_id(); DistributedComms<TConfig> *comms_tmp = this->getComms(); DistributedComms<TConfig> **comms_ = &comms_tmp; // Copy B2L_maps in their final place int num_neighbors = this->neighbors.size(); B2L_maps.resize(num_neighbors); L2H_maps.resize(num_neighbors); for (int i = 0; i < num_neighbors; i++) { B2L_maps[i] = this->cached_B2L_maps[i]; L2H_maps[i] = this->cached_L2H_maps[i]; } //Create a DistributedArranger object to map further halo rings and to construct halo row matrices and exchange them (if halo_coloring != LAST) DistributedArranger<TConfig> *prep = new DistributedArranger<TConfig>; prep->create_B2L_from_maps( (*(this->A)), my_id, this->num_halo_rings(), neighbors, B2L_maps, L2H_maps, B2L_rings, comms_, &halo_rows, &halo_btl); DistributedManagerBaseInit(my_id, 0, this->A->get_num_rows(), *(this->A), comms_, NULL, NULL); //Use the exchanged halo row matrices and the boundary/halo index lists to renumber and consolidate the matrix this->reorder_matrix(); prep->initialize_B2L_maps_offsets(*(this->A), this->num_halo_rings()); delete prep; } template <class TConfig> void DistributedManagerBase<TConfig>::initializeUploadReorderAll(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> &in_A) { this->A = &in_A; initializeMatrixCopyAll<TConfig>(n, nnz, block_dimx, block_dimy, row_ptrs, col_indices, data, diag, this->A); this->updateMapsReorder(); } template <class TConfig> void DistributedManagerBase<TConfig>::destroyComms() { if ( (this->_comms) != NULL ) { if (this->_comms->decr_ref_count()) { delete (this->_comms); this->_comms = NULL; } } if ( (this->m_fine_level_comms) != NULL) { if (this->m_fine_level_comms->decr_ref_count()) { delete (this->m_fine_level_comms); this->m_fine_level_comms = NULL; } } } template <class TConfig> void DistributedManagerBase<TConfig>::initComms(Resources *rsrc) { this->createComms(rsrc); int my_id = this->getComms()->get_global_id(); int num_parts = this->getComms()->get_num_partitions(); this->set_global_id(my_id); this->set_num_partitions(num_parts); } template <class TConfig> void DistributedManagerBase<TConfig>::createComms(Resources *rsrc) { // create communicator #ifdef AMGX_WITH_MPI destroyComms(); if (rsrc == NULL) FatalError("Resources should not be NULL", AMGX_ERR_INTERNAL); 
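// The code below chooses the distributed communicator from the "communicator" entry of the resources
// configuration: "MPI_DIRECT" selects the CUDA-aware (GPU Direct) path, while "MPI" stages halo
// exchanges through pinned host buffers. As an illustrative (not verbatim) configuration fragment:
//   communicator=MPI_DIRECT
// using the usual key=value config style; how the string reaches AMG_Config depends on how the
// Resources object was created.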
MPI_Comm *mpi_comm = rsrc->getMpiComm(); AMG_Config *cfg = rsrc->getResourcesConfig(); std::string comm_value, comm_scope; cfg->getParameter<std::string>("communicator", comm_value, "default", comm_scope); int rank = -1; MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (comm_value == "MPI_DIRECT") { _comms = new CommsMPIDirect<TConfig>(*cfg, comm_scope, mpi_comm); if ( rank == 0 ) { std::cout << "Using CUDA-Aware MPI (GPU Direct) communicator..." << std::endl; } } else if (comm_value == "MPI") { CommsMPIHostBufferStream<TConfig> *ptr_comm = new CommsMPIHostBufferStream<TConfig>(*cfg, comm_scope, mpi_comm); _comms = ptr_comm; if ( rank == 0 ) { std::cout << "Using Normal MPI (Hostbuffer) communicator..." << std::endl; } } else { throw std::string("Bad communicator value"); } #endif } template <class TConfig> void DistributedManagerBase<TConfig>::malloc_export_maps(VecInt_t ***b2l_maps_e, VecInt_t **b2l_maps_sizes_e, VecInt_t ***l2h_maps_e, VecInt_t **l2h_maps_sizes_e) { *b2l_maps_e = (VecInt_t **) malloc(sizeof(VecInt_t *)*this->num_neighbors()); *l2h_maps_e = (VecInt_t **) malloc(sizeof(VecInt_t *)*this->num_neighbors()); *b2l_maps_sizes_e = (VecInt_t *) malloc(sizeof(VecInt_t) * (this->num_neighbors())); *l2h_maps_sizes_e = (VecInt_t *) malloc(sizeof(VecInt_t) * (this->num_neighbors())); for (int i = 0; i < this->num_neighbors(); i++) { (*b2l_maps_sizes_e)[i] = B2L_maps[i].size(); (*l2h_maps_sizes_e)[i] = L2H_maps[i].size(); (*b2l_maps_e)[i] = (VecInt_t *) malloc(sizeof(VecInt_t) * ( (*b2l_maps_sizes_e)[i]) ); if (L2H_maps[i].size() != 0) { (*l2h_maps_e)[i] = (VecInt_t *) malloc(sizeof(VecInt_t) * ( (*l2h_maps_sizes_e)[i]) ); thrust::copy(L2H_maps[i].begin(), L2H_maps[i].end(), (*l2h_maps_e)[i]); } thrust::copy(B2L_maps[i].begin(), B2L_maps[i].end(), (*b2l_maps_e)[i]); } cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::createRenumbering(IVector &renumbering) { int num_neighbors = this->neighbors.size(); // still renumber if the number of neighbors = 0, to support non-symmetric matrices // if (num_neighbors == 0) return; /* EXAMPLE Example matrix, partition 1 arrives with state: A.row_offsets = [0 4 11 15 20] A.col_indices = [4 0 1 2 4 5 0 1 2 3 7 0 1 2 3 1 2 3 6 7] num_neighbors=2; neighbors = [0 2] B2L_rings[[0 2 4][0 2 4]] B2L_maps[[0 1| 2 3][1 3| 0 2]] L2H_maps (and halo_lists) [[4 5][6 7]] */ int size = 0; if (this->L2H_maps.size()) { size = thrust::reduce(this->A->col_indices.begin(), this->A->col_indices.end(), int(0), thrust::maximum<int>()) + 1; //Sufficient to do reduction on lth maps cudaCheckError(); } else { size = this->A->get_num_rows(); } int rings = (this->B2L_rings.size() > 0) ? 
this->B2L_rings[0].size() - 1 : 0; //initial size to size+1 so we have the total size after a scan renumbering.resize(size + 1); int global_size = size; // // Step 1 - in the main matrix, separate interior and boundary nodes (1/0 in flagArray), renumber interior ones with an exclusive scan // IVector flagArray(size + 1); thrust::fill(flagArray.begin(), flagArray.begin() + size + 1, 1); cudaCheckError(); //sets 1 for interior nodes, 0 for boundary node for (int i = 0; i < num_neighbors; i++ ) { int size = this->B2L_rings[i][1]; int num_blocks = min(4096, (size + 127) / 128); if (size > 0) { hipLaunchKernelGGL(( remove_boundary_kernel) , dim3(num_blocks), dim3(128), 0, 0, flagArray.raw(), this->B2L_maps[i].raw(), size); } //If there are any L2H maps if (this->L2H_maps.size() && this->L2H_maps[i].size()) { int size = this->L2H_maps[i].size(); int num_blocks = min(4096, (size + 127) / 128); hipLaunchKernelGGL(( remove_boundary_kernel) , dim3(num_blocks), dim3(128), 0, 0, flagArray.raw(), this->L2H_maps[i].raw(), size); } cudaCheckError(); } //gets the renumbering of interior nodes thrust::exclusive_scan(flagArray.begin(), flagArray.begin() + size + 1, renumbering.begin()); cudaCheckError(); /* EXAMPLE After removing 1-ring boundary nodes and halo nodes from flagArray: [0 0 1 0 0 0 0 0] After exclusive scan, which gives renumbering for interior nodes (only node #2) renumbering: [0 0 0 1 1 1 1 1] */ // // Step 2 - Renumber nodes that are in the boundary, stepping through each B2L map, and renumbering ones that have not been renumbered yet // //what is the biggest B2L size INDEX_TYPE max_size = 0; for (int i = 0; i < num_neighbors; i++) { max_size = max_size > this->B2L_rings[i][1] ? max_size : this->B2L_rings[i][1]; if (this->L2H_maps.size()) { max_size = max_size > this->L2H_maps[i].size() ? 
max_size : this->L2H_maps[i].size(); } } //allocate work vectors (should be pretty small) that are used to renumber boundary nodes IVector boundary_renum_flags(max_size); IVector boundary_renum(max_size); //the number of renumbered nodes so far int max_element = renumbering[size]; this->_num_interior_nodes = max_element; this->_num_boundary_nodes = this->A->get_num_rows() - max_element; renumbering.resize(size); /* EXAMPLE size = 8 max_size = 2, max_element = 1, num_interior_nodes=1, num_boundary_nodes = 4-1 = 3 */ for (int i = 0; i < num_neighbors; i++) { //find nodes that are part of the current boundary and they haven't been renumbered yet thrust::fill(boundary_renum_flags.begin(), boundary_renum_flags.begin() + max_size, 0); int size = this->B2L_rings[i][1]; int num_blocks = min(4096, (size + 191) / 192); if (size > 0) hipLaunchKernelGGL(( get_unassigned_kernel) , dim3(num_blocks), dim3(192), 0, 0, flagArray.raw(), this->B2L_maps[i].raw(), boundary_renum_flags.raw(), size, global_size /*,rank*/); //calculate the local renumbering (within this boundary region) of these nodes thrust::exclusive_scan(boundary_renum_flags.begin(), boundary_renum_flags.begin() + max_size, boundary_renum.begin()); //apply renumbering to the big numbering table if (size > 0) hipLaunchKernelGGL(( set_unassigned_kernel) , dim3(num_blocks), dim3(192), 0, 0, boundary_renum_flags.raw(), boundary_renum.raw(), this->B2L_maps[i].raw(), renumbering.raw(), size, max_element, global_size /*,rank*/); //update the number of renumbered nodes max_element += boundary_renum[max_size - 1] + boundary_renum_flags[max_size - 1]; /* EXAMPLE for neighbor 0 (ID 0) boundary_renum_flags = [0 0], size = 2, flagArray [0 0 1 0 0 0 0 0] get_unassigned_kernel's output: boundary_renum_flags = [1 1] flagArray [1 1 1 0 0 0 0 0] after exclusive scan: boundary_renum [0 1] set_unassigned_kernel updates these arrays and renumbers B2L map: renumbering = [1 2 0 1 1 1 1 1] B2L_maps[0] = [1 2| 2 3] (note that after element 3 in renumbering and after element 2 we have invalid/not yet updated values) max_element = 3 for neighbor 1 (ID 2) get_unassigned_kernels's output: boundary_renum_flags [0 1] flagArray [1 1 1 1 0 0 0 0] after exclusive scan boundary_renum [0 0] set_unassigned_kernel renumbering [1 2 0 3 1 1 1 1] B2L_maps[1] = [2 3| 0 2] max_element = 4 */ } cudaCheckError(); //Get renumbering for halo indices if (this->L2H_maps.size()) { //TODO: simplify this, we don't need to check whether it has already been renumbered, there is no overlap between halos for (int i = 0; i < num_neighbors; i++) { //find nodes that are part of the current boundary and they haven't been renumbered yet thrust::fill(boundary_renum_flags.begin(), boundary_renum_flags.begin() + max_size, 0); int size = this->L2H_maps[i].size(); int num_blocks = min(4096, (size + 191) / 192); if (size > 0) hipLaunchKernelGGL(( get_unassigned_kernel) , dim3(num_blocks), dim3(192), 0, 0, flagArray.raw(), this->L2H_maps[i].raw(), boundary_renum_flags.raw(), size, global_size /*,rank*/); //calculate the local renumbering (within this boundary region) of these nodes thrust::exclusive_scan(boundary_renum_flags.begin(), boundary_renum_flags.begin() + max_size, boundary_renum.begin()); //apply renumbering to the big numbering table if (size > 0) hipLaunchKernelGGL(( set_unassigned_kernel) , dim3(num_blocks), dim3(192), 0, 0, boundary_renum_flags.raw(), boundary_renum.raw(), this->L2H_maps[i].raw(), renumbering.raw(), size, max_element, global_size /*,rank*/); //update the number of renumbered 
nodes max_element += boundary_renum[max_size - 1] + boundary_renum_flags[max_size - 1]; /* EXAMPLE for neighbor 0 (ID 0) boundary_renum_flags = [0 0], size = 2, flagArray [1 1 1 1 0 0 0 0] get_unassigned_kernel's output: boundary_renum_flags = [1 1] flagArray [1 1 1 1 1 1 0 0] after exclusive scan: boundary_renum [0 1] set_unassigned_kernel updates these arrays and renumbers B2L map: renumbering = [1 2 0 3 4 5 1 1] L2H_maps[0] = [4 5] max_element = 6 for neighbor 1 (ID 2) get_unassigned_kernels's output: boundary_renum_flags [1 1] flagArray [1 1 1 1 1 1 1 1] after exclusive scan boundary_renum [0 1] set_unassigned_kernel renumbering = [1 2 0 3 4 5 6 7] L2H_maps[1] = [6 7] max_element = 8 */ } cudaCheckError(); } //apply renumbering to further halo rings too if (rings > 1) { for (int i = 0; i < num_neighbors; i++) { int size = this->B2L_rings[i][this->B2L_rings[i].size() - 1] - this->B2L_rings[i][1]; int num_blocks = min(4096, (size + 127) / 128); hipLaunchKernelGGL(( renumber_b2l_maps) , dim3(num_blocks), dim3(128), 0, 0, this->B2L_maps[i].raw() + this->B2L_rings[i][1], renumbering.raw(), size, global_size /*, rank*/); } cudaCheckError(); } /* EXAMPLE renumbers further boundary rings as listed in B2L_maps, since they have not been replaced yet with their renumbered values B2L_maps [[1 2| 0 3][2 3| 1 0]] */ } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::reorder_matrix_owned() { int num_neighbors = this->neighbors.size(); int size = this->A->get_num_rows(); int num_blocks = min(4096, (size + 511) / 512); int rings = (this->B2L_rings.size() > 0) ? this->B2L_rings[0].size() - 1 : 0; this->set_num_halo_rings(rings); int diag = this->A->hasProps(DIAG); if (diag) { FatalError("External diag not supported in classical path", AMGX_ERR_NOT_IMPLEMENTED); } // // Step 1 & 2 - create renumbering // this->createRenumbering(this->renumbering); //now we have the full renumbering table in renum, calculate the inverse this->inverse_renumbering.resize(this->renumbering.size()); if (this->renumbering.size() > 1) { hipLaunchKernelGGL(( calc_inverse_renumbering) , dim3(min(4096, ((int)this->renumbering.size() + 511) / 512)), dim3(512) , 0, 0, this->renumbering.raw(), this->inverse_renumbering.raw(), this->renumbering.size()); cudaCheckError(); } // // Step 4 - calculate number/offset of nodes in the halos from the neighbors, ring by ring // this->halo_offsets.resize(num_neighbors + 1); this->halo_offsets[0] = size; for (int i = 0; i < num_neighbors; i++) { this->halo_offsets[i + 1] = this->halo_offsets[i] + this->L2H_maps[i].size(); } this->set_num_halo_rows(this->halo_offsets[this->halo_offsets.size() - 1] - size); int nh = this->num_halo_rows(); int total_rows = size + nh; cudaCheckError(); // // Step 6 - renumber halo matrices and calculate row length (to eventually append to the big matrix) // int insert = 0; //recalculate row_offsets IVector new_row_offsets(size + 1); if (num_blocks > 0) { hipLaunchKernelGGL(( calc_rowlen_reorder) , dim3(num_blocks), dim3(512), 0, 0, this->A->row_offsets.raw(), new_row_offsets.raw(), this->renumbering.raw(), size, insert); cudaCheckError(); } thrust::copy(thrust::make_permutation_iterator(this->renumbering.begin(), this->A->col_indices.begin()), thrust::make_permutation_iterator(this->renumbering.begin(), this->A->col_indices.end()), this->A->col_indices.begin()); cudaCheckError(); //row_offsets array created by exclusive scan of row 
sizes thrust::exclusive_scan(new_row_offsets.begin(), new_row_offsets.begin() + size + 1, new_row_offsets.begin()); cudaCheckError(); // // Step 7 - consolidate column indices and values // int new_nnz = new_row_offsets[new_row_offsets.size() - 1]; typedef typename MatPrecisionMap<t_matPrec>::Type ValueTypeA; VVector new_values((new_nnz + 1 )* this->A->get_block_size(), types::util<ValueTypeA>::get_zero()); IVector new_col_indices(new_nnz, 0); //reorder based on row permutation if (num_blocks > 0) { hipLaunchKernelGGL(( reorder_whole_matrix <32>) , dim3(num_blocks), dim3(512), 0, 0, this->A->row_offsets.raw(), this->A->col_indices.raw(), this->A->values.raw(), new_row_offsets.raw(), new_col_indices.raw(), new_values.raw(), this->renumbering.raw(), this->A->get_block_size(), size, insert); cudaCheckError(); } //create and append halo rows size //create an identity matrix in CSR format int nnz = this->A->get_num_nz(); IVector identity_csr_rows(nh + 1); IVector identity_csr_cols(nh); VVector identity_csr_vals(nh, types::util<ValueTypeA>::get_one()); //needs to be changed to MVector, but this definition is messed up in the header file (should fix later) thrust::sequence(identity_csr_rows.begin(), identity_csr_rows.end()); thrust::sequence(identity_csr_cols.begin(), identity_csr_cols.end()); /*for example, 2x2 identity_csr matrix is created: identity_csr_rows = { 0, 1, 2 } identity_csr_cols = { 0, 1 } identity_csr_vals = { 1.0, 1.0 } */ //shift identity tmatrix by size = this->A->get_num_rows(); thrust::transform(identity_csr_rows.begin(), identity_csr_rows.end(), thrust::constant_iterator<INDEX_TYPE>(nnz), identity_csr_rows.begin(), thrust::plus<INDEX_TYPE>()); thrust::transform(identity_csr_cols.begin(), identity_csr_cols.end(), thrust::constant_iterator<INDEX_TYPE>(size), identity_csr_cols.begin(), thrust::plus<INDEX_TYPE>()); /*for example, 2x2 identity_csr matrix is created: identity_csr_rows = { 0, 1, 2 } identity_csr_cols = {size, size+1 } identity_csr_vals = { 1.0, 1.0 } */ /* WARNING: you must be very careful with the view you are setting (cuurently the view coming here by default is ALL = FULL). If - classical path is selected then the createOneRingHaloRows -> create_one_ring_halo_rows -> append_halo_rows routine will be called. It will overwrite the halo rows setup here (and will use view OWNED, which will ignore the halo rows setup here, to determine how the new halo rows should be placed). - aggregation path is selected then the extra rows setup here will be used in the R*A*P product, where (in order to match dimensions of R and P) it is assumed that (the local partition) matrix A is square, therefore it must be padded by identity rows at the bottom to compensate for the "extra" columns that are outside of the main square part. The old routines for the aggregation path do this padding at the end of the reorder_matrix routine below. 
*/ //ViewType v = this->A->currentView(); //this->A->setView(ALL); //Approach 1: use existing routine to append the identity matrix to the existing one // (seems like too much overhead, also need identity matrix per neighbor) //DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>; //prep->append_halo_rows(this->A, identity_csr_rows, identity_csr_cols, identity_csr_vals); //delete prep; //Approach 2: custom for this routine new_row_offsets.resize(total_rows + 1); new_col_indices.resize(nnz + nh); new_values.resize(nnz + nh + 1); //extra 1 element stores zero at the end (to follow the original design) //new_values[nnz]=-1; //marker to track the last element thrust::copy(identity_csr_rows.begin(), identity_csr_rows.end(), new_row_offsets.begin() + size ); thrust::copy(identity_csr_cols.begin(), identity_csr_cols.end(), new_col_indices.begin() + nnz); thrust::copy(new_values.begin() + nnz, new_values.begin() + nnz + 1, new_values.begin() + nnz + nh); thrust::copy(identity_csr_vals.begin(), identity_csr_vals.end(), new_values.begin() + nnz); /* WARNING: see above. */ this->A->set_num_cols(total_rows); this->A->set_num_rows(total_rows); this->A->col_indices.swap(new_col_indices); new_row_offsets.resize(total_rows + 1); this->A->row_offsets.swap(new_row_offsets); new_row_offsets.swap(this->old_row_offsets); this->A->values.swap(new_values); this->A->m_seq_offsets.resize(total_rows + 1); thrust::sequence(this->A->m_seq_offsets.begin(), this->A->m_seq_offsets.end()); cudaCheckError(); //TODO: only do this if AMG_Config matrix_halo_exchange!=2 this->A->delProps(COO); if (!insert) { this->A->computeDiagonal(); } this->set_initialized(this->A->row_offsets); this->A->setView(OWNED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::reorder_matrix() { int num_neighbors = this->neighbors.size(); if (num_neighbors == 0) { return; } int size = this->A->get_num_rows(); int num_blocks = min(4096, (size + 511) / 512); int rings = this->B2L_rings[0].size() - 1; this->set_num_halo_rings(rings); int diag = this->A->hasProps(DIAG); std::vector<Matrix<TConfig_d> > &halo_rows = *this->halo_rows; std::vector<DistributedManager<TConfig_d> > &halo_btl = *this->halo_btl; /* EXAMPLE The example matrix, on partition 1 arrives at this point with the following state: num_rings=2 A.num_rows = 4; A.num_nz = 20 A.row_offsets = [0 4 11 15 20] A.col_indices = [4 0 1 2 4 5 0 1 2 3 7 0 1 2 3 1 2 3 6 7] num_neighbors=2; neighbors = [0 2] B2L_rings[[0 2 4][0 2 4]] B2L_maps[[0 1| 2 3][1 3| 0 2]] L2H_maps (and halo_lists) [[4 5][6 7]] With the exchange halo rows: halo_btl[0] (received from neighbor ID 0) global_id = 0; base_index=0; index_range=6; B2L_rings[0] = [0 2 4]; B2L_maps[0] = [2 3| 0 1] L2H_maps = [4 5] halo_rows[0].row_offsets = [0 5 13 17 21] halo_rows[0].col_indices = [1 2 3 4 5 0 1 2 3 4 5 6 7 0 1 3 6 0 1 2 3] halo_btl[1] (received from neighbor ID 2) global_id = 2; base_index=0; index_range=8; B2L_rings[0] = [0 2 4]; B2L_maps[0] = [1 2| 0 3] L2H_maps = [6 7] halo_rows[1].row_offsets = [0 4 11 16 20] halo_rows[1].col_indices = [7 1 2 3 5 6 7 0 1 2 3 4 5 0 2 3 0 1 2 3] */ // // Step 1 & 2 - create renumbering // this->createRenumbering(this->renumbering); cudaCheckError(); /* EXAMPLE this->renumbering = [1 2 0 3 4 5 6 7] B2L_maps = [[1 2| 0 3][2 3| 1 0]] L2H_maps = [[4 5][6 7]] */ // // Step 3 - given a full renumbering of owned nodes, calculate inverse renumbering 
// //now we have the full renumbering table in renum, calculate the inverse this->inverse_renumbering.resize(this->renumbering.size()); hipLaunchKernelGGL(( calc_inverse_renumbering) , dim3(min(4096, ((int)this->renumbering.size() + 511) / 512)), dim3(512) , 0, 0, this->renumbering.raw(), this->inverse_renumbering.raw(), this->renumbering.size()); cudaCheckError(); /* EXAMPLE this->inverse_renumbering = [2 0 1 3 4 5 6 7] */ // // Step 4 - calculate number/offset of nodes in the halos from the neighbors, ring by ring // this->halo_offsets.resize(rings * num_neighbors + 1, 0); for (int ring = 0; ring < rings; ring++) { for (int i = 0; i < num_neighbors; i++) { this->halo_offsets[ring * num_neighbors + i] = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring]; } } thrust::exclusive_scan(this->halo_offsets.begin(), this->halo_offsets.end(), this->halo_offsets.begin(), size); cudaCheckError(); this->set_num_halo_rows(this->halo_offsets[this->halo_offsets.size() - 1] - size); int total_rows = size + this->num_halo_rows(); if (total_rows < this->renumbering.size()) { FatalError("total rows < renumbering.size(), send/recv maps should cover all matrix halo columns", AMGX_ERR_NOT_IMPLEMENTED); } if (total_rows > this->renumbering.size()) { this->A->getResources()->warning("# owned nodes + # halo nodes > matrix columns: send/recv maps have some unreferenced halo indices; they are not directly connected to our partition and therefore we won't compute them. Please use 2-ring comms maps if you want to specify 2nd ring neighbors"); } cudaCheckError(); /* EXAMPLE halo_offsets [2 2 2 2] after exclusive scan: 4 + [0 2 4 6 8] = [4 6 8 10 12] num_halo_rows = 8, total_rows = 12 */ // // Step 5 - create big mapping table of all halo indices we received (this may use a little too much memory) // //count number of fine rows of neighbors thrust::host_vector<INDEX_TYPE> neighbor_rows(num_neighbors + 1); int max_num_rows = 0; for (int i = 0; i < num_neighbors; i++) { neighbor_rows[i] = halo_btl[i].index_range(); max_num_rows = max_num_rows > halo_rows[i].get_num_rows() ?
max_num_rows : halo_rows[i].get_num_rows(); } thrust::exclusive_scan(neighbor_rows.begin(), neighbor_rows.end(), neighbor_rows.begin()); cudaCheckError(); int total_rows_of_neighbors = neighbor_rows[num_neighbors]; /* EXAMPLE neigbor_rows = [0 6 14] total_rows_of_neighbors = 14 */ IVector halo_mapping(total_rows_of_neighbors); thrust::fill(halo_mapping.begin(), halo_mapping.end(), -1); cudaCheckError(); //ring by ring, neighbor by neighbor assign sequentially increasing numbers for halo nodes for (int ring = 0; ring < rings; ring++) { for (int i = 0; i < num_neighbors; i++) { int size = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring]; int num_blocks = min(4096, (size + 127) / 128); //This renumbering has to result in the same renumbering that comes out of L2H renumbering hipLaunchKernelGGL(( create_halo_mapping) , dim3(num_blocks), dim3(128), 0, 0, halo_mapping.raw() + neighbor_rows[i], halo_btl[i].B2L_maps[0].raw() + halo_btl[i].B2L_rings[0][ring], halo_btl[i].base_index(), this->halo_offsets[ring * num_neighbors + i], size); cudaCheckError(); /* EXAMPLE ring 0 neighbor 0 - halo_btl[0].B2L_maps[0] = [2 3| 0 1] halo_btl[0].L2H_maps = [4 5] halo_mapping = [-1 -1 4 5 -1 -1 |-1 -1 -1 -1 -1 -1 -1 -1] ring 0 neighbor 1 - halo_btl[1].B2L_maps[0] = [1 2| 0 3] halo_btl[1].L2H_maps = [6 7] halo_mapping = [-1 -1 4 5 -1 -1 |-1 6 7 -1 -1 -1 -1 -1] ring 1 neighbor 0 - halo_btl[0].B2L_maps[0] = [2 3| 0 1] halo_btl[0].L2H_maps = [4 5] halo_mapping = [8 9 4 5 -1 -1 |-1 6 7 -1 -1 -1 -1 -1] ring 1 neighbor 1 - halo_btl[1].B2L_maps[0] = [1 2| 0 3] halo_btl[1].L2H_maps = [6 7] halo_mapping = [8 9 4 5 -1 -1 |10 6 7 11 -1 -1 -1 -1] */ } } cudaCheckError(); for (int i = 0; i < num_neighbors; i++) { int size = halo_btl[i].L2H_maps[0].size(); int num_blocks = min(4096, (size + 127) / 128); //Map the column indices of the halo rows that point back to boundary nodes hipLaunchKernelGGL(( apply_h2l2b_mapping) , dim3(num_blocks), dim3(128), 0, 0, halo_mapping.raw() + neighbor_rows[i], halo_btl[i].L2H_maps[0].raw(), halo_btl[i].base_index(), this->B2L_maps[i].raw(), size); cudaCheckError(); /* EXAMPLE neighbor 0 - mapping back to our own (boundary) indices halo_mapping = [8 9 4 5 1 2 |10 6 7 11 -1 -1 -1 -1] neighbor 1 - mapping back to our own (boundary) indices halo_mapping = [8 9 4 5 1 2 |10 6 7 11 -1 -1 2 3] */ } cudaCheckError(); /* EXAMPLE neighbor_rows = [0 6 14] halo_mapping = [8 9 4 5 1 2 |10 6 7 11 -1 -1 2 3] The first part (0-6) of halo_mapping gives a local index for all the indices that we want to know about in halo_btl[0] The second part (7-14) gives local indices for halo_btl[1], that is both halo ring there, and the column indices representing vertices in this partition's boundary. 
Note that it does not give indices (-1) for vertices 5 and 6 in neighbor 1 (ID 2), which are column indices connecting it to neighbor 0, hence the two halo regions are not connected */ // // Step 6 - renumber halo matrices and calculate row length (to eventually append to the big matrix) // int insert = 0; if (this->A->hasProps(DIAG) && insertDiagonals) { insert = 1; } diag = diag && !insertDiagonals; //recalculate row_offsets IVector new_row_offsets(size + this->num_halo_rows() + 1); hipLaunchKernelGGL(( calc_rowlen_reorder) , dim3(num_blocks), dim3(512), 0, 0, this->A->row_offsets.raw(), new_row_offsets.raw(), this->renumbering.raw(), size, insert); cudaCheckError(); IVector neighbor_rows_d(num_neighbors + 1); thrust::copy(neighbor_rows.begin(), neighbor_rows.end(), neighbor_rows_d.begin()); cudaCheckError(); /* EXAMPLE get row length according to renumbering new_row_offsets = [4 4 7 5 0 0 0 0 0 0 0 0 0] */ //map column indices of my own matrix /*map_col_indices<4><<<num_blocks, 512>>>(this->A->row_offsets.raw(), this->A->col_indices.raw(), this->renumbering.raw(), this->halo_ranges.raw(), halo_mapping.raw(), neighbor_rows_d.raw(), this->base_index(), num_neighbors, size);*/ thrust::copy(thrust::make_permutation_iterator(this->renumbering.begin(), this->A->col_indices.begin()), thrust::make_permutation_iterator(this->renumbering.begin(), this->A->col_indices.end()), this->A->col_indices.begin()); cudaCheckError(); /* EXAMPLE use this->renumbering = [1 2 0 3 4 5 6 7] to map old column indices to new column indices (i.e. according to interior - boundary - halo separation), but do not reshuffle them into their place yet A.col_indices = [4 0 1 2 4 5 0 1 2 3 7 0 1 2 3 1 2 3 6 7] becomes A.col_indices = [4 1 2 0 4 5 1 2 0 3 7 1 2 0 3 2 0 3 6 7] */ cudaCheckError(); IVector temp_row_len(max_num_rows); for (int i = 0; i < num_neighbors; i++) { //map column indices of halo matrices and count of nonzeros we will keep int size = halo_rows[i].get_num_rows(); int num_blocks = min(4096, (size + 127) / 128); hipLaunchKernelGGL(( map_col_indices_and_count_rowlen<4>) , dim3(num_blocks), dim3(128), 128 * sizeof(INDEX_TYPE), 0, halo_rows[i].row_offsets.raw(), halo_rows[i].col_indices.raw(), temp_row_len.raw(), halo_mapping.raw() + neighbor_rows[i], size, insert); cudaCheckError(); //number of nonzeros per row copied into big row sizes array for (int ring = 0; ring < rings; ring++) { thrust::copy(temp_row_len.begin() + halo_btl[i].B2L_rings[0][ring], temp_row_len.begin() + halo_btl[i].B2L_rings[0][ring + 1], new_row_offsets.begin() + this->halo_offsets[ring * num_neighbors + i]); } cudaCheckError(); /* EXAMPLE halo_mapping = [8 9 4 5 1 2 |10 6 7 11 -1 -1 2 3] look at halo row matrices, and halo_mapping, count column indices that do not map to -1 and map them to their new, local index halo_rows[0].col_indices = [1 2 3 4 5 0 1 2 3 4 5 6 7 0 1 3 6 0 1 2 3] becomes halo_rows[0].col_indices = [9 4 5 1 2 8 9 4 5 1 2 -1 -1 8 9 5 -1 8 9 4 5] with temp_row_len = [5 6 3 4] copied into new_row_offsets: [4 4 7 5| 5 6| 0 0| 3 4| 0 0 0] halo_rows[1].col_indices = [7 1 2 3 5 6 7 0 1 2 3 4 5 0 2 3 0 1 2 3] becomes halo_rows[1].col_indices = [3 6 7 11 -1 2 3 10 6 7 11 -1 -1 10 7 11 10 6 7 11] with temp_row_len = [4 6 3 4] copied into new_row_offsets: [4 4 7 5| 5 6| 4 6| 3 4| 3 4 0] */ } cudaCheckError(); //row_offsets array created by exclusive scan of row sizes thrust::exclusive_scan(new_row_offsets.begin(), new_row_offsets.begin() + size + this->num_halo_rows() + 1, new_row_offsets.begin()); cudaCheckError(); /* EXAMPLE 
Exclusive scan to get new_row_offsets array: new_row_offsets = [0 4 8 15 20| 25 31| 35 41| 44 48| 51 55] */ // // Step 7 - consolidate column indices and values // int new_nnz = new_row_offsets[new_row_offsets.size() - 1]; typedef typename MatPrecisionMap<t_matPrec>::Type ValueTypeA; VVector new_values((new_nnz + 1 + diag * (total_rows - 1))* this->A->get_block_size(), types::util<ValueTypeA>::get_zero()); IVector new_col_indices(new_nnz, 0); //reorder based on row permutation hipLaunchKernelGGL(( reorder_whole_matrix <32>) , dim3(num_blocks), dim3(512), 0, 0, this->A->row_offsets.raw(), this->A->col_indices.raw(), this->A->values.raw(), new_row_offsets.raw(), new_col_indices.raw(), new_values.raw(), this->renumbering.raw(), this->A->get_block_size(), size, insert); cudaCheckError(); if (diag) { //reorder based on row permutation hipLaunchKernelGGL(( reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, new_values.raw() + new_row_offsets[total_rows]*this->A->get_block_size(), this->A->values.raw() + this->A->row_offsets[size]*this->A->get_block_size(), this->renumbering.raw(), this->A->get_block_size(), size); cudaCheckError(); } int cumulative_num_rows = size; for (int i = 0; i < num_neighbors; i++) { for (int ring = 0; ring < rings; ring++) { int num_rows = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring]; int num_blocks = min(4096, (num_rows + 127) / 128); //copy in nonzeros that we are keeping //TODO: access pattern - should be implemented with warp-wide scans to decide which nonzeros we are keeping and where the rest is going hipLaunchKernelGGL(( reorder_whole_halo_matrix) , dim3(num_blocks), dim3(128), 0, 0, halo_rows[i].row_offsets.raw() + halo_btl[i].B2L_rings[0][ring], halo_rows[i].col_indices.raw(), halo_rows[i].values.raw(), new_row_offsets.raw() + this->halo_offsets[ring * num_neighbors + i], new_col_indices.raw(), new_values.raw(), NULL, this->A->get_block_size(), num_rows, insert, this->halo_offsets[ring * num_neighbors + i], halo_btl[i].B2L_rings[0][ring], halo_btl[i].B2L_rings[0][rings]); if (diag) { thrust::copy(halo_rows[i].values.begin() + (halo_rows[i].row_offsets[halo_rows[i].get_num_rows()] + halo_btl[i].B2L_rings[0][ring])*this->A->get_block_size(), halo_rows[i].values.begin() + (halo_rows[i].row_offsets[halo_rows[i].get_num_rows()] + halo_btl[i].B2L_rings[0][ring + 1])*this->A->get_block_size(), new_values.begin() + (new_row_offsets[total_rows] + cumulative_num_rows)*this->A->get_block_size()); cumulative_num_rows += num_rows; } } } cudaCheckError(); /* EXAMPLE copy everything in place, dropping -1 column indices in the halo and reordering the owned rows new_row_offsets = [0 4 8 15 20| 25 31| 35 41| 44 48| 51 55] new_col_indices = [1 2 0 3 4 1 2 0 4 5 1 2 0 3 7 2 0 3 6 7 -end of owned 9 4 5 1 2 8 9 4 5 1 2 - end of neighbor 0 ring 0 3 6 7 11 2 3 10 6 7 11 - end of neighbor 1 ring 0 8 9 5 8 9 4 5 - end of neighbor 0 ring 1 10 7 11 10 6 7 11] - end of neighbor 1 ring 1 */ this->A->set_num_cols(total_rows); this->A->set_num_rows(size); this->A->col_indices.swap(new_col_indices); new_row_offsets.resize(total_rows + 1); this->A->row_offsets.swap(new_row_offsets); new_row_offsets.swap(this->old_row_offsets); this->A->values.swap(new_values); this->A->m_seq_offsets.resize(total_rows + 1); thrust::sequence(this->A->m_seq_offsets.begin(), this->A->m_seq_offsets.end()); if (insert) { this->A->delProps(DIAG); this->A->diag.resize(total_rows); thrust::copy(this->A->row_offsets.begin(), this->A->row_offsets.end() - 1, this->A->diag.begin()); } 
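// Added clarification (not in the original source): the diag[i] = row_offsets[i] copy just above only
// makes sense because, on the insertDiagonals path, the reorder kernels store the diagonal block as the
// first nonzero of each row, so the per-row diagonal pointer is simply the row start. A minimal sketch
// with hypothetical offsets:
//
//   row_offsets = [0 3 7 10]   // 3 rows
//   diag        = [0 3 7]      // diag[i] == row_offsets[i], values[diag[i]] addresses a_ii
//
// On the external-DIAG path (insert == 0) the diagonal blocks are instead appended after all
// off-diagonal values, starting at new_row_offsets[total_rows]*block_size, which is what the
// reorder_vector_values call earlier in this function fills.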
cudaCheckError(); delete this->halo_rows; delete this->halo_btl; //set halo_rows and halo_btl to NULL to avoid a potential double free situation in the future this->halo_rows = NULL; this->halo_btl = NULL; this->A->delProps(COO); this->A->set_initialized(1); //TODO: only do this if AMG_Config matrix_halo_exchange!=2 if (!insert) { this->A->computeDiagonal(); } this->A->setView(OWNED); } //function object (functor) for thrust calls (it is a unary operator to add a constant) template<typename T> class add_constant_op { const T c; public: add_constant_op(T _c) : c(_c) {} __host__ __device__ T operator()(const T &x) const { return x + c; } }; template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::obtain_shift_l2g_reordering(index_type n, I64Vector_d &l2g, IVector_d &p, IVector_d &q) { /* WARNING: Exchange halo of the inverse_reordering, which is implicitly based on the local_to_global_map (l2g). Notice that it is implicit in the exchange_halo routine, since you are getting exactly the vector halo elements, which are exactly the elements you need. They however must be shifted by the partition starting points (starting global row indices, which are containe din array part_offsets). This allows us to avoid constructing the global vector for inverse permutation, as is done in reference MATLAB code. */ //Recall that part_offsets provide the starting point (global row index) of every partition, in other words, //they contain the prefix sum of number of rows assigned to each partition. Also, notice that part_offsets and //part_offsets_h have the same values on device and host, respectively. See below few lines for details: index_type tag = 1 * 133 + 3 * 7 + 0; //some random number for the tag index_type l = p.size(); q.resize(l); thrust::copy (p.begin(), p.end(), q.begin()); thrust::transform(q.begin(), q.end(), q.begin(), add_constant_op<index_type>(this->part_offsets[this->global_id()])); this->exchange_halo(q, tag); thrust::sequence (q.begin(), q.begin() + n); thrust::transform(q.begin(), q.begin() + n, q.begin(), add_constant_op<index_type>(this->part_offsets[this->global_id()])); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::unpack_partition(index_type *Bp, index_type *Bc, mat_value_type *Bv) { index_type l, n, nnz, offset; index_type *ir; index_type *Ap; index_type *Ac; mat_value_type *Av; IVector q; //some initializations this->A->getOffsetAndSizeForView(OWNED, &offset, &n); this->A->getNnzForView(OWNED, &nnz); l = this->inverse_renumbering.size(); ir = this->inverse_renumbering.raw(); Ap = this->A->row_offsets.raw(); Ac = this->A->col_indices.raw(); Av = this->A->values.raw(); //(i) reorder the matrix back (into mixed interior-boundary nodes) //applies to rows and columns (out-of-place) reorder_partition<index_type, mat_value_type, true, true> (n, nnz, Ap, Ac, Av, Bp, Bc, Bv, l, ir); cudaCheckError(); //obtain reordering q that combines the shift of the diagonal block with the off-diagonal block indices conversion from local to global this->obtain_shift_l2g_reordering(n, this->local_to_global_map, this->inverse_renumbering, q); cudaCheckError(); //(ii) reorder the matrix back (shift the diagonal block and convert off-diagonal block column indices from local to global) //applies columns only (in-place) 
reorder_partition<index_type, mat_value_type, false, true> (n, nnz, Bp, Bc, Bv, Bp, Bc, Bv, q.size(), q.raw()); cudaCheckError(); } template <class TConfig> void DistributedManagerBase<TConfig>::createNeighToDestPartMap(IVector_h &neigh_to_part, IVector_h &neighbors, IVector_h &destination_part, int num_neighbors) { neigh_to_part.resize(num_neighbors); for (int i = 0; i < num_neighbors; i++) { neigh_to_part[i] = destination_part[neighbors[i]]; } } template <class TConfig> void DistributedManagerBase<TConfig>::createConsolidatedNeighToPartMap(IVector_h &cons_neigh_to_part, IVector_h &neigh_to_part, int my_destination_part, IVector_h &destination_part, int &num_cons_neighbors) { // input: non-initialized cons_neigh_to_part // fine_neigh_to_part // my_destination_part // output: cons_neigh_to_part // num_cons_neighbors cons_neigh_to_part = neigh_to_part; thrust::sort(cons_neigh_to_part.begin(), cons_neigh_to_part.end()); cudaCheckError(); cons_neigh_to_part.erase(thrust::unique(cons_neigh_to_part.begin(), cons_neigh_to_part.end()), cons_neigh_to_part.end()); // Remove if fine_neigh maps to same coarse partition cons_neigh_to_part.erase(thrust::remove_if(cons_neigh_to_part.begin(), cons_neigh_to_part.end(), is_my_part(my_destination_part)), cons_neigh_to_part.end()); num_cons_neighbors = cons_neigh_to_part.size(); cudaCheckError(); } template <class TConfig> void DistributedManagerBase<TConfig>::createNeighToConsNeigh(IVector_h &neigh_to_cons_neigh, IVector_h &cons_neigh_to_part, IVector_h &neigh_to_part, int my_destination_part, int &num_neighbors) { neigh_to_cons_neigh.resize(num_neighbors); thrust::lower_bound(cons_neigh_to_part.begin(), cons_neigh_to_part.end(), neigh_to_part.begin(), neigh_to_part.end(), neigh_to_cons_neigh.begin()); cudaCheckError(); // Flagging fine neighbors that go to same partition (haven't been found in previous step) for (int i = 0; i < num_neighbors; i++) { if ( neigh_to_part[i] == my_destination_part) { neigh_to_cons_neigh[i] = -1; } } } template <class TConfig> template <class IVector_hd> void DistributedManagerBase<TConfig>::consB2Lmaps(std::vector<IVector_hd> &dest_coarse_B2L_maps, std::vector<IVector_hd> &coarse_B2L_maps, IVector_h &fine_neigh_to_coarse_neigh, int num_coarse_neighbors, int num_fine_neighbors) { //Merge B2L fine maps per coarse destination dest_coarse_B2L_maps.resize(num_coarse_neighbors); std::vector<int> dest_coarse_B2L_maps_scratch_sizes(num_coarse_neighbors, 0); int my_id = this->global_id(); // Loop over the fine neighbors, to compute size of each dest_coarse_B2L_maps for (int i = 0; i < num_fine_neighbors; i++) { int k = fine_neigh_to_coarse_neigh[i]; if (k != -1) { dest_coarse_B2L_maps_scratch_sizes[k] += coarse_B2L_maps[i].size(); } } // Now fill dest_coarse_B2L_maps for (int k = 0; k < num_coarse_neighbors; k++) { dest_coarse_B2L_maps[k].resize( dest_coarse_B2L_maps_scratch_sizes[k] ); // Reset sizes to 0 (fill use as offset in next loop); dest_coarse_B2L_maps_scratch_sizes[k] = 0; } for (int i = 0; i < num_fine_neighbors; i++) { int k = fine_neigh_to_coarse_neigh[i]; if (k != -1) { int offset = dest_coarse_B2L_maps_scratch_sizes[k]; thrust::copy(coarse_B2L_maps[i].begin(), coarse_B2L_maps[i].end(), dest_coarse_B2L_maps[k].begin() + offset); dest_coarse_B2L_maps_scratch_sizes[k] += coarse_B2L_maps[i].size(); } } cudaCheckError(); int max_size = 0; for (int i = 0; i < num_coarse_neighbors; i++) { int size = dest_coarse_B2L_maps[i].size(); if (size > max_size) { max_size = size; } } // Remove duplicates (aggregates in boundary that go 
to same merged partition) for (int i = 0; i < num_coarse_neighbors; i++) { int size = dest_coarse_B2L_maps[i].size(); thrust::sort(dest_coarse_B2L_maps[i].begin(), dest_coarse_B2L_maps[i].begin() + size); index_type num_unique = thrust::unique(dest_coarse_B2L_maps[i].begin(), dest_coarse_B2L_maps[i].begin() + size) - dest_coarse_B2L_maps[i].begin(); dest_coarse_B2L_maps[i].erase(dest_coarse_B2L_maps[i].begin() + num_unique, dest_coarse_B2L_maps[i].end()); } cudaCheckError(); } template <class TConfig> void DistributedManagerBase<TConfig>::computeConsolidatedOffsets(const int my_id, const int my_destination_part, const bool is_root_partition, const int num_interior_rows, const int num_boundary_rows, IVector_h_vector &vertex_counts, const IVector_h &parts_to_consolidate, const int num_parts_to_consolidate, int &interior_offset, int &boundary_offset, int &total_interior_rows_in_merged, int &total_boundary_rows_in_merged, int &total_rows_in_merged, DistributedComms<TConfig> *comms) { IVector_h my_offsets(4); IVector_h my_sizes(2); my_sizes[0] = num_interior_rows; my_sizes[1] = num_boundary_rows; if (!is_root_partition) { //Send number of interior and boundary nodes to root comms->send_vector_async(my_sizes, my_destination_part, 777); comms->recv_vector(my_offsets, my_destination_part, 778); comms->send_vector_wait_all(my_sizes); } else { vertex_counts.resize(num_parts_to_consolidate); IVector_h child_sizes(2); IVector_h offsets_interior(num_parts_to_consolidate); IVector_h offsets_boundary(num_parts_to_consolidate); int count_int = 0; int count_bdy = 0; for (int i = 0; i < num_parts_to_consolidate; i++) { if (parts_to_consolidate[i] == my_id) { child_sizes = my_sizes; } else { comms->recv_vector(child_sizes, parts_to_consolidate[i], 777); } //Do a simple cumulative sum to determine total number of interior/boundary rows and their offsets on a per contributing partition basis offsets_interior[i] = count_int; offsets_boundary[i] = count_bdy; count_int += child_sizes[0]; count_bdy += child_sizes[1]; //Save them vertex_counts[i].resize(2); vertex_counts[i][0] = child_sizes[0]; vertex_counts[i][1] = child_sizes[1]; } for (int i = 0; i < num_parts_to_consolidate; i++) { //Send back to contributing partitions IVector_h offsets_to_send(4); offsets_to_send[0] = offsets_interior[i]; offsets_to_send[1] = offsets_boundary[i]; offsets_to_send[2] = count_int; offsets_to_send[3] = count_bdy; if (parts_to_consolidate[i] == my_id) { my_offsets = offsets_to_send; } else { comms->send_vector(offsets_to_send, parts_to_consolidate[i], 778); // cannot make async, rewriting internal buffer } } } interior_offset = my_offsets[0]; boundary_offset = my_offsets[1] + my_offsets[2] - num_interior_rows; total_interior_rows_in_merged = my_offsets[2]; total_boundary_rows_in_merged = my_offsets[3]; total_rows_in_merged = my_offsets[2] + my_offsets[3]; } template <class TConfig> template <class IVector_hd> void DistributedManagerBase<TConfig>::consB2LmapsOnRoot(int &num_consolidated_neighbors, std::vector<IVector_hd> &consolidated_B2L_maps, IVector_h &consolidated_coarse_ids, std::vector<IVector_hd> &dest_coarse_B2L_maps, IVector_h &coarse_neigh_to_fine_part, IVector_h &num_bdy_per_coarse_neigh, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int my_id, int my_destination_part, bool is_root_partition, int num_coarse_neighbors, DistributedComms<TConfig> *comms) { // TODO: it is possible to optimize exchanges, for example fuse recv_vector in recreating coarse neigbours // output: 
num_consolidated_neighbor, consolidated_B2L_maps, consolidated_coarse_ids // input: dest_coarse_B2L_maps, is_root_partition, my_id, my_destination_part, num_fine_parts_to_consolidate, num_coarse_neighbors, coarse_neigh_to_fine_part, num_bdy_per_coarse_neigh if (my_destination_part != my_id) { //if not root, send coarse neighbor list using fine indices and the corresponding boundary lists IVector_h num_coarse_neigh(1); num_coarse_neigh[0] = num_coarse_neighbors; comms->send_vector_async(num_coarse_neigh, my_destination_part, 1111); comms->send_vector_async(coarse_neigh_to_fine_part, my_destination_part, 2222); comms->send_vector_async(num_bdy_per_coarse_neigh, my_destination_part, 3333); for (int i = 0; i < num_coarse_neighbors; i++) { comms->send_vector_async(dest_coarse_B2L_maps[i], my_destination_part, 4444 + i) ; } comms->send_vector_wait_all(num_coarse_neigh); comms->send_vector_wait_all(coarse_neigh_to_fine_part); comms->send_vector_wait_all(num_bdy_per_coarse_neigh); for (int i = 0; i < num_coarse_neighbors; i++) { comms->send_vector_wait_all(dest_coarse_B2L_maps[i]) ; } } if (is_root_partition) { IVector_h num_coarse_ids_from_part(fine_parts_to_consolidate); IVector_h_vector coarse_ids_from_part(num_fine_parts_to_consolidate); IVector_h_vector num_coarse_neigh_bdys_from_part(num_fine_parts_to_consolidate); //If root, receive sizes, and resize receive buffers int total_num_coarse_ids = 0; for (int i = 0; i < num_fine_parts_to_consolidate; i++) { int current_part = fine_parts_to_consolidate[i]; IVector_h temp(1); if (current_part != my_id) { comms->recv_vector(temp, current_part, 1111); } else { temp[0] = num_coarse_neighbors; } num_coarse_ids_from_part[i] = temp[0]; coarse_ids_from_part[i].resize(temp[0]); num_coarse_neigh_bdys_from_part[i].resize(temp[0]); total_num_coarse_ids += temp[0]; } //Create a neighbor list for the consolidated coarse matrix, by merging coarse neighbor lists from partitions that are being merged consolidated_coarse_ids.resize(total_num_coarse_ids); int count = 0; for (int i = 0; i < num_fine_parts_to_consolidate; i++) { int current_part = fine_parts_to_consolidate[i]; // Get from each partition the coarse partition ids in their B2L maps if (current_part != my_id) { comms->recv_vector(coarse_ids_from_part[i], current_part, 2222); comms->recv_vector(num_coarse_neigh_bdys_from_part[i], current_part, 3333); } else { coarse_ids_from_part[i] = coarse_neigh_to_fine_part; num_coarse_neigh_bdys_from_part[i] = num_bdy_per_coarse_neigh; } thrust::copy(coarse_ids_from_part[i].begin(), coarse_ids_from_part[i].end(), consolidated_coarse_ids.begin() + count); count += num_coarse_ids_from_part[i]; } cudaCheckError(); //eliminate duplicates thrust::sort(consolidated_coarse_ids.begin(), consolidated_coarse_ids.end()); cudaCheckError(); consolidated_coarse_ids.erase(thrust::unique(consolidated_coarse_ids.begin(), consolidated_coarse_ids.end()), consolidated_coarse_ids.end()); cudaCheckError(); num_consolidated_neighbors = consolidated_coarse_ids.size(); IVector_h_vector coarse_ids_from_part_to_consolidated_neighbor(num_fine_parts_to_consolidate);; for (int i = 0; i < num_fine_parts_to_consolidate; i++) { coarse_ids_from_part_to_consolidated_neighbor[i].resize(num_coarse_ids_from_part[i]); thrust::lower_bound(consolidated_coarse_ids.begin(), consolidated_coarse_ids.end(), coarse_ids_from_part[i].begin(), coarse_ids_from_part[i].end(), coarse_ids_from_part_to_consolidated_neighbor[i].begin()); } cudaCheckError(); // Map each coarse partition to new coarse ID 
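// Added illustrative sketch (hypothetical numbers, not part of the original source): the vectorized
// thrust::lower_bound above maps each child partition's coarse-neighbour ids onto positions inside the
// merged, de-duplicated consolidated_coarse_ids list. For example:
//
//   consolidated_coarse_ids                          = [2 3]
//   coarse_ids_from_part[i]                          = [3]
//   coarse_ids_from_part_to_consolidated_neighbor[i] = [1]
//
// so the B2L map that partition i built for fine partition 3 is concatenated into
// consolidated_B2L_maps[1] below.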
consolidated_B2L_maps.resize(num_consolidated_neighbors); IVector_h consolidated_B2L_maps_sizes(num_consolidated_neighbors); // Offset in the consolidated_B2L_maps IVector_h_vector coarse_ids_offsets(num_fine_parts_to_consolidate); for (int i = 0; i < num_consolidated_neighbors; i++) { consolidated_B2L_maps_sizes[i] = 0; } // Compute the size of each consolidated_B2L_maps and offsets into it, where we will receive the parts coming from partitions that are getting merged into this one for (int i = 0; i < num_fine_parts_to_consolidate; i++) { coarse_ids_offsets[i].resize(num_coarse_ids_from_part[i]); for (int j = 0; j < num_coarse_ids_from_part[i]; j++) { int coarse_id = coarse_ids_from_part[i][j]; int k = num_coarse_neigh_bdys_from_part[i][j]; coarse_ids_offsets[i][j] = consolidated_B2L_maps_sizes[ coarse_ids_from_part_to_consolidated_neighbor[i][j] ]; consolidated_B2L_maps_sizes[ coarse_ids_from_part_to_consolidated_neighbor[i][j] ] += k; } } for (int i = 0; i < num_consolidated_neighbors; i++) { consolidated_B2L_maps[i].resize(consolidated_B2L_maps_sizes[i]); } // Receive the B2L maps from each child partition, concatenate them (gets sorted outside) for (int i = 0; i < num_fine_parts_to_consolidate; i++) { int current_part = fine_parts_to_consolidate[i]; for (int j = 0; j < num_coarse_ids_from_part[i]; j++) { int my_coarse_neigh = coarse_ids_from_part_to_consolidated_neighbor[i][j]; int offset = coarse_ids_offsets[i][j]; if (current_part != my_id) { comms->recv_vector( consolidated_B2L_maps[my_coarse_neigh], current_part, 4444 + j, offset, num_coarse_neigh_bdys_from_part[i][j]); //Need to do proper tagging here, otherwise messages from the same source would get mixed up } else { thrust::copy(dest_coarse_B2L_maps[j].begin(), dest_coarse_B2L_maps[j].end(), consolidated_B2L_maps[my_coarse_neigh].begin() + offset); } } } cudaCheckError(); } } template <class TConfig> void DistributedManagerBase<TConfig>::consolidateAndRenumberHalos(IVector_h &aggregates, const IVector_h &manager_halo_offsets, IVector_h &halo_offsets, const IVector_h &neighbors, int num_fine_neighbors, const IVector_h &consolidated_coarse_ids, int num_consolidated_neighbors, const IVector_h &destination_part, int my_destination_part, bool is_root_partition, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int num_parts, int my_id, int total_rows_in_merged, int &num_all_aggregates, DistributedComms<TConfig> *comms) { consAndRenumberHalos(aggregates, manager_halo_offsets, halo_offsets, neighbors, num_fine_neighbors, consolidated_coarse_ids, num_consolidated_neighbors, destination_part, my_destination_part, is_root_partition, fine_parts_to_consolidate, num_fine_parts_to_consolidate, num_parts, my_id, total_rows_in_merged, num_all_aggregates, comms); } template <class TConfig> void DistributedManagerBase<TConfig>::consolidateAndRenumberHalos(IVector_d &aggregates, const IVector_h &manager_halo_offsets, IVector_h &halo_offsets, const IVector_h &neighbors, int num_fine_neighbors, const IVector_h &consolidated_coarse_ids, int num_consolidated_neighbors, const IVector_h &destination_part, int my_destination_part, bool is_root_partition, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int num_parts, int my_id, int total_rows_in_merged, int &num_all_aggregates, DistributedComms<TConfig> *comms) { consAndRenumberHalos(aggregates, manager_halo_offsets, halo_offsets, neighbors, num_fine_neighbors, consolidated_coarse_ids, num_consolidated_neighbors, destination_part, my_destination_part, 
is_root_partition, fine_parts_to_consolidate, num_fine_parts_to_consolidate, num_parts, my_id, total_rows_in_merged, num_all_aggregates, comms); } template <class TConfig> template <class IVector_hd> void DistributedManagerBase<TConfig>::consAndRenumberHalos(IVector_hd &aggregates, const IVector_h &manager_halo_offsets, IVector_h &halo_offsets, const IVector_h &neighbors, int num_fine_neighbors, const IVector_h &consolidated_coarse_neigh_to_fine_part, int num_consolidated_neighbors, const IVector_h &destination_part, int my_destination_part, bool is_root_partition, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int num_parts, int my_id, int total_rows_in_merged, int &num_all_aggregates, DistributedComms<TConfig> *comms) { /* * EXAMPLE 2 This example is independent from the previous ones. Say partition 0 and 1 are merging (into 0) partition 0 is neighbors with 1,2,3 and partition 1 is neighbors with 0,3,4 Partitions 3 and 4 are merging (into partition 3) and partition 2 is not merging with anyone. This example details the renumbering of halo indices on partition 0 and partition 1. aggregates on partition 0: [(fine interior nodes) (fine boundary nodes) (fine halo from part 1) (fine halo from part 2) (fine halo from part 3)] [(fine interior nodes) (fine boundary nodes) (13 13 15) (12 15 17) (14 16 18)] aggregates on partition 1: [(fine interior nodes) (fine boundary nodes) (fine halo from part 0) (fine halo from part 3) (fine halo from part 4)] [(fine interior nodes) (fine boundary nodes) (14 16 17) (18 19 19) (15 15 17)] manager_halo_offsets on partition 0: [22 25 28 31] manager_halo_offsets on partition 1: [20 23 26 29] halo_offsets on both partitions are uninitialised: [0 0 0] and [0 0] neighbors on partition 0: [1 2 3] partition 1: [0 3 4] num_fine_neighbors partition 0: 3 partition 1: 3 consolidated_coarse_neigh_to_fine_part partition 0: [2 3] partition 1: [3] num_consolidated_neighbors partition 0: 2 partition 1: 1 destination_part [0 0 2 3 3] my_destination_part partition 0: 0 partition 1: 0 is_root_partition partition 0: true partition 1: false fine_parts_to_consolidate partition 0: [0 1] num_fine_parts_to_consolidate partition 0: 2 num_parts 5 my_id partition 0: 0 partition 1: 1 total_rows_in_merged partition 0 and 1: 24 (=sum of the two below) num_all_aggregates partition partition 0: 13 partition 1: 11 - will be updated with the number of halo aggregates */ // // Step 9.2 - com up with nonmerge lists // int num_fine_nonmerge_neighbors;// = fine_nonmerge_neighbors.size(); //NUmber of neighbors we are not merging with num_fine_nonmerge_neighbors = 0; for (int i = 0 ; i < num_fine_neighbors; i++) { if (destination_part[neighbors[i]] != my_destination_part) { num_fine_nonmerge_neighbors++; } } IVector_h halo_sizes(num_fine_nonmerge_neighbors); IVector_h fine_nonmerge_neigh_to_cons_fine_part(num_fine_nonmerge_neighbors); IVector_h fine_nonmerge_neighbor_to_fine_neighbor(num_fine_nonmerge_neighbors); num_fine_nonmerge_neighbors = 0; for (int i = 0 ; i < num_fine_neighbors; i++) { if (destination_part[neighbors[i]] != my_destination_part) { halo_sizes[num_fine_nonmerge_neighbors] = manager_halo_offsets[i + 1] - manager_halo_offsets[i]; fine_nonmerge_neighbor_to_fine_neighbor[num_fine_nonmerge_neighbors] = i; fine_nonmerge_neigh_to_cons_fine_part[num_fine_nonmerge_neighbors] = destination_part[neighbors[i]]; num_fine_nonmerge_neighbors++; } } /* * EXAMPLE 2 num_fine_nonmerge_neighbors partition 0: 2 partition 1: 2 fine_nonmerge_neighbor_to_fine_neighbor partition 
0: [1 2] partition 1: [1 2] fine_nonmerge_neigh_to_cons_fine_part partition 0: [2 3] partition 1: [3 3] halo_sizes partition 0: [3 3] partition 1: [3 3] */ //Send them to root along with the halo parts of the aggregates vector if (!is_root_partition) { IVector_h num_fine_nonmerge_neigh(1); num_fine_nonmerge_neigh[0] = num_fine_nonmerge_neighbors; // TODO: async? might be faster. comms->send_vector(num_fine_nonmerge_neigh, my_destination_part, 1111); comms->send_vector(halo_sizes, my_destination_part, 2222); comms->send_vector(fine_nonmerge_neigh_to_cons_fine_part, my_destination_part, 3333); // Here check l2h_identity flag and act accordingly for (int i = 0; i < num_fine_nonmerge_neighbors; i++) { comms->send_vector_async(aggregates, my_destination_part, 4444 + i, manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[i]], halo_sizes[i]) ; } //comms->send_vector_wait_all(num_fine_nonmerge_neigh); //comms->send_vector_wait_all(halo_sizes); //comms->send_vector_wait_all(fine_nonmerge_neigh_to_cons_fine_part); comms->send_vector_wait_all(aggregates); /* * EXAMPLE 2 Partition 1 sends to partition 0: num_fine_nonmerge_neigh 2 halo_sizes [3 3] fine_nonmerge_neigh_to_cons_fine_part [3 3] for loop: sends two pieces: [(18 19 19)] [(15 15 17)] */ } if (is_root_partition) { // // Step 9.3 Root receives this info, creates metadata // std::vector<VecInt_t> num_fine_nonmerge_neigh_array(num_fine_parts_to_consolidate); IVector_h_vector halo_sizes_array(num_fine_parts_to_consolidate); IVector_h_vector fine_nonmerge_neigh_to_cons_fine_part_array(num_fine_parts_to_consolidate); std::vector<std::vector<IVector> > fine_halo_aggregates_to_root_array(num_fine_parts_to_consolidate); std::vector<VecInt_t> min_index_coarse_halo(num_consolidated_neighbors, 0x7FFFFFFF); std::vector<VecInt_t> max_index_coarse_halo(num_consolidated_neighbors, 0); std::vector<VecInt_t> fine_part_to_consolidated_neighbor(num_parts, -1); for (int i = 0; i < num_consolidated_neighbors; i++) { fine_part_to_consolidated_neighbor[consolidated_coarse_neigh_to_fine_part[i]] = i; } /* * EXAMPLE 2 everything from here on is for partition 0, since that is the root partition fine_part_to_consolidated_neighbor [-1 -1 0 1 -1] */ for (int i = 0; i < num_fine_parts_to_consolidate; i++) { int current_part = fine_parts_to_consolidate[i]; IVector_h temp(1); if (current_part != my_id) { comms->recv_vector(temp, current_part, 1111); } else { temp[0] = num_fine_nonmerge_neighbors; } num_fine_nonmerge_neigh_array[i] = temp[0]; halo_sizes_array[i].resize(temp[0]); fine_nonmerge_neigh_to_cons_fine_part_array[i].resize(temp[0]); fine_halo_aggregates_to_root_array[i].resize(temp[0]); if (current_part != my_id) { comms->recv_vector(halo_sizes_array[i], current_part, 2222); } else { halo_sizes_array[i] = halo_sizes; } if (current_part != my_id) { comms->recv_vector(fine_nonmerge_neigh_to_cons_fine_part_array[i], current_part, 3333); } else { fine_nonmerge_neigh_to_cons_fine_part_array[i] = fine_nonmerge_neigh_to_cons_fine_part; } //Receive the halo regions for (int j = 0; j < temp[0]; j++) { fine_halo_aggregates_to_root_array[i][j].resize(halo_sizes_array[i][j]); if (current_part != my_id) { comms->recv_vector(fine_halo_aggregates_to_root_array[i][j], current_part, 4444 + j); } else { //HERE thrust::copy(aggregates.begin() + manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[j]], aggregates.begin() + manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[j]] + halo_sizes[j], fine_halo_aggregates_to_root_array[i][j].begin()); //TODO: not do 
this copying around on the root } #define MIN(a,b) a<b?a:b; #define MAX(a,b) a>b?a:b; //Find minimum and maximum halo indices as not to allocate too much scratch space later int min_index = thrust::reduce(fine_halo_aggregates_to_root_array[i][j].begin(), fine_halo_aggregates_to_root_array[i][j].end(), int(0x7FFFFFFF), thrust::minimum<int>()); int max_index = thrust::reduce(fine_halo_aggregates_to_root_array[i][j].begin(), fine_halo_aggregates_to_root_array[i][j].end(), int(0), thrust::maximum<int>()); min_index_coarse_halo[fine_part_to_consolidated_neighbor[fine_nonmerge_neigh_to_cons_fine_part_array[i][j]]] = MIN((int)min_index_coarse_halo[fine_part_to_consolidated_neighbor[fine_nonmerge_neigh_to_cons_fine_part_array[i][j]]], min_index); max_index_coarse_halo[fine_part_to_consolidated_neighbor[fine_nonmerge_neigh_to_cons_fine_part_array[i][j]]] = MAX((int)max_index_coarse_halo[fine_part_to_consolidated_neighbor[fine_nonmerge_neigh_to_cons_fine_part_array[i][j]]], max_index); } } cudaCheckError(); /* * EXAMPLE 2 num_fine_nonmerge_neigh_array = [2 2] halo_sizes_array = [[3 3][3 3]] fine_nonmerge_neigh_to_cons_fine_part_array[][] = [[2 3][3 3]] fine_halo_aggregates_to_root_array[from][to][fine halo vertex] [[[12 15 17][14 16 18]] [[18 19 19][15 15 17]]] min_index_coarse_halo[12 14] max_index_coarse_halo[17 19] */ halo_offsets[0] = total_rows_in_merged; //Now we have all the halo nodes, let's renumber them. int min_index = thrust::reduce(min_index_coarse_halo.begin(), min_index_coarse_halo.end(), int(0x7FFFFFFF), thrust::minimum<int>()); int max_index = thrust::reduce(max_index_coarse_halo.begin(), max_index_coarse_halo.end(), int(0), thrust::maximum<int>()); cudaCheckError(); // // Step 9.4 compute halo indices on root nodes // int scratch_size; if (num_consolidated_neighbors == 0) { scratch_size = 1; } else { scratch_size = max_index - min_index + 2; } IVector scratch(scratch_size); for (int i = 0; i < num_consolidated_neighbors; i++) { thrust::fill(scratch.begin(), scratch.end(), 0); int dest_part = consolidated_coarse_neigh_to_fine_part[i]; //Flag halo indices that occur for a specific coarse neighbor for (int j = 0; j < num_fine_parts_to_consolidate; j++) { for (int k = 0; k < num_fine_nonmerge_neigh_array[j]; k++) { if (fine_nonmerge_neigh_to_cons_fine_part_array[j][k] == dest_part) { int size = halo_sizes_array[j][k]; this->flag_halo_ids(size, scratch, fine_halo_aggregates_to_root_array[j][k], min_index_coarse_halo[i], max_index, min_index); } } } thrust::exclusive_scan(scratch.begin(), scratch.end(), scratch.begin(), halo_offsets[i]); //renumber them with the proper offset into our halo halo_offsets[i + 1] = scratch[scratch.size() - 1]; //now read them back for (int j = 0; j < num_fine_parts_to_consolidate; j++) { for (int k = 0; k < num_fine_nonmerge_neigh_array[j]; k++) { if (fine_nonmerge_neigh_to_cons_fine_part_array[j][k] == dest_part) { int size = halo_sizes_array[j][k]; int block_size = 128; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (size - 1) / block_size + 1); this->read_halo_ids(size, scratch, fine_halo_aggregates_to_root_array[j][k], min_index_coarse_halo[i]); //and send them back to contributing partitions hipDeviceSynchronize(); //TODO: don't need to synchronize when using GPUDirect int current_part = fine_parts_to_consolidate[j]; int tag = 4444 + dest_part; if (current_part != my_id) { comms->send_vector_async(fine_halo_aggregates_to_root_array[j][k], current_part, tag); //!!!!: we are sending them back not in sequential order, need tags!!!! 
                } else {
                    thrust::copy(fine_halo_aggregates_to_root_array[j][k].begin(),
                                 fine_halo_aggregates_to_root_array[j][k].end(),
                                 aggregates.begin() + manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[k]]);
                }
            }
        }
    }
    /*
     * EXAMPLE 2
     the array that is sent back in pieces:
     fine_halo_aggregates_to_root_array[from][to][fine halo vertex]
     [[[24 25 26][27 29 31]]
     [[31 32 32][28 28 30]]]
     halo_offsets = [24 27 33]
     */
} // Loop over consolidated neighbors

cudaCheckError();

// Wait for the sends to complete (this prevents fine_halo_aggregates_to_root_array from being destroyed before the sends have finished)
for (int i = 0; i < num_consolidated_neighbors; i++) {
    int dest_part = consolidated_coarse_neigh_to_fine_part[i];

    for (int j = 0; j < num_fine_parts_to_consolidate; j++) {
        for (int k = 0; k < num_fine_nonmerge_neigh_array[j]; k++) {
            if (fine_nonmerge_neigh_to_cons_fine_part_array[j][k] == dest_part) {
                int current_part = fine_parts_to_consolidate[j];

                if (current_part != my_id) {
                    comms->send_vector_wait_all(fine_halo_aggregates_to_root_array[j][k]);
                }
            }
        }
    }
} // Loop over consolidated neighbors

//Send total number of rows in the aggregated matrix
for (int i = 0; i < num_fine_parts_to_consolidate; i++) {
    int current_part = fine_parts_to_consolidate[i];
    IVector_h total_rows(1);
    total_rows[0] = halo_offsets[num_consolidated_neighbors];

    if (current_part != my_id) {
        comms->send_vector(total_rows, current_part, 5555);
    } else {
        num_all_aggregates = total_rows[0];
    }
}
} // If is root partition

if (!is_root_partition) {
    for (int i = 0; i < num_fine_nonmerge_neighbors; i++) {
        int tag = 4444 + fine_nonmerge_neigh_to_cons_fine_part[i];
        comms->recv_vector(aggregates, my_destination_part, tag, manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[i]], halo_sizes[i]);
    }

    IVector_h total_rows(1);
    comms->recv_vector(total_rows, my_destination_part, 5555);
    num_all_aggregates = total_rows[0];
}

/*
 * EXAMPLE 2
 num_all_aggregates = 33 (both partitions 0 and 1)
 */
}

template <class TConfig>
void DistributedManagerBase<TConfig>::ipcExchangePtr(void *&ptr, bool is_root_partition, int num_parts_to_consolidate, IVector_h &parts_to_consolidate, int my_root_partition, int my_id, DistributedComms<TConfig> *comms)
{
    hipIpcMemHandle_t handle;

    if (is_root_partition) {
        hipIpcGetMemHandle( (hipIpcMemHandle_t *) &handle, ptr );

        for (int i = 0; i < num_parts_to_consolidate; i++) {
            int current_part = parts_to_consolidate[i];

            if (current_part != my_id) {
                comms->send_raw_data(&handle, sizeof(handle), current_part, 456);
            }
        }
    } else {
        comms->recv_raw_data(&handle, sizeof(handle), my_root_partition, 456);
        hipError_t err = hipIpcOpenMemHandle( (void **) &ptr, handle, hipIpcMemLazyEnablePeerAccess);
    }
}

template <class TConfig>
void DistributedManagerBase<TConfig>::ipcWaitForChildren(bool is_root_partition, int num_parts_to_consolidate, IVector_h &parts_to_consolidate, int my_destination_part, int my_id, DistributedComms<TConfig> *comms)
{
    hipEvent_t event;
    hipIpcEventHandle_t event_handle;
    // HIP has no two-argument hipEventCreate; the CUDA flags overload maps to hipEventCreateWithFlags
    hipEventCreateWithFlags(&event, hipEventDisableTiming | hipEventInterprocess);
    hipIpcGetEventHandle( &event_handle, event);
    // Each rank records its event
    hipEventRecord(event);

    if (is_root_partition) {
        std::vector<hipEvent_t> child_events(num_parts_to_consolidate);
        std::vector<hipIpcEventHandle_t> child_event_handles(num_parts_to_consolidate);

        // Root partition receives event_handles from child and stores in child_event_handles
        for (int i = 0; i < num_parts_to_consolidate; i++) {
            int current_part = parts_to_consolidate[i];

            if (current_part != my_id) {
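/*
 * Illustrative sketch (kept as a comment, not compiled) of the interprocess event handshake that
 * ipcWaitForChildren / ipcWaitForRoot implement, reduced to a single signaller and a single
 * waiter. send_bytes / recv_bytes are hypothetical stand-ins for comms->send_raw_data /
 * comms->recv_raw_data; the event must carry both hipEventDisableTiming and hipEventInterprocess
 * to be shareable across processes.
 *
 *     // signalling process: record "my GPU work up to here" and publish the event
 *     hipEvent_t ev;
 *     hipIpcEventHandle_t h;
 *     hipEventCreateWithFlags(&ev, hipEventDisableTiming | hipEventInterprocess);
 *     hipIpcGetEventHandle(&h, ev);
 *     hipEventRecord(ev);
 *     send_bytes(&h, sizeof(h), peer_rank);      // opaque handle, e.g. shipped over MPI
 *
 *     // waiting process on the same node: open the peer's event and block on it
 *     hipIpcEventHandle_t h_peer;
 *     hipEvent_t ev_peer;
 *     recv_bytes(&h_peer, sizeof(h_peer), peer_rank);
 *     hipIpcOpenEventHandle(&ev_peer, h_peer);
 *     hipEventSynchronize(ev_peer);              // returns once the peer's recorded work is done
 *
 * In the loop below every child records and sends its handle, and the root opens and
 * synchronizes on one event per child; ipcWaitForRoot inverts the roles.
 */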
                comms->recv_raw_data(&(child_event_handles[i]), sizeof(hipIpcEventHandle_t), current_part, 987 + current_part);
                hipIpcOpenEventHandle(&child_events[i], child_event_handles[i]);
            }
        }

        for (int i = 0; i < num_parts_to_consolidate; i++) {
            if (parts_to_consolidate[i] != my_id) {
                hipEventSynchronize(child_events[i]);
            }
        }
    } else {
        comms->send_raw_data(&event_handle, sizeof(hipIpcEventHandle_t), my_destination_part, 987 + my_id);
    }
}

template <class TConfig>
void DistributedManagerBase<TConfig>::ipcWaitForRoot(bool is_root_partition, int num_parts_to_consolidate, IVector_h &parts_to_consolidate, int my_destination_part, int my_id, DistributedComms<TConfig> *comms)
{
    hipEvent_t event;
    hipIpcEventHandle_t event_handle;
    // As above, the flags form requires hipEventCreateWithFlags in HIP
    hipEventCreateWithFlags(&event, hipEventDisableTiming | hipEventInterprocess);

    if (is_root_partition) {
        hipIpcGetEventHandle( &event_handle, event);
        // Root records the event
        hipEventRecord(event);

        // Root partition sends event_handles to child
        for (int i = 0; i < num_parts_to_consolidate; i++) {
            int current_part = parts_to_consolidate[i];

            if (current_part != my_id) {
                comms->send_raw_data(&event_handle, sizeof(event_handle), current_part, 988 + current_part);
            }
        }
    } else {
        comms->recv_raw_data(&event_handle, sizeof(event_handle), my_destination_part, 988 + my_id);
        hipIpcOpenEventHandle(&event, event_handle);
        hipEventSynchronize(event);
    }
}

template <class TConfig>
void DistributedManagerBase<TConfig>::read_halo_ids(int size, IVector_d &scratch, IVector_d &halo_aggregates, VecInt_t min_index_coarse_halo)
{
    int block_size = 128;
    const int num_blocks = min( AMGX_GRID_MAX_SIZE, (size - 1) / block_size + 1);
    hipLaunchKernelGGL(( read_halo_ids_kernel) , dim3(num_blocks), dim3(block_size), 0, 0, scratch.raw(), halo_aggregates.raw(), min_index_coarse_halo, size);
    cudaCheckError();
}

template <class TConfig>
void DistributedManagerBase<TConfig>::read_halo_ids(int size, IVector_h &scratch, IVector_h &halo_aggregates, VecInt_t min_index_coarse_halo)
{
    FatalError("read_halo_ids not implemented on host yet", AMGX_ERR_NOT_IMPLEMENTED);
}

template <class TConfig>
void DistributedManagerBase<TConfig>::flag_halo_ids(int size, IVector_d &scratch, IVector_d &halo_aggregates, VecInt_t min_index_coarse_halo, int max_index, int min_index)
{
    int block_size = 128;
    const int num_blocks = min( AMGX_GRID_MAX_SIZE, (size - 1) / block_size + 1);
    hipLaunchKernelGGL(( flag_halo_ids_kernel) , dim3(num_blocks), dim3(block_size), 0, 0, scratch.raw(), halo_aggregates.raw(), min_index_coarse_halo, size, max_index - min_index + 1);
    cudaCheckError();
}

template <class TConfig>
void DistributedManagerBase<TConfig>::flag_halo_ids(int size, IVector_h &scratch, IVector_h &halo_aggregates, VecInt_t min_index_coarse_halo, int max_index, int min_index)
{
    FatalError("flag_halo_ids not implemented on host yet", AMGX_ERR_NOT_IMPLEMENTED);
}

template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::consolidateAndUploadAll(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> &A)
{
    FatalError("Fine level consolidation not implemented on host yet", AMGX_ERR_NOT_IMPLEMENTED);
}

template<class TConfig>
void DistributedManagerBase<TConfig>::exchangeSolveResultsConsolidation(int &num_iters, std::vector<PODVector_h> &res_history, AMGX_STATUS &status, bool store_res_history)
{
    int bsize = this->A->get_block_size();
    PODVector_h 
res_history_tmp; if (!m_is_fine_level_consolidated) { return; } else { int my_id = this->getFineLevelComms()->get_global_id(); IVector_h my_num_iters(1); if (m_is_fine_level_root_partition) { my_num_iters[0] = num_iters; if (store_res_history) { // Pack the res_history vector into array res_history_tmp.resize( (num_iters + 1)*bsize); for (int i = 0; i < num_iters + 1; i++) { for (int j = 0; j < bsize; j++) { res_history_tmp[i * bsize + j] = res_history[i][j]; } } } for (int i = 0; i < m_num_fine_level_parts_to_consolidate; i++) { int current_part = m_fine_level_parts_to_consolidate[i]; if (my_id != current_part) { getFineLevelComms()->send_vector_async(my_num_iters, current_part, 245); if (store_res_history) { getFineLevelComms()->send_vector_async(res_history_tmp, current_part, 246); } } } for (int i = 0; i < m_num_fine_level_parts_to_consolidate; i++) { int current_part = m_fine_level_parts_to_consolidate[i]; if (my_id != current_part) { getFineLevelComms()->send_raw_data(&status, sizeof(status), current_part, 247); } } getFineLevelComms()->send_vector_wait_all(my_num_iters); if (store_res_history) { getFineLevelComms()->send_vector_wait_all(res_history_tmp); } } else { // Store num_iters getFineLevelComms()->recv_vector(my_num_iters, m_my_fine_level_destination_part, 245); num_iters = my_num_iters[0]; if (store_res_history) { // Fill res_history vector res_history.resize(num_iters + 1); res_history_tmp.resize( (num_iters + 1)*bsize); getFineLevelComms()->recv_vector(res_history_tmp, m_my_fine_level_destination_part, 246); for (int i = 0; i < num_iters + 1; i++) { res_history[i].resize(bsize); for (int j = 0; j < bsize; j++) { res_history[i][j] = res_history_tmp[i * bsize + j]; } } } getFineLevelComms()->recv_raw_data(&status, sizeof(status), m_my_fine_level_destination_part, 247); } } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::consolidateAndUploadAll(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> &in_A) { this->A = &in_A; this->createComms(this->A->getResources()); //refresh comms DistributedComms<TConfig> *comms = this->getComms(); int my_id = comms->get_global_id(); int num_parts = comms->get_num_partitions(); int num_rings = this->num_halo_rings(); int num_neighbors = this->neighbors.size(); // All partitions have to call this, otherwise it fails // Step 1: Figure out which partition should be consolidated together based on their host_name and their PCI-E slot ID IVector_h destination_part(num_parts); this->computeDestinationPartitionsWithCons(my_id, num_parts, destination_part, comms); int my_destination_part = destination_part[my_id]; // Check if I'm root partition and how many msgs I will receive bool is_root_partition = false; int num_parts_to_consolidate = 0; for (int i = 0; i < num_parts; i++) { if (destination_part[i] == my_id) { is_root_partition = true; num_parts_to_consolidate++; } } if (my_destination_part >= num_parts) { FatalError("During consolidation, sending data to partition that doesn't exist", AMGX_ERR_NOT_IMPLEMENTED); } // Create cons_part_to_part map IVector_h cons_part_to_part = destination_part; thrust::sort(cons_part_to_part.begin(), cons_part_to_part.end()); cudaCheckError(); cons_part_to_part.erase(thrust::unique(cons_part_to_part.begin(), cons_part_to_part.end()), cons_part_to_part.end()); cudaCheckError(); int 
num_cons_partitions = cons_part_to_part.size(); // If number of consolidated partitions is the same as number of partitions, simply call uploadAll if (num_cons_partitions == num_parts) { this->initializeUploadReorderAll(n, nnz, block_dimx, block_dimy, row_ptrs, col_indices, data, diag, *(this->A)); this->m_is_fine_level_consolidated = false; return; } if (is_root_partition) { this->A->getResources()->expandRootPool(); } this->m_is_fine_level_consolidated = true; if (num_rings != 1) { FatalError("num_rings > 1 not supported in fine_level consolidation", AMGX_ERR_NOT_IMPLEMENTED); } // Fill with b2l_maps IVector_h_vector B2L_maps_tmp; B2L_maps_tmp.resize(num_neighbors); for (int i = 0; i < num_neighbors; i++) { B2L_maps_tmp[i] = this->cached_B2L_maps[i]; } bool useCudaIpc = this->m_use_cuda_ipc_consolidation; mat_value_type *data_hd = NULL; mat_value_type *diag_hd = NULL; int *col_indices_hd = NULL; int data_alloc = 0; int diag_alloc = 0; int col_alloc = 0; col_indices_hd = (int *) this->getDevicePointerForData((void *)col_indices, nnz * block_dimx * block_dimy * sizeof(int), &col_alloc); data_hd = (mat_value_type *) this->getDevicePointerForData((void *)data, nnz * block_dimx * block_dimy * sizeof(mat_value_type), &data_alloc); if (diag != NULL) { diag_hd = (mat_value_type *) this->getDevicePointerForData((void *)diag, nnz * block_dimx * block_dimy * sizeof(mat_value_type), &diag_alloc); } // Copy the original row_offsets array (this is required when replacing coefficients this->m_old_row_offsets_CONS.resize(n + 1); hipMemcpy(this->m_old_row_offsets_CONS.raw(), row_ptrs, (n + 1)*sizeof(int), hipMemcpyDefault); cudaCheckError(); this->m_old_nnz_CONS = nnz; // This function: // Creates fine level consolidated matrices // Modifies the btl_maps, lth_maps // Create part_to_cons_part map IVector_h part_to_cons_part(num_parts); thrust::lower_bound(cons_part_to_part.begin(), cons_part_to_part.end(), destination_part.begin(), destination_part.end(), part_to_cons_part.begin()); cudaCheckError(); IVector_h neigh_to_part; this->createNeighToDestPartMap(neigh_to_part, this->neighbors, destination_part, num_neighbors); IVector_h cons_neigh_to_part; int num_cons_neighbors; this->createConsolidatedNeighToPartMap(cons_neigh_to_part, neigh_to_part, my_destination_part, destination_part, num_cons_neighbors); IVector_h neigh_to_cons_neigh; this->createNeighToConsNeigh( neigh_to_cons_neigh, cons_neigh_to_part, neigh_to_part, my_destination_part, num_neighbors); // --------------------------------------- // MERGE B2L MAPS BASED ON DEST PARTITION // --------------------------------------- IVector_h_vector dest_B2L_maps; this->consolidateB2Lmaps(dest_B2L_maps, B2L_maps_tmp, neigh_to_cons_neigh, num_cons_neighbors, num_neighbors); // ------------------------------------ // Renumber interior and boundary rows // ------------------------------------ int num_interior_rows; int num_boundary_rows; IVector_h renumbering; this->createAggregatesRenumbering(renumbering, dest_B2L_maps, n, num_cons_neighbors, num_interior_rows, num_boundary_rows, num_rings); // -------------------------------------------------- // Create list of destination parts to consolidate // -------------------------------------------------- // Store whether or not this is a root partition on fine level IVector_h parts_to_consolidate; parts_to_consolidate.resize(num_parts_to_consolidate); int count = 0; for (int i = 0; i < num_parts; i++) { if (destination_part[i] == my_id) { parts_to_consolidate[count] = i; count++; } } // 
--------------------------------------------------------------------- // Each partition computes its offset for its interior and boundary nodes // --------------------------------------------------------------------- IVector_h_vector vertex_counts; int interior_offset, boundary_offset, total_interior_rows_in_merged, total_boundary_rows_in_merged; int total_rows_in_merged; this->computeConsolidatedOffsets(my_id, my_destination_part, is_root_partition, num_interior_rows, num_boundary_rows, vertex_counts, parts_to_consolidate, num_parts_to_consolidate, interior_offset, boundary_offset, total_interior_rows_in_merged, total_boundary_rows_in_merged, total_rows_in_merged, comms); // ----------------------------------- // Each partition renumber it's rows // ----------------------------------- int total_num_halos = 0; // Pack new bdy_ids for (int i = 0; i < num_neighbors; i++) { total_num_halos += this->cached_L2H_maps[i].size(); } IVector_h row_ids(n + total_num_halos, -1); this->m_row_ids_CONS.resize(n + total_num_halos); // Renumber the interior and boundary rows for (int i = 0; i < n; i++) { int new_id; if (renumbering.size() == 0) { new_id = i; } else { new_id = renumbering[i]; } new_id += ((new_id >= num_interior_rows) ? boundary_offset : interior_offset); row_ids[i] = new_id; } for (int i = 0; i < num_cons_neighbors; i++) { thrust::transform(dest_B2L_maps[i].begin(), dest_B2L_maps[i].end(), thrust::constant_iterator<index_type>(boundary_offset), dest_B2L_maps[i].begin(), thrust::plus<index_type>()); } cudaCheckError(); // ------------------------------------------------- // Send dest_B2L_maps to root partitions // ------------------------------------------------ IVector_h num_bdy_per_cons_neigh(num_cons_neighbors); for (int i = 0; i < num_cons_neighbors; i++) { num_bdy_per_cons_neigh[i] = dest_B2L_maps[i].size(); } IVector_h root_cons_neighbors; int root_num_cons_neighbors = 0; IVector_h_vector cons_B2L_maps; this->consolidateB2LmapsOnRoot(root_num_cons_neighbors, cons_B2L_maps, root_cons_neighbors, dest_B2L_maps, cons_neigh_to_part, num_bdy_per_cons_neigh, parts_to_consolidate, num_parts_to_consolidate, my_id, my_destination_part, is_root_partition, num_cons_neighbors, comms); IVector_h halo_ids_offsets(num_neighbors + 1); IVector_h halo_ids; int halo_ids_size = 0; halo_ids_offsets[0] = 0; for (int i = 0; i < num_neighbors; i++) { halo_ids_size += this->cached_L2H_maps[i].size(); halo_ids_offsets[i + 1] = halo_ids_size; } halo_ids.resize(halo_ids_size); // Do exchange with neighbors // Pack new bdy_ids IVector_h_vector bdy_ids; bdy_ids.resize(num_neighbors); for (int i = 0; i < num_neighbors; i++) { int size = this->cached_B2L_maps[i].size(); bdy_ids[i].resize(size); // Pack buffer for (int j = 0; j < size; j++) { bdy_ids[i][j] = row_ids[this->cached_B2L_maps[i][j]]; } } for (int i = 0; i < num_neighbors; i++) { comms->send_vector_async(bdy_ids[i], this->neighbors[i], 6666 + this->neighbors[i]); } for (int i = 0; i < num_neighbors; i++) { comms->recv_vector(halo_ids, this->neighbors[i], 6666 + my_id, halo_ids_offsets[i], this->cached_L2H_maps[i].size()); } for (int i = 0; i < num_neighbors; i++) { comms->send_vector_wait_all(bdy_ids[i]); } IVector_h halo_offsets(root_num_cons_neighbors + 1, 0); int root_num_rows; this->consolidateAndRenumberHalos(halo_ids, halo_ids_offsets, halo_offsets, this->neighbors, num_neighbors, root_cons_neighbors, root_num_cons_neighbors, destination_part, my_destination_part, is_root_partition, parts_to_consolidate, num_parts_to_consolidate, num_parts, my_id, 
total_rows_in_merged, root_num_rows, comms); if (is_root_partition) { this->B2L_maps.resize(cons_B2L_maps.size()); for (int i = 0; i < cons_B2L_maps.size(); i++) { thrust::sort(cons_B2L_maps[i].begin(), cons_B2L_maps[i].end()); this->B2L_maps[i].copy(cons_B2L_maps[i]); // H2D copy of B2L maps } cudaCheckError(); } // Now renumber the row_ids based on lth_maps count = 0; for (int i = 0; i < num_neighbors; i++) { for (int j = 0; j < this->cached_L2H_maps[i].size(); j++) { row_ids[this->cached_L2H_maps[i][j]] = halo_ids[count]; count++; } } hipMemcpy(this->m_row_ids_CONS.raw(), row_ids.raw(), (n + total_num_halos)*sizeof(int), hipMemcpyDefault); cudaCheckError(); int bsize = block_dimx * block_dimy; if (is_root_partition) { this->A->row_offsets.resize(root_num_rows + 1); } void *root_row_ptr = (void *) this->A->row_offsets.raw(); if (useCudaIpc) { // ---------------------------------------------------- // 1. cudaIPC to get pointer to root's row_offset array // ---------------------------------------------------- this->ipcExchangePtr(root_row_ptr, is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms); cudaCheckError(); // ------------------------------------------------------------------- // 2. each rank copy it's row length on root partition using row_ids // ------------------------------------------------------------------- int cta_size = 128; int grid_size = min(4096, (n + total_num_halos + cta_size - 1) / cta_size); hipLaunchKernelGGL(( zero_copy_row_lengths_ids_offsets<mat_value_type>) , dim3(grid_size), dim3(cta_size), 0, 0, this->m_old_row_offsets_CONS.raw(), ((int *) root_row_ptr) /* IPC */, this->m_row_ids_CONS.raw(), n, total_num_halos, (mat_value_type *) diag); cudaCheckError(); // Root partition waits for children to be done writing their result this->ipcWaitForChildren(is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms); cudaCheckError(); } else // CudaIpcNotAvailable { this->checkPinnedBuffer( max( nnz * sizeof(mat_value_type), (n + 1)*max(sizeof(index_type), sizeof(value_type)) ) ); if (!is_root_partition) { IVector_h data_to_send(3); data_to_send[0] = n; data_to_send[1] = nnz; data_to_send[2] = total_num_halos; int dummy; void *row_ptrs_to_send = this->getHostPointerForData((void *)row_ptrs, sizeof(index_type) * (n + 1), &dummy); comms->send_vector(data_to_send, my_destination_part, 10000 + my_id); comms->send_raw_data(row_ptrs_to_send, (n + 1)*sizeof(int), my_destination_part, 10001 + my_id); comms->send_raw_data(&row_ids[0], (n + total_num_halos)*sizeof(int), my_destination_part, 10002 + my_id); } else { hipEvent_t event; hipEventCreate(&event); //TODO: Could use streams here //TODO: Avoid extra device to host copies std::vector<IVector_h> data_recv(num_parts_to_consolidate); for (int i = 0; i < num_parts_to_consolidate; i++) { data_recv[i].resize(3); int current_part = parts_to_consolidate[i]; if (current_part != my_id) { comms->recv_vector(data_recv[i], current_part, 10000 + current_part); } else { data_recv[i][0] = n; data_recv[i][1] = nnz; data_recv[i][2] = total_num_halos; } } this->m_child_n.resize(num_parts_to_consolidate); this->m_child_nnz.resize(num_parts_to_consolidate); this->m_child_num_halos.resize(num_parts_to_consolidate); this->m_child_row_ids.resize(num_parts_to_consolidate); this->m_child_old_row_offsets.resize(num_parts_to_consolidate); int max_n = 0; int max_nnz = 0; for (int i = 0; i < num_parts_to_consolidate; i++) { int current_part = 
parts_to_consolidate[i]; this->m_child_n[i] = data_recv[i][0]; this->m_child_nnz[i] = data_recv[i][1]; this->m_child_num_halos[i] = data_recv[i][2]; if (this->m_child_n[i] > max_n) { max_n = this->m_child_n[i]; } if (this->m_child_nnz[i] > max_nnz) { max_nnz = this->m_child_nnz[i]; } this->m_child_row_ids[i].resize(this->m_child_n[i] + this->m_child_num_halos[i]); this->m_child_old_row_offsets[i].resize(this->m_child_n[i] + 1); } this->m_child_max_n = max_n; this->m_child_max_nnz = max_nnz; for (int i = 0; i < num_parts_to_consolidate; i++) { int current_part = parts_to_consolidate[i]; int cta_size = 128; int grid_size = min(4096, (this->m_child_n[i] + this->m_child_num_halos[i] + cta_size - 1) / cta_size); if (current_part != my_id) { comms->recv_vector(this->m_child_old_row_offsets[i], current_part, 10001 + current_part, 0, this->m_child_n[i] + 1); comms->recv_vector(this->m_child_row_ids[i], current_part, 10002 + current_part, 0, this->m_child_n[i] + this->m_child_num_halos[i]); hipLaunchKernelGGL(( zero_copy_row_lengths_ids_offsets<mat_value_type>) , dim3(grid_size), dim3(cta_size), 0, 0, this->m_child_old_row_offsets[i].raw(), this->A->row_offsets.raw(), this->m_child_row_ids[i].raw(), this->m_child_n[i], this->m_child_num_halos[i], (mat_value_type *) diag); // Wait for kernel to finish before overwriting host buffer hipEventRecord(event); hipEventSynchronize(event); } else { hipLaunchKernelGGL(( zero_copy_row_lengths_ids_offsets<mat_value_type>) , dim3(grid_size), dim3(cta_size), 0, 0, this->m_old_row_offsets_CONS.raw(), this->A->row_offsets.raw(), this->m_row_ids_CONS.raw(), n, total_num_halos, (mat_value_type *) diag); hipEventRecord(event); hipEventSynchronize(event); } } cudaCheckError(); hipEventDestroy(event); } // If root partition //TODO: is this necessary comms->barrier(); } //3. root does a exclusive_scan if (is_root_partition) { hipEvent_t event; hipEventCreate(&event); // Populate the halo rows with diagonal, increase the length of the halo rows thrust::fill(this->A->row_offsets.begin() + halo_offsets[0], this->A->row_offsets.begin() + halo_offsets[root_num_cons_neighbors], 1); thrust::exclusive_scan(this->A->row_offsets.begin(), this->A->row_offsets.end(), this->A->row_offsets.begin()); hipEventRecord(event); hipEventSynchronize(event); cudaCheckError(); this->A->set_initialized(0); this->A->delProps(DIAG); // We always insert the diagonal this->A->delProps(COO); // No COO this->A->setColsReorderedByColor(false); // Cols not reordered by color int nnz = this->A->row_offsets[root_num_rows]; // This is a device to host copy this->A->resize(root_num_rows, root_num_rows, nnz, block_dimx, block_dimy); this->A->set_num_nz(nnz); // num_nz doesn't include halo rows //this->A->set_initialized(1); hipEventDestroy(event); } else { this->A->set_initialized(0); this->A->resize( 0, 0, 0, block_dimx, block_dimy ); this->A->delProps(DIAG); // We always insert the diagonal this->A->delProps(COO); // No COO this->A->setColsReorderedByColor(false); // Cols not reordered by color //this->A->set_initialized(1); } if (useCudaIpc) { // ---------------------------------------------- // 4. 
Do ipc consolidation of values and columns // ---------------------------------------------- // Child partition waits for parent to create row_offsets this->ipcWaitForRoot(is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms); void *root_col_ptr = (void *) this->A->col_indices.raw(); void *root_val_ptr = (void *) this->A->values.raw(); this->ipcExchangePtr(root_col_ptr, is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms); this->ipcExchangePtr(root_val_ptr, is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms); int cta_size2 = 128; int grid_size2 = min(4096, (n + cta_size2 - 1) / cta_size2); hipLaunchKernelGGL(( ipc_consolidation_upload_matrix<mat_value_type>) , dim3(grid_size2), dim3(cta_size2), 0, 0, n, this->m_row_ids_CONS.raw(), this->m_old_row_offsets_CONS.raw(), ( (int *) root_row_ptr ) /*IPC*/, col_indices_hd, ( (int *) root_col_ptr) /*IPC*/, data_hd, ( (mat_value_type *) root_val_ptr ) /*IPC*/, diag_hd, bsize); cudaCheckError(); // Root partition waits for children to upload their matrices this->ipcWaitForChildren(is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms); cudaCheckError(); // Child partitions close their mem handle (they are done upload data) if (!is_root_partition) { hipIpcCloseMemHandle(root_row_ptr); hipIpcCloseMemHandle(root_val_ptr); hipIpcCloseMemHandle(root_col_ptr); } } else // If cudaIpcNotAvailable { if (!is_root_partition) { int dummy; void *col_indices_to_send = this->getHostPointerForData((void *)col_indices, sizeof(index_type) * nnz, &dummy); comms->send_raw_data(col_indices_to_send, nnz * sizeof(int), my_destination_part, 10000 + my_id); void *data_to_send = this->getHostPointerForData((void *)data, sizeof(mat_value_type) * nnz, &dummy); comms->send_raw_data(data_to_send, nnz * bsize * sizeof(mat_value_type), my_destination_part, 10001 + my_id); if (diag != NULL) { void *diag_to_send = this->getHostPointerForData((void *)diag, sizeof(mat_value_type) * n, &dummy); comms->send_raw_data(diag_to_send, n * bsize * sizeof(mat_value_type), my_destination_part, 10002 + my_id); } } else { hipEvent_t event; hipEventCreate(&event); //TODO: Could use streams here int *child_col_indices; mat_value_type *child_data; mat_value_type *child_diag = NULL; hipHostMalloc( (void **) &child_col_indices, this->m_child_max_nnz * sizeof(int), hipHostMallocMapped); hipHostMalloc( (void **) &child_data, this->m_child_max_nnz * bsize * sizeof(mat_value_type), hipHostMallocMapped); if (diag != NULL) { hipHostMalloc( (void **) &child_diag, (this->m_child_max_n)*bsize * sizeof(mat_value_type), hipHostMallocMapped); } for (int i = 0; i < num_parts_to_consolidate; i++) { int current_part = parts_to_consolidate[i]; int cta_size2 = 128; int grid_size2 = min(4096, (this->m_child_n[i] + cta_size2 - 1) / cta_size2); if (current_part != my_id) { comms->recv_raw_data(child_col_indices, this->m_child_nnz[i]*sizeof(int), current_part, 10000 + current_part); comms->recv_raw_data(child_data, this->m_child_nnz[i]*bsize * sizeof(mat_value_type), current_part, 10001 + current_part); if (diag != NULL) { comms->recv_raw_data(child_diag, this->m_child_n[i]*bsize * sizeof(mat_value_type), current_part, 10002 + current_part); } int *child_col_indices_hd; mat_value_type *child_data_hd; mat_value_type *child_diag_hd = NULL; hipHostGetDevicePointer(&child_col_indices_hd, child_col_indices, 0); 
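/*
 * Illustrative sketch (kept as a comment, not compiled) of the zero-copy staging used in this
 * branch: receive a child's raw arrays into pinned, mapped host memory, hand the kernel a
 * device-visible alias of that same allocation, and synchronize before the buffer is reused for
 * the next child. consume_kernel, grid, block and nbytes are hypothetical stand-ins.
 *
 *     int *host_buf = NULL;
 *     int *dev_alias = NULL;
 *     hipHostMalloc((void **)&host_buf, nbytes, hipHostMallocMapped);   // pinned + GPU-mapped
 *     hipHostGetDevicePointer((void **)&dev_alias, host_buf, 0);        // alias usable in kernels
 *
 *     // ... fill host_buf, e.g. comms->recv_raw_data into it ...
 *     hipLaunchKernelGGL(consume_kernel, dim3(grid), dim3(block), 0, 0, dev_alias, int(nbytes / sizeof(int)));
 *
 *     hipEvent_t done;
 *     hipEventCreate(&done);
 *     hipEventRecord(done);
 *     hipEventSynchronize(done);   // kernel has read host_buf over PCIe; safe to overwrite it now
 *     hipEventDestroy(done);
 *     hipHostFree(host_buf);
 *
 * The event-based wait is what lets a single host buffer be recycled across all children instead
 * of allocating one staging buffer per child partition.
 */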
hipHostGetDevicePointer(&child_data_hd, child_data, 0); if (diag != NULL) { hipHostGetDevicePointer(&child_diag_hd, child_diag, 0); } hipLaunchKernelGGL(( ipc_consolidation_upload_matrix<mat_value_type>) , dim3(grid_size2), dim3(cta_size2), 0, 0, this->m_child_n[i], this->m_child_row_ids[i].raw(), this->m_child_old_row_offsets[i].raw(), this->A->row_offsets.raw(), child_col_indices_hd, this->A->col_indices.raw(), child_data_hd, this->A->values.raw(), child_diag_hd, bsize); // Wait for kernel to finish before overwriting host buffer hipEventRecord(event); hipEventSynchronize(event); } else { hipLaunchKernelGGL(( ipc_consolidation_upload_matrix<mat_value_type>) , dim3(grid_size2), dim3(cta_size2), 0, 0, n, this->m_row_ids_CONS.raw(), this->m_old_row_offsets_CONS.raw(), this->A->row_offsets.raw(), col_indices_hd, this->A->col_indices.raw(), data_hd, this->A->values.raw(), diag_hd, bsize); hipEventRecord(event); hipEventSynchronize(event); } } cudaCheckError(); hipEventDestroy(event); hipHostFree(child_col_indices); hipHostFree(child_data); if (diag != NULL) { hipHostFree(child_diag); } } // If root partition //TODO: is this necessary comms->barrier(); } // Store the original fine level communicator this->m_is_fine_level_root_partition = is_root_partition; this->m_my_fine_level_destination_part = my_destination_part; // Create a clone of the original communicator this->m_fine_level_comms = comms; //this->_comms is the same pointer that this->m_fine_level_comms right now, so we can overwrite this->_comms, but make sure that we release m_fine_level_cons this->_comms = this->m_fine_level_comms->CloneSubComm(cons_part_to_part, is_root_partition); // this->_comms will be empty comm for non-root partition and new comm for root ranks only if root partition this->m_fine_level_id = my_id; if (is_root_partition) { int cta_size = 128; int grid_size3 = min(4096, ( (root_num_rows - halo_offsets[0]) + cta_size - 1) / cta_size); if (grid_size3 != 0) { hipLaunchKernelGGL(( set_halo_cols_values) , dim3(grid_size3), dim3(cta_size), 0, 0, this->A->row_offsets.raw(), this->A->col_indices.raw(), this->A->values.raw(), halo_offsets[0], root_num_rows, bsize); cudaCheckError(); } int my_cons_id = part_to_cons_part[my_id]; this->_global_id = my_cons_id; this->_num_interior_nodes = total_interior_rows_in_merged; this->_num_boundary_nodes = total_boundary_rows_in_merged; for (int i = 0; i < root_num_cons_neighbors; i++) { root_cons_neighbors[i] = part_to_cons_part[root_cons_neighbors[i]]; } this->_comms->set_neighbors(root_num_cons_neighbors); this->neighbors = root_cons_neighbors; this->halo_offsets = halo_offsets; // H2D copy of halo offsets this->m_num_fine_level_parts_to_consolidate = num_parts_to_consolidate; this->m_fine_level_parts_to_consolidate = parts_to_consolidate; this->set_num_halo_rings(num_rings); this->set_num_halo_rows(halo_offsets[root_num_cons_neighbors] - halo_offsets[0]); // B2L_maps has already been copied this->B2L_rings.resize(root_num_cons_neighbors); for (int i = 0; i < root_num_cons_neighbors; i++) { this->B2L_rings[i].resize(2); this->B2L_rings[i][0] = 0; this->B2L_rings[i][1] = cons_B2L_maps[i].size(); } this->set_initialized(this->A->row_offsets); this->A->set_initialized(0); this->A->delProps(DIAG); this->A->diag.resize(root_num_rows); this->A->computeDiagonal(); // this->A->setView(OWNED); hipEventCreate(&(this->comm_event)); this->A->set_initialized(1); } else { this->neighbors.resize(0); this->halo_offsets.resize(0); } /* free memory (if needed) */ if (col_alloc) { 
hipFree(col_indices_hd); } if (data_alloc) { hipFree(data_hd); } if (diag_alloc) { hipFree(diag_hd); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::replaceMatrixCoefficientsNoCons(int n, int nnz, const mat_value_type *data_pinned, const mat_value_type *diag_pinned) { //matrix parameters //int num_nnz = this->A->get_num_nz(); int num_rows = this->halo_offsets[0]; int total_rows = num_rows + this->num_halo_rows(); int block_size = this->A->get_block_size(); mat_value_type *data_hd = NULL; mat_value_type *diag_hd = NULL; int data_alloc = 0; int diag_alloc = 0; //cuda parameters int num_blocks = min(4096, (num_rows + 127) / 128); /* WARNING: the number of non-zero elements (nnz) in the array data_pinned and A->values (num_nnz) might be different at this point. 1. If the matrix has CSR property and therefore diagonal is included in the matrix this values will be the same. 2. If the matrix has DIAG property and therefore diagonal is originally stored separately, and later appended to the array of values, and subsequently inserted into the matrix than num_nnz = nnz + n. We have to account for this fact when replacing the coefficients (and use nnz not num_nnz). obs.: see calls to computeDiagonal (matrix.cu), AMGX_matrix_upload and AMGX_replace_coefficients (amgx_c.cu), and uploadMatrix and replaceMatrixCoefficients[No|With]Cons (distributed_manager.cu) for details. */ /* check early exit */ if ((this->neighbors.size() == 0 || this->renumbering.size() == 0) && !this->m_is_fine_level_glued) { return; } cudaCheckError(); /* allocate if data and diag if they are not pinned */ data_hd = (mat_value_type *) this->getDevicePointerForData((void *)data_pinned, nnz * block_size * sizeof(mat_value_type), &data_alloc); if (diag_pinned != NULL) { diag_hd = (mat_value_type *) this->getDevicePointerForData((void *)diag_pinned, num_rows * block_size * sizeof(mat_value_type), &diag_alloc); } /* replace the values (reordering them if needed) */ if (insertDiagonals && diag_pinned != NULL) { hipLaunchKernelGGL(( replace_values_matrix <32>) , dim3(num_blocks), dim3(512), 0, 0, data_hd, diag_hd, this->old_row_offsets.raw(), this->A->row_offsets.raw(), this->A->values.raw(), this->renumbering.raw(), block_size, num_rows); } else { hipLaunchKernelGGL(( replace_values_matrix <32>) , dim3(num_blocks), dim3(512), 0, 0, data_hd, this->old_row_offsets.raw(), this->A->row_offsets.raw(), this->A->values.raw(), this->renumbering.raw(), block_size, num_rows); if (diag_pinned != NULL) { hipLaunchKernelGGL(( reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, this->A->values.raw() + this->A->row_offsets[total_rows]*block_size, diag_hd, this->renumbering.raw(), block_size, num_rows); } } cudaCheckError(); /* free memory (if needed) */ if (data_alloc) { hipFree(data_hd); } if (diag_alloc) { hipFree(diag_hd); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::replaceMatrixCoefficientsWithCons(int n, int nnz, const mat_value_type *data_pinned, const mat_value_type *diag_pinned) { //matrix parameters //int num_nnz = this->A->get_num_nz(); /* WARNING: in consolidation, for non-root partitions, halo_offsets might be NULL due to the call halo_offsets.resize(0); at the end of the routine uploadMatrix->consolidateAndUploadAll. 
We should use the parameter n instead this->halo_offsets[0] for num_rows. */ int num_rows = n; int block_size = this->A->get_block_size(); mat_value_type *data_hd = NULL; mat_value_type *diag_hd = NULL; int data_alloc = 0; int diag_alloc = 0; data_hd = (mat_value_type *) this->getDevicePointerForData((void *)data_pinned, nnz * block_size * sizeof(mat_value_type), &data_alloc); if (diag_pinned != NULL) { diag_hd = (mat_value_type *) this->getDevicePointerForData((void *)diag_pinned, num_rows * block_size * sizeof(mat_value_type), &diag_alloc); } bool useCudaIpc = this->m_use_cuda_ipc_consolidation; if (useCudaIpc) { // Child partitions wait for root to be done this->ipcWaitForRoot(this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms()); cudaCheckError(); void *root_row_ptr = (void *) this->A->row_offsets.raw(); void *root_val_ptr = (void *) this->A->values.raw(); this->ipcExchangePtr(root_row_ptr, this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms()); this->ipcExchangePtr(root_val_ptr, this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms()); // replace the values, insert the diagonal int ncons = this->m_old_row_offsets_CONS.size() - 1; int cta_size = 128; int grid_size2 = min(4096, (ncons + cta_size - 1) / cta_size); hipLaunchKernelGGL(( ipc_consolidation_replace_values<mat_value_type>) , dim3(grid_size2), dim3(cta_size), 0, 0, ncons, this->m_row_ids_CONS.raw(), this->m_old_row_offsets_CONS.raw(), ( (int *) root_row_ptr )/*IPC*/, data_hd, ( (mat_value_type *) root_val_ptr )/*IPC*/, diag_hd, this->A->get_block_size() ); cudaCheckError(); // Root partition wait for child to be done replacing their values this->ipcWaitForChildren(this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms()); cudaCheckError(); if (!this->m_is_fine_level_root_partition) { hipIpcCloseMemHandle(root_row_ptr); hipIpcCloseMemHandle(root_val_ptr); } } else // cudaIpcNotAvailable { if (this->m_is_fine_level_consolidated) // aggregation { int bsize = this->A->get_block_size(); int ncons = this->m_old_row_offsets_CONS.size() - 1; if (!this->m_is_fine_level_root_partition) { int dummy; int nnzcons = this->m_old_nnz_CONS; void *data_to_send = this->getHostPointerForData((void *)data_pinned, nnzcons * bsize * sizeof(mat_value_type), &dummy); this->getFineLevelComms()->send_raw_data(data_to_send, nnzcons * bsize * sizeof(mat_value_type), this->m_my_fine_level_destination_part, 10001 + this->fine_level_id()); if (diag_pinned != NULL) { void *diag_to_send = this->getHostPointerForData((void *)diag_pinned, ncons * bsize * sizeof(mat_value_type), &dummy); this->getFineLevelComms()->send_raw_data(diag_to_send, ncons * bsize * sizeof(mat_value_type), this->m_my_fine_level_destination_part, 10002 + this->fine_level_id()); } } else { hipEvent_t event; hipEventCreate(&event); //TODO: Could use streams here mat_value_type *child_data; mat_value_type *child_diag = NULL; hipHostMalloc( (void **) &child_data, 
this->m_child_max_nnz * bsize * sizeof(mat_value_type), hipHostMallocMapped); if (diag_pinned != NULL) { hipHostMalloc( (void **) &child_diag, (this->m_child_max_n)*bsize * sizeof(mat_value_type), hipHostMallocMapped); } for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++) { int current_part = this->m_fine_level_parts_to_consolidate[i]; int cta_size2 = 128; int grid_size2 = min(4096, (this->m_child_n[i] + cta_size2 - 1) / cta_size2); if (current_part != this->fine_level_id()) { this->getFineLevelComms()->recv_raw_data(child_data, this->m_child_nnz[i]*bsize * sizeof(mat_value_type), current_part, 10001 + current_part); if (diag_pinned != NULL) { this->getFineLevelComms()->recv_raw_data(child_diag, this->m_child_n[i]*bsize * sizeof(mat_value_type), current_part, 10002 + current_part); } mat_value_type *child_data_hd; mat_value_type *child_diag_hd = NULL; hipHostGetDevicePointer(&child_data_hd, child_data, 0); if (diag_pinned != NULL) { hipHostGetDevicePointer(&child_diag_hd, child_diag, 0); } hipLaunchKernelGGL(( ipc_consolidation_replace_values<mat_value_type>) , dim3(grid_size2), dim3(cta_size2), 0, 0, this->m_child_n[i], this->m_child_row_ids[i].raw(), this->m_child_old_row_offsets[i].raw(), this->A->row_offsets.raw(), child_data_hd, this->A->values.raw(), child_diag_hd, bsize); // Wait for kernel to finish before overwriting host buffer hipEventRecord(event); hipEventSynchronize(event); } else { hipLaunchKernelGGL(( ipc_consolidation_replace_values<mat_value_type>) , dim3(grid_size2), dim3(cta_size2), 0, 0, ncons, this->m_row_ids_CONS.raw(), this->m_old_row_offsets_CONS.raw(), this->A->row_offsets.raw(), data_hd, this->A->values.raw(), diag_hd, bsize); //hipEventRecord(event); //hipEventSynchronize(event); } } cudaCheckError(); hipEventDestroy(event); hipHostFree(child_data); if (diag_pinned != NULL) { hipHostFree(child_diag); } } // If root partition //TODO: is this necessary this->getFineLevelComms()->barrier(); } //agg else if (this->m_is_fine_level_glued) // classical { int bsize = this->A->get_block_size(); int ncons = this->m_old_row_offsets_CONS.size() - 1; IVector_h nnz_off; nnz_off.resize(this->getConsolidationArrayOffsets().size()); IVector_h nnz_array; nnz_array.resize(this->getConsolidationArrayOffsets().size() - 1); this->getFineLevelComms()->all_gather( nnz, nnz_array, this->getConsolidationArrayOffsets().size() - 1); nnz_off[0] = 0; for (int i = 0; i < nnz_array.size(); i++) { nnz_off[i + 1] = nnz_off[i] + nnz_array[i]; } if (!this->m_is_fine_level_root_partition) { int dummy; void *data_to_send = this->getHostPointerForData((void *)data_pinned, nnz * bsize * sizeof(mat_value_type), &dummy); this->getFineLevelComms()->send_raw_data( data_to_send, nnz * bsize * sizeof(mat_value_type), this->m_my_fine_level_destination_part, 10001 + this->fine_level_id()); if (diag_pinned != NULL) { void *diag_to_send = this->getHostPointerForData((void *)diag_pinned, n * sizeof(mat_value_type), &dummy); this->getFineLevelComms()->send_raw_data( diag_to_send, n * bsize * sizeof(mat_value_type), this->m_my_fine_level_destination_part, 10002 + this->fine_level_id()); //diag.resize(0); cudaCheckError(); } //values.resize(0); cudaCheckError(); } else { //TODO: Could use streams here mat_value_type *child_data; mat_value_type *child_diag = NULL; // Assumes partions have been glued already this->A->getNnzForView(OWNED, &nnz); hipHostMalloc( (void **) &child_data, nnz * bsize * sizeof(mat_value_type), hipHostMallocMapped); if (diag_pinned != NULL) { hipHostMalloc( (void **) 
&child_diag, this->halo_offsets[this->neighbors.size()]*bsize * sizeof(mat_value_type), hipHostMallocMapped); } // roots copy their data memcpy ( &child_data[0], data_pinned, nnz_array[this->fine_level_id()]*sizeof(value_type)); if (diag_pinned != NULL) { memcpy ( &child_diag[0], diag_pinned, n * sizeof(value_type)); } for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++) { int current_part = this->m_fine_level_parts_to_consolidate[i]; int current_offset = nnz_off[current_part] - nnz_off[this->fine_level_id()] ; int current_nnz = nnz_array[current_part]; if (current_part != this->fine_level_id()) { this->getFineLevelComms()->recv_raw_data( &child_data[current_offset], current_nnz * bsize * sizeof(mat_value_type), current_part, 10001 + current_part); if (diag_pinned != NULL) this->getFineLevelComms()->recv_raw_data( &child_diag[this->getConsolidationArrayOffsets()[current_part] - this->getConsolidationArrayOffsets()[this->fine_level_id()]], (this->getConsolidationArrayOffsets()[current_part + 1] - this->getConsolidationArrayOffsets()[current_part])*bsize * sizeof(mat_value_type), current_part, 10002 + current_part); } } cudaCheckError(); // we can follow the usual upload path for raw data now // Assumes partions have been glued already int os; this->A->getOffsetAndSizeForView(OWNED, &os, &n); replaceMatrixCoefficientsNoCons( n, nnz, child_data, child_diag); cudaCheckError(); hipHostFree(child_data); if (diag_pinned != NULL) { hipHostFree(child_diag); } } // If root partition //TODO: is this necessary this->getFineLevelComms()->barrier(); } // cla } // not ipc this->A->setView(OWNED); /* free memory (if needed) */ if (data_alloc) { hipFree(data_hd); } if (diag_alloc) { hipFree(diag_hd); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::transformAndUploadVector(VVector_v &v, const void *data, int n, int block_dim) { if (this->isFineLevelConsolidated() || (this->isFineLevelGlued() && !this->isGlued())) { transformAndUploadVectorWithCons(v, data, n, block_dim); } else { v.resize(n * block_dim); cudaCheckError(); // Upload on host hipMemcpy(v.raw(), (value_type *)data, n * block_dim * sizeof(value_type), hipMemcpyDefault); cudaCheckError(); // Permute based on renumbering vector transformVector(v); int tag = 0; // Exchange halos this->exchange_halo(v, tag); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::transformAndUploadVectorWithCons(VVector_v &v, const void *data_pinned, int n, int block_dim) { if (v.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); } bool useCudaIpc = this->m_use_cuda_ipc_consolidation; this->getFineLevelComms()->barrier(); void *root_temp_ptr = NULL; VVector_v temp; if (this->m_is_fine_level_root_partition && !this->m_is_fine_level_glued ) { temp.resize(this->halo_offsets[this->neighbors.size()]*v.get_block_size(), types::util<value_type>::get_zero()); temp.set_block_dimx(v.get_block_dimx()); temp.set_block_dimy(v.get_block_dimy()); root_temp_ptr = (void *) temp.raw(); } cudaCheckError(); int data_alloc = 0; value_type *data_hd = NULL; if (!this->m_is_fine_level_glued ) { data_hd = (value_type *) this->getDevicePointerForData((void *)data_pinned, n * block_dim * sizeof(value_type), &data_alloc); } if (useCudaIpc) { // Do IPC 
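/*
 * Illustrative sketch (kept as a comment, not compiled) of the pointer exchange performed by
 * ipcExchangePtr below: the root exports a device allocation through a CUDA/HIP IPC memory
 * handle so that same-node child processes can write their rows directly into the root's buffer.
 * send_bytes / recv_bytes are hypothetical stand-ins for comms->send_raw_data /
 * comms->recv_raw_data.
 *
 *     // root process: owns the device buffer (e.g. from hipMalloc or a device vector)
 *     hipIpcMemHandle_t handle;
 *     hipIpcGetMemHandle(&handle, root_dev_ptr);
 *     send_bytes(&handle, sizeof(handle), child_rank);
 *
 *     // child process on the same node: map the root's buffer into its own address space
 *     hipIpcMemHandle_t peer_handle;
 *     void *mapped = NULL;
 *     recv_bytes(&peer_handle, sizeof(peer_handle), root_rank);
 *     hipIpcOpenMemHandle(&mapped, peer_handle, hipIpcMemLazyEnablePeerAccess);
 *     // ... child kernels scatter into mapped ...
 *     hipIpcCloseMemHandle(mapped);    // unmaps only; the root still owns the allocation
 */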
this->ipcExchangePtr(root_temp_ptr, this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms()); cudaCheckError(); int num_blocks = min(4096, (n + 511) / 512); hipLaunchKernelGGL(( reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, (value_type *) root_temp_ptr, data_hd, this->m_row_ids_CONS.raw(), v.get_block_size(), n); // Root partition waits for children to be done this->ipcWaitForChildren(this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms()); cudaCheckError(); if (!this->m_is_fine_level_root_partition) { hipIpcCloseMemHandle(root_temp_ptr); } } else // If cudaIpcNotAvail { if (this->m_is_fine_level_consolidated) // aggregation { // Exchange the vector between root and child if (!this->m_is_fine_level_root_partition) { IVector_h size(1); size[0] = n; this->getFineLevelComms()->send_vector(size, this->m_my_fine_level_destination_part, 20000 + this->fine_level_id()); int dummy; void *data_to_send = this->getHostPointerForData((void *)data_pinned, n * v.get_block_size() * sizeof(value_type), &dummy); this->getFineLevelComms()->send_raw_data(data_to_send, n * v.get_block_size()*sizeof(value_type), this->m_my_fine_level_destination_part, 20001 + this->fine_level_id()); } else { hipEvent_t event; hipEventCreate(&event); IVector_h child_n(this->m_num_fine_level_parts_to_consolidate); int max_n = 0; for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++) { int current_part = this->m_fine_level_parts_to_consolidate[i]; if (current_part != this->fine_level_id()) { this->getFineLevelComms()->recv_vector(child_n, current_part, 20000 + current_part, i, 1); } else { child_n[i] = n; } if (child_n[i] > max_n) { max_n = child_n[i]; } } value_type *child_data; hipHostMalloc( (void **) &child_data, max_n * v.get_block_size()*sizeof(value_type), hipHostMallocMapped); value_type *child_data_hd; hipHostGetDevicePointer(&child_data_hd, child_data, 0); for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++) { int current_part = this->m_fine_level_parts_to_consolidate[i]; int num_blocks = min(4096, (child_n[i] + 511) / 512); if (current_part != this->fine_level_id()) { this->getFineLevelComms()->recv_raw_data(&child_data[0], child_n[i]*v.get_block_size()*sizeof(value_type), current_part, 20001 + current_part); hipLaunchKernelGGL(( reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, (value_type *) root_temp_ptr, child_data_hd, this->m_child_row_ids[i].raw(), v.get_block_size(), child_n[i]); hipEventRecord(event); hipEventSynchronize(event); cudaCheckError(); } else { hipLaunchKernelGGL(( reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, (value_type *) root_temp_ptr, data_hd, this->m_row_ids_CONS.raw(), v.get_block_size(), n); } } // Loop over parts to consolidate cudaCheckError(); hipEventDestroy(event); hipHostFree(child_data); } // If root partition } //agg else if (this->m_is_fine_level_glued) // cla { value_type *child_data = NULL; if (!this->m_is_fine_level_root_partition) { int dummy; void *data_to_send = this->getHostPointerForData((void *)data_pinned, n * v.get_block_size() * sizeof(value_type), &dummy); this->getFineLevelComms()->send_raw_data( data_to_send, n * v.get_block_size()*sizeof(value_type), this->m_my_fine_level_destination_part, 
20001 + this->fine_level_id()); //v.resize(0); // just in case something resized it betwen iterations cudaCheckError(); } else { hipHostMalloc( (void **) &child_data, this->halo_offsets[this->neighbors.size()]*v.get_block_size()*sizeof(value_type), hipHostMallocMapped); value_type *child_data_hd; hipHostGetDevicePointer(&child_data_hd, child_data, 0); // roots copy their data int dummy; void *my_data = this->getHostPointerForData((void *)data_pinned, n * v.get_block_size() * sizeof(value_type), &dummy); memcpy ( &child_data[0], data_pinned, n * v.get_block_size()*sizeof(value_type)); // Loop over parts to consolidate for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++) { int current_part = this->m_fine_level_parts_to_consolidate[i]; if (current_part != this->fine_level_id()) { this->getFineLevelComms()->recv_raw_data( &child_data[this->getConsolidationArrayOffsets()[current_part] - this->getConsolidationArrayOffsets()[this->fine_level_id()]], sizeof(value_type) * (this->getConsolidationArrayOffsets()[current_part + 1] - this->getConsolidationArrayOffsets()[current_part]), current_part, 20001 + current_part ); } } // usual path // Upload on host hipMemcpy(v.raw(), (value_type *)child_data, v.size()* sizeof(value_type), hipMemcpyDefault); cudaCheckError(); } // If root partition // Permute based on renumbering vector transformVector(v); cudaCheckError(); // Exchange halos int tag = 0; this->exchange_halo(v, tag); cudaCheckError(); v.set_unconsolidated_size(n); // free host if (child_data) { hipHostFree(child_data); } cudaCheckError(); } //cla } // If cudaIpcAvailable if (!this->m_is_fine_level_glued) // not needed for classcical { if (this->m_is_fine_level_root_partition) { v.swap(temp); int tag = 0; // Root partitions do the exchange this->exchange_halo(v, tag); } v.set_unconsolidated_size(n * v.get_block_size()); v.set_transformed(); } /* free memory (if needed) */ if (data_alloc) { hipFree(data_hd); } cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::transformVector(VVector_v &v) { if (this->neighbors.size() == 0) { return; } else if (this->renumbering.size() == 0) { v.resize(this->halo_offsets[this->neighbors.size()]*v.get_block_size()); return; } if (v.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); } if (v.size() < this->halo_offsets[this->neighbors.size()]*v.get_block_size()) { VVector_v temp(this->halo_offsets[this->neighbors.size()]*v.get_block_size(), types::util<value_type>::get_zero()); temp.set_block_dimx(v.get_block_dimx()); temp.set_block_dimy(v.get_block_dimy()); if (v.size() < this->halo_offsets[0]*this->A->get_block_dimx()) { FatalError("Unknown size of input vector - smaller than the number of rows owned by this partition", AMGX_ERR_NOT_IMPLEMENTED); } //reorder based on row permutation int size = this->halo_offsets[0]; int num_blocks = min(4096, (size + 511) / 512); hipLaunchKernelGGL(( reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, temp.raw(), v.raw(), this->renumbering.raw(), v.get_block_size(), size); v.swap(temp); } else { VVector_v temp(this->halo_offsets[0]*v.get_block_size()); int size = this->halo_offsets[0]; int num_blocks = min(4096, (size + 511) / 512); hipLaunchKernelGGL(( reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, temp.raw(), v.raw(), this->renumbering.raw(), v.get_block_size(), size); thrust::copy(temp.begin(), temp.end(), 
v.begin()); } cudaCheckError(); v.set_transformed(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::revertVector(VVector_v &v) { if (this->neighbors.size() == 0 || this->renumbering.size() == 0) { return; } if (v.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); } VVector_v temp(this->halo_offsets[0]*this->A->get_block_dimx()); if (v.size() < this->halo_offsets[0]*v.get_block_size()) { FatalError("Unknown size of input vector - smaller than the number of rows owned by this partition", AMGX_ERR_NOT_IMPLEMENTED); } //reorder based on row permutation int size = this->halo_offsets[0]; int num_blocks = min(4096, (size + 511) / 512); hipLaunchKernelGGL(( inverse_reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, temp.raw(), v.raw(), this->renumbering.raw(), v.get_block_size(), size); //reorder_vector_values<<<num_blocks, 512>>>(temp.raw(), v.raw(), this->inverse_renumbering.raw(), v.get_block_size(), size); cudaCheckError(); v.resize(this->halo_offsets[0]*this->A->get_block_dimx()); thrust::copy(temp.begin(), temp.end(), v.begin()); cudaCheckError(); } template <class TConfig> void DistributedManagerBase<TConfig>::computeDestinationPartitions(INDEX_TYPE upper_threshold, float avg_size, const int num_parts, int &new_num_parts, bool &wantNeighbors) { m_destination_partitions.resize(num_parts); std::vector<int> dp(num_parts); if (avg_size < 1.f) { avg_size = 1.f; } // avoid floating point exception int wanted_num_fine_parts_to_consolidate = ( upper_threshold + (int) avg_size - 1) / ( (int) avg_size ); new_num_parts = (num_parts + wanted_num_fine_parts_to_consolidate - 1) / wanted_num_fine_parts_to_consolidate; for (int i = 0; i < num_parts; i++) { dp[i] = i % new_num_parts; } // example wantNeighbors = true -> destination_part = [0 0 0 0 4 4 4 4 8 8 8 8] // example wantNeighbors = false -> destination_part = [0 1 2 3 0 1 2 3 0 1 2 3] if (wantNeighbors) { std::sort (dp.begin(), dp.end()); m_destination_partitions[0] = 0; for (int i = 1; i < num_parts; i++) { if (dp[i - 1] < dp[i]) { m_destination_partitions[i] = i; } else { m_destination_partitions[i] = m_destination_partitions[i - 1]; } } } m_my_destination_part = m_destination_partitions[global_id()]; } template <class TConfig> void DistributedManagerBase<TConfig>::computeDestinationPartitionsWithCons(int my_id, int num_parts, IVector_h &destination_part, DistributedComms<TConfig> *comms) { int device_id = this->A->getResources()->getDevice(0); std::string my_hostname_tmp; comms->get_hostname(my_hostname_tmp); // Append PCI-E ID to string hipDeviceProp_t dev_prop; hipGetDeviceProperties(&dev_prop, device_id); std::stringstream s; s << my_hostname_tmp << "_" << dev_prop.pciBusID << "_" << dev_prop.pciDeviceID; std::string my_hostname(s.str()); std::vector<std::string> hostnames; comms->exchange_hostnames(my_hostname, hostnames, num_parts); std::vector<std::string>::iterator low = std::find( hostnames.begin(), hostnames.end(), my_hostname ); int my_destination_part = low - hostnames.begin(); // Do a gather into destination_part comms->all_gather(my_destination_part, destination_part, num_parts); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::revertAndDownloadVector(VVector_v &v_in, const void *data, int n, int block_dimy) { if 
(this->isFineLevelConsolidated() || this->isFineLevelGlued()) { revertAndDownloadVectorWithCons(v_in, data, n, block_dimy); } else { if ( n == 0 ) { FatalError("Cannot download if size = 0", AMGX_ERR_NOT_IMPLEMENTED); } if (data == NULL ) { FatalError("Cannot download to a NULL pointer", AMGX_ERR_NOT_IMPLEMENTED); } if (v_in.size() == 0 ) { FatalError("Cannot download an empty vector", AMGX_ERR_NOT_IMPLEMENTED); } VVector_v v_out; revertVector(v_in, v_out); hipMemcpy((value_type *)data, v_out.raw(), n * block_dimy * sizeof(value_type), hipMemcpyDefault); cudaCheckError(); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::revertVector(VVector_v &v_in, VVector_v &v_out) { if (!this->isFineLevelGlued() && this->neighbors.size() == 0 || this->renumbering.size() == 0) { return;} if (v_in.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); } if (v_in.size() < this->halo_offsets[0]*v_in.get_block_size()) { FatalError("Unknown size of input vector - smaller than the number of rows owned by this partition", AMGX_ERR_NOT_IMPLEMENTED); } int size = this->halo_offsets[0]; if (v_out.size() != size * this->A->get_block_dimx()) { v_out.resize(size * this->A->get_block_dimx()); } //reorder based on row permutation int num_blocks = min(4096, (size + 511) / 512); hipLaunchKernelGGL(( inverse_reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, v_out.raw(), v_in.raw(), this->renumbering.raw(), v_in.get_block_size(), size); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::revertAndDownloadVectorWithCons(VVector_v &v_in, const void *data_pinned, int n, int block_dimy) { if (v_in.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); } void *root_v_ptr = NULL; int size = v_in.get_unconsolidated_size(); int num_rows = size / v_in.get_block_size(); if (this->m_is_fine_level_root_partition) { root_v_ptr = (void *) v_in.raw(); } VVector_v temp; temp.set_block_dimx(v_in.get_block_dimx()); temp.set_block_dimy(v_in.get_block_dimy()); temp.resize(size); bool useCudaIpc = this->m_use_cuda_ipc_consolidation; if (useCudaIpc) { // Do IPC this->ipcExchangePtr(root_v_ptr, this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms()); // Children partition waits for parent to be done updating vector this->ipcWaitForRoot(this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms()); cudaCheckError(); //reorder based on row permutation int num_blocks = min(4096, (num_rows + 511) / 512); hipLaunchKernelGGL(( inverse_reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, temp.raw(), (value_type *) root_v_ptr, this->m_row_ids_CONS.raw(), v_in.get_block_size(), num_rows); cudaCheckError(); if (!this->m_is_fine_level_root_partition) { hipIpcCloseMemHandle(root_v_ptr); } } else { if (this->m_is_fine_level_consolidated) // aggregation { if (this->m_is_fine_level_root_partition) { IVector_h child_n(this->m_num_fine_level_parts_to_consolidate); int max_n = 0; for (int i = 0; i < 
this->m_num_fine_level_parts_to_consolidate; i++) { int current_part = this->m_fine_level_parts_to_consolidate[i]; if (current_part != this->fine_level_id()) { this->getFineLevelComms()->recv_vector(child_n, current_part, 30000 + current_part, i, 1); } else { child_n[i] = num_rows; } if (child_n[i] > max_n) { max_n = child_n[i]; } } // Resize temp vector VVector_v child_temp; child_temp.resize(max_n * v_in.get_block_size()); for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++) { int current_part = this->m_fine_level_parts_to_consolidate[i]; // Pack the vector to be sent int num_blocks = min(4096, (child_n[i] + 511) / 512); if (current_part != this->fine_level_id()) { hipLaunchKernelGGL(( inverse_reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, child_temp.raw(), (value_type *) root_v_ptr, this->m_child_row_ids[i].raw(), v_in.get_block_size(), child_n[i]); this->getFineLevelComms()->send_vector(child_temp, current_part, 30001 + current_part, 0, child_n[i]*v_in.get_block_size()); } else { hipLaunchKernelGGL(( inverse_reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, temp.raw(), (value_type *) root_v_ptr, this->m_row_ids_CONS.raw(), v_in.get_block_size(), child_n[i]); } } cudaCheckError(); } else { IVector_h size(1); size[0] = num_rows; this->getFineLevelComms()->send_vector(size, this->m_my_fine_level_destination_part, 30000 + this->fine_level_id()); this->getFineLevelComms()->recv_vector(temp, this->m_my_fine_level_destination_part, 30001 + this->fine_level_id()); } } else if (this->m_is_fine_level_glued) // classical { if (this->m_is_fine_level_root_partition) { temp.resize(v_in.size()); revertVector(v_in, temp); cudaCheckError(); for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++) { int current_part = this->m_fine_level_parts_to_consolidate[i]; if (current_part != this->fine_level_id()) { this->getFineLevelComms()->send_vector( temp, current_part, current_part + 30001, this->getConsolidationArrayOffsets()[current_part] - this->getConsolidationArrayOffsets()[this->fine_level_id()], this->getConsolidationArrayOffsets()[current_part + 1] - this->getConsolidationArrayOffsets()[current_part] ); cudaCheckError(); } } } else { this->getFineLevelComms()->recv_vector(temp, this->m_my_fine_level_destination_part, 30001 + this->fine_level_id()); cudaCheckError(); } temp.resize(this->getConsolidationArrayOffsets()[this->fine_level_id() + 1] - this->getConsolidationArrayOffsets()[this->fine_level_id()]); cudaCheckError(); } } // Copy on host hipMemcpy((value_type *)data_pinned, temp.raw(), temp.size() * sizeof(value_type), hipMemcpyDefault); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::transformAndUploadVector(VVector_v &v, const void *data, int n, int block_dim) { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::transformVector(VVector_v &v) { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec,
t_indPrec> >::transformAndUploadVectorWithCons(VVector_v &v, const void *data, int n, int block_dim) { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::revertAndDownloadVector(VVector_v &v, const void *data, int n, int block_dim) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::revertVector(VVector_v &v_in, VVector_v &v_out) { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::revertVector(VVector_v &v) { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::revertAndDownloadVectorWithCons(VVector_v &v_in, const void *data, int n, int block_dim) { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::reorder_matrix() { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::reorder_matrix_owned() { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::obtain_shift_l2g_reordering(index_type n, I64Vector_h &l2g, IVector_h &p, IVector_h &q) { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::unpack_partition(index_type *Bp, index_type *Bc, mat_value_type *Bv) { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::generatePoisson7pt(int nx, int ny, int nz, int P, int Q, int R) { FatalError("GeneratePoisson7pt only implemented on devices", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> template <typename 
t_colIndex> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributedMatrix( int num_rows, int num_nonzeros, const int block_dimx, const int block_dimy, const int *row_offsets, const t_colIndex *col_indices, const mat_value_type *values, int num_ranks, int num_rows_global, const void *diag, const MatrixDistribution &dist) { FatalError("loadDistributedMatrix only implemented on devices", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::createOneRingB2Lmaps() { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::createOneRingHaloRows() { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::replaceMatrixCoefficientsNoCons(int n, int nnz, const mat_value_type *data, const mat_value_type *diag) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::replaceMatrixCoefficientsWithCons(int n, int nnz, const mat_value_type *data, const mat_value_type *diag) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::createRenumbering(IVector &renumbering) { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template < class TConfig > void DistributedManagerBase<TConfig>::remove_boundary(IVector_h &flagArray, IVector_h &B2L_map, int size) { for (int i = 0; i < size; i++) { flagArray[B2L_map[i]] = 0; } } template < class TConfig > void DistributedManagerBase<TConfig>::get_unassigned(IVector_h &flagArray, IVector_h &B2L_map, IVector_h &partition_flags, int size, int fa_size/*, int rank*/) { for (int i = 0; i < size; i++) { if (B2L_map[i] < fa_size) { if (flagArray[B2L_map[i]] == 0) { flagArray[B2L_map[i]] = 1; partition_flags[i] = 1; } } } } template < class TConfig > void DistributedManagerBase<TConfig>::set_unassigned(IVector_h &partition_flags, IVector_h &partition_renum, IVector_h &B2L_map, IVector_h &renumbering, int size, int max_element, int renum_size/*, int rank*/) { for (int i = 0; i < size; i++) { if (B2L_map[i] < renum_size) { if (partition_flags[i] == 1) { renumbering[B2L_map[i]] = max_element + partition_renum[i]; } B2L_map[i] = renumbering[B2L_map[i]]; } } } /* print manager for target rank to a file or stdout */ template<class TConfig> void DistributedManagerBase<TConfig>::print(char *f, char *s, int trank) { DistributedManagerBase<TConfig> *m = this; int rank = 0; int level = 0; char filename[1024]; FILE *fid = NULL; int i, j, k, t1, t2; #ifdef AMGX_WITH_MPI MPI_Comm_rank(MPI_COMM_WORLD, 
&rank); #endif //check target rank if (rank == trank) { //check whether to output to stdout or a file if (f == NULL) { fid = stdout; } else { level = m->A->amg_level_index; #ifdef _WIN32 _snprintf_s(filename, 1024, 1024, "%s_r%d_l%d.mtx", f, rank, level); #else snprintf(filename, 1024, "%s_r%d_l%d.mtx", f, rank, level); #endif fid = fopen(filename, "w"); } hipDeviceSynchronize(); cudaCheckError(); fprintf(fid, "%s\n", s); //--- communication info --- //compare neighbors t1 = m->neighbors.size(); fprintf(fid, "neighbors %d\n", t1); for (i = 0; i < t1; i++) { k = m->neighbors[i]; fprintf(fid, "%d\n", k); } //compare B2L_rings t1 = B2L_rings.size(); fprintf(fid, "B2L_rings %d\n", t1); for (i = 0; i < t1; i++) { t2 = m->B2L_rings[i].size(); fprintf(fid, "B2L_rings-%d [%d]\n", i, t2); for (j = 0; j < t2; j++) { k = m->B2L_rings[i][j]; fprintf(fid, "%d\n", k); } } //compare B2L_maps t1 = B2L_maps.size(); fprintf(fid, "B2L_maps %d\n", t1); for (i = 0; i < t1; i++) { t2 = m->B2L_maps[i].size(); fprintf(fid, "B2L_maps-%d [%d]\n", i, t2); for (j = 0; j < t2; j++) { k = m->B2L_maps[i][j]; fprintf(fid, "%d\n", k); } } //compare L2H_maps t1 = L2H_maps.size(); fprintf(fid, "L2H_maps %d\n", t1); for (i = 0; i < t1; i++) { t2 = m->L2H_maps[i].size(); fprintf(fid, "L2H_maps-%d [%d]\n", i, t2); for (j = 0; j < t2; j++) { k = m->L2H_maps[i][j]; fprintf(fid, "%d\n", k); } } //--- matrix info --- fprintf(fid, "num_rows_global=%ld\n", num_rows_global); fprintf(fid, "_num_rows_interior=%d\n", m->_num_rows_interior); fprintf(fid, "_num_rows_owned=%d\n", m->_num_rows_owned); fprintf(fid, "_num_rows_full=%d\n", m->_num_rows_full); fprintf(fid, "_num_rows_all=%d\n", m->_num_rows_all); fprintf(fid, "_num_nz_interior=%d\n", m->_num_nz_interior); fprintf(fid, "_num_nz_owned=%d\n", m->_num_nz_owned); fprintf(fid, "_num_nz_full=%d\n", m->_num_nz_full); fprintf(fid, "_num_nz_all=%d\n", m->_num_nz_all); //compare # halo rows and halo offsets fprintf(fid, "# halo rings %d and rows %d\n", m->num_halo_rings(), m->num_halo_rows()); t1 = m->halo_offsets.size(); fprintf(fid, "halo_offsets %d\n", t1); for (i = 0; i < t1; i++) { k = m->halo_offsets[i]; fprintf(fid, "%d\n", k); } //compare halo ranges t1 = m->halo_ranges.size(); fprintf(fid, "halo_ranges %d\n", t1); for (i = 0; i < t1; i++) { k = m->halo_ranges[i]; fprintf(fid, "%d\n", k); } //compare halo ranges (host) t1 = m->halo_ranges_h.size(); fprintf(fid, "halo_ranges_h %d\n", t1); for (i = 0; i < t1; i++) { k = m->halo_ranges_h[i]; fprintf(fid, "%d\n", k); } //compare part offsets t1 = m->part_offsets.size(); fprintf(fid, "part_offsets %d\n", t1); for (i = 0; i < t1; i++) { k = m->part_offsets[i]; fprintf(fid, "%d\n", k); } //compare part offsets (host) t1 = m->part_offsets_h.size(); fprintf(fid, "part_offsets_h %d\n", t1); for (i = 0; i < t1; i++) { k = m->part_offsets_h[i]; fprintf(fid, "%d\n", k); } //compare interior row list t1 = m->interior_rows_list.size(); fprintf(fid, "interior_rows_list %d\n", t1); for (i = 0; i < t1; i++) { k = m->interior_rows_list[i]; fprintf(fid, "%d\n", k); } //compare boundary row list t1 = m->boundary_rows_list.size(); fprintf(fid, "boundary_rows_list %d\n", t1); for (i = 0; i < t1; i++) { k = m->boundary_rows_list[i]; fprintf(fid, "%d\n", k); } //compare halo1 row list t1 = m->halo1_rows_list.size(); fprintf(fid, "halo1_rows_list %d\n", t1); for (i = 0; i < t1; i++) { k = m->halo1_rows_list[i]; fprintf(fid, "%d\n", k); } fprintf(fid, "pointers halo_rows=%p and halo_btl=%p\n", m->halo_rows, m->halo_btl); //--- packing info --- //compare 
local to global map t1 = m->local_to_global_map.size(); fprintf(fid, "local_to_global_map %d\n", t1); for (i = 0; i < t1; i++) { k = m->local_to_global_map[i]; fprintf(fid, "%d\n", k); } //compare renumbering t1 = m->renumbering.size(); fprintf(fid, "renumbering %d\n", t1); for (i = 0; i < t1; i++) { k = m->renumbering[i]; fprintf(fid, "%d\n", k); } //compare inverse renumbering t1 = m->inverse_renumbering.size(); fprintf(fid, "inverse_renumbering %d\n", t1); for (i = 0; i < t1; i++) { k = m->inverse_renumbering[i]; fprintf(fid, "%d\n", k); } //--- GPU related and miscellaneous info //streams fprintf(fid, "streams i=%p, b=%p\n", m->get_int_stream(), m->get_bdy_stream()); //miscellaneous info int64_t bi = m->base_index(); //inlined function int np = m->get_num_partitions(); //inlined function int rp = (int)m->isRootPartition(); //cast from boolean to int fprintf(fid, "gid=%d,bi=%ld,np=%d,rp=%d,ir=%d,in=%d,bn=%d\n", m->global_id(), bi, np, rp, m->index_range(), m->num_interior_nodes(), m->num_boundary_nodes()); hipDeviceSynchronize(); hipGetLastError(); if (fid != stdout) { fclose(fid); } } } /* print manager for target rank to a file or stdout (for all ranks) */ template<class TConfig> void DistributedManagerBase<TConfig>::printToFile(char *f, char *s) { DistributedManagerBase<TConfig> *m = this; int rank = 0; #ifdef AMGX_WITH_MPI MPI_Comm_rank(MPI_COMM_WORLD, &rank); #endif //notice that print will be called with different (target) rank on different ranks/processes m->print(f, s, rank); } /* compare two managers */ template<class TConfig> int DistributedManagerBase<TConfig>::compare(DistributedManagerBase<TConfig> *m2) { DistributedManagerBase<TConfig> *m1 = this; int i, j, t1, t2; //compare neighbors t1 = m1->neighbors.size(); t2 = m2->neighbors.size(); if (t1 != t2) { return 1; } for (i = 0; i < t1; i++) { if (m1->neighbors[i] != m2->neighbors[i]) { return 2; } } //compare B2L_rings for (i = 0; i < (m1->neighbors.size()); i++) { t1 = m1->B2L_rings[i].size(); t2 = m2->B2L_rings[i].size(); if (t1 != t2) { return 3; } for (j = 0; j < t1; j++) { if (m1->B2L_rings[i][j] != m2->B2L_rings[i][j]) { return 4; } } } //compare B2L_maps t1 = m1->B2L_maps.size(); t2 = m2->B2L_maps.size(); if (t1 != t2) { return 5; } for (i = 0; i < t1; i++) { if (m1->B2L_maps[i] != m2->B2L_maps[i]) { return 6; } } //compare L2H_maps t1 = m1->L2H_maps.size(); t2 = m2->L2H_maps.size(); if (t1 != t2) { return 7; } for (i = 0; i < t1; i++) { if (m1->L2H_maps[i] != m2->L2H_maps[i]) { return 8; } } return 0; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> DistributedManager< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::~DistributedManager< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >() { } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> DistributedManager< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::~DistributedManager< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >() { } template <class TConfig> void DistributedManagerBase<TConfig>::consolidateB2Lmaps(IVector_h_vector &dest_coarse_B2L_maps, IVector_h_vector &coarse_B2L_maps, IVector_h &fine_neigh_to_coarse_neigh, int num_coarse_neighbors, int num_fine_neighbors) { consB2Lmaps(dest_coarse_B2L_maps, coarse_B2L_maps, fine_neigh_to_coarse_neigh, num_coarse_neighbors, num_fine_neighbors); } template <class TConfig> void DistributedManagerBase<TConfig>::consolidateB2Lmaps(IVector_d_vector &dest_coarse_B2L_maps, 
IVector_d_vector &coarse_B2L_maps, IVector_h &fine_neigh_to_coarse_neigh, int num_coarse_neighbors, int num_fine_neighbors) { consB2Lmaps(dest_coarse_B2L_maps, coarse_B2L_maps, fine_neigh_to_coarse_neigh, num_coarse_neighbors, num_fine_neighbors); } template <class TConfig> void DistributedManagerBase<TConfig>::consolidateB2LmapsOnRoot(int &num_consolidated_neighbors, IVector_h_vector &consolidated_B2L_maps, IVector_h &consolidated_coarse_ids, IVector_h_vector &dest_coarse_B2L_maps, IVector_h &coarse_neigh_to_fine_part, IVector_h &num_bdy_per_coarse_neigh, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int my_id, int my_destination_part, bool is_root_partition, int num_coarse_neighbors, DistributedComms<TConfig> *comms) { consB2LmapsOnRoot(num_consolidated_neighbors, consolidated_B2L_maps, consolidated_coarse_ids, dest_coarse_B2L_maps, coarse_neigh_to_fine_part, num_bdy_per_coarse_neigh, fine_parts_to_consolidate, num_fine_parts_to_consolidate, my_id, my_destination_part, is_root_partition, num_coarse_neighbors, comms); } template <class TConfig> void DistributedManagerBase<TConfig>::consolidateB2LmapsOnRoot(int &num_consolidated_neighbors, IVector_d_vector &consolidated_B2L_maps, IVector_h &consolidated_coarse_ids, IVector_d_vector &dest_coarse_B2L_maps, IVector_h &coarse_neigh_to_fine_part, IVector_h &num_bdy_per_coarse_neigh, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int my_id, int my_destination_part, bool is_root_partition, int num_coarse_neighbors, DistributedComms<TConfig> *comms) { consB2LmapsOnRoot(num_consolidated_neighbors, consolidated_B2L_maps, consolidated_coarse_ids, dest_coarse_B2L_maps, coarse_neigh_to_fine_part, num_bdy_per_coarse_neigh, fine_parts_to_consolidate, num_fine_parts_to_consolidate, my_id, my_destination_part, is_root_partition, num_coarse_neighbors, comms); } /**************************************** * Explicit instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class DistributedManager<TemplateMode<CASE>::Type >; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template void DistributedManager<TemplateMode<CASE>::Type>::loadDistributedMatrix( \ int, int, const int, const int, const int*, const int *col_indices, const mat_value_type*, int, int, const void*, const MatrixDistribution &dist); \ template void DistributedManager<TemplateMode<CASE>::Type>::loadDistributedMatrix( \ int, int, const int, const int, const int*, const int64_t *col_indices, const mat_value_type*, int, int, const void*, const MatrixDistribution &dist); AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class DistributedManagerBase<TemplateMode<CASE>::Type >; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace amgx
f0e0a2807a6d8405edb80faf830e671af3a98b4e.cu
/* Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <distributed/distributed_manager.h> #include <distributed/comms_mpi_gpudirect.h> #include <distributed/comms_mpi_hostbuffer_stream.h> #include <distributed/comms_visitors.h> #include <thrust/scan.h> #include <thrust/sort.h> #include <thrust/remove.h> #include <thrust/unique.h> #include <thrust/binary_search.h> #include <thrust/iterator/constant_iterator.h> #include <basic_types.h> #include <error.h> #include <util.h> #include <types.h> #include <iostream> #include <sstream> #include <fstream> #include <assert.h> #include "cuda_runtime.h" #include "reorder_partition.h" #include "amgx_types/util.h" #include <algorithm> #include <iostream> //debug only: struct is_my_part : public thrust::unary_function<int, bool> { const int _my_part; is_my_part(int my_part) : _my_part(my_part) { } __host__ __device__ bool operator()(const int part) { return (part == _my_part); } }; using namespace std; namespace amgx { static int insertDiagonals = 1; template <typename index_type> static __device__ __forceinline__ index_type internal_index(index_type i, index_type j, index_type k, index_type nx, index_type ny, index_type nz) { return k * (nx * ny) + j * nx + i; } template <typename index_type> static __device__ __forceinline__ int64_t get_global_offset(index_type p, index_type q, index_type r, index_type P, index_type Q, index_type R, index_type num_rows) { int rank_id = r * (P * Q) + q * P + p; return ((int64_t) rank_id) * ((int64_t) num_rows); } template <typename index_type> __global__ void poisson7pt_count_row_len(index_type *row_len, index_type nx, index_type ny, index_type nz, index_type p, index_type q, index_type r, index_type P, index_type Q, index_type R, index_type num_rows) { for (int tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < num_rows ; tidx += blockDim.x * gridDim.x) { /* compute p,q,r from P,Q,R and myid */ int i = tidx % nx; // Position in x direction int j = (( tidx - i) / nx) % ny; // Position in y int k = ( tidx - i - nx * j) / ( nx * ny ); // Position in z int substract = ((i 
== 0) && (p == 0)); substract += ((i == nx - 1) && (p == P - 1)); substract += ((j == 0) && (q == 0)); substract += ((j == ny - 1) && (q == Q - 1)); substract += ((k == 0) && (r == 0)); substract += ((k == nz - 1) && (r == R - 1)); // Store 7 in position (num_rows+1), such that row_len[num_rows+1] = 0 //substract = (tidx == num_rows+1) ? 7 : substract; row_len[tidx] = 7 - substract; } } template <typename index_type, typename mat_value_type> __global__ void poisson7pt_set_col_values(const index_type *__restrict__ row_offsets, index_type *__restrict__ col_indices, mat_value_type *__restrict__ values, index_type *__restrict__ diag, int64_t *__restrict__ local_to_global, index_type nx, index_type ny, index_type nz, index_type p, index_type q, index_type r, index_type P, index_type Q, index_type R, index_type num_rows) { for (int row = threadIdx.x + blockIdx.x * blockDim.x; row < num_rows ; row += blockDim.x * gridDim.x) { /* compute p,q,r from P,Q,R and myid */ int i = row % nx; // Position in x direction int j = (( row - i) / nx) % ny; // Position in y int k = ( row - i - nx * j) / ( nx * ny ); // Position in z int halo_offset = num_rows; int pos = row_offsets[row]; // Diagonal element diag[row] = pos; col_indices[pos] = row; values[pos++] = types::util<mat_value_type>::get_one() * 6.; // ---------------------------- // Neighbor at position i-1 // ---------------------------- if (i) { // Has a i-1 neighbor, which is an internal node at position (i-1,j,k) col_indices[pos] = internal_index(i - 1, j, k, nx, ny, nz); values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); } else if (p) { // Has a i-1 neighbor, which is a halo node int halo_index = halo_offset + k * ny + j; col_indices[pos] = halo_index; values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); int64_t global_offset = get_global_offset(p - 1, q, r, P, Q, R, num_rows); local_to_global[halo_index - num_rows] = global_offset + internal_index(nx - 1, j, k, nx, ny, nz); } if (p) { halo_offset += ny * nz; } // ---------------------------- // Neighbor at position i+1 // ---------------------------- if (i < nx - 1) { // Has i+1 neighbor, which is an internal node at position (i+1,j,k) col_indices[pos] = internal_index(i + 1, j, k, nx, ny, nz); values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); } else { if (p < P - 1) { // Has i+1 neighbor, which is a halo node int halo_index = halo_offset + k * ny + j; col_indices[pos] = halo_index; values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); int64_t global_offset = get_global_offset(p + 1, q, r, P, Q, R, num_rows); local_to_global[halo_index - num_rows] = global_offset + internal_index(0, j, k, nx, ny, nz); } } if (p < P - 1) { halo_offset += ny * nz; } // ---------------------------- // Neighbor at position j-1 // ---------------------------- if (j) { // Has a j-1 neighbor, which is an internal node at position (i,j-1,k) col_indices[pos] = internal_index(i, j - 1, k, nx, ny, nz); values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); } else if (q) { // Has a j-1 neighbor, which is a halo node int halo_index = halo_offset + k * nx + i; col_indices[pos] = halo_index; values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); int64_t global_offset = get_global_offset(p, q - 1, r, P, Q, R, num_rows); local_to_global[halo_index - num_rows] = global_offset + internal_index(i, 
ny - 1, k, nx, ny, nz); } if (q) { halo_offset += nx * nz; } // ---------------------------- // Neighbor at position j+1 // ---------------------------- if (j < ny - 1) { // Has a j+1 neighbor, which is an internal node at position (i,j+1,k) col_indices[pos] = internal_index(i, j + 1, k, nx, ny, nz); values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); } else { if (q < Q - 1) { // Has a j+1 neighbor, which is a halo node int halo_index = halo_offset + k * nx + i; col_indices[pos] = halo_index; values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); int64_t global_offset = get_global_offset(p, q + 1, r, P, Q, R, num_rows); local_to_global[halo_index - num_rows] = global_offset + internal_index(i, 0, k, nx, ny, nz); } } if (q < Q - 1) { halo_offset += nx * nz; } // ---------------------------- // Neighbor at position k-1 // ---------------------------- if (k) { // Has a k-1 neighbor, which is an internal node at position (i,j,k-1) col_indices[pos] = internal_index(i, j, k - 1, nx, ny, nz); values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); } else if (r) { // Has a k-1 neighbor, which is a halo node int halo_index = halo_offset + j * nx + i; col_indices[pos] = halo_index; values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); int64_t global_offset = get_global_offset(p, q, r - 1, P, Q, R, num_rows); local_to_global[halo_index - num_rows] = global_offset + internal_index(i, j, nz - 1, nx, ny, nz); } if (r) { halo_offset += nx * ny; } // ---------------------------- // Neighbor at position k+1 // ---------------------------- if (k < nz - 1) { // Has a k+1 neighbor, which is an internal node at position (i,j,k+1) col_indices[pos] = internal_index(i, j, k + 1, nx, ny, nz); values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); } else { if (r < R - 1) { // Has a k+1 neighbor, which is a halo node int halo_index = halo_offset + j * nx + i; col_indices[pos] = halo_index; values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one()); int64_t global_offset = get_global_offset(p, q, r + 1, P, Q, R, num_rows); local_to_global[halo_index - num_rows] = global_offset + internal_index(i, j, 0, nx, ny, nz); } } if (r < R - 1) { halo_offset += nx * ny; } } } template <typename mat_value_type> __global__ void set_halo_cols_values(int *row_offsets, int *col_indices, mat_value_type *values, int n, int total_rows, int bsize) { int tid = blockIdx.x * blockDim.x + threadIdx.x; while (tid < (total_rows - n) ) { int offset = row_offsets[n + tid]; col_indices[offset] = n + tid; #pragma unroll for (int i = 0; i < bsize; i++) { values[offset * bsize + i] = types::util<mat_value_type>::get_one(); // This is arbitrary } tid += gridDim.x * blockDim.x; } } template <typename mat_value_type> __global__ void zero_copy_row_lengths_ids_offsets(int *d_old_row_offsets, int *root_row_offsets, int *d_row_ids, int n, int total_num_halos, mat_value_type *diag) { int tid = blockIdx.x * blockDim.x + threadIdx.x; while (tid < n + total_num_halos) { int new_row_id = d_row_ids[tid]; if (tid < n) { int start = d_old_row_offsets[tid]; int row_length = d_old_row_offsets[tid + 1] - start; // zero-copy if (diag != NULL) // will insert the diagonal { row_length++; } root_row_offsets[new_row_id] = row_length; } tid += gridDim.x * blockDim.x; } } template< typename mat_value_type> __global__ void 
ipc_consolidation_upload_matrix(int num_rows, int *row_ids, const int *old_row_offsets, int *new_row_offsets, const int *h_old_col_indices, int *new_col_indices, const mat_value_type *h_old_values, mat_value_type *new_values, const mat_value_type *h_old_diag, int bsize) { int row = blockIdx.x * blockDim.x + threadIdx.x; while (row < num_rows) { int new_row = row_ids[row]; int src_base = old_row_offsets[row]; int dst_base = new_row_offsets[new_row]; // Insert the diagonal at the beginning of each row if (h_old_diag != NULL) { new_col_indices[dst_base] = new_row; #pragma unroll for (int j = 0; j < bsize; j++) { new_values[dst_base * bsize + j] = h_old_diag[row * bsize + j]; } // Increment dst_base by one dst_base++; } int end = old_row_offsets[row + 1] - src_base; for (int i = 0; i < end; i++) { int old_col = h_old_col_indices[src_base + i]; int new_col = row_ids[old_col]; new_col_indices[dst_base + i] = new_col; #pragma unroll for (int j = 0; j < bsize; j++) { new_values[ (dst_base + i)*bsize + j ] = h_old_values[ (src_base + i) * bsize + j ]; } } row += gridDim.x * blockDim.x; } } template< typename mat_value_type> __global__ void ipc_consolidation_replace_values(int num_rows, int *row_ids, const int *old_row_offsets, int *new_row_offsets, const mat_value_type *h_old_values, mat_value_type *new_values, const mat_value_type *h_old_diag, int bsize) { int row = blockIdx.x * blockDim.x + threadIdx.x; while (row < num_rows) { int new_row = row_ids[row]; int src_base = old_row_offsets[row]; int dst_base = new_row_offsets[new_row]; // Insert the diagonal at the beginning of each row if (h_old_diag != NULL) { #pragma unroll for (int j = 0; j < bsize; j++) { new_values[dst_base * bsize + j] = h_old_diag[row * bsize + j]; } // Increment dst_base by one dst_base++; } int end = old_row_offsets[row + 1] - src_base; for (int i = 0; i < end; i++) { #pragma unroll for (int j = 0; j < bsize; j++) { new_values[ (dst_base + i)*bsize + j ] = h_old_values[ (src_base + i) * bsize + j ]; } } row += gridDim.x * blockDim.x; } } __global__ void flag_halo_ids_kernel(INDEX_TYPE *flags, INDEX_TYPE *ids, INDEX_TYPE offset, INDEX_TYPE size, INDEX_TYPE upper) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { flags[ids[idx] - offset] = 1; idx += blockDim.x * gridDim.x; } } __global__ void read_halo_ids_kernel(INDEX_TYPE *flags, INDEX_TYPE *ids, INDEX_TYPE offset, INDEX_TYPE size) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { ids[idx] = flags[ids[idx] - offset]; idx += blockDim.x * gridDim.x; } } template<class T> __global__ void reorder_vector_values(T *dst, const T *src, const INDEX_TYPE *map, INDEX_TYPE blocksize, INDEX_TYPE num_rows) { int row = blockIdx.x * (blockDim.x / blocksize) + threadIdx.x / blocksize; //vectorised by block size int vec_id = threadIdx.x % blocksize; if (threadIdx.x >= (blockDim.x / blocksize)*blocksize ) { return; } while (row < num_rows) { dst[map[row]*blocksize + vec_id] = src[row * blocksize + vec_id]; row += gridDim.x * (blockDim.x / blocksize); } } template<class T> __global__ void inverse_reorder_vector_values(T *dst, T *src, INDEX_TYPE *map, INDEX_TYPE blocksize, INDEX_TYPE num_rows) { int row = blockIdx.x * (blockDim.x / blocksize) + threadIdx.x / blocksize; int vec_id = threadIdx.x % blocksize; if (threadIdx.x >= (blockDim.x / blocksize)*blocksize ) { return; } while (row < num_rows) { dst[row * blocksize + vec_id] = src[map[row] * blocksize + vec_id]; row += gridDim.x * (blockDim.x / blocksize); } } __global__ void 
remove_boundary_kernel(INDEX_TYPE *flags, INDEX_TYPE *maps, INDEX_TYPE size) { int element = blockIdx.x * blockDim.x + threadIdx.x; while (element < size) { flags[maps[element]] = 0; //this won't be a problem, because we are overwriting the same thing element += blockDim.x * gridDim.x; } } __global__ void get_unassigned_kernel(INDEX_TYPE *unassigned_flags, INDEX_TYPE *map, INDEX_TYPE *output, INDEX_TYPE part_size, INDEX_TYPE uf_size ) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < part_size) { if (map[idx] < uf_size) { if (unassigned_flags[map[idx]] == 0) { unassigned_flags[map[idx]] = 1; output[idx] = 1; } } idx += blockDim.x * gridDim.x; } } __global__ void set_unassigned_kernel(INDEX_TYPE *part_assigned_flags, INDEX_TYPE *part_num, INDEX_TYPE *map, INDEX_TYPE *renum, INDEX_TYPE part_size, INDEX_TYPE max_element, INDEX_TYPE renum_size /*, INDEX_TYPE rank*/) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < part_size) { if (map[idx] < renum_size) { if (part_assigned_flags[idx] == 1) { renum[map[idx]] = max_element + part_num[idx]; } //also update the B2L map map[idx] = renum[map[idx]]; } idx += blockDim.x * gridDim.x; } } __global__ void renumber_b2l_maps(INDEX_TYPE *map, INDEX_TYPE *renum, INDEX_TYPE part_size, INDEX_TYPE renum_size) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < part_size) { if (map[idx] < renum_size) { //update the B2L map map[idx] = renum[map[idx]]; } /* advance unconditionally; keeping the increment inside the if-branch (as before) would spin forever on any entry with map[idx] >= renum_size */ idx += blockDim.x * gridDim.x; } } __global__ void calc_inverse_renumbering(INDEX_TYPE *renum, INDEX_TYPE *irenum, INDEX_TYPE max_element) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < max_element) { if (renum[idx] < 0 || renum[idx] >= max_element) { printf("Renumbering error: %d %d\n", renum[idx], max_element); } irenum[renum[idx]] = idx; idx += blockDim.x * gridDim.x; } } __global__ void create_halo_mapping(INDEX_TYPE *mapping, INDEX_TYPE *node_list, int64_t base_index, INDEX_TYPE map_offset, INDEX_TYPE size) { int row = blockIdx.x * blockDim.x + threadIdx.x; while (row < size) { int idx = node_list[row] - base_index; mapping[idx] = map_offset + row; row += blockDim.x * gridDim.x; } } __global__ void apply_h2l2b_mapping(INDEX_TYPE *mapping, INDEX_TYPE *node_list, int64_t base_index, INDEX_TYPE *b2l_map, INDEX_TYPE size) { int row = blockIdx.x * blockDim.x + threadIdx.x; while (row < size) { int idx = node_list[row] - base_index; mapping[idx] = b2l_map[row]; row += blockDim.x * gridDim.x; } } template <int coop> __global__ void map_col_indices_and_count_rowlen(INDEX_TYPE *row_offsets, INDEX_TYPE *col_indices, INDEX_TYPE *row_length, INDEX_TYPE *mapping, INDEX_TYPE num_rows, INDEX_TYPE insert_diagonal) { extern __shared__ volatile int reduction[]; int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop; int coopIdx = threadIdx.x % coop; while (row < num_rows) { int valid = 0; for (int idx = row_offsets[row] + coopIdx; idx < row_offsets[row + 1]; idx += coop) //this may look horrible, but I expect low branch divergence, because col indices in a row usually belong to the same partition (or at most one more) { int colIdx = col_indices[idx]; int new_col_idx = mapping[colIdx]; if (new_col_idx >= 0) { valid++; col_indices[idx] = new_col_idx; } else { col_indices[idx] = -1; } } reduction[threadIdx.x] = valid; for (int s = 2; s > 0; s >>= 1) { if (coopIdx < s) { reduction[threadIdx.x] += reduction[threadIdx.x + s]; } __syncthreads(); } if (coopIdx == 0) { row_length[row] = reduction[threadIdx.x] + insert_diagonal; } row += gridDim.x * blockDim.x / coop; } }
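/* Note on map_col_indices_and_count_rowlen above: each group of `coop` threads cooperates on one row.
   Column indices with no entry in `mapping` are overwritten with -1 to mark them as invalid, and the number
   of surviving entries per row (plus the optional diagonal) is written to `row_length`. The shared-memory
   reduction starts at a stride of 2 and therefore sums exactly four partial counts per row, which matches a
   launch with coop == 4 and blockDim.x a multiple of 4. */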
__global__ void renumber_P_col_indices(INDEX_TYPE *__restrict__ col_indices, const INDEX_TYPE *__restrict__ renum, INDEX_TYPE num_owned_coarse_pts, INDEX_TYPE num_owned_fine_pts) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < num_owned_fine_pts ) { INDEX_TYPE col_id = col_indices[idx]; if (col_id < num_owned_coarse_pts) { col_indices[idx] = renum[col_id]; } idx += blockDim.x * gridDim.x; } } template <int coop, class T> __global__ void reorder_R_matrix(const INDEX_TYPE *old_rows, const INDEX_TYPE *old_cols, const T *old_vals, const INDEX_TYPE *rows, INDEX_TYPE *cols, T *vals, const INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows, INDEX_TYPE num_owned_rows) { int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop; int coopIdx = threadIdx.x % coop; while (row < num_rows) { INDEX_TYPE src_base = old_rows[row]; INDEX_TYPE dst_base = row < num_owned_rows ? rows[renumbering[row]] : src_base; for (int i = coopIdx; i < old_rows[row + 1]*bsize - src_base * bsize; i += coop) { vals[dst_base * bsize + i] = old_vals[src_base * bsize + i]; } for (int i = coopIdx; i < old_rows[row + 1] - src_base; i += coop) { cols[dst_base + i] = old_cols[src_base + i]; } row += blockDim.x * gridDim.x / coop; } } template <int coop, class T> __global__ void reorder_whole_matrix(INDEX_TYPE *old_rows, INDEX_TYPE *old_cols, T *old_vals, INDEX_TYPE *rows, INDEX_TYPE *cols, T *vals, INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows, INDEX_TYPE insert_diagonal) { int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop; int coopIdx = threadIdx.x % coop; while (row < num_rows) { INDEX_TYPE src_base = old_rows[row]; INDEX_TYPE dst_base = rows[renumbering[row]]; if (insert_diagonal) { if (coopIdx == 0) { cols[dst_base] = renumbering[row]; } for (int i = coopIdx; i < bsize; i += coop) { vals[dst_base * bsize + i] = old_vals[(old_rows[num_rows] + row) * bsize + i]; } dst_base++; } for (int i = coopIdx; i < old_rows[row + 1]*bsize - src_base * bsize; i += coop) { vals[dst_base * bsize + i] = old_vals[src_base * bsize + i]; } for (int i = coopIdx; i < old_rows[row + 1] - src_base; i += coop) { cols[dst_base + i] = old_cols[src_base + i]; } row += blockDim.x * gridDim.x / coop; } } template <int coop, class T> __global__ void replace_values_matrix(const T *src_vals_h, const T *src_diag_h, const INDEX_TYPE *old_rows, const INDEX_TYPE *rows, T *vals, const INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows) { int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop; int coopIdx = threadIdx.x % coop; while (row < num_rows) { INDEX_TYPE src_base = old_rows[row]; INDEX_TYPE dst_base = rows[renumbering[row]]; for (int i = coopIdx; i < bsize; i += coop) { vals[dst_base * bsize + i] = src_diag_h[row * bsize + i]; } dst_base++; for (int i = coopIdx; i < old_rows[row + 1]*bsize - src_base * bsize; i += coop) { vals[dst_base * bsize + i] = src_vals_h[src_base * bsize + i]; } row += blockDim.x * gridDim.x / coop; } } template <int coop, class T> __global__ void replace_values_matrix(const T *src_vals_h, const INDEX_TYPE *old_rows, const INDEX_TYPE *rows, T *vals, const INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows) { int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop; int coopIdx = threadIdx.x % coop; while (row < num_rows) { INDEX_TYPE src_base = old_rows[row]; INDEX_TYPE dst_base = rows[renumbering[row]]; for (int i = coopIdx; i < old_rows[row + 1]*bsize - src_base * bsize; i += coop) { vals[dst_base * bsize + i] = src_vals_h[src_base * bsize 
+ i]; } row += blockDim.x * gridDim.x / coop; } } //TODO: optimize by vectorizing template <class T> __global__ void reorder_whole_halo_matrix(INDEX_TYPE *old_rows, INDEX_TYPE *old_cols, T *old_vals, INDEX_TYPE *rows, INDEX_TYPE *cols, T *vals, INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows, INDEX_TYPE insert_diagonal, INDEX_TYPE global_offset, INDEX_TYPE local_offset, INDEX_TYPE halo_rows) { int row = blockIdx.x * blockDim.x + threadIdx.x; while (row < num_rows) { INDEX_TYPE src_base = old_rows[row]; INDEX_TYPE dst = rows[row]; if (insert_diagonal) { cols[dst] = global_offset + row; for (int j = 0; j < bsize; j++) { vals[dst * bsize + j] = old_vals[(old_rows[halo_rows - local_offset] + local_offset + row) * bsize + j]; } dst++; } for (int i = 0; i < old_rows[row + 1] - src_base; i++) { INDEX_TYPE colIdx = old_cols[src_base + i]; if (colIdx >= 0) { cols[dst] = colIdx; for (int j = 0; j < bsize; j++) { vals[dst * bsize + j] = old_vals[(src_base + i) * bsize + j]; } dst++; } } row += blockDim.x * gridDim.x; } } __global__ void calc_rowlen_reorder(INDEX_TYPE *row_offsets, INDEX_TYPE *row_len, INDEX_TYPE *map, INDEX_TYPE size, INDEX_TYPE insert_diag) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { row_len[map[idx]] = row_offsets[idx + 1] - row_offsets[idx] + insert_diag; idx += blockDim.x * gridDim.x; } } template < class TConfig > void DistributedManagerBase<TConfig>::remove_boundary(IVector_d &flagArray, IVector_d &B2L_map, int size) { int num_blocks = min(4096, (size + 127) / 128); remove_boundary_kernel <<< num_blocks, 128>>>(flagArray.raw(), B2L_map.raw(), size); cudaCheckError(); } template < class TConfig > void DistributedManagerBase<TConfig>::get_unassigned(IVector_d &flagArray, IVector_d &B2L_map, IVector_d &partition_flags, int size, int global_size /*, int rank*/) { int num_blocks = min(4096, (size + 191) / 192); get_unassigned_kernel <<< num_blocks, 192>>>(flagArray.raw(), B2L_map.raw(), partition_flags.raw(), size, global_size /*, rank*/); cudaCheckError(); } template < class TConfig > void DistributedManagerBase<TConfig>::set_unassigned(IVector_d &partition_flags, IVector_d &partition_renum, IVector_d &B2L_map, IVector_d &renumbering, int size, int max_element, int global_size /*, int rank*/) { int num_blocks = min(4096, (size + 191) / 192); set_unassigned_kernel <<< num_blocks, 192>>>(partition_flags.raw(), partition_renum.raw(), B2L_map.raw(), renumbering.raw(), size, max_element, global_size /*,rank*/); cudaCheckError(); } template <class TConfig > inline void DistributedManagerBase<TConfig>::set_initialized(IVector &row_offsets) { // For P and R sizes the sizes are fixed at creation if(m_fixed_view_size) { return; } if (neighbors.size() > 0) { //distributed: cache num_rows/num_nz for different views _num_rows_interior = _num_interior_nodes; _num_nz_interior = row_offsets[_num_rows_interior]; _num_rows_owned = _num_interior_nodes + _num_boundary_nodes; _num_nz_owned = row_offsets[_num_rows_owned]; _num_rows_full = halo_offsets[neighbors.size()]; if (_num_rows_full >= row_offsets.size()) { _num_nz_full = row_offsets[row_offsets.size() - 1]; } else { _num_nz_full = row_offsets[_num_rows_full]; } _num_rows_all = halo_offsets[halo_offsets.size() - 1]; _num_nz_all = _num_nz_full; } else { _num_rows_interior = _num_interior_nodes; _num_nz_interior = row_offsets[_num_rows_interior]; _num_rows_owned = _num_interior_nodes; _num_nz_owned = row_offsets[_num_rows_owned]; _num_rows_full = _num_rows_owned; _num_nz_full = _num_nz_owned; _num_rows_all 
= _num_rows_owned; _num_nz_all = _num_nz_owned; } } template <class TConfig > void DistributedManagerBase<TConfig>::createAggregatesRenumbering(IVector_h &renumbering, IVector_h_vector &B2L_maps, int size, int num_neighbors, int &num_interior_aggregates, int &num_boundary_aggregates, int num_rings) { createAggRenumbering(renumbering, B2L_maps, size, num_neighbors, num_interior_aggregates, num_boundary_aggregates, num_rings); } template <class TConfig > void DistributedManagerBase<TConfig>::createAggregatesRenumbering(IVector_d &renumbering, IVector_d_vector &B2L_maps, int size, int num_neighbors, int &num_interior_aggregates, int &num_boundary_aggregates, int num_rings) { createAggRenumbering(renumbering, B2L_maps, size, num_neighbors, num_interior_aggregates, num_boundary_aggregates, num_rings); } template <class TConfig > template <class IVector_hd> void DistributedManagerBase<TConfig>::createAggRenumbering(IVector_hd &renumbering, std::vector<IVector_hd> &B2L_maps, int size, int num_neighbors, int &num_interior_aggregates, int &num_boundary_aggregates, int num_rings) { if (num_rings != 1) { FatalError("num_rings > 1 not supported in consolidation", AMGX_ERR_NOT_IMPLEMENTED); } //int num_neighbors = this->neighbors.size(); if (num_neighbors == 0) { num_boundary_aggregates = 0; num_interior_aggregates = size; return; } //initial size to size+1 so we have the total size after a scan int global_size = size; renumbering.resize(size + 1); // // Step 1 - in the main matrix, separate interior and boundary nodes (1/0 in flagArray), renumber interior ones with an exclusive scan // IVector_hd flagArray(size + 1); thrust::fill(flagArray.begin(), flagArray.begin() + size + 1, 1); cudaCheckError(); //sets 1 for interior nodes, 0 for boundary node for (int i = 0; i < num_neighbors; i++ ) { int size = B2L_maps[i].size(); remove_boundary(flagArray, B2L_maps[i], size); } //gets the renumbering of interior nodes thrust::exclusive_scan(flagArray.begin(), flagArray.begin() + size + 1, renumbering.begin()); cudaCheckError(); // // Step 2 - Renumber nodes that are in the boundary, stepping through each B2L map, and renumbering ones that have not been renumbered yet // //what is the biggest B2L size INDEX_TYPE max_size = 0; for (int i = 0; i < num_neighbors; i++) { max_size = max_size > B2L_maps[i].size() ? 
max_size : B2L_maps[i].size(); } //allocate work vectors (should be pretty small) IVector_hd partition_flags(max_size); IVector_hd partition_renum(max_size); //the number of renumbered nodes so far int max_element = renumbering[size]; num_interior_aggregates = max_element; num_boundary_aggregates = size - max_element; renumbering.resize(size); for (int i = 0; i < num_neighbors; i++) { //find nodes that are part of the current boundary and they haven't been renumbered yet thrust::fill(partition_flags.begin(), partition_flags.begin() + max_size, 0); int size = B2L_maps[i].size(); get_unassigned(flagArray, B2L_maps[i], partition_flags, size, global_size/*,0*/); //calculate the local renumbering (within this boundary region) of these nodes thrust::exclusive_scan(partition_flags.begin(), partition_flags.begin() + max_size, partition_renum.begin()); //apply renumbering to the big numbering table set_unassigned(partition_flags, partition_renum, B2L_maps[i], renumbering, size, max_element, global_size/*,0*/); //update the number of renumbered nodes max_element += partition_renum[max_size - 1] + partition_flags[max_size - 1]; } cudaCheckError(); } template <class TConfig> inline DistributedManagerBase<TConfig>::DistributedManagerBase(Matrix<TConfig> &a) : m_fine_level_comms(NULL), A(&a), m_pinned_buffer_size(0), m_pinned_buffer(NULL), _num_interior_nodes(0), _num_boundary_nodes(0), _comms(NULL), has_B2L(false), neighbors(_neighbors), B2L_maps(_B2L_maps), L2H_maps(_L2H_maps), B2L_rings(_B2L_rings), halo_rows_ref_count(0), halo_btl_ref_count(0), halo_ranges(_halo_ranges), halo_ranges_h(_halo_ranges_h), part_offsets(_part_offsets), part_offsets_h(_part_offsets_h), halo_rows(NULL), halo_btl(NULL), m_is_root_partition(false), m_is_glued(false), m_is_fine_level_glued(false), m_is_fine_level_consolidated(false), m_is_fine_level_root_partition(false), m_use_cuda_ipc_consolidation(false), m_fixed_view_size(false) { cudaEventCreate(&comm_event); cudaStreamCreateWithFlags(&m_int_stream, cudaStreamNonBlocking); cudaStreamCreateWithFlags(&m_bdy_stream, cudaStreamNonBlocking); this->createComms(A->getResources()); int my_id = this->getComms()->get_global_id(); int num_parts = this->getComms()->get_num_partitions(); this->set_global_id(my_id); this->set_num_partitions(num_parts); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::generatePoisson7pt(int nx, int ny, int nz, int P, int Q, int R) { int my_id = this->getComms()->get_global_id(); int p, q, r; if (nx < P || ny < Q || nz < R) { FatalError("(nx < P) or (ny < Q) or (nz < R) not supported\n", AMGX_ERR_NOT_IMPLEMENTED); } /* compute p,q,r from P,Q,R and myid */ p = my_id % P; // Position in x direction q = (( my_id - p) / P) % Q; // Position in y r = ( my_id - p - P * q) / ( P * Q ); // Position in z // Create A.row_indices, A.col_indices, A.values, A.diag int num_rows = nx * ny * nz; int num_nonzeros = num_rows * 7; // Ignoring any boundary, 7 nnz per row int num_substract = 0; if (p == 0) { num_substract += ny * nz; } if (p == P - 1) { num_substract += ny * nz; } if (q == 0) { num_substract += nx * nz; } if (q == Q - 1) { num_substract += nx * nz; } if (r == 0) { num_substract += nx * ny; } if (r == R - 1) { num_substract += nx * ny; } num_nonzeros -= num_substract; int num_halo_nodes = 2 * (ny * nz + nx * nz + nx * ny) - num_substract; this->local_to_global_map.resize(num_halo_nodes); this->A->set_initialized(0); 
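/* The halo nodes counted above are the cells of the six neighbouring faces, minus the faces that lie on the
   global domain boundary (those were already removed via num_substract). For example, a fully interior rank
   (0 < p < P-1, 0 < q < Q-1, 0 < r < R-1) has num_substract == 0 and therefore
   num_halo_nodes == 2 * (ny*nz + nx*nz + nx*ny); with nx == ny == nz == 4 that is 2 * (16 + 16 + 16) = 96
   extra columns appended after the num_rows owned columns in the resize below. */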
this->A->resize(0, 0, 0, 1, 1, 1); this->A->addProps(CSR); this->A->resize(num_rows, num_rows + num_halo_nodes, num_nonzeros, 1, 1, 1); const int cta_size = 128; const int grid_size = std::min( 4096, (num_rows + cta_size - 1) / cta_size ); poisson7pt_count_row_len <<< grid_size, cta_size>>>(this->A->row_offsets.raw(), nx, ny, nz, p, q, r, P, Q, R, num_rows); thrust::exclusive_scan(this->A->row_offsets.begin(), this->A->row_offsets.end(), this->A->row_offsets.begin()); cudaCheckError(); // Now set nonzeros columns and values // TODO: vectorize this const int grid_size2 = std::min( 4096, (num_rows + cta_size - 1) / cta_size ); poisson7pt_set_col_values <<< grid_size2, cta_size>>> (this->A->row_offsets.raw(), this->A->col_indices.raw(), this->A->values.raw(), this->A->diag.raw(), this->local_to_global_map.raw(), nx, ny, nz, p, q, r, P, Q, R, num_rows); cudaCheckError(); // fill parts_offsets_h // All ranks have same number of nodes int num_ranks = P * Q * R; this->part_offsets_h.resize(num_ranks + 1); this->part_offsets_h[0] = (int64_t) 0; for (int i = 1; i < num_ranks + 1; i++) { this->part_offsets_h[i] = this->part_offsets_h[i - 1] + (int64_t) num_rows; } // Device to host copy this->part_offsets = this->part_offsets_h; this->num_rows_global = P * Q * R * nx * ny * nz; // this->A->set_initialized(1); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> template <typename t_colIndex> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributed_SetOffsets( int num_ranks, int num_rows_global, const t_colIndex* partition_offsets) { // fill part offsets internal data structures this->part_offsets_h.resize(num_ranks + 1); for (int i = 0; i <= num_ranks; i++) { this->part_offsets_h[i] = partition_offsets[i]; } // copy to device this->part_offsets = this->part_offsets_h; // set num of global rows this->num_rows_global = num_rows_global; cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> template <typename t_colIndex> map<t_colIndex, int> DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributed_LocalToGlobal(int num_rows, I64Vector_h &off_diag_cols) { // sort global column indices thrust::sort(off_diag_cols.begin(), off_diag_cols.end()); // find unique columns and set local <-> global mappings // 1) Removed unneeded vector 2) Create map on host first, upload later (less thrust calls) I64Vector_h local_to_global_h; map<t_colIndex, int> global_to_local; // temporary if (off_diag_cols.size() > 0) { global_to_local[off_diag_cols[0]] = num_rows; local_to_global_h.push_back(off_diag_cols[0]); } for (int i = 1; i < off_diag_cols.size(); i++) { if (off_diag_cols[i] != off_diag_cols[i - 1]) { global_to_local[off_diag_cols[i]] = num_rows + local_to_global_h.size(); local_to_global_h.push_back(off_diag_cols[i]); } } // Upload finished map in one piece this->local_to_global_map.resize(local_to_global_h.size()); thrust::copy(local_to_global_h.begin(), local_to_global_h.end(), this->local_to_global_map.begin()); return global_to_local; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributed_InitLocalMatrix( IVector_h local_col_indices, int num_rows, int num_nonzeros, const int block_dimx, const int block_dimy, const int *row_offsets, const mat_value_type *values, const void 
*diag) { // init local matrix this->A->set_initialized(0); this->A->resize(0, 0, 0, 1, 1, 1); this->A->addProps(CSR); if (diag) { this->A->addProps(DIAG); } this->A->resize(num_rows, num_rows + this->local_to_global_map.size(), num_nonzeros, block_dimx, block_dimy, 1); cudaCheckError(); // set local matrix thrust::copy(row_offsets, row_offsets + num_rows + 1, this->A->row_offsets.begin()); this->A->col_indices = local_col_indices; thrust::copy(values, values + num_nonzeros * block_dimx * block_dimy, this->A->values.begin()); cudaCheckError(); // setup diagonal if (diag) { cudaMemcpy(this->A->values.raw() + this->A->diagOffset()*this->A->get_block_size(), diag, sizeof(mat_value_type) * num_rows * block_dimx * block_dimy, cudaMemcpyDefault); } else { this->A->computeDiagonal(); } cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> template <typename t_colIndex> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributedMatrixPartitionVec( int num_rows, int num_nonzeros, const int block_dimx, const int block_dimy, const int *row_offsets, const t_colIndex *col_indices, const mat_value_type *values, int num_ranks, int num_rows_global, const void *diag, const int *partition) { // fetch my rank int my_id = this->getComms()->get_global_id(); // setup partition vector IVector_h partitionVec(num_rows_global); if (partition == NULL) { // initialize equal partitioning IVector_h scanPartSize(num_ranks + 1); for (int p = 0; p < num_ranks; p++) { scanPartSize[p] = p * num_rows_global / num_ranks; } scanPartSize[num_ranks] = num_rows_global; int p = 0; for (int i = 0; i < num_rows_global; i++) { if (i >= scanPartSize[p + 1]) { p++; } partitionVec[i] = p; } } else { // use existing partition info for (int i = 0; i < num_rows_global; i++) { partitionVec[i] = partition[i]; } } // compute partition offsets (based on number of elements per partition). Will be modified when calculating partition map. 
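/* Illustrative example for the two passes below (values hypothetical): with num_ranks == 2 and
   partitionVec == [0, 0, 1, 1, 1], the counting pass gives partition_offsets == [0, 2, 3] and the inclusive
   scan turns it into [0, 2, 5]; that clean copy is saved via loadDistributed_SetOffsets. The second pass then
   scatters each global row id into partition_map at the current offset of its owning rank, incrementing
   partition_offsets[rank] as it goes (hence the note above that the array gets modified), producing
   partition_map == [0, 1, 2, 3, 4] here because the rows are already grouped by partition. */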
t_colIndex *partition_offsets = (t_colIndex *)calloc(num_ranks + 1, sizeof(t_colIndex)); for (int i = 0; i < num_rows_global; i++) { int pvi = partitionVec[i]; partition_offsets[pvi + 1]++; } thrust::inclusive_scan(partition_offsets, partition_offsets + num_ranks + 1, partition_offsets); loadDistributed_SetOffsets(num_ranks, num_rows_global, partition_offsets); // compute partition map (which tells you how the global elements are mapped into the partitions) t_colIndex *partition_map = (t_colIndex *)calloc(num_rows_global, sizeof(t_colIndex)); for (int i = 0; i < num_rows_global; i++) { int pvi = partitionVec[i]; t_colIndex poi = partition_offsets[pvi]; partition_map[poi] = i; partition_offsets[pvi]++; } free(partition_offsets); // compute the inverse partition map t_colIndex *ipartition_map = (t_colIndex *)calloc(num_rows_global, sizeof(t_colIndex)); for (int i = 0; i < num_rows_global; i++) { ipartition_map[partition_map[i]] = i; } free(partition_map); int h_cidx_allocated = 0; const t_colIndex *h_col_indices_global = (const t_colIndex *)this->getHostPointerForData(col_indices, num_nonzeros * sizeof(t_colIndex), &h_cidx_allocated); // gather all off-diag columns I64Vector_h off_diag_cols; for (int i = 0; i < num_nonzeros; i++) { if (partitionVec[h_col_indices_global[i]] != my_id) { off_diag_cols.push_back(ipartition_map[h_col_indices_global[i]]); } } auto global_to_local = loadDistributed_LocalToGlobal<t_colIndex>(num_rows, off_diag_cols); // set 1, then scan to compute local row indices IVector_h my_indices(num_rows_global); for (int i = 0; i < num_nonzeros; i++) { if (partitionVec[h_col_indices_global[i]] == my_id) // find my local columns and set to 1 { my_indices[ipartition_map[h_col_indices_global[i]]] = 1; } } thrust::exclusive_scan(my_indices.begin(), my_indices.end(), my_indices.begin()); // remap columns to local IVector_h local_col_indices(num_nonzeros); for (int i = 0; i < num_nonzeros; i++) { if (partitionVec[h_col_indices_global[i]] != my_id) { // off-diag local_col_indices[i] = global_to_local[ipartition_map[h_col_indices_global[i]]]; } else { // diag local_col_indices[i] = my_indices[ipartition_map[h_col_indices_global[i]]]; } } free(ipartition_map); loadDistributed_InitLocalMatrix(local_col_indices, num_rows, num_nonzeros, block_dimx, block_dimy, row_offsets, values, diag); cudaCheckError(); // don't free the possibly allocated pinned buffer, since it could be used later;
if it is not, it will be deallocated automatically /*if (h_cidx_allocated) { free((void*)h_col_indices_global); }*/ } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> template <typename t_colIndex> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributedMatrixPartitionOffsets( int num_rows, int num_nonzeros, const int block_dimx, const int block_dimy, const int *row_offsets, const t_colIndex *col_indices, const mat_value_type *values, int num_ranks, int num_rows_global, const void *diag, const t_colIndex *partition_offsets) { // fetch my rank int my_id = this->getComms()->get_global_id(); // sanity check, cheap to perform, and helps prevent harder-to-debug errors later on if (!std::is_sorted(partition_offsets, partition_offsets + num_ranks + 1)) { FatalError("Partition offsets are not sorted.", AMGX_ERR_BAD_PARAMETERS); } loadDistributed_SetOffsets(num_ranks, num_rows_global, partition_offsets); // Create predicate to determine if a column is in the local diagonal block t_colIndex my_first_col = this->part_offsets_h[my_id]; t_colIndex one_past_my_last_col = this->part_offsets_h[my_id + 1]; auto in_local_diagonal_block = [my_first_col, one_past_my_last_col](const t_colIndex col_index) { return col_index >= my_first_col && col_index < one_past_my_last_col; }; int h_cidx_allocated = 0; const t_colIndex *h_col_indices_global = (const t_colIndex *)this->getHostPointerForData(col_indices, num_nonzeros * sizeof(t_colIndex), &h_cidx_allocated); // gather all off-diag columns I64Vector_h off_diag_cols; for (int i = 0; i < num_nonzeros; i++) { if (!in_local_diagonal_block(h_col_indices_global[i])) { off_diag_cols.push_back(h_col_indices_global[i]); } } auto global_to_local = loadDistributed_LocalToGlobal<t_colIndex>(num_rows, off_diag_cols); // set 1, then scan to compute local row indices // "coordinate-shift" columns so they lie in a much smaller range of my diagonal indices int diagonal_size = this->part_offsets_h[my_id + 1] - this->part_offsets_h[my_id]; IVector_h my_indices(diagonal_size); for (int i = 0; i < num_nonzeros; i++) { t_colIndex col_index = h_col_indices_global[i]; if (in_local_diagonal_block(h_col_indices_global[i])) // find my local columns and set to 1 { // columns that are on *my* diag partition cannot have an index from 0..num_rows_global // instead, part_offsets_h[my_id] <= col_index < part_offsets[my_id+1] col_index -= this->part_offsets_h[my_id]; my_indices[col_index] = 1; } } thrust::exclusive_scan(my_indices.begin(), my_indices.end(), my_indices.begin()); // remap columns to local IVector_h local_col_indices(num_nonzeros); for (int i = 0; i < num_nonzeros; i++) { t_colIndex col_index = h_col_indices_global[i]; if (!in_local_diagonal_block(col_index)) { // off-diag local_col_indices[i] = global_to_local[col_index]; } else { // diag col_index -= this->part_offsets_h[my_id]; local_col_indices[i] = my_indices[col_index]; } } loadDistributed_InitLocalMatrix(local_col_indices, num_rows, num_nonzeros, block_dimx, block_dimy, row_offsets, values, diag); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> template <typename t_colIndex> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributedMatrix( int num_rows, int num_nonzeros, const int block_dimx, const int block_dimy, const int *row_offsets, const t_colIndex *col_indices, const mat_value_type *values, int num_ranks, int num_rows_global,
const void *diag, const MatrixDistribution &dist) { using PI = MatrixDistribution::PartitionInformation; switch (dist.getPartitionInformationStyle()) { case PI::PartitionVec: loadDistributedMatrixPartitionVec(num_rows, num_nonzeros, block_dimx, block_dimy, row_offsets, col_indices, values, num_ranks, num_rows_global, diag, (const int*) dist.getPartitionData()); break; case PI::PartitionOffsets: loadDistributedMatrixPartitionOffsets(num_rows, num_nonzeros, block_dimx, block_dimy, row_offsets, col_indices, values, num_ranks, num_rows_global, diag, (const t_colIndex*) dist.getPartitionData()); break; default: FatalError("Unsupported partitioning data format used with loadDistributedMatrix", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::renumberMatrixOneRing(int update_neighbours) { FatalError("Distributed classical AMG not implemented on host", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::renumberMatrixOneRing(int update_neighbours) { // Step 1: Using halo_ranges, flag neighbors and at the same time, flag halo_nodes (flag_halo_nodes_local) int my_id = this->global_id(); int num_parts = this->get_num_partitions(); this->set_base_index(this->part_offsets_h[my_id]); this->set_index_range(this->part_offsets_h[my_id + 1] - this->part_offsets_h[my_id]); DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>; // Create/update list of neighbors if (update_neighbours) { typedef typename TConfig::template setVecPrec<AMGX_vecInt64>::Type i64vec_value_type; typedef Vector<i64vec_value_type> I64Vector; typedef typename Matrix<TConfig>::MVector MVector; std::vector<IVector> halo_row_offsets(this->neighbors.size()); std::vector<I64Vector> halo_global_indices(this->neighbors.size()); std::vector<MVector> halo_values(this->neighbors.size()); prep->create_halo_rows_global_indices(*(this->A), halo_row_offsets, halo_global_indices, halo_values); prep->update_neighbors_list(*(this->A), this->neighbors, this->halo_ranges_h, this->halo_ranges, this->part_offsets_h, this->part_offsets, halo_row_offsets, halo_global_indices); } else { prep->create_neighbors_v2(*(this->A)); } this->getComms()->set_neighbors(this->neighbors.size()); // Create B2L_maps and L2H_maps prep->create_boundary_lists_v3(*(this->A)); // halo_offsets int neighbors = this->A->manager->num_neighbors(); int A_num_rows, offset; this->A->getOffsetAndSizeForView(OWNED, &offset, &A_num_rows); this->halo_offsets.resize(neighbors + 1, 0); this->halo_offsets[0] = A_num_rows; for (int i = 0; i < neighbors; i++) { this->halo_offsets[i + 1] = this->halo_offsets[i] + this->B2L_maps[i].size(); } this->getComms()->exchange_vectors(this->A->manager->B2L_maps, *(this->A), 0); // Initialize B2L_rings int num_neighbors = this->neighbors.size(); this->B2L_rings.resize(num_neighbors); for (int i = 0; i < num_neighbors; i++) { this->B2L_rings[i].resize(2); this->B2L_rings[i][0] = 0; this->B2L_rings[i][1] = this->B2L_maps[i].size(); } prep->initialize_B2L_maps_offsets(*(this->A), 1); delete prep; //Use the exchanged halo row matrices and the boundary/halo index lists to renumber and consolidate the matrix // Step 5: renumber all owned rows and columns this->reorder_matrix_owned(); // Step 6: renumber local_to_global_map 
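/* EXAMPLE (editorial sketch, not AMGX code): Step 6 rebuilds local_to_global_map by writing every
   owned row's new global index (base_index + local row), exchanging the halo so neighbors send us
   their renumbered ids, and keeping only the halo tail. A host-only analogue with a hypothetical
   helper name and the halo exchange stubbed out:

   #include <vector>
   #include <numeric>
   #include <cstdint>

   void rebuild_local_to_global(std::vector<int64_t> &l2g, int num_owned, int size_one_ring,
                                int64_t base_index)
   {
       std::vector<int64_t> global_ids(size_one_ring);
       std::iota(global_ids.begin(), global_ids.begin() + num_owned, base_index);  // owned rows
       // exchange_halo(global_ids) would overwrite entries [num_owned, size_one_ring)
       // with the owning rank's values; omitted here.
       l2g.assign(global_ids.begin() + num_owned, global_ids.begin() + size_one_ring);
   }
*/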
int num_owned_rows = this->A->manager->halo_offsets[0]; int size_one_ring; this->A->getOffsetAndSizeForView(FULL, &offset, &size_one_ring); I64Vector_d global_col_indices(size_one_ring); thrust::sequence(global_col_indices.begin(), global_col_indices.begin() + num_owned_rows, this->base_index() ); cudaCheckError(); global_col_indices.dirtybit = 1; this->exchange_halo(global_col_indices, global_col_indices.tag); thrust::copy(global_col_indices.begin() + num_owned_rows, global_col_indices.begin() + size_one_ring, this->local_to_global_map.begin()); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::renumber_P_R(Matrix_h &P, Matrix_h &R, Matrix_h &A_fine) { FatalError("Distributed classical AMG not implemented on host", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::renumber_P_R(Matrix_d &P, Matrix_d &R, Matrix_d &A_fine) { int cta_size = 256; int num_owned_fine_pts = A_fine.manager->halo_offsets[0]; int num_owned_coarse_pts, offset; // matrix Ac this->A->getOffsetAndSizeForView(OWNED, &offset, &num_owned_coarse_pts); // Renumber the owned col indices of P (not the halo columns ,since P.manager was created assunming some other numbering) int nnz_owned_fine_pts = P.row_offsets[num_owned_fine_pts]; int num_blocks_fine = min(4096, (nnz_owned_fine_pts + cta_size - 1) / cta_size); if (num_blocks_fine > 0) { renumber_P_col_indices <<< num_blocks_fine, cta_size>>>(P.col_indices.raw(), this->renumbering.raw(), num_owned_coarse_pts, nnz_owned_fine_pts); cudaCheckError(); } // Renumber the B2L_maps of P for (int i = 0; i < P.manager->neighbors.size(); i++) { thrust::copy(thrust::make_permutation_iterator(this->renumbering.begin(), P.manager->B2L_maps[i].begin()), thrust::make_permutation_iterator(this->renumbering.begin(), P.manager->B2L_maps[i].end()), P.manager->B2L_maps[i].begin()); } cudaCheckError(); // Don't renumber the L2H_maps or the halo // Renumber the local_to_global_map of matrix P (since neighbors renumbered their owned rows) // Swap owned rows of R IVector new_row_offsets(R.row_offsets.size()); int insert = 0; // Only renumber the owned rows int num_blocks_owned = min(4096, (num_owned_coarse_pts + cta_size - 1) / cta_size); if (num_blocks_owned > 0) { calc_rowlen_reorder <<< num_blocks_owned, cta_size >>>(R.row_offsets.raw(), new_row_offsets.raw(), this->renumbering.raw(), num_owned_coarse_pts, insert); cudaCheckError(); } thrust::exclusive_scan(new_row_offsets.begin(), new_row_offsets.begin() + num_owned_coarse_pts + 1, new_row_offsets.begin()); cudaCheckError(); // Copy the row_offsets for halo rows thrust::copy(R.row_offsets.begin() + num_owned_coarse_pts, R.row_offsets.end(), new_row_offsets.begin() + num_owned_coarse_pts); cudaCheckError(); // Reorder the rows of R (no need to reorder the column indices) int new_nnz = new_row_offsets[new_row_offsets.size() - 1]; int halo_offset = new_row_offsets[num_owned_coarse_pts]; typedef typename MatPrecisionMap<t_matPrec>::Type ValueTypeA; VVector new_values(new_nnz * R.get_block_size(), types::util< ValueTypeA >::get_zero()); IVector new_col_indices(new_nnz, 0); int num_blocks_total = min(4096, (R.get_num_rows() + cta_size - 1) / cta_size); if (num_blocks_total > 0) { reorder_R_matrix <32> <<< num_blocks_total, 
512>>>(R.row_offsets.raw(), R.col_indices.raw(), R.values.raw(), new_row_offsets.raw(), new_col_indices.raw(), new_values.raw(), this->renumbering.raw(), R.get_block_size(), R.get_num_rows(), num_owned_coarse_pts); cudaCheckError(); } R.col_indices.swap(new_col_indices); R.row_offsets.swap(new_row_offsets); R.values.swap(new_values); // Renumber the local_to_global_map (since neighbors have changed their owned numbering) if (P.manager->neighbors.size() != 0) { int size_one_ring = P.manager->halo_offsets[P.manager->neighbors.size()]; I64Vector_d global_col_indices(size_one_ring); thrust::sequence(global_col_indices.begin(), global_col_indices.begin() + num_owned_coarse_pts, this->base_index()); cudaCheckError(); global_col_indices.dirtybit = 1; P.manager->exchange_halo(global_col_indices, global_col_indices.tag); thrust::copy(global_col_indices.begin() + num_owned_coarse_pts, global_col_indices.begin() + size_one_ring, P.manager->local_to_global_map.begin()); cudaCheckError(); } DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>; prep->initialize_B2L_maps_offsets(P, 1); delete prep; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::createOneRingB2Lmaps() { // Input: // a matrix with N rows, whose column indices are local indices from 0 to N+M-1, // where M is the number of 1-ring halo vertices // The matrix also contains array "local_to_global_map" of size M, which stores the global index of each halo index // Ex: assuming a column has index N+K, where 0 <= K < M, then its global id is local_to_global_map[K] // The matrix also contains the part_offsets_h and part_offsets arrays, which store where each partition begins // Output: // This function creates all the necessary data to do 1-ring exchanges // i.e. list of 1-ring neighbors, B2L_maps for 1-ring, halo_offsets for 1-ring, // Also, the function reorders the halo indices, such that 1-ring indices are in the order // of neighbors, and therefore, exchange_halo doesn't have to be changed (i.e.
L2H = identity) // What is does: // Based on the global indices of its halo vertices, count the number of neighbors // For each neighbor, receive the halo indices that will be needed by neighbor // From those, create B2L_maps[0], which contains for all neighbors // This function assumes that: // part_offset is defined // B2L_maps int my_id = this->global_id(); int num_parts = this->get_num_partitions(); this->set_base_index(this->part_offsets_h[my_id]); this->set_index_range(this->part_offsets_h[my_id + 1] - this->part_offsets_h[my_id]); DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>; // This function creates the array neighbors, which contains a list of partitions to which data // needs to be sent and/or received prep->create_neighbors_v2(*(this->A)); // Here change the manager if some partitions have no neighbors this->getComms()->set_neighbors(this->neighbors.size()); prep->create_B2L_one_ring(*(this->A)); delete prep; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::createOneRingHaloRows() { // Input: // A matrix with 1-ring B2L_maps, 1-ring halo_offsets // Outputs: // A matrix with: 1-ring rows, // 2-ring B2L_maps, // 2-ring halo_offsets // 2-ring neighbors // Implement here: // Look at function create_B2L_from_maps, which calls create_rings, create_halo_btl, create_halo_rows and comms->exchange_matrix_halo DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>; prep->create_one_ring_halo_rows(*(this->A)); // I believe this can be removed since we don't use masked SpMV anymore prep->createRowsLists(*(this->A), false); delete prep; // this is not necessary anymore becasue we don't use latency hiding // however in future we might want to get back to this in case we want to use latency hiding //this->reorder_matrix(); } template <class TConfig> inline DistributedManagerBase<TConfig>::DistributedManagerBase( Matrix<TConfig> &a, INDEX_TYPE allocated_halo_depth, INDEX_TYPE num_import_rings, int num_neighbors, const VecInt_t *neighbors_) : m_fine_level_comms(NULL), A(&a), m_pinned_buffer_size(0), m_pinned_buffer(NULL), _num_interior_nodes(0), _num_boundary_nodes(0), _comms(NULL), has_B2L(false), neighbors(_neighbors), halo_rows_ref_count(0), halo_rows(NULL), halo_btl_ref_count(0), halo_btl(NULL), halo_ranges(_halo_ranges), halo_ranges_h(_halo_ranges_h), part_offsets(_part_offsets), part_offsets_h(_part_offsets_h), B2L_maps(_B2L_maps), L2H_maps(_L2H_maps), B2L_rings(_B2L_rings), m_is_root_partition(false), m_is_glued(false), m_is_fine_level_glued(false), m_is_fine_level_consolidated(false), m_is_fine_level_root_partition(false), m_use_cuda_ipc_consolidation(false), m_fixed_view_size(false) { cudaStreamCreateWithFlags(&m_int_stream, cudaStreamNonBlocking); cudaStreamCreateWithFlags(&m_bdy_stream, cudaStreamNonBlocking); if (num_import_rings != 1) { FatalError("num_rings > 1 not supported in fine_level consolidation", AMGX_ERR_NOT_IMPLEMENTED); } if (allocated_halo_depth != 1) { FatalError("allocated_halo_depth > 1 not supported in fine_level consolidation", AMGX_ERR_NOT_IMPLEMENTED); } this->set_num_halo_rings(num_import_rings); neighbors.resize(num_neighbors); cudaMemcpy(neighbors.raw(), neighbors_, num_neighbors * sizeof(VecInt_t), cudaMemcpyDefault); cudaCheckError(); } template <class TConfig> inline void DistributedManagerBase<TConfig>::cacheMaps(const VecInt_t *b2l_maps, const VecInt_t *b2l_ptrs, const 
VecInt_t *l2h_maps, const VecInt_t *l2h_ptrs) { int num_neighbors = this->neighbors.size(); this->cached_B2L_maps.resize(num_neighbors); this->cached_L2H_maps.resize(num_neighbors); for (int i = 0; i < num_neighbors; i++) { int size = b2l_ptrs[i + 1] - b2l_ptrs[i]; this->cached_B2L_maps[i].resize(size); int count = 0; for (int j = b2l_ptrs[i]; j < b2l_ptrs[i + 1]; j++) { this->cached_B2L_maps[i][count] = b2l_maps[j]; count++; } size = l2h_ptrs[i + 1] - l2h_ptrs[i]; this->cached_L2H_maps[i].resize(size); count = 0; for (int j = l2h_ptrs[i]; j < l2h_ptrs[i + 1]; j++) { this->cached_L2H_maps[i][count] = l2h_maps[j]; count++; } } } template <class TConfig> inline void DistributedManagerBase<TConfig>::cacheMapsOneRing() { int num_neighbors = this->neighbors.size(); this->cached_B2L_maps.resize(num_neighbors); this->cached_L2H_maps.resize(num_neighbors); for (int i = 0; i < num_neighbors; i++) { this->cached_B2L_maps[i] = this->B2L_maps[i]; this->cached_L2H_maps[i] = this->L2H_maps[i]; } } template <class TConfig> inline void DistributedManagerBase<TConfig>::cacheMapsOneRing(const VecInt_t **b2l_maps, const VecInt_t *b2l_sizes, const VecInt_t **l2h_maps, const VecInt_t *l2h_sizes) { int num_neighbors = this->neighbors.size(); this->cached_B2L_maps.resize(num_neighbors); this->cached_L2H_maps.resize(num_neighbors); // buffering in the case of GPU data. This shouldn't much affect performance std::vector<VecInt_t *> b2l_buffer, l2h_buffer; std::vector<VecInt_t> b2l_sizes_buffer, l2h_sizes_buffer; b2l_buffer.resize(num_neighbors); l2h_buffer.resize(num_neighbors); b2l_sizes_buffer.resize(num_neighbors); l2h_sizes_buffer.resize(num_neighbors); cudaMemcpy(&(b2l_sizes_buffer[0]), b2l_sizes, sizeof(VecInt_t) * num_neighbors, cudaMemcpyDefault); cudaMemcpy(&(l2h_sizes_buffer[0]), l2h_sizes, sizeof(VecInt_t) * num_neighbors, cudaMemcpyDefault); cudaMemcpy(&(b2l_buffer[0]), b2l_maps, sizeof(VecInt_t *) * num_neighbors, cudaMemcpyDefault); cudaMemcpy(&(l2h_buffer[0]), l2h_maps, sizeof(VecInt_t *) * num_neighbors, cudaMemcpyDefault); // caching all of the maps for (int i = 0; i < num_neighbors; i++) { int size = b2l_sizes_buffer[i]; this->cached_B2L_maps[i].resize(size); cudaMemcpy(&(this->cached_B2L_maps[i][0]), b2l_buffer[i], sizeof(VecInt_t) * size, cudaMemcpyDefault); cudaCheckError(); size = l2h_sizes_buffer[i]; this->cached_L2H_maps[i].resize(size); cudaMemcpy(&(this->cached_L2H_maps[i][0]), l2h_buffer[i], sizeof(VecInt_t) * size, cudaMemcpyDefault); cudaCheckError(); } } template <class TConfig> void DistributedManagerBase<TConfig>::setAConsolidationFlags( Matrix<TConfig> &in_A) { this->A = &in_A; AMG_Config *rsrc_cfg = this->A->getResources()->getResourcesConfig(); std::string scope; int consolidate_flag, cuda_ipc_flag; rsrc_cfg->getParameter<int>("fine_level_consolidation", consolidate_flag, "default", scope); rsrc_cfg->getParameter<int>("use_cuda_ipc_consolidation", cuda_ipc_flag, "default", scope); this->m_is_fine_level_consolidated = (consolidate_flag != 0); this->m_use_cuda_ipc_consolidation = (cuda_ipc_flag != 0); } template <class TConfig> void DistributedManagerBase<TConfig>::uploadMatrix(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> &in_A) { this->setAConsolidationFlags(in_A); if (this->m_is_fine_level_consolidated) { this->A->manager->consolidateAndUploadAll(n, nnz, block_dimx, block_dimy, row_ptrs, col_indices, data, diag, *(this->A)); } else { this->A->manager->initializeUploadReorderAll(n, 
nnz, block_dimx, block_dimy, row_ptrs, col_indices, data, diag, *(this->A)); } } template <class TConfig> void DistributedManagerBase<TConfig>::checkPinnedBuffer(size_t size) { if ((m_pinned_buffer_size < size) && (m_pinned_buffer != NULL)) { cudaFreeHost(m_pinned_buffer); m_pinned_buffer = NULL; m_pinned_buffer_size = 0; } if (m_pinned_buffer == NULL) { m_pinned_buffer_size = (size_t)(size * 1.1); cudaMallocHost(&m_pinned_buffer, m_pinned_buffer_size); } } template <class TConfig> DistributedManagerBase<TConfig>::~DistributedManagerBase() { if (m_pinned_buffer != NULL) { cudaFreeHost(m_pinned_buffer); } destroyComms(); // from childrens: cudaStreamDestroy(this->m_int_stream); cudaStreamDestroy(this->m_bdy_stream); if (!this->halo_rows_ref_count && this->halo_rows != NULL) { delete this->halo_rows; this->halo_rows = NULL; } if (!this->halo_btl_ref_count && this->halo_btl != NULL) { delete this->halo_btl; this->halo_btl = NULL; } } // if pointer is host pointer - returns data. If it is device pointer - copies it to the m_pinned_buffer and returns pointer to m_pinned_buffer template <class TConfig> void *DistributedManagerBase<TConfig>::getHostPointerForData(void *ptr, size_t size, int *allocated) { cudaError_t rc; cudaPointerAttributes att; void *ptr_h; cudaCheckError(); /* WARNING: We may accept the following types of allocation for ptr: 1. malloc [host memory] 2. cudaMalloc [device memory] 3. malloc + cudaHostRegister [AMGX_pin_memory/AMGX_unpin_memory host memory] 4. cudaHostAlloc [pinned host memory form the beginning] The correct way to conver these cases is the following: cudaPointerAttributes att; cudaError_t st = cudaPointerGetAttributes(&att, ptr); if (st == cudaSuccess) { //you are in case 2, 3 or 4. } else{ //you are in case 1. } The following pattern of checks should be implemented cudaPointerAttributes att; cudaError_t st = cudaPointerGetAttributes(&att, ptr); if (st == cudaSuccess) { //you are in case 2 or 4. } else{ st = cudaHostGetDevicePointer(ptr_on_device, ptr, 0); if (st == cudaSuccess){ //you are in case 3. } else{ //you are in case 1. } } The above pattern will be used whenever we need to process input data. Obs.: parameter size is in bytes and parameter allocated indicates whether memory was allocated and needs to be release later on. */ /* // original implementation cudaPointerGetAttributes(&att, ptr); if (att.hostPointer == NULL) { checkPinnedBuffer(size); cudaMemcpy(m_pinned_buffer, ptr, size, cudaMemcpyDefault); return m_pinned_buffer; } else { return ptr; } */ *allocated = 0; // get pointer to values on the device rc = cudaPointerGetAttributes(&att, ptr); if (rc == cudaSuccess) { //you are in case 2 or 4 from the above comment. if (att.hostPointer == NULL) { //you are in case 2 checkPinnedBuffer(size); rc = cudaMemcpy(m_pinned_buffer, ptr, size, cudaMemcpyDefault); if (rc != cudaSuccess) { FatalError("Could not copy into the temporary (host) storage. Try pinning the memory to avoid the cudaMemcpy.", AMGX_ERR_BAD_PARAMETERS); } ptr_h = m_pinned_buffer; *allocated = 1; } else { //you are in case 4 ptr_h = ptr; } } else { //you are in case 1 or 3 from the above comment ptr_h = ptr; } cudaGetLastError(); //to reset last error /* check for null pointers */ if (ptr_h == NULL) { FatalError("Result of (host) allocation of required temporary storage is NULL. Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS); } return ptr_h; } // if pointer is host pointer - returns data. 
If it is device pointer - copies it to the m_pinned_buffer and returns pointer to m_pinned_buffer template <class TConfig> const void *DistributedManagerBase<TConfig>::getHostPointerForData(const void *ptr, size_t size, int *allocated) { cudaError_t rc; cudaPointerAttributes att; void *ptr_h; cudaCheckError(); /* WARNING: We may accept the following types of allocation for ptr: 1. malloc [host memory] 2. cudaMalloc [device memory] 3. malloc + cudaHostRegister [AMGX_pin_memory/AMGX_unpin_memory host memory] 4. cudaHostAlloc [pinned host memory form the beginning] The correct way to conver these cases is the following: cudaPointerAttributes att; cudaError_t st = cudaPointerGetAttributes(&att, ptr); if (st == cudaSuccess) { //you are in case 2, 3 or 4. } else{ //you are in case 1. } The following pattern of checks should be implemented cudaPointerAttributes att; cudaError_t st = cudaPointerGetAttributes(&att, ptr); if (st == cudaSuccess) { //you are in case 2 or 4. } else{ st = cudaHostGetDevicePointer(ptr_on_device, ptr, 0); if (st == cudaSuccess){ //you are in case 3. } else{ //you are in case 1. } } The above pattern will be used whenever we need to process input data. Obs.: parameter size is in bytes and parameter allocated indicates whether memory was allocated and needs to be release later on. */ *allocated = 0; // get pointer to values on the device rc = cudaPointerGetAttributes(&att, ptr); if (rc == cudaSuccess) { //you are in case 2 or 4 from the above comment. if (att.hostPointer == NULL) { //you are in case 2 checkPinnedBuffer(size); rc = cudaMemcpy(m_pinned_buffer, ptr, size, cudaMemcpyDefault); if (rc != cudaSuccess) { FatalError("Could not copy into the temporary (host) storage. Try pinning the memory to avoid the cudaMemcpy.", AMGX_ERR_BAD_PARAMETERS); } ptr_h = m_pinned_buffer; *allocated = 1; cudaGetLastError(); //to reset last error return ptr_h; } else { //you are in case 4 cudaGetLastError(); //to reset last error return ptr; } } else { cudaGetLastError(); //to reset last error //you are in case 1 or 3 from the above comment return ptr; } } template <class TConfig> void *DistributedManagerBase<TConfig>::getDevicePointerForData(void *ptr, size_t size, int *allocated) { cudaError_t rc; cudaPointerAttributes att; void *ptr_d; cudaCheckError(); /* WARNING: We may accept the following types of allocation for ptr: 1. malloc [host memory] 2. cudaMalloc [device memory] 3. malloc + cudaHostRegister [AMGX_pin_memory/AMGX_unpin_memory host memory] 4. cudaHostAlloc [pinned host memory form the beginning] The correct way to conver these cases is the following: cudaPointerAttributes att; cudaError_t st = cudaPointerGetAttributes(&att, ptr); if (st == cudaSuccess) { //you are in case 2, 3 or 4. } else{ //you are in case 1. } The following pattern of checks should be implemented cudaPointerAttributes att; cudaError_t st = cudaPointerGetAttributes(&att, ptr); if (st == cudaSuccess) { //you are in case 2 or 4. } else{ st = cudaHostGetDevicePointer(ptr_on_device, ptr, 0); if (st == cudaSuccess){ //you are in case 3. } else{ //you are in case 1. } } The above pattern will be used whenever we need to process input data. Obs.: parameter size is in bytes and parameter allocated indicates whether memory was allocated and needs to be release later on. */ *allocated = 0; // get pointer to values on the device rc = cudaPointerGetAttributes(&att, ptr); if (rc == cudaSuccess) { //you are in case 2 or 4 from the above comment. 
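/* EXAMPLE (editorial sketch, not AMGX code): the case analysis described in the WARNING comment
   above can be factored into a small helper. The helper name is hypothetical; it relies on the
   same behavior the surrounding code assumes, namely that on the older CUDA runtimes targeted
   here cudaPointerGetAttributes fails for plain malloc'ed host memory.

   #include <cuda_runtime.h>

   // true if ptr is known to the CUDA runtime (cases 2-4), false for plain host malloc (case 1)
   static bool known_to_cuda_runtime(const void *ptr)
   {
       cudaPointerAttributes att;
       cudaError_t st = cudaPointerGetAttributes(&att, ptr);
       cudaGetLastError();   // clear the sticky error an unregistered host pointer leaves behind
       return st == cudaSuccess;
   }
*/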
ptr_d = (void *)att.devicePointer; } else { //you are in case 1 or 3 from the above comment rc = cudaHostGetDevicePointer(&ptr_d, ptr, 0); if (rc != cudaSuccess) { //you are in case 1 rc = cudaMalloc(&ptr_d, size); if (rc != cudaSuccess) { FatalError("Could not allocate required temporary storage. Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS); } rc = cudaMemcpy(ptr_d, ptr, size, cudaMemcpyDefault); if (rc != cudaSuccess) { FatalError("Could not copy into the temporary storage. Try pinning the memory to avoid the cudaMemcpy.", AMGX_ERR_BAD_PARAMETERS); } *allocated = 1; } } /* check for null pointers */ if (ptr_d == NULL) { FatalError("Result of allocation of required temporary storage is NULL. Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS); } cudaGetLastError(); //to reset last error return ptr_d; } template <class TConfig> const void *DistributedManagerBase<TConfig>::getDevicePointerForData(const void *ptr, size_t size, int *allocated) { cudaError_t rc; cudaPointerAttributes att; void *ptr_d; cudaCheckError(); /* WARNING: We may accept the following types of allocation for ptr: 1. malloc [host memory] 2. cudaMalloc [device memory] 3. malloc + cudaHostRegister [AMGX_pin_memory/AMGX_unpin_memory host memory] 4. cudaHostAlloc [pinned host memory form the beginning] The correct way to conver these cases is the following: cudaPointerAttributes att; cudaError_t st = cudaPointerGetAttributes(&att, ptr); if (st == cudaSuccess) { //you are in case 2, 3 or 4. } else{ //you are in case 1. } The following pattern of checks should be implemented cudaPointerAttributes att; cudaError_t st = cudaPointerGetAttributes(&att, ptr); if (st == cudaSuccess) { //you are in case 2 or 4. } else{ st = cudaHostGetDevicePointer(ptr_on_device, ptr, 0); if (st == cudaSuccess){ //you are in case 3. } else{ //you are in case 1. } } The above pattern will be used whenever we need to process input data. Obs.: parameter size is in bytes and parameter allocated indicates whether memory was allocated and needs to be release later on. */ *allocated = 0; // get pointer to values on the device rc = cudaPointerGetAttributes(&att, ptr); if (rc == cudaSuccess) { //you are in case 2 or 4 from the above comment. cudaGetLastError(); //to reset last error return (const void *)att.devicePointer; } else { //you are in case 1 or 3 from the above comment rc = cudaHostGetDevicePointer(&ptr_d, (void *)ptr, 0); if (rc != cudaSuccess) { //you are in case 1 rc = cudaMalloc(&ptr_d, size); if (rc != cudaSuccess) { FatalError("Could not allocate required temporary storage. Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS); } rc = cudaMemcpy(ptr_d, ptr, size, cudaMemcpyDefault); if (rc != cudaSuccess) { FatalError("Could not copy into the temporary storage. Try pinning the memory to avoid the cudaMemcpy.", AMGX_ERR_BAD_PARAMETERS); } *allocated = 1; cudaGetLastError(); //to reset last error return (const void *)ptr_d; } } /* check for null pointers */ if (ptr_d == NULL) { FatalError("Result of allocation of required temporary storage is NULL. 
Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS); } // shouldn't get there cudaGetLastError(); //to reset last error return NULL; } template <class TConfig> void initializeMatrixCopyAll(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> *A) { typedef typename TConfig::MatPrec mat_value_type; A->resize( n, n, nnz, block_dimx, block_dimy ); //Upload the entire matrix cudaMemcpy( A->row_offsets.raw(), row_ptrs, (n + 1) * sizeof(int), cudaMemcpyDefault ); cudaCheckError(); cudaMemcpy( A->col_indices.raw(), col_indices, (nnz) * sizeof(int), cudaMemcpyDefault ); cudaCheckError(); cudaMemcpy( A->values.raw(), (mat_value_type *)data, (nnz * block_dimx * block_dimy) * sizeof(mat_value_type), cudaMemcpyDefault ); cudaCheckError(); if (diag) { cudaMemcpy( A->values.raw() + A->diagOffset()*A->get_block_size(), (mat_value_type *)diag, (n * block_dimx * block_dimy) * sizeof(mat_value_type), cudaMemcpyDefault ); } else { A->computeDiagonal(); } cudaCheckError(); } template <class TConfig> void DistributedManagerBase<TConfig>::updateMapsReorder() { int my_id = this->getComms()->get_global_id(); DistributedComms<TConfig> *comms_tmp = this->getComms(); DistributedComms<TConfig> **comms_ = &comms_tmp; // Copy B2L_maps in their final place int num_neighbors = this->neighbors.size(); B2L_maps.resize(num_neighbors); L2H_maps.resize(num_neighbors); for (int i = 0; i < num_neighbors; i++) { B2L_maps[i] = this->cached_B2L_maps[i]; L2H_maps[i] = this->cached_L2H_maps[i]; } //Create a DistributedArranger object to map further halo rings and to construct halo row matrices and exchange them (if halo_coloring != LAST) DistributedArranger<TConfig> *prep = new DistributedArranger<TConfig>; prep->create_B2L_from_maps( (*(this->A)), my_id, this->num_halo_rings(), neighbors, B2L_maps, L2H_maps, B2L_rings, comms_, &halo_rows, &halo_btl); DistributedManagerBaseInit(my_id, 0, this->A->get_num_rows(), *(this->A), comms_, NULL, NULL); //Use the exchanged halo row matrices and the boundary/halo index lists to renumber and consolidate the matrix this->reorder_matrix(); prep->initialize_B2L_maps_offsets(*(this->A), this->num_halo_rings()); delete prep; } template <class TConfig> void DistributedManagerBase<TConfig>::initializeUploadReorderAll(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> &in_A) { this->A = &in_A; initializeMatrixCopyAll<TConfig>(n, nnz, block_dimx, block_dimy, row_ptrs, col_indices, data, diag, this->A); this->updateMapsReorder(); } template <class TConfig> void DistributedManagerBase<TConfig>::destroyComms() { if ( (this->_comms) != NULL ) { if (this->_comms->decr_ref_count()) { delete (this->_comms); this->_comms = NULL; } } if ( (this->m_fine_level_comms) != NULL) { if (this->m_fine_level_comms->decr_ref_count()) { delete (this->m_fine_level_comms); this->m_fine_level_comms = NULL; } } } template <class TConfig> void DistributedManagerBase<TConfig>::initComms(Resources *rsrc) { this->createComms(rsrc); int my_id = this->getComms()->get_global_id(); int num_parts = this->getComms()->get_num_partitions(); this->set_global_id(my_id); this->set_num_partitions(num_parts); } template <class TConfig> void DistributedManagerBase<TConfig>::createComms(Resources *rsrc) { // create communicator #ifdef AMGX_WITH_MPI destroyComms(); if (rsrc == NULL) FatalError("Resources should not be NULL", 
AMGX_ERR_INTERNAL); MPI_Comm *mpi_comm = rsrc->getMpiComm(); AMG_Config *cfg = rsrc->getResourcesConfig(); std::string comm_value, comm_scope; cfg->getParameter<std::string>("communicator", comm_value, "default", comm_scope); int rank = -1; MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (comm_value == "MPI_DIRECT") { _comms = new CommsMPIDirect<TConfig>(*cfg, comm_scope, mpi_comm); if ( rank == 0 ) { std::cout << "Using CUDA-Aware MPI (GPU Direct) communicator..." << std::endl; } } else if (comm_value == "MPI") { CommsMPIHostBufferStream<TConfig> *ptr_comm = new CommsMPIHostBufferStream<TConfig>(*cfg, comm_scope, mpi_comm); _comms = ptr_comm; if ( rank == 0 ) { std::cout << "Using Normal MPI (Hostbuffer) communicator..." << std::endl; } } else { throw std::string("Bad communicator value"); } #endif } template <class TConfig> void DistributedManagerBase<TConfig>::malloc_export_maps(VecInt_t ***b2l_maps_e, VecInt_t **b2l_maps_sizes_e, VecInt_t ***l2h_maps_e, VecInt_t **l2h_maps_sizes_e) { *b2l_maps_e = (VecInt_t **) malloc(sizeof(VecInt_t *)*this->num_neighbors()); *l2h_maps_e = (VecInt_t **) malloc(sizeof(VecInt_t *)*this->num_neighbors()); *b2l_maps_sizes_e = (VecInt_t *) malloc(sizeof(VecInt_t) * (this->num_neighbors())); *l2h_maps_sizes_e = (VecInt_t *) malloc(sizeof(VecInt_t) * (this->num_neighbors())); for (int i = 0; i < this->num_neighbors(); i++) { (*b2l_maps_sizes_e)[i] = B2L_maps[i].size(); (*l2h_maps_sizes_e)[i] = L2H_maps[i].size(); (*b2l_maps_e)[i] = (VecInt_t *) malloc(sizeof(VecInt_t) * ( (*b2l_maps_sizes_e)[i]) ); if (L2H_maps[i].size() != 0) { (*l2h_maps_e)[i] = (VecInt_t *) malloc(sizeof(VecInt_t) * ( (*l2h_maps_sizes_e)[i]) ); thrust::copy(L2H_maps[i].begin(), L2H_maps[i].end(), (*l2h_maps_e)[i]); } thrust::copy(B2L_maps[i].begin(), B2L_maps[i].end(), (*b2l_maps_e)[i]); } cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::createRenumbering(IVector &renumbering) { int num_neighbors = this->neighbors.size(); // still renumber if the number of neighbors = 0, to support non-symmetric matrices // if (num_neighbors == 0) return; /* EXAMPLE Example matrix, partition 1 arrives with state: A.row_offsets = [0 4 11 15 20] A.col_indices = [4 0 1 2 4 5 0 1 2 3 7 0 1 2 3 1 2 3 6 7] num_neighbors=2; neighbors = [0 2] B2L_rings[[0 2 4][0 2 4]] B2L_maps[[0 1| 2 3][1 3| 0 2]] L2H_maps (and halo_lists) [[4 5][6 7]] */ int size = 0; if (this->L2H_maps.size()) { size = thrust::reduce(this->A->col_indices.begin(), this->A->col_indices.end(), int(0), thrust::maximum<int>()) + 1; //Sufficient to do reduction on lth maps cudaCheckError(); } else { size = this->A->get_num_rows(); } int rings = (this->B2L_rings.size() > 0) ? 
this->B2L_rings[0].size() - 1 : 0; //initial size to size+1 so we have the total size after a scan renumbering.resize(size + 1); int global_size = size; // // Step 1 - in the main matrix, separate interior and boundary nodes (1/0 in flagArray), renumber interior ones with an exclusive scan // IVector flagArray(size + 1); thrust::fill(flagArray.begin(), flagArray.begin() + size + 1, 1); cudaCheckError(); //sets 1 for interior nodes, 0 for boundary node for (int i = 0; i < num_neighbors; i++ ) { int size = this->B2L_rings[i][1]; int num_blocks = min(4096, (size + 127) / 128); if (size > 0) { remove_boundary_kernel <<< num_blocks, 128>>>(flagArray.raw(), this->B2L_maps[i].raw(), size); } //If there are any L2H maps if (this->L2H_maps.size() && this->L2H_maps[i].size()) { int size = this->L2H_maps[i].size(); int num_blocks = min(4096, (size + 127) / 128); remove_boundary_kernel <<< num_blocks, 128>>>(flagArray.raw(), this->L2H_maps[i].raw(), size); } cudaCheckError(); } //gets the renumbering of interior nodes thrust::exclusive_scan(flagArray.begin(), flagArray.begin() + size + 1, renumbering.begin()); cudaCheckError(); /* EXAMPLE After removing 1-ring boundary nodes and halo nodes from flagArray: [0 0 1 0 0 0 0 0] After exclusive scan, which gives renumbering for interior nodes (only node #2) renumbering: [0 0 0 1 1 1 1 1] */ // // Step 2 - Renumber nodes that are in the boundary, stepping through each B2L map, and renumbering ones that have not been renumbered yet // //what is the biggest B2L size INDEX_TYPE max_size = 0; for (int i = 0; i < num_neighbors; i++) { max_size = max_size > this->B2L_rings[i][1] ? max_size : this->B2L_rings[i][1]; if (this->L2H_maps.size()) { max_size = max_size > this->L2H_maps[i].size() ? max_size : this->L2H_maps[i].size(); } } //allocate work vectors (should be pretty small) that are used to renumber boundary nodes IVector boundary_renum_flags(max_size); IVector boundary_renum(max_size); //the number of renumbered nodes so far int max_element = renumbering[size]; this->_num_interior_nodes = max_element; this->_num_boundary_nodes = this->A->get_num_rows() - max_element; renumbering.resize(size); /* EXAMPLE size = 8 max_size = 2, max_element = 1, num_interior_nodes=1, num_boundary_nodes = 4-1 = 3 */ for (int i = 0; i < num_neighbors; i++) { //find nodes that are part of the current boundary and they haven't been renumbered yet thrust::fill(boundary_renum_flags.begin(), boundary_renum_flags.begin() + max_size, 0); int size = this->B2L_rings[i][1]; int num_blocks = min(4096, (size + 191) / 192); if (size > 0) get_unassigned_kernel <<< num_blocks, 192>>>(flagArray.raw(), this->B2L_maps[i].raw(), boundary_renum_flags.raw(), size, global_size /*,rank*/); //calculate the local renumbering (within this boundary region) of these nodes thrust::exclusive_scan(boundary_renum_flags.begin(), boundary_renum_flags.begin() + max_size, boundary_renum.begin()); //apply renumbering to the big numbering table if (size > 0) set_unassigned_kernel <<< num_blocks, 192>>>(boundary_renum_flags.raw(), boundary_renum.raw(), this->B2L_maps[i].raw(), renumbering.raw(), size, max_element, global_size /*,rank*/); //update the number of renumbered nodes max_element += boundary_renum[max_size - 1] + boundary_renum_flags[max_size - 1]; /* EXAMPLE for neighbor 0 (ID 0) boundary_renum_flags = [0 0], size = 2, flagArray [0 0 1 0 0 0 0 0] get_unassigned_kernel's output: boundary_renum_flags = [1 1] flagArray [1 1 1 0 0 0 0 0] after exclusive scan: boundary_renum [0 1] set_unassigned_kernel updates 
these arrays and renumbers B2L map: renumbering = [1 2 0 1 1 1 1 1] B2L_maps[0] = [1 2| 2 3] (note that after element 3 in renumbering and after element 2 we have invalid/not yet updated values) max_element = 3 for neighbor 1 (ID 2) get_unassigned_kernels's output: boundary_renum_flags [0 1] flagArray [1 1 1 1 0 0 0 0] after exclusive scan boundary_renum [0 0] set_unassigned_kernel renumbering [1 2 0 3 1 1 1 1] B2L_maps[1] = [2 3| 0 2] max_element = 4 */ } cudaCheckError(); //Get renumbering for halo indices if (this->L2H_maps.size()) { //TODO: simplify this, we don't need to check whether it has already been renumbered, there is no overlap between halos for (int i = 0; i < num_neighbors; i++) { //find nodes that are part of the current boundary and they haven't been renumbered yet thrust::fill(boundary_renum_flags.begin(), boundary_renum_flags.begin() + max_size, 0); int size = this->L2H_maps[i].size(); int num_blocks = min(4096, (size + 191) / 192); if (size > 0) get_unassigned_kernel <<< num_blocks, 192>>>(flagArray.raw(), this->L2H_maps[i].raw(), boundary_renum_flags.raw(), size, global_size /*,rank*/); //calculate the local renumbering (within this boundary region) of these nodes thrust::exclusive_scan(boundary_renum_flags.begin(), boundary_renum_flags.begin() + max_size, boundary_renum.begin()); //apply renumbering to the big numbering table if (size > 0) set_unassigned_kernel <<< num_blocks, 192>>>(boundary_renum_flags.raw(), boundary_renum.raw(), this->L2H_maps[i].raw(), renumbering.raw(), size, max_element, global_size /*,rank*/); //update the number of renumbered nodes max_element += boundary_renum[max_size - 1] + boundary_renum_flags[max_size - 1]; /* EXAMPLE for neighbor 0 (ID 0) boundary_renum_flags = [0 0], size = 2, flagArray [1 1 1 1 0 0 0 0] get_unassigned_kernel's output: boundary_renum_flags = [1 1] flagArray [1 1 1 1 1 1 0 0] after exclusive scan: boundary_renum [0 1] set_unassigned_kernel updates these arrays and renumbers B2L map: renumbering = [1 2 0 3 4 5 1 1] L2H_maps[0] = [4 5] max_element = 6 for neighbor 1 (ID 2) get_unassigned_kernels's output: boundary_renum_flags [1 1] flagArray [1 1 1 1 1 1 1 1] after exclusive scan boundary_renum [0 1] set_unassigned_kernel renumbering = [1 2 0 3 4 5 6 7] L2H_maps[1] = [6 7] max_element = 8 */ } cudaCheckError(); } //apply renumbering to further halo rings too if (rings > 1) { for (int i = 0; i < num_neighbors; i++) { int size = this->B2L_rings[i][this->B2L_rings[i].size() - 1] - this->B2L_rings[i][1]; int num_blocks = min(4096, (size + 127) / 128); renumber_b2l_maps <<< num_blocks, 128>>>(this->B2L_maps[i].raw() + this->B2L_rings[i][1], renumbering.raw(), size, global_size /*, rank*/); } cudaCheckError(); } /* EXAMPLE renumbers further boundary rings as listed in B2L_maps, since they have not been replaced yet with their renumbered values B2L_maps [[1 2| 0 3][2 3| 1 0]] */ } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::reorder_matrix_owned() { int num_neighbors = this->neighbors.size(); int size = this->A->get_num_rows(); int num_blocks = min(4096, (size + 511) / 512); int rings = (this->B2L_rings.size() > 0) ? 
this->B2L_rings[0].size() - 1 : 0; this->set_num_halo_rings(rings); int diag = this->A->hasProps(DIAG); if (diag) { FatalError("External diag not supported in classical path", AMGX_ERR_NOT_IMPLEMENTED); } // // Step 1 & 2 - create renumbering // this->createRenumbering(this->renumbering); //now we have the full renumbering table in renum, calculate the inverse this->inverse_renumbering.resize(this->renumbering.size()); if (this->renumbering.size() > 1) { calc_inverse_renumbering <<< min(4096, ((int)this->renumbering.size() + 511) / 512), 512 >>> (this->renumbering.raw(), this->inverse_renumbering.raw(), this->renumbering.size()); cudaCheckError(); } // // Step 4 - calculate number/offset of nodes in the halos from the neighbors, ring by ring // this->halo_offsets.resize(num_neighbors + 1); this->halo_offsets[0] = size; for (int i = 0; i < num_neighbors; i++) { this->halo_offsets[i + 1] = this->halo_offsets[i] + this->L2H_maps[i].size(); } this->set_num_halo_rows(this->halo_offsets[this->halo_offsets.size() - 1] - size); int nh = this->num_halo_rows(); int total_rows = size + nh; cudaCheckError(); // // Step 6 - renumber halo matrices and calculate row length (to eventually append to the big matrix) // int insert = 0; //recalculate row_offsets IVector new_row_offsets(size + 1); if (num_blocks > 0) { calc_rowlen_reorder <<< num_blocks, 512>>>(this->A->row_offsets.raw(), new_row_offsets.raw(), this->renumbering.raw(), size, insert); cudaCheckError(); } thrust::copy(thrust::make_permutation_iterator(this->renumbering.begin(), this->A->col_indices.begin()), thrust::make_permutation_iterator(this->renumbering.begin(), this->A->col_indices.end()), this->A->col_indices.begin()); cudaCheckError(); //row_offsets array created by exclusive scan of row sizes thrust::exclusive_scan(new_row_offsets.begin(), new_row_offsets.begin() + size + 1, new_row_offsets.begin()); cudaCheckError(); // // Step 7 - consolidate column indices and values // int new_nnz = new_row_offsets[new_row_offsets.size() - 1]; typedef typename MatPrecisionMap<t_matPrec>::Type ValueTypeA; VVector new_values((new_nnz + 1 )* this->A->get_block_size(), types::util<ValueTypeA>::get_zero()); IVector new_col_indices(new_nnz, 0); //reorder based on row permutation if (num_blocks > 0) { reorder_whole_matrix <32> <<< num_blocks, 512>>>(this->A->row_offsets.raw(), this->A->col_indices.raw(), this->A->values.raw(), new_row_offsets.raw(), new_col_indices.raw(), new_values.raw(), this->renumbering.raw(), this->A->get_block_size(), size, insert); cudaCheckError(); } //create and append halo rows size //create an identity matrix in CSR format int nnz = this->A->get_num_nz(); IVector identity_csr_rows(nh + 1); IVector identity_csr_cols(nh); VVector identity_csr_vals(nh, types::util<ValueTypeA>::get_one()); //needs to be changed to MVector, but this definition is messed up in the header file (should fix later) thrust::sequence(identity_csr_rows.begin(), identity_csr_rows.end()); thrust::sequence(identity_csr_cols.begin(), identity_csr_cols.end()); /*for example, 2x2 identity_csr matrix is created: identity_csr_rows = { 0, 1, 2 } identity_csr_cols = { 0, 1 } identity_csr_vals = { 1.0, 1.0 } */ //shift identity tmatrix by size = this->A->get_num_rows(); thrust::transform(identity_csr_rows.begin(), identity_csr_rows.end(), thrust::constant_iterator<INDEX_TYPE>(nnz), identity_csr_rows.begin(), thrust::plus<INDEX_TYPE>()); thrust::transform(identity_csr_cols.begin(), identity_csr_cols.end(), thrust::constant_iterator<INDEX_TYPE>(size), 
identity_csr_cols.begin(), thrust::plus<INDEX_TYPE>()); /*for example, 2x2 identity_csr matrix is created: identity_csr_rows = { 0, 1, 2 } identity_csr_cols = {size, size+1 } identity_csr_vals = { 1.0, 1.0 } */ /* WARNING: you must be very careful with the view you are setting (cuurently the view coming here by default is ALL = FULL). If - classical path is selected then the createOneRingHaloRows -> create_one_ring_halo_rows -> append_halo_rows routine will be called. It will overwrite the halo rows setup here (and will use view OWNED, which will ignore the halo rows setup here, to determine how the new halo rows should be placed). - aggregation path is selected then the extra rows setup here will be used in the R*A*P product, where (in order to match dimensions of R and P) it is assumed that (the local partition) matrix A is square, therefore it must be padded by identity rows at the bottom to compensate for the "extra" columns that are outside of the main square part. The old routines for the aggregation path do this padding at the end of the reorder_matrix routine below. */ //ViewType v = this->A->currentView(); //this->A->setView(ALL); //Approach 1: use existing routine to append the identity matrix to the existing one // (seems like too much overhead, also need identity matrix per neighbor) //DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>; //prep->append_halo_rows(this->A, identity_csr_rows, identity_csr_cols, identity_csr_vals); //delete prep; //Approach 2: custom for this routine new_row_offsets.resize(total_rows + 1); new_col_indices.resize(nnz + nh); new_values.resize(nnz + nh + 1); //extra 1 element stores zero at the end (to follow the original design) //new_values[nnz]=-1; //marker to track the last element thrust::copy(identity_csr_rows.begin(), identity_csr_rows.end(), new_row_offsets.begin() + size ); thrust::copy(identity_csr_cols.begin(), identity_csr_cols.end(), new_col_indices.begin() + nnz); thrust::copy(new_values.begin() + nnz, new_values.begin() + nnz + 1, new_values.begin() + nnz + nh); thrust::copy(identity_csr_vals.begin(), identity_csr_vals.end(), new_values.begin() + nnz); /* WARNING: see above. 
*/ this->A->set_num_cols(total_rows); this->A->set_num_rows(total_rows); this->A->col_indices.swap(new_col_indices); new_row_offsets.resize(total_rows + 1); this->A->row_offsets.swap(new_row_offsets); new_row_offsets.swap(this->old_row_offsets); this->A->values.swap(new_values); this->A->m_seq_offsets.resize(total_rows + 1); thrust::sequence(this->A->m_seq_offsets.begin(), this->A->m_seq_offsets.end()); cudaCheckError(); //TODO: only do this if AMG_Config matrix_halo_exchange!=2 this->A->delProps(COO); if (!insert) { this->A->computeDiagonal(); } this->set_initialized(this->A->row_offsets); this->A->setView(OWNED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::reorder_matrix() { int num_neighbors = this->neighbors.size(); if (num_neighbors == 0) { return; } int size = this->A->get_num_rows(); int num_blocks = min(4096, (size + 511) / 512); int rings = this->B2L_rings[0].size() - 1; this->set_num_halo_rings(rings); int diag = this->A->hasProps(DIAG); std::vector<Matrix<TConfig_d> > &halo_rows = *this->halo_rows; std::vector<DistributedManager<TConfig_d> > &halo_btl = *this->halo_btl; /* EXAMPLE The example matrix, on partition 1 arrives at this point with the following state: num_rings=2 A.num_rows = 4; A.num_nz = 20 A.row_offsets = [0 4 11 15 20] A.col_indices = [4 0 1 2 4 5 0 1 2 3 7 0 1 2 3 1 2 3 6 7] num_neighbors=2; neighbors = [0 2] B2L_rings[[0 2 4][0 2 4]] B2L_maps[[0 1| 2 3][1 3| 0 2]] L2H_maps (and halo_lists) [[4 5][6 7]] With the exchange halo rows: halo_btl[0] (received from neighbor ID 0) global_id = 0; base_index=0; index_range=6; B2L_rings[0] = [0 2 4]; B2L_maps[0] = [2 3| 0 1] L2H_maps = [4 5] halo_rows[0].row_offsets = [0 5 13 17 21] halo_rows[0].col_indices = [1 2 3 4 5 0 1 2 3 4 5 6 7 0 1 3 6 0 1 2 3] halo_btl[1] (received from neighbor ID 2) global_id = 2; base_index=0; index_range=8; B2L_rings[0] = [0 2 4]; B2L_maps[0] = [1 2| 0 3] L2H_maps = [6 7] halo_rows[1].row_offsets = [0 4 11 16 20] halo_rows[1].col_indices = [7 1 2 3 5 6 7 0 1 2 3 4 5 0 2 3 0 1 2 3] */ // // Step 1 & 2 - create renumbering // this->createRenumbering(this->renumbering); cudaCheckError(); /* EXAMPLE this->renumbering = [1 2 0 3 4 5 6 7] B2L_maps = [[1 2| 0 3][2 3| 1 0]] L2H_maps = [[4 5][6 7]] */ // // Step 3 - given a full renumbering of owned nodes, calculate inverse renumbering // //now we have the full renumbering table in renum, calculate the inverse this->inverse_renumbering.resize(this->renumbering.size()); calc_inverse_renumbering <<< min(4096, ((int)this->renumbering.size() + 511) / 512), 512 >>> (this->renumbering.raw(), this->inverse_renumbering.raw(), this->renumbering.size()); cudaCheckError(); /* EXAMPLE this->inverse_renumbering = [2 0 1 3 4 5 6 7] */ // // Step 4 - calculate number/offset of nodes in the halos from the neighbors, ring by ring // this->halo_offsets.resize(rings * num_neighbors + 1, 0); for (int ring = 0; ring < rings; ring++) { for (int i = 0; i < num_neighbors; i++) { this->halo_offsets[ring * num_neighbors + i] = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring]; } } thrust::exclusive_scan(this->halo_offsets.begin(), this->halo_offsets.end(), this->halo_offsets.begin(), size); cudaCheckError(); this->set_num_halo_rows(this->halo_offsets[this->halo_offsets.size() - 1] - size); int total_rows = size + this->num_halo_rows(); if (total_rows < this->renumbering.size()) { FatalError("total rows < 
renumbering.size(), send/recv maps should cover all matrix halo columns", AMGX_ERR_NOT_IMPLEMENTED); } if (total_rows > this->renumbering.size()) { this->A->getResources()->warning("# owned nodes + # halo nodes > matrix columns: send/recv maps have some unreferences halo indices, they are not directly connected to our partition and therefore we won't compute them, please use 2-ring comms maps if you want to specify 2nd ring neighbors"); } cudaCheckError(); /* EXAMPLE halo_offsets [2 2 2 2] after exclusive scan: 4 + [0 2 4 6 8] = [4 6 8 10 12] num_halo_rows = 8, total_rows = 12 */ // // Step 5 - create big mapping table of all halo indices we received (this may use a little too much memory) // //count number of fine rows of neighbors thrust::host_vector<INDEX_TYPE> neighbor_rows(num_neighbors + 1); int max_num_rows = 0; for (int i = 0; i < num_neighbors; i++) { neighbor_rows[i] = halo_btl[i].index_range(); max_num_rows = max_num_rows > halo_rows[i].get_num_rows() ? max_num_rows : halo_rows[i].get_num_rows(); } thrust::exclusive_scan(neighbor_rows.begin(), neighbor_rows.end(), neighbor_rows.begin()); cudaCheckError(); int total_rows_of_neighbors = neighbor_rows[num_neighbors]; /* EXAMPLE neigbor_rows = [0 6 14] total_rows_of_neighbors = 14 */ IVector halo_mapping(total_rows_of_neighbors); thrust::fill(halo_mapping.begin(), halo_mapping.end(), -1); cudaCheckError(); //ring by ring, neighbor by neighbor assign sequentially increasing numbers for halo nodes for (int ring = 0; ring < rings; ring++) { for (int i = 0; i < num_neighbors; i++) { int size = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring]; int num_blocks = min(4096, (size + 127) / 128); //This renumbering has to result in the same renumbering that comes out of L2H renumbering create_halo_mapping <<< num_blocks, 128>>>(halo_mapping.raw() + neighbor_rows[i], halo_btl[i].B2L_maps[0].raw() + halo_btl[i].B2L_rings[0][ring], halo_btl[i].base_index(), this->halo_offsets[ring * num_neighbors + i], size); cudaCheckError(); /* EXAMPLE ring 0 neighbor 0 - halo_btl[0].B2L_maps[0] = [2 3| 0 1] halo_btl[0].L2H_maps = [4 5] halo_mapping = [-1 -1 4 5 -1 -1 |-1 -1 -1 -1 -1 -1 -1 -1] ring 0 neighbor 1 - halo_btl[1].B2L_maps[0] = [1 2| 0 3] halo_btl[1].L2H_maps = [6 7] halo_mapping = [-1 -1 4 5 -1 -1 |-1 6 7 -1 -1 -1 -1 -1] ring 1 neighbor 0 - halo_btl[0].B2L_maps[0] = [2 3| 0 1] halo_btl[0].L2H_maps = [4 5] halo_mapping = [8 9 4 5 -1 -1 |-1 6 7 -1 -1 -1 -1 -1] ring 1 neighbor 1 - halo_btl[1].B2L_maps[0] = [1 2| 0 3] halo_btl[1].L2H_maps = [6 7] halo_mapping = [8 9 4 5 -1 -1 |10 6 7 11 -1 -1 -1 -1] */ } } cudaCheckError(); for (int i = 0; i < num_neighbors; i++) { int size = halo_btl[i].L2H_maps[0].size(); int num_blocks = min(4096, (size + 127) / 128); //Map the column indices of the halo rows that point back to boundary nodes apply_h2l2b_mapping <<< num_blocks, 128>>>(halo_mapping.raw() + neighbor_rows[i], halo_btl[i].L2H_maps[0].raw(), halo_btl[i].base_index(), this->B2L_maps[i].raw(), size); cudaCheckError(); /* EXAMPLE neighbor 0 - mapping back to our own (boundary) indices halo_mapping = [8 9 4 5 1 2 |10 6 7 11 -1 -1 -1 -1] neighbor 1 - mapping back to our own (boundary) indices halo_mapping = [8 9 4 5 1 2 |10 6 7 11 -1 -1 2 3] */ } cudaCheckError(); /* EXAMPLE neighbor_rows = [0 6 14] halo_mapping = [8 9 4 5 1 2 |10 6 7 11 -1 -1 2 3] The first part (0-6) of halo_mapping gives a local index for all the indices that we want to know about in halo_btl[0] The second part (7-14) gives local indices for halo_btl[1], that is both halo 
ring there, and the column indices representing vertices in this partition's boundary. Note that it does not give indices (-1) for vertices 5 and 6 in neighbor 1 (ID 2), which are column indices connecting it to neighbor 0, hence the two halo regions are not connected */ // // Step 6 - renumber halo matrices and calculate row length (to eventually append to the big matrix) // int insert = 0; if (this->A->hasProps(DIAG) && insertDiagonals) { insert = 1; } diag = diag && !insertDiagonals; //recalculate row_offsets IVector new_row_offsets(size + this->num_halo_rows() + 1); calc_rowlen_reorder <<< num_blocks, 512>>>(this->A->row_offsets.raw(), new_row_offsets.raw(), this->renumbering.raw(), size, insert); cudaCheckError(); IVector neighbor_rows_d(num_neighbors + 1); thrust::copy(neighbor_rows.begin(), neighbor_rows.end(), neighbor_rows_d.begin()); cudaCheckError(); /* EXAMPLE get row length according to renumbering new_row_offsets = [4 4 7 5 0 0 0 0 0 0 0 0 0] */ //map column indices of my own matrix /*map_col_indices<4><<<num_blocks, 512>>>(this->A->row_offsets.raw(), this->A->col_indices.raw(), this->renumbering.raw(), this->halo_ranges.raw(), halo_mapping.raw(), neighbor_rows_d.raw(), this->base_index(), num_neighbors, size);*/ thrust::copy(thrust::make_permutation_iterator(this->renumbering.begin(), this->A->col_indices.begin()), thrust::make_permutation_iterator(this->renumbering.begin(), this->A->col_indices.end()), this->A->col_indices.begin()); cudaCheckError(); /* EXAMPLE use this->renumbering = [1 2 0 3 4 5 6 7] to map old column indices to new column indices (i.e. according to interior - boundary - halo separation), but do not reshuffle them into their place yet A.col_indices = [4 0 1 2 4 5 0 1 2 3 7 0 1 2 3 1 2 3 6 7] becomes A.col_indices = [4 1 2 0 4 5 1 2 0 3 7 1 2 0 3 2 0 3 6 7] */ cudaCheckError(); IVector temp_row_len(max_num_rows); for (int i = 0; i < num_neighbors; i++) { //map column indices of halo matrices and count of nonzeros we will keep int size = halo_rows[i].get_num_rows(); int num_blocks = min(4096, (size + 127) / 128); map_col_indices_and_count_rowlen<4> <<< num_blocks, 128, 128 * sizeof(INDEX_TYPE)>>>( halo_rows[i].row_offsets.raw(), halo_rows[i].col_indices.raw(), temp_row_len.raw(), halo_mapping.raw() + neighbor_rows[i], size, insert); cudaCheckError(); //number of nonzeros per row copied into big row sizes array for (int ring = 0; ring < rings; ring++) { thrust::copy(temp_row_len.begin() + halo_btl[i].B2L_rings[0][ring], temp_row_len.begin() + halo_btl[i].B2L_rings[0][ring + 1], new_row_offsets.begin() + this->halo_offsets[ring * num_neighbors + i]); } cudaCheckError(); /* EXAMPLE halo_mapping = [8 9 4 5 1 2 |10 6 7 11 -1 -1 2 3] look at halo row matrices, and halo_mapping, count column indices that do not map to -1 and map them to their new, local index halo_rows[0].col_indices = [1 2 3 4 5 0 1 2 3 4 5 6 7 0 1 3 6 0 1 2 3] becomes halo_rows[0].col_indices = [9 4 5 1 2 8 9 4 5 1 2 -1 -1 8 9 5 -1 8 9 4 5] with temp_row_len = [5 6 3 4] copied into new_row_offsets: [4 4 7 5| 5 6| 0 0| 3 4| 0 0 0] halo_rows[1].col_indices = [7 1 2 3 5 6 7 0 1 2 3 4 5 0 2 3 0 1 2 3] becomes halo_rows[1].col_indices = [3 6 7 11 -1 2 3 10 6 7 11 -1 -1 10 7 11 10 6 7 11] with temp_row_len = [4 6 3 4] copied into new_row_offsets: [4 4 7 5| 5 6| 4 6| 3 4| 3 4 0] */ } cudaCheckError(); //row_offsets array created by exclusive scan of row sizes thrust::exclusive_scan(new_row_offsets.begin(), new_row_offsets.begin() + size + this->num_halo_rows() + 1, new_row_offsets.begin()); 
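/* SKETCH (illustrative only, not part of the algorithm): the exclusive scan right above is
   the standard "per-row lengths -> CSR row offsets" step. On a small hypothetical host
   vector it behaves as follows:

       thrust::host_vector<int> len(5);
       len[0] = 4; len[1] = 4; len[2] = 7; len[3] = 5; len[4] = 0;   // per-row lengths
       thrust::exclusive_scan(len.begin(), len.end(), len.begin());  // in-place scan
       // len is now {0, 4, 8, 15, 20}: row i starts at offset len[i]

   The result for the running worked example is shown in the EXAMPLE comment below. */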
cudaCheckError(); /* EXAMPLE Exclusive scan to get new_row_offsets array: new_row_offsets = [0 4 8 15 20| 25 31| 35 41| 44 48| 51 55] */ // // Step 7 - consolidate column indices and values // int new_nnz = new_row_offsets[new_row_offsets.size() - 1]; typedef typename MatPrecisionMap<t_matPrec>::Type ValueTypeA; VVector new_values((new_nnz + 1 + diag * (total_rows - 1))* this->A->get_block_size(), types::util<ValueTypeA>::get_zero()); IVector new_col_indices(new_nnz, 0); //reorder based on row permutation reorder_whole_matrix <32> <<< num_blocks, 512>>>(this->A->row_offsets.raw(), this->A->col_indices.raw(), this->A->values.raw(), new_row_offsets.raw(), new_col_indices.raw(), new_values.raw(), this->renumbering.raw(), this->A->get_block_size(), size, insert); cudaCheckError(); if (diag) { //reorder based on row permutation reorder_vector_values <<< num_blocks, 512>>>(new_values.raw() + new_row_offsets[total_rows]*this->A->get_block_size(), this->A->values.raw() + this->A->row_offsets[size]*this->A->get_block_size(), this->renumbering.raw(), this->A->get_block_size(), size); cudaCheckError(); } int cumulative_num_rows = size; for (int i = 0; i < num_neighbors; i++) { for (int ring = 0; ring < rings; ring++) { int num_rows = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring]; int num_blocks = min(4096, (num_rows + 127) / 128); //copy in nonzeros that we are keeping //TODO: access pattern - should be implemented with warp-wide scans to decide which nonzeros we are keeping and where the rest is going reorder_whole_halo_matrix <<< num_blocks, 128>>>(halo_rows[i].row_offsets.raw() + halo_btl[i].B2L_rings[0][ring], halo_rows[i].col_indices.raw(), halo_rows[i].values.raw(), new_row_offsets.raw() + this->halo_offsets[ring * num_neighbors + i], new_col_indices.raw(), new_values.raw(), NULL, this->A->get_block_size(), num_rows, insert, this->halo_offsets[ring * num_neighbors + i], halo_btl[i].B2L_rings[0][ring], halo_btl[i].B2L_rings[0][rings]); if (diag) { thrust::copy(halo_rows[i].values.begin() + (halo_rows[i].row_offsets[halo_rows[i].get_num_rows()] + halo_btl[i].B2L_rings[0][ring])*this->A->get_block_size(), halo_rows[i].values.begin() + (halo_rows[i].row_offsets[halo_rows[i].get_num_rows()] + halo_btl[i].B2L_rings[0][ring + 1])*this->A->get_block_size(), new_values.begin() + (new_row_offsets[total_rows] + cumulative_num_rows)*this->A->get_block_size()); cumulative_num_rows += num_rows; } } } cudaCheckError(); /* EXAMPLE copy everything in place, dropping -1 column indices in the halo and reordering the owned rows new_row_offsets = [0 4 8 15 20| 25 31| 35 41| 44 48| 51 55] new_col_indices = [1 2 0 3 4 1 2 0 4 5 1 2 0 3 7 2 0 3 6 7 -end of owned 9 4 5 1 2 8 9 4 5 1 2 - end of neighbor 0 ring 0 3 6 7 11 2 3 10 6 7 11 - end of neighbor 1 ring 0 8 9 5 8 9 4 5 - end of neighbor 0 ring 1 10 7 11 10 6 7 11] - end of neighbor 1 ring 1 */ this->A->set_num_cols(total_rows); this->A->set_num_rows(size); this->A->col_indices.swap(new_col_indices); new_row_offsets.resize(total_rows + 1); this->A->row_offsets.swap(new_row_offsets); new_row_offsets.swap(this->old_row_offsets); this->A->values.swap(new_values); this->A->m_seq_offsets.resize(total_rows + 1); thrust::sequence(this->A->m_seq_offsets.begin(), this->A->m_seq_offsets.end()); if (insert) { this->A->delProps(DIAG); this->A->diag.resize(total_rows); thrust::copy(this->A->row_offsets.begin(), this->A->row_offsets.end() - 1, this->A->diag.begin()); } cudaCheckError(); delete this->halo_rows; delete this->halo_btl; //set halo_rows and 
halo_btl to NULL to avoid a potential double free situation in the future this->halo_rows = NULL; this->halo_btl = NULL; this->A->delProps(COO); this->A->set_initialized(1); //TODO: only do this if AMG_Config matrix_halo_exchange!=2 if (!insert) { this->A->computeDiagonal(); } this->A->setView(OWNED); } //function object (functor) for thrust calls (it is a unary operator to add a constant) template<typename T> class add_constant_op { const T c; public: add_constant_op(T _c) : c(_c) {} __host__ __device__ T operator()(const T &x) const { return x + c; } }; template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::obtain_shift_l2g_reordering(index_type n, I64Vector_d &l2g, IVector_d &p, IVector_d &q) { /* WARNING: Exchange halo of the inverse_reordering, which is implicitly based on the local_to_global_map (l2g). Notice that it is implicit in the exchange_halo routine, since you are getting exactly the vector halo elements, which are exactly the elements you need. They however must be shifted by the partition starting points (starting global row indices, which are contained in array part_offsets). This allows us to avoid constructing the global vector for inverse permutation, as is done in reference MATLAB code. */ //Recall that part_offsets provide the starting point (global row index) of every partition, in other words, //they contain the prefix sum of number of rows assigned to each partition. Also, notice that part_offsets and //part_offsets_h have the same values on device and host, respectively. See the few lines below for details: index_type tag = 1 * 133 + 3 * 7 + 0; //some random number for the tag index_type l = p.size(); q.resize(l); thrust::copy (p.begin(), p.end(), q.begin()); thrust::transform(q.begin(), q.end(), q.begin(), add_constant_op<index_type>(this->part_offsets[this->global_id()])); this->exchange_halo(q, tag); thrust::sequence (q.begin(), q.begin() + n); thrust::transform(q.begin(), q.begin() + n, q.begin(), add_constant_op<index_type>(this->part_offsets[this->global_id()])); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::unpack_partition(index_type *Bp, index_type *Bc, mat_value_type *Bv) { index_type l, n, nnz, offset; index_type *ir; index_type *Ap; index_type *Ac; mat_value_type *Av; IVector q; //some initializations this->A->getOffsetAndSizeForView(OWNED, &offset, &n); this->A->getNnzForView(OWNED, &nnz); l = this->inverse_renumbering.size(); ir = this->inverse_renumbering.raw(); Ap = this->A->row_offsets.raw(); Ac = this->A->col_indices.raw(); Av = this->A->values.raw(); //(i) reorder the matrix back (into mixed interior-boundary nodes) //applies to rows and columns (out-of-place) reorder_partition<index_type, mat_value_type, true, true> (n, nnz, Ap, Ac, Av, Bp, Bc, Bv, l, ir); cudaCheckError(); //obtain reordering q that combines the shift of the diagonal block with the off-diagonal block indices conversion from local to global this->obtain_shift_l2g_reordering(n, this->local_to_global_map, this->inverse_renumbering, q); cudaCheckError(); //(ii) reorder the matrix back (shift the diagonal block and convert off-diagonal block column indices from local to global) //applies columns only (in-place) reorder_partition<index_type, mat_value_type, false, true> (n, nnz, Bp, Bc, Bv, Bp, Bc, Bv,
q.size(), q.raw()); cudaCheckError(); } template <class TConfig> void DistributedManagerBase<TConfig>::createNeighToDestPartMap(IVector_h &neigh_to_part, IVector_h &neighbors, IVector_h &destination_part, int num_neighbors) { neigh_to_part.resize(num_neighbors); for (int i = 0; i < num_neighbors; i++) { neigh_to_part[i] = destination_part[neighbors[i]]; } } template <class TConfig> void DistributedManagerBase<TConfig>::createConsolidatedNeighToPartMap(IVector_h &cons_neigh_to_part, IVector_h &neigh_to_part, int my_destination_part, IVector_h &destination_part, int &num_cons_neighbors) { // input: non-initialized cons_neigh_to_part // fine_neigh_to_part // my_destination_part // output: cons_neigh_to_part // num_cons_neighbors cons_neigh_to_part = neigh_to_part; thrust::sort(cons_neigh_to_part.begin(), cons_neigh_to_part.end()); cudaCheckError(); cons_neigh_to_part.erase(thrust::unique(cons_neigh_to_part.begin(), cons_neigh_to_part.end()), cons_neigh_to_part.end()); // Remove if fine_neigh maps to same coarse partition cons_neigh_to_part.erase(thrust::remove_if(cons_neigh_to_part.begin(), cons_neigh_to_part.end(), is_my_part(my_destination_part)), cons_neigh_to_part.end()); num_cons_neighbors = cons_neigh_to_part.size(); cudaCheckError(); } template <class TConfig> void DistributedManagerBase<TConfig>::createNeighToConsNeigh(IVector_h &neigh_to_cons_neigh, IVector_h &cons_neigh_to_part, IVector_h &neigh_to_part, int my_destination_part, int &num_neighbors) { neigh_to_cons_neigh.resize(num_neighbors); thrust::lower_bound(cons_neigh_to_part.begin(), cons_neigh_to_part.end(), neigh_to_part.begin(), neigh_to_part.end(), neigh_to_cons_neigh.begin()); cudaCheckError(); // Flagging fine neighbors that go to same partition (haven't been found in previous step) for (int i = 0; i < num_neighbors; i++) { if ( neigh_to_part[i] == my_destination_part) { neigh_to_cons_neigh[i] = -1; } } } template <class TConfig> template <class IVector_hd> void DistributedManagerBase<TConfig>::consB2Lmaps(std::vector<IVector_hd> &dest_coarse_B2L_maps, std::vector<IVector_hd> &coarse_B2L_maps, IVector_h &fine_neigh_to_coarse_neigh, int num_coarse_neighbors, int num_fine_neighbors) { //Merge B2L fine maps per coarse destination dest_coarse_B2L_maps.resize(num_coarse_neighbors); std::vector<int> dest_coarse_B2L_maps_scratch_sizes(num_coarse_neighbors, 0); int my_id = this->global_id(); // Loop over the fine neighbors, to compute size of each dest_coarse_B2L_maps for (int i = 0; i < num_fine_neighbors; i++) { int k = fine_neigh_to_coarse_neigh[i]; if (k != -1) { dest_coarse_B2L_maps_scratch_sizes[k] += coarse_B2L_maps[i].size(); } } // Now fill dest_coarse_B2L_maps for (int k = 0; k < num_coarse_neighbors; k++) { dest_coarse_B2L_maps[k].resize( dest_coarse_B2L_maps_scratch_sizes[k] ); // Reset sizes to 0 (will use as offset in next loop); dest_coarse_B2L_maps_scratch_sizes[k] = 0; } for (int i = 0; i < num_fine_neighbors; i++) { int k = fine_neigh_to_coarse_neigh[i]; if (k != -1) { int offset = dest_coarse_B2L_maps_scratch_sizes[k]; thrust::copy(coarse_B2L_maps[i].begin(), coarse_B2L_maps[i].end(), dest_coarse_B2L_maps[k].begin() + offset); dest_coarse_B2L_maps_scratch_sizes[k] += coarse_B2L_maps[i].size(); } } cudaCheckError(); int max_size = 0; for (int i = 0; i < num_coarse_neighbors; i++) { int size = dest_coarse_B2L_maps[i].size(); if (size > max_size) { max_size = size; } } // Remove duplicates (aggregates in boundary that go to same merged partition) for (int i = 0; i < num_coarse_neighbors; i++) { int size =
dest_coarse_B2L_maps[i].size(); thrust::sort(dest_coarse_B2L_maps[i].begin(), dest_coarse_B2L_maps[i].begin() + size); index_type num_unique = thrust::unique(dest_coarse_B2L_maps[i].begin(), dest_coarse_B2L_maps[i].begin() + size) - dest_coarse_B2L_maps[i].begin(); dest_coarse_B2L_maps[i].erase(dest_coarse_B2L_maps[i].begin() + num_unique, dest_coarse_B2L_maps[i].end()); } cudaCheckError(); } template <class TConfig> void DistributedManagerBase<TConfig>::computeConsolidatedOffsets(const int my_id, const int my_destination_part, const bool is_root_partition, const int num_interior_rows, const int num_boundary_rows, IVector_h_vector &vertex_counts, const IVector_h &parts_to_consolidate, const int num_parts_to_consolidate, int &interior_offset, int &boundary_offset, int &total_interior_rows_in_merged, int &total_boundary_rows_in_merged, int &total_rows_in_merged, DistributedComms<TConfig> *comms) { IVector_h my_offsets(4); IVector_h my_sizes(2); my_sizes[0] = num_interior_rows; my_sizes[1] = num_boundary_rows; if (!is_root_partition) { //Send number of interior and boundary nodes to root comms->send_vector_async(my_sizes, my_destination_part, 777); comms->recv_vector(my_offsets, my_destination_part, 778); comms->send_vector_wait_all(my_sizes); } else { vertex_counts.resize(num_parts_to_consolidate); IVector_h child_sizes(2); IVector_h offsets_interior(num_parts_to_consolidate); IVector_h offsets_boundary(num_parts_to_consolidate); int count_int = 0; int count_bdy = 0; for (int i = 0; i < num_parts_to_consolidate; i++) { if (parts_to_consolidate[i] == my_id) { child_sizes = my_sizes; } else { comms->recv_vector(child_sizes, parts_to_consolidate[i], 777); } //Do a simple cumulative sum to determine total number of interior/boundary rows and their offsets on a per contributing partition basis offsets_interior[i] = count_int; offsets_boundary[i] = count_bdy; count_int += child_sizes[0]; count_bdy += child_sizes[1]; //Save them vertex_counts[i].resize(2); vertex_counts[i][0] = child_sizes[0]; vertex_counts[i][1] = child_sizes[1]; } for (int i = 0; i < num_parts_to_consolidate; i++) { //Send back to contributing partitions IVector_h offsets_to_send(4); offsets_to_send[0] = offsets_interior[i]; offsets_to_send[1] = offsets_boundary[i]; offsets_to_send[2] = count_int; offsets_to_send[3] = count_bdy; if (parts_to_consolidate[i] == my_id) { my_offsets = offsets_to_send; } else { comms->send_vector(offsets_to_send, parts_to_consolidate[i], 778); // cannot make async, rewriting internal buffer } } } interior_offset = my_offsets[0]; boundary_offset = my_offsets[1] + my_offsets[2] - num_interior_rows; total_interior_rows_in_merged = my_offsets[2]; total_boundary_rows_in_merged = my_offsets[3]; total_rows_in_merged = my_offsets[2] + my_offsets[3]; } template <class TConfig> template <class IVector_hd> void DistributedManagerBase<TConfig>::consB2LmapsOnRoot(int &num_consolidated_neighbors, std::vector<IVector_hd> &consolidated_B2L_maps, IVector_h &consolidated_coarse_ids, std::vector<IVector_hd> &dest_coarse_B2L_maps, IVector_h &coarse_neigh_to_fine_part, IVector_h &num_bdy_per_coarse_neigh, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int my_id, int my_destination_part, bool is_root_partition, int num_coarse_neighbors, DistributedComms<TConfig> *comms) { // TODO: it is possible to optimize exchanges, for example fuse recv_vector in recreating coarse neigbours // output: num_consolidated_neighbor, consolidated_B2L_maps, consolidated_coarse_ids // input: dest_coarse_B2L_maps, 
is_root_partition, my_id, my_destination_part, num_fine_parts_to_consolidate, num_coarse_neighbors, coarse_neigh_to_fine_part, num_bdy_per_coarse_neigh if (my_destination_part != my_id) { //if not root, send coarse neighbor list using fine indices and the corresponding boundary lists IVector_h num_coarse_neigh(1); num_coarse_neigh[0] = num_coarse_neighbors; comms->send_vector_async(num_coarse_neigh, my_destination_part, 1111); comms->send_vector_async(coarse_neigh_to_fine_part, my_destination_part, 2222); comms->send_vector_async(num_bdy_per_coarse_neigh, my_destination_part, 3333); for (int i = 0; i < num_coarse_neighbors; i++) { comms->send_vector_async(dest_coarse_B2L_maps[i], my_destination_part, 4444 + i) ; } comms->send_vector_wait_all(num_coarse_neigh); comms->send_vector_wait_all(coarse_neigh_to_fine_part); comms->send_vector_wait_all(num_bdy_per_coarse_neigh); for (int i = 0; i < num_coarse_neighbors; i++) { comms->send_vector_wait_all(dest_coarse_B2L_maps[i]) ; } } if (is_root_partition) { IVector_h num_coarse_ids_from_part(fine_parts_to_consolidate); IVector_h_vector coarse_ids_from_part(num_fine_parts_to_consolidate); IVector_h_vector num_coarse_neigh_bdys_from_part(num_fine_parts_to_consolidate); //If root, receive sizes, and resize receive buffers int total_num_coarse_ids = 0; for (int i = 0; i < num_fine_parts_to_consolidate; i++) { int current_part = fine_parts_to_consolidate[i]; IVector_h temp(1); if (current_part != my_id) { comms->recv_vector(temp, current_part, 1111); } else { temp[0] = num_coarse_neighbors; } num_coarse_ids_from_part[i] = temp[0]; coarse_ids_from_part[i].resize(temp[0]); num_coarse_neigh_bdys_from_part[i].resize(temp[0]); total_num_coarse_ids += temp[0]; } //Create a neighbor list for the consolidated coarse matrix, by merging coarse neighbor lists from partitions that are being merged consolidated_coarse_ids.resize(total_num_coarse_ids); int count = 0; for (int i = 0; i < num_fine_parts_to_consolidate; i++) { int current_part = fine_parts_to_consolidate[i]; // Get from each partition the coarse partition ids in their B2L maps if (current_part != my_id) { comms->recv_vector(coarse_ids_from_part[i], current_part, 2222); comms->recv_vector(num_coarse_neigh_bdys_from_part[i], current_part, 3333); } else { coarse_ids_from_part[i] = coarse_neigh_to_fine_part; num_coarse_neigh_bdys_from_part[i] = num_bdy_per_coarse_neigh; } thrust::copy(coarse_ids_from_part[i].begin(), coarse_ids_from_part[i].end(), consolidated_coarse_ids.begin() + count); count += num_coarse_ids_from_part[i]; } cudaCheckError(); //eliminate duplicates thrust::sort(consolidated_coarse_ids.begin(), consolidated_coarse_ids.end()); cudaCheckError(); consolidated_coarse_ids.erase(thrust::unique(consolidated_coarse_ids.begin(), consolidated_coarse_ids.end()), consolidated_coarse_ids.end()); cudaCheckError(); num_consolidated_neighbors = consolidated_coarse_ids.size(); IVector_h_vector coarse_ids_from_part_to_consolidated_neighbor(num_fine_parts_to_consolidate);; for (int i = 0; i < num_fine_parts_to_consolidate; i++) { coarse_ids_from_part_to_consolidated_neighbor[i].resize(num_coarse_ids_from_part[i]); thrust::lower_bound(consolidated_coarse_ids.begin(), consolidated_coarse_ids.end(), coarse_ids_from_part[i].begin(), coarse_ids_from_part[i].end(), coarse_ids_from_part_to_consolidated_neighbor[i].begin()); } cudaCheckError(); // Map each coarse partition to new coarse ID consolidated_B2L_maps.resize(num_consolidated_neighbors); IVector_h consolidated_B2L_maps_sizes(num_consolidated_neighbors); 
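/* SKETCH (illustrative only, hypothetical ids): because consolidated_coarse_ids is sorted
   and duplicate-free, the vectorised thrust::lower_bound above acts as a
   "fine coarse id -> consolidated neighbor index" lookup table:

       thrust::host_vector<int> cons_ids(3);   // consolidated_coarse_ids = {2, 3, 7}
       cons_ids[0] = 2; cons_ids[1] = 3; cons_ids[2] = 7;
       thrust::host_vector<int> ids(2);        // one child's coarse ids = {3, 7}
       ids[0] = 3; ids[1] = 7;
       thrust::host_vector<int> pos(2);
       thrust::lower_bound(cons_ids.begin(), cons_ids.end(),
                           ids.begin(), ids.end(), pos.begin());
       // pos = {1, 2}: coarse partition 3 becomes consolidated neighbor 1, partition 7 becomes neighbor 2

   This is the same trick used by createNeighToConsNeigh() earlier in this file. */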
// Offset in the consolidated_B2L_maps IVector_h_vector coarse_ids_offsets(num_fine_parts_to_consolidate); for (int i = 0; i < num_consolidated_neighbors; i++) { consolidated_B2L_maps_sizes[i] = 0; } // Compute the size of each consolidated_B2L_maps and offsets into it, where we will receive the parts coming from partitions that are getting merged into this one for (int i = 0; i < num_fine_parts_to_consolidate; i++) { coarse_ids_offsets[i].resize(num_coarse_ids_from_part[i]); for (int j = 0; j < num_coarse_ids_from_part[i]; j++) { int coarse_id = coarse_ids_from_part[i][j]; int k = num_coarse_neigh_bdys_from_part[i][j]; coarse_ids_offsets[i][j] = consolidated_B2L_maps_sizes[ coarse_ids_from_part_to_consolidated_neighbor[i][j] ]; consolidated_B2L_maps_sizes[ coarse_ids_from_part_to_consolidated_neighbor[i][j] ] += k; } } for (int i = 0; i < num_consolidated_neighbors; i++) { consolidated_B2L_maps[i].resize(consolidated_B2L_maps_sizes[i]); } // Receive the B2L maps from each child partition, concatenate them (gets sorted outside) for (int i = 0; i < num_fine_parts_to_consolidate; i++) { int current_part = fine_parts_to_consolidate[i]; for (int j = 0; j < num_coarse_ids_from_part[i]; j++) { int my_coarse_neigh = coarse_ids_from_part_to_consolidated_neighbor[i][j]; int offset = coarse_ids_offsets[i][j]; if (current_part != my_id) { comms->recv_vector( consolidated_B2L_maps[my_coarse_neigh], current_part, 4444 + j, offset, num_coarse_neigh_bdys_from_part[i][j]); //Need to do proper tagging here, otherwise messages from the same source would get mixed up } else { thrust::copy(dest_coarse_B2L_maps[j].begin(), dest_coarse_B2L_maps[j].end(), consolidated_B2L_maps[my_coarse_neigh].begin() + offset); } } } cudaCheckError(); } } template <class TConfig> void DistributedManagerBase<TConfig>::consolidateAndRenumberHalos(IVector_h &aggregates, const IVector_h &manager_halo_offsets, IVector_h &halo_offsets, const IVector_h &neighbors, int num_fine_neighbors, const IVector_h &consolidated_coarse_ids, int num_consolidated_neighbors, const IVector_h &destination_part, int my_destination_part, bool is_root_partition, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int num_parts, int my_id, int total_rows_in_merged, int &num_all_aggregates, DistributedComms<TConfig> *comms) { consAndRenumberHalos(aggregates, manager_halo_offsets, halo_offsets, neighbors, num_fine_neighbors, consolidated_coarse_ids, num_consolidated_neighbors, destination_part, my_destination_part, is_root_partition, fine_parts_to_consolidate, num_fine_parts_to_consolidate, num_parts, my_id, total_rows_in_merged, num_all_aggregates, comms); } template <class TConfig> void DistributedManagerBase<TConfig>::consolidateAndRenumberHalos(IVector_d &aggregates, const IVector_h &manager_halo_offsets, IVector_h &halo_offsets, const IVector_h &neighbors, int num_fine_neighbors, const IVector_h &consolidated_coarse_ids, int num_consolidated_neighbors, const IVector_h &destination_part, int my_destination_part, bool is_root_partition, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int num_parts, int my_id, int total_rows_in_merged, int &num_all_aggregates, DistributedComms<TConfig> *comms) { consAndRenumberHalos(aggregates, manager_halo_offsets, halo_offsets, neighbors, num_fine_neighbors, consolidated_coarse_ids, num_consolidated_neighbors, destination_part, my_destination_part, is_root_partition, fine_parts_to_consolidate, num_fine_parts_to_consolidate, num_parts, my_id, total_rows_in_merged, 
num_all_aggregates, comms); } template <class TConfig> template <class IVector_hd> void DistributedManagerBase<TConfig>::consAndRenumberHalos(IVector_hd &aggregates, const IVector_h &manager_halo_offsets, IVector_h &halo_offsets, const IVector_h &neighbors, int num_fine_neighbors, const IVector_h &consolidated_coarse_neigh_to_fine_part, int num_consolidated_neighbors, const IVector_h &destination_part, int my_destination_part, bool is_root_partition, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int num_parts, int my_id, int total_rows_in_merged, int &num_all_aggregates, DistributedComms<TConfig> *comms) { /* * EXAMPLE 2 This example is independent from the previous ones. Say partitions 0 and 1 are merging (into 0) partition 0 is neighbors with 1,2,3 and partition 1 is neighbors with 0,3,4 Partitions 3 and 4 are merging (into partition 3) and partition 2 is not merging with anyone. This example details the renumbering of halo indices on partition 0 and partition 1. aggregates on partition 0: [(fine interior nodes) (fine boundary nodes) (fine halo from part 1) (fine halo from part 2) (fine halo from part 3)] [(fine interior nodes) (fine boundary nodes) (13 13 15) (12 15 17) (14 16 18)] aggregates on partition 1: [(fine interior nodes) (fine boundary nodes) (fine halo from part 0) (fine halo from part 3) (fine halo from part 4)] [(fine interior nodes) (fine boundary nodes) (14 16 17) (18 19 19) (15 15 17)] manager_halo_offsets on partition 0: [22 25 28 31] manager_halo_offsets on partition 1: [20 23 26 29] halo_offsets on both partitions are uninitialised: [0 0 0] and [0 0] neighbors on partition 0: [1 2 3] partition 1: [0 3 4] num_fine_neighbors partition 0: 3 partition 1: 3 consolidated_coarse_neigh_to_fine_part partition 0: [2 3] partition 1: [3] num_consolidated_neighbors partition 0: 2 partition 1: 1 destination_part [0 0 2 3 3] my_destination_part partition 0: 0 partition 1: 0 is_root_partition partition 0: true partition 1: false fine_parts_to_consolidate partition 0: [0 1] num_fine_parts_to_consolidate partition 0: 2 num_parts 5 my_id partition 0: 0 partition 1: 1 total_rows_in_merged partition 0 and 1: 24 (=sum of the two below) num_all_aggregates partition 0: 13 partition 1: 11 - will be updated with the number of halo aggregates */ // // Step 9.2 - come up with nonmerge lists // int num_fine_nonmerge_neighbors;// = fine_nonmerge_neighbors.size(); //Number of neighbors we are not merging with num_fine_nonmerge_neighbors = 0; for (int i = 0 ; i < num_fine_neighbors; i++) { if (destination_part[neighbors[i]] != my_destination_part) { num_fine_nonmerge_neighbors++; } } IVector_h halo_sizes(num_fine_nonmerge_neighbors); IVector_h fine_nonmerge_neigh_to_cons_fine_part(num_fine_nonmerge_neighbors); IVector_h fine_nonmerge_neighbor_to_fine_neighbor(num_fine_nonmerge_neighbors); num_fine_nonmerge_neighbors = 0; for (int i = 0 ; i < num_fine_neighbors; i++) { if (destination_part[neighbors[i]] != my_destination_part) { halo_sizes[num_fine_nonmerge_neighbors] = manager_halo_offsets[i + 1] - manager_halo_offsets[i]; fine_nonmerge_neighbor_to_fine_neighbor[num_fine_nonmerge_neighbors] = i; fine_nonmerge_neigh_to_cons_fine_part[num_fine_nonmerge_neighbors] = destination_part[neighbors[i]]; num_fine_nonmerge_neighbors++; } } /* * EXAMPLE 2 num_fine_nonmerge_neighbors partition 0: 2 partition 1: 2 fine_nonmerge_neighbor_to_fine_neighbor partition 0: [1 2] partition 1: [1 2] fine_nonmerge_neigh_to_cons_fine_part partition 0: [2 3] partition 1: [3 3] halo_sizes
partition 0: [3 3] partition 1: [3 3] */ //Send them to root along with the halo parts of the aggregates vector if (!is_root_partition) { IVector_h num_fine_nonmerge_neigh(1); num_fine_nonmerge_neigh[0] = num_fine_nonmerge_neighbors; // TODO: async? might be faster. comms->send_vector(num_fine_nonmerge_neigh, my_destination_part, 1111); comms->send_vector(halo_sizes, my_destination_part, 2222); comms->send_vector(fine_nonmerge_neigh_to_cons_fine_part, my_destination_part, 3333); // Here check l2h_identity flag and act accordingly for (int i = 0; i < num_fine_nonmerge_neighbors; i++) { comms->send_vector_async(aggregates, my_destination_part, 4444 + i, manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[i]], halo_sizes[i]) ; } //comms->send_vector_wait_all(num_fine_nonmerge_neigh); //comms->send_vector_wait_all(halo_sizes); //comms->send_vector_wait_all(fine_nonmerge_neigh_to_cons_fine_part); comms->send_vector_wait_all(aggregates); /* * EXAMPLE 2 Partition 1 sends to partition 0: num_fine_nonmerge_neigh 2 halo_sizes [3 3] fine_nonmerge_neigh_to_cons_fine_part [3 3] for loop: sends two pieces: [(18 19 19)] [(15 15 17)] */ } if (is_root_partition) { // // Step 9.3 Root receives this info, creates metadata // std::vector<VecInt_t> num_fine_nonmerge_neigh_array(num_fine_parts_to_consolidate); IVector_h_vector halo_sizes_array(num_fine_parts_to_consolidate); IVector_h_vector fine_nonmerge_neigh_to_cons_fine_part_array(num_fine_parts_to_consolidate); std::vector<std::vector<IVector> > fine_halo_aggregates_to_root_array(num_fine_parts_to_consolidate); std::vector<VecInt_t> min_index_coarse_halo(num_consolidated_neighbors, 0x7FFFFFFF); std::vector<VecInt_t> max_index_coarse_halo(num_consolidated_neighbors, 0); std::vector<VecInt_t> fine_part_to_consolidated_neighbor(num_parts, -1); for (int i = 0; i < num_consolidated_neighbors; i++) { fine_part_to_consolidated_neighbor[consolidated_coarse_neigh_to_fine_part[i]] = i; } /* * EXAMPLE 2 everything from here on is for partition 0, since that is the root partition fine_part_to_consolidated_neighbor [-1 -1 0 1 -1] */ for (int i = 0; i < num_fine_parts_to_consolidate; i++) { int current_part = fine_parts_to_consolidate[i]; IVector_h temp(1); if (current_part != my_id) { comms->recv_vector(temp, current_part, 1111); } else { temp[0] = num_fine_nonmerge_neighbors; } num_fine_nonmerge_neigh_array[i] = temp[0]; halo_sizes_array[i].resize(temp[0]); fine_nonmerge_neigh_to_cons_fine_part_array[i].resize(temp[0]); fine_halo_aggregates_to_root_array[i].resize(temp[0]); if (current_part != my_id) { comms->recv_vector(halo_sizes_array[i], current_part, 2222); } else { halo_sizes_array[i] = halo_sizes; } if (current_part != my_id) { comms->recv_vector(fine_nonmerge_neigh_to_cons_fine_part_array[i], current_part, 3333); } else { fine_nonmerge_neigh_to_cons_fine_part_array[i] = fine_nonmerge_neigh_to_cons_fine_part; } //Receive the halo regions for (int j = 0; j < temp[0]; j++) { fine_halo_aggregates_to_root_array[i][j].resize(halo_sizes_array[i][j]); if (current_part != my_id) { comms->recv_vector(fine_halo_aggregates_to_root_array[i][j], current_part, 4444 + j); } else { //HERE thrust::copy(aggregates.begin() + manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[j]], aggregates.begin() + manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[j]] + halo_sizes[j], fine_halo_aggregates_to_root_array[i][j].begin()); //TODO: not do this copying around on the root } #define MIN(a,b) a<b?a:b; #define MAX(a,b) a>b?a:b; //Find minimum and maximum halo 
indices as not to allocate too much scratch space later int min_index = thrust::reduce(fine_halo_aggregates_to_root_array[i][j].begin(), fine_halo_aggregates_to_root_array[i][j].end(), int(0x7FFFFFFF), thrust::minimum<int>()); int max_index = thrust::reduce(fine_halo_aggregates_to_root_array[i][j].begin(), fine_halo_aggregates_to_root_array[i][j].end(), int(0), thrust::maximum<int>()); min_index_coarse_halo[fine_part_to_consolidated_neighbor[fine_nonmerge_neigh_to_cons_fine_part_array[i][j]]] = MIN((int)min_index_coarse_halo[fine_part_to_consolidated_neighbor[fine_nonmerge_neigh_to_cons_fine_part_array[i][j]]], min_index); max_index_coarse_halo[fine_part_to_consolidated_neighbor[fine_nonmerge_neigh_to_cons_fine_part_array[i][j]]] = MAX((int)max_index_coarse_halo[fine_part_to_consolidated_neighbor[fine_nonmerge_neigh_to_cons_fine_part_array[i][j]]], max_index); } } cudaCheckError(); /* * EXAMPLE 2 num_fine_nonmerge_neigh_array = [2 2] halo_sizes_array = [[3 3][3 3]] fine_nonmerge_neigh_to_cons_fine_part_array[][] = [[2 3][3 3]] fine_halo_aggregates_to_root_array[from][to][fine halo vertex] [[[12 15 17][14 16 18]] [[18 19 19][15 15 17]]] min_index_coarse_halo[12 14] max_index_coarse_halo[17 19] */ halo_offsets[0] = total_rows_in_merged; //Now we have all the halo nodes, let's renumber them. int min_index = thrust::reduce(min_index_coarse_halo.begin(), min_index_coarse_halo.end(), int(0x7FFFFFFF), thrust::minimum<int>()); int max_index = thrust::reduce(max_index_coarse_halo.begin(), max_index_coarse_halo.end(), int(0), thrust::maximum<int>()); cudaCheckError(); // // Step 9.4 compute halo indices on root nodes // int scratch_size; if (num_consolidated_neighbors == 0) { scratch_size = 1; } else { scratch_size = max_index - min_index + 2; } IVector scratch(scratch_size); for (int i = 0; i < num_consolidated_neighbors; i++) { thrust::fill(scratch.begin(), scratch.end(), 0); int dest_part = consolidated_coarse_neigh_to_fine_part[i]; //Flag halo indices that occur for a specific coarse neighbor for (int j = 0; j < num_fine_parts_to_consolidate; j++) { for (int k = 0; k < num_fine_nonmerge_neigh_array[j]; k++) { if (fine_nonmerge_neigh_to_cons_fine_part_array[j][k] == dest_part) { int size = halo_sizes_array[j][k]; this->flag_halo_ids(size, scratch, fine_halo_aggregates_to_root_array[j][k], min_index_coarse_halo[i], max_index, min_index); } } } thrust::exclusive_scan(scratch.begin(), scratch.end(), scratch.begin(), halo_offsets[i]); //renumber them with the proper offset into our halo halo_offsets[i + 1] = scratch[scratch.size() - 1]; //now read them back for (int j = 0; j < num_fine_parts_to_consolidate; j++) { for (int k = 0; k < num_fine_nonmerge_neigh_array[j]; k++) { if (fine_nonmerge_neigh_to_cons_fine_part_array[j][k] == dest_part) { int size = halo_sizes_array[j][k]; int block_size = 128; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (size - 1) / block_size + 1); this->read_halo_ids(size, scratch, fine_halo_aggregates_to_root_array[j][k], min_index_coarse_halo[i]); //and send them back to contributing partitions cudaDeviceSynchronize(); //TODO: don't need to synchronize when using GPUDirect int current_part = fine_parts_to_consolidate[j]; int tag = 4444 + dest_part; if (current_part != my_id) { comms->send_vector_async(fine_halo_aggregates_to_root_array[j][k], current_part, tag); //!!!!: we are sending them back not in sequential order, need tags!!!! 
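/* SKETCH (illustrative only, mirrors EXAMPLE 2): the flag -> exclusive_scan -> read
   sequence above is the usual compact-and-renumber pattern. For the halo vertices
   {12, 15, 17} of coarse neighbor 2, with min index 12 and halo_offsets[0] = 24:

       flag:  scratch = {1, 0, 0, 1, 0, 1, 0}      // scratch[id - min_index] = 1 for every id seen
       scan:  thrust::exclusive_scan(scratch.begin(), scratch.end(), scratch.begin(), 24);
              // scratch = {24, 25, 25, 25, 26, 26, 27}
       read:  new_id = scratch[id - min_index]     // 12 -> 24, 15 -> 25, 17 -> 26

   Every distinct halo vertex gets one consecutive index starting at the consolidated halo
   offset, no matter how many child partitions referenced it, and the trailing element of
   scratch (27 here) becomes halo_offsets[i + 1]. */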
} else { thrust::copy(fine_halo_aggregates_to_root_array[j][k].begin(), fine_halo_aggregates_to_root_array[j][k].end(), aggregates.begin() + manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[k]]); } } } } /* * EXAMPLE 2 the array that is sent back in pieces: fine_halo_aggregates_to_root_array[from][to][fine halo vertex] [[[24 25 26][27 29 31]] [[31 32 32][28 28 30]]] halo_offsets = [24 27 33] */ } // Loop over consolidated neighbors cudaCheckError(); // Wait for sends to have completed (this is to prevent fine_halo_aggregates_to_root_array to be destroyed before send has finished) for (int i = 0; i < num_consolidated_neighbors; i++) { int dest_part = consolidated_coarse_neigh_to_fine_part[i]; for (int j = 0; j < num_fine_parts_to_consolidate; j++) { for (int k = 0; k < num_fine_nonmerge_neigh_array[j]; k++) { if (fine_nonmerge_neigh_to_cons_fine_part_array[j][k] == dest_part) { int current_part = fine_parts_to_consolidate[j]; if (current_part != my_id) { comms->send_vector_wait_all(fine_halo_aggregates_to_root_array[j][k]); } } } } } // Loop over consolidated neighbors //Send total number of rows in the aggregated matrix for (int i = 0; i < num_fine_parts_to_consolidate; i++) { int current_part = fine_parts_to_consolidate[i]; IVector_h total_rows(1); total_rows[0] = halo_offsets[num_consolidated_neighbors]; if (current_part != my_id) { comms->send_vector(total_rows, current_part, 5555); } else { num_all_aggregates = total_rows[0]; } } } // If is root partition if (!is_root_partition) { for (int i = 0; i < num_fine_nonmerge_neighbors; i++) { int tag = 4444 + fine_nonmerge_neigh_to_cons_fine_part[i]; comms->recv_vector(aggregates, my_destination_part, tag, manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[i]], halo_sizes[i]); } IVector_h total_rows(1); comms->recv_vector(total_rows, my_destination_part, 5555); num_all_aggregates = total_rows[0]; } /* * EXAMPLE 2 num_all_aggregates = 33 (both partitions 0 and 1 */ } template <class TConfig> void DistributedManagerBase<TConfig>::ipcExchangePtr(void *&ptr, bool is_root_partition, int num_parts_to_consolidate, IVector_h &parts_to_consolidate, int my_root_partition, int my_id, DistributedComms<TConfig> *comms) { cudaIpcMemHandle_t handle; if (is_root_partition) { cudaIpcGetMemHandle( (cudaIpcMemHandle_t *) &handle, ptr ) ; for (int i = 0; i < num_parts_to_consolidate; i++) { int current_part = parts_to_consolidate[i]; if (current_part != my_id) { comms->send_raw_data(&handle, sizeof(handle), current_part, 456); } } } else { comms->recv_raw_data(&handle, sizeof(handle), my_root_partition, 456); cudaError_t err = cudaIpcOpenMemHandle( (void **) &ptr, handle, cudaIpcMemLazyEnablePeerAccess); } } template <class TConfig> void DistributedManagerBase<TConfig>::ipcWaitForChildren(bool is_root_partition, int num_parts_to_consolidate, IVector_h &parts_to_consolidate, int my_destination_part, int my_id, DistributedComms<TConfig> *comms) { cudaEvent_t event; cudaIpcEventHandle_t event_handle; cudaEventCreate(&event, cudaEventDisableTiming | cudaEventInterprocess); cudaIpcGetEventHandle( &event_handle, event); // Each rank record the event cudaEventRecord(event); if (is_root_partition) { std::vector<cudaEvent_t> child_events(num_parts_to_consolidate); std::vector<cudaIpcEventHandle_t> child_event_handles(num_parts_to_consolidate); // Root partition receives event_handles from child and stores in child_event_handles for (int i = 0; i < num_parts_to_consolidate; i++) { int current_part = parts_to_consolidate[i]; if (current_part != my_id) 
{ comms->recv_raw_data(&(child_event_handles[i]), sizeof(cudaIpcEventHandle_t), current_part, 987 + current_part); cudaIpcOpenEventHandle(&child_events[i], child_event_handles[i]); } } for (int i = 0; i < num_parts_to_consolidate; i++) { if (parts_to_consolidate[i] != my_id) { cudaEventSynchronize(child_events[i]); } } } else { comms->send_raw_data(&event_handle, sizeof(cudaIpcEventHandle_t), my_destination_part, 987 + my_id); } } template <class TConfig> void DistributedManagerBase<TConfig>::ipcWaitForRoot(bool is_root_partition, int num_parts_to_consolidate, IVector_h &parts_to_consolidate, int my_destination_part, int my_id, DistributedComms<TConfig> *comms) { cudaEvent_t event; cudaIpcEventHandle_t event_handle; cudaEventCreate(&event, cudaEventDisableTiming | cudaEventInterprocess); if (is_root_partition) { cudaIpcGetEventHandle( &event_handle, event); // Root records the event cudaEventRecord(event); // Root partition sends event_handles to child for (int i = 0; i < num_parts_to_consolidate; i++) { int current_part = parts_to_consolidate[i]; if (current_part != my_id) { comms->send_raw_data(&event_handle, sizeof(event_handle), current_part, 988 + current_part); } } } else { comms->recv_raw_data(&event_handle, sizeof(event_handle), my_destination_part, 988 + my_id); cudaIpcOpenEventHandle(&event, event_handle); cudaEventSynchronize(event); } } template <class TConfig> void DistributedManagerBase<TConfig>::read_halo_ids(int size, IVector_d &scratch, IVector_d &halo_aggregates, VecInt_t min_index_coarse_halo) { int block_size = 128; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (size - 1) / block_size + 1); read_halo_ids_kernel <<< num_blocks, block_size>>>(scratch.raw(), halo_aggregates.raw(), min_index_coarse_halo, size); cudaCheckError(); } template <class TConfig> void DistributedManagerBase<TConfig>::read_halo_ids(int size, IVector_h &scratch, IVector_h &halo_aggregates, VecInt_t min_index_coarse_halo) { FatalError("read_halo_ids not implemented on host yet", AMGX_ERR_NOT_IMPLEMENTED); } template <class TConfig> void DistributedManagerBase<TConfig>::flag_halo_ids(int size, IVector_d &scratch, IVector_d &halo_aggregates, VecInt_t min_index_coarse_halo, int max_index, int min_index) { int block_size = 128; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (size - 1) / block_size + 1); flag_halo_ids_kernel <<< num_blocks, block_size>>>(scratch.raw(), halo_aggregates.raw(), min_index_coarse_halo, size, max_index - min_index + 1); cudaCheckError(); } template <class TConfig> void DistributedManagerBase<TConfig>::flag_halo_ids(int size, IVector_h &scratch, IVector_h &halo_aggregates, VecInt_t min_index_coarse_halo, int max_index, int min_index) { FatalError("flag_halo_ids not implemented on host yet", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::consolidateAndUploadAll(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> &A) { FatalError("Fine level consolidation not implemented on host yet", AMGX_ERR_NOT_IMPLEMENTED); } template<class TConfig> void DistributedManagerBase<TConfig>::exchangeSolveResultsConsolidation(int &num_iters, std::vector<PODVector_h> &res_history, AMGX_STATUS &status, bool store_res_history) { int bsize = this->A->get_block_size(); PODVector_h res_history_tmp; if (!m_is_fine_level_consolidated) { return; } else 
{ int my_id = this->getFineLevelComms()->get_global_id(); IVector_h my_num_iters(1); if (m_is_fine_level_root_partition) { my_num_iters[0] = num_iters; if (store_res_history) { // Pack the res_history vector into array res_history_tmp.resize( (num_iters + 1)*bsize); for (int i = 0; i < num_iters + 1; i++) { for (int j = 0; j < bsize; j++) { res_history_tmp[i * bsize + j] = res_history[i][j]; } } } for (int i = 0; i < m_num_fine_level_parts_to_consolidate; i++) { int current_part = m_fine_level_parts_to_consolidate[i]; if (my_id != current_part) { getFineLevelComms()->send_vector_async(my_num_iters, current_part, 245); if (store_res_history) { getFineLevelComms()->send_vector_async(res_history_tmp, current_part, 246); } } } for (int i = 0; i < m_num_fine_level_parts_to_consolidate; i++) { int current_part = m_fine_level_parts_to_consolidate[i]; if (my_id != current_part) { getFineLevelComms()->send_raw_data(&status, sizeof(status), current_part, 247); } } getFineLevelComms()->send_vector_wait_all(my_num_iters); if (store_res_history) { getFineLevelComms()->send_vector_wait_all(res_history_tmp); } } else { // Store num_iters getFineLevelComms()->recv_vector(my_num_iters, m_my_fine_level_destination_part, 245); num_iters = my_num_iters[0]; if (store_res_history) { // Fill res_history vector res_history.resize(num_iters + 1); res_history_tmp.resize( (num_iters + 1)*bsize); getFineLevelComms()->recv_vector(res_history_tmp, m_my_fine_level_destination_part, 246); for (int i = 0; i < num_iters + 1; i++) { res_history[i].resize(bsize); for (int j = 0; j < bsize; j++) { res_history[i][j] = res_history_tmp[i * bsize + j]; } } } getFineLevelComms()->recv_raw_data(&status, sizeof(status), m_my_fine_level_destination_part, 247); } } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::consolidateAndUploadAll(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> &in_A) { this->A = &in_A; this->createComms(this->A->getResources()); //refresh comms DistributedComms<TConfig> *comms = this->getComms(); int my_id = comms->get_global_id(); int num_parts = comms->get_num_partitions(); int num_rings = this->num_halo_rings(); int num_neighbors = this->neighbors.size(); // All partitions have to call this, otherwise it fails // Step 1: Figure out which partition should be consolidated together based on their host_name and their PCI-E slot ID IVector_h destination_part(num_parts); this->computeDestinationPartitionsWithCons(my_id, num_parts, destination_part, comms); int my_destination_part = destination_part[my_id]; // Check if I'm root partition and how many msgs I will receive bool is_root_partition = false; int num_parts_to_consolidate = 0; for (int i = 0; i < num_parts; i++) { if (destination_part[i] == my_id) { is_root_partition = true; num_parts_to_consolidate++; } } if (my_destination_part >= num_parts) { FatalError("During consolidation, sending data to partition that doesn't exist", AMGX_ERR_NOT_IMPLEMENTED); } // Create cons_part_to_part map IVector_h cons_part_to_part = destination_part; thrust::sort(cons_part_to_part.begin(), cons_part_to_part.end()); cudaCheckError(); cons_part_to_part.erase(thrust::unique(cons_part_to_part.begin(), cons_part_to_part.end()), cons_part_to_part.end()); cudaCheckError(); int num_cons_partitions = cons_part_to_part.size(); // If number of consolidated 
partitions is the same as number of partitions, simply call uploadAll if (num_cons_partitions == num_parts) { this->initializeUploadReorderAll(n, nnz, block_dimx, block_dimy, row_ptrs, col_indices, data, diag, *(this->A)); this->m_is_fine_level_consolidated = false; return; } if (is_root_partition) { this->A->getResources()->expandRootPool(); } this->m_is_fine_level_consolidated = true; if (num_rings != 1) { FatalError("num_rings > 1 not supported in fine_level consolidation", AMGX_ERR_NOT_IMPLEMENTED); } // Fill with b2l_maps IVector_h_vector B2L_maps_tmp; B2L_maps_tmp.resize(num_neighbors); for (int i = 0; i < num_neighbors; i++) { B2L_maps_tmp[i] = this->cached_B2L_maps[i]; } bool useCudaIpc = this->m_use_cuda_ipc_consolidation; mat_value_type *data_hd = NULL; mat_value_type *diag_hd = NULL; int *col_indices_hd = NULL; int data_alloc = 0; int diag_alloc = 0; int col_alloc = 0; col_indices_hd = (int *) this->getDevicePointerForData((void *)col_indices, nnz * block_dimx * block_dimy * sizeof(int), &col_alloc); data_hd = (mat_value_type *) this->getDevicePointerForData((void *)data, nnz * block_dimx * block_dimy * sizeof(mat_value_type), &data_alloc); if (diag != NULL) { diag_hd = (mat_value_type *) this->getDevicePointerForData((void *)diag, nnz * block_dimx * block_dimy * sizeof(mat_value_type), &diag_alloc); } // Copy the original row_offsets array (this is required when replacing coefficients this->m_old_row_offsets_CONS.resize(n + 1); cudaMemcpy(this->m_old_row_offsets_CONS.raw(), row_ptrs, (n + 1)*sizeof(int), cudaMemcpyDefault); cudaCheckError(); this->m_old_nnz_CONS = nnz; // This function: // Creates fine level consolidated matrices // Modifies the btl_maps, lth_maps // Create part_to_cons_part map IVector_h part_to_cons_part(num_parts); thrust::lower_bound(cons_part_to_part.begin(), cons_part_to_part.end(), destination_part.begin(), destination_part.end(), part_to_cons_part.begin()); cudaCheckError(); IVector_h neigh_to_part; this->createNeighToDestPartMap(neigh_to_part, this->neighbors, destination_part, num_neighbors); IVector_h cons_neigh_to_part; int num_cons_neighbors; this->createConsolidatedNeighToPartMap(cons_neigh_to_part, neigh_to_part, my_destination_part, destination_part, num_cons_neighbors); IVector_h neigh_to_cons_neigh; this->createNeighToConsNeigh( neigh_to_cons_neigh, cons_neigh_to_part, neigh_to_part, my_destination_part, num_neighbors); // --------------------------------------- // MERGE B2L MAPS BASED ON DEST PARTITION // --------------------------------------- IVector_h_vector dest_B2L_maps; this->consolidateB2Lmaps(dest_B2L_maps, B2L_maps_tmp, neigh_to_cons_neigh, num_cons_neighbors, num_neighbors); // ------------------------------------ // Renumber interior and boundary rows // ------------------------------------ int num_interior_rows; int num_boundary_rows; IVector_h renumbering; this->createAggregatesRenumbering(renumbering, dest_B2L_maps, n, num_cons_neighbors, num_interior_rows, num_boundary_rows, num_rings); // -------------------------------------------------- // Create list of destination parts to consolidate // -------------------------------------------------- // Store whether or not this is a root partition on fine level IVector_h parts_to_consolidate; parts_to_consolidate.resize(num_parts_to_consolidate); int count = 0; for (int i = 0; i < num_parts; i++) { if (destination_part[i] == my_id) { parts_to_consolidate[count] = i; count++; } } // --------------------------------------------------------------------- // Each partition computes its 
offset for its interior and boundary nodes // --------------------------------------------------------------------- IVector_h_vector vertex_counts; int interior_offset, boundary_offset, total_interior_rows_in_merged, total_boundary_rows_in_merged; int total_rows_in_merged; this->computeConsolidatedOffsets(my_id, my_destination_part, is_root_partition, num_interior_rows, num_boundary_rows, vertex_counts, parts_to_consolidate, num_parts_to_consolidate, interior_offset, boundary_offset, total_interior_rows_in_merged, total_boundary_rows_in_merged, total_rows_in_merged, comms); // ----------------------------------- // Each partition renumber it's rows // ----------------------------------- int total_num_halos = 0; // Pack new bdy_ids for (int i = 0; i < num_neighbors; i++) { total_num_halos += this->cached_L2H_maps[i].size(); } IVector_h row_ids(n + total_num_halos, -1); this->m_row_ids_CONS.resize(n + total_num_halos); // Renumber the interior and boundary rows for (int i = 0; i < n; i++) { int new_id; if (renumbering.size() == 0) { new_id = i; } else { new_id = renumbering[i]; } new_id += ((new_id >= num_interior_rows) ? boundary_offset : interior_offset); row_ids[i] = new_id; } for (int i = 0; i < num_cons_neighbors; i++) { thrust::transform(dest_B2L_maps[i].begin(), dest_B2L_maps[i].end(), thrust::constant_iterator<index_type>(boundary_offset), dest_B2L_maps[i].begin(), thrust::plus<index_type>()); } cudaCheckError(); // ------------------------------------------------- // Send dest_B2L_maps to root partitions // ------------------------------------------------ IVector_h num_bdy_per_cons_neigh(num_cons_neighbors); for (int i = 0; i < num_cons_neighbors; i++) { num_bdy_per_cons_neigh[i] = dest_B2L_maps[i].size(); } IVector_h root_cons_neighbors; int root_num_cons_neighbors = 0; IVector_h_vector cons_B2L_maps; this->consolidateB2LmapsOnRoot(root_num_cons_neighbors, cons_B2L_maps, root_cons_neighbors, dest_B2L_maps, cons_neigh_to_part, num_bdy_per_cons_neigh, parts_to_consolidate, num_parts_to_consolidate, my_id, my_destination_part, is_root_partition, num_cons_neighbors, comms); IVector_h halo_ids_offsets(num_neighbors + 1); IVector_h halo_ids; int halo_ids_size = 0; halo_ids_offsets[0] = 0; for (int i = 0; i < num_neighbors; i++) { halo_ids_size += this->cached_L2H_maps[i].size(); halo_ids_offsets[i + 1] = halo_ids_size; } halo_ids.resize(halo_ids_size); // Do exchange with neighbors // Pack new bdy_ids IVector_h_vector bdy_ids; bdy_ids.resize(num_neighbors); for (int i = 0; i < num_neighbors; i++) { int size = this->cached_B2L_maps[i].size(); bdy_ids[i].resize(size); // Pack buffer for (int j = 0; j < size; j++) { bdy_ids[i][j] = row_ids[this->cached_B2L_maps[i][j]]; } } for (int i = 0; i < num_neighbors; i++) { comms->send_vector_async(bdy_ids[i], this->neighbors[i], 6666 + this->neighbors[i]); } for (int i = 0; i < num_neighbors; i++) { comms->recv_vector(halo_ids, this->neighbors[i], 6666 + my_id, halo_ids_offsets[i], this->cached_L2H_maps[i].size()); } for (int i = 0; i < num_neighbors; i++) { comms->send_vector_wait_all(bdy_ids[i]); } IVector_h halo_offsets(root_num_cons_neighbors + 1, 0); int root_num_rows; this->consolidateAndRenumberHalos(halo_ids, halo_ids_offsets, halo_offsets, this->neighbors, num_neighbors, root_cons_neighbors, root_num_cons_neighbors, destination_part, my_destination_part, is_root_partition, parts_to_consolidate, num_parts_to_consolidate, num_parts, my_id, total_rows_in_merged, root_num_rows, comms); if (is_root_partition) { 
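/* SKETCH (illustrative only, hypothetical offsets): the renumbering loop earlier in this
   function interleaves every child's interior block and boundary block into the merged
   numbering. With num_interior_rows = 3, interior_offset = 4 and boundary_offset = 10:

       renumbering[i] = 1  (interior, 1 <  3)  ->  row_ids[i] = 1 + 4  = 5
       renumbering[i] = 3  (boundary, 3 >= 3)  ->  row_ids[i] = 3 + 10 = 13

   so all interior rows of all children end up first in the consolidated matrix, followed
   by all boundary rows, which is the layout the consolidated B2L maps below rely on. */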
this->B2L_maps.resize(cons_B2L_maps.size()); for (int i = 0; i < cons_B2L_maps.size(); i++) { thrust::sort(cons_B2L_maps[i].begin(), cons_B2L_maps[i].end()); this->B2L_maps[i].copy(cons_B2L_maps[i]); // H2D copy of B2L maps } cudaCheckError(); } // Now renumber the row_ids based on lth_maps count = 0; for (int i = 0; i < num_neighbors; i++) { for (int j = 0; j < this->cached_L2H_maps[i].size(); j++) { row_ids[this->cached_L2H_maps[i][j]] = halo_ids[count]; count++; } } cudaMemcpy(this->m_row_ids_CONS.raw(), row_ids.raw(), (n + total_num_halos)*sizeof(int), cudaMemcpyDefault); cudaCheckError(); int bsize = block_dimx * block_dimy; if (is_root_partition) { this->A->row_offsets.resize(root_num_rows + 1); } void *root_row_ptr = (void *) this->A->row_offsets.raw(); if (useCudaIpc) { // ---------------------------------------------------- // 1. cudaIPC to get pointer to root's row_offset array // ---------------------------------------------------- this->ipcExchangePtr(root_row_ptr, is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms); cudaCheckError(); // ------------------------------------------------------------------- // 2. each rank copy it's row length on root partition using row_ids // ------------------------------------------------------------------- int cta_size = 128; int grid_size = min(4096, (n + total_num_halos + cta_size - 1) / cta_size); zero_copy_row_lengths_ids_offsets<mat_value_type> <<< grid_size, cta_size>>>(this->m_old_row_offsets_CONS.raw(), ((int *) root_row_ptr) /* IPC */, this->m_row_ids_CONS.raw(), n, total_num_halos, (mat_value_type *) diag); cudaCheckError(); // Root partition waits for children to be done writing their result this->ipcWaitForChildren(is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms); cudaCheckError(); } else // CudaIpcNotAvailable { this->checkPinnedBuffer( max( nnz * sizeof(mat_value_type), (n + 1)*max(sizeof(index_type), sizeof(value_type)) ) ); if (!is_root_partition) { IVector_h data_to_send(3); data_to_send[0] = n; data_to_send[1] = nnz; data_to_send[2] = total_num_halos; int dummy; void *row_ptrs_to_send = this->getHostPointerForData((void *)row_ptrs, sizeof(index_type) * (n + 1), &dummy); comms->send_vector(data_to_send, my_destination_part, 10000 + my_id); comms->send_raw_data(row_ptrs_to_send, (n + 1)*sizeof(int), my_destination_part, 10001 + my_id); comms->send_raw_data(&row_ids[0], (n + total_num_halos)*sizeof(int), my_destination_part, 10002 + my_id); } else { cudaEvent_t event; cudaEventCreate(&event); //TODO: Could use streams here //TODO: Avoid extra device to host copies std::vector<IVector_h> data_recv(num_parts_to_consolidate); for (int i = 0; i < num_parts_to_consolidate; i++) { data_recv[i].resize(3); int current_part = parts_to_consolidate[i]; if (current_part != my_id) { comms->recv_vector(data_recv[i], current_part, 10000 + current_part); } else { data_recv[i][0] = n; data_recv[i][1] = nnz; data_recv[i][2] = total_num_halos; } } this->m_child_n.resize(num_parts_to_consolidate); this->m_child_nnz.resize(num_parts_to_consolidate); this->m_child_num_halos.resize(num_parts_to_consolidate); this->m_child_row_ids.resize(num_parts_to_consolidate); this->m_child_old_row_offsets.resize(num_parts_to_consolidate); int max_n = 0; int max_nnz = 0; for (int i = 0; i < num_parts_to_consolidate; i++) { int current_part = parts_to_consolidate[i]; this->m_child_n[i] = data_recv[i][0]; this->m_child_nnz[i] = data_recv[i][1]; 
this->m_child_num_halos[i] = data_recv[i][2]; if (this->m_child_n[i] > max_n) { max_n = this->m_child_n[i]; } if (this->m_child_nnz[i] > max_nnz) { max_nnz = this->m_child_nnz[i]; } this->m_child_row_ids[i].resize(this->m_child_n[i] + this->m_child_num_halos[i]); this->m_child_old_row_offsets[i].resize(this->m_child_n[i] + 1); } this->m_child_max_n = max_n; this->m_child_max_nnz = max_nnz; for (int i = 0; i < num_parts_to_consolidate; i++) { int current_part = parts_to_consolidate[i]; int cta_size = 128; int grid_size = min(4096, (this->m_child_n[i] + this->m_child_num_halos[i] + cta_size - 1) / cta_size); if (current_part != my_id) { comms->recv_vector(this->m_child_old_row_offsets[i], current_part, 10001 + current_part, 0, this->m_child_n[i] + 1); comms->recv_vector(this->m_child_row_ids[i], current_part, 10002 + current_part, 0, this->m_child_n[i] + this->m_child_num_halos[i]); zero_copy_row_lengths_ids_offsets<mat_value_type> <<< grid_size, cta_size>>>(this->m_child_old_row_offsets[i].raw(), this->A->row_offsets.raw(), this->m_child_row_ids[i].raw(), this->m_child_n[i], this->m_child_num_halos[i], (mat_value_type *) diag); // Wait for kernel to finish before overwriting host buffer cudaEventRecord(event); cudaEventSynchronize(event); } else { zero_copy_row_lengths_ids_offsets<mat_value_type> <<< grid_size, cta_size>>>(this->m_old_row_offsets_CONS.raw(), this->A->row_offsets.raw(), this->m_row_ids_CONS.raw(), n, total_num_halos, (mat_value_type *) diag); cudaEventRecord(event); cudaEventSynchronize(event); } } cudaCheckError(); cudaEventDestroy(event); } // If root partition //TODO: is this necessary comms->barrier(); } //3. root does a exclusive_scan if (is_root_partition) { cudaEvent_t event; cudaEventCreate(&event); // Populate the halo rows with diagonal, increase the length of the halo rows thrust::fill(this->A->row_offsets.begin() + halo_offsets[0], this->A->row_offsets.begin() + halo_offsets[root_num_cons_neighbors], 1); thrust::exclusive_scan(this->A->row_offsets.begin(), this->A->row_offsets.end(), this->A->row_offsets.begin()); cudaEventRecord(event); cudaEventSynchronize(event); cudaCheckError(); this->A->set_initialized(0); this->A->delProps(DIAG); // We always insert the diagonal this->A->delProps(COO); // No COO this->A->setColsReorderedByColor(false); // Cols not reordered by color int nnz = this->A->row_offsets[root_num_rows]; // This is a device to host copy this->A->resize(root_num_rows, root_num_rows, nnz, block_dimx, block_dimy); this->A->set_num_nz(nnz); // num_nz doesn't include halo rows //this->A->set_initialized(1); cudaEventDestroy(event); } else { this->A->set_initialized(0); this->A->resize( 0, 0, 0, block_dimx, block_dimy ); this->A->delProps(DIAG); // We always insert the diagonal this->A->delProps(COO); // No COO this->A->setColsReorderedByColor(false); // Cols not reordered by color //this->A->set_initialized(1); } if (useCudaIpc) { // ---------------------------------------------- // 4. 
Do ipc consolidation of values and columns // ---------------------------------------------- // Child partition waits for parent to create row_offsets this->ipcWaitForRoot(is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms); void *root_col_ptr = (void *) this->A->col_indices.raw(); void *root_val_ptr = (void *) this->A->values.raw(); this->ipcExchangePtr(root_col_ptr, is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms); this->ipcExchangePtr(root_val_ptr, is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms); int cta_size2 = 128; int grid_size2 = min(4096, (n + cta_size2 - 1) / cta_size2); ipc_consolidation_upload_matrix<mat_value_type> <<< grid_size2, cta_size2>>>(n, this->m_row_ids_CONS.raw(), this->m_old_row_offsets_CONS.raw(), ( (int *) root_row_ptr ) /*IPC*/, col_indices_hd, ( (int *) root_col_ptr) /*IPC*/, data_hd, ( (mat_value_type *) root_val_ptr ) /*IPC*/, diag_hd, bsize); cudaCheckError(); // Root partition waits for children to upload their matrices this->ipcWaitForChildren(is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms); cudaCheckError(); // Child partitions close their mem handle (they are done upload data) if (!is_root_partition) { cudaIpcCloseMemHandle(root_row_ptr); cudaIpcCloseMemHandle(root_val_ptr); cudaIpcCloseMemHandle(root_col_ptr); } } else // If cudaIpcNotAvailable { if (!is_root_partition) { int dummy; void *col_indices_to_send = this->getHostPointerForData((void *)col_indices, sizeof(index_type) * nnz, &dummy); comms->send_raw_data(col_indices_to_send, nnz * sizeof(int), my_destination_part, 10000 + my_id); void *data_to_send = this->getHostPointerForData((void *)data, sizeof(mat_value_type) * nnz, &dummy); comms->send_raw_data(data_to_send, nnz * bsize * sizeof(mat_value_type), my_destination_part, 10001 + my_id); if (diag != NULL) { void *diag_to_send = this->getHostPointerForData((void *)diag, sizeof(mat_value_type) * n, &dummy); comms->send_raw_data(diag_to_send, n * bsize * sizeof(mat_value_type), my_destination_part, 10002 + my_id); } } else { cudaEvent_t event; cudaEventCreate(&event); //TODO: Could use streams here int *child_col_indices; mat_value_type *child_data; mat_value_type *child_diag = NULL; cudaHostAlloc( (void **) &child_col_indices, this->m_child_max_nnz * sizeof(int), cudaHostAllocMapped); cudaHostAlloc( (void **) &child_data, this->m_child_max_nnz * bsize * sizeof(mat_value_type), cudaHostAllocMapped); if (diag != NULL) { cudaHostAlloc( (void **) &child_diag, (this->m_child_max_n)*bsize * sizeof(mat_value_type), cudaHostAllocMapped); } for (int i = 0; i < num_parts_to_consolidate; i++) { int current_part = parts_to_consolidate[i]; int cta_size2 = 128; int grid_size2 = min(4096, (this->m_child_n[i] + cta_size2 - 1) / cta_size2); if (current_part != my_id) { comms->recv_raw_data(child_col_indices, this->m_child_nnz[i]*sizeof(int), current_part, 10000 + current_part); comms->recv_raw_data(child_data, this->m_child_nnz[i]*bsize * sizeof(mat_value_type), current_part, 10001 + current_part); if (diag != NULL) { comms->recv_raw_data(child_diag, this->m_child_n[i]*bsize * sizeof(mat_value_type), current_part, 10002 + current_part); } int *child_col_indices_hd; mat_value_type *child_data_hd; mat_value_type *child_diag_hd = NULL; cudaHostGetDevicePointer(&child_col_indices_hd, child_col_indices, 0); cudaHostGetDevicePointer(&child_data_hd, child_data, 0); 
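// The child_* staging buffers were allocated with cudaHostAllocMapped, so the
// cudaHostGetDevicePointer calls here return device-visible aliases of the pinned host
// memory: the ipc_consolidation_upload_matrix launch below reads the received child data
// over zero-copy rather than through an explicit host-to-device cudaMemcpy, and the
// cudaEventRecord/cudaEventSynchronize pair that follows it ensures the kernel has
// finished before the same host buffers are overwritten with the next child's data.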
if (diag != NULL) { cudaHostGetDevicePointer(&child_diag_hd, child_diag, 0); } ipc_consolidation_upload_matrix<mat_value_type> <<< grid_size2, cta_size2>>>(this->m_child_n[i], this->m_child_row_ids[i].raw(), this->m_child_old_row_offsets[i].raw(), this->A->row_offsets.raw(), child_col_indices_hd, this->A->col_indices.raw(), child_data_hd, this->A->values.raw(), child_diag_hd, bsize); // Wait for kernel to finish before overwriting host buffer cudaEventRecord(event); cudaEventSynchronize(event); } else { ipc_consolidation_upload_matrix<mat_value_type> <<< grid_size2, cta_size2>>>(n, this->m_row_ids_CONS.raw(), this->m_old_row_offsets_CONS.raw(), this->A->row_offsets.raw(), col_indices_hd, this->A->col_indices.raw(), data_hd, this->A->values.raw(), diag_hd, bsize); cudaEventRecord(event); cudaEventSynchronize(event); } } cudaCheckError(); cudaEventDestroy(event); cudaFreeHost(child_col_indices); cudaFreeHost(child_data); if (diag != NULL) { cudaFreeHost(child_diag); } } // If root partition //TODO: is this necessary comms->barrier(); } // Store the original fine level communicator this->m_is_fine_level_root_partition = is_root_partition; this->m_my_fine_level_destination_part = my_destination_part; // Create a clone of the original communicator this->m_fine_level_comms = comms; //this->_comms is the same pointer that this->m_fine_level_comms right now, so we can overwrite this->_comms, but make sure that we release m_fine_level_cons this->_comms = this->m_fine_level_comms->CloneSubComm(cons_part_to_part, is_root_partition); // this->_comms will be empty comm for non-root partition and new comm for root ranks only if root partition this->m_fine_level_id = my_id; if (is_root_partition) { int cta_size = 128; int grid_size3 = min(4096, ( (root_num_rows - halo_offsets[0]) + cta_size - 1) / cta_size); if (grid_size3 != 0) { set_halo_cols_values <<< grid_size3, cta_size>>>(this->A->row_offsets.raw(), this->A->col_indices.raw(), this->A->values.raw(), halo_offsets[0], root_num_rows, bsize); cudaCheckError(); } int my_cons_id = part_to_cons_part[my_id]; this->_global_id = my_cons_id; this->_num_interior_nodes = total_interior_rows_in_merged; this->_num_boundary_nodes = total_boundary_rows_in_merged; for (int i = 0; i < root_num_cons_neighbors; i++) { root_cons_neighbors[i] = part_to_cons_part[root_cons_neighbors[i]]; } this->_comms->set_neighbors(root_num_cons_neighbors); this->neighbors = root_cons_neighbors; this->halo_offsets = halo_offsets; // H2D copy of halo offsets this->m_num_fine_level_parts_to_consolidate = num_parts_to_consolidate; this->m_fine_level_parts_to_consolidate = parts_to_consolidate; this->set_num_halo_rings(num_rings); this->set_num_halo_rows(halo_offsets[root_num_cons_neighbors] - halo_offsets[0]); // B2L_maps has already been copied this->B2L_rings.resize(root_num_cons_neighbors); for (int i = 0; i < root_num_cons_neighbors; i++) { this->B2L_rings[i].resize(2); this->B2L_rings[i][0] = 0; this->B2L_rings[i][1] = cons_B2L_maps[i].size(); } this->set_initialized(this->A->row_offsets); this->A->set_initialized(0); this->A->delProps(DIAG); this->A->diag.resize(root_num_rows); this->A->computeDiagonal(); // this->A->setView(OWNED); cudaEventCreate(&(this->comm_event)); this->A->set_initialized(1); } else { this->neighbors.resize(0); this->halo_offsets.resize(0); } /* free memory (if needed) */ if (col_alloc) { cudaFree(col_indices_hd); } if (data_alloc) { cudaFree(data_hd); } if (diag_alloc) { cudaFree(diag_hd); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision 
t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::replaceMatrixCoefficientsNoCons(int n, int nnz, const mat_value_type *data_pinned, const mat_value_type *diag_pinned) { //matrix parameters //int num_nnz = this->A->get_num_nz(); int num_rows = this->halo_offsets[0]; int total_rows = num_rows + this->num_halo_rows(); int block_size = this->A->get_block_size(); mat_value_type *data_hd = NULL; mat_value_type *diag_hd = NULL; int data_alloc = 0; int diag_alloc = 0; //cuda parameters int num_blocks = min(4096, (num_rows + 127) / 128); /* WARNING: the number of non-zero elements (nnz) in the array data_pinned and A->values (num_nnz) might be different at this point. 1. If the matrix has the CSR property and therefore the diagonal is included in the matrix, these values will be the same. 2. If the matrix has the DIAG property and therefore the diagonal is originally stored separately, and later appended to the array of values, and subsequently inserted into the matrix, then num_nnz = nnz + n. We have to account for this fact when replacing the coefficients (and use nnz not num_nnz). obs.: see calls to computeDiagonal (matrix.cu), AMGX_matrix_upload and AMGX_replace_coefficients (amgx_c.cu), and uploadMatrix and replaceMatrixCoefficients[No|With]Cons (distributed_manager.cu) for details. */ /* check early exit */ if ((this->neighbors.size() == 0 || this->renumbering.size() == 0) && !this->m_is_fine_level_glued) { return; } cudaCheckError(); /* allocate data and diag if they are not pinned */ data_hd = (mat_value_type *) this->getDevicePointerForData((void *)data_pinned, nnz * block_size * sizeof(mat_value_type), &data_alloc); if (diag_pinned != NULL) { diag_hd = (mat_value_type *) this->getDevicePointerForData((void *)diag_pinned, num_rows * block_size * sizeof(mat_value_type), &diag_alloc); } /* replace the values (reordering them if needed) */ if (insertDiagonals && diag_pinned != NULL) { replace_values_matrix <32> <<< num_blocks, 512>>>(data_hd, diag_hd, this->old_row_offsets.raw(), this->A->row_offsets.raw(), this->A->values.raw(), this->renumbering.raw(), block_size, num_rows); } else { replace_values_matrix <32> <<< num_blocks, 512>>>(data_hd, this->old_row_offsets.raw(), this->A->row_offsets.raw(), this->A->values.raw(), this->renumbering.raw(), block_size, num_rows); if (diag_pinned != NULL) { reorder_vector_values <<< num_blocks, 512>>>(this->A->values.raw() + this->A->row_offsets[total_rows]*block_size, diag_hd, this->renumbering.raw(), block_size, num_rows); } } cudaCheckError(); /* free memory (if needed) */ if (data_alloc) { cudaFree(data_hd); } if (diag_alloc) { cudaFree(diag_hd); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::replaceMatrixCoefficientsWithCons(int n, int nnz, const mat_value_type *data_pinned, const mat_value_type *diag_pinned) { //matrix parameters //int num_nnz = this->A->get_num_nz(); /* WARNING: in consolidation, for non-root partitions, halo_offsets might be NULL due to the call halo_offsets.resize(0); at the end of the routine uploadMatrix->consolidateAndUploadAll. We should use the parameter n instead of this->halo_offsets[0] for num_rows.
*/ int num_rows = n; int block_size = this->A->get_block_size(); mat_value_type *data_hd = NULL; mat_value_type *diag_hd = NULL; int data_alloc = 0; int diag_alloc = 0; data_hd = (mat_value_type *) this->getDevicePointerForData((void *)data_pinned, nnz * block_size * sizeof(mat_value_type), &data_alloc); if (diag_pinned != NULL) { diag_hd = (mat_value_type *) this->getDevicePointerForData((void *)diag_pinned, num_rows * block_size * sizeof(mat_value_type), &diag_alloc); } bool useCudaIpc = this->m_use_cuda_ipc_consolidation; if (useCudaIpc) { // Child partitions wait for root to be done this->ipcWaitForRoot(this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms()); cudaCheckError(); void *root_row_ptr = (void *) this->A->row_offsets.raw(); void *root_val_ptr = (void *) this->A->values.raw(); this->ipcExchangePtr(root_row_ptr, this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms()); this->ipcExchangePtr(root_val_ptr, this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms()); // replace the values, insert the diagonal int ncons = this->m_old_row_offsets_CONS.size() - 1; int cta_size = 128; int grid_size2 = min(4096, (ncons + cta_size - 1) / cta_size); ipc_consolidation_replace_values<mat_value_type> <<< grid_size2, cta_size>>>(ncons, this->m_row_ids_CONS.raw(), this->m_old_row_offsets_CONS.raw(), ( (int *) root_row_ptr )/*IPC*/, data_hd, ( (mat_value_type *) root_val_ptr )/*IPC*/, diag_hd, this->A->get_block_size() ); cudaCheckError(); // Root partition wait for child to be done replacing their values this->ipcWaitForChildren(this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms()); cudaCheckError(); if (!this->m_is_fine_level_root_partition) { cudaIpcCloseMemHandle(root_row_ptr); cudaIpcCloseMemHandle(root_val_ptr); } } else // cudaIpcNotAvailable { if (this->m_is_fine_level_consolidated) // aggregation { int bsize = this->A->get_block_size(); int ncons = this->m_old_row_offsets_CONS.size() - 1; if (!this->m_is_fine_level_root_partition) { int dummy; int nnzcons = this->m_old_nnz_CONS; void *data_to_send = this->getHostPointerForData((void *)data_pinned, nnzcons * bsize * sizeof(mat_value_type), &dummy); this->getFineLevelComms()->send_raw_data(data_to_send, nnzcons * bsize * sizeof(mat_value_type), this->m_my_fine_level_destination_part, 10001 + this->fine_level_id()); if (diag_pinned != NULL) { void *diag_to_send = this->getHostPointerForData((void *)diag_pinned, ncons * bsize * sizeof(mat_value_type), &dummy); this->getFineLevelComms()->send_raw_data(diag_to_send, ncons * bsize * sizeof(mat_value_type), this->m_my_fine_level_destination_part, 10002 + this->fine_level_id()); } } else { cudaEvent_t event; cudaEventCreate(&event); //TODO: Could use streams here mat_value_type *child_data; mat_value_type *child_diag = NULL; cudaHostAlloc( (void **) &child_data, this->m_child_max_nnz * bsize * sizeof(mat_value_type), cudaHostAllocMapped); if (diag_pinned != NULL) { 
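// The optional external diagonal is staged the same way as child_data above: a
// zero-copy (cudaHostAllocMapped) buffer sized for the largest child partition,
// i.e. m_child_max_n rows of bsize values each.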
cudaHostAlloc( (void **) &child_diag, (this->m_child_max_n)*bsize * sizeof(mat_value_type), cudaHostAllocMapped); } for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++) { int current_part = this->m_fine_level_parts_to_consolidate[i]; int cta_size2 = 128; int grid_size2 = min(4096, (this->m_child_n[i] + cta_size2 - 1) / cta_size2); if (current_part != this->fine_level_id()) { this->getFineLevelComms()->recv_raw_data(child_data, this->m_child_nnz[i]*bsize * sizeof(mat_value_type), current_part, 10001 + current_part); if (diag_pinned != NULL) { this->getFineLevelComms()->recv_raw_data(child_diag, this->m_child_n[i]*bsize * sizeof(mat_value_type), current_part, 10002 + current_part); } mat_value_type *child_data_hd; mat_value_type *child_diag_hd = NULL; cudaHostGetDevicePointer(&child_data_hd, child_data, 0); if (diag_pinned != NULL) { cudaHostGetDevicePointer(&child_diag_hd, child_diag, 0); } ipc_consolidation_replace_values<mat_value_type> <<< grid_size2, cta_size2>>>(this->m_child_n[i], this->m_child_row_ids[i].raw(), this->m_child_old_row_offsets[i].raw(), this->A->row_offsets.raw(), child_data_hd, this->A->values.raw(), child_diag_hd, bsize); // Wait for kernel to finish before overwriting host buffer cudaEventRecord(event); cudaEventSynchronize(event); } else { ipc_consolidation_replace_values<mat_value_type> <<< grid_size2, cta_size2>>>(ncons, this->m_row_ids_CONS.raw(), this->m_old_row_offsets_CONS.raw(), this->A->row_offsets.raw(), data_hd, this->A->values.raw(), diag_hd, bsize); //cudaEventRecord(event); //cudaEventSynchronize(event); } } cudaCheckError(); cudaEventDestroy(event); cudaFreeHost(child_data); if (diag_pinned != NULL) { cudaFreeHost(child_diag); } } // If root partition //TODO: is this necessary this->getFineLevelComms()->barrier(); } //agg else if (this->m_is_fine_level_glued) // classical { int bsize = this->A->get_block_size(); int ncons = this->m_old_row_offsets_CONS.size() - 1; IVector_h nnz_off; nnz_off.resize(this->getConsolidationArrayOffsets().size()); IVector_h nnz_array; nnz_array.resize(this->getConsolidationArrayOffsets().size() - 1); this->getFineLevelComms()->all_gather( nnz, nnz_array, this->getConsolidationArrayOffsets().size() - 1); nnz_off[0] = 0; for (int i = 0; i < nnz_array.size(); i++) { nnz_off[i + 1] = nnz_off[i] + nnz_array[i]; } if (!this->m_is_fine_level_root_partition) { int dummy; void *data_to_send = this->getHostPointerForData((void *)data_pinned, nnz * bsize * sizeof(mat_value_type), &dummy); this->getFineLevelComms()->send_raw_data( data_to_send, nnz * bsize * sizeof(mat_value_type), this->m_my_fine_level_destination_part, 10001 + this->fine_level_id()); if (diag_pinned != NULL) { void *diag_to_send = this->getHostPointerForData((void *)diag_pinned, n * sizeof(mat_value_type), &dummy); this->getFineLevelComms()->send_raw_data( diag_to_send, n * bsize * sizeof(mat_value_type), this->m_my_fine_level_destination_part, 10002 + this->fine_level_id()); //diag.resize(0); cudaCheckError(); } //values.resize(0); cudaCheckError(); } else { //TODO: Could use streams here mat_value_type *child_data; mat_value_type *child_diag = NULL; // Assumes partions have been glued already this->A->getNnzForView(OWNED, &nnz); cudaHostAlloc( (void **) &child_data, nnz * bsize * sizeof(mat_value_type), cudaHostAllocMapped); if (diag_pinned != NULL) { cudaHostAlloc( (void **) &child_diag, this->halo_offsets[this->neighbors.size()]*bsize * sizeof(mat_value_type), cudaHostAllocMapped); } // roots copy their data memcpy ( &child_data[0], data_pinned, 
nnz_array[this->fine_level_id()]*sizeof(value_type)); if (diag_pinned != NULL) { memcpy ( &child_diag[0], diag_pinned, n * sizeof(value_type)); } for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++) { int current_part = this->m_fine_level_parts_to_consolidate[i]; int current_offset = nnz_off[current_part] - nnz_off[this->fine_level_id()] ; int current_nnz = nnz_array[current_part]; if (current_part != this->fine_level_id()) { this->getFineLevelComms()->recv_raw_data( &child_data[current_offset], current_nnz * bsize * sizeof(mat_value_type), current_part, 10001 + current_part); if (diag_pinned != NULL) this->getFineLevelComms()->recv_raw_data( &child_diag[this->getConsolidationArrayOffsets()[current_part] - this->getConsolidationArrayOffsets()[this->fine_level_id()]], (this->getConsolidationArrayOffsets()[current_part + 1] - this->getConsolidationArrayOffsets()[current_part])*bsize * sizeof(mat_value_type), current_part, 10002 + current_part); } } cudaCheckError(); // we can follow the usual upload path for raw data now // Assumes partions have been glued already int os; this->A->getOffsetAndSizeForView(OWNED, &os, &n); replaceMatrixCoefficientsNoCons( n, nnz, child_data, child_diag); cudaCheckError(); cudaFreeHost(child_data); if (diag_pinned != NULL) { cudaFreeHost(child_diag); } } // If root partition //TODO: is this necessary this->getFineLevelComms()->barrier(); } // cla } // not ipc this->A->setView(OWNED); /* free memory (if needed) */ if (data_alloc) { cudaFree(data_hd); } if (diag_alloc) { cudaFree(diag_hd); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::transformAndUploadVector(VVector_v &v, const void *data, int n, int block_dim) { if (this->isFineLevelConsolidated() || (this->isFineLevelGlued() && !this->isGlued())) { transformAndUploadVectorWithCons(v, data, n, block_dim); } else { v.resize(n * block_dim); cudaCheckError(); // Upload on host cudaMemcpy(v.raw(), (value_type *)data, n * block_dim * sizeof(value_type), cudaMemcpyDefault); cudaCheckError(); // Permute based on renumbering vector transformVector(v); int tag = 0; // Exchange halos this->exchange_halo(v, tag); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::transformAndUploadVectorWithCons(VVector_v &v, const void *data_pinned, int n, int block_dim) { if (v.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); } bool useCudaIpc = this->m_use_cuda_ipc_consolidation; this->getFineLevelComms()->barrier(); void *root_temp_ptr = NULL; VVector_v temp; if (this->m_is_fine_level_root_partition && !this->m_is_fine_level_glued ) { temp.resize(this->halo_offsets[this->neighbors.size()]*v.get_block_size(), types::util<value_type>::get_zero()); temp.set_block_dimx(v.get_block_dimx()); temp.set_block_dimy(v.get_block_dimy()); root_temp_ptr = (void *) temp.raw(); } cudaCheckError(); int data_alloc = 0; value_type *data_hd = NULL; if (!this->m_is_fine_level_glued ) { data_hd = (value_type *) this->getDevicePointerForData((void *)data_pinned, n * block_dim * sizeof(value_type), &data_alloc); } if (useCudaIpc) { // Do IPC this->ipcExchangePtr(root_temp_ptr, this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, 
this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms()); cudaCheckError(); int num_blocks = min(4096, (n + 511) / 512); reorder_vector_values <<< num_blocks, 512>>>( (value_type *) root_temp_ptr, data_hd, this->m_row_ids_CONS.raw(), v.get_block_size(), n); // Root partition waits for children to be done this->ipcWaitForChildren(this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms()); cudaCheckError(); if (!this->m_is_fine_level_root_partition) { cudaIpcCloseMemHandle(root_temp_ptr); } } else // If cudaIpcNotAvail { if (this->m_is_fine_level_consolidated) // aggregation { // Exchange the vector between root and child if (!this->m_is_fine_level_root_partition) { IVector_h size(1); size[0] = n; this->getFineLevelComms()->send_vector(size, this->m_my_fine_level_destination_part, 20000 + this->fine_level_id()); int dummy; void *data_to_send = this->getHostPointerForData((void *)data_pinned, n * v.get_block_size() * sizeof(value_type), &dummy); this->getFineLevelComms()->send_raw_data(data_to_send, n * v.get_block_size()*sizeof(value_type), this->m_my_fine_level_destination_part, 20001 + this->fine_level_id()); } else { cudaEvent_t event; cudaEventCreate(&event); IVector_h child_n(this->m_num_fine_level_parts_to_consolidate); int max_n = 0; for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++) { int current_part = this->m_fine_level_parts_to_consolidate[i]; if (current_part != this->fine_level_id()) { this->getFineLevelComms()->recv_vector(child_n, current_part, 20000 + current_part, i, 1); } else { child_n[i] = n; } if (child_n[i] > max_n) { max_n = child_n[i]; } } value_type *child_data; cudaHostAlloc( (void **) &child_data, max_n * v.get_block_size()*sizeof(value_type), cudaHostAllocMapped); value_type *child_data_hd; cudaHostGetDevicePointer(&child_data_hd, child_data, 0); for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++) { int current_part = this->m_fine_level_parts_to_consolidate[i]; int num_blocks = min(4096, (child_n[i] + 511) / 512); if (current_part != this->fine_level_id()) { this->getFineLevelComms()->recv_raw_data(&child_data[0], child_n[i]*v.get_block_size()*sizeof(value_type), current_part, 20001 + current_part); reorder_vector_values <<< num_blocks, 512>>>( (value_type *) root_temp_ptr, child_data_hd, this->m_child_row_ids[i].raw(), v.get_block_size(), child_n[i]); cudaEventRecord(event); cudaEventSynchronize(event); cudaCheckError(); } else { reorder_vector_values <<< num_blocks, 512>>>( (value_type *) root_temp_ptr, data_hd, this->m_row_ids_CONS.raw(), v.get_block_size(), n); } } // Loop over parts to consolidate cudaCheckError(); cudaEventDestroy(event); cudaFreeHost(child_data); } // If root partition } //agg else if (this->m_is_fine_level_glued) // cla { value_type *child_data = NULL; if (!this->m_is_fine_level_root_partition) { int dummy; void *data_to_send = this->getHostPointerForData((void *)data_pinned, n * v.get_block_size() * sizeof(value_type), &dummy); this->getFineLevelComms()->send_raw_data( data_to_send, n * v.get_block_size()*sizeof(value_type), this->m_my_fine_level_destination_part, 20001 + this->fine_level_id()); //v.resize(0); // just in case something resized it betwen iterations cudaCheckError(); } else { cudaHostAlloc( (void **) &child_data, this->halo_offsets[this->neighbors.size()]*v.get_block_size()*sizeof(value_type), 
cudaHostAllocMapped); value_type *child_data_hd; cudaHostGetDevicePointer(&child_data_hd, child_data, 0); // roots copy their data int dummy; void *my_data = this->getHostPointerForData((void *)data_pinned, n * v.get_block_size() * sizeof(value_type), &dummy); memcpy ( &child_data[0], data_pinned, n * v.get_block_size()*sizeof(value_type)); // Loop over parts to consolidate for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++) { int current_part = this->m_fine_level_parts_to_consolidate[i]; if (current_part != this->fine_level_id()) { this->getFineLevelComms()->recv_raw_data( &child_data[this->getConsolidationArrayOffsets()[current_part] - this->getConsolidationArrayOffsets()[this->fine_level_id()]], sizeof(value_type) * (this->getConsolidationArrayOffsets()[current_part + 1] - this->getConsolidationArrayOffsets()[current_part]), current_part, 20001 + current_part ); } } // usual path // Upload on host cudaMemcpy(v.raw(), (value_type *)child_data, v.size()* sizeof(value_type), cudaMemcpyDefault); cudaCheckError(); } // If root partition // Permute based on renumbering vector transformVector(v); cudaCheckError(); // Exchange halos int tag = 0; this->exchange_halo(v, tag); cudaCheckError(); v.set_unconsolidated_size(n); // free host if (child_data) { cudaFreeHost(child_data); } cudaCheckError(); } //cla } // If cudaIpcAvailable if (!this->m_is_fine_level_glued) // not needed for classcical { if (this->m_is_fine_level_root_partition) { v.swap(temp); int tag = 0; // Root partitions do the exchange this->exchange_halo(v, tag); } v.set_unconsolidated_size(n * v.get_block_size()); v.set_transformed(); } /* free memory (if needed) */ if (data_alloc) { cudaFree(data_hd); } cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::transformVector(VVector_v &v) { if (this->neighbors.size() == 0) { return; } else if (this->renumbering.size() == 0) { v.resize(this->halo_offsets[this->neighbors.size()]*v.get_block_size()); return; } if (v.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); } if (v.size() < this->halo_offsets[this->neighbors.size()]*v.get_block_size()) { VVector_v temp(this->halo_offsets[this->neighbors.size()]*v.get_block_size(), types::util<value_type>::get_zero()); temp.set_block_dimx(v.get_block_dimx()); temp.set_block_dimy(v.get_block_dimy()); if (v.size() < this->halo_offsets[0]*this->A->get_block_dimx()) { FatalError("Unknown size of input vector - smaller than the number of rows owned by this partition", AMGX_ERR_NOT_IMPLEMENTED); } //reorder based on row permutation int size = this->halo_offsets[0]; int num_blocks = min(4096, (size + 511) / 512); reorder_vector_values <<< num_blocks, 512>>>(temp.raw(), v.raw(), this->renumbering.raw(), v.get_block_size(), size); v.swap(temp); } else { VVector_v temp(this->halo_offsets[0]*v.get_block_size()); int size = this->halo_offsets[0]; int num_blocks = min(4096, (size + 511) / 512); reorder_vector_values <<< num_blocks, 512>>>(temp.raw(), v.raw(), this->renumbering.raw(), v.get_block_size(), size); thrust::copy(temp.begin(), temp.end(), v.begin()); } cudaCheckError(); v.set_transformed(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::revertVector(VVector_v &v) { if (this->neighbors.size() == 0 || 
this->renumbering.size() == 0) { return; } if (v.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); } VVector_v temp(this->halo_offsets[0]*this->A->get_block_dimx()); if (v.size() < this->halo_offsets[0]*v.get_block_size()) { FatalError("Unknown size of input vector - smaller than the number of rows owned by this partition", AMGX_ERR_NOT_IMPLEMENTED); } //reorder based on row permutation int size = this->halo_offsets[0]; int num_blocks = min(4096, (size + 511) / 512); inverse_reorder_vector_values <<< num_blocks, 512>>>(temp.raw(), v.raw(), this->renumbering.raw(), v.get_block_size(), size); //reorder_vector_values<<<num_blocks, 512>>>(temp.raw(), v.raw(), this->inverse_renumbering.raw(), v.get_block_size(), size); cudaCheckError(); v.resize(this->halo_offsets[0]*this->A->get_block_dimx()); thrust::copy(temp.begin(), temp.end(), v.begin()); cudaCheckError(); } template <class TConfig> void DistributedManagerBase<TConfig>::computeDestinationPartitions(INDEX_TYPE upper_threshold, float avg_size, const int num_parts, int &new_num_parts, bool &wantNeighbors) { m_destination_partitions.resize(num_parts); std::vector<int> dp(num_parts); if (avg_size < 1.f) { avg_size = 1.f; } // avoid floating point exception int wanted_num_fine_parts_to_consolidate = ( upper_threshold + (int) avg_size - 1) / ( (int) avg_size ); new_num_parts = (num_parts + wanted_num_fine_parts_to_consolidate - 1) / wanted_num_fine_parts_to_consolidate; for (int i = 0; i < num_parts; i++) { dp[i] = i % new_num_parts; } // example wantNeighbors = true -> destination_part = [0 0 0 0 4 4 4 4 8 8 8 8] // example wantNeighbors = false -> destination_part = [0 1 2 3 0 1 2 3 0 1 2 3] if (wantNeighbors) { std::sort (dp.begin(), dp.end()); m_destination_partitions[0] = 0; for (int i = 1; i < num_parts; i++) { if (dp[i - 1] < dp[i]) { m_destination_partitions[i] = i; } else { m_destination_partitions[i] = m_destination_partitions[i - 1]; } } } m_my_destination_part = m_destination_partitions[global_id()]; } template <class TConfig> void DistributedManagerBase<TConfig>::computeDestinationPartitionsWithCons(int my_id, int num_parts, IVector_h &destination_part, DistributedComms<TConfig> *comms) { int device_id = this->A->getResources()->getDevice(0); std::string my_hostname_tmp; comms->get_hostname(my_hostname_tmp); // Append PCI-E ID to string cudaDeviceProp dev_prop; cudaGetDeviceProperties(&dev_prop, device_id); std::stringstream s; s << my_hostname_tmp << "_" << dev_prop.pciBusID << "_" << dev_prop.pciDeviceID; std::string my_hostname(s.str()); std::vector<std::string> hostnames; comms->exchange_hostnames(my_hostname, hostnames, num_parts); std::vector<std::string>::iterator low = std::find( hostnames.begin(), hostnames.end(), my_hostname ); int my_destination_part = low - hostnames.begin(); // Do a gather into destination_part comms->all_gather(my_destination_part, destination_part, num_parts); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::revertAndDownloadVector(VVector_v &v_in, const void *data, int n, int block_dimy) { if (this->isFineLevelConsolidated() || this->isFineLevelGlued()) { revertAndDownloadVectorWithCons(v_in, data, n, block_dimy); } else { if ( n == 0 ) { FatalError("Cannot download if size = 0", AMGX_ERR_NOT_IMPLEMENTED); } if (data == NULL ) { FatalError("Cannot download to a NULL pointer", AMGX_ERR_NOT_IMPLEMENTED); } if (v_in.size() == 0 ) { 
FatalError("Cannot download an empty vector", AMGX_ERR_NOT_IMPLEMENTED); } VVector_v v_out; revertVector(v_in, v_out); cudaMemcpy((value_type *)data, v_out.raw(), n * block_dimy * sizeof(value_type), cudaMemcpyDefault); cudaCheckError(); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::revertVector(VVector_v &v_in, VVector_v &v_out) { if (!this->isFineLevelGlued() && this->neighbors.size() == 0 || this->renumbering.size() == 0) { return;} if (v_in.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); } if (v_in.size() < this->halo_offsets[0]*v_in.get_block_size()) { FatalError("Unknown size of input vector - smaller than the number of rows owned by this partition", AMGX_ERR_NOT_IMPLEMENTED); } int size = this->halo_offsets[0]; if (v_out.size() != size * this->A->get_block_dimx()) { v_out.resize(size * this->A->get_block_dimx()); } //reorder based on row permutation int num_blocks = min(4096, (size + 511) / 512); inverse_reorder_vector_values <<< num_blocks, 512>>>(v_out.raw(), v_in.raw(), this->renumbering.raw(), v_in.get_block_size(), size); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::revertAndDownloadVectorWithCons(VVector_v &v_in, const void *data_pinned, int n, int block_dimy) { if (v_in.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); } void *root_v_ptr = NULL; int size = v_in.get_unconsolidated_size(); int num_rows = size / v_in.get_block_size(); if (this->m_is_fine_level_root_partition) { root_v_ptr = (void *) v_in.raw(); } VVector_v temp; temp.set_block_dimx(v_in.get_block_dimx()); temp.set_block_dimy(v_in.get_block_dimy()); temp.resize(size); bool useCudaIpc = this->m_use_cuda_ipc_consolidation; if (useCudaIpc) { // Do IPC this->ipcExchangePtr(root_v_ptr, this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms()); // Children partition waits for parent to be done updating vector this->ipcWaitForRoot(this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms()); cudaCheckError(); //reorder based on row permutation int num_blocks = min(4096, (num_rows + 511) / 512); inverse_reorder_vector_values <<< num_blocks, 512>>>( temp.raw(), (value_type *) root_v_ptr, this->m_row_ids_CONS.raw(), v_in.get_block_size(), num_rows); cudaCheckError(); if (!this->m_is_fine_level_root_partition) { cudaIpcCloseMemHandle(root_v_ptr); } } else { if (this->m_is_fine_level_consolidated) // aggregation { if (this->m_is_fine_level_root_partition) { IVector_h child_n(this->m_num_fine_level_parts_to_consolidate); int max_n = 0; for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++) { int current_part = this->m_fine_level_parts_to_consolidate[i]; if (current_part != this->fine_level_id()) { this->getFineLevelComms()->recv_vector(child_n, current_part, 30000 + current_part, i, 1); } else { child_n[i] = num_rows; } if (child_n[i] > max_n) { max_n = child_n[i]; } } // Resize temp vector VVector_v child_temp;; child_temp.resize(max_n * 
v_in.get_block_size()); for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++) { int current_part = this->m_fine_level_parts_to_consolidate[i]; // Pack the vector to be sent int num_blocks = min(4096, (child_n[i] + 511) / 512); if (current_part != this->fine_level_id()) { inverse_reorder_vector_values <<< num_blocks, 512>>>( child_temp.raw(), (value_type *) root_v_ptr, this->m_child_row_ids[i].raw(), v_in.get_block_size(), child_n[i]); this->getFineLevelComms()->send_vector(child_temp, current_part, 30001 + current_part, 0, child_n[i]*v_in.get_block_size()); } else { inverse_reorder_vector_values <<< num_blocks, 512>>>( temp.raw(), (value_type *) root_v_ptr, this->m_row_ids_CONS.raw(), v_in.get_block_size(), child_n[i]); } } cudaCheckError(); } else { IVector_h size(1); size[0] = num_rows; this->getFineLevelComms()->send_vector(size, this->m_my_fine_level_destination_part, 30000 + this->fine_level_id()); this->getFineLevelComms()->recv_vector(temp, this->m_my_fine_level_destination_part, 30001 + this->fine_level_id()); } } else if (this->m_is_fine_level_glued) // classical { if (this->m_is_fine_level_root_partition) { temp.resize(v_in.size()); revertVector(v_in, temp); cudaCheckError(); for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++) { int current_part = this->m_fine_level_parts_to_consolidate[i]; if (current_part != this->fine_level_id()) { this->getFineLevelComms()->send_vector( temp, current_part, current_part + 30001, this->getConsolidationArrayOffsets()[current_part] - this->getConsolidationArrayOffsets()[this->fine_level_id()], this->getConsolidationArrayOffsets()[current_part + 1] - this->getConsolidationArrayOffsets()[current_part] ); cudaCheckError(); } } } else { this->getFineLevelComms()->recv_vector(temp, this->m_my_fine_level_destination_part, 30001 + this->fine_level_id()); cudaCheckError(); } temp.resize(this->getConsolidationArrayOffsets()[this->fine_level_id() + 1] - this->getConsolidationArrayOffsets()[this->fine_level_id()]); cudaCheckError(); } } // Copy on host cudaMemcpy((value_type *)data_pinned, temp.raw(), temp.size() * sizeof(value_type), cudaMemcpyDefault); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::transformAndUploadVector(VVector_v &v, const void *data, int n, int block_dim) { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::transformVector(VVector_v &v) { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::transformAndUploadVectorWithCons(VVector_v &v, const void *data, int n, int block_dim) { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::revertAndDownloadVector(VVector_v &v, const void *data, int n, int 
block_dim) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::revertVector(VVector_v &v_in, VVector_v &v_out) { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::revertVector(VVector_v &v) { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::revertAndDownloadVectorWithCons(VVector_v &v_in, const void *data, int n, int block_dim) { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::reorder_matrix() { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::reorder_matrix_owned() { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::obtain_shift_l2g_reordering(index_type n, I64Vector_h &l2g, IVector_h &p, IVector_h &q) { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::unpack_partition(index_type *Bp, index_type *Bc, mat_value_type *Bv) { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::generatePoisson7pt(int nx, int ny, int nz, int P, int Q, int R) { FatalError("GeneratePoisson7pt only implemented on devices", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> template <typename t_colIndex> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributedMatrix( int num_rows, int num_nonzeros, const int block_dimx, const int block_dimy, const int *row_offsets, const t_colIndex *col_indices, const mat_value_type *values, int num_ranks, int num_rows_global, const void *diag, const MatrixDistribution &dist) { FatalError("loadDistributedMatrix only implemented on devices", AMGX_ERR_NOT_IMPLEMENTED); } template 
<AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::createOneRingB2Lmaps() { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::createOneRingHaloRows() { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::replaceMatrixCoefficientsNoCons(int n, int nnz, const mat_value_type *data, const mat_value_type *diag) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::replaceMatrixCoefficientsWithCons(int n, int nnz, const mat_value_type *data, const mat_value_type *diag) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::createRenumbering(IVector &renumbering) { if (this->neighbors.size() > 0) { FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED); } } template < class TConfig > void DistributedManagerBase<TConfig>::remove_boundary(IVector_h &flagArray, IVector_h &B2L_map, int size) { for (int i = 0; i < size; i++) { flagArray[B2L_map[i]] = 0; } } template < class TConfig > void DistributedManagerBase<TConfig>::get_unassigned(IVector_h &flagArray, IVector_h &B2L_map, IVector_h &partition_flags, int size, int fa_size/*, int rank*/) { for (int i = 0; i < size; i++) { if (B2L_map[i] < fa_size) { if (flagArray[B2L_map[i]] == 0) { flagArray[B2L_map[i]] = 1; partition_flags[i] = 1; } } } } template < class TConfig > void DistributedManagerBase<TConfig>::set_unassigned(IVector_h &partition_flags, IVector_h &partition_renum, IVector_h &B2L_map, IVector_h &renumbering, int size, int max_element, int renum_size/*, int rank*/) { for (int i = 0; i < size; i++) { if (B2L_map[i] < renum_size) { if (partition_flags[i] == 1) { renumbering[B2L_map[i]] = max_element + partition_renum[i]; } B2L_map[i] = renumbering[B2L_map[i]]; } } } /* print manager for target rank to a file or stdout */ template<class TConfig> void DistributedManagerBase<TConfig>::print(char *f, char *s, int trank) { DistributedManagerBase<TConfig> *m = this; int rank = 0; int level = 0; char filename[1024]; FILE *fid = NULL; int i, j, k, t1, t2; #ifdef AMGX_WITH_MPI MPI_Comm_rank(MPI_COMM_WORLD, &rank); #endif //check target rank if (rank == trank) { //check whether to output to stdout or a file if (f == NULL) { fid = stdout; } else { level = m->A->amg_level_index; #ifdef _WIN32 _snprintf_s(filename, 1024, 1024, "%s_r%d_l%d.mtx", f, rank, level); #else snprintf(filename, 1024, "%s_r%d_l%d.mtx", f, rank, level); #endif fid = fopen(filename, "w"); } cudaDeviceSynchronize(); cudaCheckError(); fprintf(fid, "%s\n", s); //--- communication info --- //compare neighbors t1 
= m->neighbors.size(); fprintf(fid, "neighbors %d\n", t1); for (i = 0; i < t1; i++) { k = m->neighbors[i]; fprintf(fid, "%d\n", k); } //compare B2L_rings t1 = B2L_rings.size(); fprintf(fid, "B2L_rings %d\n", t1); for (i = 0; i < t1; i++) { t2 = m->B2L_rings[i].size(); fprintf(fid, "B2L_rings-%d [%d]\n", i, t2); for (j = 0; j < t2; j++) { k = m->B2L_rings[i][j]; fprintf(fid, "%d\n", k); } } //compare B2L_maps t1 = B2L_maps.size(); fprintf(fid, "B2L_maps %d\n", t1); for (i = 0; i < t1; i++) { t2 = m->B2L_maps[i].size(); fprintf(fid, "B2L_maps-%d [%d]\n", i, t2); for (j = 0; j < t2; j++) { k = m->B2L_maps[i][j]; fprintf(fid, "%d\n", k); } } //compare L2H_maps t1 = L2H_maps.size(); fprintf(fid, "L2H_maps %d\n", t1); for (i = 0; i < t1; i++) { t2 = m->L2H_maps[i].size(); fprintf(fid, "L2H_maps-%d [%d]\n", i, t2); for (j = 0; j < t2; j++) { k = m->L2H_maps[i][j]; fprintf(fid, "%d\n", k); } } //--- matrix info --- fprintf(fid, "num_rows_global=%ld\n", num_rows_global); fprintf(fid, "_num_rows_interior=%d\n", m->_num_rows_interior); fprintf(fid, "_num_rows_owned=%d\n", m->_num_rows_owned); fprintf(fid, "_num_rows_full=%d\n", m->_num_rows_full); fprintf(fid, "_num_rows_all=%d\n", m->_num_rows_all); fprintf(fid, "_num_nz_interior=%d\n", m->_num_nz_interior); fprintf(fid, "_num_nz_owned=%d\n", m->_num_nz_owned); fprintf(fid, "_num_nz_full=%d\n", m->_num_nz_full); fprintf(fid, "_num_nz_all=%d\n", m->_num_nz_all); //compare # halo rows and halo offsets fprintf(fid, "# halo rings %d and rows %d\n", m->num_halo_rings(), m->num_halo_rows()); t1 = m->halo_offsets.size(); fprintf(fid, "halo_offsets %d\n", t1); for (i = 0; i < t1; i++) { k = m->halo_offsets[i]; fprintf(fid, "%d\n", k); } //compare halo ranges t1 = m->halo_ranges.size(); fprintf(fid, "halo_ranges %d\n", t1); for (i = 0; i < t1; i++) { k = m->halo_ranges[i]; fprintf(fid, "%d\n", k); } //compare halo ranges (host) t1 = m->halo_ranges_h.size(); fprintf(fid, "halo_ranges_h %d\n", t1); for (i = 0; i < t1; i++) { k = m->halo_ranges_h[i]; fprintf(fid, "%d\n", k); } //compare part offsets t1 = m->part_offsets.size(); fprintf(fid, "part_offsets %d\n", t1); for (i = 0; i < t1; i++) { k = m->part_offsets[i]; fprintf(fid, "%d\n", k); } //compare part offsets (host) t1 = m->part_offsets_h.size(); fprintf(fid, "part_offsets_h %d\n", t1); for (i = 0; i < t1; i++) { k = m->part_offsets_h[i]; fprintf(fid, "%d\n", k); } //compare interior row list t1 = m->interior_rows_list.size(); fprintf(fid, "interior_rows_list %d\n", t1); for (i = 0; i < t1; i++) { k = m->interior_rows_list[i]; fprintf(fid, "%d\n", k); } //compare boundary row list t1 = m->boundary_rows_list.size(); fprintf(fid, "boundary_rows_list %d\n", t1); for (i = 0; i < t1; i++) { k = m->boundary_rows_list[i]; fprintf(fid, "%d\n", k); } //compare halo1 row list t1 = m->halo1_rows_list.size(); fprintf(fid, "halo1_rows_list %d\n", t1); for (i = 0; i < t1; i++) { k = m->halo1_rows_list[i]; fprintf(fid, "%d\n", k); } fprintf(fid, "pointers halo_rows=%p and halo_btl=%p\n", m->halo_rows, m->halo_btl); //--- packing info --- //compare local to global map t1 = m->local_to_global_map.size(); fprintf(fid, "local_to_global_map %d\n", t1); for (i = 0; i < t1; i++) { k = m->local_to_global_map[i]; fprintf(fid, "%d\n", k); } //compare renumbering t1 = m->renumbering.size(); fprintf(fid, "renumbering %d\n", t1); for (i = 0; i < t1; i++) { k = m->renumbering[i]; fprintf(fid, "%d\n", k); } //compare inverse renumbering t1 = m->inverse_renumbering.size(); fprintf(fid, "inverse_renumbering %d\n", t1); for (i = 0; i < 
t1; i++) { k = m->inverse_renumbering[i]; fprintf(fid, "%d\n", k); } //--- GPU related and miscellaneous info //streams fprintf(fid, "streams i=%p, b=%p\n", m->get_int_stream(), m->get_bdy_stream()); //miscellaneous info int64_t bi = m->base_index(); //inlined function int np = m->get_num_partitions(); //inlined function int rp = (int)m->isRootPartition(); //cast from boolean to int fprintf(fid, "gid=%d,bi=%ld,np=%d,rp=%d,ir=%d,in=%d,bn=%d\n", m->global_id(), bi, np, rp, m->index_range(), m->num_interior_nodes(), m->num_boundary_nodes()); cudaDeviceSynchronize(); cudaGetLastError(); if (fid != stdout) { fclose(fid); } } } /* print manager for target rank to a file or stdout (for all ranks) */ template<class TConfig> void DistributedManagerBase<TConfig>::printToFile(char *f, char *s) { DistributedManagerBase<TConfig> *m = this; int rank = 0; #ifdef AMGX_WITH_MPI MPI_Comm_rank(MPI_COMM_WORLD, &rank); #endif //notice that print will be called with different (target) rank on different ranks/processes m->print(f, s, rank); } /* compare two managers */ template<class TConfig> int DistributedManagerBase<TConfig>::compare(DistributedManagerBase<TConfig> *m2) { DistributedManagerBase<TConfig> *m1 = this; int i, j, t1, t2; //compare neighbors t1 = m1->neighbors.size(); t2 = m2->neighbors.size(); if (t1 != t2) { return 1; } for (i = 0; i < t1; i++) { if (m1->neighbors[i] != m2->neighbors[i]) { return 2; } } //compare B2L_rings for (i = 0; i < (m1->neighbors.size()); i++) { t1 = m1->B2L_rings[i].size(); t2 = m2->B2L_rings[i].size(); if (t1 != t2) { return 3; } for (j = 0; j < t1; j++) { if (m1->B2L_rings[i][j] != m2->B2L_rings[i][j]) { return 4; } } } //compare B2L_maps t1 = m1->B2L_maps.size(); t2 = m2->B2L_maps.size(); if (t1 != t2) { return 5; } for (i = 0; i < t1; i++) { if (m1->B2L_maps[i] != m2->B2L_maps[i]) { return 6; } } //compare L2H_maps t1 = m1->L2H_maps.size(); t2 = m2->L2H_maps.size(); if (t1 != t2) { return 7; } for (i = 0; i < t1; i++) { if (m1->L2H_maps[i] != m2->L2H_maps[i]) { return 8; } } return 0; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> DistributedManager< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::~DistributedManager< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >() { } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> DistributedManager< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::~DistributedManager< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >() { } template <class TConfig> void DistributedManagerBase<TConfig>::consolidateB2Lmaps(IVector_h_vector &dest_coarse_B2L_maps, IVector_h_vector &coarse_B2L_maps, IVector_h &fine_neigh_to_coarse_neigh, int num_coarse_neighbors, int num_fine_neighbors) { consB2Lmaps(dest_coarse_B2L_maps, coarse_B2L_maps, fine_neigh_to_coarse_neigh, num_coarse_neighbors, num_fine_neighbors); } template <class TConfig> void DistributedManagerBase<TConfig>::consolidateB2Lmaps(IVector_d_vector &dest_coarse_B2L_maps, IVector_d_vector &coarse_B2L_maps, IVector_h &fine_neigh_to_coarse_neigh, int num_coarse_neighbors, int num_fine_neighbors) { consB2Lmaps(dest_coarse_B2L_maps, coarse_B2L_maps, fine_neigh_to_coarse_neigh, num_coarse_neighbors, num_fine_neighbors); } template <class TConfig> void DistributedManagerBase<TConfig>::consolidateB2LmapsOnRoot(int &num_consolidated_neighbors, IVector_h_vector &consolidated_B2L_maps, IVector_h &consolidated_coarse_ids, IVector_h_vector 
&dest_coarse_B2L_maps, IVector_h &coarse_neigh_to_fine_part, IVector_h &num_bdy_per_coarse_neigh, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int my_id, int my_destination_part, bool is_root_partition, int num_coarse_neighbors, DistributedComms<TConfig> *comms) { consB2LmapsOnRoot(num_consolidated_neighbors, consolidated_B2L_maps, consolidated_coarse_ids, dest_coarse_B2L_maps, coarse_neigh_to_fine_part, num_bdy_per_coarse_neigh, fine_parts_to_consolidate, num_fine_parts_to_consolidate, my_id, my_destination_part, is_root_partition, num_coarse_neighbors, comms); } template <class TConfig> void DistributedManagerBase<TConfig>::consolidateB2LmapsOnRoot(int &num_consolidated_neighbors, IVector_d_vector &consolidated_B2L_maps, IVector_h &consolidated_coarse_ids, IVector_d_vector &dest_coarse_B2L_maps, IVector_h &coarse_neigh_to_fine_part, IVector_h &num_bdy_per_coarse_neigh, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int my_id, int my_destination_part, bool is_root_partition, int num_coarse_neighbors, DistributedComms<TConfig> *comms) { consB2LmapsOnRoot(num_consolidated_neighbors, consolidated_B2L_maps, consolidated_coarse_ids, dest_coarse_B2L_maps, coarse_neigh_to_fine_part, num_bdy_per_coarse_neigh, fine_parts_to_consolidate, num_fine_parts_to_consolidate, my_id, my_destination_part, is_root_partition, num_coarse_neighbors, comms); } /**************************************** * Explicit instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class DistributedManager<TemplateMode<CASE>::Type >; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template void DistributedManager<TemplateMode<CASE>::Type>::loadDistributedMatrix( \ int, int, const int, const int, const int*, const int *col_indices, const mat_value_type*, int, int, const void*, const MatrixDistribution &dist); \ template void DistributedManager<TemplateMode<CASE>::Type>::loadDistributedMatrix( \ int, int, const int, const int, const int*, const int64_t *col_indices, const mat_value_type*, int, int, const void*, const MatrixDistribution &dist); AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class DistributedManagerBase<TemplateMode<CASE>::Type >; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace amgx
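// Worked example and sketch (illustrative only, not used by AMGX, hence guarded out) of the
// destination-partition mapping computed by DistributedManagerBase<TConfig>::computeDestinationPartitions
// above for the wantNeighbors == true case. The function name and the sample inputs below are
// assumptions chosen for illustration.
#if 0
#include <algorithm>
#include <vector>
static std::vector<int> sketch_destination_partitions(int upper_threshold, float avg_size, int num_parts)
{
    if (avg_size < 1.f) { avg_size = 1.f; } // avoid division by zero, as in the original
    int wanted_per_root = (upper_threshold + (int) avg_size - 1) / ((int) avg_size);
    int new_num_parts = (num_parts + wanted_per_root - 1) / wanted_per_root;
    std::vector<int> dp(num_parts), dest(num_parts);
    for (int i = 0; i < num_parts; i++) { dp[i] = i % new_num_parts; }
    std::sort(dp.begin(), dp.end());
    dest[0] = 0;
    for (int i = 1; i < num_parts; i++)
    {
        dest[i] = (dp[i - 1] < dp[i]) ? i : dest[i - 1]; // first rank of each run becomes the root
    }
    // e.g. upper_threshold = 12, avg_size = 3, num_parts = 12:
    //   wanted_per_root = (12 + 3 - 1) / 3 = 4, new_num_parts = (12 + 4 - 1) / 4 = 3,
    //   sorted dp = [0 0 0 0 1 1 1 1 2 2 2 2]  ->  dest = [0 0 0 0 4 4 4 4 8 8 8 8]
    return dest;
}
#endif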
upfirdn_2d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. #define EIGEN_USE_GPU #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/shape_inference.h" #include <stdio.h> using namespace tensorflow; using namespace tensorflow::shape_inference; //------------------------------------------------------------------------ // Helpers. #define OP_CHECK_CUDA_ERROR(CTX, CUDA_CALL) do { hipError_t err = CUDA_CALL; OP_REQUIRES(CTX, err == hipSuccess, errors::Internal(hipGetErrorName(err))); } while (false) static __host__ __device__ __forceinline__ int floorDiv(int a, int b) { int t = 1 - a / b; return (a + t * b) / b - t; } //------------------------------------------------------------------------ // CUDA kernel params. template <class T> struct UpFirDn2DKernelParams { const T* x; // [majorDim, inH, inW, minorDim] const T* k; // [kernelH, kernelW] T* y; // [majorDim, outH, outW, minorDim] int upx; int upy; int downx; int downy; int padx0; int padx1; int pady0; int pady1; int majorDim; int inH; int inW; int minorDim; int kernelH; int kernelW; int outH; int outW; int loopMajor; int loopX; }; //------------------------------------------------------------------------ // General CUDA implementation for large filter kernels. template <class T> static __global__ void UpFirDn2DKernel_large(const UpFirDn2DKernelParams<T> p) { // Calculate thread index. int minorIdx = blockIdx.x * blockDim.x + threadIdx.x; int outY = minorIdx / p.minorDim; minorIdx -= outY * p.minorDim; int outXBase = blockIdx.y * p.loopX * blockDim.y + threadIdx.y; int majorIdxBase = blockIdx.z * p.loopMajor; if (outXBase >= p.outW || outY >= p.outH || majorIdxBase >= p.majorDim) return; // Setup Y receptive field. int midY = outY * p.downy + p.upy - 1 - p.pady0; int inY = min(max(floorDiv(midY, p.upy), 0), p.inH); int h = min(max(floorDiv(midY + p.kernelH, p.upy), 0), p.inH) - inY; int kernelY = midY + p.kernelH - (inY + 1) * p.upy; // Loop over majorDim and outX. for (int loopMajor = 0, majorIdx = majorIdxBase; loopMajor < p.loopMajor && majorIdx < p.majorDim; loopMajor++, majorIdx++) for (int loopX = 0, outX = outXBase; loopX < p.loopX && outX < p.outW; loopX++, outX += blockDim.y) { // Setup X receptive field. int midX = outX * p.downx + p.upx - 1 - p.padx0; int inX = min(max(floorDiv(midX, p.upx), 0), p.inW); int w = min(max(floorDiv(midX + p.kernelW, p.upx), 0), p.inW) - inX; int kernelX = midX + p.kernelW - (inX + 1) * p.upx; // Initialize pointers. const T* xp = &p.x[((majorIdx * p.inH + inY) * p.inW + inX) * p.minorDim + minorIdx]; const T* kp = &p.k[kernelY * p.kernelW + kernelX]; int xpx = p.minorDim; int kpx = -p.upx; int xpy = p.inW * p.minorDim; int kpy = -p.upy * p.kernelW; // Inner loop. float v = 0.0f; for (int y = 0; y < h; y++) { for (int x = 0; x < w; x++) { v += (float)(*xp) * (float)(*kp); xp += xpx; kp += kpx; } xp += xpy - w * xpx; kp += kpy - w * kpx; } // Store result. 
p.y[((majorIdx * p.outH + outY) * p.outW + outX) * p.minorDim + minorIdx] = (T)v; } } //------------------------------------------------------------------------ // Specialized CUDA implementation for small filter kernels. template <class T, int upx, int upy, int downx, int downy, int kernelW, int kernelH, int tileOutW, int tileOutH> static __global__ void UpFirDn2DKernel_small(const UpFirDn2DKernelParams<T> p) { //assert(kernelW % upx == 0); //assert(kernelH % upy == 0); const int tileInW = ((tileOutW - 1) * downx + kernelW - 1) / upx + 1; const int tileInH = ((tileOutH - 1) * downy + kernelH - 1) / upy + 1; __shared__ volatile float sk[kernelH][kernelW]; __shared__ volatile float sx[tileInH][tileInW]; // Calculate tile index. int minorIdx = blockIdx.x; int tileOutY = minorIdx / p.minorDim; minorIdx -= tileOutY * p.minorDim; tileOutY *= tileOutH; int tileOutXBase = blockIdx.y * p.loopX * tileOutW; int majorIdxBase = blockIdx.z * p.loopMajor; if (tileOutXBase >= p.outW | tileOutY >= p.outH | majorIdxBase >= p.majorDim) return; // Load filter kernel (flipped). for (int tapIdx = threadIdx.x; tapIdx < kernelH * kernelW; tapIdx += blockDim.x) { int ky = tapIdx / kernelW; int kx = tapIdx - ky * kernelW; float v = 0.0f; if (kx < p.kernelW & ky < p.kernelH) v = (float)p.k[(p.kernelH - 1 - ky) * p.kernelW + (p.kernelW - 1 - kx)]; sk[ky][kx] = v; } // Loop over majorDim and outX. for (int loopMajor = 0, majorIdx = majorIdxBase; loopMajor < p.loopMajor & majorIdx < p.majorDim; loopMajor++, majorIdx++) for (int loopX = 0, tileOutX = tileOutXBase; loopX < p.loopX & tileOutX < p.outW; loopX++, tileOutX += tileOutW) { // Load input pixels. int tileMidX = tileOutX * downx + upx - 1 - p.padx0; int tileMidY = tileOutY * downy + upy - 1 - p.pady0; int tileInX = floorDiv(tileMidX, upx); int tileInY = floorDiv(tileMidY, upy); __syncthreads(); for (int inIdx = threadIdx.x; inIdx < tileInH * tileInW; inIdx += blockDim.x) { int relInY = inIdx / tileInW; int relInX = inIdx - relInY * tileInW; int inX = relInX + tileInX; int inY = relInY + tileInY; float v = 0.0f; if (inX >= 0 & inY >= 0 & inX < p.inW & inY < p.inH) v = (float)p.x[((majorIdx * p.inH + inY) * p.inW + inX) * p.minorDim + minorIdx]; sx[relInY][relInX] = v; } // Loop over output pixels. __syncthreads(); for (int outIdx = threadIdx.x; outIdx < tileOutH * tileOutW; outIdx += blockDim.x) { int relOutY = outIdx / tileOutW; int relOutX = outIdx - relOutY * tileOutW; int outX = relOutX + tileOutX; int outY = relOutY + tileOutY; // Setup receptive field. int midX = tileMidX + relOutX * downx; int midY = tileMidY + relOutY * downy; int inX = floorDiv(midX, upx); int inY = floorDiv(midY, upy); int relInX = inX - tileInX; int relInY = inY - tileInY; int kernelX = (inX + 1) * upx - midX - 1; // flipped int kernelY = (inY + 1) * upy - midY - 1; // flipped // Inner loop. float v = 0.0f; #pragma unroll for (int y = 0; y < kernelH / upy; y++) #pragma unroll for (int x = 0; x < kernelW / upx; x++) v += sx[relInY + y][relInX + x] * sk[kernelY + y * upy][kernelX + x * upx]; // Store result. if (outX < p.outW & outY < p.outH) p.y[((majorIdx * p.outH + outY) * p.outW + outX) * p.minorDim + minorIdx] = (T)v; } } } //------------------------------------------------------------------------ // TensorFlow op. 
template <class T> struct UpFirDn2DOp : public OpKernel { UpFirDn2DKernelParams<T> m_attribs; UpFirDn2DOp(OpKernelConstruction* ctx) : OpKernel(ctx) { memset(&m_attribs, 0, sizeof(m_attribs)); OP_REQUIRES_OK(ctx, ctx->GetAttr("upx", &m_attribs.upx)); OP_REQUIRES_OK(ctx, ctx->GetAttr("upy", &m_attribs.upy)); OP_REQUIRES_OK(ctx, ctx->GetAttr("downx", &m_attribs.downx)); OP_REQUIRES_OK(ctx, ctx->GetAttr("downy", &m_attribs.downy)); OP_REQUIRES_OK(ctx, ctx->GetAttr("padx0", &m_attribs.padx0)); OP_REQUIRES_OK(ctx, ctx->GetAttr("padx1", &m_attribs.padx1)); OP_REQUIRES_OK(ctx, ctx->GetAttr("pady0", &m_attribs.pady0)); OP_REQUIRES_OK(ctx, ctx->GetAttr("pady1", &m_attribs.pady1)); OP_REQUIRES(ctx, m_attribs.upx >= 1 && m_attribs.upy >= 1, errors::InvalidArgument("upx and upy must be at least 1x1")); OP_REQUIRES(ctx, m_attribs.downx >= 1 && m_attribs.downy >= 1, errors::InvalidArgument("downx and downy must be at least 1x1")); } void Compute(OpKernelContext* ctx) { UpFirDn2DKernelParams<T> p = m_attribs; hipStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream(); const Tensor& x = ctx->input(0); // [majorDim, inH, inW, minorDim] const Tensor& k = ctx->input(1); // [kernelH, kernelW] p.x = x.flat<T>().data(); p.k = k.flat<T>().data(); OP_REQUIRES(ctx, x.dims() == 4, errors::InvalidArgument("input must have rank 4")); OP_REQUIRES(ctx, k.dims() == 2, errors::InvalidArgument("kernel must have rank 2")); OP_REQUIRES(ctx, x.NumElements() <= kint32max, errors::InvalidArgument("input too large")); OP_REQUIRES(ctx, k.NumElements() <= kint32max, errors::InvalidArgument("kernel too large")); p.majorDim = (int)x.dim_size(0); p.inH = (int)x.dim_size(1); p.inW = (int)x.dim_size(2); p.minorDim = (int)x.dim_size(3); p.kernelH = (int)k.dim_size(0); p.kernelW = (int)k.dim_size(1); OP_REQUIRES(ctx, p.kernelW >= 1 && p.kernelH >= 1, errors::InvalidArgument("kernel must be at least 1x1")); p.outW = (p.inW * p.upx + p.padx0 + p.padx1 - p.kernelW + p.downx) / p.downx; p.outH = (p.inH * p.upy + p.pady0 + p.pady1 - p.kernelH + p.downy) / p.downy; OP_REQUIRES(ctx, p.outW >= 1 && p.outH >= 1, errors::InvalidArgument("output must be at least 1x1")); Tensor* y = NULL; // [majorDim, outH, outW, minorDim] TensorShape ys; ys.AddDim(p.majorDim); ys.AddDim(p.outH); ys.AddDim(p.outW); ys.AddDim(p.minorDim); OP_REQUIRES_OK(ctx, ctx->allocate_output(0, ys, &y)); p.y = y->flat<T>().data(); OP_REQUIRES(ctx, y->NumElements() <= kint32max, errors::InvalidArgument("output too large")); // Choose CUDA kernel to use. 
void* cudaKernel = (void*)UpFirDn2DKernel_large<T>; int tileOutW = -1; int tileOutH = -1; if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 7 && p.kernelH <= 7 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 7,7, 64,16>; tileOutW = 64; tileOutH = 16; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 6 && p.kernelH <= 6 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 6,6, 64,16>; tileOutW = 64; tileOutH = 16; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 5 && p.kernelH <= 5 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 5,5, 64,16>; tileOutW = 64; tileOutH = 16; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 4 && p.kernelH <= 4 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 4,4, 64,16>; tileOutW = 64; tileOutH = 16; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 3 && p.kernelH <= 3 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 3,3, 64,16>; tileOutW = 64; tileOutH = 16; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 24 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 24,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 20 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 20,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 16 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 16,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 12 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 12,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 8 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 8,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 24) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 1,24, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 20) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 1,20, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 16) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 1,16, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 12) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 1,12, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 8 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 1,8, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 2 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 8 && p.kernelH <= 8 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,2, 1,1, 8,8, 64,16>; tileOutW = 64; tileOutH = 16; } if (p.upx == 2 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 6 && p.kernelH <= 6 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,2, 1,1, 6,6, 64,16>; tileOutW = 64; tileOutH = 16; } if (p.upx == 2 && p.upy == 2 && 
p.downx == 1 && p.downy == 1 && p.kernelW <= 4 && p.kernelH <= 4 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,2, 1,1, 4,4, 64,16>; tileOutW = 64; tileOutH = 16; } if (p.upx == 2 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 2 && p.kernelH <= 2 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,2, 1,1, 2,2, 64,16>; tileOutW = 64; tileOutH = 16; } if (p.upx == 2 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 24 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,1, 1,1, 24,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 2 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 20 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,1, 1,1, 20,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 2 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 16 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,1, 1,1, 16,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 2 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 12 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,1, 1,1, 12,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 2 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 8 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,1, 1,1, 8,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 1 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 24) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,2, 1,1, 1,24, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 1 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 20) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,2, 1,1, 1,20, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 1 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 16) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,2, 1,1, 1,16, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 1 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 12) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,2, 1,1, 1,12, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 1 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 8 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,2, 1,1, 1,8, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 2 && p.kernelW <= 8 && p.kernelH <= 8 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,2, 8,8, 32,8 >; tileOutW = 32; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 2 && p.kernelW <= 6 && p.kernelH <= 6 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,2, 6,6, 32,8 >; tileOutW = 32; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 2 && p.kernelW <= 4 && p.kernelH <= 4 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,2, 4,4, 32,8 >; tileOutW = 32; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 2 && p.kernelW <= 2 && p.kernelH <= 2 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,2, 2,2, 32,8 >; tileOutW = 32; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 1 && p.kernelW <= 24 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,1, 24,1, 64,8 >; tileOutW = 64; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 1 && p.kernelW <= 20 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,1, 
20,1, 64,8 >; tileOutW = 64; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 1 && p.kernelW <= 16 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,1, 16,1, 64,8 >; tileOutW = 64; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 1 && p.kernelW <= 12 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,1, 12,1, 64,8 >; tileOutW = 64; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 1 && p.kernelW <= 8 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,1, 8,1, 64,8 >; tileOutW = 64; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 2 && p.kernelW <= 1 && p.kernelH <= 24) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,2, 1,24, 32,16>; tileOutW = 32; tileOutH = 16; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 2 && p.kernelW <= 1 && p.kernelH <= 20) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,2, 1,20, 32,16>; tileOutW = 32; tileOutH = 16; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 2 && p.kernelW <= 1 && p.kernelH <= 16) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,2, 1,16, 32,16>; tileOutW = 32; tileOutH = 16; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 2 && p.kernelW <= 1 && p.kernelH <= 12) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,2, 1,12, 32,16>; tileOutW = 32; tileOutH = 16; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 2 && p.kernelW <= 1 && p.kernelH <= 8 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,2, 1,8, 32,16>; tileOutW = 32; tileOutH = 16; } // Choose launch params. dim3 blockSize; dim3 gridSize; if (tileOutW > 0 && tileOutH > 0) // small { p.loopMajor = (p.majorDim - 1) / 16384 + 1; p.loopX = 1; blockSize = dim3(32 * 8, 1, 1); gridSize = dim3(((p.outH - 1) / tileOutH + 1) * p.minorDim, (p.outW - 1) / (p.loopX * tileOutW) + 1, (p.majorDim - 1) / p.loopMajor + 1); } else // large { p.loopMajor = (p.majorDim - 1) / 16384 + 1; p.loopX = 4; blockSize = dim3(4, 32, 1); gridSize = dim3((p.outH * p.minorDim - 1) / blockSize.x + 1, (p.outW - 1) / (p.loopX * blockSize.y) + 1, (p.majorDim - 1) / p.loopMajor + 1); } // Launch HIP kernel (hipLaunchKernel is the HIP counterpart of cudaLaunchKernel; the rest of this hipified file already uses the hip* runtime API). void* args[] = {&p}; OP_CHECK_CUDA_ERROR(ctx, hipLaunchKernel(cudaKernel, gridSize, blockSize, args, 0, stream)); } }; REGISTER_OP("UpFirDn2D") .Input ("x: T") .Input ("k: T") .Output ("y: T") .Attr ("T: {float, half}") .Attr ("upx: int = 1") .Attr ("upy: int = 1") .Attr ("downx: int = 1") .Attr ("downy: int = 1") .Attr ("padx0: int = 0") .Attr ("padx1: int = 0") .Attr ("pady0: int = 0") .Attr ("pady1: int = 0"); REGISTER_KERNEL_BUILDER(Name("UpFirDn2D").Device(DEVICE_GPU).TypeConstraint<float>("T"), UpFirDn2DOp<float>); REGISTER_KERNEL_BUILDER(Name("UpFirDn2D").Device(DEVICE_GPU).TypeConstraint<Eigen::half>("T"), UpFirDn2DOp<Eigen::half>);
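// ---------------------------------------------------------------------------
// Illustrative reference (not part of the op): both the large and the small
// kernels above compute the classic upfirdn primitive -- zero-insert upsample
// by (upx, upy), zero-pad by (padx0, padx1, pady0, pady1), filter with the 2D
// FIR kernel k, then keep every (downx, downy)-th sample -- which is where the
// shape formula outW = (inW*upx + padx0 + padx1 - kernelW + downx) / downx
// used in Compute() comes from. The single-threaded sketch below spells out
// that indexing for one [inH, inW] plane; upfirdn2d_reference is a name
// invented for this sketch only.
// ---------------------------------------------------------------------------
#include <vector>

std::vector<float> upfirdn2d_reference(const std::vector<float>& x, int inH, int inW,
                                       const std::vector<float>& k, int kernelH, int kernelW,
                                       int upx, int upy, int downx, int downy,
                                       int padx0, int padx1, int pady0, int pady1,
                                       int& outH, int& outW)
{
    // Same output-shape formula as the op's Compute() method.
    outW = (inW * upx + padx0 + padx1 - kernelW + downx) / downx;
    outH = (inH * upy + pady0 + pady1 - kernelH + downy) / downy;
    std::vector<float> y(static_cast<size_t>(outH) * outW, 0.0f);

    for (int outY = 0; outY < outH; outY++)
    for (int outX = 0; outX < outW; outX++)
    {
        float v = 0.0f;
        // Visit every tap of the flipped filter in the upsampled, padded grid.
        for (int ky = 0; ky < kernelH; ky++)
        for (int kx = 0; kx < kernelW; kx++)
        {
            // Coordinates in the upsampled-and-padded input touched by this tap.
            int upY = outY * downy + (kernelH - 1 - ky) - pady0;
            int upX = outX * downx + (kernelW - 1 - kx) - padx0;
            // Only positions landing exactly on an original sample are non-zero.
            if (upY < 0 || upX < 0 || upY % upy != 0 || upX % upx != 0) continue;
            int inY = upY / upy;
            int inX = upX / upx;
            if (inY >= inH || inX >= inW) continue;
            v += x[static_cast<size_t>(inY) * inW + inX] * k[static_cast<size_t>(ky) * kernelW + kx];
        }
        y[static_cast<size_t>(outY) * outW + outX] = v;
    }
    return y;
}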
upfirdn_2d.cu
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. #define EIGEN_USE_GPU #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/shape_inference.h" #include <stdio.h> using namespace tensorflow; using namespace tensorflow::shape_inference; //------------------------------------------------------------------------ // Helpers. #define OP_CHECK_CUDA_ERROR(CTX, CUDA_CALL) do { cudaError_t err = CUDA_CALL; OP_REQUIRES(CTX, err == cudaSuccess, errors::Internal(cudaGetErrorName(err))); } while (false) static __host__ __device__ __forceinline__ int floorDiv(int a, int b) { int t = 1 - a / b; return (a + t * b) / b - t; } //------------------------------------------------------------------------ // CUDA kernel params. template <class T> struct UpFirDn2DKernelParams { const T* x; // [majorDim, inH, inW, minorDim] const T* k; // [kernelH, kernelW] T* y; // [majorDim, outH, outW, minorDim] int upx; int upy; int downx; int downy; int padx0; int padx1; int pady0; int pady1; int majorDim; int inH; int inW; int minorDim; int kernelH; int kernelW; int outH; int outW; int loopMajor; int loopX; }; //------------------------------------------------------------------------ // General CUDA implementation for large filter kernels. template <class T> static __global__ void UpFirDn2DKernel_large(const UpFirDn2DKernelParams<T> p) { // Calculate thread index. int minorIdx = blockIdx.x * blockDim.x + threadIdx.x; int outY = minorIdx / p.minorDim; minorIdx -= outY * p.minorDim; int outXBase = blockIdx.y * p.loopX * blockDim.y + threadIdx.y; int majorIdxBase = blockIdx.z * p.loopMajor; if (outXBase >= p.outW || outY >= p.outH || majorIdxBase >= p.majorDim) return; // Setup Y receptive field. int midY = outY * p.downy + p.upy - 1 - p.pady0; int inY = min(max(floorDiv(midY, p.upy), 0), p.inH); int h = min(max(floorDiv(midY + p.kernelH, p.upy), 0), p.inH) - inY; int kernelY = midY + p.kernelH - (inY + 1) * p.upy; // Loop over majorDim and outX. for (int loopMajor = 0, majorIdx = majorIdxBase; loopMajor < p.loopMajor && majorIdx < p.majorDim; loopMajor++, majorIdx++) for (int loopX = 0, outX = outXBase; loopX < p.loopX && outX < p.outW; loopX++, outX += blockDim.y) { // Setup X receptive field. int midX = outX * p.downx + p.upx - 1 - p.padx0; int inX = min(max(floorDiv(midX, p.upx), 0), p.inW); int w = min(max(floorDiv(midX + p.kernelW, p.upx), 0), p.inW) - inX; int kernelX = midX + p.kernelW - (inX + 1) * p.upx; // Initialize pointers. const T* xp = &p.x[((majorIdx * p.inH + inY) * p.inW + inX) * p.minorDim + minorIdx]; const T* kp = &p.k[kernelY * p.kernelW + kernelX]; int xpx = p.minorDim; int kpx = -p.upx; int xpy = p.inW * p.minorDim; int kpy = -p.upy * p.kernelW; // Inner loop. float v = 0.0f; for (int y = 0; y < h; y++) { for (int x = 0; x < w; x++) { v += (float)(*xp) * (float)(*kp); xp += xpx; kp += kpx; } xp += xpy - w * xpx; kp += kpy - w * kpx; } // Store result. 
p.y[((majorIdx * p.outH + outY) * p.outW + outX) * p.minorDim + minorIdx] = (T)v; } } //------------------------------------------------------------------------ // Specialized CUDA implementation for small filter kernels. template <class T, int upx, int upy, int downx, int downy, int kernelW, int kernelH, int tileOutW, int tileOutH> static __global__ void UpFirDn2DKernel_small(const UpFirDn2DKernelParams<T> p) { //assert(kernelW % upx == 0); //assert(kernelH % upy == 0); const int tileInW = ((tileOutW - 1) * downx + kernelW - 1) / upx + 1; const int tileInH = ((tileOutH - 1) * downy + kernelH - 1) / upy + 1; __shared__ volatile float sk[kernelH][kernelW]; __shared__ volatile float sx[tileInH][tileInW]; // Calculate tile index. int minorIdx = blockIdx.x; int tileOutY = minorIdx / p.minorDim; minorIdx -= tileOutY * p.minorDim; tileOutY *= tileOutH; int tileOutXBase = blockIdx.y * p.loopX * tileOutW; int majorIdxBase = blockIdx.z * p.loopMajor; if (tileOutXBase >= p.outW | tileOutY >= p.outH | majorIdxBase >= p.majorDim) return; // Load filter kernel (flipped). for (int tapIdx = threadIdx.x; tapIdx < kernelH * kernelW; tapIdx += blockDim.x) { int ky = tapIdx / kernelW; int kx = tapIdx - ky * kernelW; float v = 0.0f; if (kx < p.kernelW & ky < p.kernelH) v = (float)p.k[(p.kernelH - 1 - ky) * p.kernelW + (p.kernelW - 1 - kx)]; sk[ky][kx] = v; } // Loop over majorDim and outX. for (int loopMajor = 0, majorIdx = majorIdxBase; loopMajor < p.loopMajor & majorIdx < p.majorDim; loopMajor++, majorIdx++) for (int loopX = 0, tileOutX = tileOutXBase; loopX < p.loopX & tileOutX < p.outW; loopX++, tileOutX += tileOutW) { // Load input pixels. int tileMidX = tileOutX * downx + upx - 1 - p.padx0; int tileMidY = tileOutY * downy + upy - 1 - p.pady0; int tileInX = floorDiv(tileMidX, upx); int tileInY = floorDiv(tileMidY, upy); __syncthreads(); for (int inIdx = threadIdx.x; inIdx < tileInH * tileInW; inIdx += blockDim.x) { int relInY = inIdx / tileInW; int relInX = inIdx - relInY * tileInW; int inX = relInX + tileInX; int inY = relInY + tileInY; float v = 0.0f; if (inX >= 0 & inY >= 0 & inX < p.inW & inY < p.inH) v = (float)p.x[((majorIdx * p.inH + inY) * p.inW + inX) * p.minorDim + minorIdx]; sx[relInY][relInX] = v; } // Loop over output pixels. __syncthreads(); for (int outIdx = threadIdx.x; outIdx < tileOutH * tileOutW; outIdx += blockDim.x) { int relOutY = outIdx / tileOutW; int relOutX = outIdx - relOutY * tileOutW; int outX = relOutX + tileOutX; int outY = relOutY + tileOutY; // Setup receptive field. int midX = tileMidX + relOutX * downx; int midY = tileMidY + relOutY * downy; int inX = floorDiv(midX, upx); int inY = floorDiv(midY, upy); int relInX = inX - tileInX; int relInY = inY - tileInY; int kernelX = (inX + 1) * upx - midX - 1; // flipped int kernelY = (inY + 1) * upy - midY - 1; // flipped // Inner loop. float v = 0.0f; #pragma unroll for (int y = 0; y < kernelH / upy; y++) #pragma unroll for (int x = 0; x < kernelW / upx; x++) v += sx[relInY + y][relInX + x] * sk[kernelY + y * upy][kernelX + x * upx]; // Store result. if (outX < p.outW & outY < p.outH) p.y[((majorIdx * p.outH + outY) * p.outW + outX) * p.minorDim + minorIdx] = (T)v; } } } //------------------------------------------------------------------------ // TensorFlow op. 
template <class T> struct UpFirDn2DOp : public OpKernel { UpFirDn2DKernelParams<T> m_attribs; UpFirDn2DOp(OpKernelConstruction* ctx) : OpKernel(ctx) { memset(&m_attribs, 0, sizeof(m_attribs)); OP_REQUIRES_OK(ctx, ctx->GetAttr("upx", &m_attribs.upx)); OP_REQUIRES_OK(ctx, ctx->GetAttr("upy", &m_attribs.upy)); OP_REQUIRES_OK(ctx, ctx->GetAttr("downx", &m_attribs.downx)); OP_REQUIRES_OK(ctx, ctx->GetAttr("downy", &m_attribs.downy)); OP_REQUIRES_OK(ctx, ctx->GetAttr("padx0", &m_attribs.padx0)); OP_REQUIRES_OK(ctx, ctx->GetAttr("padx1", &m_attribs.padx1)); OP_REQUIRES_OK(ctx, ctx->GetAttr("pady0", &m_attribs.pady0)); OP_REQUIRES_OK(ctx, ctx->GetAttr("pady1", &m_attribs.pady1)); OP_REQUIRES(ctx, m_attribs.upx >= 1 && m_attribs.upy >= 1, errors::InvalidArgument("upx and upy must be at least 1x1")); OP_REQUIRES(ctx, m_attribs.downx >= 1 && m_attribs.downy >= 1, errors::InvalidArgument("downx and downy must be at least 1x1")); } void Compute(OpKernelContext* ctx) { UpFirDn2DKernelParams<T> p = m_attribs; cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream(); const Tensor& x = ctx->input(0); // [majorDim, inH, inW, minorDim] const Tensor& k = ctx->input(1); // [kernelH, kernelW] p.x = x.flat<T>().data(); p.k = k.flat<T>().data(); OP_REQUIRES(ctx, x.dims() == 4, errors::InvalidArgument("input must have rank 4")); OP_REQUIRES(ctx, k.dims() == 2, errors::InvalidArgument("kernel must have rank 2")); OP_REQUIRES(ctx, x.NumElements() <= kint32max, errors::InvalidArgument("input too large")); OP_REQUIRES(ctx, k.NumElements() <= kint32max, errors::InvalidArgument("kernel too large")); p.majorDim = (int)x.dim_size(0); p.inH = (int)x.dim_size(1); p.inW = (int)x.dim_size(2); p.minorDim = (int)x.dim_size(3); p.kernelH = (int)k.dim_size(0); p.kernelW = (int)k.dim_size(1); OP_REQUIRES(ctx, p.kernelW >= 1 && p.kernelH >= 1, errors::InvalidArgument("kernel must be at least 1x1")); p.outW = (p.inW * p.upx + p.padx0 + p.padx1 - p.kernelW + p.downx) / p.downx; p.outH = (p.inH * p.upy + p.pady0 + p.pady1 - p.kernelH + p.downy) / p.downy; OP_REQUIRES(ctx, p.outW >= 1 && p.outH >= 1, errors::InvalidArgument("output must be at least 1x1")); Tensor* y = NULL; // [majorDim, outH, outW, minorDim] TensorShape ys; ys.AddDim(p.majorDim); ys.AddDim(p.outH); ys.AddDim(p.outW); ys.AddDim(p.minorDim); OP_REQUIRES_OK(ctx, ctx->allocate_output(0, ys, &y)); p.y = y->flat<T>().data(); OP_REQUIRES(ctx, y->NumElements() <= kint32max, errors::InvalidArgument("output too large")); // Choose CUDA kernel to use. 
void* cudaKernel = (void*)UpFirDn2DKernel_large<T>; int tileOutW = -1; int tileOutH = -1; if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 7 && p.kernelH <= 7 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 7,7, 64,16>; tileOutW = 64; tileOutH = 16; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 6 && p.kernelH <= 6 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 6,6, 64,16>; tileOutW = 64; tileOutH = 16; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 5 && p.kernelH <= 5 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 5,5, 64,16>; tileOutW = 64; tileOutH = 16; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 4 && p.kernelH <= 4 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 4,4, 64,16>; tileOutW = 64; tileOutH = 16; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 3 && p.kernelH <= 3 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 3,3, 64,16>; tileOutW = 64; tileOutH = 16; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 24 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 24,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 20 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 20,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 16 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 16,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 12 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 12,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 8 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 8,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 24) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 1,24, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 20) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 1,20, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 16) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 1,16, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 12) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 1,12, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 8 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 1,8, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 2 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 8 && p.kernelH <= 8 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,2, 1,1, 8,8, 64,16>; tileOutW = 64; tileOutH = 16; } if (p.upx == 2 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 6 && p.kernelH <= 6 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,2, 1,1, 6,6, 64,16>; tileOutW = 64; tileOutH = 16; } if (p.upx == 2 && p.upy == 2 && 
p.downx == 1 && p.downy == 1 && p.kernelW <= 4 && p.kernelH <= 4 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,2, 1,1, 4,4, 64,16>; tileOutW = 64; tileOutH = 16; } if (p.upx == 2 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 2 && p.kernelH <= 2 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,2, 1,1, 2,2, 64,16>; tileOutW = 64; tileOutH = 16; } if (p.upx == 2 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 24 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,1, 1,1, 24,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 2 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 20 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,1, 1,1, 20,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 2 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 16 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,1, 1,1, 16,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 2 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 12 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,1, 1,1, 12,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 2 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 8 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,1, 1,1, 8,1, 128,8>; tileOutW = 128; tileOutH = 8; } if (p.upx == 1 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 24) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,2, 1,1, 1,24, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 1 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 20) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,2, 1,1, 1,20, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 1 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 16) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,2, 1,1, 1,16, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 1 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 12) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,2, 1,1, 1,12, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 1 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 1 && p.kernelH <= 8 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,2, 1,1, 1,8, 32,32>; tileOutW = 32; tileOutH = 32; } if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 2 && p.kernelW <= 8 && p.kernelH <= 8 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,2, 8,8, 32,8 >; tileOutW = 32; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 2 && p.kernelW <= 6 && p.kernelH <= 6 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,2, 6,6, 32,8 >; tileOutW = 32; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 2 && p.kernelW <= 4 && p.kernelH <= 4 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,2, 4,4, 32,8 >; tileOutW = 32; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 2 && p.kernelW <= 2 && p.kernelH <= 2 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,2, 2,2, 32,8 >; tileOutW = 32; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 1 && p.kernelW <= 24 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,1, 24,1, 64,8 >; tileOutW = 64; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 1 && p.kernelW <= 20 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,1, 
20,1, 64,8 >; tileOutW = 64; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 1 && p.kernelW <= 16 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,1, 16,1, 64,8 >; tileOutW = 64; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 1 && p.kernelW <= 12 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,1, 12,1, 64,8 >; tileOutW = 64; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 1 && p.kernelW <= 8 && p.kernelH <= 1 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,1, 8,1, 64,8 >; tileOutW = 64; tileOutH = 8; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 2 && p.kernelW <= 1 && p.kernelH <= 24) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,2, 1,24, 32,16>; tileOutW = 32; tileOutH = 16; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 2 && p.kernelW <= 1 && p.kernelH <= 20) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,2, 1,20, 32,16>; tileOutW = 32; tileOutH = 16; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 2 && p.kernelW <= 1 && p.kernelH <= 16) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,2, 1,16, 32,16>; tileOutW = 32; tileOutH = 16; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 2 && p.kernelW <= 1 && p.kernelH <= 12) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,2, 1,12, 32,16>; tileOutW = 32; tileOutH = 16; } if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 2 && p.kernelW <= 1 && p.kernelH <= 8 ) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,2, 1,8, 32,16>; tileOutW = 32; tileOutH = 16; } // Choose launch params. dim3 blockSize; dim3 gridSize; if (tileOutW > 0 && tileOutH > 0) // small { p.loopMajor = (p.majorDim - 1) / 16384 + 1; p.loopX = 1; blockSize = dim3(32 * 8, 1, 1); gridSize = dim3(((p.outH - 1) / tileOutH + 1) * p.minorDim, (p.outW - 1) / (p.loopX * tileOutW) + 1, (p.majorDim - 1) / p.loopMajor + 1); } else // large { p.loopMajor = (p.majorDim - 1) / 16384 + 1; p.loopX = 4; blockSize = dim3(4, 32, 1); gridSize = dim3((p.outH * p.minorDim - 1) / blockSize.x + 1, (p.outW - 1) / (p.loopX * blockSize.y) + 1, (p.majorDim - 1) / p.loopMajor + 1); } // Launch CUDA kernel. void* args[] = {&p}; OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel(cudaKernel, gridSize, blockSize, args, 0, stream)); } }; REGISTER_OP("UpFirDn2D") .Input ("x: T") .Input ("k: T") .Output ("y: T") .Attr ("T: {float, half}") .Attr ("upx: int = 1") .Attr ("upy: int = 1") .Attr ("downx: int = 1") .Attr ("downy: int = 1") .Attr ("padx0: int = 0") .Attr ("padx1: int = 0") .Attr ("pady0: int = 0") .Attr ("pady1: int = 0"); REGISTER_KERNEL_BUILDER(Name("UpFirDn2D").Device(DEVICE_GPU).TypeConstraint<float>("T"), UpFirDn2DOp<float>); REGISTER_KERNEL_BUILDER(Name("UpFirDn2D").Device(DEVICE_GPU).TypeConstraint<Eigen::half>("T"), UpFirDn2DOp<Eigen::half>);
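// ---------------------------------------------------------------------------
// Illustrative sizing check (not part of the op): the small-kernel
// specializations above stage the filter and an input tile in shared memory,
// and the input tile dimensions follow from how far a tileOutW x tileOutH block
// of output pixels reaches back into the (upsampled) input:
// tileInW = ((tileOutW - 1) * downx + kernelW - 1) / upx + 1, and likewise for
// the height. The sketch below evaluates that sizing for the
// <T, 2,2, 1,1, 8,8, 64,16> specialization; tileInDim is a helper name invented
// here.
// ---------------------------------------------------------------------------
#include <cstdio>

constexpr int tileInDim(int tileOut, int down, int kernel, int up)
{
    return ((tileOut - 1) * down + kernel - 1) / up + 1;
}

int main()
{
    constexpr int tileInW = tileInDim(/*tileOut=*/64, /*down=*/1, /*kernel=*/8, /*up=*/2);
    constexpr int tileInH = tileInDim(/*tileOut=*/16, /*down=*/1, /*kernel=*/8, /*up=*/2);
    constexpr int smemBytes = (8 * 8 + tileInH * tileInW) * (int)sizeof(float);
    static_assert(tileInW == 36 && tileInH == 12, "input tile for the 2x2-up, 8x8-kernel case");
    std::printf("input tile %dx%d -> ~%d bytes of shared memory per block\n", tileInH, tileInW, smemBytes);
    return 0;
}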
e0d6e75761e5f8c7a01906c118777eb6d10c3b79.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************* * * kpp_integrate_cuda_prototype.cu * Prototype file for kpp CUDA kernel * * Copyright 2016 The Cyprus Institute * * Developers: Michail Alvanos - [email protected] * Giannis Ashiotis * Theodoros Christoudias - [email protected] * ********************************************************************/ #include <stdio.h> #include <unistd.h> #include "hip/hip_runtime.h" =#=#=#=#=#=#=#=#=#=#=defines_vars_2=#=#=#=#=#=#=#=#=#=#= #define BLOCKSIZE 64 //#define MAX_VL_GLO 12288 /* elements that will pass in each call */ #define REDUCTION_SIZE_1 64 #define REDUCTION_SIZE_2 32 =#=#=#=#=#=#=#=#=#=#=defines_vars_1=#=#=#=#=#=#=#=#=#=#= =#=#=#=#=#=#=#=#=#=#=defines_ind_1=#=#=#=#=#=#=#=#=#=#= =#=#=#=#=#=#=#=#=#=#=defines_ind_2=#=#=#=#=#=#=#=#=#=#= =#=#=#=#=#=#=#=#=#=#=defines_ind_3=#=#=#=#=#=#=#=#=#=#= =#=#=#=#=#=#=#=#=#=#=defines_ind_4=#=#=#=#=#=#=#=#=#=#= =#=#=#=#=#=#=#=#=#=#=defines_ind_5=#=#=#=#=#=#=#=#=#=#= #define ifun 0 #define ijac 1 #define istp 2 #define iacc 3 #define irej 4 #define idec 5 #define isol 6 #define isng 7 #define itexit 0 #define ihexit 1 #define ZERO 0.0 #define ONE 1.0 #define HALF 0.5 /* * Fortran to C macros * GPU-friendly array deffinition * i:VL_GLO, j:NVAR * */ #define conc(i,j) conc[(j)*VL_GLO+(i)] #define khet_st(i,j) khet_st[(j)*VL_GLO+(i)] #define khet_tr(i,j) khet_tr[(j)*VL_GLO+(i)] #define jx(i,j) jx[j*VL_GLO+i] #define istatus(i,j) istatus[(j)*(VL_GLO)+(i)] #define rstatus(i,j) rstatus[(j)*(VL_GLO)+(i)] #define ROUND128(X) (X + (128 - 1)) & ~(128 - 1) #define rconst(i,j) rconst[(j)] /* Temporary arrays allocated in stack */ #define var(i,j) var[(j)] #define fix(i,j) fix[(j)] #define jcb(i,j) jcb[(j)] #define varDot(i,j) varDot[j] #define varNew(i,j) varNew[(j)] #define Fcn0(i,j) Fcn0[(j)] #define Fcn(i,j) Fcn[(j)] #define Fcn(i,j) Fcn[(j)] #define dFdT(i,j) dFdT[(j)] #define varErr(i,j) varErr[(j)] #define K(i,j,k) K[(j)*(NVAR)+(k)] #define jac0(i,j) jac0[(j)] #define Ghimj(i,j) Ghimj[(j)] /* Enable debug flags for GPU */ #define DEBUG #ifdef DEBUG #define GPU_DEBUG()\ gpuErrchk( hipPeekAtLastError() ); \ gpuErrchk( hipDeviceSynchronize() ); #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } static inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } #else /* If debug flags are disabled */ #define GPU_DEBUG() #define gpuErrchk(ans) ans #endif /** prefetches into L1 cache */ __device__ inline void prefetch_gl1(const void *p) { #if __CUDA_ARCH__ <= 300 asm("prefetch.global.L1 [%0];": :"l"(p)); #endif } __device__ inline void prefetch_ll1(const void *p) { #if __CUDA_ARCH__ <= 300 asm("prefetch.local.L1 [%0];": :"l"(p)); #endif } /** prefetches into L2 cache */ __device__ inline void prefetch_gl2(const void *p) { #if __CUDA_ARCH__ <= 300 asm("prefetch.global.L2 [%0];": :"l"(p)); #endif } __device__ inline void prefetch_ll2(const void *p) { #if __CUDA_ARCH__ <= 300 asm("prefetch.local.L2 [%0];": :"l"(p)); #endif } __device__ void update_rconst(const double * __restrict__ var, const double * __restrict__ khet_st, const double * __restrict__ khet_tr, const double * __restrict__ jx, double * __restrict__ rconst, const double * __restrict__ temp_gpu, const double * __restrict__ press_gpu, const double * __restrict__ cair_gpu, const int VL_GLO); /* This runs on CPU */ double 
machine_eps_flt() { double machEps = 1.0f; do { machEps /= 2.0f; // If next epsilon yields 1, then break, because current // epsilon is the machine epsilon. } while ((double)(1.0 + (machEps/2.0)) != 1.0); return machEps; } /* This runs on GPU */ __device__ double machine_eps_flt_cuda() { typedef union { long i64; double f64; } flt_64; flt_64 s; s.f64 = 1.; s.i64++; return (s.f64 - 1.); } __device__ static double alpha_AN(const int n, const int ro2type, const double temp, const double cair){ double alpha=2.E-22, beta=1.0, Yinf_298K=0.43, F=0.41, m0=0., minf=8.0; double Y0_298K, Y0_298K_tp, Yinf_298K_t, zeta, k_ratio, alpha_a; /* IF (ro2type = 1) THEN m = 0.4 ! primary RO2 ELSE IF (ro2type = 2) THEN m = 1. ! secondary RO2 ELSE IF (ro2type = 3) THEN m = 0.3 ! tertiary RO2 ELSE m = 1. */ double m = 1.; Y0_298K = alpha*exp(beta*n); Y0_298K_tp = Y0_298K *cair *pow((temp/298.),(- m0)); Yinf_298K_t = Yinf_298K * pow((temp/298.),(- minf)); zeta = 1/(1+ pow(log10(Y0_298K_tp/Yinf_298K_t),2)); k_ratio = (Y0_298K_tp/(1+ Y0_298K_tp/Yinf_298K_t))*pow(F,zeta); alpha_a = k_ratio/(1+ k_ratio) *m; return alpha_a; } __device__ static double alpha_AN(const int n, const int ro2type, const int bcarb, const int gcarb, const int abic, const double temp, const double cair){ double alpha=2.E-22, beta=1.0, Yinf_298K=0.43, F=0.41, m0=0., minf=8.0; double Y0_298K, Y0_298K_tp, Yinf_298K_t, zeta, k_ratio, alpha_a; double bcf=1., gcf=1., abf=1.; double m = 1.; //According to Teng, ref3189 if (bcarb == 1) { bcf = 0.19; }// derived from Praske, ref3190: alpha_AN = 0.03 for the secondary HMKO2 relative to alpha_AN for 6C RO2 (0.16) if (gcarb == 1) {gcf = 0.44; }// derived from Praske, ref3190: alpha_AN = 0.07 for the primary HMKO2 relative to alpha_AN for 6C RO2 (0.16) if (abic == 1) { abf = 0.24; }// derived from the ratio of AN- yield for toluene from Elrod et al. 
(ref3180), 5.5 0x1.9206e69676542p+ 229t & // 200 torr, and this SAR for linear alkyl RO2 with 9 heavy atoms, 23.3% Y0_298K = alpha*exp(beta*n); Y0_298K_tp = Y0_298K *cair *pow((temp/298.),(- m0)); Yinf_298K_t = Yinf_298K * pow((temp/298.),(- minf)); zeta = 1/(1+ pow(log10(Y0_298K_tp/Yinf_298K_t),2)); k_ratio = (Y0_298K_tp/(1+ Y0_298K_tp/Yinf_298K_t))*pow(F,zeta); alpha_a = k_ratio/(1+ k_ratio) *m*bcf*gcf*abf; return alpha_a; } __device__ static double k_RO2_HO2(const double temp, const int nC){ return 2.91e-13*exp(1300./temp)*(1.-exp(-0.245*nC)); // ref1630 } __device__ double ros_ErrorNorm(double * __restrict__ var, double * __restrict__ varNew, double * __restrict__ varErr, const double * __restrict__ absTol, const double * __restrict__ relTol, const int vectorTol ) { double err, scale, varMax; err = ZERO; if (vectorTol){ for (int i=0;i<NVAR - 16;i+=16){ prefetch_ll1(&varErr[i]); prefetch_ll1(&absTol[i]); prefetch_ll1(&relTol[i]); prefetch_ll1(&var[i]); prefetch_ll1(&varNew[i]); } for (int i=0; i<NVAR; i++) { varMax = fmax(fabs(var[i]),fabs(varNew[i])); scale = absTol[i]+ relTol[i]*varMax; err += pow((double)varErr[i]/scale,2.0); } err = sqrt((double) err/NVAR); }else{ for (int i=0;i<NVAR - 16;i+=16){ prefetch_ll1(&varErr[i]); prefetch_ll1(&var[i]); prefetch_ll1(&varNew[i]); } for (int i=0; i<NVAR; i++) { varMax = fmax(fabs(var[i]),fabs(varNew[i])); scale = absTol[0]+ relTol[0]*varMax; err += pow((double)varErr[i]/scale,2.0); } err = sqrt((double) err/NVAR); } return err; } =#=#=#=#=#=#=#=#=#=#=kppSolve=#=#=#=#=#=#=#=#=#=#= __device__ void ros_Solve(double * __restrict__ Ghimj, double * __restrict__ K, int &Nsol, const int istage, const int ros_S) { #pragma unroll 4 for (int i=0;i<LU_NONZERO-16;i+=16){ prefetch_ll1(&Ghimj[i]); } kppSolve(Ghimj, K, istage, ros_S); Nsol++; } =#=#=#=#=#=#=#=#=#=#=kppDecomp=#=#=#=#=#=#=#=#=#=#= __device__ void ros_Decomp(double * __restrict__ Ghimj, int &Ndec, int VL_GLO) { kppDecomp(Ghimj, VL_GLO); Ndec++; } =#=#=#=#=#=#=#=#=#=#=ros_PrepareMatrix=#=#=#=#=#=#=#=#=#=#= =#=#=#=#=#=#=#=#=#=#=Jac_sp=#=#=#=#=#=#=#=#=#=#= =#=#=#=#=#=#=#=#=#=#=Fun=#=#=#=#=#=#=#=#=#=#= __device__ void ros_FunTimeDerivative(const double T, double roundoff, double * __restrict__ var, const double * __restrict__ fix, const double * __restrict__ rconst, double *dFdT, double *Fcn0, int &Nfun, const double * __restrict__ khet_st, const double * __restrict__ khet_tr, const double * __restrict__ jx, const int VL_GLO) { int index = blockIdx.x*blockDim.x+threadIdx.x; const double DELTAMIN = 1.0E-6; double delta,one_over_delta; delta = sqrt(roundoff)*fmax(DELTAMIN,fabs(T)); one_over_delta = 1.0/delta; Fun(var, fix, rconst, dFdT, Nfun, VL_GLO); for (int i=0; i < NVAR; i++){ dFdT(index,i) = (dFdT(index,i) - Fcn0(index,i)) * one_over_delta; } } __device__ static int ros_Integrator(double * __restrict__ var, const double * __restrict__ fix, const double Tstart, const double Tend, double &T, // Rosenbrock method coefficients const int ros_S, const double * __restrict__ ros_M, const double * __restrict__ ros_E, const double * __restrict__ ros_A, const double * __restrict__ ros_C, const double * __restrict__ ros_Alpha, const double * __restrict__ ros_Gamma, const double ros_ELO, const int * ros_NewF, // Integration parameters const int autonomous, const int vectorTol, const int Max_no_steps, const double roundoff, const double Hmin, const double Hmax, const double Hstart, double &Hexit, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, // Status parameters int 
&Nfun, int &Njac, int &Nstp, int &Nacc, int &Nrej, int &Ndec, int &Nsol, int &Nsng, // cuda global mem buffers const double * __restrict__ rconst, const double * __restrict__ absTol, const double * __restrict__ relTol, double * __restrict__ varNew, double * __restrict__ Fcn0, double * __restrict__ K, double * __restrict__ dFdT, double * __restrict__ jac0, double * __restrict__ Ghimj, double * __restrict__ varErr, // for update_rconst const double * __restrict__ khet_st, const double * __restrict__ khet_tr, const double * __restrict__ jx, // VL_GLO const int VL_GLO) { int index = blockIdx.x*blockDim.x+threadIdx.x; double H, Hnew, HC, HG, Fac; // Tau - not used double Err; //*varErr; int direction; int rejectLastH, rejectMoreH; const double DELTAMIN = 1.0E-5; // ~~~> Initial preparations T = Tstart; Hexit = 0.0; H = fmin(Hstart,Hmax); if (fabs(H) <= 10.0*roundoff) H = DELTAMIN; if (Tend >= Tstart) { direction = + 1; } else { direction = - 1; } rejectLastH=0; rejectMoreH=0; // ~~~> Time loop begins below // TimeLoop: while((direction > 0) && ((T- Tend)+ roundoff <= ZERO) || (direction < 0) && ((Tend-T)+ roundoff <= ZERO)) { if (Nstp > Max_no_steps) // Too many steps return -6; // Step size too small if (H <= roundoff){ // Step size too small //if (((T+ 0.1*H) == T) || (H <= roundoff)) { return -7; } // ~~~> Limit H if necessary to avoid going beyond Tend Hexit = H; H = fmin(H,fabs(Tend-T)); // ~~~> Compute the function at current time Fun(var, fix, rconst, Fcn0, Nfun, VL_GLO); /// VAR READ - Fcn0 Write // ~~~> Compute the function derivative with respect to T if (!autonomous) ros_FunTimeDerivative(T, roundoff, var, fix, rconst, dFdT, Fcn0, Nfun, khet_st, khet_tr, jx, VL_GLO); /// VAR READ - fcn0 read // ~~~> Compute the Jacobian at current time Jac_sp(var, fix, rconst, jac0, Njac, VL_GLO); /// VAR READ // ~~~> Repeat step calculation until current step accepted // UntilAccepted: while(1) { ros_PrepareMatrix(H, direction, ros_Gamma[0], jac0, Ghimj, Nsng, Ndec, VL_GLO); // ~~~> Compute the stages // Stage: for (int istage=0; istage < ros_S; istage++) { // For the 1st istage the function has been computed previously if (istage == 0) { for (int i=0; i<NVAR; i++){ varNew(index,i) = Fcn0(index,i); // FCN0 Read } } else if(ros_NewF[istage]) { for (int i=0; i<NVAR; i++){ varNew(index,i) = var(index,i); } for (int j=0; j < (istage); j++){ for (int i=0; i<NVAR; i++){ varNew(index,i) = K(index,j,i)*ros_A[(istage)*(istage-1)/2 + j] + varNew(index,i); } } Fun(varNew, fix, rconst, varNew, Nfun,VL_GLO); // FCN <- varNew / not overlap } for (int i=0; i<NVAR; i++) K(index,istage,i) = varNew(index,i); for (int j=0; j<(istage); j++) { HC = ros_C[(istage)*(istage-1)/2 + j]/(direction*H); for (int i=0; i<NVAR; i++){ double tmp = K(index,j,i); K(index,istage,i) += tmp*HC; } } if ((!autonomous) && (ros_Gamma[istage] )) { HG = direction*H*ros_Gamma[istage]; for (int i=0; i<NVAR; i++){ K(index,istage,i) += dFdT(index,i)*HG; } } // R ,RW, RW, R, R ros_Solve(Ghimj, K, Nsol, istage, ros_S); } // Stage // ~~~> Compute the new solution for (int i=0; i<NVAR; i++){ double tmpNew = var(index,i); /// VAR READ double tmpErr = ZERO; for (int j=0; j<ros_S; j++){ double tmp = K(index,j,i); #ifdef DEBUG if (isnan(tmp)){ printf("Solver detected NAN!"); tmp = 0; } #endif tmpNew += tmp*ros_M[j]; tmpErr += tmp*ros_E[j]; } varNew(index,i) = tmpNew; // varNew is killed varErr(index,i) = tmpErr; } Err = ros_ErrorNorm(var, varNew, varErr, absTol, relTol, vectorTol); /// VAR-varNew READ // ~~~> New step size is bounded by FacMin <= Hnew/H 
<= FacMax Fac = fmin(FacMax,fmax(FacMin,FacSafe/pow(Err,ONE/ros_ELO))); Hnew = H*Fac; // ~~~> Check the error magnitude and adjust step size Nstp = Nstp+ 1; if((Err <= ONE) || (H <= Hmin)) // ~~~> Accept step { Nacc = Nacc + 1; for (int j=0; j<NVAR ; j++) var(index,j) = fmax(varNew(index,j),ZERO); /////////// VAR WRITE - last VarNew read T = T + direction*H; Hnew = fmax(Hmin,fmin(Hnew,Hmax)); if (rejectLastH) // No step size increase after a rejected step Hnew = fmin(Hnew,H); rejectLastH = 0; rejectMoreH = 0; H = Hnew; break; // EXIT THE LOOP: WHILE STEP NOT ACCEPTED } else // ~~~> Reject step { if (rejectMoreH) Hnew = H*FacRej; rejectMoreH = rejectLastH; rejectLastH = 1; H = Hnew; if (Nacc >= 1) Nrej += 1; } // Err <= 1 } // UntilAccepted } // TimeLoop // ~~~> Succesful exit return 0; // ~~~> The integration was successful } typedef struct { double ros_A[15]; double ros_C[15]; int ros_NewF[8]; double ros_M[6]; double ros_E[6]; double ros_Alpha[6]; double ros_Gamma[6]; double ros_ELO; int ros_S; } ros_t; /* * Lookup tables for different ROS for branch elimination. It is much faster in GPU. */ __device__ __constant__ ros_t ros[5] = { { {.58578643762690495119831127579030,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_A */ {-1.17157287525380990239662255158060,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_C */ {1,1,0,0,0,0,0,0}, /* ros_NewF */ {.87867965644035742679746691368545,.29289321881345247559915563789515,0,0,0,0}, /* ros_M */ {.29289321881345247559915563789515,.29289321881345247559915563789515,0,0,0,0}, /* ros_E */ {0,1.0,0,0,0,0}, /* ros_Alpha */ {1.70710678118654752440084436210485,-1.70710678118654752440084436210485,0,0,0,0}, /* ros_Gamma */ 2.0, /* ros_ELO */ 2, /* ros_S*/ }, /* Ros2 */ { {1.0,1.0,0,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_A */ {-0.10156171083877702091975600115545E+01, 0.40759956452537699824805835358067E+01,0.92076794298330791242156818474003E+01,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_C */ {1,1,0,0,0,0,0,0}, /* ros_NewF */ {0.1E+01,0.61697947043828245592553615689730E+01,-0.42772256543218573326238373806514E+00,0,0,0}, /* ros_M */ {0.5E+00,- 0.29079558716805469821718236208017E+01,0.22354069897811569627360909276199E+00,0,0,0}, /* ros_E */ {0.0E+00,0.43586652150845899941601945119356E+00,0.43586652150845899941601945119356E+00,0,0,0}, /* ros_Alpha */ {0.43586652150845899941601945119356E+00,0.24291996454816804366592249683314E+00,0.21851380027664058511513169485832E+01,0,0,0}, /* ros_Gamma */ 3.0, /* ros_ELO */ 3 }, /* Ros3 */ { {0.2000000000000000E+01, 0.1867943637803922E+01, 0.2344449711399156E+00, 0.1867943637803922E+01, 0.2344449711399156E+00,0,0,0,0,0,0,0,0,0,0}, /* ros_A */ {-0.7137615036412310E+01,0.2580708087951457E+01,0.6515950076447975E+00, - 0.2137148994382534E+01, - 0.3214669691237626E+00, - 0.6949742501781779E+00 ,0,0,0,0,0,0,0,0,0}, /* ros_C */ {1,1,1,0,0,0,0,0}, /* ros_NewF */ {0.2255570073418735E+01, 0.2870493262186792E+00, 0.4353179431840180E+00, 0.1093502252409163E+01,0,0}, /* ros_M */ { -0.2815431932141155E+00, -0.7276199124938920E-01, -0.1082196201495311E+00, -0.1093502252409163E+01, 0, 0}, /* ros_E */ {0.0, 0.1145640000000000E+01, 0.6552168638155900E+00, 0.6552168638155900E+00,0,0}, /* ros_Alpha */ { 0.5728200000000000E+00, -0.1769193891319233E+01, 0.7592633437920482E+00, -0.1049021087100450E+00,0,0}, /* ros_Gamma */ 4.0, /* ros_ELO */ 4 }, /* Ros4 */ { { 0.0E+00, 2.0E+00, 0.0E+00, 2.0E+00, 0.0E+00, 1.0E+00, 0,0,0,0,0,0,0,0,0}, /* ros_A */ { 4.0E+00, 1.0E+00, - 1.0E+00, 1.0E+00, - 1.0E+00, - 2.66666666666666666666666666666666, 0,0,0,0,0,0,0,0,0}, /* ros_C */ {1,0,1,1,0,0,0,0}, /* ros_NewF 
*/ {2.0,0,1.0,1.0,0,0}, /* ros_M */ {0,0,0,1.0,0,0}, /* ros_E */ {0,0,1.0,1.0,0,0}, /* ros_Alpha */ {0.5,1.5,0,0,0,0}, /* ros_Gamma */ 3.0, /* ros_ELO */ 4 }, /* Rodas3 */ { { 0.1544000000000000E+01, 0.9466785280815826E+00, 0.2557011698983284E+00, 0.3314825187068521E+01, 0.2896124015972201E+01, 0.9986419139977817E+00, 0.1221224509226641E+01, 0.6019134481288629E+01, 0.1253708332932087E+02, -0.6878860361058950E+00, 0.1221224509226641E+01, 0.6019134481288629E+01, 0.1253708332932087E+02, -0.6878860361058950E+00, 1.0E+00}, /* ros_A */ { -0.5668800000000000E+01, -0.2430093356833875E+01, -0.2063599157091915E+00, -0.1073529058151375E+00, -0.9594562251023355E+01, -0.2047028614809616E+02, 0.7496443313967647E+01, -0.1024680431464352E+02, -0.3399990352819905E+02, 0.1170890893206160E+02, 0.8083246795921522E+01, -0.7981132988064893E+01, -0.3152159432874371E+02, 0.1631930543123136E+02, -0.6058818238834054E+01}, /* ros_C */ {1,1,1,1,1,1,0,0}, /* ros_NewF */ {0.1221224509226641E+01,0.6019134481288629E+01,0.1253708332932087E+02,- 0.6878860361058950E+00,1,1}, /* ros_M */ {0,0,0,0,0,1.0}, /* ros_E */ {0.000, 0.386, 0.210, 0.630, 1.000, 1.000}, /* ros_Alpha */ {0.2500000000000000E+00, -0.1043000000000000E+00, 0.1035000000000000E+00, 0.3620000000000023E-01, 0, 0}, /* ros_Gamma */ 4.0, /* ros_ELO */ 6 } /* Rodas4 */ }; //__device__ double rconst_local[MAX_VL_GLO*NREACT]; /* Initialize rconst local */ //__device__ double * rconst_local; __device__ double k_3rd(double temp, double cair, double k0_300K, double n, double kinf_300K, double m, double fc) /* * * temp temperature [K] * cair air concentration [molecules/cm3] * k0_300K low pressure limit at 300 K * n exponent for low pressure limit * kinf_300K high pressure limit at 300 K * m exponent for high pressure limit * fc broadening factor (usually fc=0.6) * */ { double zt_help, k0_T, kinf_T, k_ratio, k_3rd_r; zt_help = 300.0/temp; k0_T = k0_300K *pow(zt_help,n) *cair; kinf_T = kinf_300K *pow(zt_help,m); k_ratio = k0_T/kinf_T; k_3rd_r = k0_T/(1.0+ k_ratio)*pow(fc,1.0/(1.0+ pow(log10(k_ratio),2))); return k_3rd_r; } __device__ double k_3rd_iupac(double temp, double cair, double k0_300K, double n, double kinf_300K, double m, double fc) /* * * temp temperature [K] * cair air concentration [molecules/cm3] * k0_300K low pressure limit at 300 K * n exponent for low pressure limit * kinf_300K high pressure limit at 300 K * m exponent for high pressure limit * fc broadening factor (e.g. 0.45 or 0.6...) 
* nu N * */ { double zt_help, k0_T, kinf_T, k_ratio, nu, k_3rd_iupac_r; zt_help = 300.0/temp; k0_T = k0_300K *pow(zt_help,n) *cair; kinf_T = kinf_300K *pow(zt_help,m); k_ratio = k0_T/kinf_T; nu = 0.75- 1.27*log10(fc); k_3rd_iupac_r = k0_T/(1.0+ k_ratio)*pow(fc,1.0/(1.0+ pow(log10(k_ratio)/nu,2))); return k_3rd_iupac_r; } double * temp_gpu; double * press_gpu; double * cair_gpu; =#=#=#=#=#=#=#=#=#=#=update_rconst=#=#=#=#=#=#=#=#=#=#= __global__ void Rosenbrock(double * __restrict__ conc, const double Tstart, const double Tend, double * __restrict__ rstatus, int * __restrict__ istatus, // values calculated from icntrl and rcntrl at host const int autonomous, const int vectorTol, const int UplimTol, const int method, const int Max_no_steps, double * __restrict__ d_jac0, double * __restrict__ d_Ghimj, double * __restrict__ d_varNew, double * __restrict__ d_K, double * __restrict__ d_varErr,double * __restrict__ d_dFdT ,double * __restrict__ d_Fcn0, double * __restrict__ d_var, double * __restrict__ d_fix, double * __restrict__ d_rconst, const double Hmin, const double Hmax, const double Hstart, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, const double roundoff, // cuda global mem buffers const double * __restrict__ absTol, const double * __restrict__ relTol, // for update_rconst const double * __restrict__ khet_st, const double * __restrict__ khet_tr, const double * __restrict__ jx, // global input const double * __restrict__ temp_gpu, const double * __restrict__ press_gpu, const double * __restrict__ cair_gpu, // extra const int VL_GLO) { int index = blockIdx.x*blockDim.x+threadIdx.x; /* * In theory someone can aggregate accesses together, * however due to algorithm, threads access * different parts of memory, making it harder to * optimize accesses. * */ double *Ghimj = &d_Ghimj[index*LU_NONZERO]; double *K = &d_K[index*NVAR*6]; double *varNew = &d_varNew[index*NVAR]; double *Fcn0 = &d_Fcn0[index*NVAR]; double *dFdT = &d_dFdT[index*NVAR]; double *jac0 = &d_jac0[index*LU_NONZERO]; double *varErr = &d_varErr[index*NVAR]; double *var = &d_var[index*NSPEC]; double *fix = &d_fix[index*NFIX]; double *rconst = &d_rconst[index*NREACT]; if (index < VL_GLO) { int Nfun,Njac,Nstp,Nacc,Nrej,Ndec,Nsol,Nsng; double Texit, Hexit; Nfun = 0; Njac = 0; Nstp = 0; Nacc = 0; Nrej = 0; Ndec = 0; Nsol = 0; Nsng = 0; /* FIXME: add check for method */ const double *ros_A = &ros[method-1].ros_A[0]; const double *ros_C = &ros[method-1].ros_C[0]; const double *ros_M = &ros[method-1].ros_M[0]; const double *ros_E = &ros[method-1].ros_E[0]; const double *ros_Alpha = &ros[method-1].ros_Alpha[0]; const double *ros_Gamma = &ros[method-1].ros_Gamma[0]; const int *ros_NewF = &ros[method-1].ros_NewF[0]; const int ros_S = ros[method-1].ros_S; const double ros_ELO = ros[method-1].ros_ELO; /* Copy data from global memory to temporary array */ /* * Optimization note: if we ever have enough constant * memory, we could use it for storing the data. * In current architectures if we use constant memory * only a few threads will be able to run on the fly. 
* */ for (int i=0; i<NSPEC; i++) var(index,i) = conc(index,i); for (int i=0; i<NFIX; i++) fix(index,i) = conc(index,NVAR+i); update_rconst(var, khet_st, khet_tr, jx, rconst, temp_gpu, press_gpu, cair_gpu, VL_GLO); ros_Integrator(var, fix, Tstart, Tend, Texit, // Rosenbrock method coefficients ros_S, ros_M, ros_E, ros_A, ros_C, ros_Alpha, ros_Gamma, ros_ELO, ros_NewF, // Integration parameters autonomous, vectorTol, Max_no_steps, roundoff, Hmin, Hmax, Hstart, Hexit, FacMin, FacMax, FacRej, FacSafe, // Status parameters Nfun, Njac, Nstp, Nacc, Nrej, Ndec, Nsol, Nsng, // cuda global mem buffers rconst, absTol, relTol, varNew, Fcn0, K, dFdT, jac0, Ghimj, varErr, // For update rconst khet_st, khet_tr, jx, VL_GLO ); for (int i=0; i<NVAR; i++) conc(index,i) = var(index,i); /* Statistics */ istatus(index,ifun) = Nfun; istatus(index,ijac) = Njac; istatus(index,istp) = Nstp; istatus(index,iacc) = Nacc; istatus(index,irej) = Nrej; istatus(index,idec) = Ndec; istatus(index,isol) = Nsol; istatus(index,isng) = Nsng; // Last T and H rstatus(index,itexit) = Texit; rstatus(index,ihexit) = Hexit; } } =#=#=#=#=#=#=#=#=#=#=special_ros=#=#=#=#=#=#=#=#=#=#= // no int8 in CUDA :( __global__ void reduce_istatus_1(int *istatus, int4 *tmp_out_1, int4 *tmp_out_2, int VL_GLO, int *xNacc, int *xNrej) { int index = blockIdx.x*blockDim.x+threadIdx.x; int idx_1 = threadIdx.x; int global_size = blockDim.x*gridDim.x; int foo; //no int8 in CUDA :( int4 accumulator_1 = make_int4(0,0,0,0); int4 accumulator_2 = make_int4(0,0,0,0); while (index < VL_GLO) { accumulator_1.x += istatus(index,0); accumulator_1.y += istatus(index,1); accumulator_1.z += istatus(index,2); //some dirty work on the side... foo = istatus(index,3); xNacc[index] = foo; accumulator_1.w += foo; foo = istatus(index,4); xNrej[index] = foo; accumulator_2.x += foo; accumulator_2.y += istatus(index,5); accumulator_2.z += istatus(index,6); accumulator_2.w += istatus(index,7); index += global_size; } //no int8 in CUDA :( __shared__ int4 buffer_1[REDUCTION_SIZE_1]; __shared__ int4 buffer_2[REDUCTION_SIZE_1]; buffer_1[idx_1] = accumulator_1; buffer_2[idx_1] = accumulator_2; __syncthreads(); int idx_2, active_threads = blockDim.x; int4 tmp_1, tmp_2; while (active_threads != 1) { active_threads /= 2; if (idx_1 < active_threads) { idx_2 = idx_1+active_threads; tmp_1 = buffer_1[idx_1]; tmp_2 = buffer_1[idx_2]; tmp_1.x += tmp_2.x; tmp_1.y += tmp_2.y; tmp_1.z += tmp_2.z; tmp_1.w += tmp_2.w; buffer_1[idx_1] = tmp_1; tmp_1 = buffer_2[idx_1]; tmp_2 = buffer_2[idx_2]; tmp_1.x += tmp_2.x; tmp_1.y += tmp_2.y; tmp_1.z += tmp_2.z; tmp_1.w += tmp_2.w; buffer_2[idx_1] = tmp_1; } __syncthreads(); } if (idx_1 == 0) { tmp_out_1[blockIdx.x] = buffer_1[0]; tmp_out_2[blockIdx.x] = buffer_2[0]; } } __global__ void reduce_istatus_2(int4 *tmp_out_1, int4 *tmp_out_2, int *out) { int idx_1 = threadIdx.x; //no int8 in CUDA :( __shared__ int4 buffer_1[REDUCTION_SIZE_2]; __shared__ int4 buffer_2[REDUCTION_SIZE_2]; buffer_1[idx_1] = tmp_out_1[idx_1]; buffer_2[idx_1] = tmp_out_2[idx_1]; __syncthreads(); int idx_2, active_threads = blockDim.x; int4 tmp_1, tmp_2; while (active_threads != 1) { active_threads /= 2; if (idx_1 < active_threads) { idx_2 = idx_1+active_threads; tmp_1 = buffer_1[idx_1]; tmp_2 = buffer_1[idx_2]; tmp_1.x += tmp_2.x; tmp_1.y += tmp_2.y; tmp_1.z += tmp_2.z; tmp_1.w += tmp_2.w; buffer_1[idx_1] = tmp_1; tmp_1 = buffer_2[idx_1]; tmp_2 = buffer_2[idx_2]; tmp_1.x += tmp_2.x; tmp_1.y += tmp_2.y; tmp_1.z += tmp_2.z; tmp_1.w += tmp_2.w; buffer_2[idx_1] = tmp_1; } __syncthreads(); } if 
(idx_1 == 0) { tmp_1 = buffer_1[0]; tmp_2 = buffer_2[0]; out[0] = tmp_1.x; out[1] = tmp_1.y; out[2] = tmp_1.z; out[3] = tmp_1.w; out[4] = tmp_2.x; out[5] = tmp_2.y; out[6] = tmp_2.z; out[7] = tmp_2.w; } } /* Assuming different processes */ enum { TRUE=1, FALSE=0 } ; double *d_conc, *d_temp, *d_press, *d_cair, *d_khet_st, *d_khet_tr, *d_jx, *d_jac0, *d_Ghimj, *d_varNew, *d_K, *d_varErr, *d_dFdT, *d_Fcn0, *d_var, *d_fix, *d_rconst; int initialized = FALSE; /* Device pointers pointing to GPU */ double *d_rstatus, *d_absTol, *d_relTol; int *d_istatus, *d_istatus_rd, *d_xNacc, *d_xNrej; int4 *d_tmp_out_1, *d_tmp_out_2; /* Allocate arrays on device for Rosenbrock */ __host__ void init_first_time(int pe, int VL_GLO, int size_khet_st, int size_khet_tr, int size_jx ){ /* Select the proper GPU CARD */ int deviceCount, device; gpuErrchk( hipGetDeviceCount(&deviceCount) ); device = pe % deviceCount; gpuErrchk( hipSetDevice(device) ); printf("PE[%d]: selected %d of total %d\n",pe,device,deviceCount); hipDeviceSetCacheConfig(hipFuncCachePreferL1); gpuErrchk( hipMalloc ((void **) &d_conc , sizeof(double)*VL_GLO*(NSPEC)) ); gpuErrchk( hipMalloc ((void **) &d_khet_st, sizeof(double)*VL_GLO*size_khet_st) ); gpuErrchk( hipMalloc ((void **) &d_khet_tr, sizeof(double)*VL_GLO*size_khet_tr) ); gpuErrchk( hipMalloc ((void **) &d_jx , sizeof(double)*VL_GLO*size_jx) ); gpuErrchk( hipMalloc ((void **) &d_rstatus , sizeof(double)*VL_GLO*2) ); gpuErrchk( hipMalloc ((void **) &d_istatus , sizeof(int)*VL_GLO*8) ); gpuErrchk( hipMalloc ((void **) &d_absTol , sizeof(double)*NVAR) ); gpuErrchk( hipMalloc ((void **) &d_relTol , sizeof(double)*NVAR) ); /* Allocate input arrays */ gpuErrchk( hipMalloc ((void **) &temp_gpu , sizeof(double)*VL_GLO) ); gpuErrchk( hipMalloc ((void **) &press_gpu , sizeof(double)*VL_GLO) ); gpuErrchk( hipMalloc ((void **) &cair_gpu , sizeof(double)*VL_GLO) ); /* Allocate arrays on device for reducing metrics */ gpuErrchk( hipMalloc ((void **) &d_istatus_rd , sizeof(int)*8)); gpuErrchk( hipMalloc ((void **) &d_tmp_out_1 , sizeof(int4)*64)); gpuErrchk( hipMalloc ((void **) &d_tmp_out_2 , sizeof(int4)*64)); gpuErrchk( hipMalloc ((void **) &d_xNacc , sizeof(int)*VL_GLO)); gpuErrchk( hipMalloc ((void **) &d_xNrej , sizeof(int)*VL_GLO)); /* Allocate arrays for solvers on device global memory to reduce the stack usage */ gpuErrchk( hipMalloc ((void **) &d_jac0, sizeof(double)*VL_GLO*LU_NONZERO) ); gpuErrchk( hipMalloc ((void **) &d_Ghimj, sizeof(double)*VL_GLO*LU_NONZERO) ); gpuErrchk( hipMalloc ((void **) &d_varNew, sizeof(double)*VL_GLO*NVAR) ); gpuErrchk( hipMalloc ((void **) &d_Fcn0, sizeof(double)*VL_GLO*NVAR) ); gpuErrchk( hipMalloc ((void **) &d_dFdT, sizeof(double)*VL_GLO*NVAR) ); gpuErrchk( hipMalloc ((void **) &d_K, sizeof(double)*VL_GLO*NVAR*6) ); // TODO: Change size according to solver steps gpuErrchk( hipMalloc ((void **) &d_varErr, sizeof(double)*VL_GLO*NVAR) ); gpuErrchk( hipMalloc ((void **) &d_var, sizeof(double)*VL_GLO*NSPEC) ); gpuErrchk( hipMalloc ((void **) &d_fix, sizeof(double)*VL_GLO*NFIX) ); gpuErrchk( hipMalloc ((void **) &d_rconst, sizeof(double)*VL_GLO*NREACT) ); initialized = TRUE; } /* * TODO: We should call it in some point.. 
*/ extern "C" void finalize_cuda(){ /* Free memory on the device */ gpuErrchk( hipFree(d_conc ) ); gpuErrchk( hipFree(d_temp ) ); gpuErrchk( hipFree(d_press ) ); gpuErrchk( hipFree(d_cair ) ); gpuErrchk( hipFree(d_khet_st ) ); gpuErrchk( hipFree(d_khet_tr ) ); gpuErrchk( hipFree(d_jx ) ); gpuErrchk( hipFree(d_rstatus ) ); gpuErrchk( hipFree(d_istatus ) ); gpuErrchk( hipFree(d_absTol ) ); gpuErrchk( hipFree(d_relTol ) ); gpuErrchk( hipFree(d_istatus_rd ) ); gpuErrchk( hipFree(d_tmp_out_1 ) ); gpuErrchk( hipFree(d_tmp_out_2 ) ); gpuErrchk( hipFree(d_xNacc ) ); gpuErrchk( hipFree(d_xNrej ) ); gpuErrchk( hipFree(temp_gpu ) ); gpuErrchk( hipFree(press_gpu ) ); gpuErrchk( hipFree(cair_gpu ) ); gpuErrchk( hipFree(d_jac0 ) ); gpuErrchk( hipFree(d_Ghimj ) ); gpuErrchk( hipFree(d_varNew ) ); gpuErrchk( hipFree(d_Fcn0 ) ); gpuErrchk( hipFree(d_dFdT ) ); gpuErrchk( hipFree(d_K ) ); gpuErrchk( hipFree(d_varErr ) ); gpuErrchk( hipFree(d_var ) ); gpuErrchk( hipFree(d_fix ) ); gpuErrchk( hipFree(d_rconst ) ); } extern "C" void kpp_integrate_cuda_( int *pe_p, int *sizes, double *time_step_len_p, double *conc, double *temp, double *press, double *cair, double *khet_st, double *khet_tr, double *jx, double *absTol, double *relTol, int *ierr, int *istatus, int *xNacc, int *xNrej, double *rndoff, int *icntrl=NULL, double *rcntrl=NULL ) /* // TODO * Parameters: * pe_p: scalar int - processor element * VL_GLO: scalar int - size of the system * NSPEC: scalar int - number of species * NREACT: scalar int - number of reactions * NVAR: scalar int - * * Input data: * conc: 2D array of doubles - size: vl_glo x number of species * temp: 1D array of doubles - size: vl_glo * press: 1D array of doubles - size: vl_glo * cair: 1D array of doubles - size: vl_glo * khet_st: 2D array of doubles - size: vl_glo x number of species * khet_tr: 2D array of doubles - size: vl_glo x number of species * jx: 2D array of doubles - size: vl_glo x number of species * absTol: 1D array of doubles - size: number of species * relTol: 1D array of doubles - size: number of species * Control: * icntrl: 1D array of ints - size: 4 * sizes: 1D array of ints - size: 4 * rcntrl: 1D array of doubles - size: 7 * * */ { const double DELTAMIN = 1.0E-5; int VL_GLO = sizes[0]; int size_khet_st = sizes[1]; int size_khet_tr = sizes[2]; int size_jx = sizes[3]; double roundoff = *rndoff; double Tstart,Tend; Tstart = ZERO; Tend = *time_step_len_p; int pe = *pe_p; // variables from rcntrl and icntrl int autonomous, vectorTol, UplimTol, method, Max_no_steps; double Hmin, Hmax, Hstart, FacMin, FacMax, FacRej, FacSafe; //int rcntrl_bool = 0, icntrl_bool=0; if (rcntrl == NULL) { rcntrl = new double[7]; for (int i=0; i < 7; i++) rcntrl[i] = 0.0; } if (icntrl == NULL) { icntrl = new int[4]; for (int i=0; i < 4; i++) icntrl[i] = 0; } /* Allocate arrays on device for update_rconst kernel*/ if (initialized == FALSE) init_first_time(pe, VL_GLO, size_khet_st, size_khet_tr, size_jx); /* Copy data from host memory to device memory */ gpuErrchk( hipMemcpy(d_conc , conc , sizeof(double)*VL_GLO*NSPEC , hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(temp_gpu , temp , sizeof(double)*VL_GLO , hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(press_gpu , press , sizeof(double)*VL_GLO , hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(cair_gpu , cair , sizeof(double)*VL_GLO , hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(d_khet_st, khet_st , sizeof(double)*VL_GLO*size_khet_st , hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(d_khet_tr, khet_tr , sizeof(double)*VL_GLO*size_khet_tr , 
hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(d_jx , jx , sizeof(double)*VL_GLO*size_jx , hipMemcpyHostToDevice) ); /* Copy arrays from host memory to device memory for Rosenbrock */ gpuErrchk( hipMemcpy(d_absTol, absTol, sizeof(double)*NVAR, hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(d_relTol, relTol, sizeof(double)*NVAR, hipMemcpyHostToDevice) ); /* Compute execution configuration for update_rconst */ int block_size, grid_size; block_size = BLOCKSIZE; grid_size = (VL_GLO + block_size - 1)/block_size; dim3 dimBlock(block_size); dim3 dimGrid(grid_size); /* Execute the kernel */ //update_rconst<<<dimGrid,dimBlock>>>(d_conc, d_khet_st, d_khet_tr, d_jx, VL_GLO); GPU_DEBUG(); // *------------------------------------------------------* // | Default values vs input settings (icntrl, rcntrl) | // *------------------------------------------------------* int ierr_tmp=0; { // autonomous or time dependent ODE. Default is time dependent. autonomous = !(icntrl[0] == 0); // For Scalar tolerances (icntrl[1].NE.0) the code uses absTol(0) and relTol(0) // For Vector tolerances (icntrl[1] == 0) the code uses absTol(0:NVAR) and relTol(0:NVAR) if (icntrl[1] == 0) { vectorTol = 1; //bool UplimTol = NVAR; } else { vectorTol = 0; UplimTol = 1; } // The particular Rosenbrock method chosen if (icntrl[2] == 0) { method = 4; } else if ((icntrl[2] >= 1) && (icntrl[2] <= 5)) { method = icntrl[2]; } else { printf("User-selected Rosenbrock method: icntrl[2]=%d\n",method); ierr_tmp = -2; } // The maximum number of steps admitted if (icntrl[3] == 0) { Max_no_steps = 100000; } else if (icntrl[3] > 0) { Max_no_steps=icntrl[3]; } else { printf("User-selected max no. of steps: icntrl[3]=%d\n",icntrl[3]); ierr_tmp = -1; } // Unit roundoff (1+ roundoff>1) roundoff = machine_eps_flt(); // Lower bound on the step size: (positive value) if (rcntrl[0] == ZERO) { Hmin = ZERO; } else if (rcntrl[0] > ZERO) { Hmin = rcntrl[0]; } else { printf("User-selected Hmin: rcntrl[0]=%f\n",rcntrl[0]); ierr_tmp = -3; } // Upper bound on the step size: (positive value) if (rcntrl[1] == ZERO) { Hmax = fabs(Tend-Tstart); } else if (rcntrl[1] > ZERO) { Hmax = fmin(fabs(rcntrl[1]),fabs(Tend-Tstart)); } else { printf("User-selected Hmax: rcntrl[1]=%f\n",rcntrl[1]); ierr_tmp = -3; } // Starting step size: (positive value) if (rcntrl[2] == ZERO) { Hstart = fmax(Hmin,DELTAMIN); } else if (rcntrl[2] > ZERO) { Hstart = fmin(fabs(rcntrl[2]),fabs(Tend-Tstart)); } else { printf("User-selected Hstart: rcntrl[2]=%f\n",rcntrl[2]); ierr_tmp = -3; } // Step size can be changed s.t. 
FacMin < Hnew/Hexit < FacMax if (rcntrl[3] == ZERO) { FacMin = 0.2; } else if (rcntrl[3] > ZERO) { FacMin = rcntrl[3]; } else { printf("User-selected FacMin: rcntrl[3]=%f\n",rcntrl[3]); ierr_tmp = -4; } if (rcntrl[4] == ZERO) { FacMax = 6.0; } else if (rcntrl[4] > ZERO) { FacMax = rcntrl[4]; } else { printf("User-selected FacMax: rcntrl[4]=%f\n",rcntrl[4]); ierr_tmp = -4; } // FacRej: Factor to decrease step after 2 succesive rejections if (rcntrl[5] == ZERO) { FacRej = 0.1; } else if (rcntrl[5] > ZERO) { FacRej = rcntrl[5]; } else { printf("User-selected FacRej: rcntrl[5]=%f\n",rcntrl[5]); ierr_tmp = -4; } // FacSafe: Safety Factor in the computation of new step size if (rcntrl[6] == ZERO) { FacSafe = 0.9; } else if (rcntrl[6] > ZERO) { FacSafe = rcntrl[6]; } else { printf("User-selected FacSafe: rcntrl[6]=%f\n",rcntrl[6]); ierr_tmp = -4; } // Check if tolerances are reasonable for (int i=0; i < UplimTol; i++) { if ((absTol[i] <= ZERO) || (relTol[i] <= 10.0*roundoff) || (relTol[i] >= 1.0)) { printf("CCC absTol(%d) = %f \n",i,absTol[i]); printf("CCC relTol(%d) = %f \n",i,relTol[i]); ierr_tmp = -5; } } } =#=#=#=#=#=#=#=#=#=#=call_kernel=#=#=#=#=#=#=#=#=#=#= GPU_DEBUG(); hipLaunchKernelGGL(( reduce_istatus_1), dim3(REDUCTION_SIZE_2),dim3(REDUCTION_SIZE_1), 0, 0, d_istatus, d_tmp_out_1, d_tmp_out_2, VL_GLO, d_xNacc, d_xNrej); GPU_DEBUG(); hipLaunchKernelGGL(( reduce_istatus_2), dim3(1),dim3(REDUCTION_SIZE_2), 0, 0, d_tmp_out_1, d_tmp_out_2, d_istatus_rd); GPU_DEBUG(); /* Copy the result back */ gpuErrchk( hipMemcpy( conc , d_conc , sizeof(double)*VL_GLO*NVAR, hipMemcpyDeviceToHost) ); gpuErrchk( hipMemcpy( xNacc , d_xNacc , sizeof(int)*VL_GLO , hipMemcpyDeviceToHost) ); gpuErrchk( hipMemcpy( xNrej , d_xNrej , sizeof(int)*VL_GLO , hipMemcpyDeviceToHost) ); return; }
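// Illustrative sketch (kernel and buffer names below are hypothetical; the block size is
// assumed to be a power of two, as REDUCTION_SIZE_1/REDUCTION_SIZE_2 are above):
// reduce_istatus_1 and reduce_istatus_2 follow the classic two-stage CUDA reduction,
// i.e. a grid-stride accumulation into registers, a shared-memory tree reduction within
// each block, and a final one-block kernel that folds the per-block partials together.
// The same pattern, reduced to a single int counter so the structure is easier to see:

#include <cuda_runtime.h>

__global__ void partial_sum(const int *in, int *block_out, int n)
{
    extern __shared__ int buf[];                         // one slot per thread
    int tid = threadIdx.x;
    int acc = 0;
    for (int i = blockIdx.x * blockDim.x + tid; i < n; i += blockDim.x * gridDim.x)
        acc += in[i];                                    // grid-stride accumulation
    buf[tid] = acc;
    __syncthreads();
    for (int active = blockDim.x / 2; active > 0; active /= 2) {
        if (tid < active) buf[tid] += buf[tid + active]; // shared-memory tree reduction
        __syncthreads();
    }
    if (tid == 0) block_out[blockIdx.x] = buf[0];        // one partial sum per block
}

__global__ void final_sum(const int *block_out, int *out, int nblocks)
{
    extern __shared__ int buf[];                         // launched with a single block
    int tid = threadIdx.x;
    buf[tid] = (tid < nblocks) ? block_out[tid] : 0;
    __syncthreads();
    for (int active = blockDim.x / 2; active > 0; active /= 2) {
        if (tid < active) buf[tid] += buf[tid + active];
        __syncthreads();
    }
    if (tid == 0) *out = buf[0];
}

// Usage, mirroring the launch configuration of the kernels above:
//   partial_sum<<<32, 64, 64 * sizeof(int)>>>(d_in, d_partials, n);
//   final_sum  <<< 1, 32, 32 * sizeof(int)>>>(d_partials, d_out, 32);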
e0d6e75761e5f8c7a01906c118777eb6d10c3b79.cu
/************************************************************* * * kpp_integrate_cuda_prototype.cu * Prototype file for kpp CUDA kernel * * Copyright 2016 The Cyprus Institute * * Developers: Michail Alvanos - [email protected] * Giannis Ashiotis * Theodoros Christoudias - [email protected] * ********************************************************************/ #include <stdio.h> #include <unistd.h> #include "cuda.h" =#=#=#=#=#=#=#=#=#=#=defines_vars_2=#=#=#=#=#=#=#=#=#=#= #define BLOCKSIZE 64 //#define MAX_VL_GLO 12288 /* elements that will pass in each call */ #define REDUCTION_SIZE_1 64 #define REDUCTION_SIZE_2 32 =#=#=#=#=#=#=#=#=#=#=defines_vars_1=#=#=#=#=#=#=#=#=#=#= =#=#=#=#=#=#=#=#=#=#=defines_ind_1=#=#=#=#=#=#=#=#=#=#= =#=#=#=#=#=#=#=#=#=#=defines_ind_2=#=#=#=#=#=#=#=#=#=#= =#=#=#=#=#=#=#=#=#=#=defines_ind_3=#=#=#=#=#=#=#=#=#=#= =#=#=#=#=#=#=#=#=#=#=defines_ind_4=#=#=#=#=#=#=#=#=#=#= =#=#=#=#=#=#=#=#=#=#=defines_ind_5=#=#=#=#=#=#=#=#=#=#= #define ifun 0 #define ijac 1 #define istp 2 #define iacc 3 #define irej 4 #define idec 5 #define isol 6 #define isng 7 #define itexit 0 #define ihexit 1 #define ZERO 0.0 #define ONE 1.0 #define HALF 0.5 /* * Fortran to C macros * GPU-friendly array deffinition * i:VL_GLO, j:NVAR * */ #define conc(i,j) conc[(j)*VL_GLO+(i)] #define khet_st(i,j) khet_st[(j)*VL_GLO+(i)] #define khet_tr(i,j) khet_tr[(j)*VL_GLO+(i)] #define jx(i,j) jx[j*VL_GLO+i] #define istatus(i,j) istatus[(j)*(VL_GLO)+(i)] #define rstatus(i,j) rstatus[(j)*(VL_GLO)+(i)] #define ROUND128(X) (X + (128 - 1)) & ~(128 - 1) #define rconst(i,j) rconst[(j)] /* Temporary arrays allocated in stack */ #define var(i,j) var[(j)] #define fix(i,j) fix[(j)] #define jcb(i,j) jcb[(j)] #define varDot(i,j) varDot[j] #define varNew(i,j) varNew[(j)] #define Fcn0(i,j) Fcn0[(j)] #define Fcn(i,j) Fcn[(j)] #define Fcn(i,j) Fcn[(j)] #define dFdT(i,j) dFdT[(j)] #define varErr(i,j) varErr[(j)] #define K(i,j,k) K[(j)*(NVAR)+(k)] #define jac0(i,j) jac0[(j)] #define Ghimj(i,j) Ghimj[(j)] /* Enable debug flags for GPU */ #define DEBUG #ifdef DEBUG #define GPU_DEBUG()\ gpuErrchk( cudaPeekAtLastError() ); \ gpuErrchk( cudaDeviceSynchronize() ); #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } #else /* If debug flags are disabled */ #define GPU_DEBUG() #define gpuErrchk(ans) ans #endif /** prefetches into L1 cache */ __device__ inline void prefetch_gl1(const void *p) { #if __CUDA_ARCH__ <= 300 asm("prefetch.global.L1 [%0];": :"l"(p)); #endif } __device__ inline void prefetch_ll1(const void *p) { #if __CUDA_ARCH__ <= 300 asm("prefetch.local.L1 [%0];": :"l"(p)); #endif } /** prefetches into L2 cache */ __device__ inline void prefetch_gl2(const void *p) { #if __CUDA_ARCH__ <= 300 asm("prefetch.global.L2 [%0];": :"l"(p)); #endif } __device__ inline void prefetch_ll2(const void *p) { #if __CUDA_ARCH__ <= 300 asm("prefetch.local.L2 [%0];": :"l"(p)); #endif } __device__ void update_rconst(const double * __restrict__ var, const double * __restrict__ khet_st, const double * __restrict__ khet_tr, const double * __restrict__ jx, double * __restrict__ rconst, const double * __restrict__ temp_gpu, const double * __restrict__ press_gpu, const double * __restrict__ cair_gpu, const int VL_GLO); /* This runs on CPU */ double machine_eps_flt() { double machEps = 1.0f; do { machEps /= 2.0f; // If 
next epsilon yields 1, then break, because current // epsilon is the machine epsilon. } while ((double)(1.0 + (machEps/2.0)) != 1.0); return machEps; } /* This runs on GPU */ __device__ double machine_eps_flt_cuda() { typedef union { long i64; double f64; } flt_64; flt_64 s; s.f64 = 1.; s.i64++; return (s.f64 - 1.); } __device__ static double alpha_AN(const int n, const int ro2type, const double temp, const double cair){ double alpha=2.E-22, beta=1.0, Yinf_298K=0.43, F=0.41, m0=0., minf=8.0; double Y0_298K, Y0_298K_tp, Yinf_298K_t, zeta, k_ratio, alpha_a; /* IF (ro2type = 1) THEN m = 0.4 ! primary RO2 ELSE IF (ro2type = 2) THEN m = 1. ! secondary RO2 ELSE IF (ro2type = 3) THEN m = 0.3 ! tertiary RO2 ELSE m = 1. */ double m = 1.; Y0_298K = alpha*exp(beta*n); Y0_298K_tp = Y0_298K *cair *pow((temp/298.),(- m0)); Yinf_298K_t = Yinf_298K * pow((temp/298.),(- minf)); zeta = 1/(1+ pow(log10(Y0_298K_tp/Yinf_298K_t),2)); k_ratio = (Y0_298K_tp/(1+ Y0_298K_tp/Yinf_298K_t))*pow(F,zeta); alpha_a = k_ratio/(1+ k_ratio) *m; return alpha_a; } __device__ static double alpha_AN(const int n, const int ro2type, const int bcarb, const int gcarb, const int abic, const double temp, const double cair){ double alpha=2.E-22, beta=1.0, Yinf_298K=0.43, F=0.41, m0=0., minf=8.0; double Y0_298K, Y0_298K_tp, Yinf_298K_t, zeta, k_ratio, alpha_a; double bcf=1., gcf=1., abf=1.; double m = 1.; //According to Teng, ref3189 if (bcarb == 1) { bcf = 0.19; }// derived from Praske, ref3190: alpha_AN = 0.03 for the secondary HMKO2 relative to alpha_AN for 6C RO2 (0.16) if (gcarb == 1) {gcf = 0.44; }// derived from Praske, ref3190: alpha_AN = 0.07 for the primary HMKO2 relative to alpha_AN for 6C RO2 (0.16) if (abic == 1) { abf = 0.24; }// derived from the ratio of AN- yield for toluene from Elrod et al. 
(ref3180), 5.5 0x1.9206e69676542p+ 229t & // 200 torr, and this SAR for linear alkyl RO2 with 9 heavy atoms, 23.3% Y0_298K = alpha*exp(beta*n); Y0_298K_tp = Y0_298K *cair *pow((temp/298.),(- m0)); Yinf_298K_t = Yinf_298K * pow((temp/298.),(- minf)); zeta = 1/(1+ pow(log10(Y0_298K_tp/Yinf_298K_t),2)); k_ratio = (Y0_298K_tp/(1+ Y0_298K_tp/Yinf_298K_t))*pow(F,zeta); alpha_a = k_ratio/(1+ k_ratio) *m*bcf*gcf*abf; return alpha_a; } __device__ static double k_RO2_HO2(const double temp, const int nC){ return 2.91e-13*exp(1300./temp)*(1.-exp(-0.245*nC)); // ref1630 } __device__ double ros_ErrorNorm(double * __restrict__ var, double * __restrict__ varNew, double * __restrict__ varErr, const double * __restrict__ absTol, const double * __restrict__ relTol, const int vectorTol ) { double err, scale, varMax; err = ZERO; if (vectorTol){ for (int i=0;i<NVAR - 16;i+=16){ prefetch_ll1(&varErr[i]); prefetch_ll1(&absTol[i]); prefetch_ll1(&relTol[i]); prefetch_ll1(&var[i]); prefetch_ll1(&varNew[i]); } for (int i=0; i<NVAR; i++) { varMax = fmax(fabs(var[i]),fabs(varNew[i])); scale = absTol[i]+ relTol[i]*varMax; err += pow((double)varErr[i]/scale,2.0); } err = sqrt((double) err/NVAR); }else{ for (int i=0;i<NVAR - 16;i+=16){ prefetch_ll1(&varErr[i]); prefetch_ll1(&var[i]); prefetch_ll1(&varNew[i]); } for (int i=0; i<NVAR; i++) { varMax = fmax(fabs(var[i]),fabs(varNew[i])); scale = absTol[0]+ relTol[0]*varMax; err += pow((double)varErr[i]/scale,2.0); } err = sqrt((double) err/NVAR); } return err; } =#=#=#=#=#=#=#=#=#=#=kppSolve=#=#=#=#=#=#=#=#=#=#= __device__ void ros_Solve(double * __restrict__ Ghimj, double * __restrict__ K, int &Nsol, const int istage, const int ros_S) { #pragma unroll 4 for (int i=0;i<LU_NONZERO-16;i+=16){ prefetch_ll1(&Ghimj[i]); } kppSolve(Ghimj, K, istage, ros_S); Nsol++; } =#=#=#=#=#=#=#=#=#=#=kppDecomp=#=#=#=#=#=#=#=#=#=#= __device__ void ros_Decomp(double * __restrict__ Ghimj, int &Ndec, int VL_GLO) { kppDecomp(Ghimj, VL_GLO); Ndec++; } =#=#=#=#=#=#=#=#=#=#=ros_PrepareMatrix=#=#=#=#=#=#=#=#=#=#= =#=#=#=#=#=#=#=#=#=#=Jac_sp=#=#=#=#=#=#=#=#=#=#= =#=#=#=#=#=#=#=#=#=#=Fun=#=#=#=#=#=#=#=#=#=#= __device__ void ros_FunTimeDerivative(const double T, double roundoff, double * __restrict__ var, const double * __restrict__ fix, const double * __restrict__ rconst, double *dFdT, double *Fcn0, int &Nfun, const double * __restrict__ khet_st, const double * __restrict__ khet_tr, const double * __restrict__ jx, const int VL_GLO) { int index = blockIdx.x*blockDim.x+threadIdx.x; const double DELTAMIN = 1.0E-6; double delta,one_over_delta; delta = sqrt(roundoff)*fmax(DELTAMIN,fabs(T)); one_over_delta = 1.0/delta; Fun(var, fix, rconst, dFdT, Nfun, VL_GLO); for (int i=0; i < NVAR; i++){ dFdT(index,i) = (dFdT(index,i) - Fcn0(index,i)) * one_over_delta; } } __device__ static int ros_Integrator(double * __restrict__ var, const double * __restrict__ fix, const double Tstart, const double Tend, double &T, // Rosenbrock method coefficients const int ros_S, const double * __restrict__ ros_M, const double * __restrict__ ros_E, const double * __restrict__ ros_A, const double * __restrict__ ros_C, const double * __restrict__ ros_Alpha, const double * __restrict__ ros_Gamma, const double ros_ELO, const int * ros_NewF, // Integration parameters const int autonomous, const int vectorTol, const int Max_no_steps, const double roundoff, const double Hmin, const double Hmax, const double Hstart, double &Hexit, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, // Status parameters int 
&Nfun, int &Njac, int &Nstp, int &Nacc, int &Nrej, int &Ndec, int &Nsol, int &Nsng, // cuda global mem buffers const double * __restrict__ rconst, const double * __restrict__ absTol, const double * __restrict__ relTol, double * __restrict__ varNew, double * __restrict__ Fcn0, double * __restrict__ K, double * __restrict__ dFdT, double * __restrict__ jac0, double * __restrict__ Ghimj, double * __restrict__ varErr, // for update_rconst const double * __restrict__ khet_st, const double * __restrict__ khet_tr, const double * __restrict__ jx, // VL_GLO const int VL_GLO) { int index = blockIdx.x*blockDim.x+threadIdx.x; double H, Hnew, HC, HG, Fac; // Tau - not used double Err; //*varErr; int direction; int rejectLastH, rejectMoreH; const double DELTAMIN = 1.0E-5; // ~~~> Initial preparations T = Tstart; Hexit = 0.0; H = fmin(Hstart,Hmax); if (fabs(H) <= 10.0*roundoff) H = DELTAMIN; if (Tend >= Tstart) { direction = + 1; } else { direction = - 1; } rejectLastH=0; rejectMoreH=0; // ~~~> Time loop begins below // TimeLoop: while((direction > 0) && ((T- Tend)+ roundoff <= ZERO) || (direction < 0) && ((Tend-T)+ roundoff <= ZERO)) { if (Nstp > Max_no_steps) // Too many steps return -6; // Step size too small if (H <= roundoff){ // Step size too small //if (((T+ 0.1*H) == T) || (H <= roundoff)) { return -7; } // ~~~> Limit H if necessary to avoid going beyond Tend Hexit = H; H = fmin(H,fabs(Tend-T)); // ~~~> Compute the function at current time Fun(var, fix, rconst, Fcn0, Nfun, VL_GLO); /// VAR READ - Fcn0 Write // ~~~> Compute the function derivative with respect to T if (!autonomous) ros_FunTimeDerivative(T, roundoff, var, fix, rconst, dFdT, Fcn0, Nfun, khet_st, khet_tr, jx, VL_GLO); /// VAR READ - fcn0 read // ~~~> Compute the Jacobian at current time Jac_sp(var, fix, rconst, jac0, Njac, VL_GLO); /// VAR READ // ~~~> Repeat step calculation until current step accepted // UntilAccepted: while(1) { ros_PrepareMatrix(H, direction, ros_Gamma[0], jac0, Ghimj, Nsng, Ndec, VL_GLO); // ~~~> Compute the stages // Stage: for (int istage=0; istage < ros_S; istage++) { // For the 1st istage the function has been computed previously if (istage == 0) { for (int i=0; i<NVAR; i++){ varNew(index,i) = Fcn0(index,i); // FCN0 Read } } else if(ros_NewF[istage]) { for (int i=0; i<NVAR; i++){ varNew(index,i) = var(index,i); } for (int j=0; j < (istage); j++){ for (int i=0; i<NVAR; i++){ varNew(index,i) = K(index,j,i)*ros_A[(istage)*(istage-1)/2 + j] + varNew(index,i); } } Fun(varNew, fix, rconst, varNew, Nfun,VL_GLO); // FCN <- varNew / not overlap } for (int i=0; i<NVAR; i++) K(index,istage,i) = varNew(index,i); for (int j=0; j<(istage); j++) { HC = ros_C[(istage)*(istage-1)/2 + j]/(direction*H); for (int i=0; i<NVAR; i++){ double tmp = K(index,j,i); K(index,istage,i) += tmp*HC; } } if ((!autonomous) && (ros_Gamma[istage] )) { HG = direction*H*ros_Gamma[istage]; for (int i=0; i<NVAR; i++){ K(index,istage,i) += dFdT(index,i)*HG; } } // R ,RW, RW, R, R ros_Solve(Ghimj, K, Nsol, istage, ros_S); } // Stage // ~~~> Compute the new solution for (int i=0; i<NVAR; i++){ double tmpNew = var(index,i); /// VAR READ double tmpErr = ZERO; for (int j=0; j<ros_S; j++){ double tmp = K(index,j,i); #ifdef DEBUG if (isnan(tmp)){ printf("Solver detected NAN!"); tmp = 0; } #endif tmpNew += tmp*ros_M[j]; tmpErr += tmp*ros_E[j]; } varNew(index,i) = tmpNew; // varNew is killed varErr(index,i) = tmpErr; } Err = ros_ErrorNorm(var, varNew, varErr, absTol, relTol, vectorTol); /// VAR-varNew READ // ~~~> New step size is bounded by FacMin <= Hnew/H 
<= FacMax Fac = fmin(FacMax,fmax(FacMin,FacSafe/pow(Err,ONE/ros_ELO))); Hnew = H*Fac; // ~~~> Check the error magnitude and adjust step size Nstp = Nstp+ 1; if((Err <= ONE) || (H <= Hmin)) // ~~~> Accept step { Nacc = Nacc + 1; for (int j=0; j<NVAR ; j++) var(index,j) = fmax(varNew(index,j),ZERO); /////////// VAR WRITE - last VarNew read T = T + direction*H; Hnew = fmax(Hmin,fmin(Hnew,Hmax)); if (rejectLastH) // No step size increase after a rejected step Hnew = fmin(Hnew,H); rejectLastH = 0; rejectMoreH = 0; H = Hnew; break; // EXIT THE LOOP: WHILE STEP NOT ACCEPTED } else // ~~~> Reject step { if (rejectMoreH) Hnew = H*FacRej; rejectMoreH = rejectLastH; rejectLastH = 1; H = Hnew; if (Nacc >= 1) Nrej += 1; } // Err <= 1 } // UntilAccepted } // TimeLoop // ~~~> Succesful exit return 0; // ~~~> The integration was successful } typedef struct { double ros_A[15]; double ros_C[15]; int ros_NewF[8]; double ros_M[6]; double ros_E[6]; double ros_Alpha[6]; double ros_Gamma[6]; double ros_ELO; int ros_S; } ros_t; /* * Lookup tables for different ROS for branch elimination. It is much faster in GPU. */ __device__ __constant__ ros_t ros[5] = { { {.58578643762690495119831127579030,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_A */ {-1.17157287525380990239662255158060,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_C */ {1,1,0,0,0,0,0,0}, /* ros_NewF */ {.87867965644035742679746691368545,.29289321881345247559915563789515,0,0,0,0}, /* ros_M */ {.29289321881345247559915563789515,.29289321881345247559915563789515,0,0,0,0}, /* ros_E */ {0,1.0,0,0,0,0}, /* ros_Alpha */ {1.70710678118654752440084436210485,-1.70710678118654752440084436210485,0,0,0,0}, /* ros_Gamma */ 2.0, /* ros_ELO */ 2, /* ros_S*/ }, /* Ros2 */ { {1.0,1.0,0,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_A */ {-0.10156171083877702091975600115545E+01, 0.40759956452537699824805835358067E+01,0.92076794298330791242156818474003E+01,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_C */ {1,1,0,0,0,0,0,0}, /* ros_NewF */ {0.1E+01,0.61697947043828245592553615689730E+01,-0.42772256543218573326238373806514E+00,0,0,0}, /* ros_M */ {0.5E+00,- 0.29079558716805469821718236208017E+01,0.22354069897811569627360909276199E+00,0,0,0}, /* ros_E */ {0.0E+00,0.43586652150845899941601945119356E+00,0.43586652150845899941601945119356E+00,0,0,0}, /* ros_Alpha */ {0.43586652150845899941601945119356E+00,0.24291996454816804366592249683314E+00,0.21851380027664058511513169485832E+01,0,0,0}, /* ros_Gamma */ 3.0, /* ros_ELO */ 3 }, /* Ros3 */ { {0.2000000000000000E+01, 0.1867943637803922E+01, 0.2344449711399156E+00, 0.1867943637803922E+01, 0.2344449711399156E+00,0,0,0,0,0,0,0,0,0,0}, /* ros_A */ {-0.7137615036412310E+01,0.2580708087951457E+01,0.6515950076447975E+00, - 0.2137148994382534E+01, - 0.3214669691237626E+00, - 0.6949742501781779E+00 ,0,0,0,0,0,0,0,0,0}, /* ros_C */ {1,1,1,0,0,0,0,0}, /* ros_NewF */ {0.2255570073418735E+01, 0.2870493262186792E+00, 0.4353179431840180E+00, 0.1093502252409163E+01,0,0}, /* ros_M */ { -0.2815431932141155E+00, -0.7276199124938920E-01, -0.1082196201495311E+00, -0.1093502252409163E+01, 0, 0}, /* ros_E */ {0.0, 0.1145640000000000E+01, 0.6552168638155900E+00, 0.6552168638155900E+00,0,0}, /* ros_Alpha */ { 0.5728200000000000E+00, -0.1769193891319233E+01, 0.7592633437920482E+00, -0.1049021087100450E+00,0,0}, /* ros_Gamma */ 4.0, /* ros_ELO */ 4 }, /* Ros4 */ { { 0.0E+00, 2.0E+00, 0.0E+00, 2.0E+00, 0.0E+00, 1.0E+00, 0,0,0,0,0,0,0,0,0}, /* ros_A */ { 4.0E+00, 1.0E+00, - 1.0E+00, 1.0E+00, - 1.0E+00, - 2.66666666666666666666666666666666, 0,0,0,0,0,0,0,0,0}, /* ros_C */ {1,0,1,1,0,0,0,0}, /* ros_NewF 
*/ {2.0,0,1.0,1.0,0,0}, /* ros_M */ {0,0,0,1.0,0,0}, /* ros_E */ {0,0,1.0,1.0,0,0}, /* ros_Alpha */ {0.5,1.5,0,0,0,0}, /* ros_Gamma */ 3.0, /* ros_ELO */ 4 }, /* Rodas3 */ { { 0.1544000000000000E+01, 0.9466785280815826E+00, 0.2557011698983284E+00, 0.3314825187068521E+01, 0.2896124015972201E+01, 0.9986419139977817E+00, 0.1221224509226641E+01, 0.6019134481288629E+01, 0.1253708332932087E+02, -0.6878860361058950E+00, 0.1221224509226641E+01, 0.6019134481288629E+01, 0.1253708332932087E+02, -0.6878860361058950E+00, 1.0E+00}, /* ros_A */ { -0.5668800000000000E+01, -0.2430093356833875E+01, -0.2063599157091915E+00, -0.1073529058151375E+00, -0.9594562251023355E+01, -0.2047028614809616E+02, 0.7496443313967647E+01, -0.1024680431464352E+02, -0.3399990352819905E+02, 0.1170890893206160E+02, 0.8083246795921522E+01, -0.7981132988064893E+01, -0.3152159432874371E+02, 0.1631930543123136E+02, -0.6058818238834054E+01}, /* ros_C */ {1,1,1,1,1,1,0,0}, /* ros_NewF */ {0.1221224509226641E+01,0.6019134481288629E+01,0.1253708332932087E+02,- 0.6878860361058950E+00,1,1}, /* ros_M */ {0,0,0,0,0,1.0}, /* ros_E */ {0.000, 0.386, 0.210, 0.630, 1.000, 1.000}, /* ros_Alpha */ {0.2500000000000000E+00, -0.1043000000000000E+00, 0.1035000000000000E+00, 0.3620000000000023E-01, 0, 0}, /* ros_Gamma */ 4.0, /* ros_ELO */ 6 } /* Rodas4 */ }; //__device__ double rconst_local[MAX_VL_GLO*NREACT]; /* Initialize rconst local */ //__device__ double * rconst_local; __device__ double k_3rd(double temp, double cair, double k0_300K, double n, double kinf_300K, double m, double fc) /* * * temp temperature [K] * cair air concentration [molecules/cm3] * k0_300K low pressure limit at 300 K * n exponent for low pressure limit * kinf_300K high pressure limit at 300 K * m exponent for high pressure limit * fc broadening factor (usually fc=0.6) * */ { double zt_help, k0_T, kinf_T, k_ratio, k_3rd_r; zt_help = 300.0/temp; k0_T = k0_300K *pow(zt_help,n) *cair; kinf_T = kinf_300K *pow(zt_help,m); k_ratio = k0_T/kinf_T; k_3rd_r = k0_T/(1.0+ k_ratio)*pow(fc,1.0/(1.0+ pow(log10(k_ratio),2))); return k_3rd_r; } __device__ double k_3rd_iupac(double temp, double cair, double k0_300K, double n, double kinf_300K, double m, double fc) /* * * temp temperature [K] * cair air concentration [molecules/cm3] * k0_300K low pressure limit at 300 K * n exponent for low pressure limit * kinf_300K high pressure limit at 300 K * m exponent for high pressure limit * fc broadening factor (e.g. 0.45 or 0.6...) 
* nu N * */ { double zt_help, k0_T, kinf_T, k_ratio, nu, k_3rd_iupac_r; zt_help = 300.0/temp; k0_T = k0_300K *pow(zt_help,n) *cair; kinf_T = kinf_300K *pow(zt_help,m); k_ratio = k0_T/kinf_T; nu = 0.75- 1.27*log10(fc); k_3rd_iupac_r = k0_T/(1.0+ k_ratio)*pow(fc,1.0/(1.0+ pow(log10(k_ratio)/nu,2))); return k_3rd_iupac_r; } double * temp_gpu; double * press_gpu; double * cair_gpu; =#=#=#=#=#=#=#=#=#=#=update_rconst=#=#=#=#=#=#=#=#=#=#= __global__ void Rosenbrock(double * __restrict__ conc, const double Tstart, const double Tend, double * __restrict__ rstatus, int * __restrict__ istatus, // values calculated from icntrl and rcntrl at host const int autonomous, const int vectorTol, const int UplimTol, const int method, const int Max_no_steps, double * __restrict__ d_jac0, double * __restrict__ d_Ghimj, double * __restrict__ d_varNew, double * __restrict__ d_K, double * __restrict__ d_varErr,double * __restrict__ d_dFdT ,double * __restrict__ d_Fcn0, double * __restrict__ d_var, double * __restrict__ d_fix, double * __restrict__ d_rconst, const double Hmin, const double Hmax, const double Hstart, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, const double roundoff, // cuda global mem buffers const double * __restrict__ absTol, const double * __restrict__ relTol, // for update_rconst const double * __restrict__ khet_st, const double * __restrict__ khet_tr, const double * __restrict__ jx, // global input const double * __restrict__ temp_gpu, const double * __restrict__ press_gpu, const double * __restrict__ cair_gpu, // extra const int VL_GLO) { int index = blockIdx.x*blockDim.x+threadIdx.x; /* * In theory someone can aggregate accesses together, * however due to algorithm, threads access * different parts of memory, making it harder to * optimize accesses. * */ double *Ghimj = &d_Ghimj[index*LU_NONZERO]; double *K = &d_K[index*NVAR*6]; double *varNew = &d_varNew[index*NVAR]; double *Fcn0 = &d_Fcn0[index*NVAR]; double *dFdT = &d_dFdT[index*NVAR]; double *jac0 = &d_jac0[index*LU_NONZERO]; double *varErr = &d_varErr[index*NVAR]; double *var = &d_var[index*NSPEC]; double *fix = &d_fix[index*NFIX]; double *rconst = &d_rconst[index*NREACT]; if (index < VL_GLO) { int Nfun,Njac,Nstp,Nacc,Nrej,Ndec,Nsol,Nsng; double Texit, Hexit; Nfun = 0; Njac = 0; Nstp = 0; Nacc = 0; Nrej = 0; Ndec = 0; Nsol = 0; Nsng = 0; /* FIXME: add check for method */ const double *ros_A = &ros[method-1].ros_A[0]; const double *ros_C = &ros[method-1].ros_C[0]; const double *ros_M = &ros[method-1].ros_M[0]; const double *ros_E = &ros[method-1].ros_E[0]; const double *ros_Alpha = &ros[method-1].ros_Alpha[0]; const double *ros_Gamma = &ros[method-1].ros_Gamma[0]; const int *ros_NewF = &ros[method-1].ros_NewF[0]; const int ros_S = ros[method-1].ros_S; const double ros_ELO = ros[method-1].ros_ELO; /* Copy data from global memory to temporary array */ /* * Optimization note: if we ever have enough constant * memory, we could use it for storing the data. * In current architectures if we use constant memory * only a few threads will be able to run on the fly. 
* */ for (int i=0; i<NSPEC; i++) var(index,i) = conc(index,i); for (int i=0; i<NFIX; i++) fix(index,i) = conc(index,NVAR+i); update_rconst(var, khet_st, khet_tr, jx, rconst, temp_gpu, press_gpu, cair_gpu, VL_GLO); ros_Integrator(var, fix, Tstart, Tend, Texit, // Rosenbrock method coefficients ros_S, ros_M, ros_E, ros_A, ros_C, ros_Alpha, ros_Gamma, ros_ELO, ros_NewF, // Integration parameters autonomous, vectorTol, Max_no_steps, roundoff, Hmin, Hmax, Hstart, Hexit, FacMin, FacMax, FacRej, FacSafe, // Status parameters Nfun, Njac, Nstp, Nacc, Nrej, Ndec, Nsol, Nsng, // cuda global mem buffers rconst, absTol, relTol, varNew, Fcn0, K, dFdT, jac0, Ghimj, varErr, // For update rconst khet_st, khet_tr, jx, VL_GLO ); for (int i=0; i<NVAR; i++) conc(index,i) = var(index,i); /* Statistics */ istatus(index,ifun) = Nfun; istatus(index,ijac) = Njac; istatus(index,istp) = Nstp; istatus(index,iacc) = Nacc; istatus(index,irej) = Nrej; istatus(index,idec) = Ndec; istatus(index,isol) = Nsol; istatus(index,isng) = Nsng; // Last T and H rstatus(index,itexit) = Texit; rstatus(index,ihexit) = Hexit; } } =#=#=#=#=#=#=#=#=#=#=special_ros=#=#=#=#=#=#=#=#=#=#= // no int8 in CUDA :( __global__ void reduce_istatus_1(int *istatus, int4 *tmp_out_1, int4 *tmp_out_2, int VL_GLO, int *xNacc, int *xNrej) { int index = blockIdx.x*blockDim.x+threadIdx.x; int idx_1 = threadIdx.x; int global_size = blockDim.x*gridDim.x; int foo; //no int8 in CUDA :( int4 accumulator_1 = make_int4(0,0,0,0); int4 accumulator_2 = make_int4(0,0,0,0); while (index < VL_GLO) { accumulator_1.x += istatus(index,0); accumulator_1.y += istatus(index,1); accumulator_1.z += istatus(index,2); //some dirty work on the side... foo = istatus(index,3); xNacc[index] = foo; accumulator_1.w += foo; foo = istatus(index,4); xNrej[index] = foo; accumulator_2.x += foo; accumulator_2.y += istatus(index,5); accumulator_2.z += istatus(index,6); accumulator_2.w += istatus(index,7); index += global_size; } //no int8 in CUDA :( __shared__ int4 buffer_1[REDUCTION_SIZE_1]; __shared__ int4 buffer_2[REDUCTION_SIZE_1]; buffer_1[idx_1] = accumulator_1; buffer_2[idx_1] = accumulator_2; __syncthreads(); int idx_2, active_threads = blockDim.x; int4 tmp_1, tmp_2; while (active_threads != 1) { active_threads /= 2; if (idx_1 < active_threads) { idx_2 = idx_1+active_threads; tmp_1 = buffer_1[idx_1]; tmp_2 = buffer_1[idx_2]; tmp_1.x += tmp_2.x; tmp_1.y += tmp_2.y; tmp_1.z += tmp_2.z; tmp_1.w += tmp_2.w; buffer_1[idx_1] = tmp_1; tmp_1 = buffer_2[idx_1]; tmp_2 = buffer_2[idx_2]; tmp_1.x += tmp_2.x; tmp_1.y += tmp_2.y; tmp_1.z += tmp_2.z; tmp_1.w += tmp_2.w; buffer_2[idx_1] = tmp_1; } __syncthreads(); } if (idx_1 == 0) { tmp_out_1[blockIdx.x] = buffer_1[0]; tmp_out_2[blockIdx.x] = buffer_2[0]; } } __global__ void reduce_istatus_2(int4 *tmp_out_1, int4 *tmp_out_2, int *out) { int idx_1 = threadIdx.x; //no int8 in CUDA :( __shared__ int4 buffer_1[REDUCTION_SIZE_2]; __shared__ int4 buffer_2[REDUCTION_SIZE_2]; buffer_1[idx_1] = tmp_out_1[idx_1]; buffer_2[idx_1] = tmp_out_2[idx_1]; __syncthreads(); int idx_2, active_threads = blockDim.x; int4 tmp_1, tmp_2; while (active_threads != 1) { active_threads /= 2; if (idx_1 < active_threads) { idx_2 = idx_1+active_threads; tmp_1 = buffer_1[idx_1]; tmp_2 = buffer_1[idx_2]; tmp_1.x += tmp_2.x; tmp_1.y += tmp_2.y; tmp_1.z += tmp_2.z; tmp_1.w += tmp_2.w; buffer_1[idx_1] = tmp_1; tmp_1 = buffer_2[idx_1]; tmp_2 = buffer_2[idx_2]; tmp_1.x += tmp_2.x; tmp_1.y += tmp_2.y; tmp_1.z += tmp_2.z; tmp_1.w += tmp_2.w; buffer_2[idx_1] = tmp_1; } __syncthreads(); } if 
(idx_1 == 0) { tmp_1 = buffer_1[0]; tmp_2 = buffer_2[0]; out[0] = tmp_1.x; out[1] = tmp_1.y; out[2] = tmp_1.z; out[3] = tmp_1.w; out[4] = tmp_2.x; out[5] = tmp_2.y; out[6] = tmp_2.z; out[7] = tmp_2.w; } } /* Assuming different processes */ enum { TRUE=1, FALSE=0 } ; double *d_conc, *d_temp, *d_press, *d_cair, *d_khet_st, *d_khet_tr, *d_jx, *d_jac0, *d_Ghimj, *d_varNew, *d_K, *d_varErr, *d_dFdT, *d_Fcn0, *d_var, *d_fix, *d_rconst; int initialized = FALSE; /* Device pointers pointing to GPU */ double *d_rstatus, *d_absTol, *d_relTol; int *d_istatus, *d_istatus_rd, *d_xNacc, *d_xNrej; int4 *d_tmp_out_1, *d_tmp_out_2; /* Allocate arrays on device for Rosenbrock */ __host__ void init_first_time(int pe, int VL_GLO, int size_khet_st, int size_khet_tr, int size_jx ){ /* Select the proper GPU CARD */ int deviceCount, device; gpuErrchk( cudaGetDeviceCount(&deviceCount) ); device = pe % deviceCount; gpuErrchk( cudaSetDevice(device) ); printf("PE[%d]: selected %d of total %d\n",pe,device,deviceCount); cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); gpuErrchk( cudaMalloc ((void **) &d_conc , sizeof(double)*VL_GLO*(NSPEC)) ); gpuErrchk( cudaMalloc ((void **) &d_khet_st, sizeof(double)*VL_GLO*size_khet_st) ); gpuErrchk( cudaMalloc ((void **) &d_khet_tr, sizeof(double)*VL_GLO*size_khet_tr) ); gpuErrchk( cudaMalloc ((void **) &d_jx , sizeof(double)*VL_GLO*size_jx) ); gpuErrchk( cudaMalloc ((void **) &d_rstatus , sizeof(double)*VL_GLO*2) ); gpuErrchk( cudaMalloc ((void **) &d_istatus , sizeof(int)*VL_GLO*8) ); gpuErrchk( cudaMalloc ((void **) &d_absTol , sizeof(double)*NVAR) ); gpuErrchk( cudaMalloc ((void **) &d_relTol , sizeof(double)*NVAR) ); /* Allocate input arrays */ gpuErrchk( cudaMalloc ((void **) &temp_gpu , sizeof(double)*VL_GLO) ); gpuErrchk( cudaMalloc ((void **) &press_gpu , sizeof(double)*VL_GLO) ); gpuErrchk( cudaMalloc ((void **) &cair_gpu , sizeof(double)*VL_GLO) ); /* Allocate arrays on device for reducing metrics */ gpuErrchk( cudaMalloc ((void **) &d_istatus_rd , sizeof(int)*8)); gpuErrchk( cudaMalloc ((void **) &d_tmp_out_1 , sizeof(int4)*64)); gpuErrchk( cudaMalloc ((void **) &d_tmp_out_2 , sizeof(int4)*64)); gpuErrchk( cudaMalloc ((void **) &d_xNacc , sizeof(int)*VL_GLO)); gpuErrchk( cudaMalloc ((void **) &d_xNrej , sizeof(int)*VL_GLO)); /* Allocate arrays for solvers on device global memory to reduce the stack usage */ gpuErrchk( cudaMalloc ((void **) &d_jac0, sizeof(double)*VL_GLO*LU_NONZERO) ); gpuErrchk( cudaMalloc ((void **) &d_Ghimj, sizeof(double)*VL_GLO*LU_NONZERO) ); gpuErrchk( cudaMalloc ((void **) &d_varNew, sizeof(double)*VL_GLO*NVAR) ); gpuErrchk( cudaMalloc ((void **) &d_Fcn0, sizeof(double)*VL_GLO*NVAR) ); gpuErrchk( cudaMalloc ((void **) &d_dFdT, sizeof(double)*VL_GLO*NVAR) ); gpuErrchk( cudaMalloc ((void **) &d_K, sizeof(double)*VL_GLO*NVAR*6) ); // TODO: Change size according to solver steps gpuErrchk( cudaMalloc ((void **) &d_varErr, sizeof(double)*VL_GLO*NVAR) ); gpuErrchk( cudaMalloc ((void **) &d_var, sizeof(double)*VL_GLO*NSPEC) ); gpuErrchk( cudaMalloc ((void **) &d_fix, sizeof(double)*VL_GLO*NFIX) ); gpuErrchk( cudaMalloc ((void **) &d_rconst, sizeof(double)*VL_GLO*NREACT) ); initialized = TRUE; } /* * TODO: We should call it in some point.. 
*/ extern "C" void finalize_cuda(){ /* Free memory on the device */ gpuErrchk( cudaFree(d_conc ) ); gpuErrchk( cudaFree(d_temp ) ); gpuErrchk( cudaFree(d_press ) ); gpuErrchk( cudaFree(d_cair ) ); gpuErrchk( cudaFree(d_khet_st ) ); gpuErrchk( cudaFree(d_khet_tr ) ); gpuErrchk( cudaFree(d_jx ) ); gpuErrchk( cudaFree(d_rstatus ) ); gpuErrchk( cudaFree(d_istatus ) ); gpuErrchk( cudaFree(d_absTol ) ); gpuErrchk( cudaFree(d_relTol ) ); gpuErrchk( cudaFree(d_istatus_rd ) ); gpuErrchk( cudaFree(d_tmp_out_1 ) ); gpuErrchk( cudaFree(d_tmp_out_2 ) ); gpuErrchk( cudaFree(d_xNacc ) ); gpuErrchk( cudaFree(d_xNrej ) ); gpuErrchk( cudaFree(temp_gpu ) ); gpuErrchk( cudaFree(press_gpu ) ); gpuErrchk( cudaFree(cair_gpu ) ); gpuErrchk( cudaFree(d_jac0 ) ); gpuErrchk( cudaFree(d_Ghimj ) ); gpuErrchk( cudaFree(d_varNew ) ); gpuErrchk( cudaFree(d_Fcn0 ) ); gpuErrchk( cudaFree(d_dFdT ) ); gpuErrchk( cudaFree(d_K ) ); gpuErrchk( cudaFree(d_varErr ) ); gpuErrchk( cudaFree(d_var ) ); gpuErrchk( cudaFree(d_fix ) ); gpuErrchk( cudaFree(d_rconst ) ); } extern "C" void kpp_integrate_cuda_( int *pe_p, int *sizes, double *time_step_len_p, double *conc, double *temp, double *press, double *cair, double *khet_st, double *khet_tr, double *jx, double *absTol, double *relTol, int *ierr, int *istatus, int *xNacc, int *xNrej, double *rndoff, int *icntrl=NULL, double *rcntrl=NULL ) /* // TODO * Parameters: * pe_p: scalar int - processor element * VL_GLO: scalar int - size of the system * NSPEC: scalar int - number of species * NREACT: scalar int - number of reactions * NVAR: scalar int - * * Input data: * conc: 2D array of doubles - size: vl_glo x number of species * temp: 1D array of doubles - size: vl_glo * press: 1D array of doubles - size: vl_glo * cair: 1D array of doubles - size: vl_glo * khet_st: 2D array of doubles - size: vl_glo x number of species * khet_tr: 2D array of doubles - size: vl_glo x number of species * jx: 2D array of doubles - size: vl_glo x number of species * absTol: 1D array of doubles - size: number of species * relTol: 1D array of doubles - size: number of species * Control: * icntrl: 1D array of ints - size: 4 * sizes: 1D array of ints - size: 4 * rcntrl: 1D array of doubles - size: 7 * * */ { const double DELTAMIN = 1.0E-5; int VL_GLO = sizes[0]; int size_khet_st = sizes[1]; int size_khet_tr = sizes[2]; int size_jx = sizes[3]; double roundoff = *rndoff; double Tstart,Tend; Tstart = ZERO; Tend = *time_step_len_p; int pe = *pe_p; // variables from rcntrl and icntrl int autonomous, vectorTol, UplimTol, method, Max_no_steps; double Hmin, Hmax, Hstart, FacMin, FacMax, FacRej, FacSafe; //int rcntrl_bool = 0, icntrl_bool=0; if (rcntrl == NULL) { rcntrl = new double[7]; for (int i=0; i < 7; i++) rcntrl[i] = 0.0; } if (icntrl == NULL) { icntrl = new int[4]; for (int i=0; i < 4; i++) icntrl[i] = 0; } /* Allocate arrays on device for update_rconst kernel*/ if (initialized == FALSE) init_first_time(pe, VL_GLO, size_khet_st, size_khet_tr, size_jx); /* Copy data from host memory to device memory */ gpuErrchk( cudaMemcpy(d_conc , conc , sizeof(double)*VL_GLO*NSPEC , cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(temp_gpu , temp , sizeof(double)*VL_GLO , cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(press_gpu , press , sizeof(double)*VL_GLO , cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(cair_gpu , cair , sizeof(double)*VL_GLO , cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(d_khet_st, khet_st , sizeof(double)*VL_GLO*size_khet_st , cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(d_khet_tr, khet_tr , 
sizeof(double)*VL_GLO*size_khet_tr , cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(d_jx , jx , sizeof(double)*VL_GLO*size_jx , cudaMemcpyHostToDevice) ); /* Copy arrays from host memory to device memory for Rosenbrock */ gpuErrchk( cudaMemcpy(d_absTol, absTol, sizeof(double)*NVAR, cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(d_relTol, relTol, sizeof(double)*NVAR, cudaMemcpyHostToDevice) ); /* Compute execution configuration for update_rconst */ int block_size, grid_size; block_size = BLOCKSIZE; grid_size = (VL_GLO + block_size - 1)/block_size; dim3 dimBlock(block_size); dim3 dimGrid(grid_size); /* Execute the kernel */ //update_rconst<<<dimGrid,dimBlock>>>(d_conc, d_khet_st, d_khet_tr, d_jx, VL_GLO); GPU_DEBUG(); // *------------------------------------------------------* // | Default values vs input settings (icntrl, rcntrl) | // *------------------------------------------------------* int ierr_tmp=0; { // autonomous or time dependent ODE. Default is time dependent. autonomous = !(icntrl[0] == 0); // For Scalar tolerances (icntrl[1].NE.0) the code uses absTol(0) and relTol(0) // For Vector tolerances (icntrl[1] == 0) the code uses absTol(0:NVAR) and relTol(0:NVAR) if (icntrl[1] == 0) { vectorTol = 1; //bool UplimTol = NVAR; } else { vectorTol = 0; UplimTol = 1; } // The particular Rosenbrock method chosen if (icntrl[2] == 0) { method = 4; } else if ((icntrl[2] >= 1) && (icntrl[2] <= 5)) { method = icntrl[2]; } else { printf("User-selected Rosenbrock method: icntrl[2]=%d\n",method); ierr_tmp = -2; } // The maximum number of steps admitted if (icntrl[3] == 0) { Max_no_steps = 100000; } else if (icntrl[3] > 0) { Max_no_steps=icntrl[3]; } else { printf("User-selected max no. of steps: icntrl[3]=%d\n",icntrl[3]); ierr_tmp = -1; } // Unit roundoff (1+ roundoff>1) roundoff = machine_eps_flt(); // Lower bound on the step size: (positive value) if (rcntrl[0] == ZERO) { Hmin = ZERO; } else if (rcntrl[0] > ZERO) { Hmin = rcntrl[0]; } else { printf("User-selected Hmin: rcntrl[0]=%f\n",rcntrl[0]); ierr_tmp = -3; } // Upper bound on the step size: (positive value) if (rcntrl[1] == ZERO) { Hmax = fabs(Tend-Tstart); } else if (rcntrl[1] > ZERO) { Hmax = fmin(fabs(rcntrl[1]),fabs(Tend-Tstart)); } else { printf("User-selected Hmax: rcntrl[1]=%f\n",rcntrl[1]); ierr_tmp = -3; } // Starting step size: (positive value) if (rcntrl[2] == ZERO) { Hstart = fmax(Hmin,DELTAMIN); } else if (rcntrl[2] > ZERO) { Hstart = fmin(fabs(rcntrl[2]),fabs(Tend-Tstart)); } else { printf("User-selected Hstart: rcntrl[2]=%f\n",rcntrl[2]); ierr_tmp = -3; } // Step size can be changed s.t. 
FacMin < Hnew/Hexit < FacMax if (rcntrl[3] == ZERO) { FacMin = 0.2; } else if (rcntrl[3] > ZERO) { FacMin = rcntrl[3]; } else { printf("User-selected FacMin: rcntrl[3]=%f\n",rcntrl[3]); ierr_tmp = -4; } if (rcntrl[4] == ZERO) { FacMax = 6.0; } else if (rcntrl[4] > ZERO) { FacMax = rcntrl[4]; } else { printf("User-selected FacMax: rcntrl[4]=%f\n",rcntrl[4]); ierr_tmp = -4; } // FacRej: Factor to decrease step after 2 succesive rejections if (rcntrl[5] == ZERO) { FacRej = 0.1; } else if (rcntrl[5] > ZERO) { FacRej = rcntrl[5]; } else { printf("User-selected FacRej: rcntrl[5]=%f\n",rcntrl[5]); ierr_tmp = -4; } // FacSafe: Safety Factor in the computation of new step size if (rcntrl[6] == ZERO) { FacSafe = 0.9; } else if (rcntrl[6] > ZERO) { FacSafe = rcntrl[6]; } else { printf("User-selected FacSafe: rcntrl[6]=%f\n",rcntrl[6]); ierr_tmp = -4; } // Check if tolerances are reasonable for (int i=0; i < UplimTol; i++) { if ((absTol[i] <= ZERO) || (relTol[i] <= 10.0*roundoff) || (relTol[i] >= 1.0)) { printf("CCC absTol(%d) = %f \n",i,absTol[i]); printf("CCC relTol(%d) = %f \n",i,relTol[i]); ierr_tmp = -5; } } } =#=#=#=#=#=#=#=#=#=#=call_kernel=#=#=#=#=#=#=#=#=#=#= GPU_DEBUG(); reduce_istatus_1<<<REDUCTION_SIZE_2,REDUCTION_SIZE_1>>>(d_istatus, d_tmp_out_1, d_tmp_out_2, VL_GLO, d_xNacc, d_xNrej); GPU_DEBUG(); reduce_istatus_2<<<1,REDUCTION_SIZE_2>>>(d_tmp_out_1, d_tmp_out_2, d_istatus_rd); GPU_DEBUG(); /* Copy the result back */ gpuErrchk( cudaMemcpy( conc , d_conc , sizeof(double)*VL_GLO*NVAR, cudaMemcpyDeviceToHost) ); gpuErrchk( cudaMemcpy( xNacc , d_xNacc , sizeof(int)*VL_GLO , cudaMemcpyDeviceToHost) ); gpuErrchk( cudaMemcpy( xNrej , d_xNrej , sizeof(int)*VL_GLO , cudaMemcpyDeviceToHost) ); return; }
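// Illustrative sketch (the helper name is hypothetical; the default factors are the
// wrapper defaults above, FacMin = 0.2, FacMax = 6.0, FacSafe = 0.9): ros_Integrator
// derives the next step size from the scaled error norm Err as
//     Hnew = H * min(FacMax, max(FacMin, FacSafe * Err^(-1/ELO))),
// and clamps it to [Hmin, Hmax] on acceptance. The formula in isolation:

#include <math.h>

static double next_step_size(double H, double Err, double ros_ELO,
                             double FacMin = 0.2, double FacMax = 6.0,
                             double FacSafe = 0.9)
{
    // A small Err grows the step (capped at FacMax); a large Err shrinks it
    // (floored at FacMin), exactly as in the UntilAccepted loop above.
    double Fac = fmin(FacMax, fmax(FacMin, FacSafe / pow(Err, 1.0 / ros_ELO)));
    return H * Fac;
}

// Example: for Ros3 (ELO = 3) and Err = 8, Fac = 0.9 / 2 = 0.45, so the step is
// roughly halved; for a very small Err the growth is capped at FacMax = 6.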
314c81b94bc71208b883d494e2361f369963a0e2.hip
// !!! This is a file automatically generated by hipify!!! #define GLM_FORCE_CUDA #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" // include for cuda syntax intellisense #include <device_launch_parameters.h> #include <hip/hip_runtime.h> // LOOK-2.1 potentially useful for doing grid-based neighbor search #ifndef imax #define imax( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef imin #define imin( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 256 // LOOK-1.2 Parameters for the boids algorithm. // These worked well in our reference implementation. #define rule1Distance 5.0f #define rule2Distance 3.0f #define rule3Distance 5.0f #define rule1Scale 0.01f #define rule2Scale 0.1f #define rule3Scale 0.1f #define maxSpeed 1.0f /*! Size of the starting area in simulation space. */ #define scene_scale 100.0f //Jack12 add #define emptyCell -1 #define useSharedMemory /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); // LOOK-1.2 - These buffers are here to hold all your boid information. // These get allocated for you in Boids::initSimulation. // Consider why you would need two velocity buffers in a simulation where each // boid cares about its neighbors' velocities. // These are called ping-pong buffers. glm::vec3 *dev_pos; glm::vec3 *dev_vel1; glm::vec3 *dev_vel2; // LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust // pointers on your own too. // For efficient sorting and the uniform grid. These should always be parallel. int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle? int *dev_particleGridIndices; // What grid cell is this particle in? // needed for use with thrust thrust::device_ptr<int> dev_thrust_particleArrayIndices; thrust::device_ptr<int> dev_thrust_particleGridIndices; int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs int *dev_gridCellEndIndices; // to this cell? // TODO-2.3 - consider what additional buffers you might need to reshuffle // the position and velocity data to be coherent within cells. glm::vec3* dev_pos_buf; // LOOK-2.1 - Grid parameters based on simulation parameters. // These are automatically computed for you in Boids::initSimulation int gridCellCount; int gridSideCount; float gridCellWidth; float gridInverseCellWidth; glm::vec3 gridMinimum; // Jack12 add float maxSearchRange; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * LOOK-1.2 - this is a typical helper function for a CUDA kernel. * Function for generating a random vec3. 
*/ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * LOOK-1.2 - This is a basic CUDA kernel. * CUDA kernel for generating boids with a specified mass randomly around the star. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = scale * rand.z; } } /** * Initialize memory, update some globals */ void Boids::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); // LOOK-1.2 - This is basic CUDA memory management and error checking. // Don't forget to hipFree in Boids::endSimulation. hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_pos failed!"); hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!"); hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!"); // LOOK-1.2 - This is a typical CUDA kernel invocation. hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects, dev_pos, scene_scale); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); // LOOK-2.1 computing grid params maxSearchRange = ::max(::max(rule1Distance, rule2Distance), rule3Distance); gridCellWidth = 2.0f * maxSearchRange; /*float lambda = 5.0f; gridCellWidth = std::cbrt(numObjects * lambda);*/ int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; gridSideCount = 2 * halfSideCount; gridCellCount = gridSideCount * gridSideCount * gridSideCount; gridInverseCellWidth = 1.0f / gridCellWidth; float halfGridWidth = gridCellWidth * halfSideCount; gridMinimum.x -= halfGridWidth; gridMinimum.y -= halfGridWidth; gridMinimum.z -= halfGridWidth; // TODO-2.1 TODO-2.3 - Allocate additional buffers here. hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!"); hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int) ); checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!"); hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!"); hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!"); // for thrust sort dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices); dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices); // Jack12 2.3 hipMalloc((void**)&dev_pos_buf, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_pos_buf failed!"); } /****************** * copyBoidsToVBO * ******************/ /** * Copy the boid positions into the VBO so that they can be drawn by OpenGL. 
*/ __global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1.0f; } } __global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < N) { vbo[4 * index + 0] = vel[index].x + 0.3f; vbo[4 * index + 1] = vel[index].y + 0.3f; vbo[4 * index + 2] = vel[index].z + 0.3f; vbo[4 * index + 3] = 1.0f; } } /** * Wrapper for call to the kernCopyboidsToVBO CUDA kernel. */ void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale); kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale); checkCUDAErrorWithLine("copyBoidsToVBO failed!"); hipDeviceSynchronize(); } /****************** * stepSimulation * ******************/ #pragma region naive_rule_1_3 __device__ glm::vec3 naive_rule_1(const int& N, const int& iSelf, const glm::vec3* pos, const glm::vec3* vel) { glm::vec3 perceived_center = glm::vec3(0.0f, 0.0f, 0.0f); int num_of_neighbors = 0; for (int i = 0; i < N; i++) { if (i == iSelf) { continue; } if (glm::distance(pos[i], pos[iSelf]) < rule1Distance) { num_of_neighbors++; perceived_center += pos[i]; } } if (num_of_neighbors != 0) { perceived_center /= (float)num_of_neighbors; perceived_center = (perceived_center - pos[iSelf]) * rule1Scale; } else { // the tricky part is this line perceived_center = glm::vec3(0.0f, 0.0f, 0.0f); } return perceived_center; } __device__ glm::vec3 naive_rule_2(const int& N, const int& iSelf, const glm::vec3* pos, const glm::vec3* vel) { glm::vec3 out; for (int i = 0; i < N; i++) { if (i == iSelf) { continue; } if (glm::distance(pos[i], pos[iSelf]) < rule2Distance) { out -= pos[i] - pos[iSelf]; } } return out * rule2Scale; } __device__ glm::vec3 naive_rule_3(const int& N, const int& iSelf, const glm::vec3* pos, const glm::vec3* vel) { glm::vec3 out; // perceived_velocity int num_of_neighbors = 0; for (int i = 0; i < N; i++) { if (i == iSelf) { continue; } if (glm::distance(pos[i], pos[iSelf]) < rule3Distance) { num_of_neighbors++; out += vel[i]; } } out = num_of_neighbors != 0 ? (out / (float)num_of_neighbors) : out; return out * rule3Scale; } #pragma endregion naive_rule_1_3 /** * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce. * __device__ code can be called from a __global__ context * Compute the new velocity on the body with index `iSelf` due to the `N` boids * in the `pos` and `vel` arrays. */ __device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) { glm::vec3 out(0.0f); // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves // Rule 2: boids try to stay a distance d away from each other // Rule 3: boids try to match the speed of surrounding boids out += naive_rule_1(N, iSelf, pos, vel); out += naive_rule_2(N, iSelf, pos, vel); out += naive_rule_3(N, iSelf, pos, vel); return out; } /** * TODO-1.2 implement basic flocking * For each of the `N` bodies, update its position based on its current velocity. 
*/ __global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } // Compute a new velocity based on pos and vel1 glm::vec3 tmp_v = vel1[index] + computeVelocityChange(N, index, pos, vel1); // Clamp the speed tmp_v = glm::length(tmp_v) < maxSpeed ? tmp_v : glm::normalize(tmp_v) * maxSpeed; // Record the new velocity into vel2. Question: why NOT vel1? vel2[index] = tmp_v; } /** * LOOK-1.2 Since this is pretty trivial, we implemented it for you. * For each of the `N` bodies, update its position based on its current velocity. */ __global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) { // Update position by velocity int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 thisPos = pos[index]; thisPos += vel[index] * dt; // Wrap the boids around so we don't lose them thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x; thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y; thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z; thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x; thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y; thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z; pos[index] = thisPos; } // LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index. // LOOK-2.3 Looking at this method, what would be the most memory efficient // order for iterating over neighboring grid cells? // for(x) // for(y) // for(z)? Or some other order? __device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) { return x + y * gridResolution + z * gridResolution * gridResolution; } __global__ void kernComputeIndices(int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, glm::vec3 *pos, int *arrayIndices, int *gridIndices) { // TODO-2.1 // - Label each boid with the index of its grid cell. // - Set up a parallel array of integer indices as pointers to the actual // boid data in pos and vel1/vel2 int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } arrayIndices[index] = index; glm::vec3 idx_pos = pos[index]; glm::ivec3 grid_3d_idx = (idx_pos - gridMin) * inverseCellWidth ; // need to clamp grid_3d_idx = glm::clamp(grid_3d_idx, glm::ivec3(0), glm::ivec3(gridResolution - 1)); gridIndices[index] = gridIndex3Dto1D(grid_3d_idx.x, grid_3d_idx.y, grid_3d_idx.z, gridResolution); } // LOOK-2.1 Consider how this could be useful for indicating that a cell // does not enclose any boids __global__ void kernResetIntBuffer(int N, int *intBuffer, int value) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { intBuffer[index] = value; } } __global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices, int *gridCellStartIndices, int *gridCellEndIndices) { // TODO-2.1 // Identify the start point of each cell in the gridIndices array. // This is basically a parallel unrolling of a loop that goes // "this index doesn't match the one before it, must be a new cell!" 
int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } int prv_index = (index + N - 1) % N; // if index == 0, get N - 1 int nxt_index = (index + N + 1) % N; int grid_index = particleGridIndices[index]; int grid_prv_index = particleGridIndices[prv_index]; int grid_nxt_index = particleGridIndices[nxt_index]; // here stores the index after sorting, which stands for the start and end index for boid_index in one cell //gridCellStartIndices[grid_index] = (grid_index != grid_prv_index) ? index : gridCellStartIndices[grid_index]; //gridCellEndIndices[grid_index] = (grid_index != grid_nxt_index) ? index : gridCellEndIndices[grid_index]; // why this work while the up not? if (grid_index != grid_prv_index) { gridCellStartIndices[grid_index] = index; } if (grid_index != grid_nxt_index) { gridCellEndIndices[grid_index] = index; } } #pragma region 2_3 __global__ void kernAlignVelPos( int N, glm::vec3* pos1, glm::vec3* pos2, glm::vec3* vel1, glm::vec3* vel2, int *dev_arrayindices ) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } int AlignIdx = dev_arrayindices[index]; pos2[index] = pos1[AlignIdx]; vel2[index] = vel1[AlignIdx]; } #pragma endregion 2_3 __global__ void kernUpdateVelNeighborSearchScattered( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, float maxSearchRange, int *gridCellStartIndices, int *gridCellEndIndices, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2, bool isAligned = false){ // TODO-2.1 - Update a boid's velocity using the uniform grid to reduce // the number of boids that need to be checked. int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } #ifdef useSharedMemory int t = threadIdx.x; extern __shared__ glm::vec3 share_pos[]; share_pos[t] = pos[index]; __syncthreads(); glm::vec3 cur_pos = share_pos[t]; #else glm::vec3 cur_pos = pos[index]; #endif // useSharedMemory // - Identify the grid cell that this particle is in glm::vec3 cur_pos_2_origin = cur_pos - gridMin; glm::ivec3 cur_cell_idx = cur_pos_2_origin * inverseCellWidth; // - Identify which cells may contain neighbors. This isn't always 8. glm::ivec3 min_cell_idx = glm::floor( (cur_pos_2_origin - maxSearchRange) * inverseCellWidth); glm::ivec3 max_cell_idx = glm::floor( (cur_pos_2_origin + maxSearchRange) * inverseCellWidth ); // clamp for safety min_cell_idx = glm::clamp(min_cell_idx, glm::ivec3(0), glm::ivec3(gridResolution - 1)); max_cell_idx = glm::clamp(max_cell_idx, glm::ivec3(0), glm::ivec3(gridResolution - 1)); // - For each cell, read the start/end indices in the boid pointer array. glm::vec3 d_v; int num_1_neighbors = 0; int num_3_neigbors = 0; glm::vec3 tmp1(0.0f); glm::vec3 tmp2(0.0f); glm::vec3 tmp3(0.0f); // loop z first for (int iz = min_cell_idx.z; iz <= max_cell_idx.z; iz++) { for (int iy = min_cell_idx.y; iy <= max_cell_idx.y; iy++) { for (int ix = min_cell_idx.x; ix <= max_cell_idx.x; ix++) { int cell_idx = gridIndex3Dto1D(ix, iy, iz, gridResolution); // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. 
int start_idx = gridCellStartIndices[cell_idx]; int end_idx = gridCellEndIndices[cell_idx]; if (start_idx == emptyCell && end_idx == emptyCell) { continue; } for (int idx = start_idx; idx <= end_idx; idx++) { int other_boid_idx = particleArrayIndices[idx]; if (other_boid_idx == index) { continue; } //test[touched_particle] = other_boid_idx; /*touched_particle++;*/ glm::vec3 other_pos; glm::vec3 other_vel; if (!isAligned) { other_pos = pos[other_boid_idx]; other_vel = vel1[other_boid_idx]; } else { other_pos = pos[idx]; other_vel = vel1[idx]; } float dist = glm::distance(cur_pos, other_pos); // rule 1 if (dist < rule1Distance) { num_1_neighbors++; tmp1 += other_pos; } // rule 2 if (dist < rule2Distance) { tmp2 -= other_pos - cur_pos; } // rule3 if (dist < rule3Distance) { num_3_neigbors++; tmp3 += other_vel; } } } } } if (num_1_neighbors != 0) { tmp1 /= num_1_neighbors; #ifdef useSharedMemory tmp1 -= share_pos[t]; #else tmp1 -= pos[index]; #endif } else { tmp1 = glm::vec3(0.0f); } tmp3 = num_3_neigbors != 0 ? (tmp3 / (float)num_3_neigbors) : tmp3; d_v = rule1Scale * tmp1 + rule2Scale * tmp2 + rule3Scale * tmp3; /*int1 tmp = make_int1(touched_particle);*/ glm::vec3 tmp_v = d_v + vel1[index]; vel2[index] = glm::length(tmp_v) < maxSpeed ? tmp_v : glm::normalize(tmp_v) * maxSpeed; } __global__ void kernUpdateVelNeighborSearchCoherent( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered, // except with one less level of indirection. // This should expect gridCellStartIndices and gridCellEndIndices to refer // directly to pos and vel1. // - Identify the grid cell that this particle is in // - Identify which cells may contain neighbors. This isn't always 8. // - For each cell, read the start/end indices in the boid pointer array. // DIFFERENCE: For best results, consider what order the cells should be // checked in to maximize the memory benefits of reordering the boids data. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. // - Clamp the speed change before putting the new speed in vel2 } /** * Step the entire N-body simulation by `dt` seconds. */ void Boids::stepSimulationNaive(float dt) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); // TODO-1.2 - use the kernels you wrote to step the simulation forward in time. hipLaunchKernelGGL(( kernUpdateVelocityBruteForce) , dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, numObjects, dev_pos, dev_vel1, dev_vel2); kernUpdatePos << < fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2); // TODO-1.2 ping-pong the velocity buffers std::swap(dev_vel1, dev_vel2); } void Boids::stepSimulationScatteredGrid(float dt, bool isAligned = false) { // TODO-2.1 // Uniform Grid Neighbor search using Thrust sort. // In Parallel: dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); // - label each particle with its array index as well as its grid index. // Use 2x width grids. kernComputeIndices<< <fullBlocksPerGrid, blockSize>> >( numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. 
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices ); if (isAligned) { kernAlignVelPos << <fullBlocksPerGrid, blockSize >> > ( numObjects, dev_pos, dev_pos_buf, dev_vel1, dev_vel2, dev_particleArrayIndices ); std::swap(dev_vel1, dev_vel2); std::swap(dev_pos, dev_pos_buf); } /*int* debug_data = (int *)malloc(numObjects * sizeof(int)); hipMemcpy(debug_data, dev_particleGridIndices, numObjects * sizeof(int), hipMemcpyDeviceToHost); for (int i = 0; i < numObjects; i++) { std::cout << debug_data[i] << " "; }std::cout << std::endl;*/ // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices dim3 gridBlocks((gridCellCount + blockSize - 1) / blockSize); kernResetIntBuffer << <gridBlocks, blockSize >> > (gridCellCount, dev_gridCellStartIndices, emptyCell); kernResetIntBuffer << <gridBlocks, blockSize >> > (gridCellCount, dev_gridCellEndIndices, emptyCell); hipLaunchKernelGGL(( kernIdentifyCellStartEnd) , dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); // - Perform velocity updates using neighbor search #ifdef useSharedMemory kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid, blockSize, blockSize * sizeof(glm::vec3)>> > ( numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, maxSearchRange, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2, isAligned); #else kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid, blockSize >> > ( numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, maxSearchRange, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2, isAligned); #endif // - Update positions hipLaunchKernelGGL(( kernUpdatePos) , dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, numObjects, dt, dev_pos, dev_vel2); // - Ping-pong buffers as needed std::swap(dev_vel1, dev_vel2); } void Boids::stepSimulationCoherentGrid(float dt) { // TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid // Uniform Grid Neighbor search using Thrust sort on cell-coherent data. // In Parallel: stepSimulationScatteredGrid(dt, true); // - Label each particle with its array index as well as its grid index. // Use 2x width grids // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices // - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all // the particle data in the simulation array. // CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED // - Perform velocity updates using neighbor search // - Update positions // - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE. } void Boids::endSimulation() { hipFree(dev_vel1); hipFree(dev_vel2); hipFree(dev_pos); // TODO-2.1 TODO-2.3 - Free any additional buffers here. hipFree(dev_particleArrayIndices); hipFree(dev_particleGridIndices); hipFree(dev_gridCellStartIndices); hipFree(dev_gridCellEndIndices); // jack12 2.3 hipFree(dev_pos_buf); } void Boids::unitTest() { // LOOK-1.2 Feel free to write additional tests here. 
// test unstable sort int *dev_intKeys; int *dev_intValues; int N = 10; std::unique_ptr<int[]>intKeys{ new int[N] }; std::unique_ptr<int[]>intValues{ new int[N] }; intKeys[0] = 0; intValues[0] = 0; intKeys[1] = 1; intValues[1] = 1; intKeys[2] = 0; intValues[2] = 2; intKeys[3] = 3; intValues[3] = 3; intKeys[4] = 0; intValues[4] = 4; intKeys[5] = 2; intValues[5] = 5; intKeys[6] = 2; intValues[6] = 6; intKeys[7] = 0; intValues[7] = 7; intKeys[8] = 5; intValues[8] = 8; intKeys[9] = 6; intValues[9] = 9; hipMalloc((void**)&dev_intKeys, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!"); hipMalloc((void**)&dev_intValues, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_intValues failed!"); dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); std::cout << "before unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // How to copy data to the GPU hipMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, hipMemcpyHostToDevice); hipMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, hipMemcpyHostToDevice); // Wrap device vectors in thrust iterators for use with thrust. thrust::device_ptr<int> dev_thrust_keys(dev_intKeys); thrust::device_ptr<int> dev_thrust_values(dev_intValues); // LOOK-2.1 Example for using thrust::sort_by_key thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values); // How to copy data back to the CPU side from the GPU hipMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost); hipMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost); checkCUDAErrorWithLine("memcpy back failed!"); std::cout << "after unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // cleanup hipFree(dev_intKeys); hipFree(dev_intValues); checkCUDAErrorWithLine("hipFree failed!"); return; }
314c81b94bc71208b883d494e2361f369963a0e2.cu
#define GLM_FORCE_CUDA #include <stdio.h> #include <cuda.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" // include for cuda syntax intellisense #include <device_launch_parameters.h> #include <cuda_runtime.h> // LOOK-2.1 potentially useful for doing grid-based neighbor search #ifndef imax #define imax( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef imin #define imin( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 256 // LOOK-1.2 Parameters for the boids algorithm. // These worked well in our reference implementation. #define rule1Distance 5.0f #define rule2Distance 3.0f #define rule3Distance 5.0f #define rule1Scale 0.01f #define rule2Scale 0.1f #define rule3Scale 0.1f #define maxSpeed 1.0f /*! Size of the starting area in simulation space. */ #define scene_scale 100.0f //Jack12 add #define emptyCell -1 #define useSharedMemory /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); // LOOK-1.2 - These buffers are here to hold all your boid information. // These get allocated for you in Boids::initSimulation. // Consider why you would need two velocity buffers in a simulation where each // boid cares about its neighbors' velocities. // These are called ping-pong buffers. glm::vec3 *dev_pos; glm::vec3 *dev_vel1; glm::vec3 *dev_vel2; // LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust // pointers on your own too. // For efficient sorting and the uniform grid. These should always be parallel. int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle? int *dev_particleGridIndices; // What grid cell is this particle in? // needed for use with thrust thrust::device_ptr<int> dev_thrust_particleArrayIndices; thrust::device_ptr<int> dev_thrust_particleGridIndices; int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs int *dev_gridCellEndIndices; // to this cell? // TODO-2.3 - consider what additional buffers you might need to reshuffle // the position and velocity data to be coherent within cells. glm::vec3* dev_pos_buf; // LOOK-2.1 - Grid parameters based on simulation parameters. // These are automatically computed for you in Boids::initSimulation int gridCellCount; int gridSideCount; float gridCellWidth; float gridInverseCellWidth; glm::vec3 gridMinimum; // Jack12 add float maxSearchRange; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * LOOK-1.2 - this is a typical helper function for a CUDA kernel. * Function for generating a random vec3. 
*/ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * LOOK-1.2 - This is a basic CUDA kernel. * CUDA kernel for generating boids with a specified mass randomly around the star. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = scale * rand.z; } } /** * Initialize memory, update some globals */ void Boids::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); // LOOK-1.2 - This is basic CUDA memory management and error checking. // Don't forget to cudaFree in Boids::endSimulation. cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_pos failed!"); cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!"); cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!"); // LOOK-1.2 - This is a typical CUDA kernel invocation. kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects, dev_pos, scene_scale); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); // LOOK-2.1 computing grid params maxSearchRange = std::max(std::max(rule1Distance, rule2Distance), rule3Distance); gridCellWidth = 2.0f * maxSearchRange; /*float lambda = 5.0f; gridCellWidth = std::cbrt(numObjects * lambda);*/ int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; gridSideCount = 2 * halfSideCount; gridCellCount = gridSideCount * gridSideCount * gridSideCount; gridInverseCellWidth = 1.0f / gridCellWidth; float halfGridWidth = gridCellWidth * halfSideCount; gridMinimum.x -= halfGridWidth; gridMinimum.y -= halfGridWidth; gridMinimum.z -= halfGridWidth; // TODO-2.1 TODO-2.3 - Allocate additional buffers here. cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!"); cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int) ); checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!"); cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!"); cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!"); // for thrust sort dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices); dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices); // Jack12 2.3 cudaMalloc((void**)&dev_pos_buf, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_pos_buf failed!"); } /****************** * copyBoidsToVBO * ******************/ /** * Copy the boid positions into the VBO so that they can be drawn by OpenGL. 
*/ __global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1.0f; } } __global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < N) { vbo[4 * index + 0] = vel[index].x + 0.3f; vbo[4 * index + 1] = vel[index].y + 0.3f; vbo[4 * index + 2] = vel[index].z + 0.3f; vbo[4 * index + 3] = 1.0f; } } /** * Wrapper for call to the kernCopyboidsToVBO CUDA kernel. */ void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale); kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale); checkCUDAErrorWithLine("copyBoidsToVBO failed!"); cudaDeviceSynchronize(); } /****************** * stepSimulation * ******************/ #pragma region naive_rule_1_3 __device__ glm::vec3 naive_rule_1(const int& N, const int& iSelf, const glm::vec3* pos, const glm::vec3* vel) { glm::vec3 perceived_center = glm::vec3(0.0f, 0.0f, 0.0f); int num_of_neighbors = 0; for (int i = 0; i < N; i++) { if (i == iSelf) { continue; } if (glm::distance(pos[i], pos[iSelf]) < rule1Distance) { num_of_neighbors++; perceived_center += pos[i]; } } if (num_of_neighbors != 0) { perceived_center /= (float)num_of_neighbors; perceived_center = (perceived_center - pos[iSelf]) * rule1Scale; } else { // the tricky part is this line perceived_center = glm::vec3(0.0f, 0.0f, 0.0f); } return perceived_center; } __device__ glm::vec3 naive_rule_2(const int& N, const int& iSelf, const glm::vec3* pos, const glm::vec3* vel) { glm::vec3 out; for (int i = 0; i < N; i++) { if (i == iSelf) { continue; } if (glm::distance(pos[i], pos[iSelf]) < rule2Distance) { out -= pos[i] - pos[iSelf]; } } return out * rule2Scale; } __device__ glm::vec3 naive_rule_3(const int& N, const int& iSelf, const glm::vec3* pos, const glm::vec3* vel) { glm::vec3 out; // perceived_velocity int num_of_neighbors = 0; for (int i = 0; i < N; i++) { if (i == iSelf) { continue; } if (glm::distance(pos[i], pos[iSelf]) < rule3Distance) { num_of_neighbors++; out += vel[i]; } } out = num_of_neighbors != 0 ? (out / (float)num_of_neighbors) : out; return out * rule3Scale; } #pragma endregion naive_rule_1_3 /** * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce. * __device__ code can be called from a __global__ context * Compute the new velocity on the body with index `iSelf` due to the `N` boids * in the `pos` and `vel` arrays. */ __device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) { glm::vec3 out(0.0f); // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves // Rule 2: boids try to stay a distance d away from each other // Rule 3: boids try to match the speed of surrounding boids out += naive_rule_1(N, iSelf, pos, vel); out += naive_rule_2(N, iSelf, pos, vel); out += naive_rule_3(N, iSelf, pos, vel); return out; } /** * TODO-1.2 implement basic flocking * For each of the `N` bodies, update its position based on its current velocity. 
*/ __global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } // Compute a new velocity based on pos and vel1 glm::vec3 tmp_v = vel1[index] + computeVelocityChange(N, index, pos, vel1); // Clamp the speed tmp_v = glm::length(tmp_v) < maxSpeed ? tmp_v : glm::normalize(tmp_v) * maxSpeed; // Record the new velocity into vel2. Question: why NOT vel1? vel2[index] = tmp_v; } /** * LOOK-1.2 Since this is pretty trivial, we implemented it for you. * For each of the `N` bodies, update its position based on its current velocity. */ __global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) { // Update position by velocity int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 thisPos = pos[index]; thisPos += vel[index] * dt; // Wrap the boids around so we don't lose them thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x; thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y; thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z; thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x; thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y; thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z; pos[index] = thisPos; } // LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index. // LOOK-2.3 Looking at this method, what would be the most memory efficient // order for iterating over neighboring grid cells? // for(x) // for(y) // for(z)? Or some other order? __device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) { return x + y * gridResolution + z * gridResolution * gridResolution; } __global__ void kernComputeIndices(int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, glm::vec3 *pos, int *arrayIndices, int *gridIndices) { // TODO-2.1 // - Label each boid with the index of its grid cell. // - Set up a parallel array of integer indices as pointers to the actual // boid data in pos and vel1/vel2 int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } arrayIndices[index] = index; glm::vec3 idx_pos = pos[index]; glm::ivec3 grid_3d_idx = (idx_pos - gridMin) * inverseCellWidth ; // need to clamp grid_3d_idx = glm::clamp(grid_3d_idx, glm::ivec3(0), glm::ivec3(gridResolution - 1)); gridIndices[index] = gridIndex3Dto1D(grid_3d_idx.x, grid_3d_idx.y, grid_3d_idx.z, gridResolution); } // LOOK-2.1 Consider how this could be useful for indicating that a cell // does not enclose any boids __global__ void kernResetIntBuffer(int N, int *intBuffer, int value) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { intBuffer[index] = value; } } __global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices, int *gridCellStartIndices, int *gridCellEndIndices) { // TODO-2.1 // Identify the start point of each cell in the gridIndices array. // This is basically a parallel unrolling of a loop that goes // "this index doesn't match the one before it, must be a new cell!" 
int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } int prv_index = (index + N - 1) % N; // if index == 0, get N - 1 int nxt_index = (index + N + 1) % N; int grid_index = particleGridIndices[index]; int grid_prv_index = particleGridIndices[prv_index]; int grid_nxt_index = particleGridIndices[nxt_index]; // here stores the index after sorting, which stands for the start and end index for boid_index in one cell //gridCellStartIndices[grid_index] = (grid_index != grid_prv_index) ? index : gridCellStartIndices[grid_index]; //gridCellEndIndices[grid_index] = (grid_index != grid_nxt_index) ? index : gridCellEndIndices[grid_index]; // why this work while the up not? if (grid_index != grid_prv_index) { gridCellStartIndices[grid_index] = index; } if (grid_index != grid_nxt_index) { gridCellEndIndices[grid_index] = index; } } #pragma region 2_3 __global__ void kernAlignVelPos( int N, glm::vec3* pos1, glm::vec3* pos2, glm::vec3* vel1, glm::vec3* vel2, int *dev_arrayindices ) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } int AlignIdx = dev_arrayindices[index]; pos2[index] = pos1[AlignIdx]; vel2[index] = vel1[AlignIdx]; } #pragma endregion 2_3 __global__ void kernUpdateVelNeighborSearchScattered( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, float maxSearchRange, int *gridCellStartIndices, int *gridCellEndIndices, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2, bool isAligned = false){ // TODO-2.1 - Update a boid's velocity using the uniform grid to reduce // the number of boids that need to be checked. int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } #ifdef useSharedMemory int t = threadIdx.x; extern __shared__ glm::vec3 share_pos[]; share_pos[t] = pos[index]; __syncthreads(); glm::vec3 cur_pos = share_pos[t]; #else glm::vec3 cur_pos = pos[index]; #endif // useSharedMemory // - Identify the grid cell that this particle is in glm::vec3 cur_pos_2_origin = cur_pos - gridMin; glm::ivec3 cur_cell_idx = cur_pos_2_origin * inverseCellWidth; // - Identify which cells may contain neighbors. This isn't always 8. glm::ivec3 min_cell_idx = glm::floor( (cur_pos_2_origin - maxSearchRange) * inverseCellWidth); glm::ivec3 max_cell_idx = glm::floor( (cur_pos_2_origin + maxSearchRange) * inverseCellWidth ); // clamp for safety min_cell_idx = glm::clamp(min_cell_idx, glm::ivec3(0), glm::ivec3(gridResolution - 1)); max_cell_idx = glm::clamp(max_cell_idx, glm::ivec3(0), glm::ivec3(gridResolution - 1)); // - For each cell, read the start/end indices in the boid pointer array. glm::vec3 d_v; int num_1_neighbors = 0; int num_3_neigbors = 0; glm::vec3 tmp1(0.0f); glm::vec3 tmp2(0.0f); glm::vec3 tmp3(0.0f); // loop z first for (int iz = min_cell_idx.z; iz <= max_cell_idx.z; iz++) { for (int iy = min_cell_idx.y; iy <= max_cell_idx.y; iy++) { for (int ix = min_cell_idx.x; ix <= max_cell_idx.x; ix++) { int cell_idx = gridIndex3Dto1D(ix, iy, iz, gridResolution); // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. 
int start_idx = gridCellStartIndices[cell_idx]; int end_idx = gridCellEndIndices[cell_idx]; if (start_idx == emptyCell && end_idx == emptyCell) { continue; } for (int idx = start_idx; idx <= end_idx; idx++) { int other_boid_idx = particleArrayIndices[idx]; if (other_boid_idx == index) { continue; } //test[touched_particle] = other_boid_idx; /*touched_particle++;*/ glm::vec3 other_pos; glm::vec3 other_vel; if (!isAligned) { other_pos = pos[other_boid_idx]; other_vel = vel1[other_boid_idx]; } else { other_pos = pos[idx]; other_vel = vel1[idx]; } float dist = glm::distance(cur_pos, other_pos); // rule 1 if (dist < rule1Distance) { num_1_neighbors++; tmp1 += other_pos; } // rule 2 if (dist < rule2Distance) { tmp2 -= other_pos - cur_pos; } // rule3 if (dist < rule3Distance) { num_3_neigbors++; tmp3 += other_vel; } } } } } if (num_1_neighbors != 0) { tmp1 /= num_1_neighbors; #ifdef useSharedMemory tmp1 -= share_pos[t]; #else tmp1 -= pos[index]; #endif } else { tmp1 = glm::vec3(0.0f); } tmp3 = num_3_neigbors != 0 ? (tmp3 / (float)num_3_neigbors) : tmp3; d_v = rule1Scale * tmp1 + rule2Scale * tmp2 + rule3Scale * tmp3; /*int1 tmp = make_int1(touched_particle);*/ glm::vec3 tmp_v = d_v + vel1[index]; vel2[index] = glm::length(tmp_v) < maxSpeed ? tmp_v : glm::normalize(tmp_v) * maxSpeed; } __global__ void kernUpdateVelNeighborSearchCoherent( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered, // except with one less level of indirection. // This should expect gridCellStartIndices and gridCellEndIndices to refer // directly to pos and vel1. // - Identify the grid cell that this particle is in // - Identify which cells may contain neighbors. This isn't always 8. // - For each cell, read the start/end indices in the boid pointer array. // DIFFERENCE: For best results, consider what order the cells should be // checked in to maximize the memory benefits of reordering the boids data. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. // - Clamp the speed change before putting the new speed in vel2 } /** * Step the entire N-body simulation by `dt` seconds. */ void Boids::stepSimulationNaive(float dt) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); // TODO-1.2 - use the kernels you wrote to step the simulation forward in time. kernUpdateVelocityBruteForce <<< fullBlocksPerGrid, blockSize >>> (numObjects, dev_pos, dev_vel1, dev_vel2); kernUpdatePos << < fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2); // TODO-1.2 ping-pong the velocity buffers std::swap(dev_vel1, dev_vel2); } void Boids::stepSimulationScatteredGrid(float dt, bool isAligned = false) { // TODO-2.1 // Uniform Grid Neighbor search using Thrust sort. // In Parallel: dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); // - label each particle with its array index as well as its grid index. // Use 2x width grids. kernComputeIndices<< <fullBlocksPerGrid, blockSize>> >( numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. 
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices ); if (isAligned) { kernAlignVelPos << <fullBlocksPerGrid, blockSize >> > ( numObjects, dev_pos, dev_pos_buf, dev_vel1, dev_vel2, dev_particleArrayIndices ); std::swap(dev_vel1, dev_vel2); std::swap(dev_pos, dev_pos_buf); } /*int* debug_data = (int *)malloc(numObjects * sizeof(int)); cudaMemcpy(debug_data, dev_particleGridIndices, numObjects * sizeof(int), cudaMemcpyDeviceToHost); for (int i = 0; i < numObjects; i++) { std::cout << debug_data[i] << " "; }std::cout << std::endl;*/ // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices dim3 gridBlocks((gridCellCount + blockSize - 1) / blockSize); kernResetIntBuffer << <gridBlocks, blockSize >> > (gridCellCount, dev_gridCellStartIndices, emptyCell); kernResetIntBuffer << <gridBlocks, blockSize >> > (gridCellCount, dev_gridCellEndIndices, emptyCell); kernIdentifyCellStartEnd <<<fullBlocksPerGrid, blockSize >>> ( numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); // - Perform velocity updates using neighbor search #ifdef useSharedMemory kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid, blockSize, blockSize * sizeof(glm::vec3)>> > ( numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, maxSearchRange, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2, isAligned); #else kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid, blockSize >> > ( numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, maxSearchRange, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2, isAligned); #endif // - Update positions kernUpdatePos <<< fullBlocksPerGrid, blockSize >>>(numObjects, dt, dev_pos, dev_vel2); // - Ping-pong buffers as needed std::swap(dev_vel1, dev_vel2); } void Boids::stepSimulationCoherentGrid(float dt) { // TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid // Uniform Grid Neighbor search using Thrust sort on cell-coherent data. // In Parallel: stepSimulationScatteredGrid(dt, true); // - Label each particle with its array index as well as its grid index. // Use 2x width grids // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices // - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all // the particle data in the simulation array. // CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED // - Perform velocity updates using neighbor search // - Update positions // - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE. } void Boids::endSimulation() { cudaFree(dev_vel1); cudaFree(dev_vel2); cudaFree(dev_pos); // TODO-2.1 TODO-2.3 - Free any additional buffers here. cudaFree(dev_particleArrayIndices); cudaFree(dev_particleGridIndices); cudaFree(dev_gridCellStartIndices); cudaFree(dev_gridCellEndIndices); // jack12 2.3 cudaFree(dev_pos_buf); } void Boids::unitTest() { // LOOK-1.2 Feel free to write additional tests here. 
// test unstable sort int *dev_intKeys; int *dev_intValues; int N = 10; std::unique_ptr<int[]>intKeys{ new int[N] }; std::unique_ptr<int[]>intValues{ new int[N] }; intKeys[0] = 0; intValues[0] = 0; intKeys[1] = 1; intValues[1] = 1; intKeys[2] = 0; intValues[2] = 2; intKeys[3] = 3; intValues[3] = 3; intKeys[4] = 0; intValues[4] = 4; intKeys[5] = 2; intValues[5] = 5; intKeys[6] = 2; intValues[6] = 6; intKeys[7] = 0; intValues[7] = 7; intKeys[8] = 5; intValues[8] = 8; intKeys[9] = 6; intValues[9] = 9; cudaMalloc((void**)&dev_intKeys, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!"); cudaMalloc((void**)&dev_intValues, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!"); dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); std::cout << "before unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // How to copy data to the GPU cudaMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, cudaMemcpyHostToDevice); cudaMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, cudaMemcpyHostToDevice); // Wrap device vectors in thrust iterators for use with thrust. thrust::device_ptr<int> dev_thrust_keys(dev_intKeys); thrust::device_ptr<int> dev_thrust_values(dev_intValues); // LOOK-2.1 Example for using thrust::sort_by_key thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values); // How to copy data back to the CPU side from the GPU cudaMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost); cudaMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost); checkCUDAErrorWithLine("memcpy back failed!"); std::cout << "after unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // cleanup cudaFree(dev_intKeys); cudaFree(dev_intValues); checkCUDAErrorWithLine("cudaFree failed!"); return; }
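// Added usage sketch (not part of the original kernel.cu): a minimal headless driver showing how
// the simulation API defined above could be exercised. It assumes kernel.h declares these
// Boids:: functions with the signatures used in this file; the boid count, time step, and frame
// count are illustrative values only -- the real project drives these calls from its OpenGL
// visualization loop in main.cpp. Guarded so it does not affect a normal build.
#ifdef BOIDS_USAGE_SKETCH
#include "kernel.h"
int runHeadlessBoids() {
  const int N = 5000;      // assumed boid count
  const float dt = 0.2f;   // assumed time step
  Boids::initSimulation(N);                          // allocates dev_pos/dev_vel1/dev_vel2 and the grid buffers
  for (int frame = 0; frame < 1000; ++frame) {
    Boids::stepSimulationScatteredGrid(dt, false);   // uniform-grid neighbor search (scattered variant)
  }
  Boids::endSimulation();                            // frees every cudaMalloc'd buffer
  return 0;
}
#endif // BOIDS_USAGE_SKETCH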
e92848c0cfb261f37a37a22672528ab4bea31a9e.hip
// !!! This is a file automatically generated by hipify!!!
#include "funset.hpp"
#include <iostream>
#include <hip/hip_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_")
#include <device_launch_parameters.h>
#include "common.hpp"

// reference: C:\ProgramData\NVIDIA Corporation\CUDA Samples\v8.0\0_Simple\matrixMul

/* __global__: function type qualifier; the function runs on the device and is called from the
   host (with compute capability 3.2 and above it can also be called from the device); it must
   return void; calls to it are asynchronous, i.e. they return before the device has finished
   executing; every call must supply an execution configuration -- the grid and block dimensions
   and the associated stream -- via the <<< >>> operator (rewritten here by hipify as
   hipLaunchKernelGGL); "a kernel": a parallel function that runs on the GPU is called a kernel
   and must be declared with the __global__ qualifier */
template <int BLOCK_SIZE>
__global__ static void matrix_mul(const float* A, const float* B, float* C, int wA, int wB)
{
	/* gridDim: built-in variable describing the dimensions of the thread grid; it is the same for
	   all blocks and stores the number of blocks in each grid dimension; a grid is 3-D, type dim3;
	   blockDim: built-in variable describing the dimensions of each block, i.e. the number of
	   threads per block in each dimension; type dim3, identical for every block;
	   blockIdx: built-in variable holding the index of the block currently executing device code,
	   i.e. the position of this thread's block inside the grid; blockIdx.x ranges over
	   [0, gridDim.x-1] and blockIdx.y over [0, gridDim.y-1]; type uint3;
	   threadIdx: built-in variable holding the index of the current thread inside its block;
	   threadIdx.x for 1-D blocks, plus threadIdx.y for 2-D and threadIdx.z for 3-D; type uint3 */
	// Block index
	int bx = blockIdx.x;
	int by = blockIdx.y;
	// Thread index
	int tx = threadIdx.x;
	int ty = threadIdx.y;

	// Index of the first sub-matrix of A processed by the block
	int aBegin = wA * BLOCK_SIZE * by;
	// Index of the last sub-matrix of A processed by the block
	int aEnd = aBegin + wA - 1;
	// Step size used to iterate through the sub-matrices of A
	int aStep = BLOCK_SIZE;
	// Index of the first sub-matrix of B processed by the block
	int bBegin = BLOCK_SIZE * bx;
	// Step size used to iterate through the sub-matrices of B
	int bStep = BLOCK_SIZE * wB;

	// Csub is used to store the element of the block sub-matrix that is computed by the thread
	float Csub = 0;

	// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
	for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
		/* __shared__: variable type qualifier (optionally combined with __device__); the variable
		   lives in the block's shared memory, has the same lifetime as the block, and is visible
		   only to the threads of that block; __shared__ and __constant__ variables have static
		   storage by default; with a leading extern the size is determined by the execution
		   configuration; a __shared__ variable cannot be initialized in its declaration; the
		   compiler handles shared-memory variables differently from ordinary variables */
		// Declaration of the shared memory array As used to store the sub-matrix of A
		__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
		// Declaration of the shared memory array Bs used to store the sub-matrix of B
		__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

		// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
		As[ty][tx] = A[a + wA * ty + tx];
		Bs[ty][tx] = B[b + wB * ty + tx];

		/* __syncthreads: synchronizes the threads of a block; no thread executes an instruction
		   past the barrier until every thread of the block has reached it; threads of one block
		   exchange data through shared memory, and the barrier guarantees that data is shared
		   correctly; when timing with clock(), record clock() at the start and end of the measured
		   region -- after a barrier all threads of a block take the same time, so recording the
		   time per block is enough */
		// Synchronize to make sure the matrices are loaded
		__syncthreads();

		/* reference:
		   https://devblogs.nvidia.com/parallelforall/new-compiler-features-cuda-8/
		   https://stackoverflow.com/questions/22278631/what-does-pragma-unroll-do-exactly-does-it-affect-the-number-of-threads/22279341
		   By default the compiler unrolls loops only a small number of times; #pragma unroll sets
		   how many times the loop that immediately follows is unrolled (the programmer must make
		   sure that unrolling is correct); an optional number gives the unroll count, and
		   #pragma unroll 1 disables unrolling; without a count, loops with a constant trip count
		   are fully unrolled and loops with an unknown trip count are left alone */
#pragma unroll
		// Multiply the two matrices together; each thread computes one element of the block sub-matrix
		for (int k = 0; k < BLOCK_SIZE; ++k) {
			Csub += As[ty][k] * Bs[k][tx];
		}

		// Synchronize to make sure that the preceding computation is done before loading two new
		// sub-matrices of A and B in the next iteration
		__syncthreads();
	}

	// Write the block sub-matrix to device memory; each thread writes one element
	int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
	C[c + wB * ty + tx] = Csub;
}

__global__ static void matrix_mul(const float* A, const float* B, float* C, int colsA, int rowsA, int colsB, int rowsB)
{
	int x = threadIdx.x + blockIdx.x * blockDim.x;
	int y = threadIdx.y + blockIdx.y * blockDim.y;
	int offset = x + y * blockDim.x * gridDim.x;

	float sum{ 0.f };
	for (int t = 0; t < colsA; ++t) {
		sum += A[y * colsA + t] * B[t * colsB + x];
	}
	C[offset] = sum;
}

int matrix_mul_gpu(const float* A, const float* B, float* C, int colsA, int rowsA, int colsB, int rowsB, float* elapsed_time)
{
	CHECK(colsA == rowsB);

	/* hipEvent_t: CUDA event type (a struct); events measure the time the GPU spends on a task;
	   an event is essentially a GPU timestamp, and because events are implemented on the GPU they
	   are not suited to timing mixed code that contains both device and host parts */
	hipEvent_t start, stop;
	// hipEventCreate: creates an event object; asynchronous
	hipEventCreate(&start);
	hipEventCreate(&stop);
	// hipEventRecord: records an event; asynchronous; start marks the begin time
	hipEventRecord(start, 0);

	size_t lengthA{ colsA * rowsA * sizeof(float) }, lengthB{ colsB * rowsB * sizeof(float) };
	size_t lengthC{ rowsA * colsB * sizeof(float) };
	float *d_A{ nullptr }, *d_B{ nullptr }, *d_C{ nullptr };
	// hipMalloc: allocates memory on the device
	hipMalloc(&d_A, lengthA);
	hipMalloc(&d_B, lengthB);
	hipMalloc(&d_C, lengthC);

	/* hipMemcpy: copies data between host and device; the fourth argument must be one of:
	   (1). hipMemcpyHostToHost: host to host
	   (2). hipMemcpyHostToDevice: host to device
	   (3). hipMemcpyDeviceToHost: device to host
	   (4). hipMemcpyDeviceToDevice: device to device
	   (5). hipMemcpyDefault: direction inferred from the pointer values; requires unified
	        virtual addressing (CUDA 6.0 and later)
	   the copy is synchronous with respect to the host */
	hipMemcpy(d_A, A, lengthA, hipMemcpyHostToDevice);
	hipMemcpy(d_B, B, lengthB, hipMemcpyHostToDevice);
	//hipMemcpy(d_C, C, lengthC, hipMemcpyHostToDevice);

	const int block_size{ 32 };
	/* dim3: built-in vector type based on uint3, equivalent to a struct of three unsigned ints;
	   describes a 3-D extent; any component not set when a dim3 variable is defined defaults to 1 */
	dim3 dimsA(colsA, rowsA, 1);
	dim3 dimsB(colsB, rowsB, 1);
	CHECK(dimsA.x == dimsB.y);
	//fprintf(stderr, "MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);

	dim3 threads(block_size, block_size);
	dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);

	/* <<< >>>: operator introduced by CUDA to pass the execution configuration (grid and block
	   dimensions etc.) to the compiler and runtime, i.e. how many threads the kernel uses and how
	   they are organized; these parameters are not arguments of the device code -- they tell the
	   runtime how to launch it, while the kernel's own arguments are passed in parentheses like a
	   normal call; devices of different compute capability impose different limits on the total
	   thread count and organization; memory for every array or variable the kernel touches must
	   be allocated before the launch, otherwise the GPU computation fails (e.g. out-of-bounds
	   accesses); with the runtime API the configuration has the form <<<Dg,Db,Ns,S>>>: Dg (dim3)
	   sets the grid size, giving Dg.x*Dg.y*Dg.z blocks; Db (dim3) sets the block size, giving
	   Db.x*Db.y*Db.z threads per block; Ns (size_t, optional, default 0) is the dynamically
	   allocated shared memory per block, usable by variables declared as extern __shared__
	   arrays; S (cudaStream_t, optional, default 0) is the stream associated with the kernel;
	   hipify rewrites this launch as a hipLaunchKernelGGL call */
	hipLaunchKernelGGL(( matrix_mul<block_size>) , dim3(grid), dim3(threads) , 0, 0, d_A, d_B, d_C, dimsA.x, dimsB.x); // runs faster
	//matrix_mul<< < grid, threads >> >(d_A, d_B, d_C, colsA, rowsA, colsB, rowsB);

	/* hipDeviceSynchronize: kernel launches are asynchronous, so a hipDeviceSynchronize is
	   usually added to find out whether a launch failed; it blocks until all previously requested
	   work has finished and returns an error if any of it failed; when a program uses several
	   streams that must communicate at some point, a synchronization call such as
	   hipDeviceSynchronize is required at that point
	   reference: https://stackoverflow.com/questions/11888772/when-to-call-cudadevicesynchronize */
	//hipDeviceSynchronize();

	hipMemcpy(C, d_C, lengthC, hipMemcpyDeviceToHost);

	// hipFree: frees device memory allocated by hipMalloc
	hipFree(d_A);
	hipFree(d_B);
	hipFree(d_C);

	// hipEventRecord: records an event; asynchronous; stop marks the end time
	hipEventRecord(stop, 0);
	// hipEventSynchronize: waits for an event to complete
	hipEventSynchronize(stop);
	// hipEventElapsedTime: elapsed time between two events, in milliseconds
	hipEventElapsedTime(elapsed_time, start, stop);
	// hipEventDestroy: destroys an event object
	hipEventDestroy(start);
	hipEventDestroy(stop);

	return 0;
}
e92848c0cfb261f37a37a22672528ab4bea31a9e.cu
#include "funset.hpp"
#include <iostream>
#include <cuda_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_")
#include <device_launch_parameters.h>
#include "common.hpp"

// reference: C:\ProgramData\NVIDIA Corporation\CUDA Samples\v8.0\0_Simple\matrixMul

/* __global__: function type qualifier; the function runs on the device and is called from the
   host (with compute capability 3.2 and above it can also be called from the device); it must
   return void; calls to it are asynchronous, i.e. they return before the device has finished
   executing; every call must supply an execution configuration -- the grid and block dimensions
   and the associated stream -- via the <<< >>> operator; "a kernel": a CUDA parallel function
   that runs on the GPU is called a kernel and must be declared with the __global__ qualifier */
template <int BLOCK_SIZE>
__global__ static void matrix_mul(const float* A, const float* B, float* C, int wA, int wB)
{
	/* gridDim: built-in variable describing the dimensions of the thread grid; it is the same for
	   all blocks and stores the number of blocks in each grid dimension; a grid is 3-D, type dim3;
	   blockDim: built-in variable describing the dimensions of each block, i.e. the number of
	   threads per block in each dimension; type dim3, identical for every block;
	   blockIdx: built-in variable holding the index of the block currently executing device code,
	   i.e. the position of this thread's block inside the grid; blockIdx.x ranges over
	   [0, gridDim.x-1] and blockIdx.y over [0, gridDim.y-1]; type uint3;
	   threadIdx: built-in variable holding the index of the current thread inside its block;
	   threadIdx.x for 1-D blocks, plus threadIdx.y for 2-D and threadIdx.z for 3-D; type uint3 */
	// Block index
	int bx = blockIdx.x;
	int by = blockIdx.y;
	// Thread index
	int tx = threadIdx.x;
	int ty = threadIdx.y;

	// Index of the first sub-matrix of A processed by the block
	int aBegin = wA * BLOCK_SIZE * by;
	// Index of the last sub-matrix of A processed by the block
	int aEnd = aBegin + wA - 1;
	// Step size used to iterate through the sub-matrices of A
	int aStep = BLOCK_SIZE;
	// Index of the first sub-matrix of B processed by the block
	int bBegin = BLOCK_SIZE * bx;
	// Step size used to iterate through the sub-matrices of B
	int bStep = BLOCK_SIZE * wB;

	// Csub is used to store the element of the block sub-matrix that is computed by the thread
	float Csub = 0;

	// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
	for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
		/* __shared__: variable type qualifier (optionally combined with __device__); the variable
		   lives in the block's shared memory, has the same lifetime as the block, and is visible
		   only to the threads of that block; __shared__ and __constant__ variables have static
		   storage by default; with a leading extern the size is determined by the execution
		   configuration; a __shared__ variable cannot be initialized in its declaration; adding
		   the CUDA C keyword __shared__ to a declaration makes the variable reside in shared
		   memory, and the CUDA C compiler handles shared-memory variables differently from
		   ordinary variables */
		// Declaration of the shared memory array As used to store the sub-matrix of A
		__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
		// Declaration of the shared memory array Bs used to store the sub-matrix of B
		__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

		// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
		As[ty][tx] = A[a + wA * ty + tx];
		Bs[ty][tx] = B[b + wB * ty + tx];

		/* __syncthreads: synchronizes the threads of a block; the CUDA architecture guarantees
		   that no thread executes an instruction past the barrier until every thread of the block
		   has reached it; threads of one block exchange data through shared memory, and barrier
		   synchronization (calling __syncthreads() wherever the kernel needs it) guarantees that
		   data is shared correctly; when timing with clock(), record clock() at the start and end
		   of the measured region -- after a barrier all threads of a block take the same time, so
		   recording the time per block is enough rather than per thread */
		// Synchronize to make sure the matrices are loaded
		__syncthreads();

		/* reference:
		   https://devblogs.nvidia.com/parallelforall/new-compiler-features-cuda-8/
		   https://stackoverflow.com/questions/22278631/what-does-pragma-unroll-do-exactly-does-it-affect-the-number-of-threads/22279341
		   By default the compiler unrolls loops only a small number of times; #pragma unroll sets
		   how many times the loop that immediately follows is unrolled (the programmer must make
		   sure that unrolling is correct); an optional number gives the unroll count, and
		   #pragma unroll 1 disables unrolling; without a count, loops with a constant trip count
		   are fully unrolled and loops with an unknown trip count are left alone */
#pragma unroll
		// Multiply the two matrices together; each thread computes one element of the block sub-matrix
		for (int k = 0; k < BLOCK_SIZE; ++k) {
			Csub += As[ty][k] * Bs[k][tx];
		}

		// Synchronize to make sure that the preceding computation is done before loading two new
		// sub-matrices of A and B in the next iteration
		__syncthreads();
	}

	// Write the block sub-matrix to device memory; each thread writes one element
	int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
	C[c + wB * ty + tx] = Csub;
}

__global__ static void matrix_mul(const float* A, const float* B, float* C, int colsA, int rowsA, int colsB, int rowsB)
{
	int x = threadIdx.x + blockIdx.x * blockDim.x;
	int y = threadIdx.y + blockIdx.y * blockDim.y;
	int offset = x + y * blockDim.x * gridDim.x;

	float sum{ 0.f };
	for (int t = 0; t < colsA; ++t) {
		sum += A[y * colsA + t] * B[t * colsB + x];
	}
	C[offset] = sum;
}

int matrix_mul_gpu(const float* A, const float* B, float* C, int colsA, int rowsA, int colsB, int rowsB, float* elapsed_time)
{
	CHECK(colsA == rowsB);

	/* cudaEvent_t: CUDA event type (a struct); CUDA events measure the time the GPU spends on a
	   task; an event is essentially a GPU timestamp, and because events are implemented on the
	   GPU they are not suited to timing mixed code that contains both device and host parts */
	cudaEvent_t start, stop;
	// cudaEventCreate: creates an event object; asynchronous
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	// cudaEventRecord: records an event; asynchronous; start marks the begin time
	cudaEventRecord(start, 0);

	size_t lengthA{ colsA * rowsA * sizeof(float) }, lengthB{ colsB * rowsB * sizeof(float) };
	size_t lengthC{ rowsA * colsB * sizeof(float) };
	float *d_A{ nullptr }, *d_B{ nullptr }, *d_C{ nullptr };
	// cudaMalloc: allocates memory on the device
	cudaMalloc(&d_A, lengthA);
	cudaMalloc(&d_B, lengthB);
	cudaMalloc(&d_C, lengthC);

	/* cudaMemcpy: copies data between host and device; the fourth argument must be one of:
	   (1). cudaMemcpyHostToHost: host to host
	   (2). cudaMemcpyHostToDevice: host to device
	   (3). cudaMemcpyDeviceToHost: device to host
	   (4). cudaMemcpyDeviceToDevice: device to device
	   (5). cudaMemcpyDefault: direction inferred from the pointer values; requires unified
	        virtual addressing (CUDA 6.0 and later)
	   cudaMemcpy is synchronous with respect to the host */
	cudaMemcpy(d_A, A, lengthA, cudaMemcpyHostToDevice);
	cudaMemcpy(d_B, B, lengthB, cudaMemcpyHostToDevice);
	//cudaMemcpy(d_C, C, lengthC, cudaMemcpyHostToDevice);

	const int block_size{ 32 };
	/* dim3: built-in vector type based on uint3, equivalent to a struct of three unsigned ints;
	   describes a 3-D extent; any component not set when a dim3 variable is defined defaults to 1 */
	dim3 dimsA(colsA, rowsA, 1);
	dim3 dimsB(colsB, rowsB, 1);
	CHECK(dimsA.x == dimsB.y);
	//fprintf(stderr, "MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);

	dim3 threads(block_size, block_size);
	dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);

	/* <<< >>>: operator introduced by CUDA to pass the execution configuration (grid and block
	   dimensions etc.) to the compiler and runtime, i.e. how many threads the kernel uses and how
	   they are organized; these parameters are not arguments of the device code -- they tell the
	   runtime how to launch it, while the kernel's own arguments are passed in parentheses like a
	   normal call; devices of different compute capability impose different limits on the total
	   thread count and organization; memory for every array or variable the kernel touches must
	   be allocated before the launch, otherwise the GPU computation fails (e.g. out-of-bounds
	   accesses); with the runtime API the configuration is written between the kernel name and
	   its argument list as <<<Dg,Db,Ns,S>>>: Dg (dim3) sets the grid size, giving Dg.x*Dg.y*Dg.z
	   blocks; Db (dim3) sets the block size, giving Db.x*Db.y*Db.z threads per block; Ns (size_t,
	   optional, default 0) is the dynamically allocated shared memory per block, usable by
	   variables declared as extern __shared__ arrays; S (cudaStream_t, optional, default 0) is
	   the stream associated with the kernel */
	matrix_mul<block_size> <<< grid, threads >>>(d_A, d_B, d_C, dimsA.x, dimsB.x); // runs faster
	//matrix_mul<< < grid, threads >> >(d_A, d_B, d_C, colsA, rowsA, colsB, rowsB);

	/* cudaDeviceSynchronize: kernel launches are asynchronous, so a cudaDeviceSynchronize is
	   usually added to find out whether a launch failed; it blocks until all previously requested
	   work has finished and returns an error if any of it failed; when a program uses several
	   streams that must communicate at some point, a synchronization call such as
	   cudaDeviceSynchronize is required at that point
	   reference: https://stackoverflow.com/questions/11888772/when-to-call-cudadevicesynchronize */
	//cudaDeviceSynchronize();

	cudaMemcpy(C, d_C, lengthC, cudaMemcpyDeviceToHost);

	// cudaFree: frees device memory allocated by cudaMalloc
	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_C);

	// cudaEventRecord: records an event; asynchronous; stop marks the end time
	cudaEventRecord(stop, 0);
	// cudaEventSynchronize: waits for an event to complete
	cudaEventSynchronize(stop);
	// cudaEventElapsedTime: elapsed time between two events, in milliseconds
	cudaEventElapsedTime(elapsed_time, start, stop);
	// cudaEventDestroy: destroys an event object
	cudaEventDestroy(start);
	cudaEventDestroy(stop);

	return 0;
}
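// Added usage sketch (not part of the original file): a minimal host-side call to matrix_mul_gpu
// as defined above. It assumes funset.hpp declares the function, and that all matrix dimensions
// are multiples of block_size (32), since the launch computes the grid as dimsB.x / threads.x and
// dimsA.y / threads.y with no remainder handling; colsA must equal rowsB. The function name
// matrix_mul_usage_example and the chosen sizes are illustrative only. Guarded so it does not
// affect a normal build.
#ifdef MATRIX_MUL_USAGE_SKETCH
#include <vector>
#include <cstdio>
int matrix_mul_usage_example()
{
	const int colsA = 64, rowsA = 64, colsB = 64, rowsB = 64; // colsA must equal rowsB
	std::vector<float> A(colsA * rowsA, 1.f), B(colsB * rowsB, 2.f), C(rowsA * colsB, 0.f);
	float elapsed_ms = 0.f;
	matrix_mul_gpu(A.data(), B.data(), C.data(), colsA, rowsA, colsB, rowsB, &elapsed_ms);
	// For these inputs every element of C should be colsA * 1 * 2 = 128.
	fprintf(stdout, "C[0] = %f, GPU time: %f ms\n", C[0], elapsed_ms);
	return 0;
}
#endif // MATRIX_MUL_USAGE_SKETCH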
e2ae5120e39303532d9e2f0e9b51d65399d23c62.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "flexflow/model.h" #include "flexflow/utils/cuda_helper.h" namespace FlexFlow { // declare Legion names using Legion::ArgumentMap; using Legion::Context; using Legion::coord_t; using Legion::Domain; using Legion::FutureMap; using Legion::IndexLauncher; using Legion::InlineLauncher; using Legion::Machine; using Legion::Memory; using Legion::PhysicalRegion; using Legion::Predicate; using Legion::Rect; using Legion::RegionRequirement; using Legion::Runtime; using Legion::Task; using Legion::TaskArgument; using Legion::TaskLauncher; void Op::inner_measure_operator_cost(Simulator *sim, std::function<void()> const &forward, std::function<void()> const &backward, CostMetrics &cost_metrics) const { hipStream_t stream; checkCUDA(get_legion_stream(&stream)); // measure forward time checkCUDA(hipDeviceSynchronize()); for (int i = 0; i < sim->warmup_times + sim->repeat_times; i++) { if (i == sim->warmup_times) { checkCUDA(hipEventRecord(sim->start_event, stream)); } forward(); } checkCUDA(hipEventRecord(sim->end_event, stream)); checkCUDA(hipEventSynchronize(sim->end_event)); float milliseconds; hipEventElapsedTime(&milliseconds, sim->start_event, sim->end_event); cost_metrics.forward_time = milliseconds / sim->repeat_times; // measure backward time if (sim->computationMode == COMP_MODE_TRAINING) { checkCUDA(hipDeviceSynchronize()); for (int i = 0; i < sim->warmup_times + sim->repeat_times; i++) { if (i == sim->warmup_times) { checkCUDA(hipEventRecord(sim->start_event, stream)); } backward(); } checkCUDA(hipEventRecord(sim->end_event, stream)); checkCUDA(hipEventSynchronize(sim->end_event)); hipEventElapsedTime(&milliseconds, sim->start_event, sim->end_event); cost_metrics.backward_time = milliseconds / sim->repeat_times; } else { cost_metrics.backward_time = 0.0f; } } FFHandler UtilityTasks::init_cuda_task(Task const *task, std::vector<PhysicalRegion> const &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 0); assert(task->local_arglen == sizeof(FFInitInfo)); FFInitInfo const *info = (FFInitInfo *)task->local_args; // assert(task->arglen == sizeof(size_t)); // size_t workSpaceSize = *(const size_t*) task->args; printf("workSpaceSize (%zu MB)\n", info->workSpaceSize / 1024 / 1024); FFHandler handle; handle.workSpaceSize = info->workSpaceSize; handle.offload_reserve_space_size = info->offload_reserve_space_size; handle.quantization_type = info->quantization_type; handle.allowTensorOpMathConversion = info->allowTensorOpMathConversion; checkCUDA(hipblasCreate(&handle.blas)); if (handle.allowTensorOpMathConversion) { checkCUDA(cublasSetMathMode(handle.blas, CUBLAS_TENSOR_OP_MATH)); } checkCUDNN(cudnnCreate(&handle.dnn)); // #ifdef FF_USE_NCCL // checkNCCL(ncclCommInitRank(&handle.nccl, info->allRanks, info->ncclId, // info->myRank)); fprintf(stderr, "handle.nccl(%p)\n", 
handle.nccl); // #endif // std::set<Memory> memFB; // assert(memFB.size() == 1); // assert(memFB.begin()->kind() == Memory::GPU_FB_MEM); // Realm::MemoryImpl* memImpl = // Realm::get_runtime()->get_memory_impl(*memFB.begin()); // Realm::Cuda::GPUFBMemory* memFBImpl = (Realm::Cuda::GPUFBMemory*) memImpl; // off_t offset = memFBImpl->alloc_bytes(workSpaceSize); // handle.workSpace = memFBImpl->get_direct_ptr(offset, 0); { // allocate memory for workspace Memory gpu_mem = Machine::MemoryQuery(Machine::get_machine()) .only_kind(Memory::GPU_FB_MEM) .best_affinity_to(task->target_proc) .first(); Realm::Rect<1, coord_t> bounds( Realm::Point<1, coord_t>(0), Realm::Point<1, coord_t>(handle.workSpaceSize - 1)); std::vector<size_t> field_sizes; field_sizes.push_back(sizeof(char)); Realm::RegionInstance workspaceInst; Realm::RegionInstance::create_instance(workspaceInst, gpu_mem, bounds, field_sizes, 0, Realm::ProfilingRequestSet()) .wait(); handle.workSpace = workspaceInst.pointer_untyped(0, sizeof(char)); } if (handle.offload_reserve_space_size > 0) { // allocate memory for offload reserve space Memory gpu_mem = Machine::MemoryQuery(Machine::get_machine()) .only_kind(Memory::GPU_FB_MEM) .best_affinity_to(task->target_proc) .first(); Realm::Rect<1, coord_t> bounds( Realm::Point<1, coord_t>(0), Realm::Point<1, coord_t>(handle.offload_reserve_space_size - 1)); std::vector<size_t> field_sizes; field_sizes.push_back(sizeof(char)); Realm::RegionInstance workspaceInst; Realm::RegionInstance::create_instance(workspaceInst, gpu_mem, bounds, field_sizes, 0, Realm::ProfilingRequestSet()) .wait(); handle.offload_reserve_space = workspaceInst.pointer_untyped(0, sizeof(char)); } else { handle.offload_reserve_space = nullptr; } // checkCUDA(hipMalloc(&handle.workSpace, handle.workSpaceSize)); #ifdef FF_USE_NCCL handle.ncclComm = NULL; #endif return handle; } void UtilityTasks::dummy_task(Task const *task, std::vector<PhysicalRegion> const &regions, Context ctx, Runtime *runtime) {} __inline__ int calc_offset(int c, int y, int x, int yscale, int xscale) { return (c * yscale * xscale + y * xscale + x); } void nearest_neighbor(unsigned char *image, unsigned char *buffer, int height, int width, int orig_height, int orig_width, float height_scale, float width_scale) { // Note buffer is in HWC layout while image is in CHW layout for (int y = 0; y < height; y++) { int y0 = ::min(static_cast<int>(roundf(y * height_scale)), orig_height - 1); for (int x = 0; x < width; x++) { int x0 = ::min(static_cast<int>(roundf(x * width_scale)), orig_width - 1); for (int c = 0; c < 3; c++) { int origOffset = calc_offset(y0, x0, c, orig_width, 3); int offset = calc_offset(c, y, x, height, width); image[offset] = buffer[origOffset]; } } } } /* regions[0]: image (unsigned char) regions[1]: label (int) */ void UtilityTasks::load_images_task(Task const *task, std::vector<PhysicalRegion> const &regions, Context ctx, Runtime *runtime) { #ifdef USE_DATA_LOADER assert(regions.size() == 2); assert(task->regions.size() == 2); AccessorWO<unsigned char, 3> const acc_image(regions[0], FID_DATA); AccessorWO<int, 1> const acc_label(regions[1], FID_DATA); Rect<3> rect_image; Rect<1> rect_label; unsigned char *buffer = (unsigned char *)malloc(3000 * 3000 * 3); rect_image = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); rect_label = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); assert(acc_image.accessor.is_dense_arbitrary(rect_image)); 
assert(acc_label.accessor.is_dense_arbitrary(rect_label)); unsigned char *image_ptr = acc_image.ptr(rect_image.lo); int *label_ptr = acc_label.ptr(rect_label.lo); DataLoadMeta const *meta = (DataLoadMeta *)task->local_args; int height = rect_image.hi[0] - rect_image.lo[0] + 1; int width = rect_image.hi[1] - rect_image.lo[1] + 1; int numImages = (rect_image.hi[2] - rect_image.lo[2] + 1) / 3; assert((rect_image.hi[2] - rect_image.lo[2] + 1) % 3 == 0); assert(meta->numImages == numImages); for (int idx = 0; idx < numImages; idx++) { label_ptr[idx] = meta->labels[idx]; FILE *file; if ((file = fopen(meta->files[idx], "rb")) == NULL) { fprintf(stderr, "cannot open %s\n", meta->files[idx]); continue; } struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; cinfo.err = jpeg_std_error(&jerr); jpeg_create_decompress(&cinfo); jpeg_stdio_src(&cinfo, file); jpeg_read_header(&cinfo, TRUE); jpeg_start_decompress(&cinfo); if (cinfo.output_components != 3) { printf(stderr, "skip non-RGB file %s\n", meta->files[idx]); jpeg_finish_decompress(&cinfo); jpeg_destroy_decompress(&cinfo); fclose(file); continue; } int origHeight = cinfo.output_height; int origWidth = cinfo.output_width; int rowStride = width * cinfo.output_components; JSAMPARRAY array; array = (*cinfo.mem->alloc_sarray)( (j_common_ptr)&cinfo, JPOOL_IMAGE, rowStride, 1); while (cinfo.output_scanline < cinfo.output_height) { jpeg_read_scanlines(&cinfo, buffer, 1); memcpy(buffer, array[0], rowStride * sizeof(JSAMPLE)); buffer += rowStride; } jpeg_finish_decompress(&cinfo); jpeg_destroy_decompress(&cinfo); fclose(file); float heightScale = static_cast<float>(origHeight) / height; float widthScale = static_cast<float>(origWidth) / width; nearest_neighbor(image_ptr, buffer, height, width, origHeight, origWidth, heightScale, widthScale); image_ptr += 3 * height * width; } free(buffer); #endif } __global__ void apply_normalize(float *tensor_ptr, unsigned char const *rgb_ptr, size_t size, size_t hxw) { float const mean[3] = {0.485, 0.456, 0.406}; float const var[3] = {0.229, 0.224, 0.225}; CUDA_KERNEL_LOOP(i, size) { // decide the color of the current position by assuming NCHW layout int c = (i / hxw) % 3; tensor_ptr[i] = (static_cast<float>(rgb_ptr[i]) / 256 - mean[c]) / var[c]; } } /* regions[0](O): input_images regions[1](I): input_rgb */ __host__ void UtilityTasks::normalize_images_task( Task const *task, std::vector<PhysicalRegion> const &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); AccessorWO<float, 3> const acc_tensor(regions[0], FID_DATA); AccessorRO<unsigned char, 3> const acc_rgb(regions[1], FID_DATA); Rect<3> rect_tensor, rect_rgb; rect_tensor = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); rect_rgb = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); assert(acc_tensor.accessor.is_dense_arbitrary(rect_tensor)); assert(acc_rgb.accessor.is_dense_arbitrary(rect_rgb)); assert(rect_tensor == rect_rgb); size_t w = rect_tensor.hi[0] - rect_tensor.lo[0] + 1; size_t h = rect_tensor.hi[1] - rect_tensor.lo[1] + 1; float *tensor_ptr = acc_tensor.ptr(rect_tensor.lo); unsigned char const *rgb_ptr = acc_rgb.ptr(rect_rgb.lo); hipStream_t stream; checkCUDA(get_legion_stream(&stream)); hipLaunchKernelGGL(( apply_normalize), dim3(GET_BLOCKS(rect_tensor.volume())), dim3(CUDA_NUM_THREADS), 0, stream, tensor_ptr, rgb_ptr, rect_tensor.volume(), h * w); } __global__ void init_image_kernel(float *ptr, coord_t size) { const coord_t tid 
= blockIdx.x * blockDim.x + threadIdx.x; if (tid < size) { ptr[tid] = 1.0f; } } __global__ void init_label_kernel(int *ptr, coord_t size) { const coord_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < size) { ptr[tid] = 1; } } void UtilityTasks::init_images_task(Task const *task, std::vector<PhysicalRegion> const &regions, Context ctx, Runtime *runtime) { int const BLKSIZE = 512; AccessorWO<float, 3> const acc_image(regions[0], FID_DATA); Rect<3> rect_image; rect_image = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); assert(acc_image.accessor.is_dense_arbitrary(rect_image)); float *image_ptr = acc_image.ptr(rect_image.lo); int num_blocks = (rect_image.volume() + BLKSIZE - 1) / BLKSIZE; hipStream_t stream; checkCUDA(get_legion_stream(&stream)); hipLaunchKernelGGL(( init_image_kernel), dim3(num_blocks), dim3(BLKSIZE), 0, stream, image_ptr, rect_image.volume()); } void UtilityTasks::init_labels_task(Task const *task, std::vector<PhysicalRegion> const &regions, Context ctx, Runtime *runtime) { int const BLKSIZE = 512; AccessorWO<int, 1> const acc_label(regions[0], FID_DATA); Rect<1> rect_label; rect_label = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); assert(acc_label.accessor.is_dense_arbitrary(rect_label)); int *label_ptr = acc_label.ptr(rect_label.lo); int num_blocks = (rect_label.volume() + BLKSIZE - 1) / BLKSIZE; hipStream_t stream; checkCUDA(get_legion_stream(&stream)); hipLaunchKernelGGL(( init_label_kernel), dim3(num_blocks), dim3(BLKSIZE), 0, stream, label_ptr, rect_label.volume()); } void FFModel::prefetch() { for (size_t i = 0; i < operators.size(); i++) { operators[i]->prefetch(*this); } } }; // namespace FlexFlow
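The .hip file above and the .cu original that follows differ mostly in mechanical renames (cuda* → hip*, cublas* → hipblas*) and in the kernel-launch syntax. A minimal sketch of that launch correspondence, using a hypothetical fill_kernel rather than the kernels from the file:

```cuda
#include <cuda_runtime.h>

// Hypothetical kernel, standing in for init_image_kernel / init_label_kernel above.
__global__ void fill_kernel(float *ptr, int size, float value) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < size) {
    ptr[tid] = value;
  }
}

void launch_fill(float *d_ptr, int size, float value, cudaStream_t stream) {
  const int block = 512;
  const int grid = (size + block - 1) / block;
  // CUDA launch syntax, as in the .cu file:
  fill_kernel<<<grid, block, 0, stream>>>(d_ptr, size, value);
  // hipify rewrites this launch into the macro form seen in the .hip file:
  //   hipLaunchKernelGGL((fill_kernel), dim3(grid), dim3(block), 0, stream,
  //                      d_ptr, size, value);
}
```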
e2ae5120e39303532d9e2f0e9b51d65399d23c62.cu
/* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "flexflow/model.h" #include "flexflow/utils/cuda_helper.h" namespace FlexFlow { // declare Legion names using Legion::ArgumentMap; using Legion::Context; using Legion::coord_t; using Legion::Domain; using Legion::FutureMap; using Legion::IndexLauncher; using Legion::InlineLauncher; using Legion::Machine; using Legion::Memory; using Legion::PhysicalRegion; using Legion::Predicate; using Legion::Rect; using Legion::RegionRequirement; using Legion::Runtime; using Legion::Task; using Legion::TaskArgument; using Legion::TaskLauncher; void Op::inner_measure_operator_cost(Simulator *sim, std::function<void()> const &forward, std::function<void()> const &backward, CostMetrics &cost_metrics) const { cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); // measure forward time checkCUDA(cudaDeviceSynchronize()); for (int i = 0; i < sim->warmup_times + sim->repeat_times; i++) { if (i == sim->warmup_times) { checkCUDA(cudaEventRecord(sim->start_event, stream)); } forward(); } checkCUDA(cudaEventRecord(sim->end_event, stream)); checkCUDA(cudaEventSynchronize(sim->end_event)); float milliseconds; cudaEventElapsedTime(&milliseconds, sim->start_event, sim->end_event); cost_metrics.forward_time = milliseconds / sim->repeat_times; // measure backward time if (sim->computationMode == COMP_MODE_TRAINING) { checkCUDA(cudaDeviceSynchronize()); for (int i = 0; i < sim->warmup_times + sim->repeat_times; i++) { if (i == sim->warmup_times) { checkCUDA(cudaEventRecord(sim->start_event, stream)); } backward(); } checkCUDA(cudaEventRecord(sim->end_event, stream)); checkCUDA(cudaEventSynchronize(sim->end_event)); cudaEventElapsedTime(&milliseconds, sim->start_event, sim->end_event); cost_metrics.backward_time = milliseconds / sim->repeat_times; } else { cost_metrics.backward_time = 0.0f; } } FFHandler UtilityTasks::init_cuda_task(Task const *task, std::vector<PhysicalRegion> const &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 0); assert(task->local_arglen == sizeof(FFInitInfo)); FFInitInfo const *info = (FFInitInfo *)task->local_args; // assert(task->arglen == sizeof(size_t)); // size_t workSpaceSize = *(const size_t*) task->args; printf("workSpaceSize (%zu MB)\n", info->workSpaceSize / 1024 / 1024); FFHandler handle; handle.workSpaceSize = info->workSpaceSize; handle.offload_reserve_space_size = info->offload_reserve_space_size; handle.quantization_type = info->quantization_type; handle.allowTensorOpMathConversion = info->allowTensorOpMathConversion; checkCUDA(cublasCreate(&handle.blas)); if (handle.allowTensorOpMathConversion) { checkCUDA(cublasSetMathMode(handle.blas, CUBLAS_TENSOR_OP_MATH)); } checkCUDNN(cudnnCreate(&handle.dnn)); // #ifdef FF_USE_NCCL // checkNCCL(ncclCommInitRank(&handle.nccl, info->allRanks, info->ncclId, // info->myRank)); fprintf(stderr, "handle.nccl(%p)\n", handle.nccl); // #endif // std::set<Memory> memFB; // assert(memFB.size() == 1); // 
assert(memFB.begin()->kind() == Memory::GPU_FB_MEM); // Realm::MemoryImpl* memImpl = // Realm::get_runtime()->get_memory_impl(*memFB.begin()); // Realm::Cuda::GPUFBMemory* memFBImpl = (Realm::Cuda::GPUFBMemory*) memImpl; // off_t offset = memFBImpl->alloc_bytes(workSpaceSize); // handle.workSpace = memFBImpl->get_direct_ptr(offset, 0); { // allocate memory for workspace Memory gpu_mem = Machine::MemoryQuery(Machine::get_machine()) .only_kind(Memory::GPU_FB_MEM) .best_affinity_to(task->target_proc) .first(); Realm::Rect<1, coord_t> bounds( Realm::Point<1, coord_t>(0), Realm::Point<1, coord_t>(handle.workSpaceSize - 1)); std::vector<size_t> field_sizes; field_sizes.push_back(sizeof(char)); Realm::RegionInstance workspaceInst; Realm::RegionInstance::create_instance(workspaceInst, gpu_mem, bounds, field_sizes, 0, Realm::ProfilingRequestSet()) .wait(); handle.workSpace = workspaceInst.pointer_untyped(0, sizeof(char)); } if (handle.offload_reserve_space_size > 0) { // allocate memory for offload reserve space Memory gpu_mem = Machine::MemoryQuery(Machine::get_machine()) .only_kind(Memory::GPU_FB_MEM) .best_affinity_to(task->target_proc) .first(); Realm::Rect<1, coord_t> bounds( Realm::Point<1, coord_t>(0), Realm::Point<1, coord_t>(handle.offload_reserve_space_size - 1)); std::vector<size_t> field_sizes; field_sizes.push_back(sizeof(char)); Realm::RegionInstance workspaceInst; Realm::RegionInstance::create_instance(workspaceInst, gpu_mem, bounds, field_sizes, 0, Realm::ProfilingRequestSet()) .wait(); handle.offload_reserve_space = workspaceInst.pointer_untyped(0, sizeof(char)); } else { handle.offload_reserve_space = nullptr; } // checkCUDA(cudaMalloc(&handle.workSpace, handle.workSpaceSize)); #ifdef FF_USE_NCCL handle.ncclComm = NULL; #endif return handle; } void UtilityTasks::dummy_task(Task const *task, std::vector<PhysicalRegion> const &regions, Context ctx, Runtime *runtime) {} __inline__ int calc_offset(int c, int y, int x, int yscale, int xscale) { return (c * yscale * xscale + y * xscale + x); } void nearest_neighbor(unsigned char *image, unsigned char *buffer, int height, int width, int orig_height, int orig_width, float height_scale, float width_scale) { // Note buffer is in HWC layout while image is in CHW layout for (int y = 0; y < height; y++) { int y0 = std::min(static_cast<int>(roundf(y * height_scale)), orig_height - 1); for (int x = 0; x < width; x++) { int x0 = std::min(static_cast<int>(roundf(x * width_scale)), orig_width - 1); for (int c = 0; c < 3; c++) { int origOffset = calc_offset(y0, x0, c, orig_width, 3); int offset = calc_offset(c, y, x, height, width); image[offset] = buffer[origOffset]; } } } } /* regions[0]: image (unsigned char) regions[1]: label (int) */ void UtilityTasks::load_images_task(Task const *task, std::vector<PhysicalRegion> const &regions, Context ctx, Runtime *runtime) { #ifdef USE_DATA_LOADER assert(regions.size() == 2); assert(task->regions.size() == 2); AccessorWO<unsigned char, 3> const acc_image(regions[0], FID_DATA); AccessorWO<int, 1> const acc_label(regions[1], FID_DATA); Rect<3> rect_image; Rect<1> rect_label; unsigned char *buffer = (unsigned char *)malloc(3000 * 3000 * 3); rect_image = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); rect_label = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); assert(acc_image.accessor.is_dense_arbitrary(rect_image)); assert(acc_label.accessor.is_dense_arbitrary(rect_label)); unsigned char *image_ptr = acc_image.ptr(rect_image.lo); int 
*label_ptr = acc_label.ptr(rect_label.lo); DataLoadMeta const *meta = (DataLoadMeta *)task->local_args; int height = rect_image.hi[0] - rect_image.lo[0] + 1; int width = rect_image.hi[1] - rect_image.lo[1] + 1; int numImages = (rect_image.hi[2] - rect_image.lo[2] + 1) / 3; assert((rect_image.hi[2] - rect_image.lo[2] + 1) % 3 == 0); assert(meta->numImages == numImages); for (int idx = 0; idx < numImages; idx++) { label_ptr[idx] = meta->labels[idx]; FILE *file; if ((file = fopen(meta->files[idx], "rb")) == NULL) { fprintf(stderr, "cannot open %s\n", meta->files[idx]); continue; } struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; cinfo.err = jpeg_std_error(&jerr); jpeg_create_decompress(&cinfo); jpeg_stdio_src(&cinfo, file); jpeg_read_header(&cinfo, TRUE); jpeg_start_decompress(&cinfo); if (cinfo.output_components != 3) { printf(stderr, "skip non-RGB file %s\n", meta->files[idx]); jpeg_finish_decompress(&cinfo); jpeg_destroy_decompress(&cinfo); fclose(file); continue; } int origHeight = cinfo.output_height; int origWidth = cinfo.output_width; int rowStride = width * cinfo.output_components; JSAMPARRAY array; array = (*cinfo.mem->alloc_sarray)( (j_common_ptr)&cinfo, JPOOL_IMAGE, rowStride, 1); while (cinfo.output_scanline < cinfo.output_height) { jpeg_read_scanlines(&cinfo, buffer, 1); memcpy(buffer, array[0], rowStride * sizeof(JSAMPLE)); buffer += rowStride; } jpeg_finish_decompress(&cinfo); jpeg_destroy_decompress(&cinfo); fclose(file); float heightScale = static_cast<float>(origHeight) / height; float widthScale = static_cast<float>(origWidth) / width; nearest_neighbor(image_ptr, buffer, height, width, origHeight, origWidth, heightScale, widthScale); image_ptr += 3 * height * width; } free(buffer); #endif } __global__ void apply_normalize(float *tensor_ptr, unsigned char const *rgb_ptr, size_t size, size_t hxw) { float const mean[3] = {0.485, 0.456, 0.406}; float const var[3] = {0.229, 0.224, 0.225}; CUDA_KERNEL_LOOP(i, size) { // decide the color of the current position by assuming NCHW layout int c = (i / hxw) % 3; tensor_ptr[i] = (static_cast<float>(rgb_ptr[i]) / 256 - mean[c]) / var[c]; } } /* regions[0](O): input_images regions[1](I): input_rgb */ __host__ void UtilityTasks::normalize_images_task( Task const *task, std::vector<PhysicalRegion> const &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); AccessorWO<float, 3> const acc_tensor(regions[0], FID_DATA); AccessorRO<unsigned char, 3> const acc_rgb(regions[1], FID_DATA); Rect<3> rect_tensor, rect_rgb; rect_tensor = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); rect_rgb = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); assert(acc_tensor.accessor.is_dense_arbitrary(rect_tensor)); assert(acc_rgb.accessor.is_dense_arbitrary(rect_rgb)); assert(rect_tensor == rect_rgb); size_t w = rect_tensor.hi[0] - rect_tensor.lo[0] + 1; size_t h = rect_tensor.hi[1] - rect_tensor.lo[1] + 1; float *tensor_ptr = acc_tensor.ptr(rect_tensor.lo); unsigned char const *rgb_ptr = acc_rgb.ptr(rect_rgb.lo); cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); apply_normalize<<<GET_BLOCKS(rect_tensor.volume()), CUDA_NUM_THREADS, 0, stream>>>(tensor_ptr, rgb_ptr, rect_tensor.volume(), h * w); } __global__ void init_image_kernel(float *ptr, coord_t size) { const coord_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < size) { ptr[tid] = 1.0f; } } __global__ void init_label_kernel(int *ptr, coord_t size) { const 
coord_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < size) { ptr[tid] = 1; } } void UtilityTasks::init_images_task(Task const *task, std::vector<PhysicalRegion> const &regions, Context ctx, Runtime *runtime) { int const BLKSIZE = 512; AccessorWO<float, 3> const acc_image(regions[0], FID_DATA); Rect<3> rect_image; rect_image = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); assert(acc_image.accessor.is_dense_arbitrary(rect_image)); float *image_ptr = acc_image.ptr(rect_image.lo); int num_blocks = (rect_image.volume() + BLKSIZE - 1) / BLKSIZE; cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); init_image_kernel<<<num_blocks, BLKSIZE, 0, stream>>>(image_ptr, rect_image.volume()); } void UtilityTasks::init_labels_task(Task const *task, std::vector<PhysicalRegion> const &regions, Context ctx, Runtime *runtime) { int const BLKSIZE = 512; AccessorWO<int, 1> const acc_label(regions[0], FID_DATA); Rect<1> rect_label; rect_label = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); assert(acc_label.accessor.is_dense_arbitrary(rect_label)); int *label_ptr = acc_label.ptr(rect_label.lo); int num_blocks = (rect_label.volume() + BLKSIZE - 1) / BLKSIZE; cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); init_label_kernel<<<num_blocks, BLKSIZE, 0, stream>>>(label_ptr, rect_label.volume()); } void FFModel::prefetch() { for (size_t i = 0; i < operators.size(); i++) { operators[i]->prefetch(*this); } } }; // namespace FlexFlow
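Op::inner_measure_operator_cost in both versions of this file times the forward (and optionally backward) pass with a warmup-then-average event pattern. A stripped-down sketch of that pattern, with hypothetical names (time_forward_ms, dummy_kernel) and none of the Simulator plumbing:

```cuda
#include <cuda_runtime.h>

// Hypothetical stand-in for the operator's forward() work.
__global__ void dummy_kernel(float *p, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) p[i] += 1.0f;
}

float time_forward_ms(float *d_p, int n, int warmup, int repeat, cudaStream_t stream) {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaDeviceSynchronize();                              // flush pending work, as in the source
  for (int i = 0; i < warmup + repeat; ++i) {
    if (i == warmup) cudaEventRecord(start, stream);    // start timing only after warmup
    dummy_kernel<<<(n + 255) / 256, 256, 0, stream>>>(d_p, n);
  }
  cudaEventRecord(stop, stream);
  cudaEventSynchronize(stop);
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return ms / repeat;                                   // average time per timed iteration
}
```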
ea50614565e8e8b78a9688a93a22c1e3568a9081.hip
// !!! This is a file automatically generated by hipify!!! #include "HelperFunctions.cuh" float f_zBeam(track *tr) { return -( tr->m_x0 * tr->m_tx + tr->m_y0 * tr->m_ty ) / ( tr->m_tx * tr->m_tx + tr->m_ty * tr->m_ty ); } float f_r2AtZ( float z , track *tr) { float xx = tr->m_x0 + z * tr->m_tx; float yy = tr->m_y0 + z * tr->m_ty; return xx*xx + yy * yy; } void f_solve (track *tr) { float den = ( tr->m_sz2 * tr->m_s0 - tr->m_sz * tr->m_sz ); if ( fabs(den) < 10e-10 ) den = 1.; tr->m_tx = ( tr->m_sxz * tr->m_s0 - tr->m_sx * tr->m_sz ) / den; tr->m_x0 = ( tr->m_sx * tr->m_sz2 - tr->m_sxz * tr->m_sz ) / den; den = ( tr->m_uz2 * tr->m_u0 - tr->m_uz * tr->m_uz ); if ( fabs(den) < 10e-10 ) den = 1.; tr->m_ty = ( tr->m_uyz * tr->m_u0 - tr->m_uy * tr->m_uz ) / den; tr->m_y0 = ( tr->m_uy * tr->m_uz2 - tr->m_uyz * tr->m_uz ) / den; } void f_addHit ( track *tr, int offset, int Z ) { // track_ids[offset] = tr->internalId; tr->trackHitsNum++; float z = hit_Zs[offset]; float x = hit_Xs[offset]; // float f_w = hit_Ws[offset]; float wz = f_w * z; tr->m_s0 += f_w; tr->m_sx += f_w * x; tr->m_sz += wz; tr->m_sxz += wz * x; tr->m_sz2 += wz * z; float y = hit_Ys[offset]; tr->m_u0 += f_w; tr->m_uy += f_w * y; tr->m_uz += wz; tr->m_uyz += wz * y; tr->m_uz2 += wz * z; if( tr->trackHitsNum > 1 ) f_solve(tr); // tr->hits.push_back(offset); tr->hits[Z] = offset; tr->lastZ = Z; tr->activity = 2; } void f_setTrack(track *tr, int hit0_offset, int hit1_offset, int hit0_Z, int hit1_Z){ tr->hits = (int*) ((void**) hipMalloc(sens_num * sizeof(int))); // track_ids[hit0_offset] = tr->internalId; tr->trackHitsNum = 1; float z = hit_Zs[hit0_offset]; float x = hit_Xs[hit0_offset]; // float f_w = hit_Ws[hit0_offset]; float wz = f_w * z; tr->m_s0 = f_w; tr->m_sx = f_w * x; tr->m_sz = wz; tr->m_sxz = wz * x; tr->m_sz2 = wz * z; float y = hit_Ys[hit0_offset]; tr->m_u0 = f_w; tr->m_uy = f_w * y; tr->m_uz = wz; tr->m_uyz = wz * y; tr->m_uz2 = wz * z; // TODO: Remove when not needed // tr->hits.push_back(hit0_offset); tr->hits[hit0_Z] = hit0_offset; f_addHit (tr, hit1_offset, hit1_Z); } float f_chi2Hit( float x, float y, float hitX, float hitY, float hitW){ float dx = x - hitX; float dy = y - hitY; return dx * dx * (hitW) + dy * dy * (hitW); } float f_xAtHit(track *tr, float z ) { return tr->m_x0 + tr->m_tx * z; } float f_yAtHit( track *tr, float z ) { return tr->m_y0 + tr->m_ty * z; } float f_chi2Track(track *tr, int offset) { float z = hit_Zs[offset]; return f_chi2Hit( f_xAtHit( tr, z ), f_yAtHit(tr, z ), hit_Xs[offset], hit_Ys[offset], hit_Ws[offset]); } float f_chi2(track *t) { float ch = 0.0; int nDoF = -4; int hitNumber; for (int i=0; i<t->hits.size(); i++){ hitNumber = t->hits[i]; ch += f_chi2Track(t, hitNumber); nDoF += 2; } return ch/nDoF; } bool f_addHitsOnSensor( f_sensorInfo *sensor, float xTol, float maxChi2, track *tr, int eventId ) { if (sensor->hitsNum == 0) return false; int offset = eventId * max_hits; float xGuess = f_xAtHit(tr, sensor->z) - xTol - 1; int lastHit = sensor->startPosition + sensor->hitsNum - 1; if(hit_Xs[offset + lastHit] < xGuess) return false; int hitStart = sensor->startPosition; unsigned int step = sensor->hitsNum; while ( step > 2 ) { step = step/2; if (hit_Xs[offset + hitStart + step] < xGuess) hitStart += step; } bool added = false; int tmpOffset = 0; float xPred; for(int iH=hitStart; iH<=lastHit; ++iH){ tmpOffset = offset + iH; xPred = f_xAtHit(tr, hit_Zs[tmpOffset]); if ( hit_Xs[tmpOffset] + xTol < xPred ) continue; if ( hit_Xs[tmpOffset] - xTol > xPred ) break; if ( f_chi2Track(tr, tmpOffset) 
< maxChi2 ) { f_addHit(tr, tmpOffset, 0); // *usedHit = tmpOffset; - Used hits are tagged by the end of the algorithm, not before. added = true; } } return added; } void f_removeHit(track *tr, int worstHitOffset){ tr->trackHitsNum--; float z = hit_Zs[worstHitOffset]; // float f_w = hit_Ws[worstHitOffset]; float x = hit_Xs[worstHitOffset]; float wz = f_w * z; tr->m_s0 -= f_w; tr->m_sx -= f_w * x; tr->m_sz -= wz; tr->m_sxz -= wz * x; tr->m_sz2 -= wz * z; float y = hit_Ys[worstHitOffset]; tr->m_u0 -= f_w; tr->m_uy -= f_w * y; tr->m_uz -= wz; tr->m_uyz -= wz * y; tr->m_uz2 -= wz * z; vector<int>::iterator it = find(tr->hits.begin(), tr->hits.end(), worstHitOffset); tr->hits.erase(it); if( tr->trackHitsNum > 1 ) f_solve(tr); } //== Remove the worst hit until all chi2 are good void f_removeWorstHit(track* tr) { float topChi2 = 1.e9; int worstHitOffset; while( topChi2 > f_m_maxChi2PerHit ) { topChi2 = 0.0; // This for loop gets the worst hit for (int i=0; i<tr->hits.size(); i++){ float myChi2 = f_chi2Track(tr, tr->hits[i]); if (myChi2 > topChi2){ topChi2 = myChi2; worstHitOffset = tr->hits[i]; } } // If it's bad, we remove it if ( topChi2 > f_m_maxChi2PerHit ) { // hit_isUseds[worstHitOffset] = 0; // It has still not been added to isUseds, no need to do this :) f_removeHit(tr, worstHitOffset); // This changes the chi2 of the track, which is why } // And the algorithm goes on... ? // Every hit with chi2 > maxChi2 will be removed... is this the desired behaviour? // -> yes, read description above } } bool f_all3SensorsAreDifferent(track *t) { float s0 = hit_sensorNums[t->hits[0]]; float s1 = hit_sensorNums[t->hits[1]]; float s2 = hit_sensorNums[t->hits[2]]; if ( s0 == s1 ) return false; if ( s0 == s2 ) return false; if ( s1 == s2 ) return false; return true; } int f_nbUnused(track *t) { int nn = 0; for (vector<int>::iterator it = t->hits.begin(); it != t->hits.end(); ++it){ if (!hit_isUseds[(*it)]) ++nn; } return nn; }
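f_addHit and f_solve above maintain a weighted least-squares fit of a straight line x(z) = x0 + tx·z (and likewise y(z)) from running sums. A small host-side sketch of the same normal-equation solve, with illustrative names (FitSums, add_point, solve_line) that are not part of the file:

```cuda
#include <cmath>

struct FitSums { float s0 = 0, sx = 0, sz = 0, sxz = 0, sz2 = 0; };

// Accumulate one measurement (z, x) with weight w, as f_addHit does per hit.
void add_point(FitSums &f, float z, float x, float w) {
  float wz = w * z;
  f.s0 += w; f.sx += w * x; f.sz += wz; f.sxz += wz * x; f.sz2 += wz * z;
}

// Solve the 2x2 normal equations for slope tx and intercept x0, as f_solve does.
void solve_line(const FitSums &f, float &tx, float &x0) {
  float den = f.sz2 * f.s0 - f.sz * f.sz;   // determinant of the normal equations
  if (fabsf(den) < 1e-9f) den = 1.f;        // same degenerate-case guard as f_solve
  tx = (f.sxz * f.s0 - f.sx * f.sz) / den;
  x0 = (f.sx * f.sz2 - f.sxz * f.sz) / den;
}
```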
ea50614565e8e8b78a9688a93a22c1e3568a9081.cu
#include "HelperFunctions.cuh" float f_zBeam(track *tr) { return -( tr->m_x0 * tr->m_tx + tr->m_y0 * tr->m_ty ) / ( tr->m_tx * tr->m_tx + tr->m_ty * tr->m_ty ); } float f_r2AtZ( float z , track *tr) { float xx = tr->m_x0 + z * tr->m_tx; float yy = tr->m_y0 + z * tr->m_ty; return xx*xx + yy * yy; } void f_solve (track *tr) { float den = ( tr->m_sz2 * tr->m_s0 - tr->m_sz * tr->m_sz ); if ( fabs(den) < 10e-10 ) den = 1.; tr->m_tx = ( tr->m_sxz * tr->m_s0 - tr->m_sx * tr->m_sz ) / den; tr->m_x0 = ( tr->m_sx * tr->m_sz2 - tr->m_sxz * tr->m_sz ) / den; den = ( tr->m_uz2 * tr->m_u0 - tr->m_uz * tr->m_uz ); if ( fabs(den) < 10e-10 ) den = 1.; tr->m_ty = ( tr->m_uyz * tr->m_u0 - tr->m_uy * tr->m_uz ) / den; tr->m_y0 = ( tr->m_uy * tr->m_uz2 - tr->m_uyz * tr->m_uz ) / den; } void f_addHit ( track *tr, int offset, int Z ) { // track_ids[offset] = tr->internalId; tr->trackHitsNum++; float z = hit_Zs[offset]; float x = hit_Xs[offset]; // float f_w = hit_Ws[offset]; float wz = f_w * z; tr->m_s0 += f_w; tr->m_sx += f_w * x; tr->m_sz += wz; tr->m_sxz += wz * x; tr->m_sz2 += wz * z; float y = hit_Ys[offset]; tr->m_u0 += f_w; tr->m_uy += f_w * y; tr->m_uz += wz; tr->m_uyz += wz * y; tr->m_uz2 += wz * z; if( tr->trackHitsNum > 1 ) f_solve(tr); // tr->hits.push_back(offset); tr->hits[Z] = offset; tr->lastZ = Z; tr->activity = 2; } void f_setTrack(track *tr, int hit0_offset, int hit1_offset, int hit0_Z, int hit1_Z){ tr->hits = (int*) ((void**) cudaMalloc(sens_num * sizeof(int))); // track_ids[hit0_offset] = tr->internalId; tr->trackHitsNum = 1; float z = hit_Zs[hit0_offset]; float x = hit_Xs[hit0_offset]; // float f_w = hit_Ws[hit0_offset]; float wz = f_w * z; tr->m_s0 = f_w; tr->m_sx = f_w * x; tr->m_sz = wz; tr->m_sxz = wz * x; tr->m_sz2 = wz * z; float y = hit_Ys[hit0_offset]; tr->m_u0 = f_w; tr->m_uy = f_w * y; tr->m_uz = wz; tr->m_uyz = wz * y; tr->m_uz2 = wz * z; // TODO: Remove when not needed // tr->hits.push_back(hit0_offset); tr->hits[hit0_Z] = hit0_offset; f_addHit (tr, hit1_offset, hit1_Z); } float f_chi2Hit( float x, float y, float hitX, float hitY, float hitW){ float dx = x - hitX; float dy = y - hitY; return dx * dx * (hitW) + dy * dy * (hitW); } float f_xAtHit(track *tr, float z ) { return tr->m_x0 + tr->m_tx * z; } float f_yAtHit( track *tr, float z ) { return tr->m_y0 + tr->m_ty * z; } float f_chi2Track(track *tr, int offset) { float z = hit_Zs[offset]; return f_chi2Hit( f_xAtHit( tr, z ), f_yAtHit(tr, z ), hit_Xs[offset], hit_Ys[offset], hit_Ws[offset]); } float f_chi2(track *t) { float ch = 0.0; int nDoF = -4; int hitNumber; for (int i=0; i<t->hits.size(); i++){ hitNumber = t->hits[i]; ch += f_chi2Track(t, hitNumber); nDoF += 2; } return ch/nDoF; } bool f_addHitsOnSensor( f_sensorInfo *sensor, float xTol, float maxChi2, track *tr, int eventId ) { if (sensor->hitsNum == 0) return false; int offset = eventId * max_hits; float xGuess = f_xAtHit(tr, sensor->z) - xTol - 1; int lastHit = sensor->startPosition + sensor->hitsNum - 1; if(hit_Xs[offset + lastHit] < xGuess) return false; int hitStart = sensor->startPosition; unsigned int step = sensor->hitsNum; while ( step > 2 ) { step = step/2; if (hit_Xs[offset + hitStart + step] < xGuess) hitStart += step; } bool added = false; int tmpOffset = 0; float xPred; for(int iH=hitStart; iH<=lastHit; ++iH){ tmpOffset = offset + iH; xPred = f_xAtHit(tr, hit_Zs[tmpOffset]); if ( hit_Xs[tmpOffset] + xTol < xPred ) continue; if ( hit_Xs[tmpOffset] - xTol > xPred ) break; if ( f_chi2Track(tr, tmpOffset) < maxChi2 ) { f_addHit(tr, tmpOffset, 0); // *usedHit = 
tmpOffset; - Used hits are tagged by the end of the algorithm, not before. added = true; } } return added; } void f_removeHit(track *tr, int worstHitOffset){ tr->trackHitsNum--; float z = hit_Zs[worstHitOffset]; // float f_w = hit_Ws[worstHitOffset]; float x = hit_Xs[worstHitOffset]; float wz = f_w * z; tr->m_s0 -= f_w; tr->m_sx -= f_w * x; tr->m_sz -= wz; tr->m_sxz -= wz * x; tr->m_sz2 -= wz * z; float y = hit_Ys[worstHitOffset]; tr->m_u0 -= f_w; tr->m_uy -= f_w * y; tr->m_uz -= wz; tr->m_uyz -= wz * y; tr->m_uz2 -= wz * z; vector<int>::iterator it = find(tr->hits.begin(), tr->hits.end(), worstHitOffset); tr->hits.erase(it); if( tr->trackHitsNum > 1 ) f_solve(tr); } //== Remove the worst hit until all chi2 are good void f_removeWorstHit(track* tr) { float topChi2 = 1.e9; int worstHitOffset; while( topChi2 > f_m_maxChi2PerHit ) { topChi2 = 0.0; // This for loop gets the worst hit for (int i=0; i<tr->hits.size(); i++){ float myChi2 = f_chi2Track(tr, tr->hits[i]); if (myChi2 > topChi2){ topChi2 = myChi2; worstHitOffset = tr->hits[i]; } } // If it's bad, we remove it if ( topChi2 > f_m_maxChi2PerHit ) { // hit_isUseds[worstHitOffset] = 0; // It has still not been added to isUseds, no need to do this :) f_removeHit(tr, worstHitOffset); // This changes the chi2 of the track, which is why } // And the algorithm goes on... ? // Every hit with chi2 > maxChi2 will be removed... is this the desired behaviour? // -> yes, read description above } } bool f_all3SensorsAreDifferent(track *t) { float s0 = hit_sensorNums[t->hits[0]]; float s1 = hit_sensorNums[t->hits[1]]; float s2 = hit_sensorNums[t->hits[2]]; if ( s0 == s1 ) return false; if ( s0 == s2 ) return false; if ( s1 == s2 ) return false; return true; } int f_nbUnused(track *t) { int nn = 0; for (vector<int>::iterator it = t->hits.begin(); it != t->hits.end(); ++it){ if (!hit_isUseds[(*it)]) ++nn; } return nn; }
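f_addHitsOnSensor narrows the candidate hit range with a halving-step search before applying a per-hit weighted chi2 cut. An illustrative host-side sketch of those two ingredients, with hypothetical names (LineFit, chi2_hit, guided_start):

```cuda
#include <cmath>

struct LineFit { float x0, tx, y0, ty; };

// Weighted chi2 contribution of one hit, same form as f_chi2Hit / f_chi2Track.
float chi2_hit(const LineFit &f, float z, float hx, float hy, float w) {
  float dx = (f.x0 + f.tx * z) - hx;
  float dy = (f.y0 + f.ty * z) - hy;
  return (dx * dx + dy * dy) * w;
}

// Coarse halving-step search toward the first hit whose x is not far below
// xGuess, mirroring the 'step' loop in f_addHitsOnSensor (hits sorted in x).
int guided_start(const float *hit_x, int first, int count, float xGuess) {
  int start = first;
  unsigned int step = count;
  while (step > 2) {
    step /= 2;
    if (hit_x[start + step] < xGuess) {
      start += step;
    }
  }
  return start;
}
```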
650bd3710484b5e05648816ba4cd6192114dab66.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include "fbgemm_gpu/quantize_ops.cuh" #include "fbgemm_gpu/sparse_ops.cuh" #include "fbgemm_gpu/sparse_ops.h" #include "fbgemm_gpu/sparse_ops_utils.h" #include <ATen/ATen.h> #include <ATen/core/op_registration/op_registration.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/Exceptions.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <torch/library.h> #include "ATen/Parallel.h" #include "hipcub/hipcub.hpp" namespace at { Tensor asynchronous_inclusive_cumsum(const Tensor& t_in) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(t_in.get_device()); size_t temp_storage_bytes = 0; TORCH_CHECK(t_in.is_contiguous()); TORCH_CHECK(t_in.dtype() == kInt || t_in.dtype() == kLong); // CUB only handles up to INT_MAX elements. TORCH_CHECK(t_in.numel() < std::numeric_limits<int32_t>::max()); auto t_out = at::empty_like(t_in); AT_DISPATCH_INTEGRAL_TYPES( t_in.scalar_type(), "cub_inclusive_sum_wrapper1", ([&] { AT_CUDA_CHECK(hipcub::DeviceScan::InclusiveSum( nullptr, temp_storage_bytes, t_in.data_ptr<scalar_t>(), t_out.data_ptr<scalar_t>(), t_in.numel(), at::hip::getCurrentHIPStreamMasqueradingAsCUDA())); })); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, t_in.options().dtype(kByte)); AT_DISPATCH_INTEGRAL_TYPES( t_in.scalar_type(), "cub_inclusive_sum_wrapper2", ([&] { AT_CUDA_CHECK(hipcub::DeviceScan::InclusiveSum( temp_storage.data_ptr(), temp_storage_bytes, t_in.data_ptr<scalar_t>(), t_out.data_ptr<scalar_t>(), t_in.numel(), at::hip::getCurrentHIPStreamMasqueradingAsCUDA())); })); return t_out; } Tensor asynchronous_exclusive_cumsum(const Tensor& t_in) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(t_in.get_device()); size_t temp_storage_bytes = 0; TORCH_CHECK(t_in.is_contiguous()); TORCH_CHECK(t_in.dtype() == kInt || t_in.dtype() == kLong); // CUB only handles up to INT_MAX elements. 
TORCH_CHECK(t_in.numel() < std::numeric_limits<int32_t>::max()); auto t_out = at::empty_like(t_in); AT_DISPATCH_INTEGRAL_TYPES( t_in.scalar_type(), "cub_exclusive_sum_wrapper1", ([&] { AT_CUDA_CHECK(hipcub::DeviceScan::ExclusiveSum( nullptr, temp_storage_bytes, t_in.data_ptr<scalar_t>(), t_out.data_ptr<scalar_t>(), t_in.numel(), at::hip::getCurrentHIPStreamMasqueradingAsCUDA())); })); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, t_in.options().dtype(kByte)); AT_DISPATCH_INTEGRAL_TYPES( t_in.scalar_type(), "cub_exclusive_sum_wrapper2", ([&] { AT_CUDA_CHECK(hipcub::DeviceScan::ExclusiveSum( temp_storage.data_ptr(), temp_storage_bytes, t_in.data_ptr<scalar_t>(), t_out.data_ptr<scalar_t>(), t_in.numel(), at::hip::getCurrentHIPStreamMasqueradingAsCUDA())); })); return t_out; } std::tuple<Tensor, Tensor, c10::optional<Tensor>> permute_sparse_data_cuda( const Tensor& permute, const Tensor& lengths, const Tensor& indices, const c10::optional<Tensor>& weights, const c10::optional<int64_t>& permuted_lengths_sum) { TENSOR_ON_CUDA_GPU(permute); TENSOR_ON_CUDA_GPU(lengths); TENSOR_ON_CUDA_GPU(indices); TENSOR_ON_CUDA_GPU(weights); TENSORS_ON_SAME_DEVICE(permute, lengths); TENSORS_ON_SAME_DEVICE(permute, indices); TENSORS_ON_SAME_DEVICE(permute, weights); at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(indices.get_device()); const auto permute_contig = permute.contiguous(); const auto lengths_contig = lengths.contiguous(); const auto indices_contig = indices.contiguous(); // the data to permute over can be less or more with or without // repetitions const auto T = permute.numel(); const auto T_ = lengths.size(0); const auto B = lengths.view({lengths.sizes()[0], -1}).sizes()[1]; Tensor permuted_lengths; Tensor permuted_indices; Tensor permuted_weights; permuted_lengths = at::empty({T, B}, lengths.options()); constexpr int32_t threads_1 = 256; const auto blocks_1 = cuda_calc_xblock_count(B * T, threads_1); AT_DISPATCH_INDEX_TYPES( lengths.scalar_type(), "permute_lengths_kernel", ([&] { hipLaunchKernelGGL(( permute_lengths_kernel<index_t>) , dim3(blocks_1), dim3(threads_1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), T, B, lengths_contig.data_ptr<index_t>(), permute.data_ptr<int32_t>(), permuted_lengths.data_ptr<index_t>()); C10_HIP_KERNEL_LAUNCH_CHECK(); })); // convert lengths to offsets const auto input_offsets = asynchronous_exclusive_cumsum(lengths_contig); const auto output_offsets = asynchronous_exclusive_cumsum(permuted_lengths); int64_t permuted_indices_size = 0; if (permuted_lengths_sum.has_value()) { permuted_indices_size = permuted_lengths_sum.value(); } else { permuted_indices_size = permuted_lengths.sum().item<int64_t>(); } constexpr int32_t BT_blocks = 32; dim3 threads_2(32, BT_blocks); const auto blocks_2 = cuda_calc_xblock_count(B * T, BT_blocks); permuted_indices = at::empty(permuted_indices_size, indices.options()); AT_DISPATCH_INDEX_TYPES( input_offsets.scalar_type(), "permute_data_kernel_1", ([&] { using offsets_t = index_t; AT_DISPATCH_ALL_TYPES( indices.scalar_type(), "permute_data_kernel_2", ([&] { using indices_t = scalar_t; if (weights.has_value()) { const Tensor weights_value = weights.value(); const auto weights_value_contig = weights_value.contiguous(); permuted_weights = at::empty(permuted_indices_size, weights_value.options()); AT_DISPATCH_FLOATING_TYPES( weights_value.scalar_type(), "permute_data_kernel_3", ([&] { using weights_t = scalar_t; hipLaunchKernelGGL(( permute_data_kernel<true, offsets_t, 
indices_t, weights_t>) , dim3(blocks_2), dim3(threads_2), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), permuted_indices_size, T, B, indices_contig.data_ptr<indices_t>(), weights_value_contig.data_ptr<weights_t>(), permute_contig.data_ptr<int32_t>(), input_offsets.data_ptr<offsets_t>(), output_offsets.data_ptr<offsets_t>(), permuted_indices.data_ptr<indices_t>(), permuted_weights.data_ptr<weights_t>()); C10_HIP_KERNEL_LAUNCH_CHECK(); })); // for each weights_t } else { hipLaunchKernelGGL(( permute_data_kernel<false, offsets_t, indices_t, std::nullptr_t>) , dim3(blocks_2), dim3(threads_2), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), permuted_indices_size, T, B, indices_contig.data_ptr<indices_t>(), nullptr, permute_contig.data_ptr<int32_t>(), input_offsets.data_ptr<offsets_t>(), output_offsets.data_ptr<offsets_t>(), permuted_indices.data_ptr<indices_t>(), nullptr); C10_HIP_KERNEL_LAUNCH_CHECK(); } })); // for each indices_t })); // for each offsets_t return {permuted_lengths, permuted_indices, permuted_weights}; } // This function partitions sparse features // continuously along the sparse dimension into my_size blocks std::tuple< Tensor, Tensor, c10::optional<Tensor>, c10::optional<Tensor>, c10::optional<Tensor>> block_bucketize_sparse_features_cuda( Tensor lengths, Tensor indices, bool bucketize_pos, bool sequence, Tensor block_sizes, int64_t my_size, c10::optional<Tensor> weights) { TENSOR_ON_CUDA_GPU(lengths); TENSOR_ON_CUDA_GPU(indices); TENSORS_ON_SAME_DEVICE(lengths, indices); TENSOR_ON_CUDA_GPU(weights); TENSORS_ON_SAME_DEVICE(lengths, weights); at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(lengths.get_device()); // allocate tensors and buffers const int lengths_size = lengths.numel(); const int T = block_sizes.numel(); const int B = lengths_size / T; const int new_lengths_size = lengths_size * my_size; auto offsets = at::empty({lengths_size}, lengths.options()); auto new_lengths = at::zeros({new_lengths_size}, lengths.options()); auto new_offsets = at::empty({new_lengths_size}, lengths.options()); auto new_indices = at::empty_like(indices); auto lengths_contig = lengths.contiguous(); auto indices_contig = indices.contiguous(); auto offsets_contig = offsets.contiguous(); Tensor new_weights; Tensor new_pos; Tensor unbucketize_permute; // count nonzeros offsets_contig = asynchronous_inclusive_cumsum(lengths); int threads_per_block = 256; int num_blocks = (lengths_size + threads_per_block - 1) / threads_per_block; AT_DISPATCH_INDEX_TYPES( offsets_contig.scalar_type(), "_block_bucketize_sparse_features_cuda_kernel1", ([&] { using offset_t = index_t; AT_DISPATCH_INDEX_TYPES( indices_contig.scalar_type(), "_block_bucketize_sparse_features_cuda_kernel2", ([&] { hipLaunchKernelGGL(( _block_bucketize_sparse_features_cuda_kernel1), dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), lengths_size, B, block_sizes.data_ptr<index_t>(), my_size, offsets_contig.data_ptr<offset_t>(), indices_contig.data_ptr<index_t>(), new_lengths.data_ptr<offset_t>()); C10_HIP_KERNEL_LAUNCH_CHECK(); })); })); // bucketize nonzeros new_offsets = asynchronous_exclusive_cumsum(new_lengths); if (sequence) { const auto lengths_sum = indices.numel(); unbucketize_permute = at::empty({lengths_sum}, indices.options()); if (weights.has_value() & bucketize_pos) { Tensor weights_value = weights.value(); auto weights_value_contig = weights_value.contiguous(); new_weights = at::empty_like(weights_value); new_pos = 
at::empty_like(indices); AT_DISPATCH_INDEX_TYPES( offsets_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_1", ([&] { using offset_t = index_t; AT_DISPATCH_INDEX_TYPES( indices_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_2", ([&] { AT_DISPATCH_FLOATING_TYPES( weights_value.scalar_type(), "_block_bucketize_sparse_features_cuda_weight_kernel2_3", ([&] { hipLaunchKernelGGL(( _block_bucketize_sparse_features_cuda_kernel2< true, true, true, offset_t, index_t, scalar_t>) , dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), lengths_size, B, block_sizes.data_ptr<index_t>(), my_size, offsets_contig.data_ptr<offset_t>(), indices_contig.data_ptr<index_t>(), weights_value_contig.data_ptr<scalar_t>(), new_offsets.data_ptr<offset_t>(), new_indices.data_ptr<index_t>(), new_weights.data_ptr<scalar_t>(), new_pos.data_ptr<index_t>(), unbucketize_permute.data_ptr<index_t>()); C10_HIP_KERNEL_LAUNCH_CHECK(); })); })); })); } else if (weights.has_value()) { Tensor weights_value = weights.value(); auto weights_value_contig = weights_value.contiguous(); new_weights = at::empty_like(weights_value); AT_DISPATCH_INDEX_TYPES( offsets_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_1", ([&] { using offset_t = index_t; AT_DISPATCH_INDEX_TYPES( indices_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_2", ([&] { AT_DISPATCH_FLOATING_TYPES( weights_value.scalar_type(), "_block_bucketize_sparse_features_cuda_weight_kernel2_3", ([&] { hipLaunchKernelGGL(( _block_bucketize_sparse_features_cuda_kernel2< true, true, false, offset_t, index_t, scalar_t>) , dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), lengths_size, B, block_sizes.data_ptr<index_t>(), my_size, offsets_contig.data_ptr<offset_t>(), indices_contig.data_ptr<index_t>(), weights_value_contig.data_ptr<scalar_t>(), new_offsets.data_ptr<offset_t>(), new_indices.data_ptr<index_t>(), new_weights.data_ptr<scalar_t>(), nullptr, unbucketize_permute.data_ptr<index_t>()); C10_HIP_KERNEL_LAUNCH_CHECK(); })); })); })); } else if (bucketize_pos) { new_pos = at::empty_like(indices); AT_DISPATCH_INDEX_TYPES( offsets_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_1", ([&] { using offset_t = index_t; AT_DISPATCH_INDEX_TYPES( indices_contig.scalar_type(), "_block_bucketize_sparse_features_cuda_kernel2_2", ([&] { hipLaunchKernelGGL(( _block_bucketize_sparse_features_cuda_kernel2< true, false, true, offset_t, index_t, std::nullptr_t>) , dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), lengths_size, B, block_sizes.data_ptr<index_t>(), my_size, offsets_contig.data_ptr<offset_t>(), indices_contig.data_ptr<index_t>(), nullptr, new_offsets.data_ptr<offset_t>(), new_indices.data_ptr<index_t>(), nullptr, new_pos.data_ptr<index_t>(), unbucketize_permute.data_ptr<index_t>()); C10_HIP_KERNEL_LAUNCH_CHECK(); })); })); } else { AT_DISPATCH_INDEX_TYPES( offsets_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_1", ([&] { using offset_t = index_t; AT_DISPATCH_INDEX_TYPES( indices_contig.scalar_type(), "_block_bucketize_sparse_features_cuda_kernel2_2", ([&] { hipLaunchKernelGGL(( _block_bucketize_sparse_features_cuda_kernel2< true, false, false, offset_t, index_t, std::nullptr_t>) , dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), lengths_size, B, block_sizes.data_ptr<index_t>(), 
my_size, offsets_contig.data_ptr<offset_t>(), indices_contig.data_ptr<index_t>(), nullptr, new_offsets.data_ptr<offset_t>(), new_indices.data_ptr<index_t>(), nullptr, nullptr, unbucketize_permute.data_ptr<index_t>()); C10_HIP_KERNEL_LAUNCH_CHECK(); })); })); } } else { if (weights.has_value() & bucketize_pos) { Tensor weights_value = weights.value(); auto weights_value_contig = weights_value.contiguous(); new_weights = at::empty_like(weights_value); new_pos = at::empty_like(indices); AT_DISPATCH_INDEX_TYPES( offsets_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_1", ([&] { using offset_t = index_t; AT_DISPATCH_INDEX_TYPES( indices_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_2", ([&] { AT_DISPATCH_FLOATING_TYPES( weights_value.scalar_type(), "_block_bucketize_sparse_features_cuda_weight_kernel2_3", ([&] { hipLaunchKernelGGL(( _block_bucketize_sparse_features_cuda_kernel2< false, true, true, offset_t, index_t, scalar_t>) , dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), lengths_size, B, block_sizes.data_ptr<index_t>(), my_size, offsets_contig.data_ptr<offset_t>(), indices_contig.data_ptr<index_t>(), weights_value_contig.data_ptr<scalar_t>(), new_offsets.data_ptr<offset_t>(), new_indices.data_ptr<index_t>(), new_weights.data_ptr<scalar_t>(), new_pos.data_ptr<index_t>(), nullptr); C10_HIP_KERNEL_LAUNCH_CHECK(); })); })); })); } else if (weights.has_value()) { Tensor weights_value = weights.value(); auto weights_value_contig = weights_value.contiguous(); new_weights = at::empty_like(weights_value); AT_DISPATCH_INDEX_TYPES( offsets_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_1", ([&] { using offset_t = index_t; AT_DISPATCH_INDEX_TYPES( indices_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_2", ([&] { AT_DISPATCH_FLOATING_TYPES( weights_value.scalar_type(), "_block_bucketize_sparse_features_cuda_weight_kernel2_3", ([&] { hipLaunchKernelGGL(( _block_bucketize_sparse_features_cuda_kernel2< false, true, false, offset_t, index_t, scalar_t>) , dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), lengths_size, B, block_sizes.data_ptr<index_t>(), my_size, offsets_contig.data_ptr<offset_t>(), indices_contig.data_ptr<index_t>(), weights_value_contig.data_ptr<scalar_t>(), new_offsets.data_ptr<offset_t>(), new_indices.data_ptr<index_t>(), new_weights.data_ptr<scalar_t>(), nullptr, nullptr); C10_HIP_KERNEL_LAUNCH_CHECK(); })); })); })); } else if (bucketize_pos) { new_pos = at::empty_like(indices); AT_DISPATCH_INDEX_TYPES( offsets_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_1", ([&] { using offset_t = index_t; AT_DISPATCH_INDEX_TYPES( indices_contig.scalar_type(), "_block_bucketize_sparse_features_cuda_kernel2_2", ([&] { hipLaunchKernelGGL(( _block_bucketize_sparse_features_cuda_kernel2< false, false, true, offset_t, index_t, std::nullptr_t>) , dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), lengths_size, B, block_sizes.data_ptr<index_t>(), my_size, offsets_contig.data_ptr<offset_t>(), indices_contig.data_ptr<index_t>(), nullptr, new_offsets.data_ptr<offset_t>(), new_indices.data_ptr<index_t>(), nullptr, new_pos.data_ptr<index_t>(), nullptr); C10_HIP_KERNEL_LAUNCH_CHECK(); })); })); } else { AT_DISPATCH_INDEX_TYPES( offsets_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_1", ([&] { using offset_t = index_t; 
AT_DISPATCH_INDEX_TYPES( indices_contig.scalar_type(), "_block_bucketize_sparse_features_cuda_kernel2_2", ([&] { hipLaunchKernelGGL(( _block_bucketize_sparse_features_cuda_kernel2< false, false, false, offset_t, index_t, std::nullptr_t>) , dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), lengths_size, B, block_sizes.data_ptr<index_t>(), my_size, offsets_contig.data_ptr<offset_t>(), indices_contig.data_ptr<index_t>(), nullptr, new_offsets.data_ptr<offset_t>(), new_indices.data_ptr<index_t>(), nullptr, nullptr, nullptr); C10_HIP_KERNEL_LAUNCH_CHECK(); })); })); } } return {new_lengths, new_indices, new_weights, new_pos, unbucketize_permute}; } at::Tensor _float_to_fused8bitrowwise_gpu(const at::Tensor& input) { TENSOR_ON_CUDA_GPU(input); TORCH_CHECK(input.is_contiguous(), "input must be contiguous"); at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(input.get_device()); const auto input_sizes = input.sizes(); const auto last_dim = input_sizes.size() - 1; const int nrows = c10::size_to_dim_(last_dim, input_sizes); const int ncols = input_sizes[last_dim]; const int ncols_aligned = (ncols + 4 - 1) / 4 * 4; const int output_columns = ncols_aligned + 2 * sizeof(float); // Global memory instructions support reading or writing words of size equal // to 1, 2, 4, 8, or 16 bytes. Any access (via a variable or a pointer) to // data residing in global memory compiles to a single global memory // instruction if and only if the size of the data type is 1, 2, 4, 8, or 16 // bytes and the data is naturally aligned (i.e., its address is a multiple of // that size). auto output_dims = input_sizes.vec(); output_dims[last_dim] = output_columns; auto output = at::empty( output_dims, // 4 = sizeof(float) input.options().dtype(at::kByte)); if (nrows == 0 || ncols == 0) { return output; } constexpr int threads_per_block = 256; const auto num_blocks = cuda_calc_xblock_count(nrows, threads_per_block); // think unsigned as we use 0, 255 if (nrows <= 20) { hipLaunchKernelGGL(( _float_to_fused8bitrowwise_cuda_kernel), dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input.data_ptr<float>(), nrows, ncols, output.data_ptr<std::uint8_t>()); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { // range_tensor is used to store the range for each embedding row. // We save range/255.0f as row scale, and use 255.0f / (range + kEpsilon) to // quantize. This will guarantee the numerical match but bring some perf // regression. 
auto range_tensor = at::empty({nrows}, input.options().dtype(at::kFloat)); { // we need a blockDim.x that is a power of 2 no larger than the warp size // of 32 int blockDim_x = 1; if (ncols > 16) { // max warp size blockDim_x = 32; } else { while (blockDim_x < ncols) { blockDim_x <<= 1; } } const int rows_per_block = threads_per_block / blockDim_x; const auto num_blocks_warp = cuda_calc_xblock_count(nrows, rows_per_block); hipLaunchKernelGGL(( _get_8bit_qparam_cuda_kernel), dim3(num_blocks_warp), dim3(dim3(blockDim_x, rows_per_block)), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input.data_ptr<float>(), nrows, ncols, output.data_ptr<std::uint8_t>(), range_tensor.data_ptr<float>()); C10_HIP_KERNEL_LAUNCH_CHECK(); } { const int blockDim_x = ::min(ncols, threads_per_block); dim3 blockDim(blockDim_x, threads_per_block / blockDim_x); const auto gridDim_x = cuda_calc_xblock_count(ncols, blockDim.x); const auto gridDim_y = cuda_calc_block_count(nrows, blockDim.y); dim3 gridDim(gridDim_x, gridDim_y); hipLaunchKernelGGL(( _compute_8bit_quantize_cuda_kernel), dim3(gridDim), dim3(blockDim), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input.data_ptr<float>(), range_tensor.data_ptr<float>(), nrows, ncols, output.data_ptr<std::uint8_t>()); C10_HIP_KERNEL_LAUNCH_CHECK(); } } return output; } at::Tensor _fused8bitrowwise_to_float_gpu(const at::Tensor& input) { TENSOR_ON_CUDA_GPU(input); TORCH_CHECK(input.is_contiguous(), "input must be contiguous"); at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(input.get_device()); const auto input_sizes = input.sizes(); const auto last_dim = input_sizes.size() - 1; const int nrows = c10::size_to_dim_(last_dim, input_sizes); const int ncols = input_sizes[last_dim]; const int ncols_aligned = (ncols + 4 - 1) / 4 * 4; const int output_columns = ncols_aligned - 2 * sizeof(float); // Global memory instructions support reading or writing words of size equal // to 1, 2, 4, 8, or 16 bytes. Any access (via a variable or a pointer) to // data residing in global memory compiles to a single global memory // instruction if and only if the size of the data type is 1, 2, 4, 8, or 16 // bytes and the data is naturally aligned (i.e., its address is a multiple of // that size). 
auto output_dims = input_sizes.vec(); output_dims[last_dim] = output_columns; auto output = at::empty( output_dims, // 4 = sizeof(float) input.options().dtype(at::kFloat)); if (nrows == 0 || output_columns == 0) { return output; } constexpr int threads_per_block = 256; const int blockDim_x = ::min(threads_per_block, output_columns); dim3 blockDim(blockDim_x, threads_per_block / blockDim_x); const auto gridDim_x = cuda_calc_xblock_count(output_columns, blockDim.x); const auto gridDim_y = cuda_calc_block_count(nrows, blockDim.y); dim3 gridDim(gridDim_x, gridDim_y); hipLaunchKernelGGL(( _fused8bitrowwise_to_float_cuda_kernel), dim3(gridDim), dim3(blockDim), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input.data_ptr<std::uint8_t>(), nrows, ncols, output.data_ptr<float>()); C10_HIP_KERNEL_LAUNCH_CHECK(); return output; } at::Tensor _float_to_fusednbitrowwise_gpu( const at::Tensor& input, const int64_t bit_rate) { TENSOR_ON_CUDA_GPU(input); TENSOR_NDIM_EQUALS(input, 2); at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(input.get_device()); const int nrows = input.size(0); const int ncols = input.size(1); const int num_elem_per_byte = 8 / bit_rate; TORCH_CHECK( ncols % (2 * num_elem_per_byte) == 0, "ncols needs to be multiple of 2 Bytes (half type size) to make the address aligned"); const int output_columns = (ncols + num_elem_per_byte - 1) / num_elem_per_byte + 2 * sizeof(at::Half); // Global memory instructions support reading or writing words of size equal // to 1, 2, 4, 8, or 16 bytes. Any access (via a variable or a pointer) to // data residing in global memory compiles to a single global memory // instruction if and only if the size of the data type is 1, 2, 4, 8, or 16 // bytes and the data is naturally aligned (i.e., its address is a multiple of // that size). auto output = at::empty( {nrows, output_columns}, input.options().dtype(at::kByte)); // at::kBytes for uint8_t if (nrows == 0 || ncols == 0) { return output; } constexpr auto threads_per_block = 256; const auto num_blocks = cuda_calc_xblock_count(nrows, threads_per_block); // think unsigned as we use 0, 255 hipLaunchKernelGGL(( _float_to_fusednbitrowwise_cuda_kernel), dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), bit_rate, input.data_ptr<float>(), nrows, ncols, output.data_ptr<std::uint8_t>()); C10_HIP_KERNEL_LAUNCH_CHECK(); return output; } at::Tensor _fusednbitrowwise_to_float_gpu( const at::Tensor& input, const int64_t bit_rate) { TENSOR_ON_CUDA_GPU(input); TENSOR_NDIM_EQUALS(input, 2); at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(input.get_device()); const int nrows = input.size(0); const int ncols = input.size(1); const int num_elem_per_byte = 8 / bit_rate; const int output_columns = (ncols - 2 * sizeof(at::Half)) * num_elem_per_byte; // Global memory instructions support reading or writing words of size equal // to 1, 2, 4, 8, or 16 bytes. Any access (via a variable or a pointer) to // data residing in global memory compiles to a single global memory // instruction if and only if the size of the data type is 1, 2, 4, 8, or 16 // bytes and the data is naturally aligned (i.e., its address is a multiple of // that size). 
auto output = at::empty( {nrows, output_columns}, // 4 = sizeof(float) input.options().dtype(at::kFloat)); // at::kBytes for uint8_t if (nrows == 0 || output_columns == 0) { return output; } constexpr int threads_per_block = 256; const int blockDim_x = ::min(output_columns, threads_per_block); dim3 blockDim(blockDim_x, threads_per_block / blockDim_x); const auto gridDim_x = cuda_calc_xblock_count(output_columns, blockDim.x); const auto gridDim_y = cuda_calc_block_count(nrows, blockDim.y); dim3 gridDim(gridDim_x, gridDim_y); hipLaunchKernelGGL(( _fusednbitrowwise_to_float_cuda_kernel), dim3(gridDim), dim3(blockDim), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), bit_rate, input.data_ptr<uint8_t>(), nrows, ncols, output.data_ptr<float>()); C10_HIP_KERNEL_LAUNCH_CHECK(); return output; } } // namespace at
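asynchronous_inclusive_cumsum and asynchronous_exclusive_cumsum in this pair use the standard two-call CUB DeviceScan idiom: the first call with a null temporary buffer only reports the required scratch size, the second call performs the scan. A minimal plain-CUDA sketch of that idiom, without the ATen dispatch and checks used above (buffer and function names are illustrative):

```cuda
#include <cub/device/device_scan.cuh>
#include <cuda_runtime.h>

void inclusive_cumsum(const int *d_in, int *d_out, int n, cudaStream_t stream) {
  size_t temp_bytes = 0;
  // First call: null temp storage, only queries the scratch size needed.
  cub::DeviceScan::InclusiveSum(nullptr, temp_bytes, d_in, d_out, n, stream);
  void *d_temp = nullptr;
  cudaMalloc(&d_temp, temp_bytes);
  // Second call: runs the actual inclusive prefix sum on the stream.
  cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_in, d_out, n, stream);
  cudaFree(d_temp);
}
```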
650bd3710484b5e05648816ba4cd6192114dab66.cu
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include "fbgemm_gpu/quantize_ops.cuh" #include "fbgemm_gpu/sparse_ops.cuh" #include "fbgemm_gpu/sparse_ops.h" #include "fbgemm_gpu/sparse_ops_utils.h" #include <ATen/ATen.h> #include <ATen/core/op_registration/op_registration.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/Exceptions.h> #include <c10/cuda/CUDAGuard.h> #include <torch/library.h> #include "ATen/Parallel.h" #include "cub/device/device_scan.cuh" namespace at { Tensor asynchronous_inclusive_cumsum(const Tensor& t_in) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(t_in.get_device()); size_t temp_storage_bytes = 0; TORCH_CHECK(t_in.is_contiguous()); TORCH_CHECK(t_in.dtype() == kInt || t_in.dtype() == kLong); // CUB only handles up to INT_MAX elements. TORCH_CHECK(t_in.numel() < std::numeric_limits<int32_t>::max()); auto t_out = at::empty_like(t_in); AT_DISPATCH_INTEGRAL_TYPES( t_in.scalar_type(), "cub_inclusive_sum_wrapper1", ([&] { AT_CUDA_CHECK(cub::DeviceScan::InclusiveSum( nullptr, temp_storage_bytes, t_in.data_ptr<scalar_t>(), t_out.data_ptr<scalar_t>(), t_in.numel(), at::cuda::getCurrentCUDAStream())); })); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, t_in.options().dtype(kByte)); AT_DISPATCH_INTEGRAL_TYPES( t_in.scalar_type(), "cub_inclusive_sum_wrapper2", ([&] { AT_CUDA_CHECK(cub::DeviceScan::InclusiveSum( temp_storage.data_ptr(), temp_storage_bytes, t_in.data_ptr<scalar_t>(), t_out.data_ptr<scalar_t>(), t_in.numel(), at::cuda::getCurrentCUDAStream())); })); return t_out; } Tensor asynchronous_exclusive_cumsum(const Tensor& t_in) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(t_in.get_device()); size_t temp_storage_bytes = 0; TORCH_CHECK(t_in.is_contiguous()); TORCH_CHECK(t_in.dtype() == kInt || t_in.dtype() == kLong); // CUB only handles up to INT_MAX elements. 
TORCH_CHECK(t_in.numel() < std::numeric_limits<int32_t>::max()); auto t_out = at::empty_like(t_in); AT_DISPATCH_INTEGRAL_TYPES( t_in.scalar_type(), "cub_exclusive_sum_wrapper1", ([&] { AT_CUDA_CHECK(cub::DeviceScan::ExclusiveSum( nullptr, temp_storage_bytes, t_in.data_ptr<scalar_t>(), t_out.data_ptr<scalar_t>(), t_in.numel(), at::cuda::getCurrentCUDAStream())); })); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, t_in.options().dtype(kByte)); AT_DISPATCH_INTEGRAL_TYPES( t_in.scalar_type(), "cub_exclusive_sum_wrapper2", ([&] { AT_CUDA_CHECK(cub::DeviceScan::ExclusiveSum( temp_storage.data_ptr(), temp_storage_bytes, t_in.data_ptr<scalar_t>(), t_out.data_ptr<scalar_t>(), t_in.numel(), at::cuda::getCurrentCUDAStream())); })); return t_out; } std::tuple<Tensor, Tensor, c10::optional<Tensor>> permute_sparse_data_cuda( const Tensor& permute, const Tensor& lengths, const Tensor& indices, const c10::optional<Tensor>& weights, const c10::optional<int64_t>& permuted_lengths_sum) { TENSOR_ON_CUDA_GPU(permute); TENSOR_ON_CUDA_GPU(lengths); TENSOR_ON_CUDA_GPU(indices); TENSOR_ON_CUDA_GPU(weights); TENSORS_ON_SAME_DEVICE(permute, lengths); TENSORS_ON_SAME_DEVICE(permute, indices); TENSORS_ON_SAME_DEVICE(permute, weights); at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(indices.get_device()); const auto permute_contig = permute.contiguous(); const auto lengths_contig = lengths.contiguous(); const auto indices_contig = indices.contiguous(); // the data to permute over can be less or more with or without // repetitions const auto T = permute.numel(); const auto T_ = lengths.size(0); const auto B = lengths.view({lengths.sizes()[0], -1}).sizes()[1]; Tensor permuted_lengths; Tensor permuted_indices; Tensor permuted_weights; permuted_lengths = at::empty({T, B}, lengths.options()); constexpr int32_t threads_1 = 256; const auto blocks_1 = cuda_calc_xblock_count(B * T, threads_1); AT_DISPATCH_INDEX_TYPES( lengths.scalar_type(), "permute_lengths_kernel", ([&] { permute_lengths_kernel<index_t> <<<blocks_1, threads_1, 0, at::cuda::getCurrentCUDAStream()>>>( T, B, lengths_contig.data_ptr<index_t>(), permute.data_ptr<int32_t>(), permuted_lengths.data_ptr<index_t>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); })); // convert lengths to offsets const auto input_offsets = asynchronous_exclusive_cumsum(lengths_contig); const auto output_offsets = asynchronous_exclusive_cumsum(permuted_lengths); int64_t permuted_indices_size = 0; if (permuted_lengths_sum.has_value()) { permuted_indices_size = permuted_lengths_sum.value(); } else { permuted_indices_size = permuted_lengths.sum().item<int64_t>(); } constexpr int32_t BT_blocks = 32; dim3 threads_2(32, BT_blocks); const auto blocks_2 = cuda_calc_xblock_count(B * T, BT_blocks); permuted_indices = at::empty(permuted_indices_size, indices.options()); AT_DISPATCH_INDEX_TYPES( input_offsets.scalar_type(), "permute_data_kernel_1", ([&] { using offsets_t = index_t; AT_DISPATCH_ALL_TYPES( indices.scalar_type(), "permute_data_kernel_2", ([&] { using indices_t = scalar_t; if (weights.has_value()) { const Tensor weights_value = weights.value(); const auto weights_value_contig = weights_value.contiguous(); permuted_weights = at::empty(permuted_indices_size, weights_value.options()); AT_DISPATCH_FLOATING_TYPES( weights_value.scalar_type(), "permute_data_kernel_3", ([&] { using weights_t = scalar_t; permute_data_kernel<true, offsets_t, indices_t, weights_t> <<<blocks_2, threads_2, 0, at::cuda::getCurrentCUDAStream()>>>( permuted_indices_size, T, B, 
indices_contig.data_ptr<indices_t>(), weights_value_contig.data_ptr<weights_t>(), permute_contig.data_ptr<int32_t>(), input_offsets.data_ptr<offsets_t>(), output_offsets.data_ptr<offsets_t>(), permuted_indices.data_ptr<indices_t>(), permuted_weights.data_ptr<weights_t>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); })); // for each weights_t } else { permute_data_kernel<false, offsets_t, indices_t, std::nullptr_t> <<<blocks_2, threads_2, 0, at::cuda::getCurrentCUDAStream()>>>( permuted_indices_size, T, B, indices_contig.data_ptr<indices_t>(), nullptr, permute_contig.data_ptr<int32_t>(), input_offsets.data_ptr<offsets_t>(), output_offsets.data_ptr<offsets_t>(), permuted_indices.data_ptr<indices_t>(), nullptr); C10_CUDA_KERNEL_LAUNCH_CHECK(); } })); // for each indices_t })); // for each offsets_t return {permuted_lengths, permuted_indices, permuted_weights}; } // This function partitions sparse features // continuously along the sparse dimension into my_size blocks std::tuple< Tensor, Tensor, c10::optional<Tensor>, c10::optional<Tensor>, c10::optional<Tensor>> block_bucketize_sparse_features_cuda( Tensor lengths, Tensor indices, bool bucketize_pos, bool sequence, Tensor block_sizes, int64_t my_size, c10::optional<Tensor> weights) { TENSOR_ON_CUDA_GPU(lengths); TENSOR_ON_CUDA_GPU(indices); TENSORS_ON_SAME_DEVICE(lengths, indices); TENSOR_ON_CUDA_GPU(weights); TENSORS_ON_SAME_DEVICE(lengths, weights); at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(lengths.get_device()); // allocate tensors and buffers const int lengths_size = lengths.numel(); const int T = block_sizes.numel(); const int B = lengths_size / T; const int new_lengths_size = lengths_size * my_size; auto offsets = at::empty({lengths_size}, lengths.options()); auto new_lengths = at::zeros({new_lengths_size}, lengths.options()); auto new_offsets = at::empty({new_lengths_size}, lengths.options()); auto new_indices = at::empty_like(indices); auto lengths_contig = lengths.contiguous(); auto indices_contig = indices.contiguous(); auto offsets_contig = offsets.contiguous(); Tensor new_weights; Tensor new_pos; Tensor unbucketize_permute; // count nonzeros offsets_contig = asynchronous_inclusive_cumsum(lengths); int threads_per_block = 256; int num_blocks = (lengths_size + threads_per_block - 1) / threads_per_block; AT_DISPATCH_INDEX_TYPES( offsets_contig.scalar_type(), "_block_bucketize_sparse_features_cuda_kernel1", ([&] { using offset_t = index_t; AT_DISPATCH_INDEX_TYPES( indices_contig.scalar_type(), "_block_bucketize_sparse_features_cuda_kernel2", ([&] { _block_bucketize_sparse_features_cuda_kernel1<<< num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( lengths_size, B, block_sizes.data_ptr<index_t>(), my_size, offsets_contig.data_ptr<offset_t>(), indices_contig.data_ptr<index_t>(), new_lengths.data_ptr<offset_t>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); })); })); // bucketize nonzeros new_offsets = asynchronous_exclusive_cumsum(new_lengths); if (sequence) { const auto lengths_sum = indices.numel(); unbucketize_permute = at::empty({lengths_sum}, indices.options()); if (weights.has_value() & bucketize_pos) { Tensor weights_value = weights.value(); auto weights_value_contig = weights_value.contiguous(); new_weights = at::empty_like(weights_value); new_pos = at::empty_like(indices); AT_DISPATCH_INDEX_TYPES( offsets_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_1", ([&] { using offset_t = index_t; AT_DISPATCH_INDEX_TYPES( indices_contig.scalar_type(), 
"_bucketize_sparse_features_weight_cuda_kernel2_2", ([&] { AT_DISPATCH_FLOATING_TYPES( weights_value.scalar_type(), "_block_bucketize_sparse_features_cuda_weight_kernel2_3", ([&] { _block_bucketize_sparse_features_cuda_kernel2< true, true, true, offset_t, index_t, scalar_t> <<<num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( lengths_size, B, block_sizes.data_ptr<index_t>(), my_size, offsets_contig.data_ptr<offset_t>(), indices_contig.data_ptr<index_t>(), weights_value_contig.data_ptr<scalar_t>(), new_offsets.data_ptr<offset_t>(), new_indices.data_ptr<index_t>(), new_weights.data_ptr<scalar_t>(), new_pos.data_ptr<index_t>(), unbucketize_permute.data_ptr<index_t>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); })); })); })); } else if (weights.has_value()) { Tensor weights_value = weights.value(); auto weights_value_contig = weights_value.contiguous(); new_weights = at::empty_like(weights_value); AT_DISPATCH_INDEX_TYPES( offsets_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_1", ([&] { using offset_t = index_t; AT_DISPATCH_INDEX_TYPES( indices_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_2", ([&] { AT_DISPATCH_FLOATING_TYPES( weights_value.scalar_type(), "_block_bucketize_sparse_features_cuda_weight_kernel2_3", ([&] { _block_bucketize_sparse_features_cuda_kernel2< true, true, false, offset_t, index_t, scalar_t> <<<num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( lengths_size, B, block_sizes.data_ptr<index_t>(), my_size, offsets_contig.data_ptr<offset_t>(), indices_contig.data_ptr<index_t>(), weights_value_contig.data_ptr<scalar_t>(), new_offsets.data_ptr<offset_t>(), new_indices.data_ptr<index_t>(), new_weights.data_ptr<scalar_t>(), nullptr, unbucketize_permute.data_ptr<index_t>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); })); })); })); } else if (bucketize_pos) { new_pos = at::empty_like(indices); AT_DISPATCH_INDEX_TYPES( offsets_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_1", ([&] { using offset_t = index_t; AT_DISPATCH_INDEX_TYPES( indices_contig.scalar_type(), "_block_bucketize_sparse_features_cuda_kernel2_2", ([&] { _block_bucketize_sparse_features_cuda_kernel2< true, false, true, offset_t, index_t, std::nullptr_t> <<<num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( lengths_size, B, block_sizes.data_ptr<index_t>(), my_size, offsets_contig.data_ptr<offset_t>(), indices_contig.data_ptr<index_t>(), nullptr, new_offsets.data_ptr<offset_t>(), new_indices.data_ptr<index_t>(), nullptr, new_pos.data_ptr<index_t>(), unbucketize_permute.data_ptr<index_t>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); })); })); } else { AT_DISPATCH_INDEX_TYPES( offsets_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_1", ([&] { using offset_t = index_t; AT_DISPATCH_INDEX_TYPES( indices_contig.scalar_type(), "_block_bucketize_sparse_features_cuda_kernel2_2", ([&] { _block_bucketize_sparse_features_cuda_kernel2< true, false, false, offset_t, index_t, std::nullptr_t> <<<num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( lengths_size, B, block_sizes.data_ptr<index_t>(), my_size, offsets_contig.data_ptr<offset_t>(), indices_contig.data_ptr<index_t>(), nullptr, new_offsets.data_ptr<offset_t>(), new_indices.data_ptr<index_t>(), nullptr, nullptr, unbucketize_permute.data_ptr<index_t>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); })); })); } } else { if (weights.has_value() & bucketize_pos) { Tensor weights_value = weights.value(); auto weights_value_contig = 
weights_value.contiguous(); new_weights = at::empty_like(weights_value); new_pos = at::empty_like(indices); AT_DISPATCH_INDEX_TYPES( offsets_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_1", ([&] { using offset_t = index_t; AT_DISPATCH_INDEX_TYPES( indices_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_2", ([&] { AT_DISPATCH_FLOATING_TYPES( weights_value.scalar_type(), "_block_bucketize_sparse_features_cuda_weight_kernel2_3", ([&] { _block_bucketize_sparse_features_cuda_kernel2< false, true, true, offset_t, index_t, scalar_t> <<<num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( lengths_size, B, block_sizes.data_ptr<index_t>(), my_size, offsets_contig.data_ptr<offset_t>(), indices_contig.data_ptr<index_t>(), weights_value_contig.data_ptr<scalar_t>(), new_offsets.data_ptr<offset_t>(), new_indices.data_ptr<index_t>(), new_weights.data_ptr<scalar_t>(), new_pos.data_ptr<index_t>(), nullptr); C10_CUDA_KERNEL_LAUNCH_CHECK(); })); })); })); } else if (weights.has_value()) { Tensor weights_value = weights.value(); auto weights_value_contig = weights_value.contiguous(); new_weights = at::empty_like(weights_value); AT_DISPATCH_INDEX_TYPES( offsets_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_1", ([&] { using offset_t = index_t; AT_DISPATCH_INDEX_TYPES( indices_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_2", ([&] { AT_DISPATCH_FLOATING_TYPES( weights_value.scalar_type(), "_block_bucketize_sparse_features_cuda_weight_kernel2_3", ([&] { _block_bucketize_sparse_features_cuda_kernel2< false, true, false, offset_t, index_t, scalar_t> <<<num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( lengths_size, B, block_sizes.data_ptr<index_t>(), my_size, offsets_contig.data_ptr<offset_t>(), indices_contig.data_ptr<index_t>(), weights_value_contig.data_ptr<scalar_t>(), new_offsets.data_ptr<offset_t>(), new_indices.data_ptr<index_t>(), new_weights.data_ptr<scalar_t>(), nullptr, nullptr); C10_CUDA_KERNEL_LAUNCH_CHECK(); })); })); })); } else if (bucketize_pos) { new_pos = at::empty_like(indices); AT_DISPATCH_INDEX_TYPES( offsets_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_1", ([&] { using offset_t = index_t; AT_DISPATCH_INDEX_TYPES( indices_contig.scalar_type(), "_block_bucketize_sparse_features_cuda_kernel2_2", ([&] { _block_bucketize_sparse_features_cuda_kernel2< false, false, true, offset_t, index_t, std::nullptr_t> <<<num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( lengths_size, B, block_sizes.data_ptr<index_t>(), my_size, offsets_contig.data_ptr<offset_t>(), indices_contig.data_ptr<index_t>(), nullptr, new_offsets.data_ptr<offset_t>(), new_indices.data_ptr<index_t>(), nullptr, new_pos.data_ptr<index_t>(), nullptr); C10_CUDA_KERNEL_LAUNCH_CHECK(); })); })); } else { AT_DISPATCH_INDEX_TYPES( offsets_contig.scalar_type(), "_bucketize_sparse_features_weight_cuda_kernel2_1", ([&] { using offset_t = index_t; AT_DISPATCH_INDEX_TYPES( indices_contig.scalar_type(), "_block_bucketize_sparse_features_cuda_kernel2_2", ([&] { _block_bucketize_sparse_features_cuda_kernel2< false, false, false, offset_t, index_t, std::nullptr_t> <<<num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( lengths_size, B, block_sizes.data_ptr<index_t>(), my_size, offsets_contig.data_ptr<offset_t>(), indices_contig.data_ptr<index_t>(), nullptr, new_offsets.data_ptr<offset_t>(), new_indices.data_ptr<index_t>(), nullptr, nullptr, nullptr); 
C10_CUDA_KERNEL_LAUNCH_CHECK(); })); })); } } return {new_lengths, new_indices, new_weights, new_pos, unbucketize_permute}; } at::Tensor _float_to_fused8bitrowwise_gpu(const at::Tensor& input) { TENSOR_ON_CUDA_GPU(input); TORCH_CHECK(input.is_contiguous(), "input must be contiguous"); at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(input.get_device()); const auto input_sizes = input.sizes(); const auto last_dim = input_sizes.size() - 1; const int nrows = c10::size_to_dim_(last_dim, input_sizes); const int ncols = input_sizes[last_dim]; const int ncols_aligned = (ncols + 4 - 1) / 4 * 4; const int output_columns = ncols_aligned + 2 * sizeof(float); // Global memory instructions support reading or writing words of size equal // to 1, 2, 4, 8, or 16 bytes. Any access (via a variable or a pointer) to // data residing in global memory compiles to a single global memory // instruction if and only if the size of the data type is 1, 2, 4, 8, or 16 // bytes and the data is naturally aligned (i.e., its address is a multiple of // that size). auto output_dims = input_sizes.vec(); output_dims[last_dim] = output_columns; auto output = at::empty( output_dims, // 4 = sizeof(float) input.options().dtype(at::kByte)); if (nrows == 0 || ncols == 0) { return output; } constexpr int threads_per_block = 256; const auto num_blocks = cuda_calc_xblock_count(nrows, threads_per_block); // think unsigned as we use 0, 255 if (nrows <= 20) { _float_to_fused8bitrowwise_cuda_kernel<<< num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( input.data_ptr<float>(), nrows, ncols, output.data_ptr<std::uint8_t>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { // range_tensor is used to store the range for each embedding row. // We save range/255.0f as row scale, and use 255.0f / (range + kEpsilon) to // quantize. This will guarantee the numerical match but bring some perf // regression. 
auto range_tensor = at::empty({nrows}, input.options().dtype(at::kFloat)); { // we need a blockDim.x that is a power of 2 no larger than the warp size // of 32 int blockDim_x = 1; if (ncols > 16) { // max warp size blockDim_x = 32; } else { while (blockDim_x < ncols) { blockDim_x <<= 1; } } const int rows_per_block = threads_per_block / blockDim_x; const auto num_blocks_warp = cuda_calc_xblock_count(nrows, rows_per_block); _get_8bit_qparam_cuda_kernel<<< num_blocks_warp, dim3(blockDim_x, rows_per_block), 0, at::cuda::getCurrentCUDAStream()>>>( input.data_ptr<float>(), nrows, ncols, output.data_ptr<std::uint8_t>(), range_tensor.data_ptr<float>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); } { const int blockDim_x = std::min(ncols, threads_per_block); dim3 blockDim(blockDim_x, threads_per_block / blockDim_x); const auto gridDim_x = cuda_calc_xblock_count(ncols, blockDim.x); const auto gridDim_y = cuda_calc_block_count(nrows, blockDim.y); dim3 gridDim(gridDim_x, gridDim_y); _compute_8bit_quantize_cuda_kernel<<< gridDim, blockDim, 0, at::cuda::getCurrentCUDAStream()>>>( input.data_ptr<float>(), range_tensor.data_ptr<float>(), nrows, ncols, output.data_ptr<std::uint8_t>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } return output; } at::Tensor _fused8bitrowwise_to_float_gpu(const at::Tensor& input) { TENSOR_ON_CUDA_GPU(input); TORCH_CHECK(input.is_contiguous(), "input must be contiguous"); at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(input.get_device()); const auto input_sizes = input.sizes(); const auto last_dim = input_sizes.size() - 1; const int nrows = c10::size_to_dim_(last_dim, input_sizes); const int ncols = input_sizes[last_dim]; const int ncols_aligned = (ncols + 4 - 1) / 4 * 4; const int output_columns = ncols_aligned - 2 * sizeof(float); // Global memory instructions support reading or writing words of size equal // to 1, 2, 4, 8, or 16 bytes. Any access (via a variable or a pointer) to // data residing in global memory compiles to a single global memory // instruction if and only if the size of the data type is 1, 2, 4, 8, or 16 // bytes and the data is naturally aligned (i.e., its address is a multiple of // that size). 
auto output_dims = input_sizes.vec(); output_dims[last_dim] = output_columns; auto output = at::empty( output_dims, // 4 = sizeof(float) input.options().dtype(at::kFloat)); if (nrows == 0 || output_columns == 0) { return output; } constexpr int threads_per_block = 256; const int blockDim_x = std::min(threads_per_block, output_columns); dim3 blockDim(blockDim_x, threads_per_block / blockDim_x); const auto gridDim_x = cuda_calc_xblock_count(output_columns, blockDim.x); const auto gridDim_y = cuda_calc_block_count(nrows, blockDim.y); dim3 gridDim(gridDim_x, gridDim_y); _fused8bitrowwise_to_float_cuda_kernel<<< gridDim, blockDim, 0, at::cuda::getCurrentCUDAStream()>>>( input.data_ptr<std::uint8_t>(), nrows, ncols, output.data_ptr<float>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); return output; } at::Tensor _float_to_fusednbitrowwise_gpu( const at::Tensor& input, const int64_t bit_rate) { TENSOR_ON_CUDA_GPU(input); TENSOR_NDIM_EQUALS(input, 2); at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(input.get_device()); const int nrows = input.size(0); const int ncols = input.size(1); const int num_elem_per_byte = 8 / bit_rate; TORCH_CHECK( ncols % (2 * num_elem_per_byte) == 0, "ncols needs to be multiple of 2 Bytes (half type size) to make the address aligned"); const int output_columns = (ncols + num_elem_per_byte - 1) / num_elem_per_byte + 2 * sizeof(at::Half); // Global memory instructions support reading or writing words of size equal // to 1, 2, 4, 8, or 16 bytes. Any access (via a variable or a pointer) to // data residing in global memory compiles to a single global memory // instruction if and only if the size of the data type is 1, 2, 4, 8, or 16 // bytes and the data is naturally aligned (i.e., its address is a multiple of // that size). auto output = at::empty( {nrows, output_columns}, input.options().dtype(at::kByte)); // at::kBytes for uint8_t if (nrows == 0 || ncols == 0) { return output; } constexpr auto threads_per_block = 256; const auto num_blocks = cuda_calc_xblock_count(nrows, threads_per_block); // think unsigned as we use 0, 255 _float_to_fusednbitrowwise_cuda_kernel<<< num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( bit_rate, input.data_ptr<float>(), nrows, ncols, output.data_ptr<std::uint8_t>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); return output; } at::Tensor _fusednbitrowwise_to_float_gpu( const at::Tensor& input, const int64_t bit_rate) { TENSOR_ON_CUDA_GPU(input); TENSOR_NDIM_EQUALS(input, 2); at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(input.get_device()); const int nrows = input.size(0); const int ncols = input.size(1); const int num_elem_per_byte = 8 / bit_rate; const int output_columns = (ncols - 2 * sizeof(at::Half)) * num_elem_per_byte; // Global memory instructions support reading or writing words of size equal // to 1, 2, 4, 8, or 16 bytes. Any access (via a variable or a pointer) to // data residing in global memory compiles to a single global memory // instruction if and only if the size of the data type is 1, 2, 4, 8, or 16 // bytes and the data is naturally aligned (i.e., its address is a multiple of // that size). 
auto output = at::empty( {nrows, output_columns}, // 4 = sizeof(float) input.options().dtype(at::kFloat)); // at::kBytes for uint8_t if (nrows == 0 || output_columns == 0) { return output; } constexpr int threads_per_block = 256; const int blockDim_x = std::min(output_columns, threads_per_block); dim3 blockDim(blockDim_x, threads_per_block / blockDim_x); const auto gridDim_x = cuda_calc_xblock_count(output_columns, blockDim.x); const auto gridDim_y = cuda_calc_block_count(nrows, blockDim.y); dim3 gridDim(gridDim_x, gridDim_y); _fusednbitrowwise_to_float_cuda_kernel<<< gridDim, blockDim, 0, at::cuda::getCurrentCUDAStream()>>>( bit_rate, input.data_ptr<uint8_t>(), nrows, ncols, output.data_ptr<float>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); return output; } } // namespace at
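// Reference sketch (CPU, illustrative only) of the fused 8-bit rowwise layout the
// kernels above produce: each output row holds ncols_aligned uint8 codes followed by
// a float scale and a float bias. Per the comment in _float_to_fused8bitrowwise_gpu,
// scale = range / 255 and quantization multiplies by 255 / (range + kEpsilon); the
// kEpsilon value and the bias-equals-row-minimum convention are assumptions here,
// and the authoritative math lives in fbgemm_gpu/quantize_ops.cuh.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstring>

// out_row must have room for ((ncols + 3) / 4 * 4) + 2 * sizeof(float) bytes; ncols > 0.
inline void fused8bitrowwise_quantize_row_ref(const float* x, int ncols, std::uint8_t* out_row) {
  constexpr float kEpsilon = 1e-8f;  // assumed epsilon
  const int ncols_aligned = (ncols + 4 - 1) / 4 * 4;
  const float minimum = *std::min_element(x, x + ncols);
  const float range = *std::max_element(x, x + ncols) - minimum;
  const float inverse_scale = 255.0f / (range + kEpsilon);
  for (int j = 0; j < ncols; ++j) {
    out_row[j] = static_cast<std::uint8_t>(std::lrintf((x[j] - minimum) * inverse_scale));
  }
  const float scale = range / 255.0f;  // dequantization is then q * scale + bias
  std::memcpy(out_row + ncols_aligned, &scale, sizeof(float));
  std::memcpy(out_row + ncols_aligned + sizeof(float), &minimum, sizeof(float));
}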
ebd1f4fbfe4aa5bd0b67197002d3d0e12807e92a.hip
// !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime_api.h"
//#include "utils.h"
#include <iostream>

using namespace std;

#ifndef CHECKCUDAERRORS_H
#define CHECKCUDAERRORS_H

#include <hip/hip_runtime_api.h>
#include <stdio.h>

#include "Point3D.h"

int CudaCheckLastError() {
    hipError_t error = hipGetLastError();
    if (error != hipSuccess) {
        // print the CUDA error message and exit
        printf("CUDA error: %s\n", hipGetErrorString(error));
        fflush(stdout);
        return 1;
        //exit(-1);
    }
    printf("no errors\n");
    fflush(stdout);
    return 0;
}

#endif // CHECKCUDAERRORS_H

__global__ void operationKernel(Point3D<float>* d_points, const int count) {
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    if (count <= myId) {
        return;
    }
    d_points[myId]._y = myId;
}

void CallKernel(Point3D<float>* h_points, const int count) {
    const int threads = 16;
    const dim3 gridSize((count + threads - 1) / threads);
    const dim3 blockSize(threads);
    const size_t memLen = sizeof(Point3D<float>) * count;

    Point3D<float>* d_points;
    hipMalloc((void**)&d_points, memLen);
    hipMemcpy(d_points, h_points, memLen, hipMemcpyHostToDevice);
    CudaCheckLastError();

    hipLaunchKernelGGL((operationKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_points, count);
    hipDeviceSynchronize();

    hipMemcpy(h_points, d_points, memLen, hipMemcpyDeviceToHost);
    hipFree(d_points);
}
ebd1f4fbfe4aa5bd0b67197002d3d0e12807e92a.cu
#include "stdio.h" #include "cuda.h" #include "cuda_runtime_api.h" //#include "utils.h" #include <iostream> using namespace std; #ifndef CHECKCUDAERRORS_H #define CHECKCUDAERRORS_H #include <cuda_runtime_api.h> #include <stdio.h> #include "Point3D.h" int CudaCheckLastError() { cudaError_t error = cudaGetLastError(); if(error != cudaSuccess) { // print the CUDA error message and exit printf("CUDA error: %s\n", cudaGetErrorString(error)); fflush(stdout); return 1; //exit(-1); } printf("no errors\n"); fflush(stdout); return 0; } #endif // CHECKCUDAERRORS_H __global__ void operationKernel(Point3D<float>* d_points, const int count) { int myId = threadIdx.x + blockDim.x * blockIdx.x; if (count <= myId) { return; } d_points[myId]._y = myId; } void CallKernel(Point3D<float>* h_points, const int count) { const int threads = 16; const dim3 gridSize((count + threads - 1) / threads); const dim3 blockSize(threads); const size_t memLen = sizeof(Point3D<float>) * count; Point3D<float>* d_points; cudaMalloc((void**)&d_points, memLen ); cudaMemcpy(d_points, h_points, memLen, cudaMemcpyHostToDevice); CudaCheckLastError(); operationKernel<<<gridSize, blockSize>>>(d_points, count); cudaDeviceSynchronize(); cudaMemcpy(h_points, d_points, memLen, cudaMemcpyDeviceToHost); cudaFree(d_points); }
721d1b01453d118d12f058cf8c32eb6a71a58fda.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <asm/unistd.h> #include <fcntl.h> #include <inttypes.h> #include <linux/kernel-page-flags.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string> #include <string.h> #include <sys/ioctl.h> #include <sys/mount.h> #include <sys/mman.h> #include <sys/stat.h> #include <sys/sysinfo.h> #include <sys/wait.h> #include <time.h> #include <unistd.h> #include <vector> #include <sys/time.h> #include <assert.h> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void kernel(int number_of_threads, float * dsum ,volatile int * d_mapping, int cnt, int fence_system_flag, int fence_block_flag) { int i; /*printf("D: i am [%d] \n", blockIdx.x * blockDim.x * blockDim.y * blockDim.z + threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x); */ for(i=0; i < cnt ; i++){ *dsum = i; d_mapping[0] = *dsum; if(fence_system_flag){ __threadfence_system(); } if(fence_block_flag){ __threadfence_block(); } } } int main(int argc, char **argv) { int opt, BLOCKS = 1, THREADS = 1, cnt =10000, fence_system_flag =0, fence_block_flag = 0; hipEvent_t start, stop; float elapsed_time =0; gpuErrchk(hipEventCreate(&start)); gpuErrchk(hipEventCreate(&stop)); while ((opt = getopt(argc, argv, "b:t:n:f:s:")) != -1) { switch (opt) { case 'b': BLOCKS = atoi(optarg); break; case 't': THREADS = atoi(optarg); break; case 'n': cnt = atoi(optarg); break; case 'f': fence_system_flag = atoi(optarg); break; case 's': fence_block_flag = atoi(optarg); break; default: fprintf(stderr, "Usage: %s -b [blocks] -t [threads] -n [count of iterations] -f [fence_system] -s [fence_block]\n", argv[0]); exit(EXIT_FAILURE); } } float * dsum; gpuErrchk(hipMallocManaged((void **) &dsum, sizeof(uint64_t))); volatile int * h_mapping; gpuErrchk(hipHostMalloc( (void**)&h_mapping, sizeof(volatile int), hipHostMallocMapped)); volatile int * d_mapping; gpuErrchk(hipHostGetDevicePointer((void**)&d_mapping,(void*)h_mapping,0)); *dsum = 0; hipEventRecord(start, 0); hipLaunchKernelGGL(( kernel) , dim3(BLOCKS), dim3(THREADS) , 0, 0, BLOCKS * THREADS,dsum,d_mapping, cnt, fence_system_flag, fence_block_flag); gpuErrchk(hipDeviceSynchronize()); gpuErrchk(hipEventRecord(stop, 0)); gpuErrchk(hipEventSynchronize(stop)); gpuErrchk(hipEventElapsedTime (&elapsed_time, start, stop)); assert(*dsum != 0); printf("H: elapsed_time is : %f \n", elapsed_time); hipEventDestroy(start); hipEventDestroy(stop); return 0; }
721d1b01453d118d12f058cf8c32eb6a71a58fda.cu
#include <stdio.h> #include <cuda_runtime.h> #include <asm/unistd.h> #include <fcntl.h> #include <inttypes.h> #include <linux/kernel-page-flags.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string> #include <string.h> #include <sys/ioctl.h> #include <sys/mount.h> #include <sys/mman.h> #include <sys/stat.h> #include <sys/sysinfo.h> #include <sys/wait.h> #include <time.h> #include <unistd.h> #include <vector> #include <sys/time.h> #include <assert.h> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void kernel(int number_of_threads, float * dsum ,volatile int * d_mapping, int cnt, int fence_system_flag, int fence_block_flag) { int i; /*printf("D: i am [%d] \n", blockIdx.x * blockDim.x * blockDim.y * blockDim.z + threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x); */ for(i=0; i < cnt ; i++){ *dsum = i; d_mapping[0] = *dsum; if(fence_system_flag){ __threadfence_system(); } if(fence_block_flag){ __threadfence_block(); } } } int main(int argc, char **argv) { int opt, BLOCKS = 1, THREADS = 1, cnt =10000, fence_system_flag =0, fence_block_flag = 0; cudaEvent_t start, stop; float elapsed_time =0; gpuErrchk(cudaEventCreate(&start)); gpuErrchk(cudaEventCreate(&stop)); while ((opt = getopt(argc, argv, "b:t:n:f:s:")) != -1) { switch (opt) { case 'b': BLOCKS = atoi(optarg); break; case 't': THREADS = atoi(optarg); break; case 'n': cnt = atoi(optarg); break; case 'f': fence_system_flag = atoi(optarg); break; case 's': fence_block_flag = atoi(optarg); break; default: fprintf(stderr, "Usage: %s -b [blocks] -t [threads] -n [count of iterations] -f [fence_system] -s [fence_block]\n", argv[0]); exit(EXIT_FAILURE); } } float * dsum; gpuErrchk(cudaMallocManaged((void **) &dsum, sizeof(uint64_t))); volatile int * h_mapping; gpuErrchk(cudaHostAlloc( (void**)&h_mapping, sizeof(volatile int), cudaHostAllocMapped)); volatile int * d_mapping; gpuErrchk(cudaHostGetDevicePointer((void**)&d_mapping,(void*)h_mapping,0)); *dsum = 0; cudaEventRecord(start, 0); kernel <<< BLOCKS, THREADS >>> (BLOCKS * THREADS,dsum,d_mapping, cnt, fence_system_flag, fence_block_flag); gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaEventRecord(stop, 0)); gpuErrchk(cudaEventSynchronize(stop)); gpuErrchk(cudaEventElapsedTime (&elapsed_time, start, stop)); assert(*dsum != 0); printf("H: elapsed_time is : %f \n", elapsed_time); cudaEventDestroy(start); cudaEventDestroy(stop); return 0; }
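// Illustration (not part of the benchmark above): because h_mapping/d_mapping is a
// mapped (zero-copy) allocation, the host can observe the kernel's writes while it is
// still running, and __threadfence_system is what orders those writes for a host
// observer. A minimal polling sketch, assuming the same allocation and kernel:
#include <cstdio>
#include <thread>

void poll_progress(volatile int* h_mapping, int cnt) {
  int last = -1;
  while (last < cnt - 1) {        // the kernel's final write leaves cnt - 1 in d_mapping[0]
    int cur = h_mapping[0];       // read through the host-side alias of the mapped buffer
    if (cur != last) {
      std::printf("H: device progress %d\n", cur);
      last = cur;
    }
    std::this_thread::yield();
  }
}
// e.g. start std::thread poller(poll_progress, h_mapping, cnt); before the kernel
// launch and join it after cudaDeviceSynchronize().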
283def33da0de142bce2c4231e816f4d17c184d5.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * Copyright (c) 2021 Darius Rückert
 * Licensed under the MIT License.
 * See LICENSE file for more information.
 */

#include "stream.h"

#if !defined(_WIN32) && defined(SAIGA_USE_CUDA_TOOLKIT)
#include <nvToolsExtCudaRt.h>
#endif

namespace Saiga
{
namespace CUDA
{
CudaStream::CudaStream() { hipStreamCreate(&stream); }

CudaStream::~CudaStream() { hipStreamDestroy(stream); }

void CudaStream::waitForEvent(hipEvent_t event) { hipStreamWaitEvent(stream, event, 0); }

void CudaStream::synchronize() { hipStreamSynchronize(stream); }

hipStream_t CudaStream::legacyStream() { return cudaStreamLegacy; }

hipStream_t CudaStream::perThreadStream() { return cudaStreamPerThread; }

Saiga::CUDA::CudaStream::operator hipStream_t() const { return stream; }

void CudaStream::setName(const std::string& name)
{
#if !defined(_WIN32) && defined(SAIGA_USE_CUDA_TOOLKIT)
    nvtxNameCudaStreamA(stream, name.c_str());
#endif
}

}  // namespace CUDA
}  // namespace Saiga
283def33da0de142bce2c4231e816f4d17c184d5.cu
/**
 * Copyright (c) 2021 Darius Rückert
 * Licensed under the MIT License.
 * See LICENSE file for more information.
 */

#include "stream.h"

#if !defined(_WIN32) && defined(SAIGA_USE_CUDA_TOOLKIT)
#include <nvToolsExtCudaRt.h>
#endif

namespace Saiga
{
namespace CUDA
{
CudaStream::CudaStream() { cudaStreamCreate(&stream); }

CudaStream::~CudaStream() { cudaStreamDestroy(stream); }

void CudaStream::waitForEvent(cudaEvent_t event) { cudaStreamWaitEvent(stream, event, 0); }

void CudaStream::synchronize() { cudaStreamSynchronize(stream); }

cudaStream_t CudaStream::legacyStream() { return cudaStreamLegacy; }

cudaStream_t CudaStream::perThreadStream() { return cudaStreamPerThread; }

Saiga::CUDA::CudaStream::operator cudaStream_t() const { return stream; }

void CudaStream::setName(const std::string& name)
{
#if !defined(_WIN32) && defined(SAIGA_USE_CUDA_TOOLKIT)
    nvtxNameCudaStreamA(stream, name.c_str());
#endif
}

}  // namespace CUDA
}  // namespace Saiga
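// Usage sketch for the CudaStream RAII wrapper above (illustrative; scale_kernel is a
// placeholder and not part of Saiga):
#include "stream.h"

__global__ void scale_kernel(float* data, int n)  // placeholder kernel for the example
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= 2.0f;
}

void stream_example(float* d_data, int n)
{
    Saiga::CUDA::CudaStream stream;            // cudaStreamCreate in the constructor
    stream.setName("example-stream");          // NVTX name; a no-op on Windows / without the toolkit
    scale_kernel<<<(n + 255) / 256, 256, 0, stream>>>(d_data, n);  // implicit conversion to cudaStream_t
    stream.synchronize();                      // cudaStreamSynchronize
}                                              // cudaStreamDestroy when stream leaves scope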
505ed0efa63e7e580bc8c18b296cde9f779e703d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "naiveKernel.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int N = XSIZE*YSIZE; float *input = NULL; hipMalloc(&input, XSIZE*YSIZE); float *output = NULL; hipMalloc(&output, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( naiveKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,input,output); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( naiveKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,input,output); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( naiveKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,input,output); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
505ed0efa63e7e580bc8c18b296cde9f779e703d.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "naiveKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int N = XSIZE*YSIZE; float *input = NULL; cudaMalloc(&input, XSIZE*YSIZE); float *output = NULL; cudaMalloc(&output, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); naiveKernel<<<gridBlock,threadBlock>>>(N,input,output); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { naiveKernel<<<gridBlock,threadBlock>>>(N,input,output); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { naiveKernel<<<gridBlock,threadBlock>>>(N,input,output); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
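// Side note on the harness above: cudaMalloc/hipMalloc receive XSIZE*YSIZE, i.e. N
// bytes. If naiveKernel (defined in naiveKernel.cu, which is not part of this dump)
// reads and writes N floats, the buffers it needs are N * sizeof(float) bytes, as in
// this sketch; that access pattern is an assumption here.
#include <cuda_runtime.h>
#include <cstddef>

void allocate_float_buffers(int N, float** input, float** output) {
  cudaMalloc(reinterpret_cast<void**>(input),
             static_cast<std::size_t>(N) * sizeof(float));   // N elements, not N bytes
  cudaMalloc(reinterpret_cast<void**>(output),
             static_cast<std::size_t>(N) * sizeof(float));
}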
kernels_test.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gtest/gtest.h" #include <thrust/device_vector.h> #include "../src/kernels/kernels.cuh" TEST(KernelTest, TestNdiff2KernelShort) { auto nchans = 64; short *data, *filtered; hipMallocManaged(&data, 4 * nchans * sizeof(short)); hipMallocManaged(&filtered, 4 * nchans * sizeof(short)); /* * channel values: 1 1 2 2 -> (-1 * 1) + (-2 * 1) + (2 * 2) + (1 * 2) = 3 */ for (auto i = 0; i < 4 * nchans; i++) { if (i < 2 * nchans) { data[i] = 1; } else { data[i] = 2; } filtered[i] = 0; } auto nthreads = 256; auto nblocks = (4 * nchans + nthreads - 1) / nthreads; hipLaunchKernelGGL(( ndiff2_), dim3(nblocks), dim3(nthreads), 0, 0, 4 * nchans, nchans, data, filtered); hipDeviceSynchronize(); /* * filtered values at indices 0, 2, and 3 get 0, while channel value at index 1 gets 3 */ for (auto i = 0; i < 4 * nchans; i++) { EXPECT_EQ((i >= nchans && i < 2 * nchans) ? 3 : 0, filtered[i]); } // cleanup hipFree(data); hipFree(filtered); } TEST(KernelTest, TestNdiff2KernelFloat) { auto nchans = 64; float *data, *filtered; hipMallocManaged(&data, 4 * nchans * sizeof(float)); hipMallocManaged(&filtered, 4 * nchans * sizeof(float)); /* * channel values: 1 1 2 2 -> (-1 * 1) + (-2 * 1) + (2 * 2) + (1 * 2) = 3 */ for (auto i = 0; i < 4 * nchans; i++) { if (i < 2 * nchans) { data[i] = 1.0f; } else { data[i] = 2.0f; } filtered[i] = 0; } auto nthreads = 256; auto nblocks = (4 * nchans + nthreads - 1) / nthreads; hipLaunchKernelGGL(( ndiff2_), dim3(nblocks), dim3(nthreads), 0, 0, 4 * nchans, nchans, data, filtered); hipDeviceSynchronize(); /* * filtered values at indices 0, 2, and 3 get 0, while channel value at index 1 gets 3 */ for (auto i = 0; i < 4 * nchans; i++) { EXPECT_EQ((i >= nchans && i < 2 * nchans) ? 3.0f : 0.0f, filtered[i]); } // cleanup hipFree(data); hipFree(filtered); } TEST(KernelTest, TestNdiff2Short) { auto nchans = 64; auto n_frames = 4; short *data, *filtered; hipMallocManaged(&data, n_frames * nchans * sizeof(short)); hipMallocManaged(&filtered, n_frames * nchans * sizeof(short)); /* * channel values: 1 1 2 2 -> (-1 * 1) + (-2 * 1) + (2 * 2) + (1 * 2) = 3 */ for (auto i = 0; i < n_frames * nchans; i++) { if (i < 2 * nchans) { data[i] = 1; } else { data[i] = 2; } filtered[i] = 0; } auto nthreads = 256; auto nblocks = (n_frames * nchans + nthreads - 1) / nthreads; ndiff2(n_frames * nchans, nchans, data, filtered, nblocks, nthreads); /* * filtered values at indices 0, 2, and 3 get 0, while channel value at index 1 gets 3 */ for (auto i = 0; i < n_frames * nchans; i++) { EXPECT_EQ((i >= nchans && i < 2 * nchans) ? 3 : 0, filtered[i]); } // cleanup hipFree(data); hipFree(filtered); } /* * GIVEN a data `data_` of int16 and a constant detect `const_thresh` * TEST THAT values in `data_` which exceed `const_thresh` correspond to true * values in a boolean data `host_crossings_`. 
*/ TEST(KernelTest, FindCrossingsKernelShort) { auto n_channels = 100; auto n_frames = 100; auto n_samples = n_channels * n_frames; auto const_thresh = 9.0f; short *data; uint8_t *crossings; float *thresholds; hipMallocManaged(&data, n_samples * sizeof(short)); hipMallocManaged(&crossings, n_samples * sizeof(bool)); hipMallocManaged(&thresholds, n_channels * sizeof(float)); for (auto i = 0; i < n_channels; ++i) { thresholds[i] = const_thresh; } // column j gets all j's for (auto k = 0; k < n_samples; ++k) { data[k] = (short) (-k / n_channels); } // establish preconditions for the test for (auto k = 0; k < n_samples; k++) { EXPECT_FALSE(crossings[k]); if (k < n_channels * (const_thresh + 1)) { EXPECT_FALSE(data[k] < -const_thresh); } else { EXPECT_TRUE(data[k] < -const_thresh); } } // perform the thresholding auto n_threads = 256; auto n_blocks = (n_samples + n_threads - 1) / n_threads; hipLaunchKernelGGL(( find_crossings_), dim3(n_blocks), dim3(n_threads), 0, 0, n_samples, n_channels, data, thresholds, crossings); hipDeviceSynchronize(); // test host_crossings_ detected correctly for (auto k = 0; k < n_samples; k++) { if (k < n_channels * (const_thresh + 1)) { EXPECT_FALSE(crossings[k]); } else { EXPECT_TRUE(crossings[k]); } } // clean up hipFree(data); hipFree(crossings); hipFree(thresholds); } /* * GIVEN a data `data_` of float32 and a constant detect `const_thresh` * TEST THAT values in `data_` which exceed `const_thresh` correspond to true * values in a boolean data `host_crossings_`. */ TEST(KernelTest, FindCrossingsKernelFloat) { auto n_channels = 100; auto n_frames = 100; auto n_samples = n_channels * n_frames; auto const_thresh = 9.0f; float *data; uint8_t *crossings; float *thresholds; hipMallocManaged(&data, n_samples * sizeof(float)); hipMallocManaged(&crossings, n_samples * sizeof(bool)); hipMallocManaged(&thresholds, n_channels * sizeof(float)); for (auto i = 0; i < n_channels; ++i) { thresholds[i] = const_thresh; } // column j gets all j's for (auto k = 0; k < n_samples; ++k) { data[k] = (float) (-k / n_channels); // NOLINT(bugprone-integer-division) } // establish preconditions for the test for (auto k = 0; k < n_samples; k++) { EXPECT_FALSE(crossings[k]); if (k < n_channels * (const_thresh + 1)) { EXPECT_FALSE(data[k] < -const_thresh); } else { EXPECT_TRUE(data[k] < -const_thresh); } } // perform the thresholding auto n_threads = 256; auto n_blocks = (n_samples + n_threads - 1) / n_threads; hipLaunchKernelGGL(( find_crossings_), dim3(n_blocks), dim3(n_threads), 0, 0, n_samples, n_channels, data, thresholds, crossings); hipDeviceSynchronize(); // test host_crossings_ detected correctly for (auto k = 0; k < n_samples; k++) { if (k < n_channels * (const_thresh + 1)) { EXPECT_FALSE(crossings[k]); } else { EXPECT_TRUE(crossings[k]); } } // clean up hipFree(data); hipFree(crossings); hipFree(thresholds); } TEST(KernelTest, FindCrossingsShort) { auto n_channels = 100; auto n_frames = 100; auto n_samples = n_channels * n_frames; auto const_thresh = 9.0f; short *data; uint8_t *crossings; float *thresholds; hipMallocManaged(&data, n_samples * sizeof(short)); hipMallocManaged(&crossings, n_samples * sizeof(bool)); hipMallocManaged(&thresholds, n_channels * sizeof(float)); for (auto i = 0; i < n_channels; ++i) { thresholds[i] = const_thresh; } // column j gets all j's for (auto k = 0; k < n_samples; ++k) { data[k] = (short) (-k / n_channels); } // establish preconditions for the test for (auto k = 0; k < n_samples; k++) { EXPECT_FALSE(crossings[k]); if (k < n_channels * (const_thresh + 
1)) { EXPECT_FALSE(data[k] < -const_thresh); } else { EXPECT_TRUE(data[k] < -const_thresh); } } // perform the thresholding auto n_threads = 256; auto n_blocks = (n_samples + n_threads - 1) / n_threads; find_crossings(n_samples, n_channels, data, thresholds, crossings, n_blocks, n_threads); // test host_crossings_ detected correctly for (auto k = 0; k < n_samples; k++) { if (k < n_channels * (const_thresh + 1)) { EXPECT_FALSE(crossings[k]); } else { EXPECT_TRUE(crossings[k]); } } // clean up hipFree(data); hipFree(crossings); hipFree(thresholds); } TEST(KernelTest, TestMakeCovMatrix) { unsigned long n_obs = 11; unsigned int n_feats = 5; float *features; float *cov; hipMallocManaged(&features, n_obs * n_feats * sizeof(float)); hipMallocManaged(&cov, n_feats * n_feats * sizeof(float)); // store each observation in a row (row-major order) for (auto i = 0; i < n_feats; ++i) { for (auto j = 0; j < n_obs; ++j) { auto k = i * n_obs + j; features[k] = (float) (i + 1); } } // compute the covariance matrix CovMatrixArgs args{n_obs, n_feats, features, cov}; make_cov_matrix(args); auto base_val = (float) n_obs / ((float) n_obs - 1); for (auto i = 0; i < n_feats; ++i) { for (auto j = 0; j < n_feats; ++j) { auto k = i * n_obs + j; EXPECT_FLOAT_EQ((i + 1) * (j + 1) * base_val, args.cov_matrix[k]); } } hipFree(features); hipFree(cov); } TEST(KernelTest, CenterFeatures) { unsigned long n_obs = 11; unsigned int n_feats = 5; thrust::device_vector<float> features(n_feats * n_obs); thrust::sequence(features.begin(), features.end()); // center_ the features matrix CenterFeaturesArgs args{n_obs, n_feats, features}; center_features(args); for (auto i = 0; i < n_feats; ++i) { for (auto j = 0; j < n_obs; ++j) { auto k = i * n_obs + j; EXPECT_LT(std::abs(args.features[k] - (i - 2) * 11), 1e-5); } } } TEST(KernelTest, MakePVs) { uint32_t n_feats = 7; thrust::device_vector<float> mat(n_feats * n_feats); // generate an n = 7 Wilkinson eigenvalue test matrix // https://en.wikipedia.org/wiki/Wilkinson_matrix thrust::fill(mat.begin(), mat.end(), 0); for (auto i = 0; i < n_feats; ++i) { if (i < n_feats - 1) { auto j = i + 1; mat[i * n_feats + j] = 1.0; // i, j entry mat[j * n_feats + i] = 1.0; // j, i entry } auto diag = i * n_feats + i; if (i == 0 || i == n_feats - 1) { mat[diag] = 3.0; } else if (i == 1 || i == n_feats - 2) { mat[diag] = 2.0; } else if (i == 2 || i == n_feats - 3) { mat[diag] = 1.0; } } MakePVArgs args{n_feats, n_feats, thrust::raw_pointer_cast(mat.data())}; make_principal_vectors(args); // principal vectors are stored in mat in column-major order, // but may differ by a sign EXPECT_LT(std::abs(std::abs(-0.036139846) - std::abs(mat[0])), 1e-5); EXPECT_LT(std::abs(std::abs(0.14907272) - std::abs(mat[1])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.4296953) - std::abs(mat[2])), 1e-5); EXPECT_LT(std::abs(std::abs(0.76398057) - std::abs(mat[3])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.4296953) - std::abs(mat[4])), 1e-5); EXPECT_LT(std::abs(std::abs(0.14907272) - std::abs(mat[5])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.036139846) - std::abs(mat[6])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.14942925) - std::abs(mat[7])), 1e-5); EXPECT_LT(std::abs(std::abs(0.4082483) - std::abs(mat[8])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.5576775) - std::abs(mat[9])), 1e-5); EXPECT_LT(std::abs(std::abs(3.0957322e-15) - std::abs(mat[10])), 1e-5); EXPECT_LT(std::abs(std::abs(0.5576775) - std::abs(mat[11])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.4082483) - std::abs(mat[12])), 1e-5); EXPECT_LT(std::abs(std::abs(0.14942925) - 
std::abs(mat[13])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.25) - std::abs(mat[14])), 1e-5); EXPECT_LT(std::abs(std::abs(0.5) - std::abs(mat[15])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.25) - std::abs(mat[16])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.5) - std::abs(mat[17])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.25) - std::abs(mat[18])), 1e-5); EXPECT_LT(std::abs(std::abs(0.5) - std::abs(mat[19])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.25) - std::abs(mat[20])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.4082483) - std::abs(mat[21])), 1e-5); EXPECT_LT(std::abs(std::abs(0.4082483) - std::abs(mat[22])), 1e-5); EXPECT_LT(std::abs(std::abs(0.4082483) - std::abs(mat[23])), 1e-5); EXPECT_LT(std::abs(std::abs(-9.064933e-16) - std::abs(mat[24])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.4082483) - std::abs(mat[25])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.4082483) - std::abs(mat[26])), 1e-5); EXPECT_LT(std::abs(std::abs(0.4082483) - std::abs(mat[27])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.37990108) - std::abs(mat[28])), 1e-5); EXPECT_LT(std::abs(std::abs(0.24187228) - std::abs(mat[29])), 1e-5); EXPECT_LT(std::abs(std::abs(0.46778008) - std::abs(mat[30])), 1e-5); EXPECT_LT(std::abs(std::abs(0.39586553) - std::abs(mat[31])), 1e-5); EXPECT_LT(std::abs(std::abs(0.46778008) - std::abs(mat[32])), 1e-5); EXPECT_LT(std::abs(std::abs(0.24187228) - std::abs(mat[33])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.37990108) - std::abs(mat[34])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.5576775) - std::abs(mat[35])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.4082483) - std::abs(mat[36])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.14942925) - std::abs(mat[37])), 1e-5); EXPECT_LT(std::abs(std::abs(0) - std::abs(mat[38])), 1e-5); EXPECT_LT(std::abs(std::abs(0.14942925) - std::abs(mat[39])), 1e-5); EXPECT_LT(std::abs(std::abs(0.4082483) - std::abs(mat[40])), 1e-5); EXPECT_LT(std::abs(std::abs(0.5576775) - std::abs(mat[41])), 1e-5); EXPECT_LT(std::abs(std::abs(0.5402491) - std::abs(mat[42])), 1e-5); EXPECT_LT(std::abs(std::abs(0.4114306) - std::abs(mat[43])), 1e-5); EXPECT_LT(std::abs(std::abs(0.1845094) - std::abs(mat[44])), 1e-5); EXPECT_LT(std::abs(std::abs(0.09810267) - std::abs(mat[45])), 1e-5); EXPECT_LT(std::abs(std::abs(0.1845094) - std::abs(mat[46])), 1e-5); EXPECT_LT(std::abs(std::abs(0.4114306) - std::abs(mat[47])), 1e-5); EXPECT_LT(std::abs(std::abs(0.5402491) - std::abs(mat[48])), 1e-5); } TEST(KernelTest, ProjectOntoPVs) { // 3 principal components, 5 dimensions, 10 observations uint32_t q = 3, d = 5, N = 10; // observations matrix thrust::device_vector<float> obs(d * N); thrust::sequence(obs.begin(), obs.end()); // principal vectors thrust::device_vector<float> pvs(q * d); pvs[0] = 0.1f; pvs[1] = 0.6f; pvs[2] = -0.9f; pvs[3] = -1.0f; pvs[4] = 0.8f; pvs[5] = 0.4f; pvs[6] = -0.3f; pvs[7] = -0.3f; pvs[8] = 0.5f; pvs[9] = 0.5f; pvs[10] = 0.5f; pvs[11] = -0.7f; pvs[12] = 0.7f; pvs[13] = -0.3f; pvs[14] = 0.0f; ProjectOntoPVsArgs args{q, d, N, thrust::raw_pointer_cast(pvs.data()), thrust::raw_pointer_cast(obs.data())}; project_onto_pvs(args); EXPECT_LT(std::abs(-1.0 - obs[0]), 1e-5); EXPECT_LT(std::abs(2.6 - obs[1]), 1e-5); EXPECT_LT(std::abs(-0.2 - obs[2]), 1e-5); EXPECT_LT(std::abs(-3.0 - obs[3]), 1e-5); EXPECT_LT(std::abs(6.6 - obs[4]), 1e-5); EXPECT_LT(std::abs(0.8 - obs[5]), 1e-5); EXPECT_LT(std::abs(-5.0 - obs[6]), 1e-5); EXPECT_LT(std::abs(10.6 - obs[7]), 1e-5); EXPECT_LT(std::abs(1.8 - obs[8]), 1e-5); EXPECT_LT(std::abs(-7.0 - obs[9]), 1e-5); EXPECT_LT(std::abs(14.6 - obs[10]), 1e-5); EXPECT_LT(std::abs(2.8 - obs[11]), 1e-5); 
EXPECT_LT(std::abs(-9.0 - obs[12]), 1e-5); EXPECT_LT(std::abs(18.6 - obs[13]), 1e-5); EXPECT_LT(std::abs(3.8 - obs[14]), 1e-5); EXPECT_LT(std::abs(-11.0 - obs[15]), 1e-5); EXPECT_LT(std::abs(22.6 - obs[16]), 1e-5); EXPECT_LT(std::abs(4.8 - obs[17]), 1e-5); EXPECT_LT(std::abs(-13.0 - obs[18]), 1e-5); EXPECT_LT(std::abs(26.6 - obs[19]), 1e-5); EXPECT_LT(std::abs(5.8 - obs[20]), 1e-5); EXPECT_LT(std::abs(-15.0 - obs[21]), 1e-5); EXPECT_LT(std::abs(30.6 - obs[22]), 1e-5); EXPECT_LT(std::abs(6.8 - obs[23]), 1e-5); EXPECT_LT(std::abs(-17.0 - obs[24]), 1e-5); EXPECT_LT(std::abs(34.6 - obs[25]), 1e-5); EXPECT_LT(std::abs(7.8 - obs[26]), 1e-5); EXPECT_LT(std::abs(-19.0 - obs[27]), 1e-5); EXPECT_LT(std::abs(38.6 - obs[28]), 1e-5); EXPECT_LT(std::abs(8.8 - obs[29]), 1e-5); }
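// CPU reference (illustrative) of the ndiff2 stencil that the tests above exercise.
// From the worked example in the test comments (frames 1 1 2 2 -> 3 at frame 1),
// each channel is filtered with the per-frame weights (-1, -2, +2, +1):
//   filtered[t][c] = -x[t-1][c] - 2*x[t][c] + 2*x[t+1][c] + x[t+2][c],
// and samples whose stencil would leave the buffer stay 0. The authoritative
// implementation (and its exact boundary handling) is ndiff2_ in kernels.cuh.
template <typename T>
void ndiff2_reference(int n_frames, int n_channels, const T* data, T* filtered) {
  for (int t = 0; t < n_frames; ++t) {
    for (int c = 0; c < n_channels; ++c) {
      const int k = t * n_channels + c;
      if (t >= 1 && t + 2 < n_frames) {
        filtered[k] = -data[k - n_channels] - 2 * data[k]
                      + 2 * data[k + n_channels] + data[k + 2 * n_channels];
      } else {
        filtered[k] = 0;
      }
    }
  }
}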
kernels_test.cu
#include "gtest/gtest.h" #include <thrust/device_vector.h> #include "../src/kernels/kernels.cuh" TEST(KernelTest, TestNdiff2KernelShort) { auto nchans = 64; short *data, *filtered; cudaMallocManaged(&data, 4 * nchans * sizeof(short)); cudaMallocManaged(&filtered, 4 * nchans * sizeof(short)); /* * channel values: 1 1 2 2 -> (-1 * 1) + (-2 * 1) + (2 * 2) + (1 * 2) = 3 */ for (auto i = 0; i < 4 * nchans; i++) { if (i < 2 * nchans) { data[i] = 1; } else { data[i] = 2; } filtered[i] = 0; } auto nthreads = 256; auto nblocks = (4 * nchans + nthreads - 1) / nthreads; ndiff2_<<<nblocks, nthreads>>>(4 * nchans, nchans, data, filtered); cudaDeviceSynchronize(); /* * filtered values at indices 0, 2, and 3 get 0, while channel value at index 1 gets 3 */ for (auto i = 0; i < 4 * nchans; i++) { EXPECT_EQ((i >= nchans && i < 2 * nchans) ? 3 : 0, filtered[i]); } // cleanup cudaFree(data); cudaFree(filtered); } TEST(KernelTest, TestNdiff2KernelFloat) { auto nchans = 64; float *data, *filtered; cudaMallocManaged(&data, 4 * nchans * sizeof(float)); cudaMallocManaged(&filtered, 4 * nchans * sizeof(float)); /* * channel values: 1 1 2 2 -> (-1 * 1) + (-2 * 1) + (2 * 2) + (1 * 2) = 3 */ for (auto i = 0; i < 4 * nchans; i++) { if (i < 2 * nchans) { data[i] = 1.0f; } else { data[i] = 2.0f; } filtered[i] = 0; } auto nthreads = 256; auto nblocks = (4 * nchans + nthreads - 1) / nthreads; ndiff2_<<<nblocks, nthreads>>>(4 * nchans, nchans, data, filtered); cudaDeviceSynchronize(); /* * filtered values at indices 0, 2, and 3 get 0, while channel value at index 1 gets 3 */ for (auto i = 0; i < 4 * nchans; i++) { EXPECT_EQ((i >= nchans && i < 2 * nchans) ? 3.0f : 0.0f, filtered[i]); } // cleanup cudaFree(data); cudaFree(filtered); } TEST(KernelTest, TestNdiff2Short) { auto nchans = 64; auto n_frames = 4; short *data, *filtered; cudaMallocManaged(&data, n_frames * nchans * sizeof(short)); cudaMallocManaged(&filtered, n_frames * nchans * sizeof(short)); /* * channel values: 1 1 2 2 -> (-1 * 1) + (-2 * 1) + (2 * 2) + (1 * 2) = 3 */ for (auto i = 0; i < n_frames * nchans; i++) { if (i < 2 * nchans) { data[i] = 1; } else { data[i] = 2; } filtered[i] = 0; } auto nthreads = 256; auto nblocks = (n_frames * nchans + nthreads - 1) / nthreads; ndiff2(n_frames * nchans, nchans, data, filtered, nblocks, nthreads); /* * filtered values at indices 0, 2, and 3 get 0, while channel value at index 1 gets 3 */ for (auto i = 0; i < n_frames * nchans; i++) { EXPECT_EQ((i >= nchans && i < 2 * nchans) ? 3 : 0, filtered[i]); } // cleanup cudaFree(data); cudaFree(filtered); } /* * GIVEN a data `data_` of int16 and a constant detect `const_thresh` * TEST THAT values in `data_` which exceed `const_thresh` correspond to true * values in a boolean data `host_crossings_`. 
*/ TEST(KernelTest, FindCrossingsKernelShort) { auto n_channels = 100; auto n_frames = 100; auto n_samples = n_channels * n_frames; auto const_thresh = 9.0f; short *data; uint8_t *crossings; float *thresholds; cudaMallocManaged(&data, n_samples * sizeof(short)); cudaMallocManaged(&crossings, n_samples * sizeof(bool)); cudaMallocManaged(&thresholds, n_channels * sizeof(float)); for (auto i = 0; i < n_channels; ++i) { thresholds[i] = const_thresh; } // column j gets all j's for (auto k = 0; k < n_samples; ++k) { data[k] = (short) (-k / n_channels); } // establish preconditions for the test for (auto k = 0; k < n_samples; k++) { EXPECT_FALSE(crossings[k]); if (k < n_channels * (const_thresh + 1)) { EXPECT_FALSE(data[k] < -const_thresh); } else { EXPECT_TRUE(data[k] < -const_thresh); } } // perform the thresholding auto n_threads = 256; auto n_blocks = (n_samples + n_threads - 1) / n_threads; find_crossings_<<<n_blocks, n_threads>>>(n_samples, n_channels, data, thresholds, crossings); cudaDeviceSynchronize(); // test host_crossings_ detected correctly for (auto k = 0; k < n_samples; k++) { if (k < n_channels * (const_thresh + 1)) { EXPECT_FALSE(crossings[k]); } else { EXPECT_TRUE(crossings[k]); } } // clean up cudaFree(data); cudaFree(crossings); cudaFree(thresholds); } /* * GIVEN a data `data_` of float32 and a constant detect `const_thresh` * TEST THAT values in `data_` which exceed `const_thresh` correspond to true * values in a boolean data `host_crossings_`. */ TEST(KernelTest, FindCrossingsKernelFloat) { auto n_channels = 100; auto n_frames = 100; auto n_samples = n_channels * n_frames; auto const_thresh = 9.0f; float *data; uint8_t *crossings; float *thresholds; cudaMallocManaged(&data, n_samples * sizeof(float)); cudaMallocManaged(&crossings, n_samples * sizeof(bool)); cudaMallocManaged(&thresholds, n_channels * sizeof(float)); for (auto i = 0; i < n_channels; ++i) { thresholds[i] = const_thresh; } // column j gets all j's for (auto k = 0; k < n_samples; ++k) { data[k] = (float) (-k / n_channels); // NOLINT(bugprone-integer-division) } // establish preconditions for the test for (auto k = 0; k < n_samples; k++) { EXPECT_FALSE(crossings[k]); if (k < n_channels * (const_thresh + 1)) { EXPECT_FALSE(data[k] < -const_thresh); } else { EXPECT_TRUE(data[k] < -const_thresh); } } // perform the thresholding auto n_threads = 256; auto n_blocks = (n_samples + n_threads - 1) / n_threads; find_crossings_<<<n_blocks, n_threads>>>(n_samples, n_channels, data, thresholds, crossings); cudaDeviceSynchronize(); // test host_crossings_ detected correctly for (auto k = 0; k < n_samples; k++) { if (k < n_channels * (const_thresh + 1)) { EXPECT_FALSE(crossings[k]); } else { EXPECT_TRUE(crossings[k]); } } // clean up cudaFree(data); cudaFree(crossings); cudaFree(thresholds); } TEST(KernelTest, FindCrossingsShort) { auto n_channels = 100; auto n_frames = 100; auto n_samples = n_channels * n_frames; auto const_thresh = 9.0f; short *data; uint8_t *crossings; float *thresholds; cudaMallocManaged(&data, n_samples * sizeof(short)); cudaMallocManaged(&crossings, n_samples * sizeof(bool)); cudaMallocManaged(&thresholds, n_channels * sizeof(float)); for (auto i = 0; i < n_channels; ++i) { thresholds[i] = const_thresh; } // column j gets all j's for (auto k = 0; k < n_samples; ++k) { data[k] = (short) (-k / n_channels); } // establish preconditions for the test for (auto k = 0; k < n_samples; k++) { EXPECT_FALSE(crossings[k]); if (k < n_channels * (const_thresh + 1)) { EXPECT_FALSE(data[k] < -const_thresh); } else { 
EXPECT_TRUE(data[k] < -const_thresh); } } // perform the thresholding auto n_threads = 256; auto n_blocks = (n_samples + n_threads - 1) / n_threads; find_crossings(n_samples, n_channels, data, thresholds, crossings, n_blocks, n_threads); // test host_crossings_ detected correctly for (auto k = 0; k < n_samples; k++) { if (k < n_channels * (const_thresh + 1)) { EXPECT_FALSE(crossings[k]); } else { EXPECT_TRUE(crossings[k]); } } // clean up cudaFree(data); cudaFree(crossings); cudaFree(thresholds); } TEST(KernelTest, TestMakeCovMatrix) { unsigned long n_obs = 11; unsigned int n_feats = 5; float *features; float *cov; cudaMallocManaged(&features, n_obs * n_feats * sizeof(float)); cudaMallocManaged(&cov, n_feats * n_feats * sizeof(float)); // store each observation in a row (row-major order) for (auto i = 0; i < n_feats; ++i) { for (auto j = 0; j < n_obs; ++j) { auto k = i * n_obs + j; features[k] = (float) (i + 1); } } // compute the covariance matrix CovMatrixArgs args{n_obs, n_feats, features, cov}; make_cov_matrix(args); auto base_val = (float) n_obs / ((float) n_obs - 1); for (auto i = 0; i < n_feats; ++i) { for (auto j = 0; j < n_feats; ++j) { auto k = i * n_obs + j; EXPECT_FLOAT_EQ((i + 1) * (j + 1) * base_val, args.cov_matrix[k]); } } cudaFree(features); cudaFree(cov); } TEST(KernelTest, CenterFeatures) { unsigned long n_obs = 11; unsigned int n_feats = 5; thrust::device_vector<float> features(n_feats * n_obs); thrust::sequence(features.begin(), features.end()); // center_ the features matrix CenterFeaturesArgs args{n_obs, n_feats, features}; center_features(args); for (auto i = 0; i < n_feats; ++i) { for (auto j = 0; j < n_obs; ++j) { auto k = i * n_obs + j; EXPECT_LT(std::abs(args.features[k] - (i - 2) * 11), 1e-5); } } } TEST(KernelTest, MakePVs) { uint32_t n_feats = 7; thrust::device_vector<float> mat(n_feats * n_feats); // generate an n = 7 Wilkinson eigenvalue test matrix // https://en.wikipedia.org/wiki/Wilkinson_matrix thrust::fill(mat.begin(), mat.end(), 0); for (auto i = 0; i < n_feats; ++i) { if (i < n_feats - 1) { auto j = i + 1; mat[i * n_feats + j] = 1.0; // i, j entry mat[j * n_feats + i] = 1.0; // j, i entry } auto diag = i * n_feats + i; if (i == 0 || i == n_feats - 1) { mat[diag] = 3.0; } else if (i == 1 || i == n_feats - 2) { mat[diag] = 2.0; } else if (i == 2 || i == n_feats - 3) { mat[diag] = 1.0; } } MakePVArgs args{n_feats, n_feats, thrust::raw_pointer_cast(mat.data())}; make_principal_vectors(args); // principal vectors are stored in mat in column-major order, // but may differ by a sign EXPECT_LT(std::abs(std::abs(-0.036139846) - std::abs(mat[0])), 1e-5); EXPECT_LT(std::abs(std::abs(0.14907272) - std::abs(mat[1])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.4296953) - std::abs(mat[2])), 1e-5); EXPECT_LT(std::abs(std::abs(0.76398057) - std::abs(mat[3])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.4296953) - std::abs(mat[4])), 1e-5); EXPECT_LT(std::abs(std::abs(0.14907272) - std::abs(mat[5])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.036139846) - std::abs(mat[6])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.14942925) - std::abs(mat[7])), 1e-5); EXPECT_LT(std::abs(std::abs(0.4082483) - std::abs(mat[8])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.5576775) - std::abs(mat[9])), 1e-5); EXPECT_LT(std::abs(std::abs(3.0957322e-15) - std::abs(mat[10])), 1e-5); EXPECT_LT(std::abs(std::abs(0.5576775) - std::abs(mat[11])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.4082483) - std::abs(mat[12])), 1e-5); EXPECT_LT(std::abs(std::abs(0.14942925) - std::abs(mat[13])), 1e-5); 
EXPECT_LT(std::abs(std::abs(-0.25) - std::abs(mat[14])), 1e-5); EXPECT_LT(std::abs(std::abs(0.5) - std::abs(mat[15])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.25) - std::abs(mat[16])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.5) - std::abs(mat[17])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.25) - std::abs(mat[18])), 1e-5); EXPECT_LT(std::abs(std::abs(0.5) - std::abs(mat[19])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.25) - std::abs(mat[20])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.4082483) - std::abs(mat[21])), 1e-5); EXPECT_LT(std::abs(std::abs(0.4082483) - std::abs(mat[22])), 1e-5); EXPECT_LT(std::abs(std::abs(0.4082483) - std::abs(mat[23])), 1e-5); EXPECT_LT(std::abs(std::abs(-9.064933e-16) - std::abs(mat[24])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.4082483) - std::abs(mat[25])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.4082483) - std::abs(mat[26])), 1e-5); EXPECT_LT(std::abs(std::abs(0.4082483) - std::abs(mat[27])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.37990108) - std::abs(mat[28])), 1e-5); EXPECT_LT(std::abs(std::abs(0.24187228) - std::abs(mat[29])), 1e-5); EXPECT_LT(std::abs(std::abs(0.46778008) - std::abs(mat[30])), 1e-5); EXPECT_LT(std::abs(std::abs(0.39586553) - std::abs(mat[31])), 1e-5); EXPECT_LT(std::abs(std::abs(0.46778008) - std::abs(mat[32])), 1e-5); EXPECT_LT(std::abs(std::abs(0.24187228) - std::abs(mat[33])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.37990108) - std::abs(mat[34])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.5576775) - std::abs(mat[35])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.4082483) - std::abs(mat[36])), 1e-5); EXPECT_LT(std::abs(std::abs(-0.14942925) - std::abs(mat[37])), 1e-5); EXPECT_LT(std::abs(std::abs(0) - std::abs(mat[38])), 1e-5); EXPECT_LT(std::abs(std::abs(0.14942925) - std::abs(mat[39])), 1e-5); EXPECT_LT(std::abs(std::abs(0.4082483) - std::abs(mat[40])), 1e-5); EXPECT_LT(std::abs(std::abs(0.5576775) - std::abs(mat[41])), 1e-5); EXPECT_LT(std::abs(std::abs(0.5402491) - std::abs(mat[42])), 1e-5); EXPECT_LT(std::abs(std::abs(0.4114306) - std::abs(mat[43])), 1e-5); EXPECT_LT(std::abs(std::abs(0.1845094) - std::abs(mat[44])), 1e-5); EXPECT_LT(std::abs(std::abs(0.09810267) - std::abs(mat[45])), 1e-5); EXPECT_LT(std::abs(std::abs(0.1845094) - std::abs(mat[46])), 1e-5); EXPECT_LT(std::abs(std::abs(0.4114306) - std::abs(mat[47])), 1e-5); EXPECT_LT(std::abs(std::abs(0.5402491) - std::abs(mat[48])), 1e-5); } TEST(KernelTest, ProjectOntoPVs) { // 3 principal components, 5 dimensions, 10 observations uint32_t q = 3, d = 5, N = 10; // observations matrix thrust::device_vector<float> obs(d * N); thrust::sequence(obs.begin(), obs.end()); // principal vectors thrust::device_vector<float> pvs(q * d); pvs[0] = 0.1f; pvs[1] = 0.6f; pvs[2] = -0.9f; pvs[3] = -1.0f; pvs[4] = 0.8f; pvs[5] = 0.4f; pvs[6] = -0.3f; pvs[7] = -0.3f; pvs[8] = 0.5f; pvs[9] = 0.5f; pvs[10] = 0.5f; pvs[11] = -0.7f; pvs[12] = 0.7f; pvs[13] = -0.3f; pvs[14] = 0.0f; ProjectOntoPVsArgs args{q, d, N, thrust::raw_pointer_cast(pvs.data()), thrust::raw_pointer_cast(obs.data())}; project_onto_pvs(args); EXPECT_LT(std::abs(-1.0 - obs[0]), 1e-5); EXPECT_LT(std::abs(2.6 - obs[1]), 1e-5); EXPECT_LT(std::abs(-0.2 - obs[2]), 1e-5); EXPECT_LT(std::abs(-3.0 - obs[3]), 1e-5); EXPECT_LT(std::abs(6.6 - obs[4]), 1e-5); EXPECT_LT(std::abs(0.8 - obs[5]), 1e-5); EXPECT_LT(std::abs(-5.0 - obs[6]), 1e-5); EXPECT_LT(std::abs(10.6 - obs[7]), 1e-5); EXPECT_LT(std::abs(1.8 - obs[8]), 1e-5); EXPECT_LT(std::abs(-7.0 - obs[9]), 1e-5); EXPECT_LT(std::abs(14.6 - obs[10]), 1e-5); EXPECT_LT(std::abs(2.8 - obs[11]), 1e-5); EXPECT_LT(std::abs(-9.0 - 
obs[12]), 1e-5); EXPECT_LT(std::abs(18.6 - obs[13]), 1e-5); EXPECT_LT(std::abs(3.8 - obs[14]), 1e-5); EXPECT_LT(std::abs(-11.0 - obs[15]), 1e-5); EXPECT_LT(std::abs(22.6 - obs[16]), 1e-5); EXPECT_LT(std::abs(4.8 - obs[17]), 1e-5); EXPECT_LT(std::abs(-13.0 - obs[18]), 1e-5); EXPECT_LT(std::abs(26.6 - obs[19]), 1e-5); EXPECT_LT(std::abs(5.8 - obs[20]), 1e-5); EXPECT_LT(std::abs(-15.0 - obs[21]), 1e-5); EXPECT_LT(std::abs(30.6 - obs[22]), 1e-5); EXPECT_LT(std::abs(6.8 - obs[23]), 1e-5); EXPECT_LT(std::abs(-17.0 - obs[24]), 1e-5); EXPECT_LT(std::abs(34.6 - obs[25]), 1e-5); EXPECT_LT(std::abs(7.8 - obs[26]), 1e-5); EXPECT_LT(std::abs(-19.0 - obs[27]), 1e-5); EXPECT_LT(std::abs(38.6 - obs[28]), 1e-5); EXPECT_LT(std::abs(8.8 - obs[29]), 1e-5); }
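The test file above follows one pattern throughout: allocate buffers with cudaMallocManaged, initialize them on the host, launch the kernel (or its host wrapper), cudaDeviceSynchronize, then assert on the same pointers from the host. Two fragile spots worth noting: the FindCrossings* preconditions read crossings before anything has written it, so they rely on the managed allocation happening to come back zero-filled, and TestMakeCovMatrix checks the n_feats x n_feats covariance buffer with index i * n_obs + j, which can step past its end. A minimal sketch of the same managed-memory test pattern with explicit initialization follows; scale_ and the test itself are hypothetical and not part of the file above.

// Illustrative sketch only; scale_ and this test are hypothetical, not part of the recorded file.
#include "gtest/gtest.h"
#include <cuda_runtime.h>

__global__ void scale_(int n, float factor, float *data) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    data[i] *= factor;
  }
}

TEST(KernelTestSketch, ScaleKernel) {
  auto n = 1024;
  float *data;
  ASSERT_EQ(cudaSuccess, cudaMallocManaged(&data, n * sizeof(float)));
  for (auto i = 0; i < n; i++) {
    data[i] = 1.0f;  // explicit initialization instead of relying on zeroed pages
  }

  auto nthreads = 256;
  auto nblocks = (n + nthreads - 1) / nthreads;
  scale_<<<nblocks, nthreads>>>(n, 2.0f, data);
  ASSERT_EQ(cudaSuccess, cudaDeviceSynchronize());  // kernel launches are asynchronous

  for (auto i = 0; i < n; i++) {
    EXPECT_FLOAT_EQ(2.0f, data[i]);
  }
  cudaFree(data);
}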
9418a9e112ff419d68ec9e4ab94d0ec4e3e7451a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ============================================================================ Name : sorting_segments.cu Author : Rafael Schmid Version : Copyright : Your copyright notice Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU ============================================================================ */ #include <hipcub/hipcub.hpp> #include <hipcub/hipcub.hpp> #include <hipcub/hipcub.hpp> #include <iostream> typedef unsigned int uint; #ifndef ELAPSED_TIME #define ELAPSED_TIME 0 #endif #ifndef BLOCK_SIZE #define BLOCK_SIZE 512 #endif template <typename T> struct Plus { __host__ __device__ T operator()(const T x, const T y) { return x + y; } }; template <typename T> struct Minus { __host__ __device__ T operator()(const T x, const T y) { return x - y; } }; template<typename Op> __global__ void adjustment(uint* d_vec, uint* d_seg, uint num_of_elements, uint* d_max ){ int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < num_of_elements) { uint mostSignificantBit = (uint)log2((double)*d_max) + 1; uint segIndex = d_seg[id] << mostSignificantBit; Op op = Op(); d_vec[id] = op(d_vec[id], segIndex); } } void cudaTest(hipError_t error) { if (error != hipSuccess) { printf("cuda returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit (EXIT_FAILURE); } hipError_t errSync = hipGetLastError(); hipError_t errAsync = hipDeviceSynchronize(); if (errSync != hipSuccess) printf("1: Sync kernel error: %s\n", hipGetErrorString(errSync)); if (errAsync != hipSuccess) printf("1: Async kernel error: %s\n", hipGetErrorString(errAsync)); } void print(uint* host_data, uint n) { std::cout << "\n"; for (uint i = 0; i < n; i++) { std::cout << host_data[i] << " "; } std::cout << "\n"; } int main(void) { uint num_of_segments; uint num_of_elements; uint i; scanf("%d", &num_of_segments); uint mem_size_seg = sizeof(uint) * (num_of_segments + 1); uint *h_seg_aux = (uint *) malloc(mem_size_seg); for (i = 0; i < num_of_segments + 1; i++) scanf("%d", &h_seg_aux[i]); scanf("%d", &num_of_elements); int mem_size_vec = sizeof(uint) * num_of_elements; uint *h_vec = (uint *) malloc(mem_size_vec); uint *h_value = (uint *) malloc(mem_size_vec); for (i = 0; i < num_of_elements; i++) { scanf("%d", &h_vec[i]); h_value[i] = i; } uint *h_seg = (uint *) malloc(mem_size_vec); for (i = 0; i < num_of_segments; i++) { for (uint j = h_seg_aux[i]; j < h_seg_aux[i + 1]; j++) { h_seg[j] = i; } } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); uint *d_value, *d_value_out, *d_vec, *d_vec_out, *d_max, *d_seg; void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; uint* max_val = (uint *) malloc(sizeof(uint)); cudaTest(hipMalloc((void **) &d_max, sizeof(uint))); cudaTest(hipMalloc((void **) &d_vec, mem_size_vec)); cudaTest(hipMalloc((void **) &d_seg, mem_size_vec)); cudaTest(hipMalloc((void **) &d_value, mem_size_vec)); cudaTest(hipMalloc((void **) &d_vec_out, mem_size_vec)); cudaTest(hipMalloc((void **) &d_value_out, mem_size_vec)); cudaTest(hipMemcpy(d_value, h_value, mem_size_vec, hipMemcpyHostToDevice)); cudaTest(hipMemcpy(d_seg, h_seg, mem_size_vec, hipMemcpyHostToDevice)); void *d_temp = NULL; size_t temp_bytes = 0; int grid = ((num_of_elements-1)/BLOCK_SIZE) + 1; for (uint i = 0; i < EXECUTIONS; i++) { cudaTest(hipMemcpy(d_vec, h_vec, mem_size_vec, hipMemcpyHostToDevice)); /* * maximum element of the array. 
*/ hipEventRecord(start); hipcub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_vec, d_max, num_of_elements); hipMalloc(&d_temp_storage, temp_storage_bytes); // Allocate temporary storage hipcub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_vec, d_max, num_of_elements); // Run max-reduction /* * add prefix to the elements */ hipLaunchKernelGGL(( adjustment<Plus<uint>>) , dim3(grid), dim3(BLOCK_SIZE), 0, 0, d_vec, d_seg, num_of_elements, d_max); /* * sort the vector */ hipcub::DeviceRadixSort::SortPairs(d_temp, temp_bytes, d_vec, d_vec_out, d_value, d_value_out, num_of_elements); hipMalloc((void **) &d_temp, temp_bytes); hipcub::DeviceRadixSort::SortPairs(d_temp, temp_bytes, d_vec, d_vec_out, d_value, d_value_out, num_of_elements); hipError_t errSync = hipGetLastError(); hipError_t errAsync = hipDeviceSynchronize(); if (errSync != hipSuccess) printf("4: Sync kernel error: %s\n", hipGetErrorString(errSync)); if (errAsync != hipSuccess) printf("4: Async kernel error: %s\n", hipGetErrorString(errAsync)); hipLaunchKernelGGL(( adjustment<Minus<uint>>) , dim3(grid), dim3(BLOCK_SIZE), 0, 0, d_vec_out, d_seg, num_of_elements, d_max); hipEventRecord(stop); hipEventSynchronize(stop); if (ELAPSED_TIME == 1) { float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); std::cout << milliseconds << "\n"; } hipFree(d_temp_storage); temp_storage_bytes = 0; d_temp_storage = NULL; hipFree(d_temp); temp_bytes = 0; d_temp = NULL; hipDeviceSynchronize(); } hipMemcpy(h_vec, d_vec_out, mem_size_vec, hipMemcpyDeviceToHost); hipFree(d_max); hipFree(d_seg); hipFree(d_vec); hipFree(d_vec_out); hipFree(d_value); hipFree(d_value_out); if (ELAPSED_TIME != 1) { print(h_vec, num_of_elements); } free(h_seg_aux); free(h_seg); free(h_vec); free(h_value); return 0; }
9418a9e112ff419d68ec9e4ab94d0ec4e3e7451a.cu
/* ============================================================================ Name : sorting_segments.cu Author : Rafael Schmid Version : Copyright : Your copyright notice Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU ============================================================================ */ #include <cub/util_allocator.cuh> #include <cub/device/device_radix_sort.cuh> #include <cub/device/device_reduce.cuh> #include <iostream> typedef unsigned int uint; #ifndef ELAPSED_TIME #define ELAPSED_TIME 0 #endif #ifndef BLOCK_SIZE #define BLOCK_SIZE 512 #endif template <typename T> struct Plus { __host__ __device__ T operator()(const T x, const T y) { return x + y; } }; template <typename T> struct Minus { __host__ __device__ T operator()(const T x, const T y) { return x - y; } }; template<typename Op> __global__ void adjustment(uint* d_vec, uint* d_seg, uint num_of_elements, uint* d_max ){ int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < num_of_elements) { uint mostSignificantBit = (uint)log2((double)*d_max) + 1; uint segIndex = d_seg[id] << mostSignificantBit; Op op = Op(); d_vec[id] = op(d_vec[id], segIndex); } } void cudaTest(cudaError_t error) { if (error != cudaSuccess) { printf("cuda returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); exit (EXIT_FAILURE); } cudaError_t errSync = cudaGetLastError(); cudaError_t errAsync = cudaDeviceSynchronize(); if (errSync != cudaSuccess) printf("1: Sync kernel error: %s\n", cudaGetErrorString(errSync)); if (errAsync != cudaSuccess) printf("1: Async kernel error: %s\n", cudaGetErrorString(errAsync)); } void print(uint* host_data, uint n) { std::cout << "\n"; for (uint i = 0; i < n; i++) { std::cout << host_data[i] << " "; } std::cout << "\n"; } int main(void) { uint num_of_segments; uint num_of_elements; uint i; scanf("%d", &num_of_segments); uint mem_size_seg = sizeof(uint) * (num_of_segments + 1); uint *h_seg_aux = (uint *) malloc(mem_size_seg); for (i = 0; i < num_of_segments + 1; i++) scanf("%d", &h_seg_aux[i]); scanf("%d", &num_of_elements); int mem_size_vec = sizeof(uint) * num_of_elements; uint *h_vec = (uint *) malloc(mem_size_vec); uint *h_value = (uint *) malloc(mem_size_vec); for (i = 0; i < num_of_elements; i++) { scanf("%d", &h_vec[i]); h_value[i] = i; } uint *h_seg = (uint *) malloc(mem_size_vec); for (i = 0; i < num_of_segments; i++) { for (uint j = h_seg_aux[i]; j < h_seg_aux[i + 1]; j++) { h_seg[j] = i; } } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); uint *d_value, *d_value_out, *d_vec, *d_vec_out, *d_max, *d_seg; void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; uint* max_val = (uint *) malloc(sizeof(uint)); cudaTest(cudaMalloc((void **) &d_max, sizeof(uint))); cudaTest(cudaMalloc((void **) &d_vec, mem_size_vec)); cudaTest(cudaMalloc((void **) &d_seg, mem_size_vec)); cudaTest(cudaMalloc((void **) &d_value, mem_size_vec)); cudaTest(cudaMalloc((void **) &d_vec_out, mem_size_vec)); cudaTest(cudaMalloc((void **) &d_value_out, mem_size_vec)); cudaTest(cudaMemcpy(d_value, h_value, mem_size_vec, cudaMemcpyHostToDevice)); cudaTest(cudaMemcpy(d_seg, h_seg, mem_size_vec, cudaMemcpyHostToDevice)); void *d_temp = NULL; size_t temp_bytes = 0; int grid = ((num_of_elements-1)/BLOCK_SIZE) + 1; for (uint i = 0; i < EXECUTIONS; i++) { cudaTest(cudaMemcpy(d_vec, h_vec, mem_size_vec, cudaMemcpyHostToDevice)); /* * maximum element of the array. 
*/ cudaEventRecord(start); cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_vec, d_max, num_of_elements); cudaMalloc(&d_temp_storage, temp_storage_bytes); // Allocate temporary storage cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_vec, d_max, num_of_elements); // Run max-reduction /* * add prefix to the elements */ adjustment<Plus<uint>> <<< grid, BLOCK_SIZE>>>(d_vec, d_seg, num_of_elements, d_max); /* * sort the vector */ cub::DeviceRadixSort::SortPairs(d_temp, temp_bytes, d_vec, d_vec_out, d_value, d_value_out, num_of_elements); cudaMalloc((void **) &d_temp, temp_bytes); cub::DeviceRadixSort::SortPairs(d_temp, temp_bytes, d_vec, d_vec_out, d_value, d_value_out, num_of_elements); cudaError_t errSync = cudaGetLastError(); cudaError_t errAsync = cudaDeviceSynchronize(); if (errSync != cudaSuccess) printf("4: Sync kernel error: %s\n", cudaGetErrorString(errSync)); if (errAsync != cudaSuccess) printf("4: Async kernel error: %s\n", cudaGetErrorString(errAsync)); adjustment<Minus<uint>> <<< grid, BLOCK_SIZE>>>(d_vec_out, d_seg, num_of_elements, d_max); cudaEventRecord(stop); cudaEventSynchronize(stop); if (ELAPSED_TIME == 1) { float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); std::cout << milliseconds << "\n"; } cudaFree(d_temp_storage); temp_storage_bytes = 0; d_temp_storage = NULL; cudaFree(d_temp); temp_bytes = 0; d_temp = NULL; cudaDeviceSynchronize(); } cudaMemcpy(h_vec, d_vec_out, mem_size_vec, cudaMemcpyDeviceToHost); cudaFree(d_max); cudaFree(d_seg); cudaFree(d_vec); cudaFree(d_vec_out); cudaFree(d_value); cudaFree(d_value_out); if (ELAPSED_TIME != 1) { print(h_vec, num_of_elements); } free(h_seg_aux); free(h_seg); free(h_vec); free(h_value); return 0; }
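The pair above turns cub::DeviceRadixSort::SortPairs into a segmented sort by folding each element's segment id into the high bits of its key: adjustment<Plus> adds d_seg[id] shifted past the highest bit used by the global maximum (one plus floor(log2(max))), the ordinary radix sort then groups keys segment by segment, and adjustment<Minus> strips the prefix off again. A host-only sketch of that arithmetic follows; the values and helper code are mine, not from the file.

// Host-only sketch of the key-composition trick used above; not taken from the file.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

typedef unsigned int uint;

int main() {
  std::vector<uint> keys = {7, 3, 9, 1, 4, 8};
  std::vector<uint> seg  = {1, 1, 0, 0, 2, 2};   // segment id per key

  uint max_key = *std::max_element(keys.begin(), keys.end());
  uint msb = (uint)std::log2((double)max_key) + 1;   // same formula as adjustment<>

  // Plus-adjustment: prefix each key with its segment id.
  for (size_t i = 0; i < keys.size(); ++i) keys[i] += seg[i] << msb;

  // An ordinary (unsegmented) sort of the composed keys yields a segment-by-segment order.
  std::vector<size_t> idx(keys.size());
  for (size_t i = 0; i < idx.size(); ++i) idx[i] = i;
  std::sort(idx.begin(), idx.end(),
            [&](size_t a, size_t b) { return keys[a] < keys[b]; });

  // Minus-adjustment: strip the prefix again.
  for (size_t i = 0; i < keys.size(); ++i) keys[i] -= seg[i] << msb;

  for (size_t i : idx) printf("seg %u key %u\n", seg[i], keys[i]);
  return 0;
}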
dd2deb5779a2695d020d99588cc709cc2d72352b.hip
// !!! This is a file automatically generated by hipify!!! // Solve the Laplace equation on a 2D lattice with boundary conditions. // // compile with the following command: // // (for GTX970) // nvcc -arch=compute_52 -code=sm_52,sm_52 -O3 -m64 -o laplace laplace.cu // // (for GTX1060) // nvcc -arch=compute_61 -code=sm_61,sm_61 -O3 -m64 -o laplace laplace.cu // Includes #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> // field variables float* h_new; // host field vectors float* h_old; float* h_C; // result of diff*diff of each block float* g_new; float* d_new; // device field vectors float* d_old; float* d_C; int MAX=1000000; // maximum iterations double eps=1.0e-10; // stopping criterion __global__ void laplacian(float* phi_old, float* phi_new, float* C, bool flag) { extern __shared__ float cache[]; float t, l, r, b; // top, left, right, bottom float diff; int site, ym1, xm1, xp1, yp1; int Nx = blockDim.x*gridDim.x; int Ny = blockDim.y*gridDim.y; int x = blockDim.x*blockIdx.x + threadIdx.x; int y = blockDim.y*blockIdx.y + threadIdx.y; int cacheIndex = threadIdx.x + threadIdx.y*blockDim.x; site = x + y*Nx; if((x == 0) || (x == Nx-1) || (y == 0) || (y == Ny-1) ) { } else { xm1 = site - 1; // x-1 xp1 = site + 1; // x+1 ym1 = site - Nx; // y-1 yp1 = site + Nx; // y+1 if(flag) { b = phi_old[ym1]; l = phi_old[xm1]; r = phi_old[xp1]; t = phi_old[yp1]; phi_new[site] = 0.25*(b+l+r+t); } else { b = phi_new[ym1]; l = phi_new[xm1]; r = phi_new[xp1]; t = phi_new[yp1]; phi_old[site] = 0.25*(b+l+r+t); } diff = phi_new[site]-phi_old[site]; } cache[cacheIndex]=diff*diff; __syncthreads(); // perform parallel reduction int ib = blockDim.x*blockDim.y/2; while (ib != 0) { if(cacheIndex < ib) cache[cacheIndex] += cache[cacheIndex + ib]; __syncthreads(); ib /=2; } int blockIndex = blockIdx.x + gridDim.x*blockIdx.y; if(cacheIndex == 0) C[blockIndex] = cache[0]; } int main(void) { int gid; // GPU_ID int iter; volatile bool flag; // to toggle between *_new and *_old float cputime; float gputime; float gputime_tot; double flops; double error; printf("Enter the GPU ID (0/1): "); scanf("%d",&gid); printf("%d\n",gid); // Error code to check return values for CUDA calls hipError_t err = hipSuccess; err = hipSetDevice(gid); if (err != hipSuccess) { printf("!!! Cannot select GPU with device ID = %d\n", gid); exit(1); } printf("Select GPU with device ID = %d\n", gid); hipSetDevice(gid); printf("Solve Laplace equation on a 2D lattice with boundary conditions\n"); int Nx,Ny; // lattice size printf("Enter the size (Nx, Ny) of the 2D lattice: "); scanf("%d %d",&Nx,&Ny); printf("%d %d\n",Nx,Ny); // Set the number of threads (tx,ty) per block int tx,ty; printf("Enter the number of threads (tx,ty) per block: "); scanf("%d %d",&tx, &ty); printf("%d %d\n",tx, ty); if( tx*ty > 1024 ) { printf("The number of threads per block must be less than 1024 ! \n"); exit(0); } dim3 threads(tx,ty); // The total number of threads in the grid is equal to the total number of lattice sites int bx = Nx/tx; if(bx*tx != Nx) { printf("The block size in x is incorrect\n"); exit(0); } int by = Ny/ty; if(by*ty != Ny) { printf("The block size in y is incorrect\n"); exit(0); } if((bx > 65535)||(by > 65535)) { printf("The grid size exceeds the limit ! \n"); exit(0); } dim3 blocks(bx,by); printf("The dimension of the grid is (%d, %d)\n",bx,by); int CPU; printf("To compute the solution vector with CPU/GPU/both (0/1/2) ? 
"); scanf("%d",&CPU); printf("%d\n",CPU); fflush(stdout); // Allocate field vector h_phi in host memory int N = Nx*Ny; int size = N*sizeof(float); int sb = bx*by*sizeof(float); h_old = (float*)malloc(size); h_new = (float*)malloc(size); g_new = (float*)malloc(size); h_C = (float*)malloc(sb); memset(h_old, 0, size); memset(h_new, 0, size); // Initialize the field vector with boundary conditions for(int x=0; x<Nx; x++) { h_new[x+Nx*(Ny-1)]=1.0; h_old[x+Nx*(Ny-1)]=1.0; } FILE *out1; // save initial configuration in phi_initial.dat out1 = fopen("phi_initial.dat","w"); fprintf(out1, "Inital field configuration:\n"); for(int j=Ny-1;j>-1;j--) { for(int i=0; i<Nx; i++) { fprintf(out1,"%.2e ",h_new[i+j*Nx]); } fprintf(out1,"\n"); } fclose(out1); // printf("\n"); // printf("Inital field configuration:\n"); // for(int j=Ny-1;j>-1;j--) { // for(int i=0; i<Nx; i++) { // printf("%.2e ",h_new[i+j*Nx]); // } // printf("\n"); // } printf("\n"); // create the timer hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); if(CPU>0) { // start the timer hipEventRecord(start,0); // Allocate vectors in device memory hipMalloc((void**)&d_new, size); hipMalloc((void**)&d_old, size); hipMalloc((void**)&d_C, sb); // Copy vectors from host memory to device memory hipMemcpy(d_new, h_new, size, hipMemcpyHostToDevice); hipMemcpy(d_old, h_old, size, hipMemcpyHostToDevice); // stop the timer hipEventRecord(stop,0); hipEventSynchronize(stop); float Intime; hipEventElapsedTime( &Intime, start, stop); printf("Input time for GPU: %f (ms) \n",Intime); // start the timer hipEventRecord(start,0); error = 10*eps; // any value bigger than eps is OK iter = 0; // counter for iterations flag = true; int sm = tx*ty*sizeof(float); // size of the shared memory in each block while ( (error > eps) && (iter < MAX) ) { hipLaunchKernelGGL(( laplacian), dim3(blocks),dim3(threads),sm, 0, d_old, d_new, d_C, flag); hipMemcpy(h_C, d_C, sb, hipMemcpyDeviceToHost); error = 0.0; for(int i=0; i<bx*by; i++) { error = error + h_C[i]; } error = sqrt(error); // printf("error = %.15e\n",error); // printf("iteration = %d\n",iter); iter++; flag = !flag; } printf("error (GPU) = %.15e\n",error); printf("total iterations (GPU) = %d\n",iter); // stop the timer hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime( &gputime, start, stop); printf("Processing time for GPU: %f (ms) \n",gputime); flops = 7.0*(Nx-2)*(Ny-2)*iter; printf("GPU Gflops: %f\n",flops/(1000000.0*gputime)); // Copy result from device memory to host memory // start the timer hipEventRecord(start,0); hipMemcpy(g_new, d_new, size, hipMemcpyDeviceToHost); hipFree(d_new); hipFree(d_old); hipFree(d_C); // stop the timer hipEventRecord(stop,0); hipEventSynchronize(stop); float Outime; hipEventElapsedTime( &Outime, start, stop); printf("Output time for GPU: %f (ms) \n",Outime); gputime_tot = Intime + gputime + Outime; printf("Total time for GPU: %f (ms) \n",gputime_tot); fflush(stdout); FILE *outg; // save GPU solution in phi_GPU.dat outg = fopen("phi_GPU.dat","w"); fprintf(outg, "GPU field configuration:\n"); for(int j=Ny-1;j>-1;j--) { for(int i=0; i<Nx; i++) { fprintf(outg,"%.2e ",g_new[i+j*Nx]); } fprintf(outg,"\n"); } fclose(outg); // printf("\n"); // printf("Final field configuration (GPU):\n"); // for(int j=Ny-1;j>-1;j--) { // for(int i=0; i<Nx; i++) { // printf("%.2e ",g_new[i+j*Nx]); // } // printf("\n"); // } printf("\n"); } if(CPU==1) { // not to compute the CPU solution free(h_new); free(h_old); free(g_new); hipDeviceReset(); exit(0); } 
if((CPU==0)||(CPU==2)) { // to compute the CPU solution // start the timer hipEventRecord(start,0); // to compute the reference solution error = 10*eps; // any value bigger than eps iter = 0; // counter for iterations flag = true; double diff; float t, l, r, b; // top, left, right, bottom int site, ym1, xm1, xp1, yp1; while ( (error > eps) && (iter < MAX) ) { if(flag) { error = 0.0; for(int y=0; y<Ny; y++) { for(int x=0; x<Nx; x++) { if(x==0 || x==Nx-1 || y==0 || y==Ny-1) { } else { site = x+y*Nx; xm1 = site - 1; // x-1 xp1 = site + 1; // x+1 ym1 = site - Nx; // y-1 yp1 = site + Nx; // y+1 b = h_old[ym1]; l = h_old[xm1]; r = h_old[xp1]; t = h_old[yp1]; h_new[site] = 0.25*(b+l+r+t); diff = h_new[site]-h_old[site]; error = error + diff*diff; } } } } else { error = 0.0; for(int y=0; y<Ny; y++) { for(int x=0; x<Nx; x++) { if(x==0 || x==Nx-1 || y==0 || y==Ny-1) { } else { site = x+y*Nx; xm1 = site - 1; // x-1 xp1 = site + 1; // x+1 ym1 = site - Nx; // y-1 yp1 = site + Nx; // y+1 b = h_new[ym1]; l = h_new[xm1]; r = h_new[xp1]; t = h_new[yp1]; h_old[site] = 0.25*(b+l+r+t); diff = h_new[site]-h_old[site]; error = error + diff*diff; } } } } flag = !flag; iter++; error = sqrt(error); // printf("error = %.15e\n",error); // printf("iteration = %d\n",iter); } // exit if error < eps printf("error (CPU) = %.15e\n",error); printf("total iterations (CPU) = %d\n",iter); // stop the timer hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime( &cputime, start, stop); printf("Processing time for CPU: %f (ms) \n",cputime); flops = 7.0*(Nx-2)*(Ny-2)*iter; printf("CPU Gflops: %lf\n",flops/(1000000.0*cputime)); printf("Speed up of GPU = %f\n", cputime/(gputime_tot)); fflush(stdout); // destroy the timer hipEventDestroy(start); hipEventDestroy(stop); FILE *outc; // save CPU solution in phi_CPU.dat outc = fopen("phi_CPU.dat","w"); fprintf(outc, "CPU field configuration:\n"); for(int j=Ny-1;j>-1;j--) { for(int i=0; i<Nx; i++) { fprintf(outc,"%.2e ",h_new[i+j*Nx]); } fprintf(outc,"\n"); } fclose(outc); // printf("\n"); // printf("Final field configuration (CPU):\n"); // for(int j=Ny-1;j>-1;j--) { // for(int i=0; i<Nx; i++) { // printf("%.2e ",h_new[i+j*Nx]); // } // printf("\n"); // } printf("\n"); free(h_new); free(h_old); free(g_new); } hipDeviceReset(); }
dd2deb5779a2695d020d99588cc709cc2d72352b.cu
// Solve the Laplace equation on a 2D lattice with boundary conditions. // // compile with the following command: // // (for GTX970) // nvcc -arch=compute_52 -code=sm_52,sm_52 -O3 -m64 -o laplace laplace.cu // // (for GTX1060) // nvcc -arch=compute_61 -code=sm_61,sm_61 -O3 -m64 -o laplace laplace.cu // Includes #include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> // field variables float* h_new; // host field vectors float* h_old; float* h_C; // result of diff*diff of each block float* g_new; float* d_new; // device field vectors float* d_old; float* d_C; int MAX=1000000; // maximum iterations double eps=1.0e-10; // stopping criterion __global__ void laplacian(float* phi_old, float* phi_new, float* C, bool flag) { extern __shared__ float cache[]; float t, l, r, b; // top, left, right, bottom float diff; int site, ym1, xm1, xp1, yp1; int Nx = blockDim.x*gridDim.x; int Ny = blockDim.y*gridDim.y; int x = blockDim.x*blockIdx.x + threadIdx.x; int y = blockDim.y*blockIdx.y + threadIdx.y; int cacheIndex = threadIdx.x + threadIdx.y*blockDim.x; site = x + y*Nx; if((x == 0) || (x == Nx-1) || (y == 0) || (y == Ny-1) ) { } else { xm1 = site - 1; // x-1 xp1 = site + 1; // x+1 ym1 = site - Nx; // y-1 yp1 = site + Nx; // y+1 if(flag) { b = phi_old[ym1]; l = phi_old[xm1]; r = phi_old[xp1]; t = phi_old[yp1]; phi_new[site] = 0.25*(b+l+r+t); } else { b = phi_new[ym1]; l = phi_new[xm1]; r = phi_new[xp1]; t = phi_new[yp1]; phi_old[site] = 0.25*(b+l+r+t); } diff = phi_new[site]-phi_old[site]; } cache[cacheIndex]=diff*diff; __syncthreads(); // perform parallel reduction int ib = blockDim.x*blockDim.y/2; while (ib != 0) { if(cacheIndex < ib) cache[cacheIndex] += cache[cacheIndex + ib]; __syncthreads(); ib /=2; } int blockIndex = blockIdx.x + gridDim.x*blockIdx.y; if(cacheIndex == 0) C[blockIndex] = cache[0]; } int main(void) { int gid; // GPU_ID int iter; volatile bool flag; // to toggle between *_new and *_old float cputime; float gputime; float gputime_tot; double flops; double error; printf("Enter the GPU ID (0/1): "); scanf("%d",&gid); printf("%d\n",gid); // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; err = cudaSetDevice(gid); if (err != cudaSuccess) { printf("!!! Cannot select GPU with device ID = %d\n", gid); exit(1); } printf("Select GPU with device ID = %d\n", gid); cudaSetDevice(gid); printf("Solve Laplace equation on a 2D lattice with boundary conditions\n"); int Nx,Ny; // lattice size printf("Enter the size (Nx, Ny) of the 2D lattice: "); scanf("%d %d",&Nx,&Ny); printf("%d %d\n",Nx,Ny); // Set the number of threads (tx,ty) per block int tx,ty; printf("Enter the number of threads (tx,ty) per block: "); scanf("%d %d",&tx, &ty); printf("%d %d\n",tx, ty); if( tx*ty > 1024 ) { printf("The number of threads per block must be less than 1024 ! \n"); exit(0); } dim3 threads(tx,ty); // The total number of threads in the grid is equal to the total number of lattice sites int bx = Nx/tx; if(bx*tx != Nx) { printf("The block size in x is incorrect\n"); exit(0); } int by = Ny/ty; if(by*ty != Ny) { printf("The block size in y is incorrect\n"); exit(0); } if((bx > 65535)||(by > 65535)) { printf("The grid size exceeds the limit ! \n"); exit(0); } dim3 blocks(bx,by); printf("The dimension of the grid is (%d, %d)\n",bx,by); int CPU; printf("To compute the solution vector with CPU/GPU/both (0/1/2) ? 
"); scanf("%d",&CPU); printf("%d\n",CPU); fflush(stdout); // Allocate field vector h_phi in host memory int N = Nx*Ny; int size = N*sizeof(float); int sb = bx*by*sizeof(float); h_old = (float*)malloc(size); h_new = (float*)malloc(size); g_new = (float*)malloc(size); h_C = (float*)malloc(sb); memset(h_old, 0, size); memset(h_new, 0, size); // Initialize the field vector with boundary conditions for(int x=0; x<Nx; x++) { h_new[x+Nx*(Ny-1)]=1.0; h_old[x+Nx*(Ny-1)]=1.0; } FILE *out1; // save initial configuration in phi_initial.dat out1 = fopen("phi_initial.dat","w"); fprintf(out1, "Inital field configuration:\n"); for(int j=Ny-1;j>-1;j--) { for(int i=0; i<Nx; i++) { fprintf(out1,"%.2e ",h_new[i+j*Nx]); } fprintf(out1,"\n"); } fclose(out1); // printf("\n"); // printf("Inital field configuration:\n"); // for(int j=Ny-1;j>-1;j--) { // for(int i=0; i<Nx; i++) { // printf("%.2e ",h_new[i+j*Nx]); // } // printf("\n"); // } printf("\n"); // create the timer cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); if(CPU>0) { // start the timer cudaEventRecord(start,0); // Allocate vectors in device memory cudaMalloc((void**)&d_new, size); cudaMalloc((void**)&d_old, size); cudaMalloc((void**)&d_C, sb); // Copy vectors from host memory to device memory cudaMemcpy(d_new, h_new, size, cudaMemcpyHostToDevice); cudaMemcpy(d_old, h_old, size, cudaMemcpyHostToDevice); // stop the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); float Intime; cudaEventElapsedTime( &Intime, start, stop); printf("Input time for GPU: %f (ms) \n",Intime); // start the timer cudaEventRecord(start,0); error = 10*eps; // any value bigger than eps is OK iter = 0; // counter for iterations flag = true; int sm = tx*ty*sizeof(float); // size of the shared memory in each block while ( (error > eps) && (iter < MAX) ) { laplacian<<<blocks,threads,sm>>>(d_old, d_new, d_C, flag); cudaMemcpy(h_C, d_C, sb, cudaMemcpyDeviceToHost); error = 0.0; for(int i=0; i<bx*by; i++) { error = error + h_C[i]; } error = sqrt(error); // printf("error = %.15e\n",error); // printf("iteration = %d\n",iter); iter++; flag = !flag; } printf("error (GPU) = %.15e\n",error); printf("total iterations (GPU) = %d\n",iter); // stop the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime( &gputime, start, stop); printf("Processing time for GPU: %f (ms) \n",gputime); flops = 7.0*(Nx-2)*(Ny-2)*iter; printf("GPU Gflops: %f\n",flops/(1000000.0*gputime)); // Copy result from device memory to host memory // start the timer cudaEventRecord(start,0); cudaMemcpy(g_new, d_new, size, cudaMemcpyDeviceToHost); cudaFree(d_new); cudaFree(d_old); cudaFree(d_C); // stop the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); float Outime; cudaEventElapsedTime( &Outime, start, stop); printf("Output time for GPU: %f (ms) \n",Outime); gputime_tot = Intime + gputime + Outime; printf("Total time for GPU: %f (ms) \n",gputime_tot); fflush(stdout); FILE *outg; // save GPU solution in phi_GPU.dat outg = fopen("phi_GPU.dat","w"); fprintf(outg, "GPU field configuration:\n"); for(int j=Ny-1;j>-1;j--) { for(int i=0; i<Nx; i++) { fprintf(outg,"%.2e ",g_new[i+j*Nx]); } fprintf(outg,"\n"); } fclose(outg); // printf("\n"); // printf("Final field configuration (GPU):\n"); // for(int j=Ny-1;j>-1;j--) { // for(int i=0; i<Nx; i++) { // printf("%.2e ",g_new[i+j*Nx]); // } // printf("\n"); // } printf("\n"); } if(CPU==1) { // not to compute the CPU solution free(h_new); free(h_old); free(g_new); cudaDeviceReset(); exit(0); } if((CPU==0)||(CPU==2)) { 
// to compute the CPU solution // start the timer cudaEventRecord(start,0); // to compute the reference solution error = 10*eps; // any value bigger than eps iter = 0; // counter for iterations flag = true; double diff; float t, l, r, b; // top, left, right, bottom int site, ym1, xm1, xp1, yp1; while ( (error > eps) && (iter < MAX) ) { if(flag) { error = 0.0; for(int y=0; y<Ny; y++) { for(int x=0; x<Nx; x++) { if(x==0 || x==Nx-1 || y==0 || y==Ny-1) { } else { site = x+y*Nx; xm1 = site - 1; // x-1 xp1 = site + 1; // x+1 ym1 = site - Nx; // y-1 yp1 = site + Nx; // y+1 b = h_old[ym1]; l = h_old[xm1]; r = h_old[xp1]; t = h_old[yp1]; h_new[site] = 0.25*(b+l+r+t); diff = h_new[site]-h_old[site]; error = error + diff*diff; } } } } else { error = 0.0; for(int y=0; y<Ny; y++) { for(int x=0; x<Nx; x++) { if(x==0 || x==Nx-1 || y==0 || y==Ny-1) { } else { site = x+y*Nx; xm1 = site - 1; // x-1 xp1 = site + 1; // x+1 ym1 = site - Nx; // y-1 yp1 = site + Nx; // y+1 b = h_new[ym1]; l = h_new[xm1]; r = h_new[xp1]; t = h_new[yp1]; h_old[site] = 0.25*(b+l+r+t); diff = h_new[site]-h_old[site]; error = error + diff*diff; } } } } flag = !flag; iter++; error = sqrt(error); // printf("error = %.15e\n",error); // printf("iteration = %d\n",iter); } // exit if error < eps printf("error (CPU) = %.15e\n",error); printf("total iterations (CPU) = %d\n",iter); // stop the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime( &cputime, start, stop); printf("Processing time for CPU: %f (ms) \n",cputime); flops = 7.0*(Nx-2)*(Ny-2)*iter; printf("CPU Gflops: %lf\n",flops/(1000000.0*cputime)); printf("Speed up of GPU = %f\n", cputime/(gputime_tot)); fflush(stdout); // destroy the timer cudaEventDestroy(start); cudaEventDestroy(stop); FILE *outc; // save CPU solution in phi_CPU.dat outc = fopen("phi_CPU.dat","w"); fprintf(outc, "CPU field configuration:\n"); for(int j=Ny-1;j>-1;j--) { for(int i=0; i<Nx; i++) { fprintf(outc,"%.2e ",h_new[i+j*Nx]); } fprintf(outc,"\n"); } fclose(outc); // printf("\n"); // printf("Final field configuration (CPU):\n"); // for(int j=Ny-1;j>-1;j--) { // for(int i=0; i<Nx; i++) { // printf("%.2e ",h_new[i+j*Nx]); // } // printf("\n"); // } printf("\n"); free(h_new); free(h_old); free(g_new); } cudaDeviceReset(); }
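In both versions of the Laplace solver the laplacian kernel fuses the Jacobi update with a block-level tree reduction of diff*diff in dynamically sized shared memory (the sm launch argument), and the host then sums the per-block partials into the global error. One caveat that applies to both copies: threads on the lattice boundary never assign diff before cache[cacheIndex] = diff*diff, so they feed an uninitialized value into the reduction; zero-initializing the per-thread contribution avoids that. A minimal reduction-only sketch with that initialization follows; the kernel and harness are illustrative, not taken from the file.

// Illustrative block-sum reduction; not part of the recorded laplace sources.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void block_sum(const float *in, float *block_out, int n) {
  extern __shared__ float cache[];
  int tid = threadIdx.x;
  int i = blockIdx.x * blockDim.x + tid;

  float v = 0.0f;            // out-of-range (or boundary) threads contribute zero
  if (i < n) v = in[i];
  cache[tid] = v;
  __syncthreads();

  for (int ib = blockDim.x / 2; ib > 0; ib /= 2) {  // tree reduction; blockDim.x must be a power of 2
    if (tid < ib) cache[tid] += cache[tid + ib];
    __syncthreads();
  }
  if (tid == 0) block_out[blockIdx.x] = cache[0];
}

int main() {
  const int n = 1 << 20, threads = 256, blocks = (n + threads - 1) / threads;
  float *in, *partial;
  cudaMallocManaged(&in, n * sizeof(float));
  cudaMallocManaged(&partial, blocks * sizeof(float));
  for (int i = 0; i < n; ++i) in[i] = 1.0f;

  block_sum<<<blocks, threads, threads * sizeof(float)>>>(in, partial, n);
  cudaDeviceSynchronize();

  double total = 0.0;
  for (int b = 0; b < blocks; ++b) total += partial[b];   // host sums the per-block partials
  printf("sum = %.1f (expected %d)\n", total, n);

  cudaFree(in);
  cudaFree(partial);
  return 0;
}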
a69cf9628fcf5321e8cd95bd56dfd0f263dbda75.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "convolutionX_63_Kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_Dst = NULL; hipMalloc(&d_Dst, XSIZE*YSIZE); float *d_Src = NULL; hipMalloc(&d_Src, XSIZE*YSIZE); int imageW = 1; int imageH = 1; int imageD = 1; int outofbounds = 1; float outofboundsvalue = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( convolutionX_63_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Dst,d_Src,imageW,imageH,imageD,outofbounds,outofboundsvalue); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( convolutionX_63_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Dst,d_Src,imageW,imageH,imageD,outofbounds,outofboundsvalue); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( convolutionX_63_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Dst,d_Src,imageW,imageH,imageD,outofbounds,outofboundsvalue); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a69cf9628fcf5321e8cd95bd56dfd0f263dbda75.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "convolutionX_63_Kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_Dst = NULL; cudaMalloc(&d_Dst, XSIZE*YSIZE); float *d_Src = NULL; cudaMalloc(&d_Src, XSIZE*YSIZE); int imageW = 1; int imageH = 1; int imageD = 1; int outofbounds = 1; float outofboundsvalue = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); convolutionX_63_Kernel<<<gridBlock,threadBlock>>>(d_Dst,d_Src,imageW,imageH,imageD,outofbounds,outofboundsvalue); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { convolutionX_63_Kernel<<<gridBlock,threadBlock>>>(d_Dst,d_Src,imageW,imageH,imageD,outofbounds,outofboundsvalue); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { convolutionX_63_Kernel<<<gridBlock,threadBlock>>>(d_Dst,d_Src,imageW,imageH,imageD,outofbounds,outofboundsvalue); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
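The harness above warms the kernel up and then times 1000 launches with std::chrono::steady_clock. Kernel launches are asynchronous, so without a cudaDeviceSynchronize before reading the end time the measured interval can close before the queued kernels have finished; recording cudaEvent_t markers around the loop and synchronizing on the stop event reports the device-side time directly. A minimal event-timed variant follows; dummy_kernel is a stand-in, not convolutionX_63_Kernel.

// Event-timing sketch; dummy_kernel is a placeholder, not the convolution kernel above.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel(float *p, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) p[i] = p[i] * 2.0f + 1.0f;
}

int main() {
  const int n = 1 << 20;
  float *d = nullptr;
  cudaMalloc(&d, n * sizeof(float));

  dim3 block(256), grid((n + 255) / 256);
  for (int w = 0; w < 10; ++w) dummy_kernel<<<grid, block>>>(d, n);  // warm-up
  cudaDeviceSynchronize();

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start);
  for (int it = 0; it < 1000; ++it) dummy_kernel<<<grid, block>>>(d, n);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);        // wait until the last queued kernel has finished

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  printf("average per launch: %f us\n", ms * 1000.0f / 1000.0f);  // total ms -> us per iteration

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d);
  return 0;
}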
5f75963c4e3a375a4f475aa3b2be79ae0ee985d2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef TRANSFORMATION_KERNEL_H #define TRANSFORMATION_KERNEL_H __global__ void copy_data_transform(float *vertexlist, float *orig_list, int size, int offset, float rot_factor) { int vert = threadIdx.x + blockIdx.x * blockDim.x; if (vert < size) { float x = ((orig_list[vert*3 + 0] - 12.0f) / 12.0f); float y = ((orig_list[vert*3 + 1] - 11.0f) / 12.0f); float z = ((orig_list[vert*3 + 2] - 4.5f) / 12.0f); vertexlist[(offset+vert)*3 + 0] = (x * cosf(rot_factor) - y * sinf(rot_factor)) * 9.0f + 14.5f; vertexlist[(offset+vert)*3 + 1] = (x * sinf(rot_factor) + y * cosf(rot_factor)) * 9.0f + 13.0f; vertexlist[(offset+vert)*3 + 2] = z * 9.0f + 4.0f; } } #endif
5f75963c4e3a375a4f475aa3b2be79ae0ee985d2.cu
#ifndef TRANSFORMATION_KERNEL_H #define TRANSFORMATION_KERNEL_H __global__ void copy_data_transform(float *vertexlist, float *orig_list, int size, int offset, float rot_factor) { int vert = threadIdx.x + blockIdx.x * blockDim.x; if (vert < size) { float x = ((orig_list[vert*3 + 0] - 12.0f) / 12.0f); float y = ((orig_list[vert*3 + 1] - 11.0f) / 12.0f); float z = ((orig_list[vert*3 + 2] - 4.5f) / 12.0f); vertexlist[(offset+vert)*3 + 0] = (x * cosf(rot_factor) - y * sinf(rot_factor)) * 9.0f + 14.5f; vertexlist[(offset+vert)*3 + 1] = (x * sinf(rot_factor) + y * cosf(rot_factor)) * 9.0f + 13.0f; vertexlist[(offset+vert)*3 + 2] = z * 9.0f + 4.0f; } } #endif
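copy_data_transform applies one affine map to every vertex: shift and scale into a frame normalized around (12, 11, 4.5), rotate about the z axis by rot_factor, then scale by 9 and re-center at (14.5, 13, 4). A tiny host reference of the same arithmetic can be handy for cross-checking kernel output; the helper and sample values below are illustrative and not part of the file.

// Host reference for the transform above; helper name and test values are illustrative.
#include <cmath>
#include <cstdio>

void transform_reference(const float in[3], float out[3], float rot_factor) {
  // shift/scale into the normalized frame used by the kernel
  float x = (in[0] - 12.0f) / 12.0f;
  float y = (in[1] - 11.0f) / 12.0f;
  float z = (in[2] - 4.5f) / 12.0f;
  // rotate about the z axis, then scale by 9 and re-center
  out[0] = (x * cosf(rot_factor) - y * sinf(rot_factor)) * 9.0f + 14.5f;
  out[1] = (x * sinf(rot_factor) + y * cosf(rot_factor)) * 9.0f + 13.0f;
  out[2] = z * 9.0f + 4.0f;
}

int main() {
  float v[3] = {24.0f, 11.0f, 16.5f};   // arbitrary sample vertex
  float r[3];
  transform_reference(v, r, 0.0f);      // zero rotation: pure scale + translate
  // (24, 11, 16.5) -> x=1, y=0, z=1 -> (1*9+14.5, 0*9+13, 1*9+4) = (23.5, 13, 13)
  printf("%.1f %.1f %.1f\n", r[0], r[1], r[2]);
  return 0;
}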
8e18bb8a6fe1404300a333357b0f1938aee28bd9.hip
// !!! This is a file automatically generated by hipify!!! /** * @brief Benchmarks for array_ops. * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Fangjun Kuang) * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <cstdlib> #include "k2/csrc/array_ops.h" #include "k2/csrc/benchmark/benchmark.h" namespace k2 { template <typename T> static BenchmarkStat BenchmarkExclusiveSum(int32_t dim, DeviceType device_type) { ContextPtr context; if (device_type == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(device_type, kCuda); context = GetCudaContext(); } int32_t num_iter = ::min(500, 1000000 / dim); Array1<T> src = RandUniformArray1<T>(context, dim, -1000, 1000); BenchmarkStat stat; stat.op_name = "ExclusiveSum"; stat.num_iter = num_iter; stat.problem_size = dim; stat.dtype_name = TraitsOf(DtypeOf<T>::dtype).Name(); stat.device_type = device_type; // there are overloads of ExclusiveSum, so we use an explicit conversion here. stat.eplased_per_iter = BenchmarkOp(num_iter, context, (Array1<T>(*)(const Array1<T> &))(&ExclusiveSum<T>), src); stat.eplased_per_iter *= 1e6; // from seconds to microseconds return stat; } static BenchmarkStat BenchmarkRowSplitsToRowIds(int32_t dim, DeviceType device_type) { ContextPtr context; if (device_type == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(device_type, kCuda); context = GetCudaContext(); } int32_t num_iter = ::min(500, 1000000 / dim); Array1<int32_t> sizes = RandUniformArray1<int32_t>(context, dim, 0, 1000); Array1<int32_t> row_splits = ExclusiveSum(sizes); Array1<int32_t> row_ids(context, row_splits.Back()); BenchmarkStat stat; stat.op_name = "RowSplitsToRowIds"; stat.num_iter = num_iter; stat.problem_size = dim; stat.dtype_name = TraitsOf(DtypeOf<int32_t>::dtype).Name(); stat.device_type = device_type; // there are overloads of RowSplitsToRowIds, // so we use an explicit conversion here. stat.eplased_per_iter = BenchmarkOp(num_iter, context, (void (*)(const Array1<int32_t> &, Array1<int32_t> *))( &RowSplitsToRowIds), row_splits, &row_ids); stat.eplased_per_iter *= 1e6; // from seconds to microseconds return stat; } template <typename T> static void RegisterBenchmarkExclusiveSum(DeviceType device_type) { std::vector<int32_t> problems_sizes = {100, 500, 1000, 2000, 5000, 10000, 100000}; for (auto s : problems_sizes) { std::string name = GenerateBenchmarkName<T>("ExclusiveSum", device_type) + "_" + std::to_string(s); RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat { return BenchmarkExclusiveSum<T>(s, device_type); }); } } static void RegisterBenchmarkRowSplitsToRowIds(DeviceType device_type) { std::vector<int32_t> problems_sizes = {100, 500, 1000, 2000, 5000, 10000, 100000}; for (auto s : problems_sizes) { std::string name = GenerateBenchmarkName<int32_t>("RowSplitsToRowIds", device_type) + "_" + std::to_string(s); RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat { return BenchmarkRowSplitsToRowIds(s, device_type); }); } } static void RunArrayOpsBenchmark() { std::cout << GetCurrentDateTime() << "\n"; std::cout << GetDeviceInfo() << "\n"; RegisterBenchmarkExclusiveSum<int32_t>(kCpu); RegisterBenchmarkExclusiveSum<int32_t>(kCuda); RegisterBenchmarkRowSplitsToRowIds(kCpu); RegisterBenchmarkRowSplitsToRowIds(kCuda); // Users can set a regular expression via environment // variable `K2_BENCHMARK_FILTER` such that only benchmarks // with name matching the pattern are candidates to run. 
const char *filter = std::getenv("K2_BENCHMARK_FILTER"); if (filter != nullptr) FilterRegisteredBenchmarks(filter); std::vector<BenchmarkRun> results = RunBechmarks(); std::cout << BenchmarkRun::GetFieldsName() << "\n"; for (const auto &r : results) { std::cout << r << "\n"; } } } // namespace k2 int main() { k2::RunArrayOpsBenchmark(); return 0; }
8e18bb8a6fe1404300a333357b0f1938aee28bd9.cu
/** * @brief Benchmarks for array_ops. * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Fangjun Kuang) * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <cstdlib> #include "k2/csrc/array_ops.h" #include "k2/csrc/benchmark/benchmark.h" namespace k2 { template <typename T> static BenchmarkStat BenchmarkExclusiveSum(int32_t dim, DeviceType device_type) { ContextPtr context; if (device_type == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(device_type, kCuda); context = GetCudaContext(); } int32_t num_iter = std::min(500, 1000000 / dim); Array1<T> src = RandUniformArray1<T>(context, dim, -1000, 1000); BenchmarkStat stat; stat.op_name = "ExclusiveSum"; stat.num_iter = num_iter; stat.problem_size = dim; stat.dtype_name = TraitsOf(DtypeOf<T>::dtype).Name(); stat.device_type = device_type; // there are overloads of ExclusiveSum, so we use an explicit conversion here. stat.eplased_per_iter = BenchmarkOp(num_iter, context, (Array1<T>(*)(const Array1<T> &))(&ExclusiveSum<T>), src); stat.eplased_per_iter *= 1e6; // from seconds to microseconds return stat; } static BenchmarkStat BenchmarkRowSplitsToRowIds(int32_t dim, DeviceType device_type) { ContextPtr context; if (device_type == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(device_type, kCuda); context = GetCudaContext(); } int32_t num_iter = std::min(500, 1000000 / dim); Array1<int32_t> sizes = RandUniformArray1<int32_t>(context, dim, 0, 1000); Array1<int32_t> row_splits = ExclusiveSum(sizes); Array1<int32_t> row_ids(context, row_splits.Back()); BenchmarkStat stat; stat.op_name = "RowSplitsToRowIds"; stat.num_iter = num_iter; stat.problem_size = dim; stat.dtype_name = TraitsOf(DtypeOf<int32_t>::dtype).Name(); stat.device_type = device_type; // there are overloads of RowSplitsToRowIds, // so we use an explicit conversion here. stat.eplased_per_iter = BenchmarkOp(num_iter, context, (void (*)(const Array1<int32_t> &, Array1<int32_t> *))( &RowSplitsToRowIds), row_splits, &row_ids); stat.eplased_per_iter *= 1e6; // from seconds to microseconds return stat; } template <typename T> static void RegisterBenchmarkExclusiveSum(DeviceType device_type) { std::vector<int32_t> problems_sizes = {100, 500, 1000, 2000, 5000, 10000, 100000}; for (auto s : problems_sizes) { std::string name = GenerateBenchmarkName<T>("ExclusiveSum", device_type) + "_" + std::to_string(s); RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat { return BenchmarkExclusiveSum<T>(s, device_type); }); } } static void RegisterBenchmarkRowSplitsToRowIds(DeviceType device_type) { std::vector<int32_t> problems_sizes = {100, 500, 1000, 2000, 5000, 10000, 100000}; for (auto s : problems_sizes) { std::string name = GenerateBenchmarkName<int32_t>("RowSplitsToRowIds", device_type) + "_" + std::to_string(s); RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat { return BenchmarkRowSplitsToRowIds(s, device_type); }); } } static void RunArrayOpsBenchmark() { std::cout << GetCurrentDateTime() << "\n"; std::cout << GetDeviceInfo() << "\n"; RegisterBenchmarkExclusiveSum<int32_t>(kCpu); RegisterBenchmarkExclusiveSum<int32_t>(kCuda); RegisterBenchmarkRowSplitsToRowIds(kCpu); RegisterBenchmarkRowSplitsToRowIds(kCuda); // Users can set a regular expression via environment // variable `K2_BENCHMARK_FILTER` such that only benchmarks // with name matching the pattern are candidates to run. 
const char *filter = std::getenv("K2_BENCHMARK_FILTER"); if (filter != nullptr) FilterRegisteredBenchmarks(filter); std::vector<BenchmarkRun> results = RunBechmarks(); std::cout << BenchmarkRun::GetFieldsName() << "\n"; for (const auto &r : results) { std::cout << r << "\n"; } } } // namespace k2 int main() { k2::RunArrayOpsBenchmark(); return 0; }
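RunArrayOpsBenchmark registers one closure per problem size and device, then lets the K2_BENCHMARK_FILTER environment variable narrow the set with a regular expression before RunBechmarks executes whatever remains; note in passing that the hipified copy spells the iteration clamp as ::min where the CUDA original uses std::min. The sketch below shows the register-then-filter shape with a toy registry; none of these names are k2's actual API.

// Toy benchmark registry; names and behaviour are illustrative only, not k2's implementation.
#include <cstdlib>
#include <functional>
#include <iostream>
#include <regex>
#include <string>
#include <vector>

struct ToyBenchmark {
  std::string name;
  std::function<double()> run;   // returns elapsed microseconds (faked here)
};

static std::vector<ToyBenchmark> &Registry() {
  static std::vector<ToyBenchmark> r;
  return r;
}

static void Register(const std::string &name, std::function<double()> fn) {
  Registry().push_back({name, std::move(fn)});
}

int main() {
  for (int s : {100, 500, 1000, 2000}) {
    Register("ExclusiveSum_int32_cpu_" + std::to_string(s),
             [s]() { return 0.01 * s; });   // stand-in for the real timing loop
  }

  // Same idea as K2_BENCHMARK_FILTER: keep only benchmarks whose name matches the pattern.
  const char *filter = std::getenv("TOY_BENCHMARK_FILTER");
  std::regex pattern(filter ? filter : ".*");

  for (const auto &b : Registry()) {
    if (!std::regex_search(b.name, pattern)) continue;
    std::cout << b.name << " " << b.run() << " us\n";
  }
  return 0;
}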
00fc979a37030bd4521e00ed062d42ec29aad4d2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // free_list_test.cu #include <Handle.cpp> #include <location.cu> #include <gtest/gtest.h> #include "MacroUtilities.cpp" #include <cstddef> #include <type_traits> #define HANDLE_THREADS 8 using namespace Flamingo::Memory; class HandleTest : public ::testing::Test { protected: virtual void SetUp() { int offset = 0; std::size_t size = 8; Handle<int> handle_int(offset, size, base_ptr); handle = handle_int; } int base[2] = {0, 1}; int* base_ptr = base; Handle<int> handle; DEFINE(BuddyOffSetTest, HANDLE_THREADS) DEFINE(CopyConstructorTest, HANDLE_THREADS) DEFINE(JoinOperatorTest, HANDLE_THREADS) DEFINE(DereferenceOperatorTest, HANDLE_THREADS) DEFINE(IndirectionOperatorTest, HANDLE_THREADS) DEFINE(EqualityComparableTest, HANDLE_THREADS) DEFINE(DefaultConstructionTest, HANDLE_THREADS) DEFINE(CopyAssignableTest, HANDLE_THREADS) DEFINE(NullablePointerTest, HANDLE_THREADS) DEFINE(BiderectionalTest, HANDLE_THREADS) DEFINE(RandomAccessTest, HANDLE_THREADS) DEFINE(VoidTest, HANDLE_THREADS) DEFINE(BoolConvertTest, HANDLE_THREADS) DEFINE(IfStatementTest, HANDLE_THREADS) DEFINE(ConvertToConstTest, HANDLE_THREADS) DEFINE(ConstDereferenceTest, HANDLE_THREADS) DEFINE(DeviceSingleTest, HANDLE_THREADS) DEFINE(DeviceMultipleTest, HANDLE_THREADS) DEFINE(MemCopyTest, HANDLE_THREADS) DEFINE(AssignementTest,HANDLE_THREADS) DEFINE(ConstTest,HANDLE_THREADS) }; void HandleTest::ConstTest() { int A=10; const int* A_ptr=&A; Handle<const int> h(A_ptr); }; void HandleTest::ConstDereferenceTest() { const Handle<int> h(0, 0, base_ptr); *h; }; void HandleTest::BuddyOffSetTest() { int buddy = handle.buddy_offset(); EXPECT_EQ(8, buddy); }; void HandleTest::CopyConstructorTest() { Handle<int> handle_new(handle); Handle<int> handle_null(NULL); Handle<int> handle_null_copy(handle_null); Handle<int>* handle_ptr=&handle_null; Handle<int> handle_move(*handle_ptr); }; void HandleTest::JoinOperatorTest() { Handle<int> h_1(handle); Handle<int> h_2(handle); h_1._offset = 8; h_1.combine(h_2); EXPECT_EQ(h_1._offset, 0); EXPECT_EQ(h_1._size, 16); }; void HandleTest::DereferenceOperatorTest() { *base = 0; EXPECT_EQ(*handle, 0); *base = 1; EXPECT_EQ(*handle, 1); }; void HandleTest::IndirectionOperatorTest() { Handle<int>* handle_ptr = &handle; int offset = handle_ptr->_offset; EXPECT_EQ(offset, 0); }; void HandleTest::EqualityComparableTest() { Handle<int> handle_2(0, 2, base_ptr); bool test_1 = (handle_2 == handle); EXPECT_TRUE(test_1); Handle<int> handle_3(1, 2, base_ptr); bool test_2 = (handle_3 == handle); EXPECT_FALSE(test_2); double* base_ptr2 = new double; Handle<double> handle_4(0, 8, base_ptr2); bool test_3 = (handle_4 == handle); EXPECT_FALSE(test_3); }; void HandleTest::AssignementTest(){ Handle<int> A; Handle<int> B; A=B; } void HandleTest::DefaultConstructionTest() { Handle<int> handle_2(); }; void HandleTest::CopyAssignableTest() { Handle<int> handle_2 = handle; bool test = (handle_2 == handle); EXPECT_TRUE(test); }; void HandleTest::NullablePointerTest() { std::nullptr_t null; Handle<int> handle_2(null); bool test_1 = (handle_2 == null); EXPECT_TRUE(test_1); Handle<int> handle_3 = null; bool test_2 = (handle_3 == null); EXPECT_TRUE(test_2); EXPECT_FALSE(handle == null); EXPECT_FALSE(null == handle); EXPECT_TRUE(handle != null); EXPECT_TRUE(null != handle); }; void HandleTest::BiderectionalTest() { Handle<int> handle_2 = handle; handle_2++; EXPECT_TRUE(handle_2 != handle); handle_2--; EXPECT_TRUE(handle_2 == handle); --handle_2; ++handle_2; 
*handle_2++; EXPECT_EQ(*handle_2, 1); *handle_2--; EXPECT_EQ(*handle_2, 0); }; void HandleTest::RandomAccessTest() { Handle<int> handle_2 = handle; handle_2 += 1; EXPECT_EQ(*handle_2, 1); handle_2 -= 1; EXPECT_EQ(*handle_2, 0); Handle<int> handle_3 = handle + 1; EXPECT_EQ(*handle_3, 1); Handle<int> handle_4 = 1 + handle; EXPECT_EQ(*handle_4, 1); Handle<int> handle_5 = handle_4 - 1; EXPECT_EQ(*handle_5, 0); Handle<int> handle_6 = handle; handle_6 += 1; int dif = handle_6 - handle; EXPECT_EQ(dif, 1); EXPECT_TRUE(handle < handle_6); EXPECT_FALSE(handle > handle_6); EXPECT_TRUE(handle <= handle_6); EXPECT_FALSE(handle >= handle_6); EXPECT_EQ(handle[0], 0); EXPECT_EQ(handle[1], 1); }; void HandleTest::VoidTest() { Handle_void handle_void; } void HandleTest::BoolConvertTest() { int base_l[2] = {0, 1}; int* base_ptr_l = base_l; Handle<int> handle_1(base_ptr_l); if(handle_1){ EXPECT_TRUE(true); }else{ EXPECT_TRUE(false); } Handle<int> handle_2(NULL); if(handle_2){ EXPECT_TRUE(false); }else{ EXPECT_TRUE(true); } } void HandleTest::IfStatementTest() { bool True = true; True ? handle : 0; !True ? handle : 0; } void HandleTest::ConvertToConstTest() { bool convertable = std::is_convertible<Handle<int>, const Handle<int> >::value; EXPECT_TRUE(convertable); convertable = std::is_convertible<Handle<int>, const Handle<int> >::value; EXPECT_TRUE(convertable); }; __global__ void transferS(Handle<int> x, Handle<int> y) { *y = *x; }; void HandleTest::DeviceSingleTest() { int x = 2; int y = 0; int* x_d; int* y_d; hipMalloc((void**)&x_d, sizeof(int)); hipMalloc((void**)&y_d, sizeof(int)); hipMemcpy(x_d, &x, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(y_d, &y, sizeof(int), hipMemcpyHostToDevice); Handle<int> x_h(0, 1, x_d); Handle<int> y_h(0, 1, y_d); transferS << <1, 1>>> (x_h, y_h); hipDeviceSynchronize(); hipMemcpy(&y, y_d, sizeof(int), hipMemcpyDeviceToHost); EXPECT_EQ(x, y); Handle<int> copy(x_h); hipFree(x_d); hipFree(y_d); } __global__ void transferM(Handle<int> x, Handle<int> y) { y[1] = x[1]; }; void HandleTest::DeviceMultipleTest() { int x[2] = {2, 3}; int y[2] = {0, 1}; int* x_d; int* y_d; int size = 2 * sizeof(int); hipMalloc((void**)&x_d, size); hipMalloc((void**)&y_d, size); hipMemcpy(x_d, &x, size, hipMemcpyHostToDevice); hipMemcpy(y_d, &y, size, hipMemcpyHostToDevice); Handle<int> x_h(0, 2, x_d); Handle<int> y_h(0, 2, y_d); transferM << <1, 1>>> (x_h, y_h); hipDeviceSynchronize(); hipMemcpy(&y, y_d, size, hipMemcpyDeviceToHost); EXPECT_EQ(x[1], y[1]); EXPECT_FALSE(x[0] == y[0]); hipFree(x_d); hipFree(y_d); } template<Region SRC,Region DST> void copyfunction(){ typedef location<SRC> Src_Location; typedef location<DST> Dst_Location; Src_Location src_location; Dst_Location dst_location; int size = 3 * sizeof(int); int* x_d = static_cast<int*>(src_location.New(size)); int* y_d = static_cast<int*>(dst_location.New(size)); Handle<int> x_h(x_d); Handle<int> y_h(y_d); Src_Location::MemCopy(x_h, y_h, 3*sizeof(int) ); src_location.Delete(x_d); dst_location.Delete(y_d); } void HandleTest::MemCopyTest() { copyfunction<Region::host,Region::host>(); copyfunction<Region::host,Region::device>(); copyfunction<Region::host,Region::pinned>(); copyfunction<Region::host,Region::unified>(); copyfunction<Region::device,Region::host>(); copyfunction<Region::device,Region::device>(); copyfunction<Region::device,Region::pinned>(); copyfunction<Region::device,Region::unified>(); copyfunction<Region::pinned,Region::host>(); copyfunction<Region::pinned,Region::device>(); copyfunction<Region::pinned,Region::pinned>(); 
copyfunction<Region::pinned,Region::unified>(); copyfunction<Region::unified,Region::host>(); copyfunction<Region::unified,Region::device>(); copyfunction<Region::unified,Region::pinned>(); copyfunction<Region::unified,Region::unified>(); } // python:key:function=ConstTest AssignementTest MemCopyTest DeviceSingleTest DeviceMultipleTest ConstDereferenceTest ConvertToConstTest IfStatementTest BoolConvertTest VoidTest BuddyOffSetTest CopyConstructorTest JoinOperatorTest DereferenceOperatorTest IndirectionOperatorTest EqualityComparableTest DefaultConstructionTest CopyAssignableTest NullablePointerTest BiderectionalTest RandomAccessTest // python:key:concurency=Single Threaded // python:template=TEST_F(HandleTest,|function||concurency|){this->|function||concurency|();}; // python:start // python:include=handle.test #include "handle.test" // python:end #undef HANDLE_THREADS
00fc979a37030bd4521e00ed062d42ec29aad4d2.cu
// free_list_test.cu #include <Handle.cpp> #include <location.cu> #include <gtest/gtest.h> #include "MacroUtilities.cpp" #include <cstddef> #include <type_traits> #define HANDLE_THREADS 8 using namespace Flamingo::Memory; class HandleTest : public ::testing::Test { protected: virtual void SetUp() { int offset = 0; std::size_t size = 8; Handle<int> handle_int(offset, size, base_ptr); handle = handle_int; } int base[2] = {0, 1}; int* base_ptr = base; Handle<int> handle; DEFINE(BuddyOffSetTest, HANDLE_THREADS) DEFINE(CopyConstructorTest, HANDLE_THREADS) DEFINE(JoinOperatorTest, HANDLE_THREADS) DEFINE(DereferenceOperatorTest, HANDLE_THREADS) DEFINE(IndirectionOperatorTest, HANDLE_THREADS) DEFINE(EqualityComparableTest, HANDLE_THREADS) DEFINE(DefaultConstructionTest, HANDLE_THREADS) DEFINE(CopyAssignableTest, HANDLE_THREADS) DEFINE(NullablePointerTest, HANDLE_THREADS) DEFINE(BiderectionalTest, HANDLE_THREADS) DEFINE(RandomAccessTest, HANDLE_THREADS) DEFINE(VoidTest, HANDLE_THREADS) DEFINE(BoolConvertTest, HANDLE_THREADS) DEFINE(IfStatementTest, HANDLE_THREADS) DEFINE(ConvertToConstTest, HANDLE_THREADS) DEFINE(ConstDereferenceTest, HANDLE_THREADS) DEFINE(DeviceSingleTest, HANDLE_THREADS) DEFINE(DeviceMultipleTest, HANDLE_THREADS) DEFINE(MemCopyTest, HANDLE_THREADS) DEFINE(AssignementTest,HANDLE_THREADS) DEFINE(ConstTest,HANDLE_THREADS) }; void HandleTest::ConstTest() { int A=10; const int* A_ptr=&A; Handle<const int> h(A_ptr); }; void HandleTest::ConstDereferenceTest() { const Handle<int> h(0, 0, base_ptr); *h; }; void HandleTest::BuddyOffSetTest() { int buddy = handle.buddy_offset(); EXPECT_EQ(8, buddy); }; void HandleTest::CopyConstructorTest() { Handle<int> handle_new(handle); Handle<int> handle_null(NULL); Handle<int> handle_null_copy(handle_null); Handle<int>* handle_ptr=&handle_null; Handle<int> handle_move(*handle_ptr); }; void HandleTest::JoinOperatorTest() { Handle<int> h_1(handle); Handle<int> h_2(handle); h_1._offset = 8; h_1.combine(h_2); EXPECT_EQ(h_1._offset, 0); EXPECT_EQ(h_1._size, 16); }; void HandleTest::DereferenceOperatorTest() { *base = 0; EXPECT_EQ(*handle, 0); *base = 1; EXPECT_EQ(*handle, 1); }; void HandleTest::IndirectionOperatorTest() { Handle<int>* handle_ptr = &handle; int offset = handle_ptr->_offset; EXPECT_EQ(offset, 0); }; void HandleTest::EqualityComparableTest() { Handle<int> handle_2(0, 2, base_ptr); bool test_1 = (handle_2 == handle); EXPECT_TRUE(test_1); Handle<int> handle_3(1, 2, base_ptr); bool test_2 = (handle_3 == handle); EXPECT_FALSE(test_2); double* base_ptr2 = new double; Handle<double> handle_4(0, 8, base_ptr2); bool test_3 = (handle_4 == handle); EXPECT_FALSE(test_3); }; void HandleTest::AssignementTest(){ Handle<int> A; Handle<int> B; A=B; } void HandleTest::DefaultConstructionTest() { Handle<int> handle_2(); }; void HandleTest::CopyAssignableTest() { Handle<int> handle_2 = handle; bool test = (handle_2 == handle); EXPECT_TRUE(test); }; void HandleTest::NullablePointerTest() { std::nullptr_t null; Handle<int> handle_2(null); bool test_1 = (handle_2 == null); EXPECT_TRUE(test_1); Handle<int> handle_3 = null; bool test_2 = (handle_3 == null); EXPECT_TRUE(test_2); EXPECT_FALSE(handle == null); EXPECT_FALSE(null == handle); EXPECT_TRUE(handle != null); EXPECT_TRUE(null != handle); }; void HandleTest::BiderectionalTest() { Handle<int> handle_2 = handle; handle_2++; EXPECT_TRUE(handle_2 != handle); handle_2--; EXPECT_TRUE(handle_2 == handle); --handle_2; ++handle_2; *handle_2++; EXPECT_EQ(*handle_2, 1); *handle_2--; EXPECT_EQ(*handle_2, 0); }; void 
HandleTest::RandomAccessTest() { Handle<int> handle_2 = handle; handle_2 += 1; EXPECT_EQ(*handle_2, 1); handle_2 -= 1; EXPECT_EQ(*handle_2, 0); Handle<int> handle_3 = handle + 1; EXPECT_EQ(*handle_3, 1); Handle<int> handle_4 = 1 + handle; EXPECT_EQ(*handle_4, 1); Handle<int> handle_5 = handle_4 - 1; EXPECT_EQ(*handle_5, 0); Handle<int> handle_6 = handle; handle_6 += 1; int dif = handle_6 - handle; EXPECT_EQ(dif, 1); EXPECT_TRUE(handle < handle_6); EXPECT_FALSE(handle > handle_6); EXPECT_TRUE(handle <= handle_6); EXPECT_FALSE(handle >= handle_6); EXPECT_EQ(handle[0], 0); EXPECT_EQ(handle[1], 1); }; void HandleTest::VoidTest() { Handle_void handle_void; } void HandleTest::BoolConvertTest() { int base_l[2] = {0, 1}; int* base_ptr_l = base_l; Handle<int> handle_1(base_ptr_l); if(handle_1){ EXPECT_TRUE(true); }else{ EXPECT_TRUE(false); } Handle<int> handle_2(NULL); if(handle_2){ EXPECT_TRUE(false); }else{ EXPECT_TRUE(true); } } void HandleTest::IfStatementTest() { bool True = true; True ? handle : 0; !True ? handle : 0; } void HandleTest::ConvertToConstTest() { bool convertable = std::is_convertible<Handle<int>, const Handle<int> >::value; EXPECT_TRUE(convertable); convertable = std::is_convertible<Handle<int>, const Handle<int> >::value; EXPECT_TRUE(convertable); }; __global__ void transferS(Handle<int> x, Handle<int> y) { *y = *x; }; void HandleTest::DeviceSingleTest() { int x = 2; int y = 0; int* x_d; int* y_d; cudaMalloc((void**)&x_d, sizeof(int)); cudaMalloc((void**)&y_d, sizeof(int)); cudaMemcpy(x_d, &x, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(y_d, &y, sizeof(int), cudaMemcpyHostToDevice); Handle<int> x_h(0, 1, x_d); Handle<int> y_h(0, 1, y_d); transferS << <1, 1>>> (x_h, y_h); cudaDeviceSynchronize(); cudaMemcpy(&y, y_d, sizeof(int), cudaMemcpyDeviceToHost); EXPECT_EQ(x, y); Handle<int> copy(x_h); cudaFree(x_d); cudaFree(y_d); } __global__ void transferM(Handle<int> x, Handle<int> y) { y[1] = x[1]; }; void HandleTest::DeviceMultipleTest() { int x[2] = {2, 3}; int y[2] = {0, 1}; int* x_d; int* y_d; int size = 2 * sizeof(int); cudaMalloc((void**)&x_d, size); cudaMalloc((void**)&y_d, size); cudaMemcpy(x_d, &x, size, cudaMemcpyHostToDevice); cudaMemcpy(y_d, &y, size, cudaMemcpyHostToDevice); Handle<int> x_h(0, 2, x_d); Handle<int> y_h(0, 2, y_d); transferM << <1, 1>>> (x_h, y_h); cudaDeviceSynchronize(); cudaMemcpy(&y, y_d, size, cudaMemcpyDeviceToHost); EXPECT_EQ(x[1], y[1]); EXPECT_FALSE(x[0] == y[0]); cudaFree(x_d); cudaFree(y_d); } template<Region SRC,Region DST> void copyfunction(){ typedef location<SRC> Src_Location; typedef location<DST> Dst_Location; Src_Location src_location; Dst_Location dst_location; int size = 3 * sizeof(int); int* x_d = static_cast<int*>(src_location.New(size)); int* y_d = static_cast<int*>(dst_location.New(size)); Handle<int> x_h(x_d); Handle<int> y_h(y_d); Src_Location::MemCopy(x_h, y_h, 3*sizeof(int) ); src_location.Delete(x_d); dst_location.Delete(y_d); } void HandleTest::MemCopyTest() { copyfunction<Region::host,Region::host>(); copyfunction<Region::host,Region::device>(); copyfunction<Region::host,Region::pinned>(); copyfunction<Region::host,Region::unified>(); copyfunction<Region::device,Region::host>(); copyfunction<Region::device,Region::device>(); copyfunction<Region::device,Region::pinned>(); copyfunction<Region::device,Region::unified>(); copyfunction<Region::pinned,Region::host>(); copyfunction<Region::pinned,Region::device>(); copyfunction<Region::pinned,Region::pinned>(); copyfunction<Region::pinned,Region::unified>(); 
copyfunction<Region::unified,Region::host>(); copyfunction<Region::unified,Region::device>(); copyfunction<Region::unified,Region::pinned>(); copyfunction<Region::unified,Region::unified>(); } // python:key:function=ConstTest AssignementTest MemCopyTest DeviceSingleTest DeviceMultipleTest ConstDereferenceTest ConvertToConstTest IfStatementTest BoolConvertTest VoidTest BuddyOffSetTest CopyConstructorTest JoinOperatorTest DereferenceOperatorTest IndirectionOperatorTest EqualityComparableTest DefaultConstructionTest CopyAssignableTest NullablePointerTest BiderectionalTest RandomAccessTest // python:key:concurency=Single Threaded // python:template=TEST_F(HandleTest,|function||concurency|){this->|function||concurency|();}; // python:start // python:include=handle.test #include "handle.test" // python:end #undef HANDLE_THREADS
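The Handle test pair above illustrates hipify's mechanical one-to-one renaming of runtime calls (cudaMalloc→hipMalloc, cudaMemcpy→hipMemcpy, cudaDeviceSynchronize→hipDeviceSynchronize, cudaFree→hipFree) while the triple-chevron kernel launches are left untouched, since hipcc accepts that syntax as well. The sketch below restates the DeviceSingleTest round-trip pattern as a standalone HIP program; the file name, kernel name and variable names are invented for illustration, and the project-specific Handle/location headers are deliberately avoided.

// minimal_roundtrip.hip.cpp -- illustrative sketch only; not part of the dataset files above.
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void copy_one(const int* src, int* dst) { *dst = *src; }

int main() {
    int h_in = 2, h_out = 0;
    int *d_in = nullptr, *d_out = nullptr;
    hipMalloc((void**)&d_in,  sizeof(int));                          // was cudaMalloc
    hipMalloc((void**)&d_out, sizeof(int));
    hipMemcpy(d_in, &h_in, sizeof(int), hipMemcpyHostToDevice);      // was cudaMemcpy / cudaMemcpyHostToDevice
    copy_one<<<1, 1>>>(d_in, d_out);                                 // triple-chevron launch is valid HIP too
    hipDeviceSynchronize();                                          // was cudaDeviceSynchronize
    hipMemcpy(&h_out, d_out, sizeof(int), hipMemcpyDeviceToHost);
    printf("%d\n", h_out);                                           // expected: 2
    hipFree(d_in);                                                   // was cudaFree
    hipFree(d_out);
    return 0;
}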
583618a6d79ff80ccf4526a5a48ec8e22b8d2eab.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <gtest/gtest.h> #include <algorithm> #include <vector> #include "paddle/fluid/framework/fleet/heter_ps/feature_value.h" #include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table.h" #include "paddle/fluid/framework/fleet/heter_ps/heter_comm.h" #include "paddle/fluid/framework/fleet/heter_ps/heter_resource.h" #include "paddle/fluid/framework/fleet/heter_ps/optimizer.cuh.h" #include "paddle/fluid/platform/cuda_device_guard.h" using namespace paddle::framework; void prepare_file(char file_name[], std::vector<std::string> data) { std::ofstream ofile; ofile.open(file_name); for (auto x : data) { ofile << x << std::endl; } ofile.close(); } char edge_file_name[] = "edges.txt"; TEST(TEST_FLEET, graph_sample) { std::vector<std::string> edges; int gpu_count = 3; std::vector<int> dev_ids; dev_ids.push_back(0); dev_ids.push_back(1); dev_ids.push_back(2); std::shared_ptr<HeterPsResource> resource = std::make_shared<HeterPsResource>(dev_ids); resource->enable_p2p(); GpuPsGraphTable g(resource); int node_count = 10; std::vector<std::vector<int64_t>> neighbors(node_count); int ind = 0; int64_t node_id = 0; // std::vector<GpuPsCommGraph> graph_list(gpu_count); while (ind < node_count) { int neighbor_size = ind + 1; while (neighbor_size--) { edges.push_back(std::to_string(ind) + "\t" + std::to_string(node_id) + "\t1.0"); node_id++; } ind++; } /* gpu 0: 0,3,6,9 gpu 1: 1,4,7 gpu 2: 2,5,8 query(2,6) returns nodes [6,9,1,4,7,2] */ ::paddle::distributed::GraphParameter table_proto; table_proto.set_gpups_mode(true); table_proto.set_shard_num(127); table_proto.set_gpu_num(3); table_proto.set_gpups_graph_sample_class("BasicBfsGraphSampler"); table_proto.set_gpups_graph_sample_args("100,5,5,1,1"); prepare_file(edge_file_name, edges); g.init_cpu_table(table_proto); g.load(std::string(edge_file_name), std::string("e>")); /* node x's neighbor list = [(1+x)*x/2,(1+x)*x/2 + 1,.....,(1+x)*x/2 + x] so node 6's neighbors are [21,22...,27] node 7's neighbors are [28,29,..35] node 0's neighbors are [0] query([7,0,6],sample_size=3) should return [28,29,30,0,x,x,21,22,23] 6 --index-->2 0 --index--->0 7 --index-->2 */ int64_t cpu_key[3] = {7, 0, 6}; void *key; hipMalloc((void **)&key, 3 * sizeof(int64_t)); hipMemcpy(key, cpu_key, 3 * sizeof(int64_t), hipMemcpyHostToDevice); auto neighbor_sample_res = g.graph_neighbor_sample(0, (int64_t *)key, 3, 3); int64_t *res = new int64_t[7]; /* hipMemcpy(res, neighbor_sample_res->val, 56, hipMemcpyDeviceToHost); std::sort(res, res + 3); std::sort(res + 4, res + 7); //int64_t expected_sample_val[] = {28, 29, 30, 0, -1, -1, 21, 22, 23}; int64_t expected_sample_val[] = {28, 29, 30, 0, 21, 22, 23}; for (int i = 0; i < 7; i++) { VLOG(0)<<i<<" "<<res[i]; if (expected_sample_val[i] != -1) { ASSERT_EQ(res[i], expected_sample_val[i]); } } delete[] res; delete neighbor_sample_res; */ hipMemcpy(res, 
neighbor_sample_res->val, 56, hipMemcpyDeviceToHost); int *actual_sample_size = new int[3]; hipMemcpy(actual_sample_size, neighbor_sample_res->actual_sample_size, 12, hipMemcpyDeviceToHost); // 3, 1, 3 int *cumsum_sample_size = new int[3]; hipMemcpy(cumsum_sample_size, neighbor_sample_res->offset, 12, hipMemcpyDeviceToHost); // 0, 3, 4 std::vector<std::vector<int64_t>> neighbors_; std::vector<int64_t> neighbors_7 = {28, 29, 30, 31, 32, 33, 34, 35}; std::vector<int64_t> neighbors_0 = {0}; std::vector<int64_t> neighbors_6 = {21, 22, 23, 24, 25, 26, 27}; neighbors_.push_back(neighbors_7); neighbors_.push_back(neighbors_0); neighbors_.push_back(neighbors_6); for (int i = 0; i < 3; i++) { for (int j = cumsum_sample_size[i]; j < cumsum_sample_size[i] + actual_sample_size[i]; j++) { bool flag = false; for (int k = 0; k < neighbors_[i].size(); k++) { if (res[j] == neighbors_[i][k]) { flag = true; break; } } ASSERT_EQ(flag, true); } } delete[] res; delete[] actual_sample_size; delete[] cumsum_sample_size; delete neighbor_sample_res; }
583618a6d79ff80ccf4526a5a48ec8e22b8d2eab.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <gtest/gtest.h> #include <algorithm> #include <vector> #include "paddle/fluid/framework/fleet/heter_ps/feature_value.h" #include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table.h" #include "paddle/fluid/framework/fleet/heter_ps/heter_comm.h" #include "paddle/fluid/framework/fleet/heter_ps/heter_resource.h" #include "paddle/fluid/framework/fleet/heter_ps/optimizer.cuh.h" #include "paddle/fluid/platform/cuda_device_guard.h" using namespace paddle::framework; void prepare_file(char file_name[], std::vector<std::string> data) { std::ofstream ofile; ofile.open(file_name); for (auto x : data) { ofile << x << std::endl; } ofile.close(); } char edge_file_name[] = "edges.txt"; TEST(TEST_FLEET, graph_sample) { std::vector<std::string> edges; int gpu_count = 3; std::vector<int> dev_ids; dev_ids.push_back(0); dev_ids.push_back(1); dev_ids.push_back(2); std::shared_ptr<HeterPsResource> resource = std::make_shared<HeterPsResource>(dev_ids); resource->enable_p2p(); GpuPsGraphTable g(resource); int node_count = 10; std::vector<std::vector<int64_t>> neighbors(node_count); int ind = 0; int64_t node_id = 0; // std::vector<GpuPsCommGraph> graph_list(gpu_count); while (ind < node_count) { int neighbor_size = ind + 1; while (neighbor_size--) { edges.push_back(std::to_string(ind) + "\t" + std::to_string(node_id) + "\t1.0"); node_id++; } ind++; } /* gpu 0: 0,3,6,9 gpu 1: 1,4,7 gpu 2: 2,5,8 query(2,6) returns nodes [6,9,1,4,7,2] */ ::paddle::distributed::GraphParameter table_proto; table_proto.set_gpups_mode(true); table_proto.set_shard_num(127); table_proto.set_gpu_num(3); table_proto.set_gpups_graph_sample_class("BasicBfsGraphSampler"); table_proto.set_gpups_graph_sample_args("100,5,5,1,1"); prepare_file(edge_file_name, edges); g.init_cpu_table(table_proto); g.load(std::string(edge_file_name), std::string("e>")); /* node x's neighbor list = [(1+x)*x/2,(1+x)*x/2 + 1,.....,(1+x)*x/2 + x] so node 6's neighbors are [21,22...,27] node 7's neighbors are [28,29,..35] node 0's neighbors are [0] query([7,0,6],sample_size=3) should return [28,29,30,0,x,x,21,22,23] 6 --index-->2 0 --index--->0 7 --index-->2 */ int64_t cpu_key[3] = {7, 0, 6}; void *key; cudaMalloc((void **)&key, 3 * sizeof(int64_t)); cudaMemcpy(key, cpu_key, 3 * sizeof(int64_t), cudaMemcpyHostToDevice); auto neighbor_sample_res = g.graph_neighbor_sample(0, (int64_t *)key, 3, 3); int64_t *res = new int64_t[7]; /* cudaMemcpy(res, neighbor_sample_res->val, 56, cudaMemcpyDeviceToHost); std::sort(res, res + 3); std::sort(res + 4, res + 7); //int64_t expected_sample_val[] = {28, 29, 30, 0, -1, -1, 21, 22, 23}; int64_t expected_sample_val[] = {28, 29, 30, 0, 21, 22, 23}; for (int i = 0; i < 7; i++) { VLOG(0)<<i<<" "<<res[i]; if (expected_sample_val[i] != -1) { ASSERT_EQ(res[i], expected_sample_val[i]); } } delete[] res; delete neighbor_sample_res; */ cudaMemcpy(res, neighbor_sample_res->val, 56, cudaMemcpyDeviceToHost); int 
*actual_sample_size = new int[3]; cudaMemcpy(actual_sample_size, neighbor_sample_res->actual_sample_size, 12, cudaMemcpyDeviceToHost); // 3, 1, 3 int *cumsum_sample_size = new int[3]; cudaMemcpy(cumsum_sample_size, neighbor_sample_res->offset, 12, cudaMemcpyDeviceToHost); // 0, 3, 4 std::vector<std::vector<int64_t>> neighbors_; std::vector<int64_t> neighbors_7 = {28, 29, 30, 31, 32, 33, 34, 35}; std::vector<int64_t> neighbors_0 = {0}; std::vector<int64_t> neighbors_6 = {21, 22, 23, 24, 25, 26, 27}; neighbors_.push_back(neighbors_7); neighbors_.push_back(neighbors_0); neighbors_.push_back(neighbors_6); for (int i = 0; i < 3; i++) { for (int j = cumsum_sample_size[i]; j < cumsum_sample_size[i] + actual_sample_size[i]; j++) { bool flag = false; for (int k = 0; k < neighbors_[i].size(); k++) { if (res[j] == neighbors_[i][k]) { flag = true; break; } } ASSERT_EQ(flag, true); } } delete[] res; delete[] actual_sample_size; delete[] cumsum_sample_size; delete neighbor_sample_res; }
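As in the previous pair, the conversion above only renames runtime symbols; the hand-computed byte counts (56 for seven int64_t values, 12 for three ints) carry over unchanged. The hedged sketch below shows the same device-to-host copies with the sizes written as sizeof arithmetic instead of literals; all names are hypothetical and none of the GpuPsGraphTable API is used.

// size_arithmetic.hip.cpp -- illustrative sketch only.
#include <hip/hip_runtime.h>
#include <cstdint>
#include <cstdio>

int main() {
    const int n_keys = 3, n_vals = 7;
    int64_t* d_vals  = nullptr;
    int*     d_sizes = nullptr;
    hipMalloc((void**)&d_vals,  n_vals * sizeof(int64_t));   // 56 bytes, the literal used above
    hipMalloc((void**)&d_sizes, n_keys * sizeof(int));       // 12 bytes, the literal used above
    hipMemset(d_vals,  0, n_vals * sizeof(int64_t));
    hipMemset(d_sizes, 0, n_keys * sizeof(int));

    int64_t h_vals[n_vals];
    int     h_sizes[n_keys];
    hipMemcpy(h_vals,  d_vals,  n_vals * sizeof(int64_t), hipMemcpyDeviceToHost);
    hipMemcpy(h_sizes, d_sizes, n_keys * sizeof(int),     hipMemcpyDeviceToHost);
    printf("%lld %d\n", (long long)h_vals[0], h_sizes[0]);   // expected: 0 0 after the memsets
    hipFree(d_vals);
    hipFree(d_sizes);
    return 0;
}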
80d97739a8bceeb6f19531e4be092ff1550f835c.hip
// !!! This is a file automatically generated by hipify!!! #include "scan.cuh" #include "segmented_scan_helpers.cuh" #include "fill.cuh" #include <contrib/libs/cub/hipcub/hipcub.hpp> namespace NKernel { template<typename T> hipError_t ScanVector(const T* input, T* output, uint size, bool inclusive, TScanKernelContext<T>& context, TCudaStream stream) { using TKernelContext = TScanKernelContext<T>; if (inclusive) { return hipcub::DeviceScan::InclusiveSum(context.PartResults, context.NumParts, input, output, size, stream); } else { return hipcub::DeviceScan::ExclusiveSum(context.PartResults, context.NumParts, input, output, size, stream); } } template <class T> struct TToSignedConversion { using TSignedType = T; }; template <> struct TToSignedConversion<ui32> { using TSignedType = int; }; template<typename T_> hipError_t SegmentedScanNonNegativeVector(const T_* input, T_* output, ui32 size, bool inclusive, TScanKernelContext<T_>& context, TCudaStream stream) { using TKernelContext = TScanKernelContext<T_>; using T = typename TToSignedConversion<T_>::TSignedType; T zeroValue = 0.0f; if (inclusive) { return hipcub::DeviceScan::InclusiveScan((T*)context.PartResults, context.NumParts, (const T*)input, (T*)output, TNonNegativeSegmentedSum(), size, stream); } else { return hipcub::DeviceScan::ExclusiveScan((T*)context.PartResults, context.NumParts, (const T*) input, (T*)output, TNonNegativeSegmentedSum(), zeroValue, size, stream); } } template<typename T_> hipError_t SegmentedScanAndScatterNonNegativeVector(const T_* input, const ui32* indices, T_* output, ui32 size, bool inclusive, TScanKernelContext<T_>& context, TCudaStream stream) { using TKernelContext = TScanKernelContext<T_>; using T = typename TToSignedConversion<T_>::TSignedType; if (inclusive) { TNonNegativeSegmentedScanOutputIterator<cub::STORE_CS, T, ptrdiff_t, true> outputIterator((T*)output, indices, indices + size); return hipcub::DeviceScan::InclusiveScan((T*)context.PartResults, context.NumParts, (const T*)input, outputIterator, TNonNegativeSegmentedSum(), size, stream); } else { TNonNegativeSegmentedScanOutputIterator<cub::STORE_CS, T, ptrdiff_t, false> outputIterator((T*)output, indices, indices + size); FillBuffer<T>((T*)output, 0, size, stream); return hipcub::DeviceScan::InclusiveScan((T*)context.PartResults, context.NumParts, (const T*) input, outputIterator, TNonNegativeSegmentedSum(), size, stream); } } template <class T> ui64 ScanVectorTempSize(ui32 size, bool inclusive) { ui64 sizeInBytes = 0; if (inclusive) { hipcub::DeviceScan::InclusiveSum<const T*, T*>(nullptr, sizeInBytes, nullptr, nullptr, size); } else { hipcub::DeviceScan::ExclusiveSum<const T*, T*>(nullptr, sizeInBytes, nullptr, nullptr, size); } return sizeInBytes; } template ui64 ScanVectorTempSize<int>(ui32, bool); template ui64 ScanVectorTempSize<ui32>(ui32, bool); template ui64 ScanVectorTempSize<float>(ui32, bool); template ui64 ScanVectorTempSize<double>(ui32, bool); template hipError_t ScanVector<int>(const int *input, int *output, uint size, bool inclusive, TScanKernelContext<int>& context, TCudaStream stream); template hipError_t ScanVector<uint>(const uint *input, uint *output, uint size, bool inclusive, TScanKernelContext<uint>& context, TCudaStream stream); template hipError_t ScanVector<float>(const float *input, float *output, uint size, bool inclusive, TScanKernelContext<float>& context, TCudaStream stream); template hipError_t SegmentedScanNonNegativeVector<float>(const float *input, float *output, ui32 size, bool inclusive, 
TScanKernelContext<float>& context, TCudaStream stream); template hipError_t SegmentedScanNonNegativeVector<int>(const int *input, int *output, ui32 size, bool inclusive, TScanKernelContext<int>& context, TCudaStream stream); template hipError_t SegmentedScanNonNegativeVector<ui32>(const ui32 *input, ui32 *output, ui32 size, bool inclusive, TScanKernelContext<ui32>& context, TCudaStream stream); template hipError_t ScanVector<double>(const double *input, double *output, uint size, bool inclusive, TScanKernelContext<double>& context, TCudaStream stream); template hipError_t SegmentedScanAndScatterNonNegativeVector<float>(const float *input, const ui32* indices, float *output, ui32 size, bool inclusive, TScanKernelContext<float>& context, TCudaStream stream); template hipError_t SegmentedScanAndScatterNonNegativeVector<int>(const int *input, const ui32* indices, int *output, ui32 size, bool inclusive, TScanKernelContext<int>& context, TCudaStream stream); template hipError_t SegmentedScanAndScatterNonNegativeVector<ui32>(const ui32 *input, const ui32* indices, ui32 *output, ui32 size, bool inclusive, TScanKernelContext<ui32>& context, TCudaStream stream); }
80d97739a8bceeb6f19531e4be092ff1550f835c.cu
#include "scan.cuh" #include "segmented_scan_helpers.cuh" #include "fill.cuh" #include <contrib/libs/cub/cub/device/device_scan.cuh> namespace NKernel { template<typename T> cudaError_t ScanVector(const T* input, T* output, uint size, bool inclusive, TScanKernelContext<T>& context, TCudaStream stream) { using TKernelContext = TScanKernelContext<T>; if (inclusive) { return cub::DeviceScan::InclusiveSum(context.PartResults, context.NumParts, input, output, size, stream); } else { return cub::DeviceScan::ExclusiveSum(context.PartResults, context.NumParts, input, output, size, stream); } } template <class T> struct TToSignedConversion { using TSignedType = T; }; template <> struct TToSignedConversion<ui32> { using TSignedType = int; }; template<typename T_> cudaError_t SegmentedScanNonNegativeVector(const T_* input, T_* output, ui32 size, bool inclusive, TScanKernelContext<T_>& context, TCudaStream stream) { using TKernelContext = TScanKernelContext<T_>; using T = typename TToSignedConversion<T_>::TSignedType; T zeroValue = 0.0f; if (inclusive) { return cub::DeviceScan::InclusiveScan((T*)context.PartResults, context.NumParts, (const T*)input, (T*)output, TNonNegativeSegmentedSum(), size, stream); } else { return cub::DeviceScan::ExclusiveScan((T*)context.PartResults, context.NumParts, (const T*) input, (T*)output, TNonNegativeSegmentedSum(), zeroValue, size, stream); } } template<typename T_> cudaError_t SegmentedScanAndScatterNonNegativeVector(const T_* input, const ui32* indices, T_* output, ui32 size, bool inclusive, TScanKernelContext<T_>& context, TCudaStream stream) { using TKernelContext = TScanKernelContext<T_>; using T = typename TToSignedConversion<T_>::TSignedType; if (inclusive) { TNonNegativeSegmentedScanOutputIterator<cub::STORE_CS, T, ptrdiff_t, true> outputIterator((T*)output, indices, indices + size); return cub::DeviceScan::InclusiveScan((T*)context.PartResults, context.NumParts, (const T*)input, outputIterator, TNonNegativeSegmentedSum(), size, stream); } else { TNonNegativeSegmentedScanOutputIterator<cub::STORE_CS, T, ptrdiff_t, false> outputIterator((T*)output, indices, indices + size); FillBuffer<T>((T*)output, 0, size, stream); return cub::DeviceScan::InclusiveScan((T*)context.PartResults, context.NumParts, (const T*) input, outputIterator, TNonNegativeSegmentedSum(), size, stream); } } template <class T> ui64 ScanVectorTempSize(ui32 size, bool inclusive) { ui64 sizeInBytes = 0; if (inclusive) { cub::DeviceScan::InclusiveSum<const T*, T*>(nullptr, sizeInBytes, nullptr, nullptr, size); } else { cub::DeviceScan::ExclusiveSum<const T*, T*>(nullptr, sizeInBytes, nullptr, nullptr, size); } return sizeInBytes; } template ui64 ScanVectorTempSize<int>(ui32, bool); template ui64 ScanVectorTempSize<ui32>(ui32, bool); template ui64 ScanVectorTempSize<float>(ui32, bool); template ui64 ScanVectorTempSize<double>(ui32, bool); template cudaError_t ScanVector<int>(const int *input, int *output, uint size, bool inclusive, TScanKernelContext<int>& context, TCudaStream stream); template cudaError_t ScanVector<uint>(const uint *input, uint *output, uint size, bool inclusive, TScanKernelContext<uint>& context, TCudaStream stream); template cudaError_t ScanVector<float>(const float *input, float *output, uint size, bool inclusive, TScanKernelContext<float>& context, TCudaStream stream); template cudaError_t SegmentedScanNonNegativeVector<float>(const float *input, float *output, ui32 size, bool inclusive, TScanKernelContext<float>& context, TCudaStream stream); template cudaError_t 
SegmentedScanNonNegativeVector<int>(const int *input, int *output, ui32 size, bool inclusive, TScanKernelContext<int>& context, TCudaStream stream); template cudaError_t SegmentedScanNonNegativeVector<ui32>(const ui32 *input, ui32 *output, ui32 size, bool inclusive, TScanKernelContext<ui32>& context, TCudaStream stream); template cudaError_t ScanVector<double>(const double *input, double *output, uint size, bool inclusive, TScanKernelContext<double>& context, TCudaStream stream); template cudaError_t SegmentedScanAndScatterNonNegativeVector<float>(const float *input, const ui32* indices, float *output, ui32 size, bool inclusive, TScanKernelContext<float>& context, TCudaStream stream); template cudaError_t SegmentedScanAndScatterNonNegativeVector<int>(const int *input, const ui32* indices, int *output, ui32 size, bool inclusive, TScanKernelContext<int>& context, TCudaStream stream); template cudaError_t SegmentedScanAndScatterNonNegativeVector<ui32>(const ui32 *input, const ui32* indices, ui32 *output, ui32 size, bool inclusive, TScanKernelContext<ui32>& context, TCudaStream stream); }
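In this pair the interesting change is the CUB dependency: the include path is rewritten to the project's hipCUB copy and cub::DeviceScan becomes hipcub::DeviceScan, while helpers such as cub::STORE_CS keep their original namespace in the generated file. The sketch below shows the usual two-phase temp-storage idiom that ScanVectorTempSize relies on, written against hipCUB's standard <hipcub/hipcub.hpp> umbrella header; the file name and buffer names are assumptions.

// scan_with_hipcub.hip.cpp -- illustrative sketch only.
#include <hipcub/hipcub.hpp>
#include <hip/hip_runtime.h>
#include <cstdio>

int main() {
    const int n = 8;
    int h_in[n] = {1, 1, 1, 1, 1, 1, 1, 1};
    int *d_in = nullptr, *d_out = nullptr;
    hipMalloc((void**)&d_in,  n * sizeof(int));
    hipMalloc((void**)&d_out, n * sizeof(int));
    hipMemcpy(d_in, h_in, n * sizeof(int), hipMemcpyHostToDevice);

    void*  d_temp     = nullptr;
    size_t temp_bytes = 0;
    // First call only queries the temporary-storage size (the nullptr pattern used by ScanVectorTempSize above).
    hipcub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_in, d_out, n);
    hipMalloc(&d_temp, temp_bytes);
    // Second call performs the scan.
    hipcub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_in, d_out, n);

    int h_out[n];
    hipMemcpy(h_out, d_out, n * sizeof(int), hipMemcpyDeviceToHost);
    printf("%d\n", h_out[n - 1]);   // expected: 8
    hipFree(d_temp);
    hipFree(d_in);
    hipFree(d_out);
    return 0;
}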
d37660ef960b2778619beec5c5ddd79951405316.hip
// !!! This is a file automatically generated by hipify!!! #include "../CUDA/CUDA_func.h" #include "UpscaleLayer.h" using namespace std; namespace NN { namespace Layers { UpscaleLayer::UpscaleLayer(std::vector<int> dependencies, int input_width, int input_height, int input_depth, int filter_width, int filter_height, int filter_depth) { this->dependencies = dependencies; this->input_width = input_width; this->input_height = input_height; this->input_depth = input_depth; this->filter_width = filter_width; this->filter_height = filter_height; this->filter_depth = filter_depth; output_width = input_width * filter_width; output_height = input_height * filter_height; output_depth = input_depth * filter_depth; output_size = output_width * output_height * output_depth; hipMallocManaged(&output, output_size * sizeof(float)); hipMallocManaged(&output_gradient, output_size * sizeof(float)); hipMemset(output, 0.0f, output_size * sizeof(float)); hipMemset(output_gradient, 0.0f, output_size * sizeof(float)); } void UpscaleLayer::compute() { float filter_size = filter_width * filter_height * filter_depth; int block_size = (output_size + 511) / 512; NN::CUDA::compute_upscale_layer << <block_size, 512 >> > (input, output, input_width, input_height, input_depth, filter_width, filter_height, filter_depth, output_width, output_height, output_depth, output_size); hipDeviceSynchronize(); } void UpscaleLayer::backpropagate() { int input_size = input_width * input_height * input_depth; float filter_size = filter_width * filter_height * filter_depth; int block_size = (input_size + 511) / 512; NN::CUDA::backprop_upscale_layer << <block_size, 512 >> > (input, input_gradient, output_gradient, input_width, input_height, input_depth, filter_width, filter_height, filter_depth, output_width, output_height, output_depth, input_size); hipDeviceSynchronize(); } int UpscaleLayer::get_parameters_size() { return 0; } void UpscaleLayer::update_dependencies(vector<NN::Layers::Layer*> layer_dependencies) { input = layer_dependencies[0]->get_output_iterator(); input_gradient = layer_dependencies[0]->get_output_gradient_iterator(); } void UpscaleLayer::save(NN::File& file) { int id = 8; file.save(id); save_dependencies(file); file.save(input_width); file.save(input_height); file.save(input_depth); file.save(filter_width); file.save(filter_height); file.save(filter_depth); }; void UpscaleLayer::load(NN::File& file) { load_dependencies(file); file.load(input_width); file.load(input_height); file.load(input_depth); file.load(filter_width); file.load(filter_height); file.load(filter_depth); output_width = input_width * filter_width; output_height = input_height * filter_height; output_depth = input_depth * filter_depth; output_size = output_width * output_height * output_depth; hipMallocManaged(&output, output_size * sizeof(float)); hipMallocManaged(&output_gradient, output_size * sizeof(float)); hipMemset(output, 0.0f, output_size * sizeof(float)); hipMemset(output_gradient, 0.0f, output_size * sizeof(float)); }; UpscaleLayer::~UpscaleLayer() = default; } }
d37660ef960b2778619beec5c5ddd79951405316.cu
#include "../CUDA/CUDA_func.h" #include "UpscaleLayer.h" using namespace std; namespace NN { namespace Layers { UpscaleLayer::UpscaleLayer(std::vector<int> dependencies, int input_width, int input_height, int input_depth, int filter_width, int filter_height, int filter_depth) { this->dependencies = dependencies; this->input_width = input_width; this->input_height = input_height; this->input_depth = input_depth; this->filter_width = filter_width; this->filter_height = filter_height; this->filter_depth = filter_depth; output_width = input_width * filter_width; output_height = input_height * filter_height; output_depth = input_depth * filter_depth; output_size = output_width * output_height * output_depth; cudaMallocManaged(&output, output_size * sizeof(float)); cudaMallocManaged(&output_gradient, output_size * sizeof(float)); cudaMemset(output, 0.0f, output_size * sizeof(float)); cudaMemset(output_gradient, 0.0f, output_size * sizeof(float)); } void UpscaleLayer::compute() { float filter_size = filter_width * filter_height * filter_depth; int block_size = (output_size + 511) / 512; NN::CUDA::compute_upscale_layer << <block_size, 512 >> > (input, output, input_width, input_height, input_depth, filter_width, filter_height, filter_depth, output_width, output_height, output_depth, output_size); cudaDeviceSynchronize(); } void UpscaleLayer::backpropagate() { int input_size = input_width * input_height * input_depth; float filter_size = filter_width * filter_height * filter_depth; int block_size = (input_size + 511) / 512; NN::CUDA::backprop_upscale_layer << <block_size, 512 >> > (input, input_gradient, output_gradient, input_width, input_height, input_depth, filter_width, filter_height, filter_depth, output_width, output_height, output_depth, input_size); cudaDeviceSynchronize(); } int UpscaleLayer::get_parameters_size() { return 0; } void UpscaleLayer::update_dependencies(vector<NN::Layers::Layer*> layer_dependencies) { input = layer_dependencies[0]->get_output_iterator(); input_gradient = layer_dependencies[0]->get_output_gradient_iterator(); } void UpscaleLayer::save(NN::File& file) { int id = 8; file.save(id); save_dependencies(file); file.save(input_width); file.save(input_height); file.save(input_depth); file.save(filter_width); file.save(filter_height); file.save(filter_depth); }; void UpscaleLayer::load(NN::File& file) { load_dependencies(file); file.load(input_width); file.load(input_height); file.load(input_depth); file.load(filter_width); file.load(filter_height); file.load(filter_depth); output_width = input_width * filter_width; output_height = input_height * filter_height; output_depth = input_depth * filter_depth; output_size = output_width * output_height * output_depth; cudaMallocManaged(&output, output_size * sizeof(float)); cudaMallocManaged(&output_gradient, output_size * sizeof(float)); cudaMemset(output, 0.0f, output_size * sizeof(float)); cudaMemset(output_gradient, 0.0f, output_size * sizeof(float)); }; UpscaleLayer::~UpscaleLayer() = default; } }
9bfb39adb4c306c3aaaceba16314476681f39112.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** Please check example 07 and 08 for the basics of tensor op gemm kernels. On NVIDIA Ampere architecture, most concept still holds. The two main differences are 1. NVIDIA Ampere architecture introduces a new series of tensor core instructions (see include/cutlass/arch/mma_sm80.h) which are more efficient on Ampere. 2. NVIDIA Ampere architecture uses cp_async() to build multistage software pipeline to better hide latency (see include/cutlass/gemm/threadblock/mma_multistage.h) Moreover, NVIDIA Ampere architecture starts supporting tfloat32 (see include/cutlass/tfloat32.h) data types in tensor cores. One big advantage is that we can load in fp32 data and convert them implicitly to tf32 inside the GEMM kernel which means no change is needed to accelerate traditional fp32 data by using NVIDIA Ampere architecture. 
*/ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" #include <cutlass/numeric_types.h> #define NUM_PROFILE 200 // #define BIT_WIDTH 16 // #define BIT_WIDTH 8 #define BIT_WIDTH 4 // #define BIT_WIDTH 1 #if BIT_WIDTH == 32 typedef float input_t; typedef float output_t; #elif BIT_WIDTH == 16 typedef cutlass::half_t input_t; typedef cutlass::half_t output_t; #elif BIT_WIDTH == 8 typedef int8_t input_t; typedef int32_t output_t; #elif BIT_WIDTH == 4 typedef cutlass::int4b_t input_t; typedef int32_t output_t; #elif BIT_WIDTH == 1 typedef cutlass::uint1b_t input_t; typedef int32_t output_t; #endif // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = output_t; // <- data type of accumulator using ElementComputeEpilogue = output_t; // <- data type of epilogue operations using ElementInputA = input_t; // <- data type of elements in input matrix A using ElementInputB = input_t; // <- data type of elements in input matrix B using ElementOutput = output_t; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. Column Major for // Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::RowMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; //-------------full precision CUDA core (PASS) -------------------- #if BIT_WIDTH == 32 using Element = float; using Gemm = cutlass::gemm::device::Gemm< Element, cutlass::layout::RowMajor, Element, cutlass::layout::ColumnMajor, Element, cutlass::layout::RowMajor, Element, cutlass::arch::OpClassSimt, cutlass::arch::Sm80, cutlass::gemm::GemmShape<32, 64, 8>, cutlass::gemm::GemmShape<32, 64, 8>, cutlass::gemm::GemmShape<1, 1, 1>, cutlass::epilogue::thread::LinearCombination< Element, 1, Element, Element>, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4 >; //-------------half precision Tensor core (PASS) -------------------- #elif BIT_WIDTH == 16 using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using Gemm = cutlass::gemm::device::Gemm< cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<64, 32, 32>, cutlass::gemm::GemmShape<16, 8, 8>, cutlass::epilogue::thread::LinearCombination< ElementOutput, 64 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2 >; //-------------INT-8 Tensor core (PASS) -------------------- #elif BIT_WIDTH == 8 using ElementOutput = int32_t; using ElementAccumulator = int32_t; using ElementCompute = int32_t; using Gemm = cutlass::gemm::device::Gemm< int8_t, cutlass::layout::RowMajor, int8_t, cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<64, 64, 64>, 
cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 32>, cutlass::epilogue::thread::LinearCombinationClamp< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementCompute>, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; //-------------INT-4 Tensor core (PASS) -------------------- #elif BIT_WIDTH == 4 using ElementOutput = int32_t; using ElementAccumulator = int32_t; using ElementCompute = int32_t; using Gemm = cutlass::gemm::device::Gemm< cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<128, 256, 128>, cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<8, 8, 32>, cutlass::epilogue::thread::LinearCombinationClamp< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementCompute >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2 >; //-------------INT-1 Tensor core (PASS)-------------------- #elif BIT_WIDTH == 1 using ElementOutput = int32_t; using ElementAccumulator = int32_t; using ElementCompute = int32_t; const int pipe_stages = 4; using Gemm = cutlass::gemm::device::Gemm< cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, // RTX3090 setting for block, warp, and mma shape cutlass::gemm::GemmShape<128, 256, 512>, cutlass::gemm::GemmShape<64, 64, 512>, cutlass::gemm::GemmShape<8, 8, 128>, // A100 setting for block, warp, and mma shape // cutlass::gemm::GemmShape<256, 128, 1024>, // cutlass::gemm::GemmShape<64, 64, 1024>, // cutlass::gemm::GemmShape<16, 8, 256>, cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementCompute>, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, pipe_stages, 128, 128, false, cutlass::arch::OpXorPopc>; #endif int run(int M, int N, int K) { int length_m = M; int length_n = N; int length_k = K; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Initialize CUTLASS kernel with arguments and workspace pointer cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); // Launch initialized CUTLASS kernel for(int trial = 0; trial < NUM_PROFILE; trial++) { // printf("[%d]\n", trial); status = gemm_op(); CUTLASS_CHECK(status); } hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("CUTLASS-GEMM (%d-bit). M: %6d, N: %6d, K: %6d,\t Time (ms): %.2f, TOPS: %4.2f\t\n", BIT_WIDTH, M, N, K, milliseconds/NUM_PROFILE, static_cast<double>(NUM_PROFILE*(static_cast<double>(M)*N*K*2) / (milliseconds / 1000.)) / 1e12); /* // Create instantiation for device reference gemm kernel cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device; // Launch device reference gemm kernel gemm_device(problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), beta, tensor_c.device_ref(), tensor_ref_d.device_ref()); // Wait for kernels to finish hipDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); std::cout << (passed ? "Passed" : "Failed") << std::endl; // return (passed ? 0 : -1);*/ return 0; } int main(int argc, char* argv[]) { if (argc < 2){ printf("Usage: ./prog Dim\n"); return -1; } int M = atoi(argv[1]); int N = atoi(argv[2]); int K = atoi(argv[3]); bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } hipDeviceProp_t props; hipError_t error = hipGetDeviceProperties(&props, 0); if (error != hipSuccess) { std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl; return -1; } if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "Turing Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. 
return 0; } return run(M, N, K); }
9bfb39adb4c306c3aaaceba16314476681f39112.cu
/*************************************************************************************************** * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** Please check example 07 and 08 for the basics of tensor op gemm kernels. On NVIDIA Ampere architecture, most concept still holds. The two main differences are 1. NVIDIA Ampere architecture introduces a new series of tensor core instructions (see include/cutlass/arch/mma_sm80.h) which are more efficient on Ampere. 2. NVIDIA Ampere architecture uses cp_async() to build multistage software pipeline to better hide latency (see include/cutlass/gemm/threadblock/mma_multistage.h) Moreover, NVIDIA Ampere architecture starts supporting tfloat32 (see include/cutlass/tfloat32.h) data types in tensor cores. One big advantage is that we can load in fp32 data and convert them implicitly to tf32 inside the GEMM kernel which means no change is needed to accelerate traditional fp32 data by using NVIDIA Ampere architecture. 
*/ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" #include <cutlass/numeric_types.h> #define NUM_PROFILE 200 // #define BIT_WIDTH 16 // #define BIT_WIDTH 8 #define BIT_WIDTH 4 // #define BIT_WIDTH 1 #if BIT_WIDTH == 32 typedef float input_t; typedef float output_t; #elif BIT_WIDTH == 16 typedef cutlass::half_t input_t; typedef cutlass::half_t output_t; #elif BIT_WIDTH == 8 typedef int8_t input_t; typedef int32_t output_t; #elif BIT_WIDTH == 4 typedef cutlass::int4b_t input_t; typedef int32_t output_t; #elif BIT_WIDTH == 1 typedef cutlass::uint1b_t input_t; typedef int32_t output_t; #endif // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = output_t; // <- data type of accumulator using ElementComputeEpilogue = output_t; // <- data type of epilogue operations using ElementInputA = input_t; // <- data type of elements in input matrix A using ElementInputB = input_t; // <- data type of elements in input matrix B using ElementOutput = output_t; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. Column Major for // Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::RowMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; //-------------full precision CUDA core (PASS) -------------------- #if BIT_WIDTH == 32 using Element = float; using Gemm = cutlass::gemm::device::Gemm< Element, cutlass::layout::RowMajor, Element, cutlass::layout::ColumnMajor, Element, cutlass::layout::RowMajor, Element, cutlass::arch::OpClassSimt, cutlass::arch::Sm80, cutlass::gemm::GemmShape<32, 64, 8>, cutlass::gemm::GemmShape<32, 64, 8>, cutlass::gemm::GemmShape<1, 1, 1>, cutlass::epilogue::thread::LinearCombination< Element, 1, Element, Element>, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4 >; //-------------half precision Tensor core (PASS) -------------------- #elif BIT_WIDTH == 16 using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using Gemm = cutlass::gemm::device::Gemm< cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<64, 32, 32>, cutlass::gemm::GemmShape<16, 8, 8>, cutlass::epilogue::thread::LinearCombination< ElementOutput, 64 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2 >; //-------------INT-8 Tensor core (PASS) -------------------- #elif BIT_WIDTH == 8 using ElementOutput = int32_t; using ElementAccumulator = int32_t; using ElementCompute = int32_t; using Gemm = cutlass::gemm::device::Gemm< int8_t, cutlass::layout::RowMajor, int8_t, cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<64, 64, 64>, 
cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 32>, cutlass::epilogue::thread::LinearCombinationClamp< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementCompute>, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; //-------------INT-4 Tensor core (PASS) -------------------- #elif BIT_WIDTH == 4 using ElementOutput = int32_t; using ElementAccumulator = int32_t; using ElementCompute = int32_t; using Gemm = cutlass::gemm::device::Gemm< cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<128, 256, 128>, cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<8, 8, 32>, cutlass::epilogue::thread::LinearCombinationClamp< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementCompute >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2 >; //-------------INT-1 Tensor core (PASS)-------------------- #elif BIT_WIDTH == 1 using ElementOutput = int32_t; using ElementAccumulator = int32_t; using ElementCompute = int32_t; const int pipe_stages = 4; using Gemm = cutlass::gemm::device::Gemm< cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, // RTX3090 setting for block, warp, and mma shape cutlass::gemm::GemmShape<128, 256, 512>, cutlass::gemm::GemmShape<64, 64, 512>, cutlass::gemm::GemmShape<8, 8, 128>, // A100 setting for block, warp, and mma shape // cutlass::gemm::GemmShape<256, 128, 1024>, // cutlass::gemm::GemmShape<64, 64, 1024>, // cutlass::gemm::GemmShape<16, 8, 256>, cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementCompute>, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, pipe_stages, 128, 128, false, cutlass::arch::OpXorPopc>; #endif int run(int M, int N, int K) { int length_m = M; int length_n = N; int length_k = K; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Initialize CUTLASS kernel with arguments and workspace pointer cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); // Launch initialized CUTLASS kernel for(int trial = 0; trial < NUM_PROFILE; trial++) { // printf("[%d]\n", trial); status = gemm_op(); CUTLASS_CHECK(status); } cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("CUTLASS-GEMM (%d-bit). M: %6d, N: %6d, K: %6d,\t Time (ms): %.2f, TOPS: %4.2f\t\n", BIT_WIDTH, M, N, K, milliseconds/NUM_PROFILE, static_cast<double>(NUM_PROFILE*(static_cast<double>(M)*N*K*2) / (milliseconds / 1000.)) / 1e12); /* // Create instantiation for device reference gemm kernel cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device; // Launch device reference gemm kernel gemm_device(problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), beta, tensor_c.device_ref(), tensor_ref_d.device_ref()); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); std::cout << (passed ? "Passed" : "Failed") << std::endl; // return (passed ? 0 : -1);*/ return 0; } int main(int argc, char* argv[]) { if (argc < 2){ printf("Usage: ./prog Dim\n"); return -1; } int M = atoi(argv[1]); int N = atoi(argv[2]); int K = atoi(argv[3]); bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "Turing Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. 
return 0; } return run(M, N, K); }
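For reference, the TOPS figure printed by the benchmark above is just the classic 2·M·N·K operation count divided by the averaged kernel time. A small standalone helper (hypothetical, not part of the original file) that performs the same arithmetic:

// Throughput of a GEMM benchmark: 2*M*N*K operations per run, averaged over `trials` runs.
// `total_ms` is the event-timed wall time for all runs combined.
static double gemm_tops(int M, int N, int K, int trials, float total_ms) {
    const double ops_per_run = 2.0 * static_cast<double>(M) * N * K;  // one multiply + one add per MAC
    const double seconds_per_run = (total_ms / trials) / 1000.0;      // ms -> s, averaged over trials
    return ops_per_run / seconds_per_run / 1e12;                      // tera-operations per second
}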
982ef6d933942772fa2f00513fc57c00030c068c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <scalar.h> __device__ double op(double d1,double d2,double *params) { return d2 + d1; } extern "C" __global__ void add_scalar_double(int n, int idx,double dx,double *dy,int incx,double *params,double *result) { transform(n,idx,dx,dy,incx,params,result); }
982ef6d933942772fa2f00513fc57c00030c068c.cu
#include <scalar.h> __device__ double op(double d1,double d2,double *params) { return d2 + d1; } extern "C" __global__ void add_scalar_double(int n, int idx,double dx,double *dy,int incx,double *params,double *result) { transform(n,idx,dx,dy,incx,params,result); }
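A minimal host-side sketch of how a kernel with this signature might be launched. The element-wise semantics live in transform() from scalar.h, which is not shown here, so the grid size and the unused idx/params arguments are assumptions:

#include <cuda_runtime.h>

extern "C" __global__ void add_scalar_double(int n, int idx, double dx, double *dy,
                                             int incx, double *params, double *result);

int main(void) {
    const int n = 1 << 20;
    double *dy = nullptr, *result = nullptr;
    cudaMalloc(&dy, n * sizeof(double));
    cudaMalloc(&result, n * sizeof(double));
    cudaMemset(dy, 0, n * sizeof(double));
    // op(d1, d2) returns d2 + d1, so dx is the scalar added to every element of dy.
    add_scalar_double<<<(n + 255) / 256, 256>>>(n, /*idx=*/0, /*dx=*/3.0, dy,
                                                /*incx=*/1, /*params=*/nullptr, result);
    cudaDeviceSynchronize();
    cudaFree(dy);
    cudaFree(result);
    return 0;
}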
4b3b89acb921997339565afa29615005189ec01d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/magma_zmconjugate.cu, normal z -> d, Wed Jan 2 14:18:54 2019 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 __global__ void magma_dmconjugate_kernel( int num_rows, magma_index_t *rowptr, double *values ) { int row = blockDim.x * blockIdx.x + threadIdx.x; if(row < num_rows ){ for( int i = rowptr[row]; i < rowptr[row+1]; i++){ values[i] = MAGMA_D_CONJ( values[i] ); } } } /** Purpose ------- This function conjugates a matrix. For a real matrix, no value is changed. Arguments --------- @param[in,out] A magma_d_matrix* input/output matrix @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_daux ********************************************************************/ extern "C" magma_int_t magma_dmconjugate( magma_d_matrix *A, magma_queue_t queue ) { magma_int_t info = 0; dim3 grid( magma_ceildiv( A->num_rows, BLOCK_SIZE )); hipLaunchKernelGGL(( magma_dmconjugate_kernel), dim3(grid), dim3(BLOCK_SIZE), 0, queue->cuda_stream() , A->num_rows, A->drow, A->dval ); return info; }
4b3b89acb921997339565afa29615005189ec01d.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/magma_zmconjugate.cu, normal z -> d, Wed Jan 2 14:18:54 2019 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 __global__ void magma_dmconjugate_kernel( int num_rows, magma_index_t *rowptr, double *values ) { int row = blockDim.x * blockIdx.x + threadIdx.x; if(row < num_rows ){ for( int i = rowptr[row]; i < rowptr[row+1]; i++){ values[i] = MAGMA_D_CONJ( values[i] ); } } } /** Purpose ------- This function conjugates a matrix. For a real matrix, no value is changed. Arguments --------- @param[in,out] A magma_d_matrix* input/output matrix @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_daux ********************************************************************/ extern "C" magma_int_t magma_dmconjugate( magma_d_matrix *A, magma_queue_t queue ) { magma_int_t info = 0; dim3 grid( magma_ceildiv( A->num_rows, BLOCK_SIZE )); magma_dmconjugate_kernel<<< grid, BLOCK_SIZE, 0, queue->cuda_stream() >>> ( A->num_rows, A->drow, A->dval ); return info; }
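The header notes this file was generated from magma_zmconjugate.cu (z -> d), where the conjugation actually does work. A hedged sketch of that complex analogue in plain CUDA, using cuDoubleComplex in place of MAGMA's magmaDoubleComplex and MAGMA_Z_CONJ:

#include <cuComplex.h>

// One thread per CSR row: conjugate every stored value of that row.
__global__ void zmconjugate_kernel_sketch(int num_rows, const int *rowptr, cuDoubleComplex *values)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row < num_rows) {
        for (int i = rowptr[row]; i < rowptr[row + 1]; i++)
            values[i] = cuConj(values[i]);   // in the real (d) version above this is a no-op
    }
}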
1d9a4beb030e20bacd668f273ab51a6eb12c9628.hip
// !!! This is a file automatically generated by hipify!!! /** * ___ _ _ ___ _ _ ___ ___ ___ ___ * / __| | | | \ /_\ | | ___| _ ) __/ __/ __| * | (__| |_| | |) / _ \ | |_|___| _ \ _| (_ \__ \ * \___|\___/|___/_/ \_\ |____| |___/_| \___|___/ * 2012 * by Jens Wetzl ([email protected]) * and Oliver Taubmann ([email protected]) * * This work is licensed under a Creative Commons * Attribution 3.0 Unported License. (CC-BY) * http://creativecommons.org/licenses/by/3.0/ * * File lbfgs.cu: Implementation of class lbfgs (except cpu_lbfgs). * **/ #include "lbfgs.h" #include "timer.h" #include <iostream> #include <limits> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <device_launch_parameters.h> #include <hip/device_functions.h> #include <fstream> #include <sstream> using namespace std; namespace gpu_lbfgs { // Variables __device__ float fkm1; __device__ float fk; __device__ float tmp; __device__ float alpha[HISTORY_SIZE]; __device__ float rho [HISTORY_SIZE]; __device__ float H0; __device__ float step; __device__ float tmp2; __device__ int status; // Small helper kernels for scalar operations in device memory needed during updates. // What they're used for is documented by comments in the places they are executed. // *** Use with a single thread only! *** __global__ void update1 (float *alpha_out, const float *sDotZ, const float *rho, float *minusAlpha_out); // first update loop __global__ void update2 (float *alphaMinusBeta_out, const float *rho, const float *yDotZ, const float *alpha); // second update loop __global__ void update3 (float *rho_out, float *H0_out, const float *yDotS, const float *yDotY); // after line search } // linesearch_gpu.h is no real header, it contains // part of the implementation and must be included // after the variables above have been declared. 
#include "linesearch_gpu.h" lbfgs::lbfgs(cost_function& cf) : m_costFunction(cf) , m_maxIter(10000) , m_maxEvals(std::numeric_limits<size_t>::max()) , m_gradientEps(1e-4f) { CublasSafeCall( hipblasCreate(&m_cublasHandle) ); } lbfgs::~lbfgs() { CublasSafeCall( hipblasDestroy(m_cublasHandle) ); } std::string lbfgs::statusToString(lbfgs::status stat) { switch (stat) { case LBFGS_BELOW_GRADIENT_EPS: return "Below gradient epsilon"; case LBFGS_REACHED_MAX_ITER: return "Reached maximum number of iterations"; case LBFGS_REACHED_MAX_EVALS: return "Reached maximum number of function/gradient evaluations"; case LBFGS_LINE_SEARCH_FAILED: return "Line search failed"; default: return "Unknown status"; } } lbfgs::status lbfgs::minimize(float *d_x) { return gpu_lbfgs(d_x); } lbfgs::status lbfgs::minimize_with_host_x(float *h_x) { const size_t NX = m_costFunction.getNumberOfUnknowns(); float *d_x; CudaSafeCall( hipMalloc((void**)&d_x, NX * sizeof(float)) ); CudaSafeCall( hipMemcpy(d_x, h_x, NX * sizeof(float), hipMemcpyHostToDevice) ); status ret = minimize(d_x); CudaSafeCall( hipMemcpy(h_x, d_x, NX * sizeof(float), hipMemcpyDeviceToHost) ); CudaSafeCall( hipFree(d_x) ); return ret; } lbfgs::status lbfgs::gpu_lbfgs(float *d_x) { #ifdef LBFGS_TIMING timer timer_total ("GPU_LBFGS_total" ); timer timer_evals ("GPU_LBFGS_evals" ); timer timer_updates ("GPU_LBFGS_updates" ); timer timer_linesearch("GPU_LBFGS_linesearch"); timer_total.start(); #endif using namespace gpu_lbfgs; const size_t NX = m_costFunction.getNumberOfUnknowns(); float *d_fkm1, *d_fk; // f_{k-1}, f_k, function values at x_{k-1} and x_k float *d_gkm1, *d_gk; // g_{k-1}, g_k, gradients at x_{k-1} and x_k float *d_z; // z, search direction float *d_H0; // H_0, initial inverse Hessian (diagonal, same value for all elements) float *d_step; // step current step length float *d_tmp, *d_tmp2; // tmp, tmp2 temporary storage for intermediate results int *d_status; // status return code for communication device -> host // Ring buffers for history float *d_s; // s, history of solution updates float *d_y; // y, history of gradient updates float *d_alpha; // alpha, history of alphas (needed for z updates) float *d_rho; // rho, history of rhos (needed for z updates) // Allocations CudaSafeCall( hipMalloc(&d_gk, NX * sizeof(float)) ); CudaSafeCall( hipMalloc(&d_gkm1, NX * sizeof(float)) ); CudaSafeCall( hipMalloc(&d_z, NX * sizeof(float)) ); CudaSafeCall( hipMalloc(&d_s, HISTORY_SIZE * NX * sizeof(float)) ); CudaSafeCall( hipMalloc(&d_y, HISTORY_SIZE * NX * sizeof(float)) ); // Addresses of global symbols CudaSafeCall( hipGetSymbolAddress((void**)&d_fkm1, gpu_lbfgs::fkm1 ) ); CudaSafeCall( hipGetSymbolAddress((void**)&d_fk, gpu_lbfgs::fk ) ); CudaSafeCall( hipGetSymbolAddress((void**)&d_tmp, gpu_lbfgs::tmp ) ); CudaSafeCall( hipGetSymbolAddress((void**)&d_tmp2, gpu_lbfgs::tmp2 ) ); CudaSafeCall( hipGetSymbolAddress((void**)&d_H0, gpu_lbfgs::H0 ) ); CudaSafeCall( hipGetSymbolAddress((void**)&d_alpha, gpu_lbfgs::alpha ) ); CudaSafeCall( hipGetSymbolAddress((void**)&d_rho, gpu_lbfgs::rho ) ); CudaSafeCall( hipGetSymbolAddress((void**)&d_step, gpu_lbfgs::step ) ); CudaSafeCall( hipGetSymbolAddress((void**)&d_status, gpu_lbfgs::status) ); // Initialize #ifdef LBFGS_TIMING timer_evals.start(); #endif m_costFunction.f_gradf(d_x, d_fk, d_gk); CudaCheckError(); hipDeviceSynchronize(); #ifdef LBFGS_TIMING timer_evals.stop(); #endif size_t evals = 1; status stat = LBFGS_REACHED_MAX_ITER; #ifdef LBFGS_VERBOSE std::cout << "lbfgs::gpu_lbfgs()" << std::endl; #endif // H0 
= 1.0f; const float one = 1.0f; CudaSafeCall( hipMemcpy(d_H0, &one, sizeof(float), hipMemcpyHostToDevice) ); size_t it; for (it = 0; it < m_maxIter; ++it) { #ifdef LBFGS_VERBOSE float h_y; CudaSafeCall( hipMemcpy(&h_y, d_fk, sizeof(float), hipMemcpyDeviceToHost) ); float gknorm2; dispatch_dot(NX, &gknorm2, d_gk, d_gk, false); printf("f(x) = % 12e, ||grad||_2 = % 12e\n", h_y, std::sqrt(gknorm2)); #endif // Check for convergence // --------------------- float gkNormSquared; float xkNormSquared; dispatch_dot(NX, &xkNormSquared, d_x, d_x, false); dispatch_dot(NX, &gkNormSquared, d_gk, d_gk, false); if (gkNormSquared < (m_gradientEps * m_gradientEps) * ::max(xkNormSquared, 1.0f)) { stat = LBFGS_BELOW_GRADIENT_EPS; break; } // Find search direction // --------------------- #ifdef LBFGS_TIMING timer_updates.start(); #endif const float minusOne = -1.0f; dispatch_scale(NX, d_z, d_gk, &minusOne, false); // z = -gk const size_t MAX_IDX = std::min<size_t>(it, HISTORY_SIZE); for (size_t i = 1; i <= MAX_IDX; ++i) { size_t idx = index(it - i); dispatch_dot(NX, d_tmp, d_s + idx * NX, d_z); // tmp = sDotZ // alpha = tmp * rho // tmp = -alpha hipLaunchKernelGGL(( update1), dim3(1), dim3(1), 0, 0, d_alpha + idx, d_tmp, d_rho + idx, d_tmp); CudaCheckError(); hipDeviceSynchronize(); // z += tmp * y dispatch_axpy(NX, d_z, d_z, d_y + idx * NX, d_tmp); } dispatch_scale(NX, d_z, d_z, d_H0); // z = H0 * z for (size_t i = MAX_IDX; i > 0; --i) { size_t idx = index(it - i); dispatch_dot(NX, d_tmp, d_y + idx * NX, d_z); // tmp = yDotZ // beta = rho * tmp // tmp = alpha - beta hipLaunchKernelGGL(( update2), dim3(1), dim3(1), 0, 0, d_tmp, d_rho + idx, d_tmp, d_alpha + idx); CudaCheckError(); hipDeviceSynchronize(); // z += tmp * s dispatch_axpy(NX, d_z, d_z, d_s + idx * NX, d_tmp); } #ifdef LBFGS_TIMING timer_updates.stop(); timer_linesearch.start(); #endif CudaSafeCall( hipMemcpy(d_fkm1, d_fk, 1 * sizeof(float), hipMemcpyDeviceToDevice) ); // fkm1 = fk; CudaSafeCall( hipMemcpy(d_gkm1, d_gk, NX * sizeof(float), hipMemcpyDeviceToDevice) ); // gkm1 = gk; timer *t_evals = NULL, *t_linesearch = NULL; #ifdef LBFGS_TIMING t_evals = &timer_evals; t_linesearch = &timer_linesearch; #endif // (line search defined in linesearch_gpu.h) if (!gpu_linesearch(d_x, d_z, d_fk, d_gk, evals, d_gkm1, d_fkm1, stat, d_step, m_maxEvals, t_evals, t_linesearch, d_tmp, d_status)) { break; } #ifdef LBFGS_TIMING timer_linesearch.stop(); timer_updates.start(); #endif // Update s, y, rho and H_0 // ------------------------ // s = x_k - x_{k-1} = step * z // y = g_k - g_{k-1} // rho = 1 / (y^T s) // H_0 = (y^T s) / (y^T y) float *d_curS = d_s + index(it) * NX; float *d_curY = d_y + index(it) * NX; dispatch_scale(NX, d_curS, d_z, d_step); // s = step * z dispatch_axpy (NX, d_curY, d_gk, d_gkm1, &minusOne, false); // y = gk - gkm1 dispatch_dot(NX, d_tmp, d_curY, d_curS); // tmp = yDotS dispatch_dot(NX, d_tmp2, d_curY, d_curY); // tmp2 = yDotY // rho = 1 / tmp // if (tmp2 > 1e-5) // H0 = tmp / tmp2 hipLaunchKernelGGL(( update3), dim3(1), dim3(1), 0, 0, d_rho + index(it), d_H0, d_tmp, d_tmp2); CudaCheckError(); hipDeviceSynchronize(); #ifdef LBFGS_TIMING timer_updates.stop(); #endif } // Deallocations CudaSafeCall( hipFree(d_gk) ); CudaSafeCall( hipFree(d_gkm1) ); CudaSafeCall( hipFree(d_z) ); CudaSafeCall( hipFree(d_s) ); CudaSafeCall( hipFree(d_y) ); #ifdef LBFGS_TIMING timer_total.stop(); timer_total.saveMeasurement(); timer_evals.saveMeasurement(); timer_updates.saveMeasurement(); timer_linesearch.saveMeasurement(); #endif #ifdef LBFGS_VERBOSE 
std::cout << "Number of iterations: " << it << std::endl; std::cout << "Number of function/gradient evaluations: " << evals << std::endl; std::cout << "Reason for termination: " << statusToString(stat) << std::endl; #endif return stat; } // Vector operations // ----------------- void lbfgs::dispatch_axpy(const size_t n, float *d_dst, const float *d_y, const float *d_x, const float *a, bool aDevicePointer) const { const hipblasPointerMode_t mode = aDevicePointer ? HIPBLAS_POINTER_MODE_DEVICE : HIPBLAS_POINTER_MODE_HOST; CublasSafeCall( hipblasSetPointerMode(m_cublasHandle, mode) ); if (d_dst != d_y) CudaSafeCall( hipMemcpy(d_dst, d_y, n * sizeof(float), hipMemcpyDeviceToDevice) ); CublasSafeCall( hipblasSaxpy(m_cublasHandle, int(n), a, d_x, 1, d_dst, 1) ); } void lbfgs::dispatch_scale(const size_t n, float *d_dst, const float *d_x, const float *a, bool aDevicePointer) const { const hipblasPointerMode_t mode = aDevicePointer ? HIPBLAS_POINTER_MODE_DEVICE : HIPBLAS_POINTER_MODE_HOST; CublasSafeCall( hipblasSetPointerMode(m_cublasHandle, mode) ); if (d_dst != d_x) CudaSafeCall( hipMemcpy(d_dst, d_x, n * sizeof(float), hipMemcpyDeviceToDevice) ); CublasSafeCall( hipblasSscal(m_cublasHandle, int(n), a, d_dst, 1) ); } void lbfgs::dispatch_dot(const size_t n, float *dst, const float *d_x, const float *d_y, bool dstDevicePointer) const { const hipblasPointerMode_t mode = dstDevicePointer ? HIPBLAS_POINTER_MODE_DEVICE : HIPBLAS_POINTER_MODE_HOST; CublasSafeCall( hipblasSetPointerMode(m_cublasHandle, mode) ); CublasSafeCall( hipblasSdot(m_cublasHandle, int(n), d_x, 1, d_y, 1, dst) ); } // ----------------- // Device / kernel functions // ------------------------- namespace gpu_lbfgs { __global__ void update1(float *alpha_out, const float *sDotZ, const float *rho, float *minusAlpha_out) { *alpha_out = *sDotZ * *rho; *minusAlpha_out = -*alpha_out; } __global__ void update2(float *alphaMinusBeta_out, const float *rho, const float *yDotZ, const float *alpha) { const float beta = *rho * *yDotZ; *alphaMinusBeta_out = *alpha - beta; } __global__ void update3(float *rho_out, float *H0_out, const float *yDotS, const float *yDotY) { *rho_out = 1.0f / *yDotS; if (*yDotY > 1e-5) *H0_out = *yDotS / *yDotY; } } // ------------------
1d9a4beb030e20bacd668f273ab51a6eb12c9628.cu
/** * ___ _ _ ___ _ _ ___ ___ ___ ___ * / __| | | | \ /_\ | | ___| _ ) __/ __/ __| * | (__| |_| | |) / _ \ | |_|___| _ \ _| (_ \__ \ * \___|\___/|___/_/ \_\ |____| |___/_| \___|___/ * 2012 * by Jens Wetzl ([email protected]) * and Oliver Taubmann ([email protected]) * * This work is licensed under a Creative Commons * Attribution 3.0 Unported License. (CC-BY) * http://creativecommons.org/licenses/by/3.0/ * * File lbfgs.cu: Implementation of class lbfgs (except cpu_lbfgs). * **/ #include "lbfgs.h" #include "timer.h" #include <iostream> #include <limits> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include <device_functions.h> #include <fstream> #include <sstream> using namespace std; namespace gpu_lbfgs { // Variables __device__ float fkm1; __device__ float fk; __device__ float tmp; __device__ float alpha[HISTORY_SIZE]; __device__ float rho [HISTORY_SIZE]; __device__ float H0; __device__ float step; __device__ float tmp2; __device__ int status; // Small helper kernels for scalar operations in device memory needed during updates. // What they're used for is documented by comments in the places they are executed. // *** Use with a single thread only! *** __global__ void update1 (float *alpha_out, const float *sDotZ, const float *rho, float *minusAlpha_out); // first update loop __global__ void update2 (float *alphaMinusBeta_out, const float *rho, const float *yDotZ, const float *alpha); // second update loop __global__ void update3 (float *rho_out, float *H0_out, const float *yDotS, const float *yDotY); // after line search } // linesearch_gpu.h is no real header, it contains // part of the implementation and must be included // after the variables above have been declared. #include "linesearch_gpu.h" lbfgs::lbfgs(cost_function& cf) : m_costFunction(cf) , m_maxIter(10000) , m_maxEvals(std::numeric_limits<size_t>::max()) , m_gradientEps(1e-4f) { CublasSafeCall( cublasCreate(&m_cublasHandle) ); } lbfgs::~lbfgs() { CublasSafeCall( cublasDestroy(m_cublasHandle) ); } std::string lbfgs::statusToString(lbfgs::status stat) { switch (stat) { case LBFGS_BELOW_GRADIENT_EPS: return "Below gradient epsilon"; case LBFGS_REACHED_MAX_ITER: return "Reached maximum number of iterations"; case LBFGS_REACHED_MAX_EVALS: return "Reached maximum number of function/gradient evaluations"; case LBFGS_LINE_SEARCH_FAILED: return "Line search failed"; default: return "Unknown status"; } } lbfgs::status lbfgs::minimize(float *d_x) { return gpu_lbfgs(d_x); } lbfgs::status lbfgs::minimize_with_host_x(float *h_x) { const size_t NX = m_costFunction.getNumberOfUnknowns(); float *d_x; CudaSafeCall( cudaMalloc((void**)&d_x, NX * sizeof(float)) ); CudaSafeCall( cudaMemcpy(d_x, h_x, NX * sizeof(float), cudaMemcpyHostToDevice) ); status ret = minimize(d_x); CudaSafeCall( cudaMemcpy(h_x, d_x, NX * sizeof(float), cudaMemcpyDeviceToHost) ); CudaSafeCall( cudaFree(d_x) ); return ret; } lbfgs::status lbfgs::gpu_lbfgs(float *d_x) { #ifdef LBFGS_TIMING timer timer_total ("GPU_LBFGS_total" ); timer timer_evals ("GPU_LBFGS_evals" ); timer timer_updates ("GPU_LBFGS_updates" ); timer timer_linesearch("GPU_LBFGS_linesearch"); timer_total.start(); #endif using namespace gpu_lbfgs; const size_t NX = m_costFunction.getNumberOfUnknowns(); float *d_fkm1, *d_fk; // f_{k-1}, f_k, function values at x_{k-1} and x_k float *d_gkm1, *d_gk; // g_{k-1}, g_k, gradients at x_{k-1} and x_k float *d_z; // z, search direction float *d_H0; // H_0, initial inverse Hessian (diagonal, same value 
for all elements) float *d_step; // step current step length float *d_tmp, *d_tmp2; // tmp, tmp2 temporary storage for intermediate results int *d_status; // status return code for communication device -> host // Ring buffers for history float *d_s; // s, history of solution updates float *d_y; // y, history of gradient updates float *d_alpha; // alpha, history of alphas (needed for z updates) float *d_rho; // rho, history of rhos (needed for z updates) // Allocations CudaSafeCall( cudaMalloc(&d_gk, NX * sizeof(float)) ); CudaSafeCall( cudaMalloc(&d_gkm1, NX * sizeof(float)) ); CudaSafeCall( cudaMalloc(&d_z, NX * sizeof(float)) ); CudaSafeCall( cudaMalloc(&d_s, HISTORY_SIZE * NX * sizeof(float)) ); CudaSafeCall( cudaMalloc(&d_y, HISTORY_SIZE * NX * sizeof(float)) ); // Addresses of global symbols CudaSafeCall( cudaGetSymbolAddress((void**)&d_fkm1, gpu_lbfgs::fkm1 ) ); CudaSafeCall( cudaGetSymbolAddress((void**)&d_fk, gpu_lbfgs::fk ) ); CudaSafeCall( cudaGetSymbolAddress((void**)&d_tmp, gpu_lbfgs::tmp ) ); CudaSafeCall( cudaGetSymbolAddress((void**)&d_tmp2, gpu_lbfgs::tmp2 ) ); CudaSafeCall( cudaGetSymbolAddress((void**)&d_H0, gpu_lbfgs::H0 ) ); CudaSafeCall( cudaGetSymbolAddress((void**)&d_alpha, gpu_lbfgs::alpha ) ); CudaSafeCall( cudaGetSymbolAddress((void**)&d_rho, gpu_lbfgs::rho ) ); CudaSafeCall( cudaGetSymbolAddress((void**)&d_step, gpu_lbfgs::step ) ); CudaSafeCall( cudaGetSymbolAddress((void**)&d_status, gpu_lbfgs::status) ); // Initialize #ifdef LBFGS_TIMING timer_evals.start(); #endif m_costFunction.f_gradf(d_x, d_fk, d_gk); CudaCheckError(); cudaDeviceSynchronize(); #ifdef LBFGS_TIMING timer_evals.stop(); #endif size_t evals = 1; status stat = LBFGS_REACHED_MAX_ITER; #ifdef LBFGS_VERBOSE std::cout << "lbfgs::gpu_lbfgs()" << std::endl; #endif // H0 = 1.0f; const float one = 1.0f; CudaSafeCall( cudaMemcpy(d_H0, &one, sizeof(float), cudaMemcpyHostToDevice) ); size_t it; for (it = 0; it < m_maxIter; ++it) { #ifdef LBFGS_VERBOSE float h_y; CudaSafeCall( cudaMemcpy(&h_y, d_fk, sizeof(float), cudaMemcpyDeviceToHost) ); float gknorm2; dispatch_dot(NX, &gknorm2, d_gk, d_gk, false); printf("f(x) = % 12e, ||grad||_2 = % 12e\n", h_y, std::sqrt(gknorm2)); #endif // Check for convergence // --------------------- float gkNormSquared; float xkNormSquared; dispatch_dot(NX, &xkNormSquared, d_x, d_x, false); dispatch_dot(NX, &gkNormSquared, d_gk, d_gk, false); if (gkNormSquared < (m_gradientEps * m_gradientEps) * std::max(xkNormSquared, 1.0f)) { stat = LBFGS_BELOW_GRADIENT_EPS; break; } // Find search direction // --------------------- #ifdef LBFGS_TIMING timer_updates.start(); #endif const float minusOne = -1.0f; dispatch_scale(NX, d_z, d_gk, &minusOne, false); // z = -gk const size_t MAX_IDX = std::min<size_t>(it, HISTORY_SIZE); for (size_t i = 1; i <= MAX_IDX; ++i) { size_t idx = index(it - i); dispatch_dot(NX, d_tmp, d_s + idx * NX, d_z); // tmp = sDotZ // alpha = tmp * rho // tmp = -alpha update1<<<1, 1>>>(d_alpha + idx, d_tmp, d_rho + idx, d_tmp); CudaCheckError(); cudaDeviceSynchronize(); // z += tmp * y dispatch_axpy(NX, d_z, d_z, d_y + idx * NX, d_tmp); } dispatch_scale(NX, d_z, d_z, d_H0); // z = H0 * z for (size_t i = MAX_IDX; i > 0; --i) { size_t idx = index(it - i); dispatch_dot(NX, d_tmp, d_y + idx * NX, d_z); // tmp = yDotZ // beta = rho * tmp // tmp = alpha - beta update2<<<1, 1>>>(d_tmp, d_rho + idx, d_tmp, d_alpha + idx); CudaCheckError(); cudaDeviceSynchronize(); // z += tmp * s dispatch_axpy(NX, d_z, d_z, d_s + idx * NX, d_tmp); } #ifdef LBFGS_TIMING timer_updates.stop(); 
timer_linesearch.start(); #endif CudaSafeCall( cudaMemcpy(d_fkm1, d_fk, 1 * sizeof(float), cudaMemcpyDeviceToDevice) ); // fkm1 = fk; CudaSafeCall( cudaMemcpy(d_gkm1, d_gk, NX * sizeof(float), cudaMemcpyDeviceToDevice) ); // gkm1 = gk; timer *t_evals = NULL, *t_linesearch = NULL; #ifdef LBFGS_TIMING t_evals = &timer_evals; t_linesearch = &timer_linesearch; #endif // (line search defined in linesearch_gpu.h) if (!gpu_linesearch(d_x, d_z, d_fk, d_gk, evals, d_gkm1, d_fkm1, stat, d_step, m_maxEvals, t_evals, t_linesearch, d_tmp, d_status)) { break; } #ifdef LBFGS_TIMING timer_linesearch.stop(); timer_updates.start(); #endif // Update s, y, rho and H_0 // ------------------------ // s = x_k - x_{k-1} = step * z // y = g_k - g_{k-1} // rho = 1 / (y^T s) // H_0 = (y^T s) / (y^T y) float *d_curS = d_s + index(it) * NX; float *d_curY = d_y + index(it) * NX; dispatch_scale(NX, d_curS, d_z, d_step); // s = step * z dispatch_axpy (NX, d_curY, d_gk, d_gkm1, &minusOne, false); // y = gk - gkm1 dispatch_dot(NX, d_tmp, d_curY, d_curS); // tmp = yDotS dispatch_dot(NX, d_tmp2, d_curY, d_curY); // tmp2 = yDotY // rho = 1 / tmp // if (tmp2 > 1e-5) // H0 = tmp / tmp2 update3<<<1, 1>>>(d_rho + index(it), d_H0, d_tmp, d_tmp2); CudaCheckError(); cudaDeviceSynchronize(); #ifdef LBFGS_TIMING timer_updates.stop(); #endif } // Deallocations CudaSafeCall( cudaFree(d_gk) ); CudaSafeCall( cudaFree(d_gkm1) ); CudaSafeCall( cudaFree(d_z) ); CudaSafeCall( cudaFree(d_s) ); CudaSafeCall( cudaFree(d_y) ); #ifdef LBFGS_TIMING timer_total.stop(); timer_total.saveMeasurement(); timer_evals.saveMeasurement(); timer_updates.saveMeasurement(); timer_linesearch.saveMeasurement(); #endif #ifdef LBFGS_VERBOSE std::cout << "Number of iterations: " << it << std::endl; std::cout << "Number of function/gradient evaluations: " << evals << std::endl; std::cout << "Reason for termination: " << statusToString(stat) << std::endl; #endif return stat; } // Vector operations // ----------------- void lbfgs::dispatch_axpy(const size_t n, float *d_dst, const float *d_y, const float *d_x, const float *a, bool aDevicePointer) const { const cublasPointerMode_t mode = aDevicePointer ? CUBLAS_POINTER_MODE_DEVICE : CUBLAS_POINTER_MODE_HOST; CublasSafeCall( cublasSetPointerMode(m_cublasHandle, mode) ); if (d_dst != d_y) CudaSafeCall( cudaMemcpy(d_dst, d_y, n * sizeof(float), cudaMemcpyDeviceToDevice) ); CublasSafeCall( cublasSaxpy(m_cublasHandle, int(n), a, d_x, 1, d_dst, 1) ); } void lbfgs::dispatch_scale(const size_t n, float *d_dst, const float *d_x, const float *a, bool aDevicePointer) const { const cublasPointerMode_t mode = aDevicePointer ? CUBLAS_POINTER_MODE_DEVICE : CUBLAS_POINTER_MODE_HOST; CublasSafeCall( cublasSetPointerMode(m_cublasHandle, mode) ); if (d_dst != d_x) CudaSafeCall( cudaMemcpy(d_dst, d_x, n * sizeof(float), cudaMemcpyDeviceToDevice) ); CublasSafeCall( cublasSscal(m_cublasHandle, int(n), a, d_dst, 1) ); } void lbfgs::dispatch_dot(const size_t n, float *dst, const float *d_x, const float *d_y, bool dstDevicePointer) const { const cublasPointerMode_t mode = dstDevicePointer ? 
CUBLAS_POINTER_MODE_DEVICE : CUBLAS_POINTER_MODE_HOST; CublasSafeCall( cublasSetPointerMode(m_cublasHandle, mode) ); CublasSafeCall( cublasSdot(m_cublasHandle, int(n), d_x, 1, d_y, 1, dst) ); } // ----------------- // Device / kernel functions // ------------------------- namespace gpu_lbfgs { __global__ void update1(float *alpha_out, const float *sDotZ, const float *rho, float *minusAlpha_out) { *alpha_out = *sDotZ * *rho; *minusAlpha_out = -*alpha_out; } __global__ void update2(float *alphaMinusBeta_out, const float *rho, const float *yDotZ, const float *alpha) { const float beta = *rho * *yDotZ; *alphaMinusBeta_out = *alpha - beta; } __global__ void update3(float *rho_out, float *H0_out, const float *yDotS, const float *yDotY) { *rho_out = 1.0f / *yDotS; if (*yDotY > 1e-5) *H0_out = *yDotS / *yDotY; } } // ------------------
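The update1/update2/update3 single-thread kernels above are the scalar pieces of the standard L-BFGS two-loop recursion. A plain CPU sketch of that recursion (hypothetical helper, history pairs ordered oldest to newest) may make the data flow easier to follow than the interleaved cuBLAS calls:

#include <cstddef>
#include <vector>

// Given gradient g, histories s[i], y[i], rho[i] = 1/(y[i].s[i]) and scaling H0 = (y.s)/(y.y),
// return the search direction z that approximates -H_k * g.
static std::vector<float> two_loop(const std::vector<float> &g,
                                   const std::vector<std::vector<float>> &s,
                                   const std::vector<std::vector<float>> &y,
                                   const std::vector<float> &rho, float H0)
{
    const std::size_t m = s.size(), n = g.size();
    std::vector<float> z(n), alpha(m);
    for (std::size_t j = 0; j < n; ++j) z[j] = -g[j];            // z = -gk
    for (std::size_t i = m; i-- > 0;) {                          // newest -> oldest
        float sDotZ = 0.0f;
        for (std::size_t j = 0; j < n; ++j) sDotZ += s[i][j] * z[j];
        alpha[i] = rho[i] * sDotZ;                               // update1: alpha = rho * (s . z)
        for (std::size_t j = 0; j < n; ++j) z[j] -= alpha[i] * y[i][j];
    }
    for (std::size_t j = 0; j < n; ++j) z[j] *= H0;              // z = H0 * z
    for (std::size_t i = 0; i < m; ++i) {                        // oldest -> newest
        float yDotZ = 0.0f;
        for (std::size_t j = 0; j < n; ++j) yDotZ += y[i][j] * z[j];
        const float beta = rho[i] * yDotZ;                       // update2: beta = rho * (y . z)
        for (std::size_t j = 0; j < n; ++j) z[j] += (alpha[i] - beta) * s[i][j];
    }
    return z;
}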
685e2da544f49fd1a16ee7bfa00c5c8336d4ede0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Thread2D.h" #include "cudas.h" #include "DamierHSBAFloatMath.h" #include "Indices_GPU.h" #include "DomaineMath_GPU.h" using namespace gpu; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void damierHSBAFloat(float4* tabPixelsGM , uint w , uint h , DomaineMath domaineMath , uint n , float t); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void damierHSBAFloat(float4* tabPixelsGM , uint w , uint h , DomaineMath domaineMath , uint n , float t) { DamierHSBAFloatMath damierHSBAFloatMath(n,t); const int TID = Thread2D::tid(); const int NB_THREAD = Thread2D::nbThread(); const int WH = w * h; double x; double y; int i; // in [0,h[ int j; // in [0,w[ int s = TID; while (s < WH) { Indices::toIJ(s, w, &i, &j); // update (i, j) // (i,j) domaine ecran // (x,y) domaine math domaineMath.toXY(i, j, &x, &y); // (i,j) -> (x,y) damierHSBAFloatMath.colorXY(&tabPixelsGM[s], x, y); // update ptrDevPixels[s] s += NB_THREAD; } } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
685e2da544f49fd1a16ee7bfa00c5c8336d4ede0.cu
#include "Thread2D.h" #include "cudas.h" #include "DamierHSBAFloatMath.h" #include "Indices_GPU.h" #include "DomaineMath_GPU.h" using namespace gpu; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void damierHSBAFloat(float4* tabPixelsGM , uint w , uint h , DomaineMath domaineMath , uint n , float t); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void damierHSBAFloat(float4* tabPixelsGM , uint w , uint h , DomaineMath domaineMath , uint n , float t) { DamierHSBAFloatMath damierHSBAFloatMath(n,t); const int TID = Thread2D::tid(); const int NB_THREAD = Thread2D::nbThread(); const int WH = w * h; double x; double y; int i; // in [0,h[ int j; // in [0,w[ int s = TID; while (s < WH) { Indices::toIJ(s, w, &i, &j); // update (i, j) // (i,j) domaine ecran // (x,y) domaine math domaineMath.toXY(i, j, &x, &y); // (i,j) -> (x,y) damierHSBAFloatMath.colorXY(&tabPixelsGM[s], x, y); // update ptrDevPixels[s] s += NB_THREAD; } } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
afa7b6d72c7206c4c4c99cfb5af3845dd9d9a0ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "book.h" #include <time.h> #define imin(a, b) (a < b ? a : b) const int N = 33 * 1024; const int threadsPerBlock = 256; const int blocksPerGrid = imin(32, (N+threadsPerBlock-1) / threadsPerBlock); __global__ void dot(float *a, float *b, float *c) { __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while (tid < N){ temp += a[tid] * b[tid]; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); int i = blockDim.x / 2; while (i != 0){ if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if (cacheIndex == 0) c[blockIdx.x] = cache[0]; } int main(void) { struct timespec old_time, new_time; unsigned long int oldNs, newNs; float *a, *b, c, *partial_c; float *dev_a, *dev_b, *dev_partial_c; a = (float*) malloc(N * sizeof(float)); b = (float*) malloc(N * sizeof(float)); partial_c = (float*) malloc(blocksPerGrid * sizeof(float)); HANDLE_ERROR( hipMalloc((void**) &dev_a, N * sizeof(float)) ); HANDLE_ERROR( hipMalloc((void**) &dev_b, N * sizeof(float)) ); HANDLE_ERROR( hipMalloc((void**) &dev_partial_c, blocksPerGrid * sizeof(float)) ); for(int i = 0; i < N; ++i){ a[i] = i; b[i] = i * 2; } HANDLE_ERROR( hipMemcpy(dev_a, a, N * sizeof(float), hipMemcpyHostToDevice) ); HANDLE_ERROR( hipMemcpy(dev_b, b, N * sizeof(float), hipMemcpyHostToDevice) ); clock_gettime(CLOCK_MONOTONIC, &old_time); hipLaunchKernelGGL(( dot) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_partial_c); hipDeviceSynchronize(); HANDLE_ERROR( hipMemcpy(partial_c, dev_partial_c, blocksPerGrid * sizeof(float), hipMemcpyDeviceToHost) ); clock_gettime(CLOCK_MONOTONIC, &new_time); oldNs = old_time.tv_sec * 1000000000ull + old_time.tv_nsec; newNs = new_time.tv_sec * 1000000000ull + new_time.tv_nsec; float dt = (newNs - oldNs) * 0.000000001f; printf("Original vector sizes were %d, dot product took %0.6f seconds \n", N, dt); //HANDLE_ERROR( hipMemcpy(partial_c, dev_partial_c, blocksPerGrid * sizeof(float), hipMemcpyDeviceToHost) ); c = 0; for (int i = 0; i < blocksPerGrid; ++i) { c += partial_c[i]; } #define sum_squares(x) (x * (x + 1) * (2 * x + 1) / 6) printf("Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares( (float) (N - 1) )); hipFree(dev_a); hipFree(dev_b); hipFree(dev_partial_c); free(a); free(b); free(partial_c); return 0; }
afa7b6d72c7206c4c4c99cfb5af3845dd9d9a0ad.cu
#include "book.h" #include <time.h> #define imin(a, b) (a < b ? a : b) const int N = 33 * 1024; const int threadsPerBlock = 256; const int blocksPerGrid = imin(32, (N+threadsPerBlock-1) / threadsPerBlock); __global__ void dot(float *a, float *b, float *c) { __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while (tid < N){ temp += a[tid] * b[tid]; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); int i = blockDim.x / 2; while (i != 0){ if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if (cacheIndex == 0) c[blockIdx.x] = cache[0]; } int main(void) { struct timespec old_time, new_time; unsigned long int oldNs, newNs; float *a, *b, c, *partial_c; float *dev_a, *dev_b, *dev_partial_c; a = (float*) malloc(N * sizeof(float)); b = (float*) malloc(N * sizeof(float)); partial_c = (float*) malloc(blocksPerGrid * sizeof(float)); HANDLE_ERROR( cudaMalloc((void**) &dev_a, N * sizeof(float)) ); HANDLE_ERROR( cudaMalloc((void**) &dev_b, N * sizeof(float)) ); HANDLE_ERROR( cudaMalloc((void**) &dev_partial_c, blocksPerGrid * sizeof(float)) ); for(int i = 0; i < N; ++i){ a[i] = i; b[i] = i * 2; } HANDLE_ERROR( cudaMemcpy(dev_a, a, N * sizeof(float), cudaMemcpyHostToDevice) ); HANDLE_ERROR( cudaMemcpy(dev_b, b, N * sizeof(float), cudaMemcpyHostToDevice) ); clock_gettime(CLOCK_MONOTONIC, &old_time); dot <<<blocksPerGrid, threadsPerBlock>>> (dev_a, dev_b, dev_partial_c); cudaDeviceSynchronize(); HANDLE_ERROR( cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost) ); clock_gettime(CLOCK_MONOTONIC, &new_time); oldNs = old_time.tv_sec * 1000000000ull + old_time.tv_nsec; newNs = new_time.tv_sec * 1000000000ull + new_time.tv_nsec; float dt = (newNs - oldNs) * 0.000000001f; printf("Original vector sizes were %d, dot product took %0.6f seconds \n", N, dt); //HANDLE_ERROR( cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost) ); c = 0; for (int i = 0; i < blocksPerGrid; ++i) { c += partial_c[i]; } #define sum_squares(x) (x * (x + 1) * (2 * x + 1) / 6) printf("Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares( (float) (N - 1) )); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_partial_c); free(a); free(b); free(partial_c); return 0; }
4981b0a4f116f141c6bd4f857b75fbcab8156cdd.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Dispatch.h> #include <ATen/native/ForeachUtils.h> #include <ATen/native/hip/ForeachFunctors.cuh> namespace at { namespace native { template<template<class> class Op> std::vector<Tensor> foreach_tensor_list_op(TensorList tensors1, TensorList tensors2, Scalar alpha = 1) { std::vector<std::vector<at::Tensor>> tensor_lists; std::vector<at::Tensor> vec_res; vec_res.reserve(tensors1.size()); for (const auto& t: tensors1) { vec_res.emplace_back(at::native::empty_like(t)); } tensor_lists.emplace_back(tensors1.vec()); tensor_lists.emplace_back(tensors2.vec()); tensor_lists.emplace_back(std::move(vec_res)); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors1[0].scalar_type(), "foreach_binary_op_list_cuda", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<3>(tensor_lists, BinaryOpListAlphaFunctor<scalar_t, /* depth */ 3, /* r_args_depth */ 2, /* res_arg_index */ 2>(), Op<opmath_t>(), alpha.to<opmath_t>()); }); return tensor_lists[2]; } template<template<class> class Op> void foreach_tensor_list_op_(TensorList tensors1, TensorList tensors2, Scalar alpha = 1) { std::vector<std::vector<at::Tensor>> tensor_lists; tensor_lists.emplace_back(tensors1.vec()); tensor_lists.emplace_back(tensors2.vec()); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors1[0].scalar_type(), "foreach_binary_op_list_cuda_", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<2>(tensor_lists, BinaryOpListAlphaFunctor<scalar_t, /* depth */ 2, /* r_args_depth */ 2, /* res_arg_index */ 0>(), Op<opmath_t>(), alpha.to<opmath_t>()); }); } #define FOREACH_BINARY_OP_LIST(NAME, OP) \ void foreach_tensor_##NAME##_list_kernel_cuda_(TensorList tensors1, TensorList tensors2) { \ check_foreach_api_restrictions(tensors1, tensors2); \ \ if (!can_use_fast_route(tensors1, tensors2)) { \ return at::native::foreach_tensor_##NAME##_list_kernel_slow_(tensors1, tensors2); \ } \ \ foreach_tensor_list_op_<OP>(tensors1, tensors2); \ } \ \ std::vector<Tensor> foreach_tensor_##NAME##_list_kernel_cuda(TensorList tensors1, TensorList tensors2) { \ check_foreach_api_restrictions(tensors1, tensors2); \ \ if (!can_use_fast_route(tensors1, tensors2)) { \ return at::native::foreach_tensor_##NAME##_list_kernel_slow(tensors1, tensors2); \ } \ \ return foreach_tensor_list_op<OP>(tensors1, tensors2); \ } #define FOREACH_BINARY_OP_LIST_ALPHA(NAME, OP) \ void foreach_tensor_##NAME##_list_kernel_cuda_(TensorList tensors1, TensorList tensors2, Scalar alpha) { \ check_foreach_api_restrictions(tensors1, tensors2); \ \ if (!can_use_fast_route(tensors1, tensors2)) { \ return at::native::foreach_tensor_##NAME##_list_kernel_slow_(tensors1, tensors2, alpha); \ } \ \ foreach_tensor_list_op_<OP>(tensors1, tensors2, alpha); \ } \ \ std::vector<Tensor> foreach_tensor_##NAME##_list_kernel_cuda(TensorList tensors1, TensorList tensors2, Scalar alpha) { \ check_foreach_api_restrictions(tensors1, tensors2); \ \ if (!can_use_fast_route(tensors1, tensors2)) { \ return at::native::foreach_tensor_##NAME##_list_kernel_slow(tensors1, tensors2, alpha); \ } \ \ return foreach_tensor_list_op<OP>(tensors1, tensors2, alpha); \ } FOREACH_BINARY_OP_LIST_ALPHA(add, std::plus); FOREACH_BINARY_OP_LIST_ALPHA(sub, std::minus); FOREACH_BINARY_OP_LIST(mul, std::multiplies); FOREACH_BINARY_OP_LIST(div, std::divides); }} // namespace at::native
4981b0a4f116f141c6bd4f857b75fbcab8156cdd.cu
#include <ATen/Dispatch.h> #include <ATen/native/ForeachUtils.h> #include <ATen/native/cuda/ForeachFunctors.cuh> namespace at { namespace native { template<template<class> class Op> std::vector<Tensor> foreach_tensor_list_op(TensorList tensors1, TensorList tensors2, Scalar alpha = 1) { std::vector<std::vector<at::Tensor>> tensor_lists; std::vector<at::Tensor> vec_res; vec_res.reserve(tensors1.size()); for (const auto& t: tensors1) { vec_res.emplace_back(at::native::empty_like(t)); } tensor_lists.emplace_back(tensors1.vec()); tensor_lists.emplace_back(tensors2.vec()); tensor_lists.emplace_back(std::move(vec_res)); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors1[0].scalar_type(), "foreach_binary_op_list_cuda", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<3>(tensor_lists, BinaryOpListAlphaFunctor<scalar_t, /* depth */ 3, /* r_args_depth */ 2, /* res_arg_index */ 2>(), Op<opmath_t>(), alpha.to<opmath_t>()); }); return tensor_lists[2]; } template<template<class> class Op> void foreach_tensor_list_op_(TensorList tensors1, TensorList tensors2, Scalar alpha = 1) { std::vector<std::vector<at::Tensor>> tensor_lists; tensor_lists.emplace_back(tensors1.vec()); tensor_lists.emplace_back(tensors2.vec()); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors1[0].scalar_type(), "foreach_binary_op_list_cuda_", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<2>(tensor_lists, BinaryOpListAlphaFunctor<scalar_t, /* depth */ 2, /* r_args_depth */ 2, /* res_arg_index */ 0>(), Op<opmath_t>(), alpha.to<opmath_t>()); }); } #define FOREACH_BINARY_OP_LIST(NAME, OP) \ void foreach_tensor_##NAME##_list_kernel_cuda_(TensorList tensors1, TensorList tensors2) { \ check_foreach_api_restrictions(tensors1, tensors2); \ \ if (!can_use_fast_route(tensors1, tensors2)) { \ return at::native::foreach_tensor_##NAME##_list_kernel_slow_(tensors1, tensors2); \ } \ \ foreach_tensor_list_op_<OP>(tensors1, tensors2); \ } \ \ std::vector<Tensor> foreach_tensor_##NAME##_list_kernel_cuda(TensorList tensors1, TensorList tensors2) { \ check_foreach_api_restrictions(tensors1, tensors2); \ \ if (!can_use_fast_route(tensors1, tensors2)) { \ return at::native::foreach_tensor_##NAME##_list_kernel_slow(tensors1, tensors2); \ } \ \ return foreach_tensor_list_op<OP>(tensors1, tensors2); \ } #define FOREACH_BINARY_OP_LIST_ALPHA(NAME, OP) \ void foreach_tensor_##NAME##_list_kernel_cuda_(TensorList tensors1, TensorList tensors2, Scalar alpha) { \ check_foreach_api_restrictions(tensors1, tensors2); \ \ if (!can_use_fast_route(tensors1, tensors2)) { \ return at::native::foreach_tensor_##NAME##_list_kernel_slow_(tensors1, tensors2, alpha); \ } \ \ foreach_tensor_list_op_<OP>(tensors1, tensors2, alpha); \ } \ \ std::vector<Tensor> foreach_tensor_##NAME##_list_kernel_cuda(TensorList tensors1, TensorList tensors2, Scalar alpha) { \ check_foreach_api_restrictions(tensors1, tensors2); \ \ if (!can_use_fast_route(tensors1, tensors2)) { \ return at::native::foreach_tensor_##NAME##_list_kernel_slow(tensors1, tensors2, alpha); \ } \ \ return foreach_tensor_list_op<OP>(tensors1, tensors2, alpha); \ } FOREACH_BINARY_OP_LIST_ALPHA(add, std::plus); FOREACH_BINARY_OP_LIST_ALPHA(sub, std::minus); FOREACH_BINARY_OP_LIST(mul, std::multiplies); FOREACH_BINARY_OP_LIST(div, std::divides); }} // namespace at::native
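For readability, this is approximately what FOREACH_BINARY_OP_LIST_ALPHA(add, std::plus) expands to once the preprocessor has run (line breaks added; the mul/div variants expand the same way via FOREACH_BINARY_OP_LIST, just without the alpha argument):

void foreach_tensor_add_list_kernel_cuda_(TensorList tensors1, TensorList tensors2, Scalar alpha) {
  check_foreach_api_restrictions(tensors1, tensors2);
  if (!can_use_fast_route(tensors1, tensors2)) {
    return at::native::foreach_tensor_add_list_kernel_slow_(tensors1, tensors2, alpha);
  }
  foreach_tensor_list_op_<std::plus>(tensors1, tensors2, alpha);
}

std::vector<Tensor> foreach_tensor_add_list_kernel_cuda(TensorList tensors1, TensorList tensors2, Scalar alpha) {
  check_foreach_api_restrictions(tensors1, tensors2);
  if (!can_use_fast_route(tensors1, tensors2)) {
    return at::native::foreach_tensor_add_list_kernel_slow(tensors1, tensors2, alpha);
  }
  return foreach_tensor_list_op<std::plus>(tensors1, tensors2, alpha);
}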
dc2a4a6904575e52c4778ea10d6dbb5a22c489cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "fft3d.h" void FFT3D :: initialize (Cell * ce) { __global__ void set_kf(double2 * dkf, int nx, int ny, int nz); __global__ void set_ir(int * dir); ngrid = ce -> ngrid; volf = ce -> dv; volb = 1.0 / ce -> volume; b.x = ce -> grid[0]; g.x = ce -> grid[1]; g.y = ce -> grid[2]; hipMalloc(&dkf, ngrid * sizeof(double2)); hipMalloc(&dir, ngrid * sizeof(int)); hipfftPlan3d(&plan, ce -> grid[0], ce -> grid[1], ce -> grid[2], HIPFFT_Z2Z); hipLaunchKernelGGL(( set_kf) , dim3(g), dim3(b) , 0, 0, dkf, ce -> grid[0], ce -> grid[1], ce -> grid[2]); hipLaunchKernelGGL(( set_ir) , dim3(g), dim3(b) , 0, 0, dir); } void FFT3D :: execute (double2 * da, int key) { __global__ void timeirvol(double2 *, const int * __restrict__, double); __global__ void timekf(double2 *, const double2 * __restrict__, const int * __restrict__); __global__ void timekb(double2 *, const double2 * __restrict__, const int * __restrict__, double); if (key == - 1) { hipLaunchKernelGGL(( timekf) , dim3(g), dim3(b) , 0, 0, da, dkf, dir); hipfftExecZ2Z(plan, da, da, HIPFFT_FORWARD); hipLaunchKernelGGL(( timeirvol) , dim3(g), dim3(b) , 0, 0, da, dir, volf); } else { hipLaunchKernelGGL(( timeirvol) , dim3(g), dim3(b) , 0, 0, da, dir, 1.0); hipfftExecZ2Z(plan, da, da, HIPFFT_BACKWARD); hipLaunchKernelGGL(( timekb) , dim3(g), dim3(b) , 0, 0, da, dkf, dir, volb); } } __global__ void timekf(double2 * da, const double2 * __restrict__ dkf, const int * __restrict__ dir) { unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; double tmpr = da[ip].x * dir[ip]; double tmpi = da[ip].y * dir[ip]; da[ip].x = tmpr * dkf[ip].x - tmpi * dkf[ip].y; da[ip].y = tmpi * dkf[ip].x + tmpr * dkf[ip].y; } __global__ void timekb(double2 * da, const double2 * __restrict__ dkf, const int * __restrict__ dir, double vol) { unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; double tmpr = da[ip].x * dir[ip] * vol; double tmpi = da[ip].y * dir[ip] * vol; da[ip].x = tmpr * dkf[ip].x + tmpi * dkf[ip].y; da[ip].y = tmpi * dkf[ip].x - tmpr * dkf[ip].y; } __global__ void timeirvol(double2 * da, const int * __restrict__ dir, double vol) { unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; da[ip].x *= dir[ip] * vol; da[ip].y *= dir[ip] * vol; } __global__ void set_kf(double2 * dkf, int nx, int ny, int nz) { unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; int x = threadIdx.x - nx / 2.0; int y = blockIdx.x - ny / 2.0; int z = blockIdx.y - nz / 2.0; double dkx = M_PI / nx; double dky = M_PI / ny; double dkz = M_PI / nz; double dkr = dkx * x + dky * y + dkz * z; dkf[ip].x = cos(dkr); dkf[ip].y = - sin(dkr); } __global__ void set_ir(int * dir) { unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if ((threadIdx.x + blockIdx.x + blockIdx.y) % 2 == 0) { dir[ip] = 1; } else { dir[ip] = -1; } }
dc2a4a6904575e52c4778ea10d6dbb5a22c489cb.cu
#include <iostream> #include "fft3d.h" void FFT3D :: initialize (Cell * ce) { __global__ void set_kf(double2 * dkf, int nx, int ny, int nz); __global__ void set_ir(int * dir); ngrid = ce -> ngrid; volf = ce -> dv; volb = 1.0 / ce -> volume; b.x = ce -> grid[0]; g.x = ce -> grid[1]; g.y = ce -> grid[2]; cudaMalloc(&dkf, ngrid * sizeof(double2)); cudaMalloc(&dir, ngrid * sizeof(int)); cufftPlan3d(&plan, ce -> grid[0], ce -> grid[1], ce -> grid[2], CUFFT_Z2Z); set_kf <<< g, b >>> (dkf, ce -> grid[0], ce -> grid[1], ce -> grid[2]); set_ir <<< g, b >>> (dir); } void FFT3D :: execute (double2 * da, int key) { __global__ void timeirvol(double2 *, const int * __restrict__, double); __global__ void timekf(double2 *, const double2 * __restrict__, const int * __restrict__); __global__ void timekb(double2 *, const double2 * __restrict__, const int * __restrict__, double); if (key == - 1) { timekf <<< g, b >>> (da, dkf, dir); cufftExecZ2Z(plan, da, da, CUFFT_FORWARD); timeirvol <<< g, b >>> (da, dir, volf); } else { timeirvol <<< g, b >>> (da, dir, 1.0); cufftExecZ2Z(plan, da, da, CUFFT_INVERSE); timekb <<< g, b >>> (da, dkf, dir, volb); } } __global__ void timekf(double2 * da, const double2 * __restrict__ dkf, const int * __restrict__ dir) { unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; double tmpr = da[ip].x * dir[ip]; double tmpi = da[ip].y * dir[ip]; da[ip].x = tmpr * dkf[ip].x - tmpi * dkf[ip].y; da[ip].y = tmpi * dkf[ip].x + tmpr * dkf[ip].y; } __global__ void timekb(double2 * da, const double2 * __restrict__ dkf, const int * __restrict__ dir, double vol) { unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; double tmpr = da[ip].x * dir[ip] * vol; double tmpi = da[ip].y * dir[ip] * vol; da[ip].x = tmpr * dkf[ip].x + tmpi * dkf[ip].y; da[ip].y = tmpi * dkf[ip].x - tmpr * dkf[ip].y; } __global__ void timeirvol(double2 * da, const int * __restrict__ dir, double vol) { unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; da[ip].x *= dir[ip] * vol; da[ip].y *= dir[ip] * vol; } __global__ void set_kf(double2 * dkf, int nx, int ny, int nz) { unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; int x = threadIdx.x - nx / 2.0; int y = blockIdx.x - ny / 2.0; int z = blockIdx.y - nz / 2.0; double dkx = M_PI / nx; double dky = M_PI / ny; double dkz = M_PI / nz; double dkr = dkx * x + dky * y + dkz * z; dkf[ip].x = cos(dkr); dkf[ip].y = - sin(dkr); } __global__ void set_ir(int * dir) { unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if ((threadIdx.x + blockIdx.x + blockIdx.y) % 2 == 0) { dir[ip] = 1; } else { dir[ip] = -1; } }
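set_kf and set_ir implement the usual origin-centering trick for FFTs on a grid whose origin sits in the middle of the box: for raw grid indices (x, y, z) they store

k_f(x,y,z) = \exp\!\left[-\,i\pi\left(\frac{x - n_x/2}{n_x} + \frac{y - n_y/2}{n_y} + \frac{z - n_z/2}{n_z}\right)\right], \qquad ir(x,y,z) = (-1)^{\,x+y+z},

so timekf/timekb apply the phase factor and checkerboard sign before and after cufftExecZ2Z, with the forward pass scaled by the volume element dv and the backward pass by 1/volume.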
b06f23758027b8b8b76d70a16f0c6ef294bfebc2.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> #include <iostream> /** forward kernel * c) Implement the forward pass of the Center Surround Convolution * - Write a CUDA-kernel that computes the forward pass form Equation 1. The * kernel should take I, w c , w s, w b and O in form of * torch::PackedTensorAccessor32<scalar t> objects and write into O */ template <typename scalar_t> __global__ void center_surround_convolution_forward_kernel ( const torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> I, const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w_c, const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w_s, const torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits> w_b, torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> O) { const int o = blockIdx.x * blockDim.x + threadIdx.x;// Calculate this threads output index const int n = blockIdx.y * blockDim.y + threadIdx.y;// Calculate this threads batch index // 28 x 28 Images scalar_t result = 0.0f; idx_o = 29 + (o/27)*28 + o%27; for (int i = -1; i< 2; ++i) { for (int j = -1; j< 2; ++j) { idx = idx_o + i + 28*j; if(idx >= 0 && idx < 784) { if(idx == o) { result += I[n][o] * w_c; } else { result += I[n][o] * w_s; } } } } O[n][o] = result; } /** backward kernels * d) Implement the backward pass of the Center Surround Convolution. * - Write CUDA kernels to compute the partial derivatives dL_dI, dL_dw_c, * dL_dw_s and dL_dw_b by implementing Equations 3 4 5 and 6. */ // dL_dw_c and d_L_dw_s template <typename scalar_t> __global__ void dL_dw_kernel ( const torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> dL_dO, const torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> I, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> dL_dw_c, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> dL_dw_s) { // TODO compute dL_dw_c and dL_dw_s here } // dL_dw_b template <typename scalar_t> __global__ void dL_dw_b_kernel ( const torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> dL_dO, torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits> dL_dw_b) { const int i= blockIdx.x* blockDim.x+ threadIdx.x;// Calculate this thread's input index const int n = blockIdx.y* blockDim.y+ threadIdx.y;// Calculate this thread's batch index // TODO compute dL_dw_b here } // dL_dI template <typename scalar_t> __global__ void dL_dI_kernel( const torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> dL_dO_padded, const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w_c, const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w_s, torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> dL_dI) { // TODO your kernel for dL_dI here } #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) /** c) * - Write a c++ function that allocates memory for O and calls your kernel * with appropriate block- and grid dimensions. This function should take I, * w_c, w_s and w_b as torch::Tensor objects and returns a * std::vector<torch::Tensor> the computed O tensor. 
*/ std::vector<torch::Tensor> center_surround_convolution_forward ( torch::Tensor I, torch::Tensor w_c, torch::Tensor w_s, torch::Tensor w_b) { // TODO Use the forward kernel to compute O // - Check inputs // - Allocate Memory for O // - Call the kernel (only for floating types) // - return {O} CHECK_INPUT(I); CHECK_INPUT(w_c); CHECK_INPUT(w_s); CHECK_INPUT(w_b); // 27x27 =729 // batch size 100 const auto batch_size = I.size(0);// Obtain the batch size const auto input_size = I.size(1);// Obtain the I size auto O = torch::empty({batch_size, 729}, input_size.options());// Create an uninitialized output tensor const dim3 block_dim(32, 32);// Use 1024 element blocks const dim3 grid_dim((729 + 31) / 32, (100 + 31) / 32);// Map output elements to x and batch elements to y // -> One thread per calculated output element AT_DISPATCH_FLOATING_TYPES(// Executes the kernel only if I.type() is a floating point type I.type(), "center_surround_convolution", ([&] { // and sets the kernel's template parameter accordingly. hipLaunchKernelGGL(( center_surround_convolution_forward_kernel<scalar_t>), dim3(grid_dim), dim3(block_dim), 0, 0, I.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), w_c.packed_accessor32<scalar_t, 1, torch::RestrictPtrTraits>(), w_s.packed_accessor32<scalar_t, 1, torch::RestrictPtrTraits>(), w_b.packed_accessor32<scalar_t, 1, torch::RestrictPtrTraits>(), O.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>()); })); return {O}; } /** d) * - Write a c++ function that allocates tensors for the derivatives and calls * the kernels to compute * their content. */ std::vector<torch::Tensor> center_surround_convolution_backward ( torch::Tensor dL_dO, torch::Tensor I, torch::Tensor w_c, torch::Tensor w_s, torch::Tensor w_b) { // TODO Use the backward kernels to compute the derivatives // - Check inputs // - Allocate memory for dL_dI, dL_dw_c, dL_dw_s and dL_dw_b // - Call the kernels with correct grid and block sizes // - return {dL_dI, dL_dw_c, dL_dw_s, dL_dw_b}; // XXX: Use this padded version of dL_dO to compute dL_dI auto dL_dO_padded = torch::constant_pad_nd(dL_dO, torch::IntList({2, 2, 2, 2}), 0); return {I, w_c, w_s, w_b}; } /** c) & d) * Export your c++ function to a python module. Call the exported function * forward / backward. */ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("forward", &center_surrond_convolution_forward, "Center-Surround-Convolution TODO documentation string"); m.def("backward", &center_surround_convolution_backward, "Center-Surround-Convolution TODO documentation string"); }
b06f23758027b8b8b76d70a16f0c6ef294bfebc2.cu
#include <torch/extension.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> #include <iostream> /** forward kernel * c) Implement the forward pass of the Center Surround Convolution * - Write a CUDA-kernel that computes the forward pass form Equation 1. The * kernel should take I, w c , w s, w b and O in form of * torch::PackedTensorAccessor32<scalar t> objects and write into O */ template <typename scalar_t> __global__ void center_surround_convolution_forward_kernel ( const torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> I, const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w_c, const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w_s, const torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits> w_b, torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> O) { const int o = blockIdx.x * blockDim.x + threadIdx.x;// Calculate this threads output index const int n = blockIdx.y * blockDim.y + threadIdx.y;// Calculate this threads batch index // 28 x 28 Images scalar_t result = 0.0f; idx_o = 29 + (o/27)*28 + o%27; for (int i = -1; i< 2; ++i) { for (int j = -1; j< 2; ++j) { idx = idx_o + i + 28*j; if(idx >= 0 && idx < 784) { if(idx == o) { result += I[n][o] * w_c; } else { result += I[n][o] * w_s; } } } } O[n][o] = result; } /** backward kernels * d) Implement the backward pass of the Center Surround Convolution. * - Write CUDA kernels to compute the partial derivatives dL_dI, dL_dw_c, * dL_dw_s and dL_dw_b by implementing Equations 3 4 5 and 6. */ // dL_dw_c and d_L_dw_s template <typename scalar_t> __global__ void dL_dw_kernel ( const torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> dL_dO, const torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> I, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> dL_dw_c, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> dL_dw_s) { // TODO compute dL_dw_c and dL_dw_s here } // dL_dw_b template <typename scalar_t> __global__ void dL_dw_b_kernel ( const torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> dL_dO, torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits> dL_dw_b) { const int i= blockIdx.x* blockDim.x+ threadIdx.x;// Calculate this thread's input index const int n = blockIdx.y* blockDim.y+ threadIdx.y;// Calculate this thread's batch index // TODO compute dL_dw_b here } // dL_dI template <typename scalar_t> __global__ void dL_dI_kernel( const torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> dL_dO_padded, const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w_c, const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w_s, torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> dL_dI) { // TODO your kernel for dL_dI here } #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) /** c) * - Write a c++ function that allocates memory for O and calls your kernel * with appropriate block- and grid dimensions. This function should take I, * w_c, w_s and w_b as torch::Tensor objects and returns a * std::vector<torch::Tensor> the computed O tensor. 
*/ std::vector<torch::Tensor> center_surround_convolution_forward ( torch::Tensor I, torch::Tensor w_c, torch::Tensor w_s, torch::Tensor w_b) { // TODO Use the forward kernel to compute O // - Check inputs // - Allocate Memory for O // - Call the kernel (only for floating types) // - return {O} CHECK_INPUT(I); CHECK_INPUT(w_c); CHECK_INPUT(w_s); CHECK_INPUT(w_b); // 27x27 =729 // batch size 100 const auto batch_size = I.size(0);// Obtain the batch size const auto input_size = I.size(1);// Obtain the I size auto O = torch::empty({batch_size, 729}, input_size.options());// Create an uninitialized output tensor const dim3 block_dim(32, 32);// Use 1024 element blocks const dim3 grid_dim((729 + 31) / 32, (100 + 31) / 32);// Map output elements to x and batch elements to y // -> One thread per calculated output element AT_DISPATCH_FLOATING_TYPES(// Executes the kernel only if I.type() is a floating point type I.type(), "center_surround_convolution", ([&] { // and sets the kernel's template parameter accordingly. center_surround_convolution_forward_kernel<scalar_t><<<grid_dim, block_dim>>>( I.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), w_c.packed_accessor32<scalar_t, 1, torch::RestrictPtrTraits>(), w_s.packed_accessor32<scalar_t, 1, torch::RestrictPtrTraits>(), w_b.packed_accessor32<scalar_t, 1, torch::RestrictPtrTraits>(), O.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>()); })); return {O}; } /** d) * - Write a c++ function that allocates tensors for the derivatives and calls * the kernels to compute * their content. */ std::vector<torch::Tensor> center_surround_convolution_backward ( torch::Tensor dL_dO, torch::Tensor I, torch::Tensor w_c, torch::Tensor w_s, torch::Tensor w_b) { // TODO Use the backward kernels to compute the derivatives // - Check inputs // - Allocate memory for dL_dI, dL_dw_c, dL_dw_s and dL_dw_b // - Call the kernels with correct grid and block sizes // - return {dL_dI, dL_dw_c, dL_dw_s, dL_dw_b}; // XXX: Use this padded version of dL_dO to compute dL_dI auto dL_dO_padded = torch::constant_pad_nd(dL_dO, torch::IntList({2, 2, 2, 2}), 0); return {I, w_c, w_s, w_b}; } /** c) & d) * Export your c++ function to a python module. Call the exported function * forward / backward. */ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("forward", &center_surrond_convolution_forward, "Center-Surround-Convolution TODO documentation string"); m.def("backward", &center_surround_convolution_backward, "Center-Surround-Convolution TODO documentation string"); }
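The forward kernel in this exercise skeleton does not compile as written: idx_o and idx are never declared, the 4-D accessors are indexed as 2-D, w_c and w_s are used as scalars, and the module binds center_surrond_convolution_forward (a typo for the function actually defined). Below is a hedged sketch of what a working forward pass could look like, assuming NCHW tensors, per-channel-pair weights w_c[c_out][c_in] and w_s[c_out][c_in], a bias w_b[c_out], and a valid 3x3 window (output (H-2) x (W-2), not the 27 x 27 hard-coded above); the index conventions are assumptions, not the exercise's Equation 1:

template <typename scalar_t>
__global__ void csc_forward_sketch(
    const torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> I,   // N x C_in x H x W
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w_c, // C_out x C_in
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w_s, // C_out x C_in
    const torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits> w_b, // C_out
    torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> O)         // N x C_out x (H-2) x (W-2)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;   // output column
    const int y = blockIdx.y * blockDim.y + threadIdx.y;   // output row
    const int n = blockIdx.z;                              // batch element
    if (x >= O.size(3) || y >= O.size(2)) return;

    for (int co = 0; co < O.size(1); ++co) {
        scalar_t acc = w_b[co];
        for (int ci = 0; ci < I.size(1); ++ci) {
            // centre tap; the valid 3x3 window shifts the input index by +1
            acc += w_c[co][ci] * I[n][ci][y + 1][x + 1];
            // the eight surround taps share one weight per (co, ci) pair
            for (int dy = -1; dy <= 1; ++dy)
                for (int dx = -1; dx <= 1; ++dx)
                    if (dx != 0 || dy != 0)
                        acc += w_s[co][ci] * I[n][ci][y + 1 + dy][x + 1 + dx];
        }
        O[n][co][y][x] = acc;
    }
}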
f36f72246280c100b1c8665c901882eece303d1c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef __CUDNN__ #include "Concatenate.hpp" // template class ConcatenateChannelWise<int>; template class ConcatenateChannelWise<float>; // template class ConcatenateChannelWise<double>; __global__ void ConcatenateChannelWise_ForwardPropagate_kernel(int sizeOfResultImg, int sizeOfInputImg, int timesize, int batchsize, float *result, float *input, int preSize) { for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < sizeOfInputImg; idx += blockDim.x * gridDim.x) { for (int ba = 0; ba < batchsize; ba++) { result[ba * sizeOfResultImg + idx + preSize] = input[ba * sizeOfInputImg + idx]; } } } template<typename DTYPE> int ConcatenateChannelWise<DTYPE>::ForwardPropagateOnGPU(int pTime) { int noBlock = 3, threadsPerBlock = 128; Tensor<DTYPE> *result = this->GetResult(); Tensor<DTYPE> *input = NULL; int timesize = result->GetTimeSize(); int batchsize = result->GetBatchSize(); int channelsize = result->GetChannelSize(); int rowsize = result->GetRowSize(); int colsize = result->GetColSize(); Shape *resultTenShape = result->GetShape(); int sizeOfPlane = rowsize * colsize; int sizeOfResultImg = channelsize * sizeOfPlane; int sizeOfInputImg = 0; DTYPE *result_gpu = result->GetGPUData(); DTYPE *input_gpu = NULL; int preSize = 0; int inputChannelSize = 0; for (int opnum = 0; opnum < m_noOperator; opnum++) { input = this->GetInput()[opnum]->GetResult(); input_gpu = input->GetGPUData(); inputChannelSize = input->GetChannelSize(); preSize = m_aAccumulate[opnum] * sizeOfPlane; sizeOfInputImg = inputChannelSize * sizeOfPlane; // std::cout << "check" << '\n'; GetKernelParameters(sizeOfInputImg, &noBlock, &threadsPerBlock); // printf("%d, %d\n", noBlock, threadsPerBlock); ConcatenateChannelWise_ForwardPropagate_kernel << < 64, 128 >> > (sizeOfResultImg, sizeOfInputImg, timesize, batchsize, result_gpu, input_gpu, preSize); } return TRUE; } __global__ void ConcatenateChannelWise_BackPropagate_kernel(int sizeOfResultImg, int sizeOfInputImg, int timesize, int batchsize, float *delta_gpu, float *input_delta_gpu, int preSize) { for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < sizeOfInputImg; idx += blockDim.x * gridDim.x) { for (int ba = 0; ba < batchsize; ba++) { input_delta_gpu[ba * sizeOfInputImg + idx] += delta_gpu[ba * sizeOfResultImg + idx + preSize]; } } } template<typename DTYPE> int ConcatenateChannelWise<DTYPE>::BackPropagateOnGPU(int pTime) { Tensor<DTYPE> *this_delta = this->GetDelta(); Tensor<DTYPE> *input_delta = NULL; int timesize = this_delta->GetTimeSize(); int batchsize = this_delta->GetBatchSize(); int channelsize = this_delta->GetChannelSize(); int rowsize = this_delta->GetRowSize(); int colsize = this_delta->GetColSize(); Shape *resultTenShape = this_delta->GetShape(); int sizeOfPlane = rowsize * colsize; int sizeOfResultImg = channelsize * sizeOfPlane; int sizeOfInputImg = 0; DTYPE *delta_gpu = this_delta->GetGPUData(); DTYPE *input_delta_gpu = NULL; int preSize = 0; int inputChannelSize = 0; for (int opnum = 0; opnum < m_noOperator; opnum++) { input_delta = this->GetInput()[opnum]->GetDelta(); input_delta_gpu = input_delta->GetGPUData(); inputChannelSize = input_delta->GetChannelSize(); preSize = m_aAccumulate[opnum] * sizeOfPlane;; sizeOfInputImg = inputChannelSize * sizeOfPlane; ConcatenateChannelWise_BackPropagate_kernel << < 64, 128 >> > (sizeOfResultImg, sizeOfInputImg, timesize, batchsize, delta_gpu, input_delta_gpu, preSize); } return TRUE; } // template class 
ConcatenateColumnWise<int>; template class ConcatenateColumnWise<float>; // template class ConcatenateColumnWise<double>; //ColumnWise template<typename DTYPE> int ConcatenateColumnWise<DTYPE>::ForwardPropagateOnGPU(int pTime) { int noBlock = 3, threadsPerBlock = 128; Tensor<DTYPE> *result = this->GetResult(); Tensor<DTYPE> *input = NULL; int timesize = result->GetTimeSize(); int batchsize = result->GetBatchSize(); int channelsize = result->GetChannelSize(); int rowsize = result->GetRowSize(); int colsize = result->GetColSize(); Shape *resultTenShape = result->GetShape(); int sizeOfPlane = rowsize * colsize; int sizeOfResultImg = channelsize * sizeOfPlane; int sizeOfInputImg = 0; DTYPE *result_gpu = result->GetGPUData(pTime); DTYPE *input_gpu = NULL; int preSize = 0; for (int opnum = 0; opnum < m_noOperator; opnum++) { input = this->GetInput()[opnum]->GetResult(); input_gpu = input->GetGPUData(pTime); preSize = m_aAccumulate[opnum]; sizeOfInputImg = input->GetColSize(); GetKernelParameters(sizeOfInputImg, &noBlock, &threadsPerBlock); ConcatenateChannelWise_ForwardPropagate_kernel << < 64, 128 >> > (sizeOfResultImg, sizeOfInputImg, timesize, batchsize, result_gpu, input_gpu, preSize); } return TRUE; } template<typename DTYPE> int ConcatenateColumnWise<DTYPE>::BackPropagateOnGPU(int pTime) { Tensor<DTYPE> *this_delta = this->GetDelta(); Tensor<DTYPE> *input_delta = NULL; int timesize = this_delta->GetTimeSize(); int batchsize = this_delta->GetBatchSize(); int channelsize = this_delta->GetChannelSize(); int rowsize = this_delta->GetRowSize(); int colsize = this_delta->GetColSize(); Shape *resultTenShape = this_delta->GetShape(); int sizeOfPlane = rowsize * colsize; int sizeOfResultImg = channelsize * sizeOfPlane; int sizeOfInputImg = 0; DTYPE *delta_gpu = this_delta->GetGPUData(pTime); DTYPE *input_delta_gpu = NULL; int preSize = 0; int inputChannelSize = 0; for (int opnum = 0; opnum < m_noOperator; opnum++) { input_delta = this->GetInput()[opnum]->GetDelta(); input_delta_gpu = input_delta->GetGPUData(pTime); inputChannelSize = input_delta->GetChannelSize(); preSize = m_aAccumulate[opnum]; sizeOfInputImg = input_delta->GetColSize(); ConcatenateChannelWise_BackPropagate_kernel << < 64, 128 >> > (sizeOfResultImg, sizeOfInputImg, timesize, batchsize, delta_gpu, input_delta_gpu, preSize); } return TRUE; } #endif // ifdef __CUDNN__
f36f72246280c100b1c8665c901882eece303d1c.cu
#ifdef __CUDNN__ #include "Concatenate.hpp" // template class ConcatenateChannelWise<int>; template class ConcatenateChannelWise<float>; // template class ConcatenateChannelWise<double>; __global__ void ConcatenateChannelWise_ForwardPropagate_kernel(int sizeOfResultImg, int sizeOfInputImg, int timesize, int batchsize, float *result, float *input, int preSize) { for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < sizeOfInputImg; idx += blockDim.x * gridDim.x) { for (int ba = 0; ba < batchsize; ba++) { result[ba * sizeOfResultImg + idx + preSize] = input[ba * sizeOfInputImg + idx]; } } } template<typename DTYPE> int ConcatenateChannelWise<DTYPE>::ForwardPropagateOnGPU(int pTime) { int noBlock = 3, threadsPerBlock = 128; Tensor<DTYPE> *result = this->GetResult(); Tensor<DTYPE> *input = NULL; int timesize = result->GetTimeSize(); int batchsize = result->GetBatchSize(); int channelsize = result->GetChannelSize(); int rowsize = result->GetRowSize(); int colsize = result->GetColSize(); Shape *resultTenShape = result->GetShape(); int sizeOfPlane = rowsize * colsize; int sizeOfResultImg = channelsize * sizeOfPlane; int sizeOfInputImg = 0; DTYPE *result_gpu = result->GetGPUData(); DTYPE *input_gpu = NULL; int preSize = 0; int inputChannelSize = 0; for (int opnum = 0; opnum < m_noOperator; opnum++) { input = this->GetInput()[opnum]->GetResult(); input_gpu = input->GetGPUData(); inputChannelSize = input->GetChannelSize(); preSize = m_aAccumulate[opnum] * sizeOfPlane; sizeOfInputImg = inputChannelSize * sizeOfPlane; // std::cout << "check" << '\n'; GetKernelParameters(sizeOfInputImg, &noBlock, &threadsPerBlock); // printf("%d, %d\n", noBlock, threadsPerBlock); ConcatenateChannelWise_ForwardPropagate_kernel << < 64, 128 >> > (sizeOfResultImg, sizeOfInputImg, timesize, batchsize, result_gpu, input_gpu, preSize); } return TRUE; } __global__ void ConcatenateChannelWise_BackPropagate_kernel(int sizeOfResultImg, int sizeOfInputImg, int timesize, int batchsize, float *delta_gpu, float *input_delta_gpu, int preSize) { for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < sizeOfInputImg; idx += blockDim.x * gridDim.x) { for (int ba = 0; ba < batchsize; ba++) { input_delta_gpu[ba * sizeOfInputImg + idx] += delta_gpu[ba * sizeOfResultImg + idx + preSize]; } } } template<typename DTYPE> int ConcatenateChannelWise<DTYPE>::BackPropagateOnGPU(int pTime) { Tensor<DTYPE> *this_delta = this->GetDelta(); Tensor<DTYPE> *input_delta = NULL; int timesize = this_delta->GetTimeSize(); int batchsize = this_delta->GetBatchSize(); int channelsize = this_delta->GetChannelSize(); int rowsize = this_delta->GetRowSize(); int colsize = this_delta->GetColSize(); Shape *resultTenShape = this_delta->GetShape(); int sizeOfPlane = rowsize * colsize; int sizeOfResultImg = channelsize * sizeOfPlane; int sizeOfInputImg = 0; DTYPE *delta_gpu = this_delta->GetGPUData(); DTYPE *input_delta_gpu = NULL; int preSize = 0; int inputChannelSize = 0; for (int opnum = 0; opnum < m_noOperator; opnum++) { input_delta = this->GetInput()[opnum]->GetDelta(); input_delta_gpu = input_delta->GetGPUData(); inputChannelSize = input_delta->GetChannelSize(); preSize = m_aAccumulate[opnum] * sizeOfPlane;; sizeOfInputImg = inputChannelSize * sizeOfPlane; ConcatenateChannelWise_BackPropagate_kernel << < 64, 128 >> > (sizeOfResultImg, sizeOfInputImg, timesize, batchsize, delta_gpu, input_delta_gpu, preSize); } return TRUE; } // template class ConcatenateColumnWise<int>; template class ConcatenateColumnWise<float>; // template class 
ConcatenateColumnWise<double>; //ColumnWise template<typename DTYPE> int ConcatenateColumnWise<DTYPE>::ForwardPropagateOnGPU(int pTime) { int noBlock = 3, threadsPerBlock = 128; Tensor<DTYPE> *result = this->GetResult(); Tensor<DTYPE> *input = NULL; int timesize = result->GetTimeSize(); int batchsize = result->GetBatchSize(); int channelsize = result->GetChannelSize(); int rowsize = result->GetRowSize(); int colsize = result->GetColSize(); Shape *resultTenShape = result->GetShape(); int sizeOfPlane = rowsize * colsize; int sizeOfResultImg = channelsize * sizeOfPlane; int sizeOfInputImg = 0; DTYPE *result_gpu = result->GetGPUData(pTime); DTYPE *input_gpu = NULL; int preSize = 0; for (int opnum = 0; opnum < m_noOperator; opnum++) { input = this->GetInput()[opnum]->GetResult(); input_gpu = input->GetGPUData(pTime); preSize = m_aAccumulate[opnum]; sizeOfInputImg = input->GetColSize(); GetKernelParameters(sizeOfInputImg, &noBlock, &threadsPerBlock); ConcatenateChannelWise_ForwardPropagate_kernel << < 64, 128 >> > (sizeOfResultImg, sizeOfInputImg, timesize, batchsize, result_gpu, input_gpu, preSize); } return TRUE; } template<typename DTYPE> int ConcatenateColumnWise<DTYPE>::BackPropagateOnGPU(int pTime) { Tensor<DTYPE> *this_delta = this->GetDelta(); Tensor<DTYPE> *input_delta = NULL; int timesize = this_delta->GetTimeSize(); int batchsize = this_delta->GetBatchSize(); int channelsize = this_delta->GetChannelSize(); int rowsize = this_delta->GetRowSize(); int colsize = this_delta->GetColSize(); Shape *resultTenShape = this_delta->GetShape(); int sizeOfPlane = rowsize * colsize; int sizeOfResultImg = channelsize * sizeOfPlane; int sizeOfInputImg = 0; DTYPE *delta_gpu = this_delta->GetGPUData(pTime); DTYPE *input_delta_gpu = NULL; int preSize = 0; int inputChannelSize = 0; for (int opnum = 0; opnum < m_noOperator; opnum++) { input_delta = this->GetInput()[opnum]->GetDelta(); input_delta_gpu = input_delta->GetGPUData(pTime); inputChannelSize = input_delta->GetChannelSize(); preSize = m_aAccumulate[opnum]; sizeOfInputImg = input_delta->GetColSize(); ConcatenateChannelWise_BackPropagate_kernel << < 64, 128 >> > (sizeOfResultImg, sizeOfInputImg, timesize, batchsize, delta_gpu, input_delta_gpu, preSize); } return TRUE; } #endif // ifdef __CUDNN__
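// --- Illustrative addendum (standalone sketch, not part of the framework above) ---
// It shows the layout produced by ConcatenateChannelWise_ForwardPropagate_kernel:
// each input occupies a contiguous block of sizeOfInputImg elements per batch
// entry, placed at offset preSize inside the concatenated result image. The
// sizes below (inputs of 2 and 3 channels of 4x4 planes, batch of 2) are made
// up purely for illustration.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__global__ void concat_channelwise_kernel(int sizeOfResultImg, int sizeOfInputImg,
                                          int batchsize, float *result,
                                          const float *input, int preSize) {
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < sizeOfInputImg;
         idx += blockDim.x * gridDim.x) {
        for (int ba = 0; ba < batchsize; ba++) {
            result[ba * sizeOfResultImg + idx + preSize] = input[ba * sizeOfInputImg + idx];
        }
    }
}

int main() {
    const int batch = 2, plane = 4 * 4;
    const int ch[2] = {2, 3};                 // channel counts of the two inputs
    const int sizeOfResultImg = (ch[0] + ch[1]) * plane;

    float *d_result;
    cudaMalloc(&d_result, batch * sizeOfResultImg * sizeof(float));

    int preSize = 0;
    for (int op = 0; op < 2; ++op) {
        const int sizeOfInputImg = ch[op] * plane;
        std::vector<float> h_in(batch * sizeOfInputImg, float(op + 1));  // fill input op with op+1
        float *d_in;
        cudaMalloc(&d_in, h_in.size() * sizeof(float));
        cudaMemcpy(d_in, h_in.data(), h_in.size() * sizeof(float), cudaMemcpyHostToDevice);
        concat_channelwise_kernel<<<64, 128>>>(sizeOfResultImg, sizeOfInputImg, batch,
                                               d_result, d_in, preSize);
        cudaDeviceSynchronize();              // kernel must finish before freeing its input
        cudaFree(d_in);
        preSize += sizeOfInputImg;            // plays the role of m_aAccumulate[opnum] * sizeOfPlane
    }

    std::vector<float> h_out(batch * sizeOfResultImg);
    cudaMemcpy(h_out.data(), d_result, h_out.size() * sizeof(float), cudaMemcpyDeviceToHost);
    std::printf("first element: %f, last element: %f\n",
                h_out[0], h_out[sizeOfResultImg - 1]);  // expect 1.0 and 2.0
    cudaFree(d_result);
    return 0;
}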
9f4ce78f8e2bb96c049ed9035bc27d0ddeb5a81b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" #define BOOL int #define TRUE 1 #define FALSE 0 #define populationSize 128 #define chromosomeSize 10 #define maxGeneration 500 #define crossRate 0.8 #define mutationRate 0.01 #define eliteCount 0.05*populationSize //typedef float float; float LB[10] = {0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5}; //lower bound float UB[10] = {5, 4, 5, 4, 5, 5, 5, 5, 5, 4}; //upper bound float *a; //Tzaihe float *aa; //yingliK float *aaa; //Tyingli int aRow; int aaaRow; float Dysum[9]; __device__ float c_LB[10]; //lower bound __device__ float c_UB[10]; //upper bound __device__ float *c_a; //Tzaihe __device__ float *c_aa; //yingliK __device__ float *c_aaa; //Tyingli __device__ int c_aRow; __device__ int c_aaaRow; __device__ float c_Dysum[9]; float bestFitnessOfGen; // int bestIndexOfGen; // float aveFitnessOfGen[maxGeneration]; // float fval; // int G; // //BOOL elitism = TRUE; // __global__ void mutationFcn(float *populationArray, hiprandState_t *states) { //printf("mutationFcn\n"); int idx = threadIdx.x; hiprandState_t s = states[idx]; hiprandState_t t = states[idx]; float ss = hiprand_uniform(&s); int tt = hiprand(&t); float scale = 0.5, shrink = 0.75; scale -= scale * shrink * idx / maxGeneration; // if (ss < mutationRate){ for (int j = 0; j < chromosomeSize; j++) { // if (tt % 2 != 0) { float tmpChromosome; do { tmpChromosome = populationArray[idx * chromosomeSize + j] + scale * (c_UB[j] - c_LB[j]) * ss; // } while (tmpChromosome > c_UB[j] || tmpChromosome < c_LB[j]); populationArray[idx * chromosomeSize + j] = tmpChromosome; } } } }
9f4ce78f8e2bb96c049ed9035bc27d0ddeb5a81b.cu
#include "includes.h" #define BOOL int #define TRUE 1 #define FALSE 0 #define populationSize 128 #define chromosomeSize 10 #define maxGeneration 500 #define crossRate 0.8 #define mutationRate 0.01 #define eliteCount 0.05*populationSize //typedef float float; float LB[10] = {0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5}; //lower bound float UB[10] = {5, 4, 5, 4, 5, 5, 5, 5, 5, 4}; //upper bound float *a; //Tzaihe float *aa; //yingliK float *aaa; //Tyingli int aRow; int aaaRow; float Dysum[9]; __device__ float c_LB[10]; //lower bound __device__ float c_UB[10]; //upper bound __device__ float *c_a; //Tzaihe __device__ float *c_aa; //yingliK __device__ float *c_aaa; //Tyingli __device__ int c_aRow; __device__ int c_aaaRow; __device__ float c_Dysum[9]; float bestFitnessOfGen; //每一代的最优适应度 int bestIndexOfGen; //每一代的最优适应度位置 float aveFitnessOfGen[maxGeneration]; //每一代的平均最优适应度 float fval; //最终最优适应度 int G; //取得最终最优适应度的迭代次数 //BOOL elitism = TRUE; //是否精英选择 __global__ void mutationFcn(float *populationArray, curandState_t *states) { //printf("mutationFcn\n"); int idx = threadIdx.x; curandState_t s = states[idx]; curandState_t t = states[idx]; float ss = curand_uniform(&s); int tt = curand(&t); float scale = 0.5, shrink = 0.75; scale -= scale * shrink * idx / maxGeneration; //判断当前个体是否变异 if (ss < mutationRate){ for (int j = 0; j < chromosomeSize; j++) { //判断当前染色体是否变异 if (tt % 2 != 0) { float tmpChromosome; do { tmpChromosome = populationArray[idx * chromosomeSize + j] + scale * (c_UB[j] - c_LB[j]) * ss; //判断是否越界 } while (tmpChromosome > c_UB[j] || tmpChromosome < c_LB[j]); populationArray[idx * chromosomeSize + j] = tmpChromosome; } } } }
d8c9ed77cf64c31a0a4077e4b40ad7c9ba187d1f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 __global__ void zmgeellmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; extern __shared__ magmaDoubleComplex dot[]; if(row < num_rows ){ for( int i=0; i<num_vecs; i++) dot[ threadIdx.x + i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_cols_per_row * row + n ]; magmaDoubleComplex val = dval [ num_cols_per_row * row + n ]; if( val != 0){ for( int i=0; i<num_vecs; i++) dot[ threadIdx.x + i*blockDim.x ] += val * dx[col + i * num_cols ]; } } for( int i=0; i<num_vecs; i++) dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * dy [ row + i * num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELLPACK. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in ELLPACK @param[in] dcolind magmaIndex_ptr columnindices of A in ELLPACK @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zmgeellmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaDoubleComplex ); // num_vecs vectors hipLaunchKernelGGL(( zmgeellmv_kernel), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream() , m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); return MAGMA_SUCCESS; }
d8c9ed77cf64c31a0a4077e4b40ad7c9ba187d1f.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 __global__ void zmgeellmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; extern __shared__ magmaDoubleComplex dot[]; if(row < num_rows ){ for( int i=0; i<num_vecs; i++) dot[ threadIdx.x + i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_cols_per_row * row + n ]; magmaDoubleComplex val = dval [ num_cols_per_row * row + n ]; if( val != 0){ for( int i=0; i<num_vecs; i++) dot[ threadIdx.x + i*blockDim.x ] += val * dx[col + i * num_cols ]; } } for( int i=0; i<num_vecs; i++) dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * dy [ row + i * num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELLPACK. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in ELLPACK @param[in] dcolind magmaIndex_ptr columnindices of A in ELLPACK @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zmgeellmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaDoubleComplex ); // num_vecs vectors zmgeellmv_kernel<<< grid, threads, MEM_SIZE, queue->cuda_stream() >>> ( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); return MAGMA_SUCCESS; }
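// --- Illustrative addendum (host-side sketch, not a MAGMA routine) ---
// It shows the ELLPACK layout consumed by zmgeellmv_kernel: every row stores
// exactly num_cols_per_row (value, column) pairs contiguously at offset
// num_cols_per_row * row, with short rows padded by explicit zeros (the kernel
// skips val == 0 entries). std::complex stands in for magmaDoubleComplex here.
#include <complex>
#include <vector>

struct EllpackMatrix {
    int num_rows = 0;
    int num_cols_per_row = 0;
    std::vector<std::complex<double>> val;  // size num_rows * num_cols_per_row
    std::vector<int> colind;                // same size; padded entries point at column 0
};

EllpackMatrix dense_to_ellpack(const std::vector<std::complex<double>> &dense,
                               int num_rows, int num_cols) {
    EllpackMatrix A;
    A.num_rows = num_rows;
    // the widest row determines the padded row length
    for (int r = 0; r < num_rows; ++r) {
        int nnz = 0;
        for (int c = 0; c < num_cols; ++c)
            if (dense[r * num_cols + c] != 0.0) ++nnz;
        if (nnz > A.num_cols_per_row) A.num_cols_per_row = nnz;
    }
    A.val.assign(num_rows * A.num_cols_per_row, std::complex<double>(0.0, 0.0));
    A.colind.assign(num_rows * A.num_cols_per_row, 0);
    for (int r = 0; r < num_rows; ++r) {
        int k = 0;
        for (int c = 0; c < num_cols; ++c) {
            const std::complex<double> v = dense[r * num_cols + c];
            if (v != 0.0) {
                A.val[A.num_cols_per_row * r + k] = v;     // same indexing the kernel uses
                A.colind[A.num_cols_per_row * r + k] = c;
                ++k;
            }
        }
    }
    return A;
}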
d8a9bd33dc1de75bfd617010d4d6e2ae53285018.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017 Thomas Heller // // SPDX-License-Identifier: BSL-1.0 // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include <hpx/local/chrono.hpp> #include <hpx/local/execution.hpp> #include <hpx/local/init.hpp> #include <hpx/modules/async_cuda.hpp> #include <hpx/modules/compute_cuda.hpp> #include <cstddef> #include <iostream> __global__ void dummy() {} int hpx_main(hpx::program_options::variables_map& vm) { std::size_t const iterations = vm["iterations"].as<std::size_t>(); std::size_t const batch_size = 10; std::size_t const batch_iterations = iterations / batch_size; std::size_t const non_batch_iterations = iterations % batch_size; // Get the cuda targets we want to run on hpx::cuda::experimental::target target; // Create the executor hpx::cuda::experimental::default_executor executor(target); // Warmup { auto cuda_stream = target.native_handle().get_stream(); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i != iterations; ++i) { hipLaunchKernelGGL(( dummy), dim3(1), dim3(1), 0, cuda_stream, ); hpx::cuda::experimental::check_cuda_error( hipStreamSynchronize(cuda_stream)); } double elapsed = timer.elapsed(); std::cout << "native + synchronize (warmup): " << elapsed << '\n'; } { auto cuda_stream = target.native_handle().get_stream(); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i != iterations; ++i) { hipLaunchKernelGGL(( dummy), dim3(1), dim3(1), 0, cuda_stream, ); hpx::cuda::experimental::check_cuda_error( hipStreamSynchronize(cuda_stream)); } double elapsed = timer.elapsed(); std::cout << "native + synchronize: " << elapsed << '\n'; } { auto cuda_stream = target.native_handle().get_stream(); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i < batch_iterations; ++i) { for (std::size_t b = 0; b < batch_size; ++b) { hipLaunchKernelGGL(( dummy), dim3(1), dim3(1), 0, cuda_stream, ); } hpx::cuda::experimental::check_cuda_error( hipStreamSynchronize(cuda_stream)); } for (std::size_t i = 0; i < non_batch_iterations; ++i) { hipLaunchKernelGGL(( dummy), dim3(1), dim3(1), 0, cuda_stream, ); } hpx::cuda::experimental::check_cuda_error( hipStreamSynchronize(cuda_stream)); double elapsed = timer.elapsed(); std::cout << "native + synchronize batched: " << elapsed << '\n'; } { hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i != iterations; ++i) { executor.sync_execute([] HPX_DEVICE() {}); } double elapsed = timer.elapsed(); std::cout << "executor.sync_execute([](){}): " << elapsed << '\n'; } { hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i != iterations; ++i) { executor.post([] HPX_DEVICE() {}); target.synchronize(); } double elapsed = timer.elapsed(); std::cout << "executor.post([](){}) + synchronize: " << elapsed << '\n'; } { hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i < batch_iterations; ++i) { for (std::size_t b = 0; b < batch_size; ++b) { executor.post([] HPX_DEVICE() {}); } target.synchronize(); } for (std::size_t i = 0; i < non_batch_iterations; ++i) { executor.post([] HPX_DEVICE() {}); } target.synchronize(); double elapsed = timer.elapsed(); std::cout << "executor.post([](){}) + synchronize batched: " << elapsed << '\n'; } { hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i != iterations; ++i) { executor.post([] HPX_DEVICE() {}); 
target.get_future_with_callback().get(); } double elapsed = timer.elapsed(); std::cout << "executor.post([](){}) + get_future() callback: " << elapsed << '\n'; } { hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i < batch_iterations; ++i) { for (std::size_t b = 0; b < batch_size; ++b) { executor.post([] HPX_DEVICE() {}); } target.get_future_with_callback().get(); } for (std::size_t i = 0; i < non_batch_iterations; ++i) { executor.post([] HPX_DEVICE() {}); } target.get_future_with_callback().get(); double elapsed = timer.elapsed(); std::cout << "executor.post([](){}) + get_future() callback batched: " << elapsed << '\n'; } { hpx::cuda::experimental::enable_user_polling poll("default"); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i != iterations; ++i) { executor.post([] HPX_DEVICE() {}); target.get_future_with_event().get(); } double elapsed = timer.elapsed(); std::cout << "executor.post([](){}) + get_future() event: " << elapsed << '\n'; } { hpx::cuda::experimental::enable_user_polling poll("default"); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i < batch_iterations; ++i) { for (std::size_t b = 0; b < batch_size; ++b) { executor.post([] HPX_DEVICE() {}); } target.get_future_with_event().get(); } for (std::size_t i = 0; i < non_batch_iterations; ++i) { executor.post([] HPX_DEVICE() {}); } target.get_future_with_event().get(); double elapsed = timer.elapsed(); std::cout << "executor.post([](){}) + get_future() event batched: " << elapsed << '\n'; } { hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i != iterations; ++i) { executor.async_execute([] HPX_DEVICE() {}).get(); } double elapsed = timer.elapsed(); std::cout << "executor.async_execute([](){}).get(): " << elapsed << '\n'; } { hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i < batch_iterations; ++i) { hpx::future<void> f; for (std::size_t b = 0; b < batch_size; ++b) { f = executor.async_execute([] HPX_DEVICE() {}); } f.get(); } hpx::future<void> f; for (std::size_t i = 0; i < non_batch_iterations; ++i) { f = executor.async_execute([] HPX_DEVICE() {}); } f.get(); double elapsed = timer.elapsed(); std::cout << "executor.async_execute([](){}).get() batched: " << elapsed << '\n'; } { hpx::cuda::experimental::enable_user_polling poll("default"); namespace ex = hpx::execution::experimental; namespace cu = hpx::cuda::experimental; auto const f = [](hipStream_t cuda_stream) { hipLaunchKernelGGL(( dummy), dim3(1), dim3(1), 0, cuda_stream, ); }; auto cuda_stream = target.native_handle().get_stream(); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i != iterations; ++i) { cu::transform_stream(ex::just(), f, cuda_stream) | ex::sync_wait(); } double elapsed = timer.elapsed(); std::cout << "transform_stream: " << elapsed << '\n'; } { hpx::cuda::experimental::enable_user_polling poll("default"); namespace ex = hpx::execution::experimental; namespace cu = hpx::cuda::experimental; auto const f = [](hipStream_t cuda_stream) { hipLaunchKernelGGL(( dummy), dim3(1), dim3(1), 0, cuda_stream, ); }; auto cuda_stream = target.native_handle().get_stream(); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i < batch_iterations; ++i) { // We have to manually unroll this loop, because the type of the // sender changes for each additional transform_stream call. The // number of unrolled calls must match batch_size above. 
cu::transform_stream(ex::just(), f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | ex::sync_wait(); } // Do the remainder one-by-one for (std::size_t i = 0; i < non_batch_iterations; ++i) { cu::transform_stream(ex::just(), f, cuda_stream) | ex::sync_wait(); } double elapsed = timer.elapsed(); std::cout << "transform_stream batched: " << elapsed << '\n'; } { hpx::cuda::experimental::enable_user_polling poll("default"); namespace ex = hpx::execution::experimental; namespace cu = hpx::cuda::experimental; auto const f = [](hipStream_t cuda_stream) { hipLaunchKernelGGL(( dummy), dim3(1), dim3(1), 0, cuda_stream, ); }; auto cuda_stream = target.native_handle().get_stream(); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i < batch_iterations; ++i) { // We have to manually unroll this loop, because the type of the // sender changes for each additional transform_stream call. The // number of unrolled calls must match batch_size above. Here we // intentionally insert dummy transform([]{}) calls between the // transform_stream calls to force synchronization between the // kernel launches. cu::transform_stream(ex::just(), f, cuda_stream) | ex::transform([] {}) | cu::transform_stream(f, cuda_stream) | ex::transform([] {}) | cu::transform_stream(f, cuda_stream) | ex::transform([] {}) | cu::transform_stream(f, cuda_stream) | ex::transform([] {}) | cu::transform_stream(f, cuda_stream) | ex::transform([] {}) | cu::transform_stream(f, cuda_stream) | ex::transform([] {}) | cu::transform_stream(f, cuda_stream) | ex::transform([] {}) | cu::transform_stream(f, cuda_stream) | ex::transform([] {}) | cu::transform_stream(f, cuda_stream) | ex::transform([] {}) | cu::transform_stream(f, cuda_stream) | ex::sync_wait(); } // Do the remainder one-by-one for (std::size_t i = 0; i < non_batch_iterations; ++i) { cu::transform_stream(ex::just(), f, cuda_stream) | ex::sync_wait(); } double elapsed = timer.elapsed(); std::cout << "transform_stream force synchronize batched: " << elapsed << '\n'; } { hpx::cuda::experimental::enable_user_polling poll("default"); namespace ex = hpx::execution::experimental; namespace cu = hpx::cuda::experimental; auto const f = [](hipStream_t cuda_stream) { hipLaunchKernelGGL(( dummy), dim3(1), dim3(1), 0, cuda_stream, ); }; auto cuda_stream = target.native_handle().get_stream(); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i != iterations; ++i) { cu::transform_stream(ex::just(), f, cuda_stream) | ex::on(ex::thread_pool_scheduler{}) | ex::sync_wait(); } double elapsed = timer.elapsed(); std::cout << "transform_stream with on: " << elapsed << '\n'; } { hpx::cuda::experimental::enable_user_polling poll("default"); namespace ex = hpx::execution::experimental; namespace cu = hpx::cuda::experimental; auto const f = [](hipStream_t cuda_stream) { hipLaunchKernelGGL(( dummy), dim3(1), dim3(1), 0, cuda_stream, ); }; auto cuda_stream = target.native_handle().get_stream(); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i < batch_iterations; ++i) { // We have to manually unroll this loop, because the type of the // sender changes for each additional transform_stream call. The // number of unrolled calls must match batch_size above. 
cu::transform_stream(ex::just(), f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | ex::on(ex::thread_pool_scheduler{}) | ex::sync_wait(); } // Do the remainder one-by-one for (std::size_t i = 0; i < non_batch_iterations; ++i) { cu::transform_stream(ex::just(), f, cuda_stream) | ex::on(ex::thread_pool_scheduler{}) | ex::sync_wait(); } double elapsed = timer.elapsed(); std::cout << "transform_stream with on batched: " << elapsed << '\n'; } return hpx::local::finalize(); } int main(int argc, char* argv[]) { using namespace hpx::program_options; options_description cmdline("usage: " HPX_APPLICATION_STRING " [options]"); cmdline.add_options()("iterations", hpx::program_options::value<std::size_t>()->default_value(1024), "number of iterations (default: 1024)"); hpx::local::init_params init_args; init_args.desc_cmdline = cmdline; return hpx::local::init(hpx_main, argc, argv, init_args); }
d8a9bd33dc1de75bfd617010d4d6e2ae53285018.cu
// Copyright (c) 2017 Thomas Heller // // SPDX-License-Identifier: BSL-1.0 // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include <hpx/local/chrono.hpp> #include <hpx/local/execution.hpp> #include <hpx/local/init.hpp> #include <hpx/modules/async_cuda.hpp> #include <hpx/modules/compute_cuda.hpp> #include <cstddef> #include <iostream> __global__ void dummy() {} int hpx_main(hpx::program_options::variables_map& vm) { std::size_t const iterations = vm["iterations"].as<std::size_t>(); std::size_t const batch_size = 10; std::size_t const batch_iterations = iterations / batch_size; std::size_t const non_batch_iterations = iterations % batch_size; // Get the cuda targets we want to run on hpx::cuda::experimental::target target; // Create the executor hpx::cuda::experimental::default_executor executor(target); // Warmup { auto cuda_stream = target.native_handle().get_stream(); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i != iterations; ++i) { dummy<<<1, 1, 0, cuda_stream>>>(); hpx::cuda::experimental::check_cuda_error( cudaStreamSynchronize(cuda_stream)); } double elapsed = timer.elapsed(); std::cout << "native + synchronize (warmup): " << elapsed << '\n'; } { auto cuda_stream = target.native_handle().get_stream(); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i != iterations; ++i) { dummy<<<1, 1, 0, cuda_stream>>>(); hpx::cuda::experimental::check_cuda_error( cudaStreamSynchronize(cuda_stream)); } double elapsed = timer.elapsed(); std::cout << "native + synchronize: " << elapsed << '\n'; } { auto cuda_stream = target.native_handle().get_stream(); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i < batch_iterations; ++i) { for (std::size_t b = 0; b < batch_size; ++b) { dummy<<<1, 1, 0, cuda_stream>>>(); } hpx::cuda::experimental::check_cuda_error( cudaStreamSynchronize(cuda_stream)); } for (std::size_t i = 0; i < non_batch_iterations; ++i) { dummy<<<1, 1, 0, cuda_stream>>>(); } hpx::cuda::experimental::check_cuda_error( cudaStreamSynchronize(cuda_stream)); double elapsed = timer.elapsed(); std::cout << "native + synchronize batched: " << elapsed << '\n'; } { hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i != iterations; ++i) { executor.sync_execute([] HPX_DEVICE() {}); } double elapsed = timer.elapsed(); std::cout << "executor.sync_execute([](){}): " << elapsed << '\n'; } { hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i != iterations; ++i) { executor.post([] HPX_DEVICE() {}); target.synchronize(); } double elapsed = timer.elapsed(); std::cout << "executor.post([](){}) + synchronize: " << elapsed << '\n'; } { hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i < batch_iterations; ++i) { for (std::size_t b = 0; b < batch_size; ++b) { executor.post([] HPX_DEVICE() {}); } target.synchronize(); } for (std::size_t i = 0; i < non_batch_iterations; ++i) { executor.post([] HPX_DEVICE() {}); } target.synchronize(); double elapsed = timer.elapsed(); std::cout << "executor.post([](){}) + synchronize batched: " << elapsed << '\n'; } { hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i != iterations; ++i) { executor.post([] HPX_DEVICE() {}); target.get_future_with_callback().get(); } double elapsed = timer.elapsed(); std::cout << "executor.post([](){}) + get_future() callback: " << elapsed << '\n'; } { hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i < 
batch_iterations; ++i) { for (std::size_t b = 0; b < batch_size; ++b) { executor.post([] HPX_DEVICE() {}); } target.get_future_with_callback().get(); } for (std::size_t i = 0; i < non_batch_iterations; ++i) { executor.post([] HPX_DEVICE() {}); } target.get_future_with_callback().get(); double elapsed = timer.elapsed(); std::cout << "executor.post([](){}) + get_future() callback batched: " << elapsed << '\n'; } { hpx::cuda::experimental::enable_user_polling poll("default"); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i != iterations; ++i) { executor.post([] HPX_DEVICE() {}); target.get_future_with_event().get(); } double elapsed = timer.elapsed(); std::cout << "executor.post([](){}) + get_future() event: " << elapsed << '\n'; } { hpx::cuda::experimental::enable_user_polling poll("default"); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i < batch_iterations; ++i) { for (std::size_t b = 0; b < batch_size; ++b) { executor.post([] HPX_DEVICE() {}); } target.get_future_with_event().get(); } for (std::size_t i = 0; i < non_batch_iterations; ++i) { executor.post([] HPX_DEVICE() {}); } target.get_future_with_event().get(); double elapsed = timer.elapsed(); std::cout << "executor.post([](){}) + get_future() event batched: " << elapsed << '\n'; } { hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i != iterations; ++i) { executor.async_execute([] HPX_DEVICE() {}).get(); } double elapsed = timer.elapsed(); std::cout << "executor.async_execute([](){}).get(): " << elapsed << '\n'; } { hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i < batch_iterations; ++i) { hpx::future<void> f; for (std::size_t b = 0; b < batch_size; ++b) { f = executor.async_execute([] HPX_DEVICE() {}); } f.get(); } hpx::future<void> f; for (std::size_t i = 0; i < non_batch_iterations; ++i) { f = executor.async_execute([] HPX_DEVICE() {}); } f.get(); double elapsed = timer.elapsed(); std::cout << "executor.async_execute([](){}).get() batched: " << elapsed << '\n'; } { hpx::cuda::experimental::enable_user_polling poll("default"); namespace ex = hpx::execution::experimental; namespace cu = hpx::cuda::experimental; auto const f = [](cudaStream_t cuda_stream) { dummy<<<1, 1, 0, cuda_stream>>>(); }; auto cuda_stream = target.native_handle().get_stream(); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i != iterations; ++i) { cu::transform_stream(ex::just(), f, cuda_stream) | ex::sync_wait(); } double elapsed = timer.elapsed(); std::cout << "transform_stream: " << elapsed << '\n'; } { hpx::cuda::experimental::enable_user_polling poll("default"); namespace ex = hpx::execution::experimental; namespace cu = hpx::cuda::experimental; auto const f = [](cudaStream_t cuda_stream) { dummy<<<1, 1, 0, cuda_stream>>>(); }; auto cuda_stream = target.native_handle().get_stream(); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i < batch_iterations; ++i) { // We have to manually unroll this loop, because the type of the // sender changes for each additional transform_stream call. The // number of unrolled calls must match batch_size above. 
cu::transform_stream(ex::just(), f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | ex::sync_wait(); } // Do the remainder one-by-one for (std::size_t i = 0; i < non_batch_iterations; ++i) { cu::transform_stream(ex::just(), f, cuda_stream) | ex::sync_wait(); } double elapsed = timer.elapsed(); std::cout << "transform_stream batched: " << elapsed << '\n'; } { hpx::cuda::experimental::enable_user_polling poll("default"); namespace ex = hpx::execution::experimental; namespace cu = hpx::cuda::experimental; auto const f = [](cudaStream_t cuda_stream) { dummy<<<1, 1, 0, cuda_stream>>>(); }; auto cuda_stream = target.native_handle().get_stream(); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i < batch_iterations; ++i) { // We have to manually unroll this loop, because the type of the // sender changes for each additional transform_stream call. The // number of unrolled calls must match batch_size above. Here we // intentionally insert dummy transform([]{}) calls between the // transform_stream calls to force synchronization between the // kernel launches. cu::transform_stream(ex::just(), f, cuda_stream) | ex::transform([] {}) | cu::transform_stream(f, cuda_stream) | ex::transform([] {}) | cu::transform_stream(f, cuda_stream) | ex::transform([] {}) | cu::transform_stream(f, cuda_stream) | ex::transform([] {}) | cu::transform_stream(f, cuda_stream) | ex::transform([] {}) | cu::transform_stream(f, cuda_stream) | ex::transform([] {}) | cu::transform_stream(f, cuda_stream) | ex::transform([] {}) | cu::transform_stream(f, cuda_stream) | ex::transform([] {}) | cu::transform_stream(f, cuda_stream) | ex::transform([] {}) | cu::transform_stream(f, cuda_stream) | ex::sync_wait(); } // Do the remainder one-by-one for (std::size_t i = 0; i < non_batch_iterations; ++i) { cu::transform_stream(ex::just(), f, cuda_stream) | ex::sync_wait(); } double elapsed = timer.elapsed(); std::cout << "transform_stream force synchronize batched: " << elapsed << '\n'; } { hpx::cuda::experimental::enable_user_polling poll("default"); namespace ex = hpx::execution::experimental; namespace cu = hpx::cuda::experimental; auto const f = [](cudaStream_t cuda_stream) { dummy<<<1, 1, 0, cuda_stream>>>(); }; auto cuda_stream = target.native_handle().get_stream(); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i != iterations; ++i) { cu::transform_stream(ex::just(), f, cuda_stream) | ex::on(ex::thread_pool_scheduler{}) | ex::sync_wait(); } double elapsed = timer.elapsed(); std::cout << "transform_stream with on: " << elapsed << '\n'; } { hpx::cuda::experimental::enable_user_polling poll("default"); namespace ex = hpx::execution::experimental; namespace cu = hpx::cuda::experimental; auto const f = [](cudaStream_t cuda_stream) { dummy<<<1, 1, 0, cuda_stream>>>(); }; auto cuda_stream = target.native_handle().get_stream(); hpx::chrono::high_resolution_timer timer; for (std::size_t i = 0; i < batch_iterations; ++i) { // We have to manually unroll this loop, because the type of the // sender changes for each additional transform_stream call. The // number of unrolled calls must match batch_size above. 
cu::transform_stream(ex::just(), f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | cu::transform_stream(f, cuda_stream) | ex::on(ex::thread_pool_scheduler{}) | ex::sync_wait(); } // Do the remainder one-by-one for (std::size_t i = 0; i < non_batch_iterations; ++i) { cu::transform_stream(ex::just(), f, cuda_stream) | ex::on(ex::thread_pool_scheduler{}) | ex::sync_wait(); } double elapsed = timer.elapsed(); std::cout << "transform_stream with on batched: " << elapsed << '\n'; } return hpx::local::finalize(); } int main(int argc, char* argv[]) { using namespace hpx::program_options; options_description cmdline("usage: " HPX_APPLICATION_STRING " [options]"); cmdline.add_options()("iterations", hpx::program_options::value<std::size_t>()->default_value(1024), "number of iterations (default: 1024)"); hpx::local::init_params init_args; init_args.desc_cmdline = cmdline; return hpx::local::init(hpx_main, argc, argv, init_args); }
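// --- Illustrative addendum (sketch, not part of the benchmark above) ---
// The benchmark times every variant with host-side hpx::chrono timers. The
// same batched launch pattern could also be timed on the GPU side with CUDA
// events, which excludes host scheduling noise; cudaEventElapsedTime reports
// the time between the two recorded events in milliseconds.
#include <cstddef>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel() {}

void time_batched_launches(cudaStream_t stream, std::size_t batch_size)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, stream);
    for (std::size_t b = 0; b < batch_size; ++b)
    {
        dummy_kernel<<<1, 1, 0, stream>>>();
    }
    cudaEventRecord(stop, stream);
    cudaEventSynchronize(stop);              // blocks until the whole batch has finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);  // elapsed GPU time in milliseconds
    std::printf("batched launch + completion: %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}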
37516fea298d54b02161b15b8645d4410c1f1029.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <array> #include "paddle/fluid/framework/conv_search_cache.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/conv_cudnn_op_cache.h" #include "paddle/fluid/operators/conv_op.h" #include "paddle/fluid/operators/math/padding.h" #include "paddle/fluid/platform/cudnn_helper.h" DECLARE_int64(cudnn_exhaustive_search_times); namespace paddle { namespace operators { #if CUDNN_VERSION >= 7100 using Tensor = framework::Tensor; using ScopedTensorDescriptor = platform::ScopedTensorDescriptor; using ScopedFilterDescriptor = platform::ScopedFilterDescriptor; using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor; using ScopedActivationDescriptor = platform::ScopedActivationDescriptor; using DataLayout = platform::DataLayout; using framework::AlgorithmsCache; using framework::ConvSearchCache; template <typename T> using ScalingParamType = typename platform::CudnnDataType<T>::ScalingParamType; template <typename T> class CUDNNConvFusionOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto* input = ctx.Input<Tensor>("Input"); auto* filter = ctx.Input<Tensor>("Filter"); auto* bias = ctx.Input<Tensor>("Bias"); auto* residual = ctx.Input<Tensor>("ResidualData"); auto* output = ctx.Output<Tensor>("Output"); output->mutable_data<T>(ctx.GetPlace()); std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); const std::string activation = ctx.Attr<std::string>("activation"); int groups = ctx.Attr<int>("groups"); int64_t user_workspace_size = static_cast<size_t>(ctx.Attr<int>("workspace_size_MB")); bool exhaustive_search = FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search"); const T* filter_data = filter->data<T>(); const T* bias_data = bias->data<T>(); const std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm"); Tensor transformed_input_channel(input->type()); Tensor transformed_output(output->type()); transformed_input_channel = *input; transformed_output = *output; T* output_data = transformed_output.data<T>(); const T* residual_data = residual ? 
residual->data<T>() : output_data; // update padding and dilation auto in_dims = transformed_input_channel.dims(); auto filter_dims = filter->dims(); framework::DDim in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size()); framework::DDim filter_data_dims = framework::slice_ddim(filter_dims, 2, filter_dims.size()); std::vector<int> ksize = framework::vectorize<int>(filter_data_dims); UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); int data_dim = strides.size(); // 2d or 3d bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim); Tensor transformed_input; std::vector<int> padding_common(data_dim, 0); if (!is_sys_pad) { std::vector<int> padding_diff(data_dim); std::vector<int> new_input_shape_vec(data_dim + 2); new_input_shape_vec[0] = transformed_input_channel.dims()[0]; new_input_shape_vec[1] = transformed_input_channel.dims()[1]; std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0); for (size_t i = 0; i < data_dim; ++i) { padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]); new_input_shape_vec[i + 2] = transformed_input_channel.dims()[i + 2] + padding_diff[i]; input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; } framework::DDim new_input_shape( framework::make_ddim(new_input_shape_vec)); transformed_input.Resize(new_input_shape); auto& dev_ctx = ctx.template device_context<paddle::platform::CUDADeviceContext>(); transformed_input = ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>( new_input_shape, dev_ctx); const int rank = transformed_input_channel.dims().size(); T pad_value(0.0); switch (rank) { case 4: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>( ctx, input_pad, transformed_input_channel, pad_value, &transformed_input); } break; case 5: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>( ctx, input_pad, transformed_input_channel, pad_value, &transformed_input); } break; default: PADDLE_THROW(platform::errors::PermissionDenied( "Operator Conv2DFusion expects Input to be a 4-D or 5-D Tensor. 
" "But recieved the actual dimension = %d, shape = [%s].", rank, transformed_input_channel.dims())); } } else { transformed_input = transformed_input_channel; if (paddings.size() == data_dim) { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[i]; } } else { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[2 * i]; } } } const T* input_data = transformed_input.data<T>(); // ------------------- cudnn descriptors --------------------- ScopedTensorDescriptor input_desc; ScopedTensorDescriptor output_desc; ScopedFilterDescriptor filter_desc; ScopedTensorDescriptor bias_desc; ScopedConvolutionDescriptor conv_desc; ScopedActivationDescriptor act_desc; DataLayout layout = DataLayout::kNCHW; if (input->dims().size() == 5) { layout = DataLayout::kNCDHW; } cudnnConvolutionDescriptor_t cudnn_conv_desc = conv_desc.descriptor<T>(padding_common, strides, dilations); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnSetConvolutionGroupCount(cudnn_conv_desc, groups)); cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>( layout, framework::vectorize<int>(transformed_input.dims())); cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>( layout, framework::vectorize<int>(transformed_output.dims())); cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor<T>( layout, framework::vectorize<int>(filter->dims())); // Now only support NCHW std::vector<int> bias_dim = { 1, static_cast<int>(transformed_output.dims()[1]), 1, 1}; cudnnTensorDescriptor_t cudnn_bias_desc = bias_desc.descriptor<T>(layout, bias_dim); cudnnActivationDescriptor_t cudnn_act_desc = act_desc.descriptor<T>(activation); // ------------------- cudnn conv workspace --------------------- size_t workspace_size_in_bytes; // final workspace to allocate. 
size_t workspace_size_limit = 0; if (FLAGS_conv_workspace_size_limit > 0 || user_workspace_size > 0) { int64_t max_user_size = ::min(static_cast<int64_t>(FLAGS_conv_workspace_size_limit), user_workspace_size); workspace_size_limit = max_user_size * 1024 * 1024; } // ------------------- cudnn conv algorithm --------------------- cudnnConvolutionFwdAlgo_t algo; auto handle = dev_ctx.cudnn_handle(); auto workspace_handle = dev_ctx.cudnn_workspace_handle(); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( cudnn_conv_desc, CUDNN_DEFAULT_MATH)); auto x_dims = framework::vectorize(transformed_input.dims()); auto f_dims = framework::vectorize(filter->dims()); if (!exhaustive_search) { int perf_count; int best_algo_idx = 0; size_t tmp_size = 0; std::unique_ptr<cudnnConvolutionFwdAlgoPerf_t[]> perf_results( new cudnnConvolutionFwdAlgoPerf_t[kNUM_CUDNN_FWD_ALGS]); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnGetConvolutionForwardAlgorithm_v7( handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_output_desc, kNUM_CUDNN_FWD_ALGS, &perf_count, perf_results.get())); algo = (perf_results.get())[best_algo_idx].algo; VLOG(3) << "cuDNN forward algo " << algo; } else { std::function<cudnnConvolutionFwdAlgo_t()> search_func = [&]() -> cudnnConvolutionFwdAlgo_t { int returned_algo_count; std::array<cudnnConvolutionFwdAlgoPerf_t, kNUM_CUDNN_FWD_ALGS> fwd_perf_stat; auto cudnn_find_func = [&](void* cudnn_workspace) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnFindConvolutionForwardAlgorithmEx( handle, cudnn_input_desc, input_data, cudnn_filter_desc, filter_data, cudnn_conv_desc, cudnn_output_desc, output_data, kNUM_CUDNN_FWD_ALGS, &returned_algo_count, fwd_perf_stat.data(), cudnn_workspace, workspace_size_limit)); }; workspace_handle.RunFuncSync(cudnn_find_func, workspace_size_limit); VLOG(3) << "Perf result: (algo: stat, time, memory)"; for (int i = 0; i < returned_algo_count; ++i) { const auto& stat = fwd_perf_stat[i]; VLOG(3) << stat.algo << ": " << stat.status << " " << stat.time << " " << stat.memory; } return fwd_perf_stat[0].algo; }; AlgorithmsCache<cudnnConvolutionFwdAlgo_t>& algo_cache = *(framework::ConvSearchCache::Instance().GetConvFusion()); int search_times = ctx.Attr<int>("search_times"); search_times = ::max( static_cast<int>(FLAGS_cudnn_exhaustive_search_times), search_times); // TODO(dangqingqing): Unify this if-else. if (search_times > 0) { // The searched algo will be cached by `search_times` times for // different input dimension. For other dimensions, select the algo // of closest area. algo = algo_cache.GetAlgorithm(x_dims[2] * x_dims[3], search_times, 0, search_func); } else { auto dtype = platform::CudnnDataType<T>::type; algo = algo_cache.GetAlgorithm(x_dims, f_dims, strides, paddings, dilations, 0, dtype, search_func); } VLOG(3) << "choose algo " << algo; } PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnGetConvolutionForwardWorkspaceSize( handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_output_desc, algo, &workspace_size_in_bytes)); PADDLE_ENFORCE_LE( workspace_size_in_bytes, workspace_size_limit, platform::errors::InvalidArgument( "The actual workspace size to be allocated for cuDNN is expected " "to be less than the limit. 
But recieved: the actual workspace " "size = %d, limit = %d.", workspace_size_in_bytes, workspace_size_limit)); if ((activation == "identity") && (!residual)) { // Only the CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM algo is // enabled with CUDNN_ACTIVATION_IDENTITY in cuDNN lib. // But test in some case, the speed is slower, change to use // cudnnConvolutionForward and cudnnAddTensor // ------------- cudnn conv forward and bias add --------------------- ScalingParamType<T> alpha = 1.0f, beta = 0.0f; auto cudnn_func = [&](void* cudnn_workspace) { PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnConvolutionForward( handle, &alpha, cudnn_input_desc, input_data, cudnn_filter_desc, filter_data, cudnn_conv_desc, algo, cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_output_desc, output_data)); }; workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnAddTensor( handle, &alpha, cudnn_bias_desc, bias_data, &alpha, cudnn_output_desc, output_data)); } else { if (activation == "identity") { algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM; } // ------------------- cudnn conv+bias+act forward -------------------- ScalingParamType<T> alpha1 = 1.0f; ScalingParamType<T> alpha2 = residual ? 1.0f : 0.0f; auto cudnn_func = [&](void* cudnn_workspace) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnConvolutionBiasActivationForward( handle, &alpha1, cudnn_input_desc, input_data, cudnn_filter_desc, filter_data, cudnn_conv_desc, algo, cudnn_workspace, workspace_size_in_bytes, &alpha2, cudnn_output_desc, residual_data, cudnn_bias_desc, bias_data, cudnn_act_desc, cudnn_output_desc, output_data)); }; workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); } std::vector<int> channels = ctx.Attr<std::vector<int>>("split_channels"); if (channels.size()) { auto outs = ctx.MultiOutput<framework::Tensor>("Outputs"); if (x_dims[0] == 1) { // share data with Output framework::Tensor t; t.ShareDataWith(*output); auto y_dims = output->dims(); t.Resize({y_dims[1], y_dims[2], y_dims[3]}); int s = 0; for (size_t i = 0; i < channels.size(); ++i) { int e = s + channels[i]; outs[i]->ShareDataWith(t.Slice(s, e)); outs[i]->Resize({x_dims[0], channels[i], y_dims[2], y_dims[3]}); s = e; } } else { // TODO(qingiqng): do copy when batch size large than 1 PADDLE_THROW(platform::errors::Unimplemented( "Input with batch size greater than 1 is unsupported. The recieved " "batch size is %d, Input's shape is [%s].", x_dims[0], framework::make_ddim(x_dims))); } } } }; #endif } // namespace operators } // namespace paddle #if CUDNN_VERSION >= 7100 namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(conv2d_fusion, ops::CUDNNConvFusionOpKernel<float>, ops::CUDNNConvFusionOpKernel<double>); #endif
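// --- Illustrative addendum (standalone sketch mirroring the padding loop in
// CUDNNConvFusionOpKernel above; the padding values are made up) ---
// cuDNN descriptors accept only one padding value per spatial dimension, so an
// asymmetric padding is split into a symmetric "common" part handled by cuDNN
// plus an explicit pre-padding of the input with the remainder.
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  // paddings stored as {before_h, after_h, before_w, after_w}
  std::vector<int> paddings = {1, 2, 0, 3};
  const int data_dim = 2;

  std::vector<int> padding_common(data_dim, 0);
  std::vector<int> input_pad(2 * (data_dim + 2), 0);  // NCHW: only H and W get padded
  for (int i = 0; i < data_dim; ++i) {
    padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
    input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
    input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
  }
  for (int i = 0; i < data_dim; ++i)
    std::printf("dim %d: common=%d extra_before=%d extra_after=%d\n", i,
                padding_common[i], input_pad[2 * i + 4], input_pad[2 * i + 5]);
  return 0;
}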
37516fea298d54b02161b15b8645d4410c1f1029.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <array> #include "paddle/fluid/framework/conv_search_cache.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/conv_cudnn_op_cache.h" #include "paddle/fluid/operators/conv_op.h" #include "paddle/fluid/operators/math/padding.h" #include "paddle/fluid/platform/cudnn_helper.h" DECLARE_int64(cudnn_exhaustive_search_times); namespace paddle { namespace operators { #if CUDNN_VERSION >= 7100 using Tensor = framework::Tensor; using ScopedTensorDescriptor = platform::ScopedTensorDescriptor; using ScopedFilterDescriptor = platform::ScopedFilterDescriptor; using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor; using ScopedActivationDescriptor = platform::ScopedActivationDescriptor; using DataLayout = platform::DataLayout; using framework::AlgorithmsCache; using framework::ConvSearchCache; template <typename T> using ScalingParamType = typename platform::CudnnDataType<T>::ScalingParamType; template <typename T> class CUDNNConvFusionOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto* input = ctx.Input<Tensor>("Input"); auto* filter = ctx.Input<Tensor>("Filter"); auto* bias = ctx.Input<Tensor>("Bias"); auto* residual = ctx.Input<Tensor>("ResidualData"); auto* output = ctx.Output<Tensor>("Output"); output->mutable_data<T>(ctx.GetPlace()); std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); const std::string activation = ctx.Attr<std::string>("activation"); int groups = ctx.Attr<int>("groups"); int64_t user_workspace_size = static_cast<size_t>(ctx.Attr<int>("workspace_size_MB")); bool exhaustive_search = FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search"); const T* filter_data = filter->data<T>(); const T* bias_data = bias->data<T>(); const std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm"); Tensor transformed_input_channel(input->type()); Tensor transformed_output(output->type()); transformed_input_channel = *input; transformed_output = *output; T* output_data = transformed_output.data<T>(); const T* residual_data = residual ? 
residual->data<T>() : output_data; // update padding and dilation auto in_dims = transformed_input_channel.dims(); auto filter_dims = filter->dims(); framework::DDim in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size()); framework::DDim filter_data_dims = framework::slice_ddim(filter_dims, 2, filter_dims.size()); std::vector<int> ksize = framework::vectorize<int>(filter_data_dims); UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); int data_dim = strides.size(); // 2d or 3d bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim); Tensor transformed_input; std::vector<int> padding_common(data_dim, 0); if (!is_sys_pad) { std::vector<int> padding_diff(data_dim); std::vector<int> new_input_shape_vec(data_dim + 2); new_input_shape_vec[0] = transformed_input_channel.dims()[0]; new_input_shape_vec[1] = transformed_input_channel.dims()[1]; std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0); for (size_t i = 0; i < data_dim; ++i) { padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]); new_input_shape_vec[i + 2] = transformed_input_channel.dims()[i + 2] + padding_diff[i]; input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; } framework::DDim new_input_shape( framework::make_ddim(new_input_shape_vec)); transformed_input.Resize(new_input_shape); auto& dev_ctx = ctx.template device_context<paddle::platform::CUDADeviceContext>(); transformed_input = ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>( new_input_shape, dev_ctx); const int rank = transformed_input_channel.dims().size(); T pad_value(0.0); switch (rank) { case 4: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>( ctx, input_pad, transformed_input_channel, pad_value, &transformed_input); } break; case 5: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>( ctx, input_pad, transformed_input_channel, pad_value, &transformed_input); } break; default: PADDLE_THROW(platform::errors::PermissionDenied( "Operator Conv2DFusion expects Input to be a 4-D or 5-D Tensor. 
" "But recieved the actual dimension = %d, shape = [%s].", rank, transformed_input_channel.dims())); } } else { transformed_input = transformed_input_channel; if (paddings.size() == data_dim) { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[i]; } } else { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[2 * i]; } } } const T* input_data = transformed_input.data<T>(); // ------------------- cudnn descriptors --------------------- ScopedTensorDescriptor input_desc; ScopedTensorDescriptor output_desc; ScopedFilterDescriptor filter_desc; ScopedTensorDescriptor bias_desc; ScopedConvolutionDescriptor conv_desc; ScopedActivationDescriptor act_desc; DataLayout layout = DataLayout::kNCHW; if (input->dims().size() == 5) { layout = DataLayout::kNCDHW; } cudnnConvolutionDescriptor_t cudnn_conv_desc = conv_desc.descriptor<T>(padding_common, strides, dilations); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnSetConvolutionGroupCount(cudnn_conv_desc, groups)); cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>( layout, framework::vectorize<int>(transformed_input.dims())); cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>( layout, framework::vectorize<int>(transformed_output.dims())); cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor<T>( layout, framework::vectorize<int>(filter->dims())); // Now only support NCHW std::vector<int> bias_dim = { 1, static_cast<int>(transformed_output.dims()[1]), 1, 1}; cudnnTensorDescriptor_t cudnn_bias_desc = bias_desc.descriptor<T>(layout, bias_dim); cudnnActivationDescriptor_t cudnn_act_desc = act_desc.descriptor<T>(activation); // ------------------- cudnn conv workspace --------------------- size_t workspace_size_in_bytes; // final workspace to allocate. 
size_t workspace_size_limit = 0; if (FLAGS_conv_workspace_size_limit > 0 || user_workspace_size > 0) { int64_t max_user_size = std::min(static_cast<int64_t>(FLAGS_conv_workspace_size_limit), user_workspace_size); workspace_size_limit = max_user_size * 1024 * 1024; } // ------------------- cudnn conv algorithm --------------------- cudnnConvolutionFwdAlgo_t algo; auto handle = dev_ctx.cudnn_handle(); auto workspace_handle = dev_ctx.cudnn_workspace_handle(); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( cudnn_conv_desc, CUDNN_DEFAULT_MATH)); auto x_dims = framework::vectorize(transformed_input.dims()); auto f_dims = framework::vectorize(filter->dims()); if (!exhaustive_search) { int perf_count; int best_algo_idx = 0; size_t tmp_size = 0; std::unique_ptr<cudnnConvolutionFwdAlgoPerf_t[]> perf_results( new cudnnConvolutionFwdAlgoPerf_t[kNUM_CUDNN_FWD_ALGS]); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnGetConvolutionForwardAlgorithm_v7( handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_output_desc, kNUM_CUDNN_FWD_ALGS, &perf_count, perf_results.get())); algo = (perf_results.get())[best_algo_idx].algo; VLOG(3) << "cuDNN forward algo " << algo; } else { std::function<cudnnConvolutionFwdAlgo_t()> search_func = [&]() -> cudnnConvolutionFwdAlgo_t { int returned_algo_count; std::array<cudnnConvolutionFwdAlgoPerf_t, kNUM_CUDNN_FWD_ALGS> fwd_perf_stat; auto cudnn_find_func = [&](void* cudnn_workspace) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnFindConvolutionForwardAlgorithmEx( handle, cudnn_input_desc, input_data, cudnn_filter_desc, filter_data, cudnn_conv_desc, cudnn_output_desc, output_data, kNUM_CUDNN_FWD_ALGS, &returned_algo_count, fwd_perf_stat.data(), cudnn_workspace, workspace_size_limit)); }; workspace_handle.RunFuncSync(cudnn_find_func, workspace_size_limit); VLOG(3) << "Perf result: (algo: stat, time, memory)"; for (int i = 0; i < returned_algo_count; ++i) { const auto& stat = fwd_perf_stat[i]; VLOG(3) << stat.algo << ": " << stat.status << " " << stat.time << " " << stat.memory; } return fwd_perf_stat[0].algo; }; AlgorithmsCache<cudnnConvolutionFwdAlgo_t>& algo_cache = *(framework::ConvSearchCache::Instance().GetConvFusion()); int search_times = ctx.Attr<int>("search_times"); search_times = std::max( static_cast<int>(FLAGS_cudnn_exhaustive_search_times), search_times); // TODO(dangqingqing): Unify this if-else. if (search_times > 0) { // The searched algo will be cached by `search_times` times for // different input dimension. For other dimensions, select the algo // of closest area. algo = algo_cache.GetAlgorithm(x_dims[2] * x_dims[3], search_times, 0, search_func); } else { auto dtype = platform::CudnnDataType<T>::type; algo = algo_cache.GetAlgorithm(x_dims, f_dims, strides, paddings, dilations, 0, dtype, search_func); } VLOG(3) << "choose algo " << algo; } PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnGetConvolutionForwardWorkspaceSize( handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_output_desc, algo, &workspace_size_in_bytes)); PADDLE_ENFORCE_LE( workspace_size_in_bytes, workspace_size_limit, platform::errors::InvalidArgument( "The actual workspace size to be allocated for cuDNN is expected " "to be less than the limit. 
But received: the actual workspace " "size = %d, limit = %d.", workspace_size_in_bytes, workspace_size_limit)); if ((activation == "identity") && (!residual)) { // Only the CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM algo is // enabled with CUDNN_ACTIVATION_IDENTITY in cuDNN lib. // But testing shows it is slower in some cases, so use // cudnnConvolutionForward and cudnnAddTensor instead. // ------------- cudnn conv forward and bias add --------------------- ScalingParamType<T> alpha = 1.0f, beta = 0.0f; auto cudnn_func = [&](void* cudnn_workspace) { PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnConvolutionForward( handle, &alpha, cudnn_input_desc, input_data, cudnn_filter_desc, filter_data, cudnn_conv_desc, algo, cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_output_desc, output_data)); }; workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnAddTensor( handle, &alpha, cudnn_bias_desc, bias_data, &alpha, cudnn_output_desc, output_data)); } else { if (activation == "identity") { algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM; } // ------------------- cudnn conv+bias+act forward -------------------- ScalingParamType<T> alpha1 = 1.0f; ScalingParamType<T> alpha2 = residual ? 1.0f : 0.0f; auto cudnn_func = [&](void* cudnn_workspace) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnConvolutionBiasActivationForward( handle, &alpha1, cudnn_input_desc, input_data, cudnn_filter_desc, filter_data, cudnn_conv_desc, algo, cudnn_workspace, workspace_size_in_bytes, &alpha2, cudnn_output_desc, residual_data, cudnn_bias_desc, bias_data, cudnn_act_desc, cudnn_output_desc, output_data)); }; workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); } std::vector<int> channels = ctx.Attr<std::vector<int>>("split_channels"); if (channels.size()) { auto outs = ctx.MultiOutput<framework::Tensor>("Outputs"); if (x_dims[0] == 1) { // share data with Output framework::Tensor t; t.ShareDataWith(*output); auto y_dims = output->dims(); t.Resize({y_dims[1], y_dims[2], y_dims[3]}); int s = 0; for (size_t i = 0; i < channels.size(); ++i) { int e = s + channels[i]; outs[i]->ShareDataWith(t.Slice(s, e)); outs[i]->Resize({x_dims[0], channels[i], y_dims[2], y_dims[3]}); s = e; } } else { // TODO(qingiqng): do copy when batch size is larger than 1 PADDLE_THROW(platform::errors::Unimplemented( "Input with batch size greater than 1 is unsupported. The received " "batch size is %d, Input's shape is [%s].", x_dims[0], framework::make_ddim(x_dims))); } } } }; #endif } // namespace operators } // namespace paddle #if CUDNN_VERSION >= 7100 namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(conv2d_fusion, ops::CUDNNConvFusionOpKernel<float>, ops::CUDNNConvFusionOpKernel<double>); #endif
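In the operator above, workspace_size_limit converts a megabyte setting into bytes (the smaller of FLAGS_conv_workspace_size_limit and the workspace_size_MB attribute, times 1024 * 1024), and the heuristic path simply takes entry 0 of the perf results, enforcing the limit only afterwards with PADDLE_ENFORCE_LE. The sketch below is not part of the operator; it is a minimal illustration, using only the documented fields of cudnnConvolutionFwdAlgoPerf_t, of how one could instead pick the fastest returned algorithm whose workspace already fits the budget (the helper name is made up).

#include <cudnn.h>
#include <cstddef>

// Results from cudnnGetConvolutionForwardAlgorithm_v7 or
// cudnnFindConvolutionForwardAlgorithmEx come back sorted by expected
// performance, so the first admissible entry is the best choice.
static cudnnConvolutionFwdAlgo_t PickFwdAlgo(const cudnnConvolutionFwdAlgoPerf_t* perf,
                                             int count, size_t workspace_limit_bytes) {
  for (int i = 0; i < count; ++i) {
    if (perf[i].status == CUDNN_STATUS_SUCCESS &&
        perf[i].memory <= workspace_limit_bytes) {
      return perf[i].algo;  // fastest algorithm that fits the workspace budget
    }
  }
  // Nothing fits: fall back to the low-memory implicit GEMM algorithm.
  return CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM;
}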
85037892f14639402475fa2b4cfe78a63a7a545c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vector_lgamma.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int n = 1; const REAL *x = NULL; hipMalloc(&x, XSIZE*YSIZE); const int offset_x = 1; const int stride_x = 1; REAL *y = NULL; hipMalloc(&y, XSIZE*YSIZE); const int offset_y = 1; const int stride_y = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vector_lgamma), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,offset_x,stride_x,y,offset_y,stride_y); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vector_lgamma), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,offset_x,stride_x,y,offset_y,stride_y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vector_lgamma), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,offset_x,stride_x,y,offset_y,stride_y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
85037892f14639402475fa2b4cfe78a63a7a545c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vector_lgamma.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int n = 1; const REAL *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); const int offset_x = 1; const int stride_x = 1; REAL *y = NULL; cudaMalloc(&y, XSIZE*YSIZE); const int offset_y = 1; const int stride_y = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vector_lgamma<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vector_lgamma<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vector_lgamma<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
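The benchmark harness above grows iXSIZE and iYSIZE in while loops until they divide evenly by BLOCKX and BLOCKY; that is a ceiling division in disguise, since the grid needs ceil(XSIZE/BLOCKX) by ceil(YSIZE/BLOCKY) blocks to cover every element. A minimal equivalent sketch (the helper name is made up for illustration):

#include <cuda_runtime.h>

// Equivalent grid sizing via ceiling division.
static dim3 MakeGrid(int xsize, int ysize, int blockx, int blocky) {
  return dim3((xsize + blockx - 1) / blockx,   // ceil(xsize / blockx)
              (ysize + blocky - 1) / blocky);  // ceil(ysize / blocky)
}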
0943f2d85e0ac80dd317ff8b35b52bd21db97643.hip
// !!! This is a file automatically generated by hipify!!! extern "C" { #include "im2col.h" #include "hip/hip_runtime.h" } // src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu // You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE __global__ void im2col_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, float *data_col) { int index = blockIdx.x*blockDim.x+threadIdx.x; for(; index < n; index += blockDim.x*gridDim.x){ int w_out = index % width_col; int h_index = index / width_col; int h_out = h_index % height_col; int channel_in = h_index / height_col; int channel_out = channel_in * ksize * ksize; int h_in = h_out * stride - pad; int w_in = w_out * stride - pad; float* data_col_ptr = data_col; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; const float* data_im_ptr = data_im; data_im_ptr += (channel_in * height + h_in) * width + w_in; for (int i = 0; i < ksize; ++i) { for (int j = 0; j < ksize; ++j) { int h = h_in + i; int w = w_in + j; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0; data_col_ptr += height_col * width_col; } } } } void im2col_ongpu(float *im, int channels, int height, int width, int ksize, int stride, int pad, float *data_col){ // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. pad = pad ? ksize/2 : 0; int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; int num_kernels = channels * height_col * width_col; hipLaunchKernelGGL(( im2col_gpu_kernel), dim3((num_kernels+BLOCK-1)/BLOCK), dim3(BLOCK), 0, 0, num_kernels, im, height, width, ksize, pad, stride, height_col, width_col, data_col); } /* __global__ void im2col_pad_kernel(float *im, int channels, int height, int width, int ksize, int stride, float *data_col) { int c,h,w; int height_col = 1 + (height-1) / stride; int width_col = 1 + (width-1) / stride; int channels_col = channels * ksize * ksize; int pad = ksize/2; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int col_size = height_col*width_col*channels_col; if (id >= col_size) return; int col_index = id; w = id % width_col; id /= width_col; h = id % height_col; id /= height_col; c = id % channels_col; id /= channels_col; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int im_channel = c / ksize / ksize; int im_row = h_offset + h * stride - pad; int im_col = w_offset + w * stride - pad; int im_index = im_col + width*(im_row + height*im_channel); float val = (im_row < 0 || im_col < 0 || im_row >= height || im_col >= width) ? 
0 : im[im_index]; data_col[col_index] = val; } __global__ void im2col_nopad_kernel(float *im, int channels, int height, int width, int ksize, int stride, float *data_col) { int c,h,w; int height_col = (height - ksize) / stride + 1; int width_col = (width - ksize) / stride + 1; int channels_col = channels * ksize * ksize; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int col_size = height_col*width_col*channels_col; if (id >= col_size) return; int col_index = id; w = id % width_col; id /= width_col; h = id % height_col; id /= height_col; c = id % channels_col; id /= channels_col; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int im_channel = c / ksize / ksize; int im_row = h_offset + h * stride; int im_col = w_offset + w * stride; int im_index = im_col + width*(im_row + height*im_channel); float val = (im_row < 0 || im_col < 0 || im_row >= height || im_col >= width) ? 0 : im[im_index]; data_col[col_index] = val; } extern "C" void im2col_ongpu(float *im, int channels, int height, int width, int ksize, int stride, int pad, float *data_col) { int height_col = (height - ksize) / stride + 1; int width_col = (width - ksize) / stride + 1; int channels_col = channels * ksize * ksize; if (pad){ height_col = 1 + (height-1) / stride; width_col = 1 + (width-1) / stride; } size_t n = channels_col*height_col*width_col; if(pad)im2col_pad_kernel<<<cuda_gridsize(n),BLOCK>>>(im, channels, height, width, ksize, stride, data_col); else im2col_nopad_kernel<<<cuda_gridsize(n),BLOCK>>>(im, channels, height, width, ksize, stride, data_col); check_error(hipPeekAtLastError()); } */
0943f2d85e0ac80dd317ff8b35b52bd21db97643.cu
extern "C" { #include "im2col.h" #include "cuda.h" } // src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu // You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE __global__ void im2col_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, float *data_col) { int index = blockIdx.x*blockDim.x+threadIdx.x; for(; index < n; index += blockDim.x*gridDim.x){ int w_out = index % width_col; int h_index = index / width_col; int h_out = h_index % height_col; int channel_in = h_index / height_col; int channel_out = channel_in * ksize * ksize; int h_in = h_out * stride - pad; int w_in = w_out * stride - pad; float* data_col_ptr = data_col; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; const float* data_im_ptr = data_im; data_im_ptr += (channel_in * height + h_in) * width + w_in; for (int i = 0; i < ksize; ++i) { for (int j = 0; j < ksize; ++j) { int h = h_in + i; int w = w_in + j; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0; data_col_ptr += height_col * width_col; } } } } void im2col_ongpu(float *im, int channels, int height, int width, int ksize, int stride, int pad, float *data_col){ // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. pad = pad ? ksize/2 : 0; int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; int num_kernels = channels * height_col * width_col; im2col_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK, BLOCK>>>( num_kernels, im, height, width, ksize, pad, stride, height_col, width_col, data_col); } /* __global__ void im2col_pad_kernel(float *im, int channels, int height, int width, int ksize, int stride, float *data_col) { int c,h,w; int height_col = 1 + (height-1) / stride; int width_col = 1 + (width-1) / stride; int channels_col = channels * ksize * ksize; int pad = ksize/2; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int col_size = height_col*width_col*channels_col; if (id >= col_size) return; int col_index = id; w = id % width_col; id /= width_col; h = id % height_col; id /= height_col; c = id % channels_col; id /= channels_col; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int im_channel = c / ksize / ksize; int im_row = h_offset + h * stride - pad; int im_col = w_offset + w * stride - pad; int im_index = im_col + width*(im_row + height*im_channel); float val = (im_row < 0 || im_col < 0 || im_row >= height || im_col >= width) ? 
0 : im[im_index]; data_col[col_index] = val; } __global__ void im2col_nopad_kernel(float *im, int channels, int height, int width, int ksize, int stride, float *data_col) { int c,h,w; int height_col = (height - ksize) / stride + 1; int width_col = (width - ksize) / stride + 1; int channels_col = channels * ksize * ksize; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int col_size = height_col*width_col*channels_col; if (id >= col_size) return; int col_index = id; w = id % width_col; id /= width_col; h = id % height_col; id /= height_col; c = id % channels_col; id /= channels_col; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int im_channel = c / ksize / ksize; int im_row = h_offset + h * stride; int im_col = w_offset + w * stride; int im_index = im_col + width*(im_row + height*im_channel); float val = (im_row < 0 || im_col < 0 || im_row >= height || im_col >= width) ? 0 : im[im_index]; data_col[col_index] = val; } extern "C" void im2col_ongpu(float *im, int channels, int height, int width, int ksize, int stride, int pad, float *data_col) { int height_col = (height - ksize) / stride + 1; int width_col = (width - ksize) / stride + 1; int channels_col = channels * ksize * ksize; if (pad){ height_col = 1 + (height-1) / stride; width_col = 1 + (width-1) / stride; } size_t n = channels_col*height_col*width_col; if(pad)im2col_pad_kernel<<<cuda_gridsize(n),BLOCK>>>(im, channels, height, width, ksize, stride, data_col); else im2col_nopad_kernel<<<cuda_gridsize(n),BLOCK>>>(im, channels, height, width, ksize, stride, data_col); check_error(cudaPeekAtLastError()); } */
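In the im2col kernel above, the column entry for (input channel, kernel row, kernel column) at output position (h_out, w_out) reads the input pixel at (h_out * stride - pad + kernel row, w_out * stride - pad + kernel column), writing zero when that read falls outside the image. The CPU reference below is an illustrative sketch, not part of this file; it assumes pad is the already-resolved padding in pixels (ksize/2 or 0, as computed in im2col_ongpu) and produces the same data_col layout, so it can be used to validate the kernel's output.

// CPU reference producing the same data_col layout as im2col_gpu_kernel.
void im2col_cpu(const float *im, int channels, int height, int width,
                int ksize, int stride, int pad, float *data_col)
{
    int height_col = (height + 2*pad - ksize) / stride + 1;
    int width_col  = (width  + 2*pad - ksize) / stride + 1;
    int channels_col = channels * ksize * ksize;
    for (int c = 0; c < channels_col; ++c) {
        int w_offset = c % ksize;            // kernel column
        int h_offset = (c / ksize) % ksize;  // kernel row
        int c_im = c / ksize / ksize;        // input channel
        for (int h = 0; h < height_col; ++h) {
            for (int w = 0; w < width_col; ++w) {
                int im_row = h_offset + h * stride - pad;
                int im_col = w_offset + w * stride - pad;
                float val = 0.f;
                if (im_row >= 0 && im_col >= 0 && im_row < height && im_col < width)
                    val = im[(c_im * height + im_row) * width + im_col];
                data_col[(c * height_col + h) * width_col + w] = val;
            }
        }
    }
}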
f9e18408139cd20b8187ac9193562830e7100fbf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************** * BenchIT - Performance Measurement for Scientific Applications * Contact: [email protected] * * For license details see COPYING in the package base directory *******************************************************************/ #include "work.h" #include "tools/repeat.h" __global__ void testLatency(uint* myArray, int arrayLength, int repeatCt, int its, uint* duration){ extern __shared__ uint sharedMem[]; //Fill array for(int i=0; i < arrayLength; i++) sharedMem[i] = myArray[i]; uint start, end; /*unsigned clockOverhead=0; //Measure clock overhead for(int i=0; i<=its; i++){ if(i == 1) clockOverhead = 0; start = clock(); end = clock(); clockOverhead += end - start; }*/ //Measure latency unsigned sumTime = 0; uint j = 0; for(int i=0; i<=its; i++){ if(i == 1) sumTime = 0; start = clock(); repeat256(j=sharedMem[j];) end = clock(); sumTime += end - start; } myArray[arrayLength] += j; duration[0] = sumTime; j = 0; for(int i=0; i<=its; i++){ if(i == 1) sumTime = 0; int k = 0; start = clock(); do{ k++; repeat6(j=sharedMem[j];) }while(k<repeatCt); end = clock(); sumTime += end - start; } myArray[arrayLength] += j; duration[1] = sumTime; }
f9e18408139cd20b8187ac9193562830e7100fbf.cu
/******************************************************************** * BenchIT - Performance Measurement for Scientific Applications * Contact: [email protected] * * For license details see COPYING in the package base directory *******************************************************************/ #include "work.h" #include "tools/repeat.h" __global__ void testLatency(uint* myArray, int arrayLength, int repeatCt, int its, uint* duration){ extern __shared__ uint sharedMem[]; //Fill array for(int i=0; i < arrayLength; i++) sharedMem[i] = myArray[i]; uint start, end; /*unsigned clockOverhead=0; //Measure clock overhead for(int i=0; i<=its; i++){ if(i == 1) clockOverhead = 0; start = clock(); end = clock(); clockOverhead += end - start; }*/ //Measure latency unsigned sumTime = 0; uint j = 0; for(int i=0; i<=its; i++){ if(i == 1) sumTime = 0; start = clock(); repeat256(j=sharedMem[j];) end = clock(); sumTime += end - start; } myArray[arrayLength] += j; duration[0] = sumTime; j = 0; for(int i=0; i<=its; i++){ if(i == 1) sumTime = 0; int k = 0; start = clock(); do{ k++; repeat6(j=sharedMem[j];) }while(k<repeatCt); end = clock(); sumTime += end - start; } myArray[arrayLength] += j; duration[1] = sumTime; }
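The latency kernel above measures shared-memory load latency by pointer chasing (j = sharedMem[j]), so it relies on the host filling myArray with a chain of indices; that host code is not part of this file. A typical initialization, shown here only as an assumed sketch rather than the actual BenchIT host code, reserves one extra element because the kernel also writes to myArray[arrayLength]:

#include <vector>

// Build a stride-`stride` index chain so each load yields the index of the next load.
static std::vector<unsigned int> MakeChain(int arrayLength, int stride) {
    std::vector<unsigned int> chain(arrayLength + 1, 0u);  // +1: sink slot myArray[arrayLength]
    for (int i = 0; i < arrayLength; ++i)
        chain[i] = static_cast<unsigned int>((i + stride) % arrayLength);
    return chain;
}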
13654610dec4af74fc58b688dd9cf66619a232f2.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication as described in Chapter 3 * of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * See also: * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra," * in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08), * Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11. */ // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <hip/hip_runtime.h> // Helper functions and utilities to work with CUDA #include <helper_functions.h> static __device__ __inline__ int __mysmid(){ int smid; asm volatile("mov.u32 %0, %%smid;" : "=r"(smid)); return smid; } template <int BLOCK_SIZE> __global__ void matrixMulCUDASingleBlock(float *C, float *A, float *B, int hA, int wA, int wB, int should_profile, char *name) { // Thread index int tx = threadIdx.x; int ty = threadIdx.y; long long int start_time, end_time; if (should_profile) { if (tx == 0 && ty == 0) { start_time = clock64(); } } for (int x_a = 0; x_a < wA; x_a += BLOCK_SIZE) { for (int y_a = 0; y_a < hA; y_a += BLOCK_SIZE) { for (int x_b = 0; x_b < wB; x_b += BLOCK_SIZE) { // Load blocks of size BLOCK_SIZE: <x_a, y_a>, <x_b, x_a> __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; As[ty][tx] = A[(y_a + ty) * wA + x_a + tx]; Bs[ty][tx] = B[(x_a + ty) * wB + x_b + tx]; __syncthreads(); float Csub = 0; for (int k = 0; k < BLOCK_SIZE; k++) { Csub += As[ty][k] * Bs[k][tx]; } // Block of c: <x_b, y_a> int c = y_a * wB + x_b; C[c + wB * ty + tx] += Csub; __syncthreads(); } } } if (should_profile) { if (tx == 0 && ty == 0) { end_time = clock64(); printf("- %d SM_%d_%s_nthreads=%d %lld %lld\n", __mysmid(), __mysmid(), name, BLOCK_SIZE*BLOCK_SIZE, start_time, end_time); } } } /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ template <int BLOCK_SIZE> __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of 
A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Run a simple test of matrix multiplication using CUDA */ int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB) { // Allocate host memory for matrices A and B unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Allocate device memory float *d_A, *d_B, *d_C; // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *) malloc(mem_size_C); if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } hipError_t error; error = hipMalloc((void **) &d_A, mem_size_A); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **) &d_B, mem_size_B); if (error != hipSuccess) { printf("hipMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **) &d_C, mem_size_C); if (error != hipSuccess) { printf("hipMalloc d_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } hipMemset(d_C, 0, mem_size_C); // copy host memory to device error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); char *device_name, *host_name = "MatrixMulOnePerSM"; hipMalloc(&device_name, sizeof(char) * strlen(host_name) + 1); hipMemcpy(device_name, host_name, strlen(host_name), hipMemcpyHostToDevice); // Performs warmup operation using matrixMul CUDA kernel if (block_size == 16) { //matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); hipLaunchKernelGGL(( matrixMulCUDASingleBlock<16>), dim3(1), dim3(threads), 0, 0, d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x, 0, device_name); } else if (block_size == 8) { 
hipLaunchKernelGGL(( matrixMulCUDASingleBlock<8>), dim3(1), dim3(threads), 0, 0, d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x, 0, device_name); } else { //matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); hipLaunchKernelGGL(( matrixMulCUDASingleBlock<32>), dim3(1), dim3(threads), 0, 0, d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x, 0, device_name); } hipDeviceSynchronize(); // Allocate CUDA events that we'll use for timing hipEvent_t start; error = hipEventCreate(&start); if (error != hipSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } hipEvent_t stop; error = hipEventCreate(&stop); if (error != hipSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = hipEventRecord(start, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel int nIter = 300; for (int j = 0; j < nIter; j++) { if (block_size == 16) { hipLaunchKernelGGL(( matrixMulCUDASingleBlock<16>), dim3(1), dim3(threads), 0, 0, d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x, 0, device_name); } else if (block_size == 8) { hipLaunchKernelGGL(( matrixMulCUDASingleBlock<8>), dim3(1), dim3(threads), 0, 0, d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x, 0, device_name); } else { hipLaunchKernelGGL(( matrixMulCUDASingleBlock<32>), dim3(1), dim3(threads), 0, 0, d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x, 0, device_name); } } // Record the stop event error = hipEventRecord(stop, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = hipEventSynchronize(stop); if (error != hipSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = hipEventElapsedTime(&msecTotal, start, stop); if (error != hipSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // Correctness check hipMemset(d_C, 0, mem_size_C); if (block_size == 16) { hipLaunchKernelGGL(( matrixMulCUDASingleBlock<16>), dim3(1), dim3(threads), 0, 0, d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x, 0, device_name); } else if (block_size == 8) { hipLaunchKernelGGL(( matrixMulCUDASingleBlock<8>), dim3(1), dim3(threads), 0, 0, d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x, 0, device_name); } else { hipLaunchKernelGGL(( matrixMulCUDASingleBlock<32>), dim3(1), dim3(threads), 0, 0, d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x, 0, device_name); } // Copy result from device to host error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost); if (error != hipSuccess) { printf("hipMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } printf("Checking computed result for correctness: "); bool correct = true; // test relative error by 
the formula // |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps double eps = 1.e-6 ; // machine zero for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++) { double abs_err = fabs(h_C[i] - (dimsA.x * valB)); double dot_length = dimsA.x; double abs_val = fabs(h_C[i]); double rel_err = abs_err/abs_val/dot_length ; if (rel_err > eps) { printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps); correct = false; } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); // Clean up memory free(h_A); free(h_B); free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n"); // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); if (correct) { return EXIT_SUCCESS; } else { return EXIT_FAILURE; } } /** * Run a simple test of matrix multiplication using CUDA on every SM */ int matrixMultiplyOnePerSM(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB) { // Allocate host memory for matrices A and B unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Get number of SMs struct hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, 0); int num_sm = devProp.multiProcessorCount; printf("Number of SMs: %d\n", num_sm); // Allocate device memory float **d_A, **d_B, **d_C; d_A = (float **)malloc(sizeof(float *) * num_sm); d_B = (float **)malloc(sizeof(float *) * num_sm); d_C = (float **)malloc(sizeof(float *) * num_sm); hipStream_t *streams; streams = (hipStream_t *)malloc(sizeof(hipStream_t) * num_sm); // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *) malloc(mem_size_C); if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } hipError_t error; for (int i = 0; i < num_sm; i++) { error = hipStreamCreate(&streams[i]); if (error != hipSuccess) { printf("hipStreamCreate returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **) &d_A[i], mem_size_A); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **) &d_B[i], mem_size_B); if (error != hipSuccess) { printf("hipMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **) &d_C[i], mem_size_C); if (error != hipSuccess) { printf("hipMalloc d_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } hipMemset(d_C, 0, mem_size_C); // copy host memory to device error = hipMemcpy(d_A[i], h_A, mem_size_A, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMemcpy(d_B[i], h_B, mem_size_B, hipMemcpyHostToDevice); if (error != hipSuccess) { 
printf("hipMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } } // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); char *device_name, *host_name = "MatrixMulOnePerSM"; hipMalloc(&device_name, sizeof(char) * strlen(host_name) + 1); hipMemcpy(device_name, host_name, strlen(host_name)+1, hipMemcpyHostToDevice); // Performs warmup operation using matrixMul CUDA kernel for (int i = 0; i < num_sm; i++) { if (block_size == 16) { hipLaunchKernelGGL(( matrixMulCUDASingleBlock<16>), dim3(1), dim3(threads), 0, streams[i], d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, 0, device_name); } else if (block_size == 8) { hipLaunchKernelGGL(( matrixMulCUDASingleBlock<8>), dim3(1), dim3(threads), 0, streams[i], d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, 0, device_name); } else { hipLaunchKernelGGL(( matrixMulCUDASingleBlock<32>), dim3(1), dim3(threads), 0, streams[i], d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, 0, device_name); } } hipDeviceSynchronize(); // Allocate CUDA events that we'll use for timing hipEvent_t start; error = hipEventCreate(&start); if (error != hipSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } hipEvent_t stop; error = hipEventCreate(&stop); if (error != hipSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = hipEventRecord(start, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel int nIter = 300; for (int j = 0; j < nIter; j++) { for (int i = 0; i < num_sm; i++) { if (block_size == 16) { hipLaunchKernelGGL(( matrixMulCUDASingleBlock<16>), dim3(1), dim3(threads), 0, streams[i], d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, j==0, device_name); } else if (block_size == 8) { hipLaunchKernelGGL(( matrixMulCUDASingleBlock<8>), dim3(1), dim3(threads), 0, streams[i], d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, j==0, device_name); } else { hipLaunchKernelGGL(( matrixMulCUDASingleBlock<32>), dim3(1), dim3(threads), 0, streams[i], d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, j==0, device_name); } } } // Record the stop event error = hipEventRecord(stop, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = hipEventSynchronize(stop); if (error != hipSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = hipEventElapsedTime(&msecTotal, start, stop); if (error != hipSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x * num_sm; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // Clean up memory free(h_A); free(h_B); free(h_C); hipFree(d_A); 
hipFree(d_B); hipFree(d_C); printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n"); // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); return EXIT_SUCCESS; } /** * Run a simple test of matrix multiplication using CUDA on every SM */ int matrixMultiplyTwoPerSM(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB) { // Allocate host memory for matrices A and B unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Get number of SMs struct hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, 0); int num_sm = devProp.multiProcessorCount; printf("Number of SMs: %d\n", num_sm); // Allocate device memory float **d_A, **d_B, **d_C; d_A = (float **)malloc(sizeof(float *) * num_sm * 2); d_B = (float **)malloc(sizeof(float *) * num_sm * 2); d_C = (float **)malloc(sizeof(float *) * num_sm * 2); hipStream_t *streams; streams = (hipStream_t *)malloc(sizeof(hipStream_t) * num_sm * 2); // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *) malloc(mem_size_C); if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } hipError_t error; for (int i = 0; i < num_sm * 2; i++) { error = hipStreamCreate(&streams[i]); if (error != hipSuccess) { printf("hipStreamCreate returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **) &d_A[i], mem_size_A); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **) &d_B[i], mem_size_B); if (error != hipSuccess) { printf("hipMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **) &d_C[i], mem_size_C); if (error != hipSuccess) { printf("hipMalloc d_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } hipMemset(d_C, 0, mem_size_C); // copy host memory to device error = hipMemcpy(d_A[i], h_A, mem_size_A, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMemcpy(d_B[i], h_B, mem_size_B, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } } // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); char *device_name, *host_name = "MatrixMulTwoPerSM"; hipMalloc(&device_name, sizeof(char) * strlen(host_name) + 1); hipMemcpy(device_name, host_name, strlen(host_name)+1, hipMemcpyHostToDevice); // Performs warmup operation using matrixMul CUDA kernel for (int i = 0; i < num_sm * 2; i++) { if (block_size == 16) { hipLaunchKernelGGL(( matrixMulCUDASingleBlock<16>), dim3(1), 
dim3(threads), 0, streams[i], d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, 0, device_name); } else { hipLaunchKernelGGL(( matrixMulCUDASingleBlock<32>), dim3(1), dim3(threads), 0, streams[i], d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, 0, device_name); }} hipDeviceSynchronize(); // Allocate CUDA events that we'll use for timing hipEvent_t start; error = hipEventCreate(&start); if (error != hipSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } hipEvent_t stop; error = hipEventCreate(&stop); if (error != hipSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = hipEventRecord(start, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel int nIter = 300; for (int j = 0; j < nIter; j++) { for (int i = 0; i < num_sm * 2; i++) { if (block_size == 16) { hipLaunchKernelGGL(( matrixMulCUDASingleBlock<16>), dim3(1), dim3(threads), 0, streams[i], d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, j==0, device_name); } else if (block_size == 8) { hipLaunchKernelGGL(( matrixMulCUDASingleBlock<8>), dim3(1), dim3(threads), 0, streams[i], d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, j==0, device_name); } else { hipLaunchKernelGGL(( matrixMulCUDASingleBlock<32>), dim3(1), dim3(threads), 0, streams[i], d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, j==0, device_name); } } } // Record the stop event error = hipEventRecord(stop, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = hipEventSynchronize(stop); if (error != hipSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = hipEventElapsedTime(&msecTotal, start, stop); if (error != hipSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x * num_sm * 2; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // Clean up memory free(h_A); free(h_B); free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n"); // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. 
Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); return EXIT_SUCCESS; } /** * Program main */ int main(int argc, char **argv) { if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?")) { printf("Usage -device=n (n >= 0 for deviceID)\n"); printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n"); printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n"); printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n"); exit(EXIT_SUCCESS); } // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line int devID = 0; if (checkCmdLineFlag(argc, (const char **)argv, "device")) { devID = getCmdLineArgumentInt(argc, (const char **)argv, "device"); hipSetDevice(devID); } hipError_t error; hipDeviceProp_t deviceProp; error = hipGetDevice(&devID); if (error != hipSuccess) { printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__); } error = hipGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == hipComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n"); exit(EXIT_SUCCESS); } // Use a larger block size for Fermi and above //int block_size = (deviceProp.major < 2) ? 16 : 32; int block_size = 8; dim3 dimsA(5*2*32, 5*2*32, 1); dim3 dimsB(5*4*32, 5*2*32, 1); // width of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "wA")) { dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA"); } // height of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "hA")) { dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA"); } // width of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "wB")) { dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB"); } // height of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "hB")) { dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB"); } if (dimsA.x != dimsB.y) { printf("Error: outer matrix dimensions must be equal. (%d != %d)\n", dimsA.x, dimsB.y); exit(EXIT_FAILURE); } matrixMultiply(argc, argv, block_size, dimsA, dimsB); int matrix_result = matrixMultiplyOnePerSM(argc, argv, block_size, dimsA, dimsB); matrix_result = matrixMultiplyTwoPerSM(argc, argv, block_size, dimsA, dimsB); exit(matrix_result); }
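The timing code above reports performance from the usual dense-matmul operation count: multiplying an hA x wA matrix by a wA x wB matrix costs 2 * hA * wA * wB floating-point operations, divided by the average kernel time over nIter launches. Note also that the single-block kernel accumulates into C with +=, which is why d_C is cleared with hipMemset before the timed and correctness runs. A small helper capturing the arithmetic (illustrative only, not part of the sample):

// GFLOP/s for C(M x N) = A(M x K) * B(K x N), given the average time per multiply in ms.
static double GigaFlops(double M, double N, double K, double msec_per_matmul) {
    double flops = 2.0 * M * N * K;                 // one multiply and one add per term
    return (flops * 1.0e-9) / (msec_per_matmul / 1000.0);
}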
13654610dec4af74fc58b688dd9cf66619a232f2.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication as described in Chapter 3 * of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * See also: * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra," * in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08), * Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11. */ // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <cuda_runtime.h> // Helper functions and utilities to work with CUDA #include <helper_functions.h> static __device__ __inline__ int __mysmid(){ int smid; asm volatile("mov.u32 %0, %%smid;" : "=r"(smid)); return smid; } template <int BLOCK_SIZE> __global__ void matrixMulCUDASingleBlock(float *C, float *A, float *B, int hA, int wA, int wB, int should_profile, char *name) { // Thread index int tx = threadIdx.x; int ty = threadIdx.y; long long int start_time, end_time; if (should_profile) { if (tx == 0 && ty == 0) { start_time = clock64(); } } for (int x_a = 0; x_a < wA; x_a += BLOCK_SIZE) { for (int y_a = 0; y_a < hA; y_a += BLOCK_SIZE) { for (int x_b = 0; x_b < wB; x_b += BLOCK_SIZE) { // Load blocks of size BLOCK_SIZE: <x_a, y_a>, <x_b, x_a> __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; As[ty][tx] = A[(y_a + ty) * wA + x_a + tx]; Bs[ty][tx] = B[(x_a + ty) * wB + x_b + tx]; __syncthreads(); float Csub = 0; for (int k = 0; k < BLOCK_SIZE; k++) { Csub += As[ty][k] * Bs[k][tx]; } // Block of c: <x_b, y_a> int c = y_a * wB + x_b; C[c + wB * ty + tx] += Csub; __syncthreads(); } } } if (should_profile) { if (tx == 0 && ty == 0) { end_time = clock64(); printf("- %d SM_%d_%s_nthreads=%d %lld %lld\n", __mysmid(), __mysmid(), name, BLOCK_SIZE*BLOCK_SIZE, start_time, end_time); } } } /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ template <int BLOCK_SIZE> __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration 
of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Run a simple test of matrix multiplication using CUDA */ int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB) { // Allocate host memory for matrices A and B unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Allocate device memory float *d_A, *d_B, *d_C; // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *) malloc(mem_size_C); if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } cudaError_t error; error = cudaMalloc((void **) &d_A, mem_size_A); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_B, mem_size_B); if (error != cudaSuccess) { printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_C, mem_size_C); if (error != cudaSuccess) { printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } cudaMemset(d_C, 0, mem_size_C); // copy host memory to device error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); char *device_name, *host_name = "MatrixMulOnePerSM"; cudaMalloc(&device_name, sizeof(char) * strlen(host_name) + 1); cudaMemcpy(device_name, host_name, strlen(host_name), cudaMemcpyHostToDevice); // Performs warmup operation using matrixMul CUDA kernel if (block_size == 16) { //matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); matrixMulCUDASingleBlock<16><<<1, threads>>>(d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x, 0, device_name); } else if (block_size == 8) { matrixMulCUDASingleBlock<8><<<1, threads>>>(d_C, d_A, d_B, dimsA.y, dimsA.x, 
dimsB.x, 0, device_name); } else { //matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); matrixMulCUDASingleBlock<32><<<1, threads>>>(d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x, 0, device_name); } cudaDeviceSynchronize(); // Allocate CUDA events that we'll use for timing cudaEvent_t start; error = cudaEventCreate(&start); if (error != cudaSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } cudaEvent_t stop; error = cudaEventCreate(&stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = cudaEventRecord(start, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel int nIter = 300; for (int j = 0; j < nIter; j++) { if (block_size == 16) { matrixMulCUDASingleBlock<16><<<1, threads>>>(d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x, 0, device_name); } else if (block_size == 8) { matrixMulCUDASingleBlock<8><<<1, threads>>>(d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x, 0, device_name); } else { matrixMulCUDASingleBlock<32><<<1, threads>>>(d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x, 0, device_name); } } // Record the stop event error = cudaEventRecord(stop, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = cudaEventSynchronize(stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = cudaEventElapsedTime(&msecTotal, start, stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // Correctness check cudaMemset(d_C, 0, mem_size_C); if (block_size == 16) { matrixMulCUDASingleBlock<16><<<1, threads>>>(d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x, 0, device_name); } else if (block_size == 8) { matrixMulCUDASingleBlock<8><<<1, threads>>>(d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x, 0, device_name); } else { matrixMulCUDASingleBlock<32><<<1, threads>>>(d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x, 0, device_name); } // Copy result from device to host error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost); if (error != cudaSuccess) { printf("cudaMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } printf("Checking computed result for correctness: "); bool correct = true; // test relative error by the formula // |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps double eps = 1.e-6 ; // machine zero for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++) { double abs_err = fabs(h_C[i] - (dimsA.x * valB)); double dot_length = dimsA.x; double abs_val = fabs(h_C[i]); double rel_err = abs_err/abs_val/dot_length ; if (rel_err > eps) { printf("Error! 
Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps); correct = false; } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); // Clean up memory free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n"); // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); if (correct) { return EXIT_SUCCESS; } else { return EXIT_FAILURE; } } /** * Run a simple test of matrix multiplication using CUDA on every SM */ int matrixMultiplyOnePerSM(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB) { // Allocate host memory for matrices A and B unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Get number of SMs struct cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, 0); int num_sm = devProp.multiProcessorCount; printf("Number of SMs: %d\n", num_sm); // Allocate device memory float **d_A, **d_B, **d_C; d_A = (float **)malloc(sizeof(float *) * num_sm); d_B = (float **)malloc(sizeof(float *) * num_sm); d_C = (float **)malloc(sizeof(float *) * num_sm); cudaStream_t *streams; streams = (cudaStream_t *)malloc(sizeof(cudaStream_t) * num_sm); // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *) malloc(mem_size_C); if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } cudaError_t error; for (int i = 0; i < num_sm; i++) { error = cudaStreamCreate(&streams[i]); if (error != cudaSuccess) { printf("cudaStreamCreate returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_A[i], mem_size_A); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_B[i], mem_size_B); if (error != cudaSuccess) { printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_C[i], mem_size_C); if (error != cudaSuccess) { printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } cudaMemset(d_C, 0, mem_size_C); // copy host memory to device error = cudaMemcpy(d_A[i], h_A, mem_size_A, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMemcpy(d_B[i], h_B, mem_size_B, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } } // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); char *device_name, *host_name = "MatrixMulOnePerSM"; cudaMalloc(&device_name, sizeof(char) * 
strlen(host_name) + 1); cudaMemcpy(device_name, host_name, strlen(host_name)+1, cudaMemcpyHostToDevice); // Performs warmup operation using matrixMul CUDA kernel for (int i = 0; i < num_sm; i++) { if (block_size == 16) { matrixMulCUDASingleBlock<16><<<1, threads, 0, streams[i]>>>(d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, 0, device_name); } else if (block_size == 8) { matrixMulCUDASingleBlock<8><<<1, threads, 0, streams[i]>>>(d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, 0, device_name); } else { matrixMulCUDASingleBlock<32><<<1, threads, 0, streams[i]>>>(d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, 0, device_name); } } cudaDeviceSynchronize(); // Allocate CUDA events that we'll use for timing cudaEvent_t start; error = cudaEventCreate(&start); if (error != cudaSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } cudaEvent_t stop; error = cudaEventCreate(&stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = cudaEventRecord(start, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel int nIter = 300; for (int j = 0; j < nIter; j++) { for (int i = 0; i < num_sm; i++) { if (block_size == 16) { matrixMulCUDASingleBlock<16><<<1, threads, 0, streams[i]>>>(d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, j==0, device_name); } else if (block_size == 8) { matrixMulCUDASingleBlock<8><<<1, threads, 0, streams[i]>>>(d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, j==0, device_name); } else { matrixMulCUDASingleBlock<32><<<1, threads, 0, streams[i]>>>(d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, j==0, device_name); } } } // Record the stop event error = cudaEventRecord(stop, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = cudaEventSynchronize(stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = cudaEventElapsedTime(&msecTotal, start, stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x * num_sm; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // Clean up memory free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n"); // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. 
Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); return EXIT_SUCCESS; } /** * Run a simple test of matrix multiplication using CUDA on every SM */ int matrixMultiplyTwoPerSM(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB) { // Allocate host memory for matrices A and B unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Get number of SMs struct cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, 0); int num_sm = devProp.multiProcessorCount; printf("Number of SMs: %d\n", num_sm); // Allocate device memory float **d_A, **d_B, **d_C; d_A = (float **)malloc(sizeof(float *) * num_sm * 2); d_B = (float **)malloc(sizeof(float *) * num_sm * 2); d_C = (float **)malloc(sizeof(float *) * num_sm * 2); cudaStream_t *streams; streams = (cudaStream_t *)malloc(sizeof(cudaStream_t) * num_sm * 2); // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *) malloc(mem_size_C); if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } cudaError_t error; for (int i = 0; i < num_sm * 2; i++) { error = cudaStreamCreate(&streams[i]); if (error != cudaSuccess) { printf("cudaStreamCreate returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_A[i], mem_size_A); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_B[i], mem_size_B); if (error != cudaSuccess) { printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_C[i], mem_size_C); if (error != cudaSuccess) { printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } cudaMemset(d_C, 0, mem_size_C); // copy host memory to device error = cudaMemcpy(d_A[i], h_A, mem_size_A, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMemcpy(d_B[i], h_B, mem_size_B, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } } // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); char *device_name, *host_name = "MatrixMulTwoPerSM"; cudaMalloc(&device_name, sizeof(char) * strlen(host_name) + 1); cudaMemcpy(device_name, host_name, strlen(host_name)+1, cudaMemcpyHostToDevice); // Performs warmup operation using matrixMul CUDA kernel for (int i = 0; i < num_sm * 2; i++) { if (block_size == 16) { matrixMulCUDASingleBlock<16><<<1, threads, 0, streams[i]>>>(d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, 0, device_name); } else { matrixMulCUDASingleBlock<32><<<1, threads, 0, streams[i]>>>(d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, 0, device_name); }} cudaDeviceSynchronize(); // Allocate CUDA events that we'll use for timing cudaEvent_t start; error = cudaEventCreate(&start); if (error != 
cudaSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } cudaEvent_t stop; error = cudaEventCreate(&stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = cudaEventRecord(start, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel int nIter = 300; for (int j = 0; j < nIter; j++) { for (int i = 0; i < num_sm * 2; i++) { if (block_size == 16) { matrixMulCUDASingleBlock<16><<<1, threads, 0, streams[i]>>>(d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, j==0, device_name); } else if (block_size == 8) { matrixMulCUDASingleBlock<8><<<1, threads, 0, streams[i]>>>(d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, j==0, device_name); } else { matrixMulCUDASingleBlock<32><<<1, threads, 0, streams[i]>>>(d_C[i], d_A[i], d_B[i], dimsA.y, dimsA.x, dimsB.x, j==0, device_name); } } } // Record the stop event error = cudaEventRecord(stop, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = cudaEventSynchronize(stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = cudaEventElapsedTime(&msecTotal, start, stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x * num_sm * 2; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // Clean up memory free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n"); // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. 
Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); return EXIT_SUCCESS; } /** * Program main */ int main(int argc, char **argv) { if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?")) { printf("Usage -device=n (n >= 0 for deviceID)\n"); printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n"); printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n"); printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n"); exit(EXIT_SUCCESS); } // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line int devID = 0; if (checkCmdLineFlag(argc, (const char **)argv, "device")) { devID = getCmdLineArgumentInt(argc, (const char **)argv, "device"); cudaSetDevice(devID); } cudaError_t error; cudaDeviceProp deviceProp; error = cudaGetDevice(&devID); if (error != cudaSuccess) { printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__); } error = cudaGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == cudaComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n"); exit(EXIT_SUCCESS); } // Use a larger block size for Fermi and above //int block_size = (deviceProp.major < 2) ? 16 : 32; int block_size = 8; dim3 dimsA(5*2*32, 5*2*32, 1); dim3 dimsB(5*4*32, 5*2*32, 1); // width of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "wA")) { dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA"); } // height of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "hA")) { dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA"); } // width of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "wB")) { dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB"); } // height of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "hB")) { dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB"); } if (dimsA.x != dimsB.y) { printf("Error: outer matrix dimensions must be equal. (%d != %d)\n", dimsA.x, dimsB.y); exit(EXIT_FAILURE); } matrixMultiply(argc, argv, block_size, dimsA, dimsB); int matrix_result = matrixMultiplyOnePerSM(argc, argv, block_size, dimsA, dimsB); matrix_result = matrixMultiplyTwoPerSM(argc, argv, block_size, dimsA, dimsB); exit(matrix_result); }
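In the per-SM variants above, d_A, d_B, and d_C are host arrays of per-stream device pointers, so the buffers that need clearing before the launches and releasing afterwards are the individual d_A[i] / d_B[i] / d_C[i] allocations rather than the pointer arrays themselves. A minimal sketch of that allocate/clear/launch/release pattern, reusing the kernel and sizes from the sample above (the helper name and the use of std::vector are illustrative, and error checks are omitted):

#include <vector>
#include <cuda_runtime.h>

// Sketch (hypothetical helper): one stream per SM, each with its own device buffers.
// matrixMulCUDASingleBlock<8> and the mem_size_* / dims* values are the ones used in the sample above.
void launchOnePerSM(int num_sm, const float *h_A, const float *h_B,
                    size_t mem_size_A, size_t mem_size_B, size_t mem_size_C,
                    dim3 dimsA, dim3 dimsB, char *device_name)
{
    std::vector<cudaStream_t> streams(num_sm);
    std::vector<float*> dA(num_sm), dB(num_sm), dC(num_sm);

    for (int i = 0; i < num_sm; ++i) {
        cudaStreamCreate(&streams[i]);
        cudaMalloc((void **)&dA[i], mem_size_A);
        cudaMalloc((void **)&dB[i], mem_size_B);
        cudaMalloc((void **)&dC[i], mem_size_C);
        cudaMemsetAsync(dC[i], 0, mem_size_C, streams[i]);  // clear the per-stream device buffer
        cudaMemcpyAsync(dA[i], h_A, mem_size_A, cudaMemcpyHostToDevice, streams[i]);
        cudaMemcpyAsync(dB[i], h_B, mem_size_B, cudaMemcpyHostToDevice, streams[i]);
        matrixMulCUDASingleBlock<8><<<1, dim3(8, 8), 0, streams[i]>>>(
            dC[i], dA[i], dB[i], dimsA.y, dimsA.x, dimsB.x, 0, device_name);
    }
    cudaDeviceSynchronize();

    for (int i = 0; i < num_sm; ++i) {   // release per-stream resources
        cudaFree(dA[i]); cudaFree(dB[i]); cudaFree(dC[i]);
        cudaStreamDestroy(streams[i]);
    }
}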
7e9bb30091fe5e23c6a3780a7e048febd220a5fb.hip
// !!! This is a file automatically generated by hipify!!!
#include "cudaLib.h"
#include <iostream>
#include <hip/hip_runtime.h>

namespace {
template <typename T>
__global__ void kernel(T aValue)
{
  printf("Hello from CUDA with value=%d\n", aValue);
}
}

template <typename T>
void cudaRun(T aValue)
{
  hipLaunchKernelGGL((kernel), dim3(1), dim3(1), 0, 0, aValue);
  hipDeviceSynchronize();
  // Capture the error once: hipGetLastError() resets the sticky error state,
  // so calling it a second time inside the printf would report hipSuccess.
  hipError_t err = hipGetLastError();
  if (err != hipSuccess)
    printf("Error: %s\n", hipGetErrorString(err));
}

// explicit instantiation (the template definition above must be visible here)
template void cudaRun<int>(int);
template void cudaRun<uint16_t>(uint16_t);
7e9bb30091fe5e23c6a3780a7e048febd220a5fb.cu
#include "cudaLib.h"
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>

namespace {
template <typename T>
__global__ void kernel(T aValue)
{
  printf("Hello from CUDA with value=%d\n", aValue);
}
}

template <typename T>
void cudaRun(T aValue)
{
  kernel<<<1,1>>>(aValue);
  cudaDeviceSynchronize();
  // Capture the error once: cudaGetLastError() resets the sticky error state,
  // so calling it a second time inside the printf would report cudaSuccess.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
    printf("Error: %s\n", cudaGetErrorString(err));
}

// explicit instantiation (the template definition above must be visible here)
template void cudaRun<int>(int);
template void cudaRun<uint16_t>(uint16_t);
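The explicit instantiations exist so that host code which only sees the declaration in cudaLib.h can still call cudaRun<int> and cudaRun<uint16_t> and have the linker resolve them in this translation unit. A minimal caller, assuming cudaLib.h declares template <typename T> void cudaRun(T); (that declaration is an assumption about the header, which is not shown here):

// main.cpp - built with a host compiler and linked against the cudaLib object file.
#include <cstdint>
#include "cudaLib.h"   // assumed to declare: template <typename T> void cudaRun(T);

int main()
{
  cudaRun<int>(42);                            // resolved by the explicit instantiation
  cudaRun<uint16_t>(static_cast<uint16_t>(7)); // ditto
  return 0;
}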
56fee1405bf256b32fc19fbb62b1b73945d06418.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

extern "C" __global__ __launch_bounds__(256) void sgemm_nn_128x128(
    const float *param_A, const float *param_B, float *param_C,
    float param_alpha, float param_beta,
    int param_lda8, int param_ldb8, int param_ldc,
    int param_m, int param_n, int param_k)
{
  __shared__ float share[128 * 8 * 4 + 32];

  int tid = threadIdx.x;
  share[tid] = 1;
}
56fee1405bf256b32fc19fbb62b1b73945d06418.cu
extern "C" __global__ __launch_bounds__(256) void sgemm_nn_128x128(
    const float *param_A, const float *param_B, float *param_C,
    float param_alpha, float param_beta,
    int param_lda8, int param_ldb8, int param_ldc,
    int param_m, int param_n, int param_k)
{
  __shared__ float share[128 * 8 * 4 + 32];

  int tid = threadIdx.x;
  share[tid] = 1;
}
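The signature and __launch_bounds__(256) suggest an SGEMM that assigns a 128x128 tile of C to each 256-thread block; the stub above only touches shared memory, so the following host-side launch is a hedged sketch of how such a kernel would typically be invoked, not a definition of its real blocking scheme:

// Hypothetical host-side launch for the stub above; the one-block-per-128x128-tile
// mapping is an assumption based on the kernel name, not something the stub defines.
void launch_sgemm_nn_128x128(const float *d_A, const float *d_B, float *d_C,
                             float alpha, float beta,
                             int lda8, int ldb8, int ldc, int m, int n, int k)
{
  dim3 block(256);                              // matches __launch_bounds__(256)
  dim3 grid((m + 127) / 128, (n + 127) / 128);  // one 128x128 tile of C per block (assumed)
  sgemm_nn_128x128<<<grid, block>>>(d_A, d_B, d_C, alpha, beta,
                                    lda8, ldb8, ldc, m, n, k);
}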
b6f4abf8a684baf093ceb0436ec24c39f4e3a95b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=16 --blockDim=64

#include "common.h"

__global__ void d_boxfilter_rgba_x(unsigned int *od, int w, int h, int r)
{
  __requires(w == 1024);
  __requires(h == 1024);
  float scale = 1.0f / (float)((r << 1) + 1);

  unsigned int y = blockIdx.x*blockDim.x + threadIdx.x;

  // as long as address is always less than height, we do work
  if (y < h)
  {
    float4 t = make_float4(0.0f);

    for (int x = -r; x <= r; x++)
    {
      t += tex2D(rgbaTex, x, y);
    }

    od[y * w] = rgbaFloatToInt(t * scale);

    for (int x = 1; x < w; x++)
    {
      t += tex2D(rgbaTex, x + r, y);
      t -= tex2D(rgbaTex, x - r - 1, y);
      od[y * w + x] = rgbaFloatToInt(t * scale);
    }
  }
}
b6f4abf8a684baf093ceb0436ec24c39f4e3a95b.cu
//pass
//--gridDim=16 --blockDim=64

#include "common.h"

__global__ void d_boxfilter_rgba_x(unsigned int *od, int w, int h, int r)
{
  __requires(w == 1024);
  __requires(h == 1024);
  float scale = 1.0f / (float)((r << 1) + 1);

  unsigned int y = blockIdx.x*blockDim.x + threadIdx.x;

  // as long as address is always less than height, we do work
  if (y < h)
  {
    float4 t = make_float4(0.0f);

    for (int x = -r; x <= r; x++)
    {
      t += tex2D(rgbaTex, x, y);
    }

    od[y * w] = rgbaFloatToInt(t * scale);

    for (int x = 1; x < w; x++)
    {
      t += tex2D(rgbaTex, x + r, y);
      t -= tex2D(rgbaTex, x - r - 1, y);
      od[y * w + x] = rgbaFloatToInt(t * scale);
    }
  }
}
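The kernel keeps a running windowed sum per row: it pays for the full (2r+1)-wide window only at x = 0 and then updates the sum in constant time per pixel by adding the sample that enters the window and subtracting the one that leaves it. A host-side sketch of the same idea on a single channel, with explicit edge clamping standing in for the texture's clamp addressing (names are illustrative):

// Reference sliding-window box filter for one row of a single-channel image.
// 'in' and 'out' hold 'w' pixels; out-of-range reads are clamped to the row edges.
void boxfilter_row(const float *in, float *out, int w, int r)
{
  float scale = 1.0f / (float)((r << 1) + 1);
  float sum = 0.0f;

  // seed the window centred on x = 0
  for (int x = -r; x <= r; ++x) {
    int xc = x < 0 ? 0 : (x >= w ? w - 1 : x);
    sum += in[xc];
  }
  out[0] = sum * scale;

  // slide: add the entering sample, remove the leaving one
  for (int x = 1; x < w; ++x) {
    int x_in  = (x + r >= w) ? w - 1 : x + r;
    int x_out = (x - r - 1 < 0) ? 0 : x - r - 1;
    sum += in[x_in];
    sum -= in[x_out];
    out[x] = sum * scale;
  }
}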
7f182784a18248e8437a27d6fd6460db0f2437ce.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/native/GridSampler.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <c10/macros/Macros.h> namespace at { namespace native { using namespace at::cuda::detail; using at::native::detail::GridSamplerInterpolation; using at::native::detail::GridSamplerPadding; namespace { static __forceinline__ __device__ float clip_coordinates(float in, int clip_limit) { return ::min(static_cast<float>(clip_limit - 1), ::max(in, 0.f)); } // clip_coordinates_set_grad works similarly to clip_coordinates except that // it also returns the `d output / d input` via pointer argument `grad_in`. // This is useful in the backward pass of grid_sampler. template <typename scalar_t> static __forceinline__ __device__ float clip_coordinates_set_grad(float in, int clip_limit, scalar_t *grad_in) { if (in < 0.f) { *grad_in = static_cast<scalar_t>(0); return 0.f; } else { float max = static_cast<float>(clip_limit - 1); if (in > max) { *grad_in = static_cast<scalar_t>(0); return max; } else { *grad_in = static_cast<scalar_t>(1); return in; } } } static __forceinline__ __device__ float reflect_coordinates(float in, int clip_limit) { if (clip_limit == static_cast<int>(1)) { return 0.f; } in = ::fabs(in); float max = static_cast<float>(clip_limit - 1); // `fmod` returns same sign as `in`, which is positive after the `fabs` above. float extra = ::fmod(in, max); int flips = static_cast<int>(::floor(in / max)); if (flips % 2 == 0) { return extra; } else { return max - extra; } } // reflect_coordinates_set_grad works similarly to reflect_coordinates except // that it also returns the `d output / d input` via pointer argument // `grad_in`. // This is useful in the backward pass of grid_sampler. template <typename scalar_t> static __forceinline__ __device__ float reflect_coordinates_set_grad(float in, int clip_limit, scalar_t *grad_in) { if (clip_limit == static_cast<int>(1)) { *grad_in = static_cast<scalar_t>(0); return 0.f; } int grad_in_mult_; if (in < 0.f) { grad_in_mult_ = -1; in = -in; } else { grad_in_mult_ = 1; } float max = static_cast<float>(clip_limit - 1); // `fmod` returns same sign as `in`, which is positive after the `if` above. 
float extra = ::fmod(in, max); int flips = static_cast<int>(::floor(in / max)); if (flips % 2 == 0) { *grad_in = static_cast<scalar_t>(grad_in_mult_); return extra; } else { *grad_in = static_cast<scalar_t>(-grad_in_mult_); return max - extra; } } static __forceinline__ __device__ bool within_bounds_2d(int h, int w, int H, int W) { return h >= 0 && h < H && w >= 0 && w < W; } static __forceinline__ __device__ bool within_bounds_3d(int d, int h, int w, int D, int H, int W) { return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W; } template<typename scalar_t> static __forceinline__ __device__ void safe_add_2d(scalar_t *data, int h, int w, int sH, int sW, int H, int W, scalar_t delta) { if (within_bounds_2d(h, w, H, W)) { atomicAdd(data + h * sH + w * sW, delta); } } template<typename scalar_t> static __forceinline__ __device__ void safe_add_3d(scalar_t *data, int d, int h, int w, int sD, int sH, int sW, int D, int H, int W, scalar_t delta) { if (within_bounds_3d(d, h, w, D, H, W)) { atomicAdd(data + d * sD + h * sH + w * sW, delta); } } template <typename scalar_t> C10_LAUNCH_BOUNDS(1024) __global__ void grid_sampler_2d_kernel( const int nthreads, TensorInfo<scalar_t, int> input, TensorInfo<scalar_t, int> grid, TensorInfo<scalar_t, int> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode) { int C = input.sizes[1]; int inp_H = input.sizes[2]; int inp_W = input.sizes[3]; int out_H = grid.sizes[1]; int out_W = grid.sizes[2]; int inp_sN = input.strides[0]; int inp_sC = input.strides[1]; int inp_sH = input.strides[2]; int inp_sW = input.strides[3]; int grid_sN = grid.strides[0]; int grid_sH = grid.strides[1]; int grid_sW = grid.strides[2]; int grid_sCoor = grid.strides[3]; int out_sN = output.strides[0]; int out_sC = output.strides[1]; int out_sH = output.strides[2]; int out_sW = output.strides[3]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int n = index / (out_H * out_W); const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; // normalize ix, iy from [-1, 1] to [0, IH-1] & [0, IW-1] float ixf = ((ix + 1.f) / 2) * (inp_W - 1); float iyf = ((iy + 1.f) / 2) * (inp_H - 1); if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders ixf = clip_coordinates(ixf, inp_W); iyf = clip_coordinates(iyf, inp_H); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders ixf = reflect_coordinates(ixf, inp_W); iyf = reflect_coordinates(iyf, inp_H); } ix = static_cast<scalar_t>(ixf); iy = static_cast<scalar_t>(iyf); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get NE, NW, SE, SW pixel values from (x, y) int ix_nw = static_cast<int>(::floor(ixf)); int iy_nw = static_cast<int>(::floor(iyf)); int ix_ne = ix_nw + 1; int iy_ne = iy_nw; int ix_sw = ix_nw; int iy_sw = iy_nw + 1; int ix_se = ix_nw + 1; int iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); // calculate bilinear weighted pixel value and set output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, 
out_ptr_NCHW += out_sC) { *out_ptr_NCHW = static_cast<scalar_t>(0); if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ixf)); int iy_nearest = static_cast<int>(::round(iyf)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) { *out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCHW = static_cast<scalar_t>(0); } } } } } template <typename scalar_t> C10_LAUNCH_BOUNDS(1024) __global__ void grid_sampler_3d_kernel( const int nthreads, TensorInfo<scalar_t, int> input, TensorInfo<scalar_t, int> grid, TensorInfo<scalar_t, int> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode) { int C = input.sizes[1]; int inp_D = input.sizes[2]; int inp_H = input.sizes[3]; int inp_W = input.sizes[4]; int out_D = grid.sizes[1]; int out_H = grid.sizes[2]; int out_W = grid.sizes[3]; int inp_sN = input.strides[0]; int inp_sC = input.strides[1]; int inp_sD = input.strides[2]; int inp_sH = input.strides[3]; int inp_sW = input.strides[4]; int grid_sN = grid.strides[0]; int grid_sD = grid.strides[1]; int grid_sH = grid.strides[2]; int grid_sW = grid.strides[3]; int grid_sCoor = grid.strides[4]; int out_sN = output.strides[0]; int out_sC = output.strides[1]; int out_sD = output.strides[2]; int out_sH = output.strides[3]; int out_sW = output.strides[4]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int d = (index / (out_H * out_W)) % out_D; const int n = index / (out_D * out_H * out_W); const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; // normalize ix, iy, iz from [-1, 1] to [0, inp_W-1] & [0, inp_H-1] & [0, inp_D-1] float ixf = ((ix + 1.f) / 2) * (inp_W - 1); float iyf = ((iy + 1.f) / 2) * (inp_H - 1); float izf = ((iz + 1.f) / 2) * (inp_D - 1); if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders ixf = clip_coordinates(ixf, inp_W); iyf = clip_coordinates(iyf, inp_H); izf = clip_coordinates(izf, inp_D); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders ixf = reflect_coordinates(ixf, inp_W); iyf = reflect_coordinates(iyf, inp_H); izf = reflect_coordinates(izf, inp_D); } if (interpolation_mode == GridSamplerInterpolation::Bilinear) { ix = static_cast<scalar_t>(ixf); iy = static_cast<scalar_t>(iyf); iz = static_cast<scalar_t>(izf); // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom int ix_tnw = 
static_cast<int>(::floor(ix)); int iy_tnw = static_cast<int>(::floor(iy)); int iz_tnw = static_cast<int>(::floor(iz)); int ix_tne = ix_tnw + 1; int iy_tne = iy_tnw; int iz_tne = iz_tnw; int ix_tsw = ix_tnw; int iy_tsw = iy_tnw + 1; int iz_tsw = iz_tnw; int ix_tse = ix_tnw + 1; int iy_tse = iy_tnw + 1; int iz_tse = iz_tnw; int ix_bnw = ix_tnw; int iy_bnw = iy_tnw; int iz_bnw = iz_tnw + 1; int ix_bne = ix_tnw + 1; int iy_bne = iy_tnw; int iz_bne = iz_tnw + 1; int ix_bsw = ix_tnw; int iy_bsw = iy_tnw + 1; int iz_bsw = iz_tnw + 1; int ix_bse = ix_tnw + 1; int iy_bse = iy_tnw + 1; int iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { // (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * tne // + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * tse // + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * bne // + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * bse *out_ptr_NCDHW = static_cast<scalar_t>(0); if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ixf)); int iy_nearest = static_cast<int>(::round(iyf)); int iz_nearest = static_cast<int>(::round(izf)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW = 
inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCDHW = static_cast<scalar_t>(0); } } } } } template <typename scalar_t> C10_LAUNCH_BOUNDS(1024) __global__ void grid_sampler_2d_backward_kernel( const int nthreads, TensorInfo<scalar_t, int> grad_output, TensorInfo<scalar_t, int> input, TensorInfo<scalar_t, int> grid, TensorInfo<scalar_t, int> grad_input, // initialized to zeros TensorInfo<scalar_t, int> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode) { int C = input.sizes[1]; int inp_H = input.sizes[2]; int inp_W = input.sizes[3]; int out_H = grid.sizes[1]; int out_W = grid.sizes[2]; int inp_sN = input.strides[0]; int inp_sC = input.strides[1]; int inp_sH = input.strides[2]; int inp_sW = input.strides[3]; int grid_sN = grid.strides[0]; int grid_sH = grid.strides[1]; int grid_sW = grid.strides[2]; int grid_sCoor = grid.strides[3]; int gOut_sN = grad_output.strides[0]; int gOut_sC = grad_output.strides[1]; int gOut_sH = grad_output.strides[2]; int gOut_sW = grad_output.strides[3]; int gInp_sN = grad_input.strides[0]; int gInp_sC = grad_input.strides[1]; int gInp_sH = grad_input.strides[2]; int gInp_sW = grad_input.strides[3]; int gGrid_sW = grad_grid.strides[2]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int n = index / (out_H * out_W); const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; // normalize ix, iy from [-1, 1] to [0, IH-1] & [0, IW-1] float ixf = ((ix + 1.f) / 2) * (inp_W - 1); float iyf = ((iy + 1.f) / 2) * (inp_H - 1); // multipliers for gradients on ix and iy // E.g., 0 for out-of-bound indices when GridSamplerPadding::Border scalar_t gix_mult, giy_mult; if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders ixf = clip_coordinates_set_grad(ixf, inp_W, &gix_mult); iyf = clip_coordinates_set_grad(iyf, inp_H, &giy_mult); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders ixf = reflect_coordinates_set_grad(ixf, inp_W, &gix_mult); iyf = reflect_coordinates_set_grad(iyf, inp_H, &giy_mult); } else { // padding_mode == GridSamplerPadding::Zeros gix_mult = static_cast<scalar_t>(1); giy_mult = static_cast<scalar_t>(1); } if (interpolation_mode == GridSamplerInterpolation::Bilinear) { ix = static_cast<scalar_t>(ixf); iy = static_cast<scalar_t>(iyf); // get NE, NW, SE, SW pixel values from (x, y) int ix_nw = static_cast<int>(::floor(ixf)); int iy_nw = static_cast<int>(::floor(iyf)); int ix_ne = ix_nw + 1; int iy_ne = iy_nw; int ix_sw = ix_nw; int iy_sw = iy_nw + 1; int ix_se = ix_nw + 1; int iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) { scalar_t gOut = *gOut_ptr_NCHW; // calculate and set grad_input safe_add_2d(gInp_ptr_NC, iy_nw, 
ix_nw, gInp_sH, gInp_sW, inp_H, inp_W, nw * gOut); safe_add_2d(gInp_ptr_NC, iy_ne, ix_ne, gInp_sH, gInp_sW, inp_H, inp_W, ne * gOut); safe_add_2d(gInp_ptr_NC, iy_sw, ix_sw, gInp_sH, gInp_sW, inp_H, inp_W, sw * gOut); safe_add_2d(gInp_ptr_NC, iy_se, ix_se, gInp_sH, gInp_sW, inp_H, inp_W, se * gOut); // calculate grad_grid if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { scalar_t nw_val = inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW]; gix -= nw_val * (iy_se - iy) * gOut; giy -= nw_val * (ix_se - ix) * gOut; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { scalar_t ne_val = inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW]; gix += ne_val * (iy_sw - iy) * gOut; giy -= ne_val * (ix - ix_sw) * gOut; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { scalar_t sw_val = inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW]; gix -= sw_val * (iy - iy_ne) * gOut; giy += sw_val * (ix_ne - ix) * gOut; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { scalar_t se_val = inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW]; gix += se_val * (iy - iy_nw) * gOut; giy += se_val * (ix - ix_nw) * gOut; } } // un-normalize grad_grid values back to [-1, 1] constraints gix = gix * (inp_W - 1.f) / 2; giy = giy * (inp_H - 1.f) / 2; // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to diectly compute gGrid_ptr_NHW // 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = gix_mult * gix; gGrid_ptr_NHW[1] = giy_mult * giy; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ixf)); int iy_nearest = static_cast<int>(::round(iyf)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; for (int c = 0; c < C; ++c, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) { // calculate and set grad_input safe_add_2d(gInp_ptr_NC, iy_nearest, ix_nearest, gInp_sH, gInp_sW, inp_H, inp_W, *gOut_ptr_NCHW); } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to diectly compute gGrid_ptr_NHW // 2. 
directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NHW[1] = static_cast<scalar_t>(0); } } } template <typename scalar_t> C10_LAUNCH_BOUNDS(1024) __global__ void grid_sampler_3d_backward_kernel( const int nthreads, TensorInfo<scalar_t, int> grad_output, TensorInfo<scalar_t, int> input, TensorInfo<scalar_t, int> grid, TensorInfo<scalar_t, int> grad_input, // initialized to zeros TensorInfo<scalar_t, int> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode) { int C = input.sizes[1]; int inp_D = input.sizes[2]; int inp_H = input.sizes[3]; int inp_W = input.sizes[4]; int out_D = grid.sizes[1]; int out_H = grid.sizes[2]; int out_W = grid.sizes[3]; int inp_sN = input.strides[0]; int inp_sC = input.strides[1]; int inp_sD = input.strides[2]; int inp_sH = input.strides[3]; int inp_sW = input.strides[4]; int grid_sN = grid.strides[0]; int grid_sD = grid.strides[1]; int grid_sH = grid.strides[2]; int grid_sW = grid.strides[3]; int grid_sCoor = grid.strides[4]; int gOut_sN = grad_output.strides[0]; int gOut_sC = grad_output.strides[1]; int gOut_sD = grad_output.strides[2]; int gOut_sH = grad_output.strides[3]; int gOut_sW = grad_output.strides[4]; int gInp_sN = grad_input.strides[0]; int gInp_sC = grad_input.strides[1]; int gInp_sD = grad_input.strides[2]; int gInp_sH = grad_input.strides[3]; int gInp_sW = grad_input.strides[4]; int gGrid_sW = grad_grid.strides[3]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int d = (index / (out_H * out_W)) % out_D; const int n = index / (out_D * out_H * out_W); const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; // normalize ix, iy, iz from [-1, 1] to [0, inp_W-1] & [0, inp_H-1] & [0, inp_D-1] float ixf = ((ix + 1.f) / 2) * (inp_W - 1); float iyf = ((iy + 1.f) / 2) * (inp_H - 1); float izf = ((iz + 1.f) / 2) * (inp_D - 1); // multipliers for gradients on ix, iy, and iz // E.g., 0 for out-of-bound indices when GridSamplerPadding::Border scalar_t gix_mult, giy_mult, giz_mult; if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders ixf = clip_coordinates_set_grad(ixf, inp_W, &gix_mult); iyf = clip_coordinates_set_grad(iyf, inp_H, &giy_mult); izf = clip_coordinates_set_grad(izf, inp_D, &giz_mult); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders ixf = reflect_coordinates_set_grad(ixf, inp_W, &gix_mult); iyf = reflect_coordinates_set_grad(iyf, inp_H, &giy_mult); izf = reflect_coordinates_set_grad(izf, inp_D, &giz_mult); } else { // padding_mode == GridSamplerPadding::Zeros gix_mult = static_cast<scalar_t>(1); giy_mult = static_cast<scalar_t>(1); giz_mult = static_cast<scalar_t>(1); } if (interpolation_mode == GridSamplerInterpolation::Bilinear) { ix = static_cast<scalar_t>(ixf); iy = static_cast<scalar_t>(iyf); iz = static_cast<scalar_t>(izf); // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom int ix_tnw = static_cast<int>(::floor(ix)); int iy_tnw = static_cast<int>(::floor(iy)); int iz_tnw = static_cast<int>(::floor(iz)); int ix_tne = ix_tnw + 1; 
int iy_tne = iy_tnw; int iz_tne = iz_tnw; int ix_tsw = ix_tnw; int iy_tsw = iy_tnw + 1; int iz_tsw = iz_tnw; int ix_tse = ix_tnw + 1; int iy_tse = iy_tnw + 1; int iz_tse = iz_tnw; int ix_bnw = ix_tnw; int iy_bnw = iy_tnw; int iz_bnw = iz_tnw + 1; int ix_bne = ix_tnw + 1; int iy_bne = iy_tnw; int iz_bne = iz_tnw + 1; int ix_bsw = ix_tnw; int iy_bsw = iy_tnw + 1; int iz_bsw = iz_tnw + 1; int ix_bse = ix_tnw + 1; int iy_bse = iy_tnw + 1; int iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0), giz = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; // calculate bilinear weighted pixel value and set output pixel for (int c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC, inp_ptr_NC += inp_sC) { scalar_t gOut = *gOut_ptr_NCDHW; // calculate and set grad_input safe_add_3d(gInp_ptr_NC, iz_tnw, iy_tnw, ix_tnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tnw * gOut); safe_add_3d(gInp_ptr_NC, iz_tne, iy_tne, ix_tne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tne * gOut); safe_add_3d(gInp_ptr_NC, iz_tsw, iy_tsw, ix_tsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tsw * gOut); safe_add_3d(gInp_ptr_NC, iz_tse, iy_tse, ix_tse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tse * gOut); safe_add_3d(gInp_ptr_NC, iz_bnw, iy_bnw, ix_bnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bnw * gOut); safe_add_3d(gInp_ptr_NC, iz_bne, iy_bne, ix_bne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bne * gOut); safe_add_3d(gInp_ptr_NC, iz_bsw, iy_bsw, ix_bsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bsw * gOut); safe_add_3d(gInp_ptr_NC, iz_bse, iy_bse, ix_bse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bse * gOut); // calculate grad_grid if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { scalar_t tnw_val = inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW]; gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut; giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut; giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { scalar_t tne_val = inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW]; gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut; giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut; giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { scalar_t tsw_val = inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW]; gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut; giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut; giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { scalar_t tse_val = inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW]; gix 
+= tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut; giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut; giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { scalar_t bnw_val = inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW]; gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut; giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut; giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { scalar_t bne_val = inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW]; gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut; giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut; giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { scalar_t bsw_val = inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW]; gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut; giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut; giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { scalar_t bse_val = inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW]; gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut; giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut; giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut; } } // un-normalize grad_grid values back to [-1, 1] constraints gix = gix * (inp_W - 1) / 2; giy = giy * (inp_H - 1) / 2; giz = giz * (inp_D - 1) / 2; // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to diectly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = gix_mult * gix; gGrid_ptr_NDHW[1] = giy_mult * giy; gGrid_ptr_NDHW[2] = giz_mult * giz; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ixf)); int iy_nearest = static_cast<int>(::round(iyf)); int iz_nearest = static_cast<int>(::round(izf)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; for (int c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC) { // calculate and set grad_input safe_add_3d(gInp_ptr_NC, iz_nearest, iy_nearest, ix_nearest, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, *gOut_ptr_NCDHW); } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to diectly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[1] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[2] = static_cast<scalar_t>(0); } } } } // namespace // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. 
Tensor grid_sampler_2d_cuda(const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode) { auto N = input.size(0); auto H = grid.size(1); auto W = grid.size(2); auto output = at::empty({N, input.size(1), H, W}, input.options()); int count = static_cast<int>(N * H * W); if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "grid_sampler_2d_cuda", [&] { hipLaunchKernelGGL(( grid_sampler_2d_kernel<scalar_t>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode)); }); } return output; } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. Tensor grid_sampler_3d_cuda(const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode) { auto N = input.size(0); auto D = grid.size(1); auto H = grid.size(2); auto W = grid.size(3); auto output = at::empty({N, input.size(1), D, H, W}, input.options()); int count = static_cast<int>(N * D * H * W); if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "grid_sampler_2d_cuda", [&] { hipLaunchKernelGGL(( grid_sampler_3d_kernel<scalar_t>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode)); }); } return output; } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. std::tuple<Tensor, Tensor> grid_sampler_2d_backward_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode) { auto N = input.size(0); auto H = grid.size(1); auto W = grid.size(2); auto grad_input = at::zeros_like(input); auto grad_grid = at::empty_like(grid); int count = static_cast<int>(N * H * W); if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "grid_sampler_2d_backward_cuda", [&] { hipLaunchKernelGGL(( grid_sampler_2d_backward_kernel<scalar_t>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(grad_input), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode)); }); } return std::make_tuple(grad_input, grad_grid); } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. 
std::tuple<Tensor, Tensor> grid_sampler_3d_backward_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode) { auto N = input.size(0); auto D = grid.size(1); auto H = grid.size(2); auto W = grid.size(3); auto grad_input = at::zeros_like(input); auto grad_grid = at::empty_like(grid); int count = static_cast<int>(N * D * H * W); if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "grid_sampler_3d_backward_cuda", [&] { hipLaunchKernelGGL(( grid_sampler_3d_backward_kernel<scalar_t>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(grad_input), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode)); }); } return std::make_tuple(grad_input, grad_grid); } }} // namespace at::native
7f182784a18248e8437a27d6fd6460db0f2437ce.cu
#include <ATen/ATen.h> #include <ATen/native/GridSampler.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <c10/macros/Macros.h> namespace at { namespace native { using namespace at::cuda::detail; using at::native::detail::GridSamplerInterpolation; using at::native::detail::GridSamplerPadding; namespace { static __forceinline__ __device__ float clip_coordinates(float in, int clip_limit) { return ::min(static_cast<float>(clip_limit - 1), ::max(in, 0.f)); } // clip_coordinates_set_grad works similarly to clip_coordinates except that // it also returns the `d output / d input` via pointer argument `grad_in`. // This is useful in the backward pass of grid_sampler. template <typename scalar_t> static __forceinline__ __device__ float clip_coordinates_set_grad(float in, int clip_limit, scalar_t *grad_in) { if (in < 0.f) { *grad_in = static_cast<scalar_t>(0); return 0.f; } else { float max = static_cast<float>(clip_limit - 1); if (in > max) { *grad_in = static_cast<scalar_t>(0); return max; } else { *grad_in = static_cast<scalar_t>(1); return in; } } } static __forceinline__ __device__ float reflect_coordinates(float in, int clip_limit) { if (clip_limit == static_cast<int>(1)) { return 0.f; } in = ::fabs(in); float max = static_cast<float>(clip_limit - 1); // `fmod` returns same sign as `in`, which is positive after the `fabs` above. float extra = ::fmod(in, max); int flips = static_cast<int>(::floor(in / max)); if (flips % 2 == 0) { return extra; } else { return max - extra; } } // reflect_coordinates_set_grad works similarly to reflect_coordinates except // that it also returns the `d output / d input` via pointer argument // `grad_in`. // This is useful in the backward pass of grid_sampler. template <typename scalar_t> static __forceinline__ __device__ float reflect_coordinates_set_grad(float in, int clip_limit, scalar_t *grad_in) { if (clip_limit == static_cast<int>(1)) { *grad_in = static_cast<scalar_t>(0); return 0.f; } int grad_in_mult_; if (in < 0.f) { grad_in_mult_ = -1; in = -in; } else { grad_in_mult_ = 1; } float max = static_cast<float>(clip_limit - 1); // `fmod` returns same sign as `in`, which is positive after the `if` above. 
float extra = ::fmod(in, max); int flips = static_cast<int>(::floor(in / max)); if (flips % 2 == 0) { *grad_in = static_cast<scalar_t>(grad_in_mult_); return extra; } else { *grad_in = static_cast<scalar_t>(-grad_in_mult_); return max - extra; } } static __forceinline__ __device__ bool within_bounds_2d(int h, int w, int H, int W) { return h >= 0 && h < H && w >= 0 && w < W; } static __forceinline__ __device__ bool within_bounds_3d(int d, int h, int w, int D, int H, int W) { return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W; } template<typename scalar_t> static __forceinline__ __device__ void safe_add_2d(scalar_t *data, int h, int w, int sH, int sW, int H, int W, scalar_t delta) { if (within_bounds_2d(h, w, H, W)) { atomicAdd(data + h * sH + w * sW, delta); } } template<typename scalar_t> static __forceinline__ __device__ void safe_add_3d(scalar_t *data, int d, int h, int w, int sD, int sH, int sW, int D, int H, int W, scalar_t delta) { if (within_bounds_3d(d, h, w, D, H, W)) { atomicAdd(data + d * sD + h * sH + w * sW, delta); } } template <typename scalar_t> C10_LAUNCH_BOUNDS(1024) __global__ void grid_sampler_2d_kernel( const int nthreads, TensorInfo<scalar_t, int> input, TensorInfo<scalar_t, int> grid, TensorInfo<scalar_t, int> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode) { int C = input.sizes[1]; int inp_H = input.sizes[2]; int inp_W = input.sizes[3]; int out_H = grid.sizes[1]; int out_W = grid.sizes[2]; int inp_sN = input.strides[0]; int inp_sC = input.strides[1]; int inp_sH = input.strides[2]; int inp_sW = input.strides[3]; int grid_sN = grid.strides[0]; int grid_sH = grid.strides[1]; int grid_sW = grid.strides[2]; int grid_sCoor = grid.strides[3]; int out_sN = output.strides[0]; int out_sC = output.strides[1]; int out_sH = output.strides[2]; int out_sW = output.strides[3]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int n = index / (out_H * out_W); const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; // normalize ix, iy from [-1, 1] to [0, IH-1] & [0, IW-1] float ixf = ((ix + 1.f) / 2) * (inp_W - 1); float iyf = ((iy + 1.f) / 2) * (inp_H - 1); if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders ixf = clip_coordinates(ixf, inp_W); iyf = clip_coordinates(iyf, inp_H); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders ixf = reflect_coordinates(ixf, inp_W); iyf = reflect_coordinates(iyf, inp_H); } ix = static_cast<scalar_t>(ixf); iy = static_cast<scalar_t>(iyf); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get NE, NW, SE, SW pixel values from (x, y) int ix_nw = static_cast<int>(::floor(ixf)); int iy_nw = static_cast<int>(::floor(iyf)); int ix_ne = ix_nw + 1; int iy_ne = iy_nw; int ix_sw = ix_nw; int iy_sw = iy_nw + 1; int ix_se = ix_nw + 1; int iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); // calculate bilinear weighted pixel value and set output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, 
out_ptr_NCHW += out_sC) { *out_ptr_NCHW = static_cast<scalar_t>(0); if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ixf)); int iy_nearest = static_cast<int>(::round(iyf)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) { *out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCHW = static_cast<scalar_t>(0); } } } } } template <typename scalar_t> C10_LAUNCH_BOUNDS(1024) __global__ void grid_sampler_3d_kernel( const int nthreads, TensorInfo<scalar_t, int> input, TensorInfo<scalar_t, int> grid, TensorInfo<scalar_t, int> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode) { int C = input.sizes[1]; int inp_D = input.sizes[2]; int inp_H = input.sizes[3]; int inp_W = input.sizes[4]; int out_D = grid.sizes[1]; int out_H = grid.sizes[2]; int out_W = grid.sizes[3]; int inp_sN = input.strides[0]; int inp_sC = input.strides[1]; int inp_sD = input.strides[2]; int inp_sH = input.strides[3]; int inp_sW = input.strides[4]; int grid_sN = grid.strides[0]; int grid_sD = grid.strides[1]; int grid_sH = grid.strides[2]; int grid_sW = grid.strides[3]; int grid_sCoor = grid.strides[4]; int out_sN = output.strides[0]; int out_sC = output.strides[1]; int out_sD = output.strides[2]; int out_sH = output.strides[3]; int out_sW = output.strides[4]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int d = (index / (out_H * out_W)) % out_D; const int n = index / (out_D * out_H * out_W); const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; // normalize ix, iy, iz from [-1, 1] to [0, inp_W-1] & [0, inp_H-1] & [0, inp_D-1] float ixf = ((ix + 1.f) / 2) * (inp_W - 1); float iyf = ((iy + 1.f) / 2) * (inp_H - 1); float izf = ((iz + 1.f) / 2) * (inp_D - 1); if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders ixf = clip_coordinates(ixf, inp_W); iyf = clip_coordinates(iyf, inp_H); izf = clip_coordinates(izf, inp_D); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders ixf = reflect_coordinates(ixf, inp_W); iyf = reflect_coordinates(iyf, inp_H); izf = reflect_coordinates(izf, inp_D); } if (interpolation_mode == GridSamplerInterpolation::Bilinear) { ix = static_cast<scalar_t>(ixf); iy = static_cast<scalar_t>(iyf); iz = static_cast<scalar_t>(izf); // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom int ix_tnw = 
static_cast<int>(::floor(ix)); int iy_tnw = static_cast<int>(::floor(iy)); int iz_tnw = static_cast<int>(::floor(iz)); int ix_tne = ix_tnw + 1; int iy_tne = iy_tnw; int iz_tne = iz_tnw; int ix_tsw = ix_tnw; int iy_tsw = iy_tnw + 1; int iz_tsw = iz_tnw; int ix_tse = ix_tnw + 1; int iy_tse = iy_tnw + 1; int iz_tse = iz_tnw; int ix_bnw = ix_tnw; int iy_bnw = iy_tnw; int iz_bnw = iz_tnw + 1; int ix_bne = ix_tnw + 1; int iy_bne = iy_tnw; int iz_bne = iz_tnw + 1; int ix_bsw = ix_tnw; int iy_bsw = iy_tnw + 1; int iz_bsw = iz_tnw + 1; int ix_bse = ix_tnw + 1; int iy_bse = iy_tnw + 1; int iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { // (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * tne // + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * tse // + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * bne // + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * bse *out_ptr_NCDHW = static_cast<scalar_t>(0); if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ixf)); int iy_nearest = static_cast<int>(::round(iyf)); int iz_nearest = static_cast<int>(::round(izf)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW = 
inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCDHW = static_cast<scalar_t>(0); } } } } } template <typename scalar_t> C10_LAUNCH_BOUNDS(1024) __global__ void grid_sampler_2d_backward_kernel( const int nthreads, TensorInfo<scalar_t, int> grad_output, TensorInfo<scalar_t, int> input, TensorInfo<scalar_t, int> grid, TensorInfo<scalar_t, int> grad_input, // initialized to zeros TensorInfo<scalar_t, int> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode) { int C = input.sizes[1]; int inp_H = input.sizes[2]; int inp_W = input.sizes[3]; int out_H = grid.sizes[1]; int out_W = grid.sizes[2]; int inp_sN = input.strides[0]; int inp_sC = input.strides[1]; int inp_sH = input.strides[2]; int inp_sW = input.strides[3]; int grid_sN = grid.strides[0]; int grid_sH = grid.strides[1]; int grid_sW = grid.strides[2]; int grid_sCoor = grid.strides[3]; int gOut_sN = grad_output.strides[0]; int gOut_sC = grad_output.strides[1]; int gOut_sH = grad_output.strides[2]; int gOut_sW = grad_output.strides[3]; int gInp_sN = grad_input.strides[0]; int gInp_sC = grad_input.strides[1]; int gInp_sH = grad_input.strides[2]; int gInp_sW = grad_input.strides[3]; int gGrid_sW = grad_grid.strides[2]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int n = index / (out_H * out_W); const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; // normalize ix, iy from [-1, 1] to [0, IH-1] & [0, IW-1] float ixf = ((ix + 1.f) / 2) * (inp_W - 1); float iyf = ((iy + 1.f) / 2) * (inp_H - 1); // multipliers for gradients on ix and iy // E.g., 0 for out-of-bound indices when GridSamplerPadding::Border scalar_t gix_mult, giy_mult; if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders ixf = clip_coordinates_set_grad(ixf, inp_W, &gix_mult); iyf = clip_coordinates_set_grad(iyf, inp_H, &giy_mult); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders ixf = reflect_coordinates_set_grad(ixf, inp_W, &gix_mult); iyf = reflect_coordinates_set_grad(iyf, inp_H, &giy_mult); } else { // padding_mode == GridSamplerPadding::Zeros gix_mult = static_cast<scalar_t>(1); giy_mult = static_cast<scalar_t>(1); } if (interpolation_mode == GridSamplerInterpolation::Bilinear) { ix = static_cast<scalar_t>(ixf); iy = static_cast<scalar_t>(iyf); // get NE, NW, SE, SW pixel values from (x, y) int ix_nw = static_cast<int>(::floor(ixf)); int iy_nw = static_cast<int>(::floor(iyf)); int ix_ne = ix_nw + 1; int iy_ne = iy_nw; int ix_sw = ix_nw; int iy_sw = iy_nw + 1; int ix_se = ix_nw + 1; int iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) { scalar_t gOut = *gOut_ptr_NCHW; // calculate and set grad_input safe_add_2d(gInp_ptr_NC, iy_nw, 
ix_nw, gInp_sH, gInp_sW, inp_H, inp_W, nw * gOut); safe_add_2d(gInp_ptr_NC, iy_ne, ix_ne, gInp_sH, gInp_sW, inp_H, inp_W, ne * gOut); safe_add_2d(gInp_ptr_NC, iy_sw, ix_sw, gInp_sH, gInp_sW, inp_H, inp_W, sw * gOut); safe_add_2d(gInp_ptr_NC, iy_se, ix_se, gInp_sH, gInp_sW, inp_H, inp_W, se * gOut); // calculate grad_grid if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { scalar_t nw_val = inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW]; gix -= nw_val * (iy_se - iy) * gOut; giy -= nw_val * (ix_se - ix) * gOut; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { scalar_t ne_val = inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW]; gix += ne_val * (iy_sw - iy) * gOut; giy -= ne_val * (ix - ix_sw) * gOut; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { scalar_t sw_val = inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW]; gix -= sw_val * (iy - iy_ne) * gOut; giy += sw_val * (ix_ne - ix) * gOut; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { scalar_t se_val = inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW]; gix += se_val * (iy - iy_nw) * gOut; giy += se_val * (ix - ix_nw) * gOut; } } // un-normalize grad_grid values back to [-1, 1] constraints gix = gix * (inp_W - 1.f) / 2; giy = giy * (inp_H - 1.f) / 2; // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to diectly compute gGrid_ptr_NHW // 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = gix_mult * gix; gGrid_ptr_NHW[1] = giy_mult * giy; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ixf)); int iy_nearest = static_cast<int>(::round(iyf)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; for (int c = 0; c < C; ++c, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) { // calculate and set grad_input safe_add_2d(gInp_ptr_NC, iy_nearest, ix_nearest, gInp_sH, gInp_sW, inp_H, inp_W, *gOut_ptr_NCHW); } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to diectly compute gGrid_ptr_NHW // 2. 
directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NHW[1] = static_cast<scalar_t>(0); } } } template <typename scalar_t> C10_LAUNCH_BOUNDS(1024) __global__ void grid_sampler_3d_backward_kernel( const int nthreads, TensorInfo<scalar_t, int> grad_output, TensorInfo<scalar_t, int> input, TensorInfo<scalar_t, int> grid, TensorInfo<scalar_t, int> grad_input, // initialized to zeros TensorInfo<scalar_t, int> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode) { int C = input.sizes[1]; int inp_D = input.sizes[2]; int inp_H = input.sizes[3]; int inp_W = input.sizes[4]; int out_D = grid.sizes[1]; int out_H = grid.sizes[2]; int out_W = grid.sizes[3]; int inp_sN = input.strides[0]; int inp_sC = input.strides[1]; int inp_sD = input.strides[2]; int inp_sH = input.strides[3]; int inp_sW = input.strides[4]; int grid_sN = grid.strides[0]; int grid_sD = grid.strides[1]; int grid_sH = grid.strides[2]; int grid_sW = grid.strides[3]; int grid_sCoor = grid.strides[4]; int gOut_sN = grad_output.strides[0]; int gOut_sC = grad_output.strides[1]; int gOut_sD = grad_output.strides[2]; int gOut_sH = grad_output.strides[3]; int gOut_sW = grad_output.strides[4]; int gInp_sN = grad_input.strides[0]; int gInp_sC = grad_input.strides[1]; int gInp_sD = grad_input.strides[2]; int gInp_sH = grad_input.strides[3]; int gInp_sW = grad_input.strides[4]; int gGrid_sW = grad_grid.strides[3]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int d = (index / (out_H * out_W)) % out_D; const int n = index / (out_D * out_H * out_W); const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; // normalize ix, iy, iz from [-1, 1] to [0, inp_W-1] & [0, inp_H-1] & [0, inp_D-1] float ixf = ((ix + 1.f) / 2) * (inp_W - 1); float iyf = ((iy + 1.f) / 2) * (inp_H - 1); float izf = ((iz + 1.f) / 2) * (inp_D - 1); // multipliers for gradients on ix, iy, and iz // E.g., 0 for out-of-bound indices when GridSamplerPadding::Border scalar_t gix_mult, giy_mult, giz_mult; if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders ixf = clip_coordinates_set_grad(ixf, inp_W, &gix_mult); iyf = clip_coordinates_set_grad(iyf, inp_H, &giy_mult); izf = clip_coordinates_set_grad(izf, inp_D, &giz_mult); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders ixf = reflect_coordinates_set_grad(ixf, inp_W, &gix_mult); iyf = reflect_coordinates_set_grad(iyf, inp_H, &giy_mult); izf = reflect_coordinates_set_grad(izf, inp_D, &giz_mult); } else { // padding_mode == GridSamplerPadding::Zeros gix_mult = static_cast<scalar_t>(1); giy_mult = static_cast<scalar_t>(1); giz_mult = static_cast<scalar_t>(1); } if (interpolation_mode == GridSamplerInterpolation::Bilinear) { ix = static_cast<scalar_t>(ixf); iy = static_cast<scalar_t>(iyf); iz = static_cast<scalar_t>(izf); // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom int ix_tnw = static_cast<int>(::floor(ix)); int iy_tnw = static_cast<int>(::floor(iy)); int iz_tnw = static_cast<int>(::floor(iz)); int ix_tne = ix_tnw + 1; 
int iy_tne = iy_tnw; int iz_tne = iz_tnw; int ix_tsw = ix_tnw; int iy_tsw = iy_tnw + 1; int iz_tsw = iz_tnw; int ix_tse = ix_tnw + 1; int iy_tse = iy_tnw + 1; int iz_tse = iz_tnw; int ix_bnw = ix_tnw; int iy_bnw = iy_tnw; int iz_bnw = iz_tnw + 1; int ix_bne = ix_tnw + 1; int iy_bne = iy_tnw; int iz_bne = iz_tnw + 1; int ix_bsw = ix_tnw; int iy_bsw = iy_tnw + 1; int iz_bsw = iz_tnw + 1; int ix_bse = ix_tnw + 1; int iy_bse = iy_tnw + 1; int iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0), giz = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; // calculate bilinear weighted pixel value and set output pixel for (int c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC, inp_ptr_NC += inp_sC) { scalar_t gOut = *gOut_ptr_NCDHW; // calculate and set grad_input safe_add_3d(gInp_ptr_NC, iz_tnw, iy_tnw, ix_tnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tnw * gOut); safe_add_3d(gInp_ptr_NC, iz_tne, iy_tne, ix_tne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tne * gOut); safe_add_3d(gInp_ptr_NC, iz_tsw, iy_tsw, ix_tsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tsw * gOut); safe_add_3d(gInp_ptr_NC, iz_tse, iy_tse, ix_tse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tse * gOut); safe_add_3d(gInp_ptr_NC, iz_bnw, iy_bnw, ix_bnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bnw * gOut); safe_add_3d(gInp_ptr_NC, iz_bne, iy_bne, ix_bne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bne * gOut); safe_add_3d(gInp_ptr_NC, iz_bsw, iy_bsw, ix_bsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bsw * gOut); safe_add_3d(gInp_ptr_NC, iz_bse, iy_bse, ix_bse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bse * gOut); // calculate grad_grid if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { scalar_t tnw_val = inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW]; gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut; giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut; giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { scalar_t tne_val = inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW]; gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut; giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut; giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { scalar_t tsw_val = inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW]; gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut; giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut; giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { scalar_t tse_val = inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW]; gix 
+= tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut; giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut; giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { scalar_t bnw_val = inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW]; gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut; giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut; giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { scalar_t bne_val = inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW]; gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut; giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut; giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { scalar_t bsw_val = inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW]; gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut; giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut; giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { scalar_t bse_val = inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW]; gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut; giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut; giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut; } } // un-normalize grad_grid values back to [-1, 1] constraints gix = gix * (inp_W - 1) / 2; giy = giy * (inp_H - 1) / 2; giz = giz * (inp_D - 1) / 2; // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to diectly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = gix_mult * gix; gGrid_ptr_NDHW[1] = giy_mult * giy; gGrid_ptr_NDHW[2] = giz_mult * giz; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ixf)); int iy_nearest = static_cast<int>(::round(iyf)); int iz_nearest = static_cast<int>(::round(izf)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; for (int c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC) { // calculate and set grad_input safe_add_3d(gInp_ptr_NC, iz_nearest, iy_nearest, ix_nearest, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, *gOut_ptr_NCDHW); } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to diectly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[1] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[2] = static_cast<scalar_t>(0); } } } } // namespace // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. 
Tensor grid_sampler_2d_cuda(const Tensor& input, const Tensor& grid,
                            int64_t interpolation_mode, int64_t padding_mode) {
  auto N = input.size(0);
  auto H = grid.size(1);
  auto W = grid.size(2);
  auto output = at::empty({N, input.size(1), H, W}, input.options());
  int count = static_cast<int>(N * H * W);
  if (count > 0) {
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "grid_sampler_2d_cuda", [&] {
      grid_sampler_2d_kernel<scalar_t>
        <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
          count,
          getTensorInfo<scalar_t, int>(input),
          getTensorInfo<scalar_t, int>(grid),
          getTensorInfo<scalar_t, int>(output),
          static_cast<GridSamplerInterpolation>(interpolation_mode),
          static_cast<GridSamplerPadding>(padding_mode));
    });
  }
  return output;
}

// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
Tensor grid_sampler_3d_cuda(const Tensor& input, const Tensor& grid,
                            int64_t interpolation_mode, int64_t padding_mode) {
  auto N = input.size(0);
  auto D = grid.size(1);
  auto H = grid.size(2);
  auto W = grid.size(3);
  auto output = at::empty({N, input.size(1), D, H, W}, input.options());
  int count = static_cast<int>(N * D * H * W);
  if (count > 0) {
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "grid_sampler_3d_cuda", [&] {
      grid_sampler_3d_kernel<scalar_t>
        <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
          count,
          getTensorInfo<scalar_t, int>(input),
          getTensorInfo<scalar_t, int>(grid),
          getTensorInfo<scalar_t, int>(output),
          static_cast<GridSamplerInterpolation>(interpolation_mode),
          static_cast<GridSamplerPadding>(padding_mode));
    });
  }
  return output;
}

// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
std::tuple<Tensor, Tensor>
grid_sampler_2d_backward_cuda(const Tensor& grad_output, const Tensor& input,
                              const Tensor& grid, int64_t interpolation_mode,
                              int64_t padding_mode) {
  auto N = input.size(0);
  auto H = grid.size(1);
  auto W = grid.size(2);
  auto grad_input = at::zeros_like(input);
  auto grad_grid = at::empty_like(grid);
  int count = static_cast<int>(N * H * W);
  if (count > 0) {
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "grid_sampler_2d_backward_cuda", [&] {
      grid_sampler_2d_backward_kernel<scalar_t>
        <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
          count,
          getTensorInfo<scalar_t, int>(grad_output),
          getTensorInfo<scalar_t, int>(input),
          getTensorInfo<scalar_t, int>(grid),
          getTensorInfo<scalar_t, int>(grad_input),
          getTensorInfo<scalar_t, int>(grad_grid),
          static_cast<GridSamplerInterpolation>(interpolation_mode),
          static_cast<GridSamplerPadding>(padding_mode));
    });
  }
  return std::make_tuple(grad_input, grad_grid);
}

// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
std::tuple<Tensor, Tensor> grid_sampler_3d_backward_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode) { auto N = input.size(0); auto D = grid.size(1); auto H = grid.size(2); auto W = grid.size(3); auto grad_input = at::zeros_like(input); auto grad_grid = at::empty_like(grid); int count = static_cast<int>(N * D * H * W); if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "grid_sampler_3d_backward_cuda", [&] { grid_sampler_3d_backward_kernel<scalar_t> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(grad_input), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode)); }); } return std::make_tuple(grad_input, grad_grid); } }} // namespace at::native
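The only systematic difference between the .cu file above and its .hip counterpart is the kernel-launch syntax that hipify rewrites. A minimal, self-contained CUDA sketch of that mapping follows (illustrative only, not part of the ATen sources; the kernel `scale` and its launch configuration are made up for this example):

#include <cstdio>

__global__ void scale(float* x, int n, float a) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

int main() {
  const int n = 1 << 10;
  float* x = nullptr;
  cudaMalloc(&x, n * sizeof(float));
  cudaMemset(x, 0, n * sizeof(float));
  // CUDA form, as used in the .cu file above:
  scale<<<(n + 255) / 256, 256, 0, 0>>>(x, n, 2.0f);
  // After hipify, the same launch appears in the .hip file as:
  //   hipLaunchKernelGGL((scale), dim3((n + 255) / 256), dim3(256), 0, 0, x, n, 2.0f);
  cudaDeviceSynchronize();
  cudaFree(x);
  printf("done\n");
  return 0;
}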
ba9c8cf0d1dfd7913f87dcc3f5bda26ce671506b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "DP.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; char *dT_seq = NULL; hipMalloc(&dT_seq, XSIZE*YSIZE); char *dTrace = NULL; hipMalloc(&dTrace, XSIZE*YSIZE); int *dScore = NULL; hipMalloc(&dScore, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( DP), dim3(gridBlock),dim3(threadBlock), 0, 0, dT_seq,dTrace,dScore); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( DP), dim3(gridBlock),dim3(threadBlock), 0, 0, dT_seq,dTrace,dScore); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( DP), dim3(gridBlock),dim3(threadBlock), 0, 0, dT_seq,dTrace,dScore); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
ba9c8cf0d1dfd7913f87dcc3f5bda26ce671506b.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "DP.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; char *dT_seq = NULL; cudaMalloc(&dT_seq, XSIZE*YSIZE); char *dTrace = NULL; cudaMalloc(&dTrace, XSIZE*YSIZE); int *dScore = NULL; cudaMalloc(&dScore, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); DP<<<gridBlock,threadBlock>>>(dT_seq,dTrace,dScore); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { DP<<<gridBlock,threadBlock>>>(dT_seq,dTrace,dScore); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { DP<<<gridBlock,threadBlock>>>(dT_seq,dTrace,dScore); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
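Both benchmark variants above read steady_clock immediately after enqueuing 1000 asynchronous launches, without synchronizing first, so the reported time may reflect mostly enqueue overhead rather than kernel execution. A minimal sketch of event-based timing that does wait for completion (an illustrative assumption: `noop` stands in for DP and the launch configuration is invented):

#include <cstdio>

__global__ void noop() {}

int main() {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  for (int i = 0; i < 1000; ++i) {
    noop<<<dim3(64), dim3(256)>>>();
  }
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);              // wait until all 1000 launches finish
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);  // elapsed GPU time in milliseconds
  printf("[%f ms for 1000 launches]\n", ms);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return 0;
}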
b6dd12a47c190752a84057d4a4fe213b9913d419.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "hessianKernelO.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_output = NULL; hipMalloc(&d_output, XSIZE*YSIZE); float *d_output_theta = NULL; hipMalloc(&d_output_theta, XSIZE*YSIZE); float *d_output_phi = NULL; hipMalloc(&d_output_phi, XSIZE*YSIZE); const float *d_gxx = NULL; hipMalloc(&d_gxx, XSIZE*YSIZE); const float *d_gxy = NULL; hipMalloc(&d_gxy, XSIZE*YSIZE); const float *d_gxz = NULL; hipMalloc(&d_gxz, XSIZE*YSIZE); const float *d_gyy = NULL; hipMalloc(&d_gyy, XSIZE*YSIZE); const float *d_gyz = NULL; hipMalloc(&d_gyz, XSIZE*YSIZE); const float *d_gzz = NULL; hipMalloc(&d_gzz, XSIZE*YSIZE); float sigma = 1; int imageW = 1; int imageH = 1; int imageD = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( hessianKernelO), dim3(gridBlock),dim3(threadBlock), 0, 0, d_output,d_output_theta,d_output_phi,d_gxx,d_gxy,d_gxz,d_gyy,d_gyz,d_gzz,sigma,imageW,imageH,imageD); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( hessianKernelO), dim3(gridBlock),dim3(threadBlock), 0, 0, d_output,d_output_theta,d_output_phi,d_gxx,d_gxy,d_gxz,d_gyy,d_gyz,d_gzz,sigma,imageW,imageH,imageD); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( hessianKernelO), dim3(gridBlock),dim3(threadBlock), 0, 0, d_output,d_output_theta,d_output_phi,d_gxx,d_gxy,d_gxz,d_gyy,d_gyz,d_gzz,sigma,imageW,imageH,imageD); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b6dd12a47c190752a84057d4a4fe213b9913d419.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "hessianKernelO.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_output = NULL; cudaMalloc(&d_output, XSIZE*YSIZE); float *d_output_theta = NULL; cudaMalloc(&d_output_theta, XSIZE*YSIZE); float *d_output_phi = NULL; cudaMalloc(&d_output_phi, XSIZE*YSIZE); const float *d_gxx = NULL; cudaMalloc(&d_gxx, XSIZE*YSIZE); const float *d_gxy = NULL; cudaMalloc(&d_gxy, XSIZE*YSIZE); const float *d_gxz = NULL; cudaMalloc(&d_gxz, XSIZE*YSIZE); const float *d_gyy = NULL; cudaMalloc(&d_gyy, XSIZE*YSIZE); const float *d_gyz = NULL; cudaMalloc(&d_gyz, XSIZE*YSIZE); const float *d_gzz = NULL; cudaMalloc(&d_gzz, XSIZE*YSIZE); float sigma = 1; int imageW = 1; int imageH = 1; int imageD = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); hessianKernelO<<<gridBlock,threadBlock>>>(d_output,d_output_theta,d_output_phi,d_gxx,d_gxy,d_gxz,d_gyy,d_gyz,d_gzz,sigma,imageW,imageH,imageD); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { hessianKernelO<<<gridBlock,threadBlock>>>(d_output,d_output_theta,d_output_phi,d_gxx,d_gxy,d_gxz,d_gyy,d_gyz,d_gzz,sigma,imageW,imageH,imageD); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { hessianKernelO<<<gridBlock,threadBlock>>>(d_output,d_output_theta,d_output_phi,d_gxx,d_gxy,d_gxz,d_gyy,d_gyz,d_gzz,sigma,imageW,imageH,imageD); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
fd648a9cae895014685c42498c363bdf42a0fd6e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>

// CUDA kernel to add elements of two arrays
__global__ void add(int n, float *x, float *y)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)
    y[i] = x[i] + y[i];
}

int main(void)
{
  int N = 1<<20;
  float *x, *y;

  // Allocate Unified Memory -- accessible from CPU or GPU
  hipMallocManaged(&x, N*sizeof(float));
  hipMallocManaged(&y, N*sizeof(float));

  // initialize x and y arrays on the host
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }

  int device = -1;
  hipGetDevice(&device);
  hipMemPrefetchAsync(x, N*sizeof(float), device, NULL);
  hipMemPrefetchAsync(y, N*sizeof(float), device, NULL);

  // Launch kernel on 1M elements on the GPU
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;
  hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);

  // Wait for GPU to finish before accessing on host
  hipDeviceSynchronize();

  // Check for errors (all values should be 3.0f)
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;

  // Free memory
  hipFree(x);
  hipFree(y);

  return 0;
}
fd648a9cae895014685c42498c363bdf42a0fd6e.cu
#include <iostream>
#include <math.h>

// CUDA kernel to add elements of two arrays
__global__ void add(int n, float *x, float *y)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)
    y[i] = x[i] + y[i];
}

int main(void)
{
  int N = 1<<20;
  float *x, *y;

  // Allocate Unified Memory -- accessible from CPU or GPU
  cudaMallocManaged(&x, N*sizeof(float));
  cudaMallocManaged(&y, N*sizeof(float));

  // initialize x and y arrays on the host
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }

  int device = -1;
  cudaGetDevice(&device);
  cudaMemPrefetchAsync(x, N*sizeof(float), device, NULL);
  cudaMemPrefetchAsync(y, N*sizeof(float), device, NULL);

  // Launch kernel on 1M elements on the GPU
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;
  add<<<numBlocks, blockSize>>>(N, x, y);

  // Wait for GPU to finish before accessing on host
  cudaDeviceSynchronize();

  // Check for errors (all values should be 3.0f)
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;

  // Free memory
  cudaFree(x);
  cudaFree(y);

  return 0;
}
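Neither file in the pair above checks the kernel launch or the synchronization for errors. A small fragment one might splice into main() right after the add<<<...>>> launch (an illustrative assumption, not part of the original sources; it relies only on the <iostream> header already included there):

  cudaError_t err = cudaGetLastError();            // error from the launch itself
  if (err != cudaSuccess)
    std::cerr << "launch failed: " << cudaGetErrorString(err) << std::endl;
  err = cudaDeviceSynchronize();                   // error from kernel execution
  if (err != cudaSuccess)
    std::cerr << "kernel failed: " << cudaGetErrorString(err) << std::endl;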
5ed740e3a4fb87e7ded21d2cada7360f81540fe1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" #include "device_atomic_functions.h" #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/custom_layers.hpp" namespace caffe { template <typename Dtype> __global__ void FixCoordinate(const int n, Dtype* in_out, Dtype min_value, Dtype max_value) { CUDA_KERNEL_LOOP(index, n) { in_out[index] = (in_out[index] < min_value && in_out[index] > min_value - 1e-4) ? min_value : in_out[index]; in_out[index] = (in_out[index] > max_value && in_out[index] < (max_value + 1e-4)) ? max_value : in_out[index]; } } template <typename Dtype> __global__ void TransformerForward(const int num, const int channels, const int spatial_dim, const int height, const int width, const Dtype* data, Dtype* CoordinateSource_data, Dtype* transformed_data) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; int h = s / width; int w = s % width; Dtype x = CoordinateSource_data[n * 2 * spatial_dim + h * width + w] * height / 2 + (Dtype)height / 2; Dtype y = CoordinateSource_data[n * 2 * spatial_dim + spatial_dim + h * width + w] * width / 2 + (Dtype)width / 2; if (x >= 0 && x <= height - 1 && y >= 0 && y <= width - 1) { for (int c = 0; c < channels; c++) { for (int xx = floor(x); xx <= ceil(x); xx++) { for (int yy = floor(y); yy <= ceil(y); yy++) { transformed_data[(((n * channels + c) * height + h) * width) + w] += data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (1 - abs(y - yy)); } } } } } } template <typename Dtype> __global__ void TransformerBackward(const int num, const int channels, const int spatial_dim, const int height, const int width, const Dtype* data, const Dtype* CoordinateSource_data, const Dtype* top_diff, Dtype* data_diff, Dtype* CoordinateSource_diff); template <> __global__ void TransformerBackward<float>(const int num, const int channels, const int spatial_dim, const int height, const int width, const float* data, const float* CoordinateSource_data, const float* top_diff, float* data_diff, float* CoordinateSource_diff) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; int h = s / width; int w = s % width; float x = CoordinateSource_data[n * 2 * spatial_dim + h * width + w] * height / 2 + (float)height / 2; float y = CoordinateSource_data[n * 2 * spatial_dim + spatial_dim + h * width + w] * width / 2 + (float)width / 2; if (x >= 0 && x <= height - 1 && y >= 0 && y <= width - 1) { for (int c = 0; c < channels; c++) { for (int xx = floor(x); xx <= ceil(x); xx++) { for (int yy = floor(y); yy <= ceil(y); yy++) { atomicAdd(&data_diff[(((n * channels + c) * height + xx) * width) + yy], top_diff[(((n * channels + c) * height + h) * width) + w] * (1 - abs(x - xx)) * (1 - abs(y - yy))); //printf("(%d,%d,%d,%d)(%f,%f)(%d,%d)(%f,%f)\n", n, c, h, w, x, y, xx, yy, data_diff[(((n * channels + c) * height + xx) * width) + yy], top_diff[(((n * channels + c) * height + h) * width) + w]); if ((xx - x)>float(0)) CoordinateSource_diff[n * 2 * spatial_dim + h * width + w] += top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(y - yy)) * (float)height / 2; else CoordinateSource_diff[n * 2 * spatial_dim + h * width + w] -= top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + 
xx) * width) + yy] * (1 - abs(y - yy)) * (float)height / 2; if ((yy - y) > float(0)) CoordinateSource_diff[n * 2 * spatial_dim + spatial_dim + h * width + w] += top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (float)width / 2; else CoordinateSource_diff[n * 2 * spatial_dim + spatial_dim + h * width + w] -= top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (float)width / 2; } } } } } } #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif template <> __global__ void TransformerBackward<double>(const int num, const int channels, const int spatial_dim, const int height, const int width, const double* data, const double* CoordinateSource_data, const double* top_diff, double* data_diff, double* CoordinateSource_diff) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; int h = s / width; int w = s % width; double x = CoordinateSource_data[n * 2 * spatial_dim + h * width + w] * height / 2 + (double)height / 2; double y = CoordinateSource_data[n * 2 * spatial_dim + spatial_dim + h * width + w] * width / 2 + (double)width / 2; if (x >= 0 && x <= height - 1 && y >= 0 && y <= width - 1) { for (int c = 0; c < channels; c++) { for (int xx = floor(x); xx <= ceil(x); xx++) { for (int yy = floor(y); yy <= ceil(y); yy++) { //atomicAdd do not support double float. Please avoid using Net<double>. 
atomicAdd(&data_diff[(((n * channels + c) * height + xx) * width) + yy], top_diff[(((n * channels + c) * height + h) * width) + w] * (1 - abs(x - xx)) * (1 - abs(y - yy))); //printf("(%d,%d,%d,%d)(%f,%f)(%d,%d)(%f,%f)\n", n, c, h, w, x, y, xx, yy, data_diff[(((n * channels + c) * height + xx) * width) + yy], top_diff[(((n * channels + c) * height + h) * width) + w]); if ((xx - x)>double(0)) CoordinateSource_diff[n * 2 * spatial_dim + h * width + w] += top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(y - yy)) * (double)height / 2; else CoordinateSource_diff[n * 2 * spatial_dim + h * width + w] -= top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(y - yy)) * (double)height / 2; if ((yy - y) > double(0)) CoordinateSource_diff[n * 2 * spatial_dim + spatial_dim + h * width + w] += top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (double)width / 2; else CoordinateSource_diff[n * 2 * spatial_dim + spatial_dim + h * width + w] -= top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (double)width / 2; } } } } } } template <typename Dtype> void TransformerLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* theta_data = bottom[1]->gpu_data(); const Dtype* CoordinateTarget_data = CoordinateTarget.gpu_data(); Dtype* CoordinateSource_data = CoordinateSource.mutable_gpu_data(); int num = bottom[0]->shape(0); int channels = bottom[0]->shape(1); int spatial_dim = bottom[0]->shape(2) * bottom[0]->shape(3); caffe_gpu_set<Dtype>(top[0]->count(), 0, top_data);//why memset cause error? 
for (int n = 0; n < num; n++) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 2, spatial_dim, 3, Dtype(1), theta_data + n * 6, CoordinateTarget_data, Dtype(0), CoordinateSource_data + n * 2 * spatial_dim); FixCoordinate<Dtype> << <CAFFE_GET_BLOCKS(spatial_dim), CAFFE_CUDA_NUM_THREADS >> >( spatial_dim, CoordinateSource_data + n * 2 * spatial_dim, -1, 1 - 2 / (Dtype)bottom[0]->shape(2));//height = 10, then max = 9/5-1=0.8 FixCoordinate<Dtype> << <CAFFE_GET_BLOCKS(spatial_dim), CAFFE_CUDA_NUM_THREADS >> >( spatial_dim, CoordinateSource_data + n * 2 * spatial_dim + spatial_dim, -1, 1 - 2 / (Dtype)bottom[0]->shape(3)); } TransformerForward<Dtype> << <CAFFE_GET_BLOCKS(num * spatial_dim), CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, bottom[0]->shape(2), bottom[0]->shape(3), bottom_data, CoordinateSource_data, top_data); } template <typename Dtype> void TransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); Dtype* data_diff = bottom[0]->mutable_gpu_diff(); Dtype* theta_diff = bottom[1]->mutable_gpu_diff(); int num = bottom[0]->shape(0); int channels = bottom[0]->shape(1); int spatial_dim = bottom[0]->shape(2) * bottom[0]->shape(3); const Dtype* CoordinateTarget_data = CoordinateTarget.gpu_data(); const Dtype* CoordinateSource_data = CoordinateSource.gpu_data(); Dtype* CoordinateSource_diff = CoordinateSource.mutable_gpu_diff(); caffe_gpu_set<Dtype>(bottom[0]->count(), 0, data_diff); caffe_gpu_set<Dtype>(CoordinateSource.count(), 0, CoordinateSource_diff); TransformerBackward<Dtype> << <CAFFE_GET_BLOCKS(num * spatial_dim), CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, bottom[0]->shape(2), bottom[0]->shape(3), bottom_data, CoordinateSource_data, top_diff, data_diff, CoordinateSource_diff); for (int n = 0; n < num; n++) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, 2, 3, spatial_dim, Dtype(1), CoordinateSource_diff + n * 2 * spatial_dim, CoordinateTarget_data, Dtype(0), theta_diff + n * 6); } } INSTANTIATE_LAYER_GPU_FUNCS(TransformerLayer); } // namespace caffe
5ed740e3a4fb87e7ded21d2cada7360f81540fe1.cu
#include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" #include "device_atomic_functions.h" #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/custom_layers.hpp" namespace caffe { template <typename Dtype> __global__ void FixCoordinate(const int n, Dtype* in_out, Dtype min_value, Dtype max_value) { CUDA_KERNEL_LOOP(index, n) { in_out[index] = (in_out[index] < min_value && in_out[index] > min_value - 1e-4) ? min_value : in_out[index]; in_out[index] = (in_out[index] > max_value && in_out[index] < (max_value + 1e-4)) ? max_value : in_out[index]; } } template <typename Dtype> __global__ void TransformerForward(const int num, const int channels, const int spatial_dim, const int height, const int width, const Dtype* data, Dtype* CoordinateSource_data, Dtype* transformed_data) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; int h = s / width; int w = s % width; Dtype x = CoordinateSource_data[n * 2 * spatial_dim + h * width + w] * height / 2 + (Dtype)height / 2; Dtype y = CoordinateSource_data[n * 2 * spatial_dim + spatial_dim + h * width + w] * width / 2 + (Dtype)width / 2; if (x >= 0 && x <= height - 1 && y >= 0 && y <= width - 1) { for (int c = 0; c < channels; c++) { for (int xx = floor(x); xx <= ceil(x); xx++) { for (int yy = floor(y); yy <= ceil(y); yy++) { transformed_data[(((n * channels + c) * height + h) * width) + w] += data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (1 - abs(y - yy)); } } } } } } template <typename Dtype> __global__ void TransformerBackward(const int num, const int channels, const int spatial_dim, const int height, const int width, const Dtype* data, const Dtype* CoordinateSource_data, const Dtype* top_diff, Dtype* data_diff, Dtype* CoordinateSource_diff); template <> __global__ void TransformerBackward<float>(const int num, const int channels, const int spatial_dim, const int height, const int width, const float* data, const float* CoordinateSource_data, const float* top_diff, float* data_diff, float* CoordinateSource_diff) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; int h = s / width; int w = s % width; float x = CoordinateSource_data[n * 2 * spatial_dim + h * width + w] * height / 2 + (float)height / 2; float y = CoordinateSource_data[n * 2 * spatial_dim + spatial_dim + h * width + w] * width / 2 + (float)width / 2; if (x >= 0 && x <= height - 1 && y >= 0 && y <= width - 1) { for (int c = 0; c < channels; c++) { for (int xx = floor(x); xx <= ceil(x); xx++) { for (int yy = floor(y); yy <= ceil(y); yy++) { atomicAdd(&data_diff[(((n * channels + c) * height + xx) * width) + yy], top_diff[(((n * channels + c) * height + h) * width) + w] * (1 - abs(x - xx)) * (1 - abs(y - yy))); //printf("(%d,%d,%d,%d)(%f,%f)(%d,%d)(%f,%f)\n", n, c, h, w, x, y, xx, yy, data_diff[(((n * channels + c) * height + xx) * width) + yy], top_diff[(((n * channels + c) * height + h) * width) + w]); if ((xx - x)>float(0)) CoordinateSource_diff[n * 2 * spatial_dim + h * width + w] += top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(y - yy)) * (float)height / 2; else CoordinateSource_diff[n * 2 * spatial_dim + h * width + w] -= top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(y - yy)) * (float)height / 2; if ((yy - y) > float(0)) 
CoordinateSource_diff[n * 2 * spatial_dim + spatial_dim + h * width + w] += top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (float)width / 2; else CoordinateSource_diff[n * 2 * spatial_dim + spatial_dim + h * width + w] -= top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (float)width / 2; } } } } } } #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif template <> __global__ void TransformerBackward<double>(const int num, const int channels, const int spatial_dim, const int height, const int width, const double* data, const double* CoordinateSource_data, const double* top_diff, double* data_diff, double* CoordinateSource_diff) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; int h = s / width; int w = s % width; double x = CoordinateSource_data[n * 2 * spatial_dim + h * width + w] * height / 2 + (double)height / 2; double y = CoordinateSource_data[n * 2 * spatial_dim + spatial_dim + h * width + w] * width / 2 + (double)width / 2; if (x >= 0 && x <= height - 1 && y >= 0 && y <= width - 1) { for (int c = 0; c < channels; c++) { for (int xx = floor(x); xx <= ceil(x); xx++) { for (int yy = floor(y); yy <= ceil(y); yy++) { //atomicAdd do not support double float. Please avoid using Net<double>. 
atomicAdd(&data_diff[(((n * channels + c) * height + xx) * width) + yy], top_diff[(((n * channels + c) * height + h) * width) + w] * (1 - abs(x - xx)) * (1 - abs(y - yy))); //printf("(%d,%d,%d,%d)(%f,%f)(%d,%d)(%f,%f)\n", n, c, h, w, x, y, xx, yy, data_diff[(((n * channels + c) * height + xx) * width) + yy], top_diff[(((n * channels + c) * height + h) * width) + w]); if ((xx - x)>double(0)) CoordinateSource_diff[n * 2 * spatial_dim + h * width + w] += top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(y - yy)) * (double)height / 2; else CoordinateSource_diff[n * 2 * spatial_dim + h * width + w] -= top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(y - yy)) * (double)height / 2; if ((yy - y) > double(0)) CoordinateSource_diff[n * 2 * spatial_dim + spatial_dim + h * width + w] += top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (double)width / 2; else CoordinateSource_diff[n * 2 * spatial_dim + spatial_dim + h * width + w] -= top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (double)width / 2; } } } } } } template <typename Dtype> void TransformerLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* theta_data = bottom[1]->gpu_data(); const Dtype* CoordinateTarget_data = CoordinateTarget.gpu_data(); Dtype* CoordinateSource_data = CoordinateSource.mutable_gpu_data(); int num = bottom[0]->shape(0); int channels = bottom[0]->shape(1); int spatial_dim = bottom[0]->shape(2) * bottom[0]->shape(3); caffe_gpu_set<Dtype>(top[0]->count(), 0, top_data);//why memset cause error? 
for (int n = 0; n < num; n++) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 2, spatial_dim, 3, Dtype(1), theta_data + n * 6, CoordinateTarget_data, Dtype(0), CoordinateSource_data + n * 2 * spatial_dim); FixCoordinate<Dtype> << <CAFFE_GET_BLOCKS(spatial_dim), CAFFE_CUDA_NUM_THREADS >> >( spatial_dim, CoordinateSource_data + n * 2 * spatial_dim, -1, 1 - 2 / (Dtype)bottom[0]->shape(2));//height = 10, then max = 9/5-1=0.8 FixCoordinate<Dtype> << <CAFFE_GET_BLOCKS(spatial_dim), CAFFE_CUDA_NUM_THREADS >> >( spatial_dim, CoordinateSource_data + n * 2 * spatial_dim + spatial_dim, -1, 1 - 2 / (Dtype)bottom[0]->shape(3)); } TransformerForward<Dtype> << <CAFFE_GET_BLOCKS(num * spatial_dim), CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, bottom[0]->shape(2), bottom[0]->shape(3), bottom_data, CoordinateSource_data, top_data); } template <typename Dtype> void TransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); Dtype* data_diff = bottom[0]->mutable_gpu_diff(); Dtype* theta_diff = bottom[1]->mutable_gpu_diff(); int num = bottom[0]->shape(0); int channels = bottom[0]->shape(1); int spatial_dim = bottom[0]->shape(2) * bottom[0]->shape(3); const Dtype* CoordinateTarget_data = CoordinateTarget.gpu_data(); const Dtype* CoordinateSource_data = CoordinateSource.gpu_data(); Dtype* CoordinateSource_diff = CoordinateSource.mutable_gpu_diff(); caffe_gpu_set<Dtype>(bottom[0]->count(), 0, data_diff); caffe_gpu_set<Dtype>(CoordinateSource.count(), 0, CoordinateSource_diff); TransformerBackward<Dtype> << <CAFFE_GET_BLOCKS(num * spatial_dim), CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, bottom[0]->shape(2), bottom[0]->shape(3), bottom_data, CoordinateSource_data, top_diff, data_diff, CoordinateSource_diff); for (int n = 0; n < num; n++) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, 2, 3, spatial_dim, Dtype(1), CoordinateSource_diff + n * 2 * spatial_dim, CoordinateTarget_data, Dtype(0), theta_diff + n * 6); } } INSTANTIATE_LAYER_GPU_FUNCS(TransformerLayer); } // namespace caffe
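The forward and backward kernels above share one bilinear weighting of the (up to) four neighbours of the sampled point (x, y). A minimal standalone sketch of that weighting — float-only, with a hypothetical helper name, not part of the original layer:

// Hypothetical helper, not in the original file: bilinear sampling with the
// same (1 - |dx|) * (1 - |dy|) weights used by TransformerForward above.
__device__ float bilinear_sample(const float* img, float x, float y,
                                 int height, int width) {
  float value = 0.0f;
  if (x < 0 || x > height - 1 || y < 0 || y > width - 1) return value;  // outside the image: contributes 0
  for (int xx = (int)floorf(x); xx <= (int)ceilf(x); ++xx) {
    for (int yy = (int)floorf(y); yy <= (int)ceilf(y); ++yy) {
      // each neighbour is weighted by its closeness to (x, y) along both axes
      value += img[xx * width + yy] * (1.0f - fabsf(x - xx)) * (1.0f - fabsf(y - yy));
    }
  }
  return value;
}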
91c3165c119f4204dcfbe09563a915f6c41039f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <cstdio> static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), file, line); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) __device__ int add(int a, int b) { return a + b; } __global__ void kernel(int a, int b, int* c) { int q = add(a,b); *c = q; } int main() { int c; int *dev_c; HANDLE_ERROR (hipMalloc((void**)&dev_c, sizeof(int))); hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, 0, 2, 7, dev_c); HANDLE_ERROR (hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost)); printf ("2 + 7 = %d\n", c); hipFree(dev_c); return 0; }
91c3165c119f4204dcfbe09563a915f6c41039f5.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cstdio> static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) __device__ int add(int a, int b) { return a + b; } __global__ void kernel(int a, int b, int* c) { int q = add(a,b); *c = q; } int main() { int c; int *dev_c; HANDLE_ERROR (cudaMalloc((void**)&dev_c, sizeof(int))); kernel<<<1, 1>>>(2, 7, dev_c); HANDLE_ERROR (cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost)); printf ("2 + 7 = %d\n", c); cudaFree(dev_c); return 0; }
7410894002492e1db1c8d9769b34275e35899242.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #define DEFAULT_ROW 16384 #define DEFAULT_COL 16384 // time stamp function in seconds double getTimeStamp() { struct timeval tv ; gettimeofday( &tv, NULL ) ; return (double) tv.tv_usec/1000000 + tv.tv_sec ; } // host side matrix addition void h_addmat(float *A, float *B, float *C, int nx, int ny){ float* ia = A, *ib =B, *ic =C; for (int iy =0; iy<ny; iy++){ for (int ix =0; ix<nx; ix++){ ic[ix] = ia[ix] + ib[ix]; //if (iy*nx + ix == 67133440) printf("the addition in host: %.6f + %.6f = %.6f\n",ia[ix],ib[ix],ic[ix]); } ia += nx; ib += nx; ic += nx; } } //host side matrix comparison int h_compareResult(float *h_C, float *d_C, int noElems){ float *host_c = h_C,*device_c = d_C; for (int i =0; i<noElems; i++){ if (*(host_c) != *(device_c)){ #ifdef DEBUG printf("the i = %d\n", i); printf("the data of CPU is %.6f\n", *(host_c)); printf("the data of GPU is %.6f\n", *(device_c)); #endif return 1; } host_c++; device_c++; } return 0; } // device-side matrix addition __global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){ // kernel code might look something like this // but you may want to pad the matrices and index into them accordingly int ix = threadIdx.x + blockIdx.x*blockDim.x ; int iy = threadIdx.y + blockIdx.y*blockDim.y ; int idx = iy*nx + ix ; if( (ix<nx) && (iy<ny) ) C[idx] = A[idx] + B[idx] ; //if (idx == 0) printf("the addition in device: %.6f + %.6f = %.6f\n",A[idx],B[idx],C[idx]); } void initData(float* add, int noElems){ int i; float a = 5.0; for (i=0; i< noElems; i++){ *(add++) = ((float)rand()/(float)(RAND_MAX)) * a; } } int main(int argc, char* argv[]){ if(argc != 3){ printf("Error: wrong number of argument\n"); exit(0); } int nx = atoi(argv[1]); int ny = atoi(argv[2]); int noElems = nx * ny; int bytes = noElems * sizeof(float); #ifdef DEBUG printf("the input row # is %d\n",nx); printf("the input col # is %d\n",ny); printf("the noElems is %d\n",noElems); printf("the bytes is %d\n",bytes); #endif // padding // alloc memeory host-side float *h_A = (float*) malloc(bytes); float *h_B = (float*) malloc(bytes); float *h_dC = (float*) malloc(bytes); //gpu result float *h_hC = (float*) malloc(bytes); // host result //float *h_dC; //hipHostMalloc(&h_dC, bytes, 0); // init matrices with random data initData(h_A, noElems); initData(h_B, noElems); //alloc memeory device-side float *d_A, *d_B, *d_C; hipMalloc( &d_A, bytes); hipMalloc( &d_B, bytes); hipMalloc( &d_C, bytes); double timeStampA = getTimeStamp() ; //transfer data to dev hipMemcpy( d_A, h_A, bytes, hipMemcpyHostToDevice) ; hipMemcpy( d_B, h_B, bytes, hipMemcpyHostToDevice) ; // note that the transfers would be twice as fast if h_A and h_B // matrices are pinned double timeStampB = getTimeStamp() ; // invoke Kernel int block_x, block_y = 1; if (nx < 1024){ block_x = nx; while ((ny + block_y-1)/block_y > 65535){ block_y ++; } while (block_x * block_y > 1024){ block_x --; } } else{ block_x = 1024; } #ifdef DEBUG printf("the final block size is x = %d and y = %d \n",block_x, block_y); printf("the final grid dimension is x = %d and y = %d \n",(nx + block_x-1)/block_x, (ny + block_y-1)/block_y); #endif dim3 block( block_x, block_y ) ; // you will want to configure this dim3 grid( (nx + block.x-1)/block.x, (ny + block.y-1)/block.y ) ; hipLaunchKernelGGL(( f_addmat), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nx, ny ) ; hipDeviceSynchronize() ; double timeStampC = 
getTimeStamp() ; //copy data back hipMemcpy( h_dC, d_C, bytes, hipMemcpyDeviceToHost ) ; double timeStampD = getTimeStamp() ; // free GPU resources hipFree( d_A ) ; hipFree( d_B ) ; hipFree( d_C ) ; hipDeviceReset() ; // check result h_addmat( h_A, h_B, h_hC, nx, ny ) ; // h_dC == h+hC??? free(h_A); free(h_B); #ifdef DEBUG float *ptr; ptr = h_dC; int n = 0; ptr = ptr + n; printf("the data of GPU at index %d before comparison is %.6f\n", n,*(ptr)); #endif if (h_compareResult(h_hC,h_dC,noElems) == 1){ printf("the two results don't match\n"); } else{ //printf("totoal= %.6f CPU_GPU_transfer = %.6f kernel =%.6f GPU_CPU_transfer= %.6f\n",timeStampD - timeStampA,timeStampB - timeStampA, timeStampC - timeStampB, timeStampD - timeStampC ); printf("%.6f %.6f %.6f %.6f\n",timeStampD - timeStampA,timeStampB - timeStampA, timeStampC - timeStampB, timeStampD - timeStampC ); //printf("CPU_GPU_transfer_time = %.6f\n",timeStampB - timeStampA ); //printf("kernel_time = %.6f\n",timeStampC - timeStampB ); //printf("GPU_CPU_transfer_time = %.6f\n",timeStampD - timeStampC ); } hipHostFree(h_hC); }
7410894002492e1db1c8d9769b34275e35899242.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #define DEFAULT_ROW 16384 #define DEFAULT_COL 16384 // time stamp function in seconds double getTimeStamp() { struct timeval tv ; gettimeofday( &tv, NULL ) ; return (double) tv.tv_usec/1000000 + tv.tv_sec ; } // host side matrix addition void h_addmat(float *A, float *B, float *C, int nx, int ny){ float* ia = A, *ib =B, *ic =C; for (int iy =0; iy<ny; iy++){ for (int ix =0; ix<nx; ix++){ ic[ix] = ia[ix] + ib[ix]; //if (iy*nx + ix == 67133440) printf("the addition in host: %.6f + %.6f = %.6f\n",ia[ix],ib[ix],ic[ix]); } ia += nx; ib += nx; ic += nx; } } //host side matrix comparison int h_compareResult(float *h_C, float *d_C, int noElems){ float *host_c = h_C,*device_c = d_C; for (int i =0; i<noElems; i++){ if (*(host_c) != *(device_c)){ #ifdef DEBUG printf("the i = %d\n", i); printf("the data of CPU is %.6f\n", *(host_c)); printf("the data of GPU is %.6f\n", *(device_c)); #endif return 1; } host_c++; device_c++; } return 0; } // device-side matrix addition __global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){ // kernel code might look something like this // but you may want to pad the matrices and index into them accordingly int ix = threadIdx.x + blockIdx.x*blockDim.x ; int iy = threadIdx.y + blockIdx.y*blockDim.y ; int idx = iy*nx + ix ; if( (ix<nx) && (iy<ny) ) C[idx] = A[idx] + B[idx] ; //if (idx == 0) printf("the addition in device: %.6f + %.6f = %.6f\n",A[idx],B[idx],C[idx]); } void initData(float* add, int noElems){ int i; float a = 5.0; for (i=0; i< noElems; i++){ *(add++) = ((float)rand()/(float)(RAND_MAX)) * a; } } int main(int argc, char* argv[]){ if(argc != 3){ printf("Error: wrong number of argument\n"); exit(0); } int nx = atoi(argv[1]); int ny = atoi(argv[2]); int noElems = nx * ny; int bytes = noElems * sizeof(float); #ifdef DEBUG printf("the input row # is %d\n",nx); printf("the input col # is %d\n",ny); printf("the noElems is %d\n",noElems); printf("the bytes is %d\n",bytes); #endif // padding // alloc memeory host-side float *h_A = (float*) malloc(bytes); float *h_B = (float*) malloc(bytes); float *h_dC = (float*) malloc(bytes); //gpu result float *h_hC = (float*) malloc(bytes); // host result //float *h_dC; //cudaHostAlloc(&h_dC, bytes, 0); // init matrices with random data initData(h_A, noElems); initData(h_B, noElems); //alloc memeory device-side float *d_A, *d_B, *d_C; cudaMalloc( &d_A, bytes); cudaMalloc( &d_B, bytes); cudaMalloc( &d_C, bytes); double timeStampA = getTimeStamp() ; //transfer data to dev cudaMemcpy( d_A, h_A, bytes, cudaMemcpyHostToDevice) ; cudaMemcpy( d_B, h_B, bytes, cudaMemcpyHostToDevice) ; // note that the transfers would be twice as fast if h_A and h_B // matrices are pinned double timeStampB = getTimeStamp() ; // invoke Kernel int block_x, block_y = 1; if (nx < 1024){ block_x = nx; while ((ny + block_y-1)/block_y > 65535){ block_y ++; } while (block_x * block_y > 1024){ block_x --; } } else{ block_x = 1024; } #ifdef DEBUG printf("the final block size is x = %d and y = %d \n",block_x, block_y); printf("the final grid dimension is x = %d and y = %d \n",(nx + block_x-1)/block_x, (ny + block_y-1)/block_y); #endif dim3 block( block_x, block_y ) ; // you will want to configure this dim3 grid( (nx + block.x-1)/block.x, (ny + block.y-1)/block.y ) ; f_addmat<<<grid, block>>>( d_A, d_B, d_C, nx, ny ) ; cudaDeviceSynchronize() ; double timeStampC = getTimeStamp() ; //copy data back cudaMemcpy( h_dC, d_C, bytes, cudaMemcpyDeviceToHost ) ; double timeStampD = getTimeStamp() ; 
// free GPU resources cudaFree( d_A ) ; cudaFree( d_B ) ; cudaFree( d_C ) ; cudaDeviceReset() ; // check result h_addmat( h_A, h_B, h_hC, nx, ny ) ; // h_dC == h+hC??? free(h_A); free(h_B); #ifdef DEBUG float *ptr; ptr = h_dC; int n = 0; ptr = ptr + n; printf("the data of GPU at index %d before comparison is %.6f\n", n,*(ptr)); #endif if (h_compareResult(h_hC,h_dC,noElems) == 1){ printf("the two results don't match\n"); } else{ //printf("totoal= %.6f CPU_GPU_transfer = %.6f kernel =%.6f GPU_CPU_transfer= %.6f\n",timeStampD - timeStampA,timeStampB - timeStampA, timeStampC - timeStampB, timeStampD - timeStampC ); printf("%.6f %.6f %.6f %.6f\n",timeStampD - timeStampA,timeStampB - timeStampA, timeStampC - timeStampB, timeStampD - timeStampC ); //printf("CPU_GPU_transfer_time = %.6f\n",timeStampB - timeStampA ); //printf("kernel_time = %.6f\n",timeStampC - timeStampB ); //printf("GPU_CPU_transfer_time = %.6f\n",timeStampD - timeStampC ); } cudaFreeHost(h_hC); }
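The in-file comment notes that the host-to-device copies would be roughly twice as fast if h_A and h_B were pinned. A sketch of that change in the CUDA spelling (the HIP version would use hipHostMalloc / hipHostFree); pinned buffers must then be released with cudaFreeHost rather than free:

// Replace the malloc calls for the two input matrices with pinned allocations ...
float *h_A, *h_B;
cudaHostAlloc((void**)&h_A, bytes, cudaHostAllocDefault);
cudaHostAlloc((void**)&h_B, bytes, cudaHostAllocDefault);
// ... initData / cudaMemcpy host-to-device stay unchanged ...
// ... and release them with the matching calls at the end of main:
cudaFreeHost(h_A);
cudaFreeHost(h_B);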
817f9c7c99c0aebdee52ad93156e745a4c7e0a54.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * MO644 Projeto Final * * Renan Gomes Pereira 103927 * * Conway's Game of Life * * CUDA shared memory version */ extern "C" { #include "conway_functions.h" } #define TILE_WIDTH 16 // for shared memory #define MASK_WIDTH 3 #define RADIUS (MASK_WIDTH-1)/2 #define SM_LINE_SIZE (TILE_WIDTH+MASK_WIDTH-1) #define SM_SIZE SM_LINE_SIZE*SM_LINE_SIZE extern char *board; extern char *temp; extern int nrows, ncols; // cuda variables char *d_board; char *d_temp; int board_size; // allocates and initialize cuda board and variables void initialize_cuda_board() { board_size = sizeof(char)*nrows*ncols; hipMalloc((void **) &d_board, board_size); hipMalloc((void **) &d_temp, board_size); hipMemcpy(d_board, board, board_size, hipMemcpyHostToDevice); hipMemcpy(d_temp, temp, board_size, hipMemcpyHostToDevice); } __device__ inline int num_neighbours_cuda(char *board) { int num_adj = 0; int i,j; for(i = threadIdx.y; i < MASK_WIDTH+threadIdx.y; i++) { for(j = threadIdx.x; j < MASK_WIDTH+threadIdx.x; j++) { if(board[i*SM_LINE_SIZE + j] == ON) num_adj++; } } return num_adj; } __global__ void copy_temp_to_board(char *board, char *temp, int nrows, int ncols) { int row = threadIdx.y + blockIdx.y * blockDim.y; int col = threadIdx.x + blockIdx.x * blockDim.x; int id = col + row*ncols; if (row < nrows && col < ncols) { board[id] = temp[id]; } } __global__ void update_board_cuda(char *board, char *temp, int nrows, int ncols) { __shared__ char shared_board_part[SM_SIZE]; int row = threadIdx.y + blockIdx.y * blockDim.y; int col = threadIdx.x + blockIdx.x * blockDim.x; int id = col + row*ncols; int neighbours, curr_r, curr_c, mapID; // fills the shared memory array for(curr_r=row-RADIUS; curr_r-row+RADIUS+threadIdx.y < MASK_WIDTH-1+TILE_WIDTH; curr_r += TILE_WIDTH) { for(curr_c=col-RADIUS; curr_c-col+RADIUS+threadIdx.x < MASK_WIDTH-1+TILE_WIDTH; curr_c += TILE_WIDTH) { mapID = SM_LINE_SIZE*(curr_r-row+RADIUS + threadIdx.y) + curr_c-col+RADIUS + threadIdx.x; if(curr_c >= 0 && curr_c < ncols && curr_r >= 0 && curr_r < nrows) { shared_board_part[mapID] = board[curr_r*ncols + curr_c]; } // borders else { shared_board_part[mapID] = 0; } } } __syncthreads(); if (row < nrows && col < ncols) { neighbours = num_neighbours_cuda(shared_board_part); // a cell is not a neighbour of itself if(board[id] == ON) neighbours--; /* Dies by underpopulation. */ if (neighbours < 2 && board[id] == ON) { temp[id] = OFF; } /* Dies by overpopulation. */ else if (neighbours > 3 && board[id] == ON) { temp[id] = OFF; } /* Become alive because of reproduction. */ else if (neighbours == 3 && board[id] == OFF) { temp[id] = ON; } /* Otherwise the cell lives with just the right company. 
*/ else { temp[id] = board[id]; } } } void update_board(int n, int nt) { printf("Running CUDA shared!\n"); // switch boards so we dont have to copy temp to board every time int switch_boards = 0; initialize_cuda_board(); dim3 dimGrid(ceil(ncols/(float)TILE_WIDTH), ceil(nrows/(float) TILE_WIDTH)); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); for(int it = 0; it < n; it++) { if(switch_boards) { hipLaunchKernelGGL(( update_board_cuda), dim3(dimGrid),dim3(dimBlock), 0, 0, d_board, d_temp, nrows, ncols); switch_boards = 0; } else { hipLaunchKernelGGL(( update_board_cuda), dim3(dimGrid),dim3(dimBlock), 0, 0, d_temp, d_board, nrows, ncols); switch_boards = 1; } hipDeviceSynchronize(); } // copies the result back to the host if(n%2 != 0) { hipMemcpy(board, d_board, board_size, hipMemcpyDeviceToHost); } else { hipMemcpy(board, d_temp, board_size, hipMemcpyDeviceToHost); hipLaunchKernelGGL(( copy_temp_to_board), dim3(dimGrid),dim3(dimBlock), 0, 0, d_board, d_temp, nrows, ncols); } hipFree(d_board); hipFree(d_temp); }
817f9c7c99c0aebdee52ad93156e745a4c7e0a54.cu
/* * MO644 Projeto Final * * Renan Gomes Pereira 103927 * * Conway's Game of Life * * CUDA shared memory version */ extern "C" { #include "conway_functions.h" } #define TILE_WIDTH 16 // for shared memory #define MASK_WIDTH 3 #define RADIUS (MASK_WIDTH-1)/2 #define SM_LINE_SIZE (TILE_WIDTH+MASK_WIDTH-1) #define SM_SIZE SM_LINE_SIZE*SM_LINE_SIZE extern char *board; extern char *temp; extern int nrows, ncols; // cuda variables char *d_board; char *d_temp; int board_size; // allocates and initialize cuda board and variables void initialize_cuda_board() { board_size = sizeof(char)*nrows*ncols; cudaMalloc((void **) &d_board, board_size); cudaMalloc((void **) &d_temp, board_size); cudaMemcpy(d_board, board, board_size, cudaMemcpyHostToDevice); cudaMemcpy(d_temp, temp, board_size, cudaMemcpyHostToDevice); } __device__ inline int num_neighbours_cuda(char *board) { int num_adj = 0; int i,j; for(i = threadIdx.y; i < MASK_WIDTH+threadIdx.y; i++) { for(j = threadIdx.x; j < MASK_WIDTH+threadIdx.x; j++) { if(board[i*SM_LINE_SIZE + j] == ON) num_adj++; } } return num_adj; } __global__ void copy_temp_to_board(char *board, char *temp, int nrows, int ncols) { int row = threadIdx.y + blockIdx.y * blockDim.y; int col = threadIdx.x + blockIdx.x * blockDim.x; int id = col + row*ncols; if (row < nrows && col < ncols) { board[id] = temp[id]; } } __global__ void update_board_cuda(char *board, char *temp, int nrows, int ncols) { __shared__ char shared_board_part[SM_SIZE]; int row = threadIdx.y + blockIdx.y * blockDim.y; int col = threadIdx.x + blockIdx.x * blockDim.x; int id = col + row*ncols; int neighbours, curr_r, curr_c, mapID; // fills the shared memory array for(curr_r=row-RADIUS; curr_r-row+RADIUS+threadIdx.y < MASK_WIDTH-1+TILE_WIDTH; curr_r += TILE_WIDTH) { for(curr_c=col-RADIUS; curr_c-col+RADIUS+threadIdx.x < MASK_WIDTH-1+TILE_WIDTH; curr_c += TILE_WIDTH) { mapID = SM_LINE_SIZE*(curr_r-row+RADIUS + threadIdx.y) + curr_c-col+RADIUS + threadIdx.x; if(curr_c >= 0 && curr_c < ncols && curr_r >= 0 && curr_r < nrows) { shared_board_part[mapID] = board[curr_r*ncols + curr_c]; } // borders else { shared_board_part[mapID] = 0; } } } __syncthreads(); if (row < nrows && col < ncols) { neighbours = num_neighbours_cuda(shared_board_part); // a cell is not a neighbour of itself if(board[id] == ON) neighbours--; /* Dies by underpopulation. */ if (neighbours < 2 && board[id] == ON) { temp[id] = OFF; } /* Dies by overpopulation. */ else if (neighbours > 3 && board[id] == ON) { temp[id] = OFF; } /* Become alive because of reproduction. */ else if (neighbours == 3 && board[id] == OFF) { temp[id] = ON; } /* Otherwise the cell lives with just the right company. 
*/ else { temp[id] = board[id]; } } } void update_board(int n, int nt) { printf("Running CUDA shared!\n"); // switch boards so we dont have to copy temp to board every time int switch_boards = 0; initialize_cuda_board(); dim3 dimGrid(ceil(ncols/(float)TILE_WIDTH), ceil(nrows/(float) TILE_WIDTH)); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); for(int it = 0; it < n; it++) { if(switch_boards) { update_board_cuda<<<dimGrid,dimBlock>>>(d_board, d_temp, nrows, ncols); switch_boards = 0; } else { update_board_cuda<<<dimGrid,dimBlock>>>(d_temp, d_board, nrows, ncols); switch_boards = 1; } cudaThreadSynchronize(); } // copies the result back to the host if(n%2 != 0) { cudaMemcpy(board, d_board, board_size, cudaMemcpyDeviceToHost); } else { cudaMemcpy(board, d_temp, board_size, cudaMemcpyDeviceToHost); copy_temp_to_board<<<dimGrid,dimBlock>>>(d_board, d_temp, nrows, ncols); } cudaFree(d_board); cudaFree(d_temp); }
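A common alternative to the switch_boards flag in update_board is to ping-pong the two device pointers each generation; the sketch below is not from the original and changes its end-of-loop convention so that the newest board is always in d_board:

for (int it = 0; it < n; it++) {
  // read d_board, write the next generation into d_temp
  update_board_cuda<<<dimGrid, dimBlock>>>(d_board, d_temp, nrows, ncols);
  cudaDeviceSynchronize();
  char* swap = d_board;   // ping-pong the buffers for the next iteration
  d_board = d_temp;
  d_temp = swap;
}
// after the loop the newest generation always lives in d_board
cudaMemcpy(board, d_board, board_size, cudaMemcpyDeviceToHost);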
b663fb8defe6bf26e7b98763ef65d79ed95b7428.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <math.h> #if __DEVICE_EMULATION__ #define DEBUG_SYNC __syncthreads(); #else #define DEBUG_SYNC #endif #if (__CUDA_ARCH__ < 200) #define int_mult(x,y) __mul24(x,y) #else #define int_mult(x,y) x*y #endif #define inf 0x7f800000 const int blockSize1 = 4096/2; /*const int blockSize2 = 8192; const int blockSize3 = 16384; const int blockSize4 = 32768; const int blockSize5 = 65536;*/ const int threads = 64; __device__ void warp_reduce_max(volatile float smem[64]) { smem[threadIdx.x] = smem[threadIdx.x+32] > smem[threadIdx.x] ? smem[threadIdx.x+32] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+16] > smem[threadIdx.x] ? smem[threadIdx.x+16] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+8] > smem[threadIdx.x] ? smem[threadIdx.x+8] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+4] > smem[threadIdx.x] ? smem[threadIdx.x+4] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+2] > smem[threadIdx.x] ? smem[threadIdx.x+2] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+1] > smem[threadIdx.x] ? smem[threadIdx.x+1] : smem[threadIdx.x]; } __device__ void warp_reduce_min(volatile float smem[64]) { smem[threadIdx.x] = smem[threadIdx.x+32] < smem[threadIdx.x] ? smem[threadIdx.x+32] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+16] < smem[threadIdx.x] ? smem[threadIdx.x+16] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+8] < smem[threadIdx.x] ? smem[threadIdx.x+8] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+4] < smem[threadIdx.x] ? smem[threadIdx.x+4] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+2] < smem[threadIdx.x] ? smem[threadIdx.x+2] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+1] < smem[threadIdx.x] ? smem[threadIdx.x+1] : smem[threadIdx.x]; } template<int threads> __global__ void find_min_max_dynamic(float* in, float* out, int n, int start_adr, int num_blocks) { __shared__ float smem_min[64]; __shared__ float smem_max[64]; int tid = threadIdx.x + start_adr; float max = -inf; float min = inf; float val; // tail part int mult = 0; for(int i = 1; mult + tid < n; i++) { val = in[tid + mult]; min = val < min ? val : min; max = val > max ? val : max; mult = int_mult(i,threads); } // previously reduced MIN part mult = 0; int i; for(i = 1; mult+threadIdx.x < num_blocks; i++) { val = out[threadIdx.x + mult]; min = val < min ? val : min; mult = int_mult(i,threads); } // MAX part for(; mult+threadIdx.x < num_blocks*2; i++) { val = out[threadIdx.x + mult]; max = val > max ? val : max; mult = int_mult(i,threads); } if(threads == 32) { smem_min[threadIdx.x+32] = 0.0f; smem_max[threadIdx.x+32] = 0.0f; } smem_min[threadIdx.x] = min; smem_max[threadIdx.x] = max; __syncthreads(); if(threadIdx.x < 32) { warp_reduce_min(smem_min); warp_reduce_max(smem_max); } if(threadIdx.x == 0) { out[blockIdx.x] = smem_min[threadIdx.x]; // out[0] == ans out[blockIdx.x + gridDim.x] = smem_max[threadIdx.x]; } } template<int els_per_block, int threads> __global__ void find_min_max(float* in, float* out) { __shared__ float smem_min[64]; __shared__ float smem_max[64]; int tid = threadIdx.x + blockIdx.x*els_per_block; float max = -inf; float min = inf; float val; const int iters = els_per_block/threads; #pragma unroll for(int i = 0; i < iters; i++) { val = in[tid + i*threads]; min = val < min ? val : min; max = val > max ? 
val : max; } if(threads == 32) { smem_min[threadIdx.x+32] = 0.0f; smem_max[threadIdx.x+32] = 0.0f; } smem_min[threadIdx.x] = min; smem_max[threadIdx.x] = max; __syncthreads(); if(threadIdx.x < 32) { warp_reduce_min(smem_min); warp_reduce_max(smem_max); } if(threadIdx.x == 0) { out[blockIdx.x] = smem_min[threadIdx.x]; // out[0] == ans out[blockIdx.x + gridDim.x] = smem_max[threadIdx.x]; } } float cpu_min(float* in, int num_els) { float min = inf; for(int i = 0; i < num_els; i++) min = in[i] < min ? in[i] : min; return min; } float cpu_max(float* in, int num_els) { float max = -inf; for(int i = 0; i < num_els; i++) max = in[i] > max ? in[i] : max; return max; } void findBlockSize(int* whichSize, int* num_el) { const float pretty_big_number = 24.0f*1024.0f*1024.0f; float ratio = float((*num_el))/pretty_big_number; if(ratio > 0.8f) (*whichSize) = 5; else if(ratio > 0.6f) (*whichSize) = 4; else if(ratio > 0.4f) (*whichSize) = 3; else if(ratio > 0.2f) (*whichSize) = 2; else (*whichSize) = 1; } void compute_reduction(float* d_in, float* d_out, int num_els) { int whichSize = -1; findBlockSize(&whichSize,&num_els); //whichSize = 5; int block_size = powf(2,whichSize-1)*blockSize1; int num_blocks = num_els/block_size; int tail = num_els - num_blocks*block_size; int start_adr = num_els - tail; if(whichSize == 1) hipLaunchKernelGGL(( find_min_max<blockSize1,threads>), dim3(num_blocks), dim3(threads), 0, 0, d_in, d_out); else if(whichSize == 2) hipLaunchKernelGGL(( find_min_max<blockSize1*2,threads>), dim3(num_blocks), dim3(threads), 0, 0, d_in, d_out); else if(whichSize == 3) hipLaunchKernelGGL(( find_min_max<blockSize1*4,threads>), dim3(num_blocks), dim3(threads), 0, 0, d_in, d_out); else if(whichSize == 4) hipLaunchKernelGGL(( find_min_max<blockSize1*8,threads>), dim3(num_blocks), dim3(threads), 0, 0, d_in, d_out); else hipLaunchKernelGGL(( find_min_max<blockSize1*16,threads>), dim3(num_blocks), dim3(threads), 0, 0, d_in, d_out); hipLaunchKernelGGL(( find_min_max_dynamic<threads>), dim3(1), dim3(threads), 0, 0, d_in, d_out, num_els, start_adr, num_blocks); } void my_min_max_test(int num_els) { // timers unsigned long long int start; unsigned long long int delta; int testIterations = 100; int size = num_els*sizeof(float); float* d_in; float* d_out; float* d_warm1; float* d_warm2; float* in = (float*)malloc(size); float* out = (float*)malloc(size); for(int i = 0; i < num_els; i++) { in[i] = rand()&1; } in[1024] = 34.0f; in[333] = 55.0f; in[23523] = -42.0f; hipMalloc((void**)&d_in, size); hipMalloc((void**)&d_out, size); hipMalloc((void**)&d_warm1, 1024*sizeof(float)); hipMalloc((void**)&d_warm2, 1024*sizeof(float)); hipMemcpy(d_in, in, size, hipMemcpyHostToDevice); ////////// /// warmup ////////// hipLaunchKernelGGL(( find_min_max<32,threads>), dim3(32), dim3(32), 0, 0, d_warm1, d_warm2); hipDeviceSynchronize(); ///// // end warmup ///// //time it ////////////// // real reduce ///////////// for(int i = 0; i < testIterations; i++) compute_reduction(d_in, d_out, num_els); hipDeviceSynchronize(); float dt = float(delta)/float(testIterations); hipMemcpy(out, d_out, size, hipMemcpyDeviceToHost); // need not be SIZE! (just 2 elements) float throughput = num_els*sizeof(float)*0.001f/(dt); int tail = num_els - (num_els/blockSize1)*blockSize1; printf(" %7.0d \t %0.2f \t\t %0.2f % \t %0.1f \t\t %s \n", num_els, throughput, (throughput/70.6f)*100.0f,dt, (cpu_min(in,num_els) == out[0] && cpu_max(in,num_els) == out[1]) ? 
"Pass" : "Fail"); //printf("\n min: %0.3f \n", out[0]); //printf("\n max: %0.3f \n", out[1]); hipFree(d_in); hipFree(d_out); hipFree(d_warm1); hipFree(d_warm2); free(in); free(out); //system("pause"); } int main(int argc, char* argv[]) { printf(" GTS250 @ 70.6 GB/s - Finding min and max"); printf("\n N \t\t [GB/s] \t [perc] \t [usec] \t test \n"); #pragma unroll for(int i = 1024*1024; i <= 32*1024*1024; i=i*2) { my_min_max_test(i); } printf("\n Non-base 2 tests! \n"); printf("\n N \t\t [GB/s] \t [perc] \t [usec] \t test \n"); // just some large numbers.... my_min_max_test(14*1024*1024+38); my_min_max_test(14*1024*1024+55); my_min_max_test(18*1024*1024+1232); my_min_max_test(7*1024*1024+94854); for(int i = 0; i < 4; i++) { float ratio = float(rand())/float(RAND_MAX); ratio = ratio >= 0 ? ratio : -ratio; int big_num = ratio*18*1e6; my_min_max_test(big_num); } return 0; }
b663fb8defe6bf26e7b98763ef65d79ed95b7428.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <math.h> #if __DEVICE_EMULATION__ #define DEBUG_SYNC __syncthreads(); #else #define DEBUG_SYNC #endif #if (__CUDA_ARCH__ < 200) #define int_mult(x,y) __mul24(x,y) #else #define int_mult(x,y) x*y #endif #define inf 0x7f800000 const int blockSize1 = 4096/2; /*const int blockSize2 = 8192; const int blockSize3 = 16384; const int blockSize4 = 32768; const int blockSize5 = 65536;*/ const int threads = 64; __device__ void warp_reduce_max(volatile float smem[64]) { smem[threadIdx.x] = smem[threadIdx.x+32] > smem[threadIdx.x] ? smem[threadIdx.x+32] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+16] > smem[threadIdx.x] ? smem[threadIdx.x+16] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+8] > smem[threadIdx.x] ? smem[threadIdx.x+8] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+4] > smem[threadIdx.x] ? smem[threadIdx.x+4] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+2] > smem[threadIdx.x] ? smem[threadIdx.x+2] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+1] > smem[threadIdx.x] ? smem[threadIdx.x+1] : smem[threadIdx.x]; } __device__ void warp_reduce_min(volatile float smem[64]) { smem[threadIdx.x] = smem[threadIdx.x+32] < smem[threadIdx.x] ? smem[threadIdx.x+32] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+16] < smem[threadIdx.x] ? smem[threadIdx.x+16] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+8] < smem[threadIdx.x] ? smem[threadIdx.x+8] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+4] < smem[threadIdx.x] ? smem[threadIdx.x+4] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+2] < smem[threadIdx.x] ? smem[threadIdx.x+2] : smem[threadIdx.x]; smem[threadIdx.x] = smem[threadIdx.x+1] < smem[threadIdx.x] ? smem[threadIdx.x+1] : smem[threadIdx.x]; } template<int threads> __global__ void find_min_max_dynamic(float* in, float* out, int n, int start_adr, int num_blocks) { __shared__ float smem_min[64]; __shared__ float smem_max[64]; int tid = threadIdx.x + start_adr; float max = -inf; float min = inf; float val; // tail part int mult = 0; for(int i = 1; mult + tid < n; i++) { val = in[tid + mult]; min = val < min ? val : min; max = val > max ? val : max; mult = int_mult(i,threads); } // previously reduced MIN part mult = 0; int i; for(i = 1; mult+threadIdx.x < num_blocks; i++) { val = out[threadIdx.x + mult]; min = val < min ? val : min; mult = int_mult(i,threads); } // MAX part for(; mult+threadIdx.x < num_blocks*2; i++) { val = out[threadIdx.x + mult]; max = val > max ? val : max; mult = int_mult(i,threads); } if(threads == 32) { smem_min[threadIdx.x+32] = 0.0f; smem_max[threadIdx.x+32] = 0.0f; } smem_min[threadIdx.x] = min; smem_max[threadIdx.x] = max; __syncthreads(); if(threadIdx.x < 32) { warp_reduce_min(smem_min); warp_reduce_max(smem_max); } if(threadIdx.x == 0) { out[blockIdx.x] = smem_min[threadIdx.x]; // out[0] == ans out[blockIdx.x + gridDim.x] = smem_max[threadIdx.x]; } } template<int els_per_block, int threads> __global__ void find_min_max(float* in, float* out) { __shared__ float smem_min[64]; __shared__ float smem_max[64]; int tid = threadIdx.x + blockIdx.x*els_per_block; float max = -inf; float min = inf; float val; const int iters = els_per_block/threads; #pragma unroll for(int i = 0; i < iters; i++) { val = in[tid + i*threads]; min = val < min ? val : min; max = val > max ? 
val : max; } if(threads == 32) { smem_min[threadIdx.x+32] = 0.0f; smem_max[threadIdx.x+32] = 0.0f; } smem_min[threadIdx.x] = min; smem_max[threadIdx.x] = max; __syncthreads(); if(threadIdx.x < 32) { warp_reduce_min(smem_min); warp_reduce_max(smem_max); } if(threadIdx.x == 0) { out[blockIdx.x] = smem_min[threadIdx.x]; // out[0] == ans out[blockIdx.x + gridDim.x] = smem_max[threadIdx.x]; } } float cpu_min(float* in, int num_els) { float min = inf; for(int i = 0; i < num_els; i++) min = in[i] < min ? in[i] : min; return min; } float cpu_max(float* in, int num_els) { float max = -inf; for(int i = 0; i < num_els; i++) max = in[i] > max ? in[i] : max; return max; } void findBlockSize(int* whichSize, int* num_el) { const float pretty_big_number = 24.0f*1024.0f*1024.0f; float ratio = float((*num_el))/pretty_big_number; if(ratio > 0.8f) (*whichSize) = 5; else if(ratio > 0.6f) (*whichSize) = 4; else if(ratio > 0.4f) (*whichSize) = 3; else if(ratio > 0.2f) (*whichSize) = 2; else (*whichSize) = 1; } void compute_reduction(float* d_in, float* d_out, int num_els) { int whichSize = -1; findBlockSize(&whichSize,&num_els); //whichSize = 5; int block_size = powf(2,whichSize-1)*blockSize1; int num_blocks = num_els/block_size; int tail = num_els - num_blocks*block_size; int start_adr = num_els - tail; if(whichSize == 1) find_min_max<blockSize1,threads><<< num_blocks, threads>>>(d_in, d_out); else if(whichSize == 2) find_min_max<blockSize1*2,threads><<< num_blocks, threads>>>(d_in, d_out); else if(whichSize == 3) find_min_max<blockSize1*4,threads><<< num_blocks, threads>>>(d_in, d_out); else if(whichSize == 4) find_min_max<blockSize1*8,threads><<< num_blocks, threads>>>(d_in, d_out); else find_min_max<blockSize1*16,threads><<< num_blocks, threads>>>(d_in, d_out); find_min_max_dynamic<threads><<< 1, threads>>>(d_in, d_out, num_els, start_adr, num_blocks); } void my_min_max_test(int num_els) { // timers unsigned long long int start; unsigned long long int delta; int testIterations = 100; int size = num_els*sizeof(float); float* d_in; float* d_out; float* d_warm1; float* d_warm2; float* in = (float*)malloc(size); float* out = (float*)malloc(size); for(int i = 0; i < num_els; i++) { in[i] = rand()&1; } in[1024] = 34.0f; in[333] = 55.0f; in[23523] = -42.0f; cudaMalloc((void**)&d_in, size); cudaMalloc((void**)&d_out, size); cudaMalloc((void**)&d_warm1, 1024*sizeof(float)); cudaMalloc((void**)&d_warm2, 1024*sizeof(float)); cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice); ////////// /// warmup ////////// find_min_max<32,threads><<< 32, 32>>>(d_warm1, d_warm2); cudaThreadSynchronize(); ///// // end warmup ///// //time it ////////////// // real reduce ///////////// for(int i = 0; i < testIterations; i++) compute_reduction(d_in, d_out, num_els); cudaThreadSynchronize(); float dt = float(delta)/float(testIterations); cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost); // need not be SIZE! (just 2 elements) float throughput = num_els*sizeof(float)*0.001f/(dt); int tail = num_els - (num_els/blockSize1)*blockSize1; printf(" %7.0d \t %0.2f \t\t %0.2f % \t %0.1f \t\t %s \n", num_els, throughput, (throughput/70.6f)*100.0f,dt, (cpu_min(in,num_els) == out[0] && cpu_max(in,num_els) == out[1]) ? 
"Pass" : "Fail"); //printf("\n min: %0.3f \n", out[0]); //printf("\n max: %0.3f \n", out[1]); cudaFree(d_in); cudaFree(d_out); cudaFree(d_warm1); cudaFree(d_warm2); free(in); free(out); //system("pause"); } int main(int argc, char* argv[]) { printf(" GTS250 @ 70.6 GB/s - Finding min and max"); printf("\n N \t\t [GB/s] \t [perc] \t [usec] \t test \n"); #pragma unroll for(int i = 1024*1024; i <= 32*1024*1024; i=i*2) { my_min_max_test(i); } printf("\n Non-base 2 tests! \n"); printf("\n N \t\t [GB/s] \t [perc] \t [usec] \t test \n"); // just some large numbers.... my_min_max_test(14*1024*1024+38); my_min_max_test(14*1024*1024+55); my_min_max_test(18*1024*1024+1232); my_min_max_test(7*1024*1024+94854); for(int i = 0; i < 4; i++) { float ratio = float(rand())/float(RAND_MAX); ratio = ratio >= 0 ? ratio : -ratio; int big_num = ratio*18*1e6; my_min_max_test(big_num); } return 0; }
9e78364da40aaa6a08cfcfec9fb084f7e30eb91e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* The implementation of this file is based on skipLayerNorm plugin in TensorRT demo: https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/ Copyright 2019 NVIDIA Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "layer_norm.cuh" #include "skip_layer_norm_impl.h" namespace onnxruntime { namespace contrib { namespace cuda { template <typename T, unsigned TPB> __global__ void SkipLayerNormKernelSmall( const int ld, const T* input, const T* skip, const T* beta, const T* gamma, T* output) { const T reverse_ld = T(1) / T(ld); const int offset = blockIdx.x * ld; KeyValuePairSum pair_sum; // reduce x and x^2 hipcub::KeyValuePair<T, T> thread_data(0, 0); const int idx = offset + threadIdx.x; T val = 0; if (threadIdx.x < ld) { val = input[idx] + skip[idx]; const T rldval = reverse_ld * val; thread_data = pair_sum(thread_data, hipcub::KeyValuePair<T, T>(rldval, rldval * val)); } LayerNormSmall<T, TPB>(val, thread_data, ld, idx, beta, gamma, output); } template <typename T, unsigned TPB> __global__ void SkipLayerNormKernel( const int ld, const T* input, const T* skip, const T* beta, const T* gamma, T* output) { const T reverse_ld = T(1) / T(ld); const int offset = blockIdx.x * ld; KeyValuePairSum pair_sum; // reduce x and x^2 hipcub::KeyValuePair<T, T> thread_data(0, 0); for (int i = threadIdx.x; i < ld; i += TPB) { const int idx = offset + i; const T val = input[idx] + skip[idx]; const T rldval = reverse_ld * val; thread_data = pair_sum(thread_data, hipcub::KeyValuePair<T, T>(rldval, rldval * val)); output[idx] = val; } LayerNorm<T, TPB>(thread_data, ld, offset, beta, gamma, output); } template <typename T> bool ComputeSkipLayerNorm( hipStream_t stream, const int ld, const int n, const T* input, const T* skip, const T* beta, const T* gamma, T* output) { // this must be true because n is the total size of the tensor assert(n % ld == 0); const int grid_size = n / ld; if (ld <= 32) { constexpr int block_size = 32; hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size>) , dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, output); } else if (ld <= 128) { constexpr int block_size = 128; hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size>) , dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, output); } else if (ld == 384) { constexpr int block_size = 384; hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size>) , dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, output); } else { constexpr int block_size = 256; hipLaunchKernelGGL(( SkipLayerNormKernel<T, block_size>), dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, output); } return CUDA_CALL(hipPeekAtLastError()); } bool LaunchSkipLayerNormKernel( void* output, const void* input, const void* skip, const void* gamma, const void* beta, const int batch_size, const 
int hidden_size, const int element_count, const size_t element_size) { // use default stream const hipStream_t stream = nullptr; if (element_size == 2) { return ComputeSkipLayerNorm( stream, hidden_size, element_count, reinterpret_cast<const half*>(input), reinterpret_cast<const half*>(skip), reinterpret_cast<const half*>(beta), reinterpret_cast<const half*>(gamma), reinterpret_cast<half*>(output)); } else { return ComputeSkipLayerNorm( stream, hidden_size, element_count, reinterpret_cast<const float*>(input), reinterpret_cast<const float*>(skip), reinterpret_cast<const float*>(beta), reinterpret_cast<const float*>(gamma), reinterpret_cast<float*>(output)); } } } // namespace cuda } // namespace contrib } // namespace onnxruntime
9e78364da40aaa6a08cfcfec9fb084f7e30eb91e.cu
/* The implementation of this file is based on skipLayerNorm plugin in TensorRT demo: https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/ Copyright 2019 NVIDIA Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "layer_norm.cuh" #include "skip_layer_norm_impl.h" namespace onnxruntime { namespace contrib { namespace cuda { template <typename T, unsigned TPB> __global__ void SkipLayerNormKernelSmall( const int ld, const T* input, const T* skip, const T* beta, const T* gamma, T* output) { const T reverse_ld = T(1) / T(ld); const int offset = blockIdx.x * ld; KeyValuePairSum pair_sum; // reduce x and x^2 cub::KeyValuePair<T, T> thread_data(0, 0); const int idx = offset + threadIdx.x; T val = 0; if (threadIdx.x < ld) { val = input[idx] + skip[idx]; const T rldval = reverse_ld * val; thread_data = pair_sum(thread_data, cub::KeyValuePair<T, T>(rldval, rldval * val)); } LayerNormSmall<T, TPB>(val, thread_data, ld, idx, beta, gamma, output); } template <typename T, unsigned TPB> __global__ void SkipLayerNormKernel( const int ld, const T* input, const T* skip, const T* beta, const T* gamma, T* output) { const T reverse_ld = T(1) / T(ld); const int offset = blockIdx.x * ld; KeyValuePairSum pair_sum; // reduce x and x^2 cub::KeyValuePair<T, T> thread_data(0, 0); for (int i = threadIdx.x; i < ld; i += TPB) { const int idx = offset + i; const T val = input[idx] + skip[idx]; const T rldval = reverse_ld * val; thread_data = pair_sum(thread_data, cub::KeyValuePair<T, T>(rldval, rldval * val)); output[idx] = val; } LayerNorm<T, TPB>(thread_data, ld, offset, beta, gamma, output); } template <typename T> bool ComputeSkipLayerNorm( cudaStream_t stream, const int ld, const int n, const T* input, const T* skip, const T* beta, const T* gamma, T* output) { // this must be true because n is the total size of the tensor assert(n % ld == 0); const int grid_size = n / ld; if (ld <= 32) { constexpr int block_size = 32; SkipLayerNormKernelSmall<T, block_size> <<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, output); } else if (ld <= 128) { constexpr int block_size = 128; SkipLayerNormKernelSmall<T, block_size> <<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, output); } else if (ld == 384) { constexpr int block_size = 384; SkipLayerNormKernelSmall<T, block_size> <<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, output); } else { constexpr int block_size = 256; SkipLayerNormKernel<T, block_size><<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, output); } return CUDA_CALL(cudaPeekAtLastError()); } bool LaunchSkipLayerNormKernel( void* output, const void* input, const void* skip, const void* gamma, const void* beta, const int batch_size, const int hidden_size, const int element_count, const size_t element_size) { // use default stream const cudaStream_t stream = nullptr; if (element_size == 2) { return ComputeSkipLayerNorm( stream, hidden_size, element_count, 
reinterpret_cast<const half*>(input), reinterpret_cast<const half*>(skip), reinterpret_cast<const half*>(beta), reinterpret_cast<const half*>(gamma), reinterpret_cast<half*>(output)); } else { return ComputeSkipLayerNorm( stream, hidden_size, element_count, reinterpret_cast<const float*>(input), reinterpret_cast<const float*>(skip), reinterpret_cast<const float*>(beta), reinterpret_cast<const float*>(gamma), reinterpret_cast<float*>(output)); } } } // namespace cuda } // namespace contrib } // namespace onnxruntime
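A hypothetical call site for LaunchSkipLayerNormKernel, with assumed BERT-like shapes; d_out, d_in, d_skip, d_gamma and d_beta are placeholder device pointers, not names from the file:

const int batch = 8, seq_len = 128, hidden = 768;    // assumed shapes, for illustration only
const int element_count = batch * seq_len * hidden;  // total number of tensor elements
bool ok = LaunchSkipLayerNormKernel(d_out, d_in, d_skip, d_gamma, d_beta,
                                    batch, hidden, element_count,
                                    sizeof(float));  // element_size == 4 selects the float kernels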
a387b6c5380a117067ce8b4cef62fb00c45ef3ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*********************************************************************//** * @file * * @section LICENCE * * Mathematica source file * * Copyright 1986 through 2010 by Wolfram Research Inc. * * @section DESCRIPTION * * * * $Id: clamp.cu,v 1.8 2010/09/29 08:40:55 abduld Exp $ ************************************************************************/ #include <wgl.h> #include <wgl_cuda_runtime.h> #ifdef CONFIG_USE_DOUBLE_PRECISION #define Real_t double #define WGL_Real_t WGL_Type_Double #define CUDA_Runtime_getDeviceMemoryAsReal CUDA_Runtime_getDeviceMemoryAsDouble #else #define Real_t float #define WGL_Real_t WGL_Type_Float #define CUDA_Runtime_getDeviceMemoryAsReal CUDA_Runtime_getDeviceMemoryAsFloat #endif #define wglState (wglData->state) #define wglErr (wglData->getError(wglData)) #define WGL_SuccessQ (wglErr->code == WGL_Success) #define WGL_FailQ (!WGL_SuccessQ) #define WGL_Type_RealQ(mem) ((mem)->type == WGL_Real_t) #define WGL_SAFE_CALL(stmt, jmp) stmt; if (WGL_FailQ) { goto jmp; } extern WolframGPULibraryData wglData; static int iCUDAColorNegate(WGL_Memory_t input, WGL_Memory_t output); template <typename T> __device__ T negate(const T & a) { return 255 - a; } template <> __device__ float negate<float>(const float & a) { return 1.0f - a; } #ifdef CONFIG_USE_DOUBLE_PRECISION template <> __device__ double negate(const double & a) { return 1.0 - a; } #endif /* CONFIG_USE_DOUBLE_PRECISION */ template <typename T> __global__ void colorNegate(T * in, T * out, int len) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < len) { out[index] = negate(in[index]); } } static int iCUDAColorNegate(WGL_Memory_t input, WGL_Memory_t output) { int err = LIBRARY_FUNCTION_ERROR; dim3 blockDim(256); dim3 gridDim(Ceil(input->flattenedLength, blockDim.x)); if (input->type != output->type) { return LIBRARY_TYPE_ERROR; } else if (input->flattenedLength != output->flattenedLength) { return LIBRARY_DIMENSION_ERROR; } WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, input, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsOutput(wglState, output, wglErr), cleanup); switch (input->type) { case WGL_Type_Char: hipLaunchKernelGGL(( colorNegate<char>), dim3(gridDim), dim3(blockDim), 0, 0, CUDA_Runtime_getDeviceMemoryAsChar(input), CUDA_Runtime_getDeviceMemoryAsChar(output), input->flattenedLength ); break ; case WGL_Type_UnsignedChar: hipLaunchKernelGGL(( colorNegate<unsigned char>), dim3(gridDim), dim3(blockDim), 0, 0, CUDA_Runtime_getDeviceMemoryAsUnsignedChar(input), CUDA_Runtime_getDeviceMemoryAsUnsignedChar(output), input->flattenedLength ); break ; case WGL_Type_Short: hipLaunchKernelGGL(( colorNegate<short>), dim3(gridDim), dim3(blockDim), 0, 0, CUDA_Runtime_getDeviceMemoryAsShort(input), CUDA_Runtime_getDeviceMemoryAsShort(output), input->flattenedLength ); break ; case WGL_Type_Integer: hipLaunchKernelGGL(( colorNegate<int>), dim3(gridDim), dim3(blockDim), 0, 0, CUDA_Runtime_getDeviceMemoryAsInteger(input), CUDA_Runtime_getDeviceMemoryAsInteger(output), input->flattenedLength ); break ; case WGL_Type_Long: hipLaunchKernelGGL(( colorNegate<int64_t>), dim3(gridDim), dim3(blockDim), 0, 0, CUDA_Runtime_getDeviceMemoryAsLong(input), CUDA_Runtime_getDeviceMemoryAsLong(output), input->flattenedLength ); break ; case WGL_Type_Float: hipLaunchKernelGGL(( colorNegate<float>), dim3(gridDim), dim3(blockDim), 0, 0, CUDA_Runtime_getDeviceMemoryAsFloat(input), CUDA_Runtime_getDeviceMemoryAsFloat(output), 
input->flattenedLength ); break ; #ifdef CONFIG_USE_DOUBLE_PRECISION case WGL_Type_Double: hipLaunchKernelGGL(( colorNegate<double>), dim3(gridDim), dim3(blockDim), 0, 0, CUDA_Runtime_getDeviceMemoryAsDouble(input), CUDA_Runtime_getDeviceMemoryAsDouble(output), input->flattenedLength ); break ; #endif /* CONFIG_USE_DOUBLE_PRECISION */ default: err = LIBRARY_TYPE_ERROR; } CUDA_Runtime_synchronize(wglErr); if (WGL_SuccessQ) { err = LIBRARY_NO_ERROR; } cleanup: if (WGL_SuccessQ && err == LIBRARY_NO_ERROR) { CUDA_Runtime_setMemoryAsValidOutput(wglState, output, wglErr); } else { CUDA_Runtime_setMemoryAsInvalidOutput(wglState, output, wglErr); } CUDA_Runtime_unsetMemoryAsInput(wglState, input, wglErr); return err; } EXTERN_C DLLEXPORT int oCUDAColorNegate(WolframLibraryData libData, mint Argc, MArgument * Args, MArgument Res) { WGL_Memory_t input, output; mint inputId, outputId; int err = LIBRARY_FUNCTION_ERROR; inputId = MArgument_getInteger(Args[0]); outputId = MArgument_getInteger(Args[1]); WGL_SAFE_CALL(wglData->setWolframLibraryData(wglData, libData), cleanup); WGL_SAFE_CALL(input = wglData->findMemory(wglData, inputId), cleanup); WGL_SAFE_CALL(output = wglData->findMemory(wglData, outputId), cleanup); err = iCUDAColorNegate(input, output); cleanup: if (err == LIBRARY_NO_ERROR && WGL_SuccessQ) { return LIBRARY_NO_ERROR; } else if (err != LIBRARY_NO_ERROR) { return err; } else { return LIBRARY_FUNCTION_ERROR; } }
a387b6c5380a117067ce8b4cef62fb00c45ef3ad.cu
/*********************************************************************//** * @file * * @section LICENCE * * Mathematica source file * * Copyright 1986 through 2010 by Wolfram Research Inc. * * @section DESCRIPTION * * * * $Id: clamp.cu,v 1.8 2010/09/29 08:40:55 abduld Exp $ ************************************************************************/ #include <wgl.h> #include <wgl_cuda_runtime.h> #ifdef CONFIG_USE_DOUBLE_PRECISION #define Real_t double #define WGL_Real_t WGL_Type_Double #define CUDA_Runtime_getDeviceMemoryAsReal CUDA_Runtime_getDeviceMemoryAsDouble #else #define Real_t float #define WGL_Real_t WGL_Type_Float #define CUDA_Runtime_getDeviceMemoryAsReal CUDA_Runtime_getDeviceMemoryAsFloat #endif #define wglState (wglData->state) #define wglErr (wglData->getError(wglData)) #define WGL_SuccessQ (wglErr->code == WGL_Success) #define WGL_FailQ (!WGL_SuccessQ) #define WGL_Type_RealQ(mem) ((mem)->type == WGL_Real_t) #define WGL_SAFE_CALL(stmt, jmp) stmt; if (WGL_FailQ) { goto jmp; } extern WolframGPULibraryData wglData; static int iCUDAColorNegate(WGL_Memory_t input, WGL_Memory_t output); template <typename T> __device__ T negate(const T & a) { return 255 - a; } template <> __device__ float negate<float>(const float & a) { return 1.0f - a; } #ifdef CONFIG_USE_DOUBLE_PRECISION template <> __device__ double negate(const double & a) { return 1.0 - a; } #endif /* CONFIG_USE_DOUBLE_PRECISION */ template <typename T> __global__ void colorNegate(T * in, T * out, int len) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < len) { out[index] = negate(in[index]); } } static int iCUDAColorNegate(WGL_Memory_t input, WGL_Memory_t output) { int err = LIBRARY_FUNCTION_ERROR; dim3 blockDim(256); dim3 gridDim(Ceil(input->flattenedLength, blockDim.x)); if (input->type != output->type) { return LIBRARY_TYPE_ERROR; } else if (input->flattenedLength != output->flattenedLength) { return LIBRARY_DIMENSION_ERROR; } WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, input, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsOutput(wglState, output, wglErr), cleanup); switch (input->type) { case WGL_Type_Char: colorNegate<char><<<gridDim, blockDim>>>( CUDA_Runtime_getDeviceMemoryAsChar(input), CUDA_Runtime_getDeviceMemoryAsChar(output), input->flattenedLength ); break ; case WGL_Type_UnsignedChar: colorNegate<unsigned char><<<gridDim, blockDim>>>( CUDA_Runtime_getDeviceMemoryAsUnsignedChar(input), CUDA_Runtime_getDeviceMemoryAsUnsignedChar(output), input->flattenedLength ); break ; case WGL_Type_Short: colorNegate<short><<<gridDim, blockDim>>>( CUDA_Runtime_getDeviceMemoryAsShort(input), CUDA_Runtime_getDeviceMemoryAsShort(output), input->flattenedLength ); break ; case WGL_Type_Integer: colorNegate<int><<<gridDim, blockDim>>>( CUDA_Runtime_getDeviceMemoryAsInteger(input), CUDA_Runtime_getDeviceMemoryAsInteger(output), input->flattenedLength ); break ; case WGL_Type_Long: colorNegate<int64_t><<<gridDim, blockDim>>>( CUDA_Runtime_getDeviceMemoryAsLong(input), CUDA_Runtime_getDeviceMemoryAsLong(output), input->flattenedLength ); break ; case WGL_Type_Float: colorNegate<float><<<gridDim, blockDim>>>( CUDA_Runtime_getDeviceMemoryAsFloat(input), CUDA_Runtime_getDeviceMemoryAsFloat(output), input->flattenedLength ); break ; #ifdef CONFIG_USE_DOUBLE_PRECISION case WGL_Type_Double: colorNegate<double><<<gridDim, blockDim>>>( CUDA_Runtime_getDeviceMemoryAsDouble(input), CUDA_Runtime_getDeviceMemoryAsDouble(output), input->flattenedLength ); break ; #endif /* CONFIG_USE_DOUBLE_PRECISION */ default: 
err = LIBRARY_TYPE_ERROR; } CUDA_Runtime_synchronize(wglErr); if (WGL_SuccessQ) { err = LIBRARY_NO_ERROR; } cleanup: if (WGL_SuccessQ && err == LIBRARY_NO_ERROR) { CUDA_Runtime_setMemoryAsValidOutput(wglState, output, wglErr); } else { CUDA_Runtime_setMemoryAsInvalidOutput(wglState, output, wglErr); } CUDA_Runtime_unsetMemoryAsInput(wglState, input, wglErr); return err; } EXTERN_C DLLEXPORT int oCUDAColorNegate(WolframLibraryData libData, mint Argc, MArgument * Args, MArgument Res) { WGL_Memory_t input, output; mint inputId, outputId; int err = LIBRARY_FUNCTION_ERROR; inputId = MArgument_getInteger(Args[0]); outputId = MArgument_getInteger(Args[1]); WGL_SAFE_CALL(wglData->setWolframLibraryData(wglData, libData), cleanup); WGL_SAFE_CALL(input = wglData->findMemory(wglData, inputId), cleanup); WGL_SAFE_CALL(output = wglData->findMemory(wglData, outputId), cleanup); err = iCUDAColorNegate(input, output); cleanup: if (err == LIBRARY_NO_ERROR && WGL_SuccessQ) { return LIBRARY_NO_ERROR; } else if (err != LIBRARY_NO_ERROR) { return err; } else { return LIBRARY_FUNCTION_ERROR; } }
cf06d501e1fbd10aa29b49705441c4744a20ecd7.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #define N 10 __host__ __device__ void fun(int *arr) { for (unsigned ii = 0; ii < N; ++ii) ++arr[ii]; } __global__ void dfun(int *arr) { fun(arr); } __global__ void dprint(int *arr, int x); __host__ __device__ void print(int *arr, int x) { for (unsigned ii = 0; ii < N; ++ii) printf("%d, ", arr[ii]); printf("\n"); hipLaunchKernelGGL(( dprint), dim3(1), dim3(5), 0, 0, arr, ++x); } __global__ void dprint(int *arr, int x = 0) { if (x < 1) print(arr, x); } int main() { int arr[N], *darr; hipMalloc(&darr, N * sizeof(int)); for (unsigned ii = 0; ii < N; ++ii) arr[ii] = ii; hipMemcpy(darr, arr, N * sizeof(int), hipMemcpyHostToDevice); fun(arr); hipLaunchKernelGGL(( dfun), dim3(1), dim3(1), 0, 0, darr); hipDeviceSynchronize(); print(arr, -1); hipLaunchKernelGGL(( dprint), dim3(1), dim3(1), 0, 0, darr); hipDeviceSynchronize(); return 0; }
cf06d501e1fbd10aa29b49705441c4744a20ecd7.cu
#include <stdio.h> #include <cuda.h> #define N 10 __host__ __device__ void fun(int *arr) { for (unsigned ii = 0; ii < N; ++ii) ++arr[ii]; } __global__ void dfun(int *arr) { fun(arr); } __global__ void dprint(int *arr, int x); __host__ __device__ void print(int *arr, int x) { for (unsigned ii = 0; ii < N; ++ii) printf("%d, ", arr[ii]); printf("\n"); dprint<<<1, 5>>>(arr, ++x); } __global__ void dprint(int *arr, int x = 0) { if (x < 1) print(arr, x); } int main() { int arr[N], *darr; cudaMalloc(&darr, N * sizeof(int)); for (unsigned ii = 0; ii < N; ++ii) arr[ii] = ii; cudaMemcpy(darr, arr, N * sizeof(int), cudaMemcpyHostToDevice); fun(arr); dfun<<<1, 1>>>(darr); cudaDeviceSynchronize(); print(arr, -1); dprint<<<1, 1>>>(darr); cudaDeviceSynchronize(); return 0; }
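The cf06d501... pair above is a compact example of what hipify changes and what it leaves alone: the headers (cuda.h -> hip/hip_runtime.h), the runtime calls (cudaMalloc / cudaMemcpy / cudaDeviceSynchronize -> hipMalloc / hipMemcpy / hipDeviceSynchronize), and every triple-chevron launch, which becomes hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, args...) with zeros filled in for the omitted shared-memory size and stream. The short CUDA program below (not part of the dataset) annotates that launch mapping on a trivial kernel; the hipified form is shown only in comments.

// Hedged sketch of the CUDA -> HIP launch mapping seen in the pair above.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void hello(int tag)
{
    printf("block %d thread %d tag %d\n", blockIdx.x, threadIdx.x, tag);
}

int main()
{
    // CUDA launch: 2 blocks of 4 threads, with the optional dynamic
    // shared-memory size and stream arguments left at their defaults (0).
    hello<<<2, 4>>>(7);

    // hipify-generated equivalent (reference only, not compiled here):
    //   hipLaunchKernelGGL(hello, dim3(2), dim3(4), 0, 0, 7);
    // i.e. kernel, grid, block, shared-memory bytes, stream, then the
    // original argument list.

    cudaDeviceSynchronize();
    return 0;
}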
my_util.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hipfft.h> #include "mylibenv.h" __global__ void _transpose_OD(float *odata, float *idata, int width, int height) { __shared__ float block[CUDA_BLOCK_SIZE2][CUDA_BLOCK_SIZE2+1]; // read the matrix tile into shared memory // load one element per thread from device memory (idata) and store it // in transposed order in block[][] unsigned int xIndex = blockIdx.x * CUDA_BLOCK_SIZE2 + threadIdx.x; unsigned int yIndex = blockIdx.y * CUDA_BLOCK_SIZE2 + threadIdx.y; if((xIndex < width) && (yIndex < height)) { unsigned int index_in = yIndex * width + xIndex; block[threadIdx.y][threadIdx.x] = idata[index_in]; } // synchronise to ensure all writes to block[][] have completed __syncthreads(); // write the transposed matrix tile to global memory (odata) in linear order xIndex = blockIdx.y * CUDA_BLOCK_SIZE2 + threadIdx.x; yIndex = blockIdx.x * CUDA_BLOCK_SIZE2 + threadIdx.y; if((xIndex < height) && (yIndex < width)) { unsigned int index_out = yIndex * height + xIndex; odata[index_out] = block[threadIdx.x][threadIdx.y]; } } // Transform float data to cucomplex form // PARAMETERS: // real: tell the src data is real or complex, if it's // real, fill the image part with 0 __global__ void _floatToCuComplex_OD(float *src, hipfftComplex *dst, int sz) { int index = threadIdx.x + blockIdx.x * blockDim.x; for(int i = index ;i < index + 128 && i < sz; i++){ dst[i].x = src[i]; dst[i].y = 0; } } __global__ void _dotMul_OD(float *data1, float *data2, float *res, int sz) { __shared__ float s_res[CUDA_BLOCK_SIZE]; int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < sz) s_res[threadIdx.x] = data1[index] * data2[index]; __syncthreads(); if(index < sz) res[index] = s_res[threadIdx.x]; } __global__ void _mulSpectrums_OD(hipfftComplex *data1, hipfftComplex *data2, hipfftComplex *res, int sz) { __shared__ hipfftComplex s_res[CUDA_BLOCK_SIZE]; int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < sz){ s_res[threadIdx.x].x = data1[index].x * data2[index].x + data1[index].y * data2[index].y; s_res[threadIdx.x].y = -1 * data1[index].x * data2[index].y + data1[index].y * data2[index].x; } __syncthreads(); if(index < sz){ res[index] = s_res[threadIdx.x]; } } __global__ void _dotMulComplex_OD(hipfftComplex *data1, hipfftComplex *data2, hipfftComplex *res, int sz) { __shared__ hipfftComplex s_res[CUDA_BLOCK_SIZE]; int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < sz){ s_res[threadIdx.x].x = data1[index].x * data2[index].x - data1[index].y * data2[index].y; s_res[threadIdx.x].y = data1[index].x * data2[index].y + data1[index].y * data2[index].x; } __syncthreads(); if(index < sz){ res[index] = s_res[threadIdx.x]; } } __global__ void _dotDivComplex_OD(hipfftComplex *data1, hipfftComplex *data2, hipfftComplex *res, int sz) { __shared__ hipfftComplex s_res[CUDA_BLOCK_SIZE]; int index = threadIdx.x + blockIdx.x * blockDim.x; float divisor = 1.f / (data2[index].x * data2[index].x + data2[index].y * data2[index].y); if(index < sz){ s_res[threadIdx.x].x = (data1[index].x * data2[index].x + data1[index].y * data2[index].y) * divisor; s_res[threadIdx.x].y = (-1 * data1[index].x * data2[index].y + data1[index].y * data2[index].x) * divisor; } __syncthreads(); if(index < sz){ res[index] = s_res[threadIdx.x]; } } __global__ void _axpb_OD(float *data, float a, float b, float *res, int sz) { __shared__ float s_res[CUDA_BLOCK_SIZE]; int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < 
sz){ s_res[threadIdx.x] = a * data[index] + b; } __syncthreads(); if(index < sz){ res[index] = s_res[threadIdx.x]; } } __global__ void _axpb_complex_OD(hipfftComplex *data, hipfftComplex a, hipfftComplex b, hipfftComplex *res, int sz) { __shared__ hipfftComplex s_res[CUDA_BLOCK_SIZE]; int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < sz){ s_res[threadIdx.x].x = a.x * data[index].x - a.y * data[index].y + b.x; s_res[threadIdx.x].y = a.x * data[index].y + a.y * data[index].x + b.y; } __syncthreads(); if(index < sz){ res[index] = s_res[threadIdx.x]; } } __global__ void _computeRes_OD(float s_x1, float s_x2, float sigma, float *c, float *res, int sz, int divisor) { __shared__ float s_res[CUDA_BLOCK_SIZE]; int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < sz){ float tmp = (s_x1 + s_x2 - 2 * c[index]) / divisor; if(tmp > 0) s_res[threadIdx.x] = exp(-1 * tmp / (sigma * sigma)); else s_res[threadIdx.x] = 1; } __syncthreads(); if(index < sz){ res[index] = s_res[threadIdx.x]; } }
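A note on _transpose_OD above before its CUDA counterpart: the kernel stages a CUDA_BLOCK_SIZE2 x CUDA_BLOCK_SIZE2 tile in shared memory and pads the inner dimension by one element so that the column-order reads in the write phase are spread across shared-memory banks instead of colliding on one. CUDA_BLOCK_SIZE2 itself is defined in mylibenv.h and not visible here, so the standalone CUDA sketch below assumes a tile of 16; everything else follows the kernel above.

// Hedged standalone sketch of the padded-tile transpose (TILE is an assumption).
#include <cstdio>
#include <cuda_runtime.h>

#define TILE 16   // assumption: stands in for CUDA_BLOCK_SIZE2 from mylibenv.h

__global__ void transposeTiled(float* odata, const float* idata, int width, int height)
{
    // The +1 padding breaks shared-memory bank conflicts on the transposed
    // reads, exactly as in _transpose_OD above.
    __shared__ float block[TILE][TILE + 1];

    unsigned int x = blockIdx.x * TILE + threadIdx.x;
    unsigned int y = blockIdx.y * TILE + threadIdx.y;
    if (x < width && y < height)
        block[threadIdx.y][threadIdx.x] = idata[y * width + x];

    __syncthreads();

    x = blockIdx.y * TILE + threadIdx.x;   // note the swapped block indices
    y = blockIdx.x * TILE + threadIdx.y;
    if (x < height && y < width)
        odata[y * height + x] = block[threadIdx.x][threadIdx.y];
}

int main()
{
    const int width = 37, height = 23;          // deliberately not tile multiples
    float h_in[width * height], h_out[width * height];
    for (int i = 0; i < width * height; ++i) h_in[i] = (float) i;

    float *d_in, *d_out;
    cudaMalloc(&d_in, sizeof(h_in));
    cudaMalloc(&d_out, sizeof(h_out));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);

    dim3 block(TILE, TILE);
    dim3 grid((width + TILE - 1) / TILE, (height + TILE - 1) / TILE);
    transposeTiled<<<grid, block>>>(d_out, d_in, width, height);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);

    // Transposed layout: out[x * height + y] == in[y * width + x].
    printf("in(y=2,x=5)=%g  out=%g\n", h_in[2 * width + 5], h_out[5 * height + 2]);

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}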
my_util.cu
#include <cuda.h> #include <cuda_runtime.h> #include <cufft.h> #include "mylibenv.h" __global__ void _transpose_OD(float *odata, float *idata, int width, int height) { __shared__ float block[CUDA_BLOCK_SIZE2][CUDA_BLOCK_SIZE2+1]; // read the matrix tile into shared memory // load one element per thread from device memory (idata) and store it // in transposed order in block[][] unsigned int xIndex = blockIdx.x * CUDA_BLOCK_SIZE2 + threadIdx.x; unsigned int yIndex = blockIdx.y * CUDA_BLOCK_SIZE2 + threadIdx.y; if((xIndex < width) && (yIndex < height)) { unsigned int index_in = yIndex * width + xIndex; block[threadIdx.y][threadIdx.x] = idata[index_in]; } // synchronise to ensure all writes to block[][] have completed __syncthreads(); // write the transposed matrix tile to global memory (odata) in linear order xIndex = blockIdx.y * CUDA_BLOCK_SIZE2 + threadIdx.x; yIndex = blockIdx.x * CUDA_BLOCK_SIZE2 + threadIdx.y; if((xIndex < height) && (yIndex < width)) { unsigned int index_out = yIndex * height + xIndex; odata[index_out] = block[threadIdx.x][threadIdx.y]; } } // Transform float data to cucomplex form // PARAMETERS: // real: tell the src data is real or complex, if it's // real, fill the image part with 0 __global__ void _floatToCuComplex_OD(float *src, cufftComplex *dst, int sz) { int index = threadIdx.x + blockIdx.x * blockDim.x; for(int i = index ;i < index + 128 && i < sz; i++){ dst[i].x = src[i]; dst[i].y = 0; } } __global__ void _dotMul_OD(float *data1, float *data2, float *res, int sz) { __shared__ float s_res[CUDA_BLOCK_SIZE]; int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < sz) s_res[threadIdx.x] = data1[index] * data2[index]; __syncthreads(); if(index < sz) res[index] = s_res[threadIdx.x]; } __global__ void _mulSpectrums_OD(cufftComplex *data1, cufftComplex *data2, cufftComplex *res, int sz) { __shared__ cufftComplex s_res[CUDA_BLOCK_SIZE]; int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < sz){ s_res[threadIdx.x].x = data1[index].x * data2[index].x + data1[index].y * data2[index].y; s_res[threadIdx.x].y = -1 * data1[index].x * data2[index].y + data1[index].y * data2[index].x; } __syncthreads(); if(index < sz){ res[index] = s_res[threadIdx.x]; } } __global__ void _dotMulComplex_OD(cufftComplex *data1, cufftComplex *data2, cufftComplex *res, int sz) { __shared__ cufftComplex s_res[CUDA_BLOCK_SIZE]; int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < sz){ s_res[threadIdx.x].x = data1[index].x * data2[index].x - data1[index].y * data2[index].y; s_res[threadIdx.x].y = data1[index].x * data2[index].y + data1[index].y * data2[index].x; } __syncthreads(); if(index < sz){ res[index] = s_res[threadIdx.x]; } } __global__ void _dotDivComplex_OD(cufftComplex *data1, cufftComplex *data2, cufftComplex *res, int sz) { __shared__ cufftComplex s_res[CUDA_BLOCK_SIZE]; int index = threadIdx.x + blockIdx.x * blockDim.x; float divisor = 1.f / (data2[index].x * data2[index].x + data2[index].y * data2[index].y); if(index < sz){ s_res[threadIdx.x].x = (data1[index].x * data2[index].x + data1[index].y * data2[index].y) * divisor; s_res[threadIdx.x].y = (-1 * data1[index].x * data2[index].y + data1[index].y * data2[index].x) * divisor; } __syncthreads(); if(index < sz){ res[index] = s_res[threadIdx.x]; } } __global__ void _axpb_OD(float *data, float a, float b, float *res, int sz) { __shared__ float s_res[CUDA_BLOCK_SIZE]; int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < sz){ s_res[threadIdx.x] = a * data[index] + b; } __syncthreads(); if(index < sz){ 
res[index] = s_res[threadIdx.x]; } } __global__ void _axpb_complex_OD(cufftComplex *data, cufftComplex a, cufftComplex b, cufftComplex *res, int sz) { __shared__ cufftComplex s_res[CUDA_BLOCK_SIZE]; int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < sz){ s_res[threadIdx.x].x = a.x * data[index].x - a.y * data[index].y + b.x; s_res[threadIdx.x].y = a.x * data[index].y + a.y * data[index].x + b.y; } __syncthreads(); if(index < sz){ res[index] = s_res[threadIdx.x]; } } __global__ void _computeRes_OD(float s_x1, float s_x2, float sigma, float *c, float *res, int sz, int divisor) { __shared__ float s_res[CUDA_BLOCK_SIZE]; int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < sz){ float tmp = (s_x1 + s_x2 - 2 * c[index]) / divisor; if(tmp > 0) s_res[threadIdx.x] = exp(-1 * tmp / (sigma * sigma)); else s_res[threadIdx.x] = 1; } __syncthreads(); if(index < sz){ res[index] = s_res[threadIdx.x]; } }
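Written in complex notation, the element formulas in my_util reduce to two standard operations: _mulSpectrums_OD computes res = data1 * conj(data2) (the correlation-style spectrum product), and _dotDivComplex_OD computes res = data1 / data2 via (data1 * conj(data2)) / |data2|^2. The host-only check below (an illustrative addition, not part of my_util) verifies both element formulas against std::complex; it needs nothing from mylibenv.h and compiles as plain C++ or CUDA.

// Hedged cross-check of the complex element formulas used above.
#include <cassert>
#include <cmath>
#include <complex>
#include <cstdio>

int main()
{
    std::complex<float> a(3.f, -2.f), b(1.5f, 4.f);

    // _mulSpectrums_OD element formula == a * conj(b)
    float mul_re = a.real() * b.real() + a.imag() * b.imag();
    float mul_im = -a.real() * b.imag() + a.imag() * b.real();
    std::complex<float> mul_ref = a * std::conj(b);
    assert(std::abs(mul_re - mul_ref.real()) < 1e-5f);
    assert(std::abs(mul_im - mul_ref.imag()) < 1e-5f);

    // _dotDivComplex_OD element formula == a / b
    float inv = 1.f / (b.real() * b.real() + b.imag() * b.imag());
    float div_re = (a.real() * b.real() + a.imag() * b.imag()) * inv;
    float div_im = (-a.real() * b.imag() + a.imag() * b.real()) * inv;
    std::complex<float> div_ref = a / b;
    assert(std::abs(div_re - div_ref.real()) < 1e-5f);
    assert(std::abs(div_im - div_ref.imag()) < 1e-5f);

    printf("spectrum formulas match std::complex\n");
    return 0;
}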
4bcf1e9ee4e3eeb1b951ee7f267e8e7c17f7ee0e.hip
// !!! This is a file automatically generated by hipify!!! /* * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common/bboxUtils.h" #include "hipcub/hipcub.hpp" #include "hip/hip_runtime_api.h" #include "efficientNMSInference.cuh" #include "efficientNMSInference.h" #define NMS_TILES 5 using namespace nvinfer1; using namespace nvinfer1::plugin; template <typename T> __device__ float IOU(EfficientNMSParameters param, BoxCorner<T> box1, BoxCorner<T> box2) { // Regardless of the selected box coding, IOU is always performed in BoxCorner coding. // The boxes are copied so that they can be reordered without affecting the originals. BoxCorner<T> b1 = box1; BoxCorner<T> b2 = box2; b1.reorder(); b2.reorder(); float intersectArea = BoxCorner<T>::intersect(b1, b2).area(); if (intersectArea <= 0.f) { return 0.f; } float unionArea = b1.area() + b2.area() - intersectArea; if (unionArea <= 0.f) { return 0.f; } return intersectArea / unionArea; } template <typename T, typename Tb> __device__ BoxCorner<T> DecodeBoxes(EfficientNMSParameters param, int boxIdx, int anchorIdx, const Tb* __restrict__ boxesInput, const Tb* __restrict__ anchorsInput) { // The inputs will be in the selected coding format, as well as the decoding function. But the decoded box // will always be returned as BoxCorner. Tb box = boxesInput[boxIdx]; if (!param.boxDecoder) { return BoxCorner<T>(box); } Tb anchor = anchorsInput[anchorIdx]; box.reorder(); anchor.reorder(); return BoxCorner<T>(box.decode(anchor)); } template <typename T, typename Tb> __device__ void MapNMSData(EfficientNMSParameters param, int idx, int imageIdx, const Tb* __restrict__ boxesInput, const Tb* __restrict__ anchorsInput, const int* __restrict__ topClassData, const int* __restrict__ topAnchorsData, const int* __restrict__ topNumData, const T* __restrict__ sortedScoresData, const int* __restrict__ sortedIndexData, T& scoreMap, int& classMap, BoxCorner<T>& boxMap, int& boxIdxMap) { // idx: Holds the NMS box index, within the current batch. // idxSort: Holds the batched NMS box index, which indexes the (filtered, but sorted) score buffer. // scoreMap: Holds the score that corresponds to the indexed box being processed by NMS. if (idx >= topNumData[imageIdx]) { return; } int idxSort = imageIdx * param.numScoreElements + idx; scoreMap = sortedScoresData[idxSort]; // idxMap: Holds the re-mapped index, which indexes the (filtered, but unsorted) buffers. // classMap: Holds the class that corresponds to the idx'th sorted score being processed by NMS. // anchorMap: Holds the anchor that corresponds to the idx'th sorted score being processed by NMS. int idxMap = imageIdx * param.numScoreElements + sortedIndexData[idxSort]; classMap = topClassData[idxMap]; int anchorMap = topAnchorsData[idxMap]; // boxIdxMap: Holds the re-re-mapped index, which indexes the (unfiltered, and unsorted) boxes input buffer. 
boxIdxMap = -1; if (param.shareLocation) // Shape of boxesInput: [batchSize, numAnchors, 1, 4] { boxIdxMap = imageIdx * param.numAnchors + anchorMap; } else // Shape of boxesInput: [batchSize, numAnchors, numClasses, 4] { int batchOffset = imageIdx * param.numAnchors * param.numClasses; int anchorOffset = anchorMap * param.numClasses; boxIdxMap = batchOffset + anchorOffset + classMap; } // anchorIdxMap: Holds the re-re-mapped index, which indexes the (unfiltered, and unsorted) anchors input buffer. int anchorIdxMap = -1; if (param.shareAnchors) // Shape of anchorsInput: [1, numAnchors, 4] { anchorIdxMap = anchorMap; } else // Shape of anchorsInput: [batchSize, numAnchors, 4] { anchorIdxMap = imageIdx * param.numAnchors + anchorMap; } // boxMap: Holds the box that corresponds to the idx'th sorted score being processed by NMS. boxMap = DecodeBoxes<T, Tb>(param, boxIdxMap, anchorIdxMap, boxesInput, anchorsInput); } template <typename T> __device__ void WriteNMSResult(EfficientNMSParameters param, int* __restrict__ numDetectionsOutput, T* __restrict__ nmsScoresOutput, int* __restrict__ nmsClassesOutput, BoxCorner<T>* __restrict__ nmsBoxesOutput, T threadScore, int threadClass, BoxCorner<T> threadBox, int imageIdx, unsigned int resultsCounter) { int outputIdx = imageIdx * param.numOutputBoxes + resultsCounter - 1; if (param.scoreSigmoid) { nmsScoresOutput[outputIdx] = sigmoid_mp(threadScore); } else if (param.scoreBits > 0) { nmsScoresOutput[outputIdx] = add_mp(threadScore, (T) -1); } else { nmsScoresOutput[outputIdx] = threadScore; } nmsClassesOutput[outputIdx] = threadClass; if (param.clipBoxes) { nmsBoxesOutput[outputIdx] = threadBox.clip((T) 0, (T) 1); } else { nmsBoxesOutput[outputIdx] = threadBox; } numDetectionsOutput[imageIdx] = resultsCounter; } __device__ void WriteONNXResult(EfficientNMSParameters param, int* outputIndexData, int* __restrict__ nmsIndicesOutput, int imageIdx, int threadClass, int boxIdxMap) { int index = boxIdxMap % param.numAnchors; int idx = atomicAdd((unsigned int*) &outputIndexData[0], 1); nmsIndicesOutput[idx * 3 + 0] = imageIdx; nmsIndicesOutput[idx * 3 + 1] = threadClass; nmsIndicesOutput[idx * 3 + 2] = index; } __global__ void PadONNXResult(EfficientNMSParameters param, int* outputIndexData, int* __restrict__ nmsIndicesOutput) { if (threadIdx.x > 0) { return; } int pidx = outputIndexData[0] - 1; if (pidx < 0) { return; } for (int idx = pidx + 1; idx < param.batchSize * param.numOutputBoxes; idx++) { nmsIndicesOutput[idx * 3 + 0] = nmsIndicesOutput[pidx * 3 + 0]; nmsIndicesOutput[idx * 3 + 1] = nmsIndicesOutput[pidx * 3 + 1]; nmsIndicesOutput[idx * 3 + 2] = nmsIndicesOutput[pidx * 3 + 2]; } } template <typename T, typename Tb> __global__ void EfficientNMS(EfficientNMSParameters param, const int* topNumData, int* outputIndexData, int* outputClassData, const int* sortedIndexData, const T* __restrict__ sortedScoresData, const int* __restrict__ topClassData, const int* __restrict__ topAnchorsData, const Tb* __restrict__ boxesInput, const Tb* __restrict__ anchorsInput, int* __restrict__ numDetectionsOutput, T* __restrict__ nmsScoresOutput, int* __restrict__ nmsClassesOutput, int* __restrict__ nmsIndicesOutput, BoxCorner<T>* __restrict__ nmsBoxesOutput) { unsigned int thread = threadIdx.x; unsigned int imageIdx = blockIdx.y; unsigned int tileSize = blockDim.x; if (imageIdx >= param.batchSize) { return; } int numSelectedBoxes = min(topNumData[imageIdx], param.numSelectedBoxes); int numTiles = (numSelectedBoxes + tileSize - 1) / tileSize; if (thread >= 
numSelectedBoxes) { return; } __shared__ int blockState; __shared__ unsigned int resultsCounter; if (thread == 0) { blockState = 0; resultsCounter = 0; } int threadState[NMS_TILES]; unsigned int boxIdx[NMS_TILES]; T threadScore[NMS_TILES]; int threadClass[NMS_TILES]; BoxCorner<T> threadBox[NMS_TILES]; int boxIdxMap[NMS_TILES]; for (int tile = 0; tile < numTiles; tile++) { threadState[tile] = 0; boxIdx[tile] = thread + tile * blockDim.x; MapNMSData<T, Tb>(param, boxIdx[tile], imageIdx, boxesInput, anchorsInput, topClassData, topAnchorsData, topNumData, sortedScoresData, sortedIndexData, threadScore[tile], threadClass[tile], threadBox[tile], boxIdxMap[tile]); } // Iterate through all boxes to NMS against. for (int i = 0; i < numSelectedBoxes; i++) { int tile = i / tileSize; if (boxIdx[tile] == i) { // Iteration lead thread, figure out what the other threads should do, // this will be signaled via the blockState shared variable. if (threadState[tile] == -1) { // Thread already dead, this box was already dropped in a previous iteration, // because it had a large IOU overlap with another lead thread previously, so // it would never be kept anyway, therefore it can safely be skip all IOU operations // in this iteration. blockState = -1; // -1 => Signal all threads to skip iteration } else if (threadState[tile] == 0) { // As this box will be kept, this is a good place to find what index in the results buffer it // should have, as this allows to perform an early loop exit if there are enough results. if (resultsCounter >= param.numOutputBoxes) { blockState = -2; // -2 => Signal all threads to do an early loop exit. } else { // Thread is still alive, because it has not had a large enough IOU overlap with // any other kept box previously. Therefore, this box will be kept for sure. However, // we need to check against all other subsequent boxes from this position onward, // to see how those other boxes will behave in future iterations. blockState = 1; // +1 => Signal all (higher index) threads to calculate IOU against this box threadState[tile] = 1; // +1 => Mark this box's thread to be kept and written out to results // If the numOutputBoxesPerClass check is enabled, write the result only if the limit for this // class on this image has not been reached yet. Other than (possibly) skipping the write, this // won't affect anything else in the NMS threading. bool write = true; if (param.numOutputBoxesPerClass >= 0) { int classCounterIdx = imageIdx * param.numClasses + threadClass[tile]; write = (outputClassData[classCounterIdx] < param.numOutputBoxesPerClass); outputClassData[classCounterIdx]++; } if (write) { // This branch is visited by one thread per iteration, so it's safe to do non-atomic increments. resultsCounter++; if (param.outputONNXIndices) { WriteONNXResult( param, outputIndexData, nmsIndicesOutput, imageIdx, threadClass[tile], boxIdxMap[tile]); } else { WriteNMSResult<T>(param, numDetectionsOutput, nmsScoresOutput, nmsClassesOutput, nmsBoxesOutput, threadScore[tile], threadClass[tile], threadBox[tile], imageIdx, resultsCounter); } } } } else { // This state should never be reached, but just in case... blockState = 0; // 0 => Signal all threads to not do any updates, nothing happens. } } __syncthreads(); if (blockState == -2) { // This is the signal to exit from the loop. return; } if (blockState == -1) { // This is the signal for all threads to just skip this iteration, as no IOU's need to be checked. continue; } // Grab a box and class to test the current box against. 
The test box corresponds to iteration i, // therefore it will have a lower index than the current thread box, and will therefore have a higher score // than the current box because it's located "before" in the sorted score list. T testScore; int testClass; BoxCorner<T> testBox; int testBoxIdxMap; MapNMSData<T, Tb>(param, i, imageIdx, boxesInput, anchorsInput, topClassData, topAnchorsData, topNumData, sortedScoresData, sortedIndexData, testScore, testClass, testBox, testBoxIdxMap); for (int tile = 0; tile < numTiles; tile++) { bool ignoreClass = true; if (!param.classAgnostic) { ignoreClass = threadClass[tile] == testClass; } // IOU if (boxIdx[tile] > i && // Make sure two different boxes are being tested, and that it's a higher index; boxIdx[tile] < numSelectedBoxes && // Make sure the box is within numSelectedBoxes; blockState == 1 && // Signal that allows IOU checks to be performed; threadState[tile] == 0 && // Make sure this box hasn't been either dropped or kept already; ignoreClass && // Compare only boxes of matching classes when classAgnostic is false; lte_mp(threadScore[tile], testScore) && // Make sure the sorting order of scores is as expected; IOU<T>(param, threadBox[tile], testBox) >= param.iouThreshold) // And... IOU overlap. { // Current box overlaps with the box tested in this iteration, this box will be skipped. threadState[tile] = -1; // -1 => Mark this box's thread to be dropped. } } } } template <typename T> hipError_t EfficientNMSLauncher(EfficientNMSParameters& param, int* topNumData, int* outputIndexData, int* outputClassData, int* sortedIndexData, T* sortedScoresData, int* topClassData, int* topAnchorsData, const void* boxesInput, const void* anchorsInput, int* numDetectionsOutput, T* nmsScoresOutput, int* nmsClassesOutput, int* nmsIndicesOutput, void* nmsBoxesOutput, hipStream_t stream) { unsigned int tileSize = param.numSelectedBoxes / NMS_TILES; if (param.numSelectedBoxes <= 512) { tileSize = 512; } if (param.numSelectedBoxes <= 256) { tileSize = 256; } const dim3 blockSize = {tileSize, 1, 1}; const dim3 gridSize = {1, (unsigned int) param.batchSize, 1}; if (param.boxCoding == 0) { hipLaunchKernelGGL(( EfficientNMS<T, BoxCorner<T>>), dim3(gridSize), dim3(blockSize), 0, stream, param, topNumData, outputIndexData, outputClassData, sortedIndexData, sortedScoresData, topClassData, topAnchorsData, (BoxCorner<T>*) boxesInput, (BoxCorner<T>*) anchorsInput, numDetectionsOutput, nmsScoresOutput, nmsClassesOutput, nmsIndicesOutput, (BoxCorner<T>*) nmsBoxesOutput); } else if (param.boxCoding == 1) { // Note that nmsBoxesOutput is always coded as BoxCorner<T>, regardless of the input coding type. 
hipLaunchKernelGGL(( EfficientNMS<T, BoxCenterSize<T>>), dim3(gridSize), dim3(blockSize), 0, stream, param, topNumData, outputIndexData, outputClassData, sortedIndexData, sortedScoresData, topClassData, topAnchorsData, (BoxCenterSize<T>*) boxesInput, (BoxCenterSize<T>*) anchorsInput, numDetectionsOutput, nmsScoresOutput, nmsClassesOutput, nmsIndicesOutput, (BoxCorner<T>*) nmsBoxesOutput); } if (param.outputONNXIndices) { hipLaunchKernelGGL(( PadONNXResult), dim3(1), dim3(1), 0, stream, param, outputIndexData, nmsIndicesOutput); } return hipGetLastError(); } __global__ void EfficientNMSFilterSegments(EfficientNMSParameters param, const int* __restrict__ topNumData, int* __restrict__ topOffsetsStartData, int* __restrict__ topOffsetsEndData) { int imageIdx = threadIdx.x; if (imageIdx > param.batchSize) { return; } topOffsetsStartData[imageIdx] = imageIdx * param.numScoreElements; topOffsetsEndData[imageIdx] = imageIdx * param.numScoreElements + topNumData[imageIdx]; } template <typename T> __global__ void EfficientNMSFilter(EfficientNMSParameters param, const T* __restrict__ scoresInput, int* __restrict__ topNumData, int* __restrict__ topIndexData, int* __restrict__ topAnchorsData, T* __restrict__ topScoresData, int* __restrict__ topClassData) { int elementIdx = blockDim.x * blockIdx.x + threadIdx.x; int imageIdx = blockDim.y * blockIdx.y + threadIdx.y; // Boundary Conditions if (elementIdx >= param.numScoreElements || imageIdx >= param.batchSize) { return; } // Shape of scoresInput: [batchSize, numAnchors, numClasses] int scoresInputIdx = imageIdx * param.numScoreElements + elementIdx; // For each class, check its corresponding score if it crosses the threshold, and if so select this anchor, // and keep track of the maximum score and the corresponding (argmax) class id T score = scoresInput[scoresInputIdx]; if (gte_mp(score, (T) param.scoreThreshold)) { // Unpack the class and anchor index from the element index int classIdx = elementIdx % param.numClasses; int anchorIdx = elementIdx / param.numClasses; // If this is a background class, ignore it. if (classIdx == param.backgroundClass) { return; } // Use an atomic to find an open slot where to write the selected anchor data. 
if (topNumData[imageIdx] >= param.numScoreElements) { return; } int selectedIdx = atomicAdd((unsigned int*) &topNumData[imageIdx], 1); if (selectedIdx >= param.numScoreElements) { topNumData[imageIdx] = param.numScoreElements; return; } // Shape of topScoresData / topClassData: [batchSize, numScoreElements] int topIdx = imageIdx * param.numScoreElements + selectedIdx; if (param.scoreBits > 0) { score = add_mp(score, (T) 1); if (gt_mp(score, (T) (2.f - 1.f / 1024.f))) { // Ensure the incremented score fits in the mantissa without changing the exponent score = (2.f - 1.f / 1024.f); } } topIndexData[topIdx] = selectedIdx; topAnchorsData[topIdx] = anchorIdx; topScoresData[topIdx] = score; topClassData[topIdx] = classIdx; } } template <typename T> __global__ void EfficientNMSDenseIndex(EfficientNMSParameters param, int* __restrict__ topNumData, int* __restrict__ topIndexData, int* __restrict__ topAnchorsData, int* __restrict__ topOffsetsStartData, int* __restrict__ topOffsetsEndData, T* __restrict__ topScoresData, int* __restrict__ topClassData) { int elementIdx = blockDim.x * blockIdx.x + threadIdx.x; int imageIdx = blockDim.y * blockIdx.y + threadIdx.y; if (elementIdx >= param.numScoreElements || imageIdx >= param.batchSize) { return; } int dataIdx = imageIdx * param.numScoreElements + elementIdx; int anchorIdx = elementIdx / param.numClasses; int classIdx = elementIdx % param.numClasses; if (param.scoreBits > 0) { T score = topScoresData[dataIdx]; if (lt_mp(score, (T) param.scoreThreshold)) { score = (T) 1; } else if (classIdx == param.backgroundClass) { score = (T) 1; } else { score = add_mp(score, (T) 1); if (gt_mp(score, (T) (2.f - 1.f / 1024.f))) { // Ensure the incremented score fits in the mantissa without changing the exponent score = (2.f - 1.f / 1024.f); } } topScoresData[dataIdx] = score; } else { T score = topScoresData[dataIdx]; if (lt_mp(score, (T) param.scoreThreshold)) { topScoresData[dataIdx] = -(1 << 15); } else if (classIdx == param.backgroundClass) { topScoresData[dataIdx] = -(1 << 15); } } topIndexData[dataIdx] = elementIdx; topAnchorsData[dataIdx] = anchorIdx; topClassData[dataIdx] = classIdx; if (elementIdx == 0) { // Saturate counters topNumData[imageIdx] = param.numScoreElements; topOffsetsStartData[imageIdx] = imageIdx * param.numScoreElements; topOffsetsEndData[imageIdx] = (imageIdx + 1) * param.numScoreElements; } } template <typename T> hipError_t EfficientNMSFilterLauncher(EfficientNMSParameters& param, const T* scoresInput, int* topNumData, int* topIndexData, int* topAnchorsData, int* topOffsetsStartData, int* topOffsetsEndData, T* topScoresData, int* topClassData, hipStream_t stream) { const unsigned int elementsPerBlock = 512; const unsigned int imagesPerBlock = 1; const unsigned int elementBlocks = (param.numScoreElements + elementsPerBlock - 1) / elementsPerBlock; const unsigned int imageBlocks = (param.batchSize + imagesPerBlock - 1) / imagesPerBlock; const dim3 blockSize = {elementsPerBlock, imagesPerBlock, 1}; const dim3 gridSize = {elementBlocks, imageBlocks, 1}; float kernelSelectThreshold = 0.007f; if (param.scoreSigmoid) { // Inverse Sigmoid if (param.scoreThreshold <= 0.f) { param.scoreThreshold = -(1 << 15); } else { param.scoreThreshold = logf(param.scoreThreshold / (1.f - param.scoreThreshold)); } kernelSelectThreshold = logf(kernelSelectThreshold / (1.f - kernelSelectThreshold)); // Disable Score Bits Optimization param.scoreBits = -1; } if (param.scoreThreshold < kernelSelectThreshold) { // A full copy of the buffer is necessary because sorting 
will scramble the input data otherwise. PLUGIN_CHECK_CUDA(hipMemcpyAsync(topScoresData, scoresInput, param.batchSize * param.numScoreElements * sizeof(T), hipMemcpyDeviceToDevice, stream)); hipLaunchKernelGGL(( EfficientNMSDenseIndex<T>), dim3(gridSize), dim3(blockSize), 0, stream, param, topNumData, topIndexData, topAnchorsData, topOffsetsStartData, topOffsetsEndData, topScoresData, topClassData); } else { hipLaunchKernelGGL(( EfficientNMSFilter<T>), dim3(gridSize), dim3(blockSize), 0, stream, param, scoresInput, topNumData, topIndexData, topAnchorsData, topScoresData, topClassData); hipLaunchKernelGGL(( EfficientNMSFilterSegments), dim3(1), dim3(param.batchSize), 0, stream, param, topNumData, topOffsetsStartData, topOffsetsEndData); } return hipGetLastError(); } template <typename T> size_t EfficientNMSSortWorkspaceSize(int batchSize, int numScoreElements) { size_t sortedWorkspaceSize = 0; cub::DoubleBuffer<T> keysDB(nullptr, nullptr); cub::DoubleBuffer<int> valuesDB(nullptr, nullptr); hipcub::DeviceSegmentedRadixSort::SortPairsDescending(nullptr, sortedWorkspaceSize, keysDB, valuesDB, numScoreElements, batchSize, (const int*) nullptr, (const int*) nullptr); return sortedWorkspaceSize; } size_t EfficientNMSWorkspaceSize(int batchSize, int numScoreElements, int numClasses, DataType datatype) { size_t total = 0; const size_t align = 256; // Counters // 3 for Filtering // 1 for Output Indexing // C for Max per Class Limiting size_t size = (3 + 1 + numClasses) * batchSize * sizeof(int); total += size + (size % align ? align - (size % align) : 0); // Int Buffers for (int i = 0; i < 4; i++) { size = batchSize * numScoreElements * sizeof(int); total += size + (size % align ? align - (size % align) : 0); } // Float Buffers for (int i = 0; i < 2; i++) { size = batchSize * numScoreElements * dataTypeSize(datatype); total += size + (size % align ? align - (size % align) : 0); } // Sort Workspace if (datatype == DataType::kHALF) { size = EfficientNMSSortWorkspaceSize<__half>(batchSize, numScoreElements); total += size + (size % align ? align - (size % align) : 0); } else if (datatype == DataType::kFLOAT) { size = EfficientNMSSortWorkspaceSize<float>(batchSize, numScoreElements); total += size + (size % align ? align - (size % align) : 0); } return total; } template <typename T> T* EfficientNMSWorkspace(void* workspace, size_t& offset, size_t elements) { T* buffer = (T*) ((size_t) workspace + offset); size_t align = 256; size_t size = elements * sizeof(T); size_t sizeAligned = size + (size % align ? 
align - (size % align) : 0); offset += sizeAligned; return buffer; } template <typename T> pluginStatus_t EfficientNMSDispatch(EfficientNMSParameters param, const void* boxesInput, const void* scoresInput, const void* anchorsInput, void* numDetectionsOutput, void* nmsBoxesOutput, void* nmsScoresOutput, void* nmsClassesOutput, void* nmsIndicesOutput, void* workspace, hipStream_t stream) { // Clear Outputs (not all elements will get overwritten by the kernels, so safer to clear everything out) if (param.outputONNXIndices) { CSC(hipMemsetAsync(nmsIndicesOutput, 0xFF, param.batchSize * param.numOutputBoxes * 3 * sizeof(int), stream), STATUS_FAILURE); } else { CSC(hipMemsetAsync(numDetectionsOutput, 0x00, param.batchSize * sizeof(int), stream), STATUS_FAILURE); CSC(hipMemsetAsync(nmsScoresOutput, 0x00, param.batchSize * param.numOutputBoxes * sizeof(T), stream), STATUS_FAILURE); CSC(hipMemsetAsync(nmsBoxesOutput, 0x00, param.batchSize * param.numOutputBoxes * 4 * sizeof(T), stream), STATUS_FAILURE); CSC(hipMemsetAsync(nmsClassesOutput, 0x00, param.batchSize * param.numOutputBoxes * sizeof(int), stream), STATUS_FAILURE); } // Empty Inputs if (param.numScoreElements < 1) { return STATUS_SUCCESS; } // Counters Workspace size_t workspaceOffset = 0; int countersTotalSize = (3 + 1 + param.numClasses) * param.batchSize; int* topNumData = EfficientNMSWorkspace<int>(workspace, workspaceOffset, countersTotalSize); int* topOffsetsStartData = topNumData + param.batchSize; int* topOffsetsEndData = topNumData + 2 * param.batchSize; int* outputIndexData = topNumData + 3 * param.batchSize; int* outputClassData = topNumData + 4 * param.batchSize; CSC(hipMemsetAsync(topNumData, 0x00, countersTotalSize * sizeof(int), stream), STATUS_FAILURE); hipError_t status = hipGetLastError(); CSC(status, STATUS_FAILURE); // Other Buffers Workspace int* topIndexData = EfficientNMSWorkspace<int>(workspace, workspaceOffset, param.batchSize * param.numScoreElements); int* topClassData = EfficientNMSWorkspace<int>(workspace, workspaceOffset, param.batchSize * param.numScoreElements); int* topAnchorsData = EfficientNMSWorkspace<int>(workspace, workspaceOffset, param.batchSize * param.numScoreElements); int* sortedIndexData = EfficientNMSWorkspace<int>(workspace, workspaceOffset, param.batchSize * param.numScoreElements); T* topScoresData = EfficientNMSWorkspace<T>(workspace, workspaceOffset, param.batchSize * param.numScoreElements); T* sortedScoresData = EfficientNMSWorkspace<T>(workspace, workspaceOffset, param.batchSize * param.numScoreElements); size_t sortedWorkspaceSize = EfficientNMSSortWorkspaceSize<T>(param.batchSize, param.numScoreElements); char* sortedWorkspaceData = EfficientNMSWorkspace<char>(workspace, workspaceOffset, sortedWorkspaceSize); cub::DoubleBuffer<T> scoresDB(topScoresData, sortedScoresData); cub::DoubleBuffer<int> indexDB(topIndexData, sortedIndexData); // Kernels status = EfficientNMSFilterLauncher<T>(param, (T*) scoresInput, topNumData, topIndexData, topAnchorsData, topOffsetsStartData, topOffsetsEndData, topScoresData, topClassData, stream); CSC(status, STATUS_FAILURE); status = hipcub::DeviceSegmentedRadixSort::SortPairsDescending(sortedWorkspaceData, sortedWorkspaceSize, scoresDB, indexDB, param.batchSize * param.numScoreElements, param.batchSize, topOffsetsStartData, topOffsetsEndData, param.scoreBits > 0 ? (10 - param.scoreBits) : 0, param.scoreBits > 0 ? 
10 : sizeof(T) * 8, stream); CSC(status, STATUS_FAILURE); status = EfficientNMSLauncher<T>(param, topNumData, outputIndexData, outputClassData, indexDB.Current(), scoresDB.Current(), topClassData, topAnchorsData, boxesInput, anchorsInput, (int*) numDetectionsOutput, (T*) nmsScoresOutput, (int*) nmsClassesOutput, (int*) nmsIndicesOutput, nmsBoxesOutput, stream); CSC(status, STATUS_FAILURE); return STATUS_SUCCESS; } pluginStatus_t EfficientNMSInference(EfficientNMSParameters param, const void* boxesInput, const void* scoresInput, const void* anchorsInput, void* numDetectionsOutput, void* nmsBoxesOutput, void* nmsScoresOutput, void* nmsClassesOutput, void* nmsIndicesOutput, void* workspace, hipStream_t stream) { if (param.datatype == DataType::kFLOAT) { param.scoreBits = -1; return EfficientNMSDispatch<float>(param, boxesInput, scoresInput, anchorsInput, numDetectionsOutput, nmsBoxesOutput, nmsScoresOutput, nmsClassesOutput, nmsIndicesOutput, workspace, stream); } else if (param.datatype == DataType::kHALF) { if (param.scoreBits <= 0 || param.scoreBits > 10) { param.scoreBits = -1; } return EfficientNMSDispatch<__half>(param, boxesInput, scoresInput, anchorsInput, numDetectionsOutput, nmsBoxesOutput, nmsScoresOutput, nmsClassesOutput, nmsIndicesOutput, workspace, stream); } else { return STATUS_NOT_SUPPORTED; } }
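One detail of the filter launcher above (identical in the .cu below) that is easy to miss: when scoreSigmoid is enabled, the sigmoid is not applied to every candidate score. Instead the user's threshold is moved into logit space with scoreThreshold = logf(t / (1 - t)), the filter kernels compare raw network outputs against that value, and sigmoid_mp is applied only to the few surviving scores in WriteNMSResult. The small host-side check below (an illustrative addition, not plugin code) confirms that the logit transform is the exact inverse of that final sigmoid.

// Hedged check that thresholding in logit space matches thresholding after a sigmoid.
#include <cassert>
#include <cmath>
#include <cstdio>

static float sigmoidf(float x) { return 1.f / (1.f + expf(-x)); }
static float logitf(float p)   { return logf(p / (1.f - p)); }

int main()
{
    const float thresholds[] = {0.007f, 0.05f, 0.25f, 0.5f, 0.9f};
    for (float t : thresholds)
    {
        float x = logitf(t);                     // threshold moved to logit space
        assert(fabsf(sigmoidf(x) - t) < 1e-5f);  // sigmoid(logit(t)) == t
        printf("t=%.3f  logit=%.4f\n", t, x);
    }
    return 0;
}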
4bcf1e9ee4e3eeb1b951ee7f267e8e7c17f7ee0e.cu
/* * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common/bboxUtils.h" #include "cub/cub.cuh" #include "cuda_runtime_api.h" #include "efficientNMSInference.cuh" #include "efficientNMSInference.h" #define NMS_TILES 5 using namespace nvinfer1; using namespace nvinfer1::plugin; template <typename T> __device__ float IOU(EfficientNMSParameters param, BoxCorner<T> box1, BoxCorner<T> box2) { // Regardless of the selected box coding, IOU is always performed in BoxCorner coding. // The boxes are copied so that they can be reordered without affecting the originals. BoxCorner<T> b1 = box1; BoxCorner<T> b2 = box2; b1.reorder(); b2.reorder(); float intersectArea = BoxCorner<T>::intersect(b1, b2).area(); if (intersectArea <= 0.f) { return 0.f; } float unionArea = b1.area() + b2.area() - intersectArea; if (unionArea <= 0.f) { return 0.f; } return intersectArea / unionArea; } template <typename T, typename Tb> __device__ BoxCorner<T> DecodeBoxes(EfficientNMSParameters param, int boxIdx, int anchorIdx, const Tb* __restrict__ boxesInput, const Tb* __restrict__ anchorsInput) { // The inputs will be in the selected coding format, as well as the decoding function. But the decoded box // will always be returned as BoxCorner. Tb box = boxesInput[boxIdx]; if (!param.boxDecoder) { return BoxCorner<T>(box); } Tb anchor = anchorsInput[anchorIdx]; box.reorder(); anchor.reorder(); return BoxCorner<T>(box.decode(anchor)); } template <typename T, typename Tb> __device__ void MapNMSData(EfficientNMSParameters param, int idx, int imageIdx, const Tb* __restrict__ boxesInput, const Tb* __restrict__ anchorsInput, const int* __restrict__ topClassData, const int* __restrict__ topAnchorsData, const int* __restrict__ topNumData, const T* __restrict__ sortedScoresData, const int* __restrict__ sortedIndexData, T& scoreMap, int& classMap, BoxCorner<T>& boxMap, int& boxIdxMap) { // idx: Holds the NMS box index, within the current batch. // idxSort: Holds the batched NMS box index, which indexes the (filtered, but sorted) score buffer. // scoreMap: Holds the score that corresponds to the indexed box being processed by NMS. if (idx >= topNumData[imageIdx]) { return; } int idxSort = imageIdx * param.numScoreElements + idx; scoreMap = sortedScoresData[idxSort]; // idxMap: Holds the re-mapped index, which indexes the (filtered, but unsorted) buffers. // classMap: Holds the class that corresponds to the idx'th sorted score being processed by NMS. // anchorMap: Holds the anchor that corresponds to the idx'th sorted score being processed by NMS. int idxMap = imageIdx * param.numScoreElements + sortedIndexData[idxSort]; classMap = topClassData[idxMap]; int anchorMap = topAnchorsData[idxMap]; // boxIdxMap: Holds the re-re-mapped index, which indexes the (unfiltered, and unsorted) boxes input buffer. 
boxIdxMap = -1; if (param.shareLocation) // Shape of boxesInput: [batchSize, numAnchors, 1, 4] { boxIdxMap = imageIdx * param.numAnchors + anchorMap; } else // Shape of boxesInput: [batchSize, numAnchors, numClasses, 4] { int batchOffset = imageIdx * param.numAnchors * param.numClasses; int anchorOffset = anchorMap * param.numClasses; boxIdxMap = batchOffset + anchorOffset + classMap; } // anchorIdxMap: Holds the re-re-mapped index, which indexes the (unfiltered, and unsorted) anchors input buffer. int anchorIdxMap = -1; if (param.shareAnchors) // Shape of anchorsInput: [1, numAnchors, 4] { anchorIdxMap = anchorMap; } else // Shape of anchorsInput: [batchSize, numAnchors, 4] { anchorIdxMap = imageIdx * param.numAnchors + anchorMap; } // boxMap: Holds the box that corresponds to the idx'th sorted score being processed by NMS. boxMap = DecodeBoxes<T, Tb>(param, boxIdxMap, anchorIdxMap, boxesInput, anchorsInput); } template <typename T> __device__ void WriteNMSResult(EfficientNMSParameters param, int* __restrict__ numDetectionsOutput, T* __restrict__ nmsScoresOutput, int* __restrict__ nmsClassesOutput, BoxCorner<T>* __restrict__ nmsBoxesOutput, T threadScore, int threadClass, BoxCorner<T> threadBox, int imageIdx, unsigned int resultsCounter) { int outputIdx = imageIdx * param.numOutputBoxes + resultsCounter - 1; if (param.scoreSigmoid) { nmsScoresOutput[outputIdx] = sigmoid_mp(threadScore); } else if (param.scoreBits > 0) { nmsScoresOutput[outputIdx] = add_mp(threadScore, (T) -1); } else { nmsScoresOutput[outputIdx] = threadScore; } nmsClassesOutput[outputIdx] = threadClass; if (param.clipBoxes) { nmsBoxesOutput[outputIdx] = threadBox.clip((T) 0, (T) 1); } else { nmsBoxesOutput[outputIdx] = threadBox; } numDetectionsOutput[imageIdx] = resultsCounter; } __device__ void WriteONNXResult(EfficientNMSParameters param, int* outputIndexData, int* __restrict__ nmsIndicesOutput, int imageIdx, int threadClass, int boxIdxMap) { int index = boxIdxMap % param.numAnchors; int idx = atomicAdd((unsigned int*) &outputIndexData[0], 1); nmsIndicesOutput[idx * 3 + 0] = imageIdx; nmsIndicesOutput[idx * 3 + 1] = threadClass; nmsIndicesOutput[idx * 3 + 2] = index; } __global__ void PadONNXResult(EfficientNMSParameters param, int* outputIndexData, int* __restrict__ nmsIndicesOutput) { if (threadIdx.x > 0) { return; } int pidx = outputIndexData[0] - 1; if (pidx < 0) { return; } for (int idx = pidx + 1; idx < param.batchSize * param.numOutputBoxes; idx++) { nmsIndicesOutput[idx * 3 + 0] = nmsIndicesOutput[pidx * 3 + 0]; nmsIndicesOutput[idx * 3 + 1] = nmsIndicesOutput[pidx * 3 + 1]; nmsIndicesOutput[idx * 3 + 2] = nmsIndicesOutput[pidx * 3 + 2]; } } template <typename T, typename Tb> __global__ void EfficientNMS(EfficientNMSParameters param, const int* topNumData, int* outputIndexData, int* outputClassData, const int* sortedIndexData, const T* __restrict__ sortedScoresData, const int* __restrict__ topClassData, const int* __restrict__ topAnchorsData, const Tb* __restrict__ boxesInput, const Tb* __restrict__ anchorsInput, int* __restrict__ numDetectionsOutput, T* __restrict__ nmsScoresOutput, int* __restrict__ nmsClassesOutput, int* __restrict__ nmsIndicesOutput, BoxCorner<T>* __restrict__ nmsBoxesOutput) { unsigned int thread = threadIdx.x; unsigned int imageIdx = blockIdx.y; unsigned int tileSize = blockDim.x; if (imageIdx >= param.batchSize) { return; } int numSelectedBoxes = min(topNumData[imageIdx], param.numSelectedBoxes); int numTiles = (numSelectedBoxes + tileSize - 1) / tileSize; if (thread >= 
numSelectedBoxes) { return; } __shared__ int blockState; __shared__ unsigned int resultsCounter; if (thread == 0) { blockState = 0; resultsCounter = 0; } int threadState[NMS_TILES]; unsigned int boxIdx[NMS_TILES]; T threadScore[NMS_TILES]; int threadClass[NMS_TILES]; BoxCorner<T> threadBox[NMS_TILES]; int boxIdxMap[NMS_TILES]; for (int tile = 0; tile < numTiles; tile++) { threadState[tile] = 0; boxIdx[tile] = thread + tile * blockDim.x; MapNMSData<T, Tb>(param, boxIdx[tile], imageIdx, boxesInput, anchorsInput, topClassData, topAnchorsData, topNumData, sortedScoresData, sortedIndexData, threadScore[tile], threadClass[tile], threadBox[tile], boxIdxMap[tile]); } // Iterate through all boxes to NMS against. for (int i = 0; i < numSelectedBoxes; i++) { int tile = i / tileSize; if (boxIdx[tile] == i) { // Iteration lead thread, figure out what the other threads should do, // this will be signaled via the blockState shared variable. if (threadState[tile] == -1) { // Thread already dead, this box was already dropped in a previous iteration, // because it had a large IOU overlap with another lead thread previously, so // it would never be kept anyway, therefore it can safely be skip all IOU operations // in this iteration. blockState = -1; // -1 => Signal all threads to skip iteration } else if (threadState[tile] == 0) { // As this box will be kept, this is a good place to find what index in the results buffer it // should have, as this allows to perform an early loop exit if there are enough results. if (resultsCounter >= param.numOutputBoxes) { blockState = -2; // -2 => Signal all threads to do an early loop exit. } else { // Thread is still alive, because it has not had a large enough IOU overlap with // any other kept box previously. Therefore, this box will be kept for sure. However, // we need to check against all other subsequent boxes from this position onward, // to see how those other boxes will behave in future iterations. blockState = 1; // +1 => Signal all (higher index) threads to calculate IOU against this box threadState[tile] = 1; // +1 => Mark this box's thread to be kept and written out to results // If the numOutputBoxesPerClass check is enabled, write the result only if the limit for this // class on this image has not been reached yet. Other than (possibly) skipping the write, this // won't affect anything else in the NMS threading. bool write = true; if (param.numOutputBoxesPerClass >= 0) { int classCounterIdx = imageIdx * param.numClasses + threadClass[tile]; write = (outputClassData[classCounterIdx] < param.numOutputBoxesPerClass); outputClassData[classCounterIdx]++; } if (write) { // This branch is visited by one thread per iteration, so it's safe to do non-atomic increments. resultsCounter++; if (param.outputONNXIndices) { WriteONNXResult( param, outputIndexData, nmsIndicesOutput, imageIdx, threadClass[tile], boxIdxMap[tile]); } else { WriteNMSResult<T>(param, numDetectionsOutput, nmsScoresOutput, nmsClassesOutput, nmsBoxesOutput, threadScore[tile], threadClass[tile], threadBox[tile], imageIdx, resultsCounter); } } } } else { // This state should never be reached, but just in case... blockState = 0; // 0 => Signal all threads to not do any updates, nothing happens. } } __syncthreads(); if (blockState == -2) { // This is the signal to exit from the loop. return; } if (blockState == -1) { // This is the signal for all threads to just skip this iteration, as no IOU's need to be checked. continue; } // Grab a box and class to test the current box against. 
The test box corresponds to iteration i, // therefore it will have a lower index than the current thread box, and will therefore have a higher score // than the current box because it's located "before" in the sorted score list. T testScore; int testClass; BoxCorner<T> testBox; int testBoxIdxMap; MapNMSData<T, Tb>(param, i, imageIdx, boxesInput, anchorsInput, topClassData, topAnchorsData, topNumData, sortedScoresData, sortedIndexData, testScore, testClass, testBox, testBoxIdxMap); for (int tile = 0; tile < numTiles; tile++) { bool ignoreClass = true; if (!param.classAgnostic) { ignoreClass = threadClass[tile] == testClass; } // IOU if (boxIdx[tile] > i && // Make sure two different boxes are being tested, and that it's a higher index; boxIdx[tile] < numSelectedBoxes && // Make sure the box is within numSelectedBoxes; blockState == 1 && // Signal that allows IOU checks to be performed; threadState[tile] == 0 && // Make sure this box hasn't been either dropped or kept already; ignoreClass && // Compare only boxes of matching classes when classAgnostic is false; lte_mp(threadScore[tile], testScore) && // Make sure the sorting order of scores is as expected; IOU<T>(param, threadBox[tile], testBox) >= param.iouThreshold) // And... IOU overlap. { // Current box overlaps with the box tested in this iteration, this box will be skipped. threadState[tile] = -1; // -1 => Mark this box's thread to be dropped. } } } } template <typename T> cudaError_t EfficientNMSLauncher(EfficientNMSParameters& param, int* topNumData, int* outputIndexData, int* outputClassData, int* sortedIndexData, T* sortedScoresData, int* topClassData, int* topAnchorsData, const void* boxesInput, const void* anchorsInput, int* numDetectionsOutput, T* nmsScoresOutput, int* nmsClassesOutput, int* nmsIndicesOutput, void* nmsBoxesOutput, cudaStream_t stream) { unsigned int tileSize = param.numSelectedBoxes / NMS_TILES; if (param.numSelectedBoxes <= 512) { tileSize = 512; } if (param.numSelectedBoxes <= 256) { tileSize = 256; } const dim3 blockSize = {tileSize, 1, 1}; const dim3 gridSize = {1, (unsigned int) param.batchSize, 1}; if (param.boxCoding == 0) { EfficientNMS<T, BoxCorner<T>><<<gridSize, blockSize, 0, stream>>>(param, topNumData, outputIndexData, outputClassData, sortedIndexData, sortedScoresData, topClassData, topAnchorsData, (BoxCorner<T>*) boxesInput, (BoxCorner<T>*) anchorsInput, numDetectionsOutput, nmsScoresOutput, nmsClassesOutput, nmsIndicesOutput, (BoxCorner<T>*) nmsBoxesOutput); } else if (param.boxCoding == 1) { // Note that nmsBoxesOutput is always coded as BoxCorner<T>, regardless of the input coding type. 
EfficientNMS<T, BoxCenterSize<T>><<<gridSize, blockSize, 0, stream>>>(param, topNumData, outputIndexData, outputClassData, sortedIndexData, sortedScoresData, topClassData, topAnchorsData, (BoxCenterSize<T>*) boxesInput, (BoxCenterSize<T>*) anchorsInput, numDetectionsOutput, nmsScoresOutput, nmsClassesOutput, nmsIndicesOutput, (BoxCorner<T>*) nmsBoxesOutput); } if (param.outputONNXIndices) { PadONNXResult<<<1, 1, 0, stream>>>(param, outputIndexData, nmsIndicesOutput); } return cudaGetLastError(); } __global__ void EfficientNMSFilterSegments(EfficientNMSParameters param, const int* __restrict__ topNumData, int* __restrict__ topOffsetsStartData, int* __restrict__ topOffsetsEndData) { int imageIdx = threadIdx.x; if (imageIdx > param.batchSize) { return; } topOffsetsStartData[imageIdx] = imageIdx * param.numScoreElements; topOffsetsEndData[imageIdx] = imageIdx * param.numScoreElements + topNumData[imageIdx]; } template <typename T> __global__ void EfficientNMSFilter(EfficientNMSParameters param, const T* __restrict__ scoresInput, int* __restrict__ topNumData, int* __restrict__ topIndexData, int* __restrict__ topAnchorsData, T* __restrict__ topScoresData, int* __restrict__ topClassData) { int elementIdx = blockDim.x * blockIdx.x + threadIdx.x; int imageIdx = blockDim.y * blockIdx.y + threadIdx.y; // Boundary Conditions if (elementIdx >= param.numScoreElements || imageIdx >= param.batchSize) { return; } // Shape of scoresInput: [batchSize, numAnchors, numClasses] int scoresInputIdx = imageIdx * param.numScoreElements + elementIdx; // For each class, check its corresponding score if it crosses the threshold, and if so select this anchor, // and keep track of the maximum score and the corresponding (argmax) class id T score = scoresInput[scoresInputIdx]; if (gte_mp(score, (T) param.scoreThreshold)) { // Unpack the class and anchor index from the element index int classIdx = elementIdx % param.numClasses; int anchorIdx = elementIdx / param.numClasses; // If this is a background class, ignore it. if (classIdx == param.backgroundClass) { return; } // Use an atomic to find an open slot where to write the selected anchor data. 
if (topNumData[imageIdx] >= param.numScoreElements) { return; } int selectedIdx = atomicAdd((unsigned int*) &topNumData[imageIdx], 1); if (selectedIdx >= param.numScoreElements) { topNumData[imageIdx] = param.numScoreElements; return; } // Shape of topScoresData / topClassData: [batchSize, numScoreElements] int topIdx = imageIdx * param.numScoreElements + selectedIdx; if (param.scoreBits > 0) { score = add_mp(score, (T) 1); if (gt_mp(score, (T) (2.f - 1.f / 1024.f))) { // Ensure the incremented score fits in the mantissa without changing the exponent score = (2.f - 1.f / 1024.f); } } topIndexData[topIdx] = selectedIdx; topAnchorsData[topIdx] = anchorIdx; topScoresData[topIdx] = score; topClassData[topIdx] = classIdx; } } template <typename T> __global__ void EfficientNMSDenseIndex(EfficientNMSParameters param, int* __restrict__ topNumData, int* __restrict__ topIndexData, int* __restrict__ topAnchorsData, int* __restrict__ topOffsetsStartData, int* __restrict__ topOffsetsEndData, T* __restrict__ topScoresData, int* __restrict__ topClassData) { int elementIdx = blockDim.x * blockIdx.x + threadIdx.x; int imageIdx = blockDim.y * blockIdx.y + threadIdx.y; if (elementIdx >= param.numScoreElements || imageIdx >= param.batchSize) { return; } int dataIdx = imageIdx * param.numScoreElements + elementIdx; int anchorIdx = elementIdx / param.numClasses; int classIdx = elementIdx % param.numClasses; if (param.scoreBits > 0) { T score = topScoresData[dataIdx]; if (lt_mp(score, (T) param.scoreThreshold)) { score = (T) 1; } else if (classIdx == param.backgroundClass) { score = (T) 1; } else { score = add_mp(score, (T) 1); if (gt_mp(score, (T) (2.f - 1.f / 1024.f))) { // Ensure the incremented score fits in the mantissa without changing the exponent score = (2.f - 1.f / 1024.f); } } topScoresData[dataIdx] = score; } else { T score = topScoresData[dataIdx]; if (lt_mp(score, (T) param.scoreThreshold)) { topScoresData[dataIdx] = -(1 << 15); } else if (classIdx == param.backgroundClass) { topScoresData[dataIdx] = -(1 << 15); } } topIndexData[dataIdx] = elementIdx; topAnchorsData[dataIdx] = anchorIdx; topClassData[dataIdx] = classIdx; if (elementIdx == 0) { // Saturate counters topNumData[imageIdx] = param.numScoreElements; topOffsetsStartData[imageIdx] = imageIdx * param.numScoreElements; topOffsetsEndData[imageIdx] = (imageIdx + 1) * param.numScoreElements; } } template <typename T> cudaError_t EfficientNMSFilterLauncher(EfficientNMSParameters& param, const T* scoresInput, int* topNumData, int* topIndexData, int* topAnchorsData, int* topOffsetsStartData, int* topOffsetsEndData, T* topScoresData, int* topClassData, cudaStream_t stream) { const unsigned int elementsPerBlock = 512; const unsigned int imagesPerBlock = 1; const unsigned int elementBlocks = (param.numScoreElements + elementsPerBlock - 1) / elementsPerBlock; const unsigned int imageBlocks = (param.batchSize + imagesPerBlock - 1) / imagesPerBlock; const dim3 blockSize = {elementsPerBlock, imagesPerBlock, 1}; const dim3 gridSize = {elementBlocks, imageBlocks, 1}; float kernelSelectThreshold = 0.007f; if (param.scoreSigmoid) { // Inverse Sigmoid if (param.scoreThreshold <= 0.f) { param.scoreThreshold = -(1 << 15); } else { param.scoreThreshold = logf(param.scoreThreshold / (1.f - param.scoreThreshold)); } kernelSelectThreshold = logf(kernelSelectThreshold / (1.f - kernelSelectThreshold)); // Disable Score Bits Optimization param.scoreBits = -1; } if (param.scoreThreshold < kernelSelectThreshold) { // A full copy of the buffer is necessary because 
sorting will scramble the input data otherwise. PLUGIN_CHECK_CUDA(cudaMemcpyAsync(topScoresData, scoresInput, param.batchSize * param.numScoreElements * sizeof(T), cudaMemcpyDeviceToDevice, stream)); EfficientNMSDenseIndex<T><<<gridSize, blockSize, 0, stream>>>(param, topNumData, topIndexData, topAnchorsData, topOffsetsStartData, topOffsetsEndData, topScoresData, topClassData); } else { EfficientNMSFilter<T><<<gridSize, blockSize, 0, stream>>>( param, scoresInput, topNumData, topIndexData, topAnchorsData, topScoresData, topClassData); EfficientNMSFilterSegments<<<1, param.batchSize, 0, stream>>>( param, topNumData, topOffsetsStartData, topOffsetsEndData); } return cudaGetLastError(); } template <typename T> size_t EfficientNMSSortWorkspaceSize(int batchSize, int numScoreElements) { size_t sortedWorkspaceSize = 0; cub::DoubleBuffer<T> keysDB(nullptr, nullptr); cub::DoubleBuffer<int> valuesDB(nullptr, nullptr); cub::DeviceSegmentedRadixSort::SortPairsDescending(nullptr, sortedWorkspaceSize, keysDB, valuesDB, numScoreElements, batchSize, (const int*) nullptr, (const int*) nullptr); return sortedWorkspaceSize; } size_t EfficientNMSWorkspaceSize(int batchSize, int numScoreElements, int numClasses, DataType datatype) { size_t total = 0; const size_t align = 256; // Counters // 3 for Filtering // 1 for Output Indexing // C for Max per Class Limiting size_t size = (3 + 1 + numClasses) * batchSize * sizeof(int); total += size + (size % align ? align - (size % align) : 0); // Int Buffers for (int i = 0; i < 4; i++) { size = batchSize * numScoreElements * sizeof(int); total += size + (size % align ? align - (size % align) : 0); } // Float Buffers for (int i = 0; i < 2; i++) { size = batchSize * numScoreElements * dataTypeSize(datatype); total += size + (size % align ? align - (size % align) : 0); } // Sort Workspace if (datatype == DataType::kHALF) { size = EfficientNMSSortWorkspaceSize<__half>(batchSize, numScoreElements); total += size + (size % align ? align - (size % align) : 0); } else if (datatype == DataType::kFLOAT) { size = EfficientNMSSortWorkspaceSize<float>(batchSize, numScoreElements); total += size + (size % align ? align - (size % align) : 0); } return total; } template <typename T> T* EfficientNMSWorkspace(void* workspace, size_t& offset, size_t elements) { T* buffer = (T*) ((size_t) workspace + offset); size_t align = 256; size_t size = elements * sizeof(T); size_t sizeAligned = size + (size % align ? 
align - (size % align) : 0); offset += sizeAligned; return buffer; } template <typename T> pluginStatus_t EfficientNMSDispatch(EfficientNMSParameters param, const void* boxesInput, const void* scoresInput, const void* anchorsInput, void* numDetectionsOutput, void* nmsBoxesOutput, void* nmsScoresOutput, void* nmsClassesOutput, void* nmsIndicesOutput, void* workspace, cudaStream_t stream) { // Clear Outputs (not all elements will get overwritten by the kernels, so safer to clear everything out) if (param.outputONNXIndices) { CSC(cudaMemsetAsync(nmsIndicesOutput, 0xFF, param.batchSize * param.numOutputBoxes * 3 * sizeof(int), stream), STATUS_FAILURE); } else { CSC(cudaMemsetAsync(numDetectionsOutput, 0x00, param.batchSize * sizeof(int), stream), STATUS_FAILURE); CSC(cudaMemsetAsync(nmsScoresOutput, 0x00, param.batchSize * param.numOutputBoxes * sizeof(T), stream), STATUS_FAILURE); CSC(cudaMemsetAsync(nmsBoxesOutput, 0x00, param.batchSize * param.numOutputBoxes * 4 * sizeof(T), stream), STATUS_FAILURE); CSC(cudaMemsetAsync(nmsClassesOutput, 0x00, param.batchSize * param.numOutputBoxes * sizeof(int), stream), STATUS_FAILURE); } // Empty Inputs if (param.numScoreElements < 1) { return STATUS_SUCCESS; } // Counters Workspace size_t workspaceOffset = 0; int countersTotalSize = (3 + 1 + param.numClasses) * param.batchSize; int* topNumData = EfficientNMSWorkspace<int>(workspace, workspaceOffset, countersTotalSize); int* topOffsetsStartData = topNumData + param.batchSize; int* topOffsetsEndData = topNumData + 2 * param.batchSize; int* outputIndexData = topNumData + 3 * param.batchSize; int* outputClassData = topNumData + 4 * param.batchSize; CSC(cudaMemsetAsync(topNumData, 0x00, countersTotalSize * sizeof(int), stream), STATUS_FAILURE); cudaError_t status = cudaGetLastError(); CSC(status, STATUS_FAILURE); // Other Buffers Workspace int* topIndexData = EfficientNMSWorkspace<int>(workspace, workspaceOffset, param.batchSize * param.numScoreElements); int* topClassData = EfficientNMSWorkspace<int>(workspace, workspaceOffset, param.batchSize * param.numScoreElements); int* topAnchorsData = EfficientNMSWorkspace<int>(workspace, workspaceOffset, param.batchSize * param.numScoreElements); int* sortedIndexData = EfficientNMSWorkspace<int>(workspace, workspaceOffset, param.batchSize * param.numScoreElements); T* topScoresData = EfficientNMSWorkspace<T>(workspace, workspaceOffset, param.batchSize * param.numScoreElements); T* sortedScoresData = EfficientNMSWorkspace<T>(workspace, workspaceOffset, param.batchSize * param.numScoreElements); size_t sortedWorkspaceSize = EfficientNMSSortWorkspaceSize<T>(param.batchSize, param.numScoreElements); char* sortedWorkspaceData = EfficientNMSWorkspace<char>(workspace, workspaceOffset, sortedWorkspaceSize); cub::DoubleBuffer<T> scoresDB(topScoresData, sortedScoresData); cub::DoubleBuffer<int> indexDB(topIndexData, sortedIndexData); // Kernels status = EfficientNMSFilterLauncher<T>(param, (T*) scoresInput, topNumData, topIndexData, topAnchorsData, topOffsetsStartData, topOffsetsEndData, topScoresData, topClassData, stream); CSC(status, STATUS_FAILURE); status = cub::DeviceSegmentedRadixSort::SortPairsDescending(sortedWorkspaceData, sortedWorkspaceSize, scoresDB, indexDB, param.batchSize * param.numScoreElements, param.batchSize, topOffsetsStartData, topOffsetsEndData, param.scoreBits > 0 ? (10 - param.scoreBits) : 0, param.scoreBits > 0 ? 
10 : sizeof(T) * 8, stream); CSC(status, STATUS_FAILURE); status = EfficientNMSLauncher<T>(param, topNumData, outputIndexData, outputClassData, indexDB.Current(), scoresDB.Current(), topClassData, topAnchorsData, boxesInput, anchorsInput, (int*) numDetectionsOutput, (T*) nmsScoresOutput, (int*) nmsClassesOutput, (int*) nmsIndicesOutput, nmsBoxesOutput, stream); CSC(status, STATUS_FAILURE); return STATUS_SUCCESS; } pluginStatus_t EfficientNMSInference(EfficientNMSParameters param, const void* boxesInput, const void* scoresInput, const void* anchorsInput, void* numDetectionsOutput, void* nmsBoxesOutput, void* nmsScoresOutput, void* nmsClassesOutput, void* nmsIndicesOutput, void* workspace, cudaStream_t stream) { if (param.datatype == DataType::kFLOAT) { param.scoreBits = -1; return EfficientNMSDispatch<float>(param, boxesInput, scoresInput, anchorsInput, numDetectionsOutput, nmsBoxesOutput, nmsScoresOutput, nmsClassesOutput, nmsIndicesOutput, workspace, stream); } else if (param.datatype == DataType::kHALF) { if (param.scoreBits <= 0 || param.scoreBits > 10) { param.scoreBits = -1; } return EfficientNMSDispatch<__half>(param, boxesInput, scoresInput, anchorsInput, numDetectionsOutput, nmsBoxesOutput, nmsScoresOutput, nmsClassesOutput, nmsIndicesOutput, workspace, stream); } else { return STATUS_NOT_SUPPORTED; } }
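The workspace layout above is built from one repeated idiom: EfficientNMSWorkspaceSize and EfficientNMSWorkspace round every sub-buffer up to a 256-byte boundary with size + (size % align ? align - (size % align) : 0) and carve them out of a single allocation. The minimal standalone host-side sketch below (not taken from the plugin; alignUp, carveBuffer and the sizes are illustrative) reproduces just that arithmetic so it can be checked in isolation.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Round a byte count up to the next multiple of align (same expression as the plugin uses).
static size_t alignUp(size_t size, size_t align = 256)
{
    return size + (size % align ? align - (size % align) : 0);
}

// Mirror of EfficientNMSWorkspace<T>: hand out the next sub-buffer, then advance the
// running offset by the aligned size so the following buffer stays 256-byte aligned.
template <typename T>
T* carveBuffer(void* workspace, size_t& offset, size_t elements)
{
    T* buffer = reinterpret_cast<T*>(static_cast<uint8_t*>(workspace) + offset);
    offset += alignUp(elements * sizeof(T));
    return buffer;
}

int main()
{
    alignas(256) static uint8_t workspace[1 << 20]; // host stand-in for the device workspace
    size_t offset = 0;
    int*   counters = carveBuffer<int>(workspace, offset, (3 + 1 + 80) * 8); // counters block: batch 8, 80 classes (hypothetical)
    float* scores   = carveBuffer<float>(workspace, offset, 8 * 1000);       // batch 8 x 1000 score elements (hypothetical)
    std::printf("counters=%p scores=%p used=%zu bytes\n",
                static_cast<void*>(counters), static_cast<void*>(scores), offset);
    return 0;
}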
eeee8ce3238e09eb841e9ae88f474cfda9d9b093.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //works for real to real and complex interleaved to complex interleaved template<typename T, int micro_tile_col_size, int micro_tile_row_size, int wg_col_size, int wg_row_size> __global__ void transpose_kernel_outplace(hipLaunchParm lp, T *input_matrix, T *output_matrix, size_t input_row_size, size_t input_col_size, size_t input_leading_dim_size, size_t output_leading_dim_size, size_t batch_size) { // WG size can be assumed to be 16 by 16 size_t local_idx_0 = hipThreadIdx_x;// 0-15 size_t local_idx_1 = hipThreadIdx_y;// 0-15 size_t block_idx_0 = hipBlockIdx_x;// index of work groups size_t block_idx_1 = hipBlockIdx_y; size_t block_dim_0 = hipBlockDim_x;// size of work groups 16 size_t block_dim_1 = hipBlockDim_y;// size of work groups 16 size_t grid_dim_0 = hipGridDim_x;// number of blocks // for 64 x 64 macro tile size we will need 16 x 4 x 64 blocks (4 x 64 == 16 x 16) // for 32 x 32 macro tile size we will need 4 x 8 x 32 blocks const size_t macro_tile_col_size = micro_tile_col_size * wg_col_size; const size_t macro_tile_row_size = micro_tile_row_size * wg_row_size; const size_t reshape_factor = macro_tile_col_size / block_dim_0; // 64 / 16 = 4 need to fit 4 rows into one row in LDS; 32 / 16 = 2 const size_t unroll_factor = macro_tile_row_size / (block_dim_1 / reshape_factor); // 64 / (16 / 4) = 16; 32 / (16 / 2) = 4 __shared__ T lds[macro_tile_row_size][macro_tile_col_size]; size_t batch_idx = 0; size_t blocks_per_batch = grid_dim_0 / batch_size; batch_idx += (block_idx_0) / blocks_per_batch; input_matrix += batch_idx * input_leading_dim_size * input_row_size; size_t input_offset = 0; input_offset += input_leading_dim_size * block_idx_1 * macro_tile_row_size;// each WG works on 64 by 64 block or 32 by 32 input_offset += (block_idx_0 % blocks_per_batch) * macro_tile_col_size; input_matrix += input_offset; for(int i = 0; i < unroll_factor; i++) { //each iteration 256 work items will read from a 4 x 64 subblock //there are 16 iterations size_t subblock_idx_0 = local_idx_0 + (local_idx_1 % reshape_factor) * block_dim_0; // local_idx_0 + (local_idx_1 % 4) * 16 size_t subblock_idx_1 = local_idx_1 / reshape_factor + i * (block_dim_1 / reshape_factor); //transpose happened here lds[subblock_idx_0][subblock_idx_1] = input_matrix[subblock_idx_1 * input_leading_dim_size + subblock_idx_0]; } __syncthreads(); output_matrix += batch_idx * input_col_size * output_leading_dim_size; size_t output_offset = 0; output_offset += output_leading_dim_size * (block_idx_0 % blocks_per_batch) * macro_tile_row_size;//input_row_size == ouput_col_size output_offset += block_idx_1 * macro_tile_col_size; output_matrix += output_offset; for(int i = 0; i < unroll_factor; i++) { size_t subblock_idx_0 = local_idx_0 + (local_idx_1 % reshape_factor) * block_dim_0;// 0-63 size_t subblock_idx_1 = local_idx_1 / reshape_factor + i * (block_dim_1 / reshape_factor);// 0-3, 4-7 ... 60-63 T temp = lds[subblock_idx_1][subblock_idx_0]; output_matrix[subblock_idx_1 * output_leading_dim_size + subblock_idx_0] = temp;//lds[subblock_idx_1][subblock_idx_0]; } }
eeee8ce3238e09eb841e9ae88f474cfda9d9b093.cu
//works for real to real and complex interleaved to complex interleaved template<typename T, int micro_tile_col_size, int micro_tile_row_size, int wg_col_size, int wg_row_size> __global__ void transpose_kernel_outplace(hipLaunchParm lp, T *input_matrix, T *output_matrix, size_t input_row_size, size_t input_col_size, size_t input_leading_dim_size, size_t output_leading_dim_size, size_t batch_size) { // WG size can be assumed to be 16 by 16 size_t local_idx_0 = hipThreadIdx_x;// 0-15 size_t local_idx_1 = hipThreadIdx_y;// 0-15 size_t block_idx_0 = hipBlockIdx_x;// index of work groups size_t block_idx_1 = hipBlockIdx_y; size_t block_dim_0 = hipBlockDim_x;// size of work groups 16 size_t block_dim_1 = hipBlockDim_y;// size of work groups 16 size_t grid_dim_0 = hipGridDim_x;// number of blocks // for 64 x 64 macro tile size we will need 16 x 4 x 64 blocks (4 x 64 == 16 x 16) // for 32 x 32 macro tile size we will need 4 x 8 x 32 blocks const size_t macro_tile_col_size = micro_tile_col_size * wg_col_size; const size_t macro_tile_row_size = micro_tile_row_size * wg_row_size; const size_t reshape_factor = macro_tile_col_size / block_dim_0; // 64 / 16 = 4 need to fit 4 rows into one row in LDS; 32 / 16 = 2 const size_t unroll_factor = macro_tile_row_size / (block_dim_1 / reshape_factor); // 64 / (16 / 4) = 16; 32 / (16 / 2) = 4 __shared__ T lds[macro_tile_row_size][macro_tile_col_size]; size_t batch_idx = 0; size_t blocks_per_batch = grid_dim_0 / batch_size; batch_idx += (block_idx_0) / blocks_per_batch; input_matrix += batch_idx * input_leading_dim_size * input_row_size; size_t input_offset = 0; input_offset += input_leading_dim_size * block_idx_1 * macro_tile_row_size;// each WG works on 64 by 64 block or 32 by 32 input_offset += (block_idx_0 % blocks_per_batch) * macro_tile_col_size; input_matrix += input_offset; for(int i = 0; i < unroll_factor; i++) { //each iteration 256 work items will read from a 4 x 64 subblock //there are 16 iterations size_t subblock_idx_0 = local_idx_0 + (local_idx_1 % reshape_factor) * block_dim_0; // local_idx_0 + (local_idx_1 % 4) * 16 size_t subblock_idx_1 = local_idx_1 / reshape_factor + i * (block_dim_1 / reshape_factor); //transpose happened here lds[subblock_idx_0][subblock_idx_1] = input_matrix[subblock_idx_1 * input_leading_dim_size + subblock_idx_0]; } __syncthreads(); output_matrix += batch_idx * input_col_size * output_leading_dim_size; size_t output_offset = 0; output_offset += output_leading_dim_size * (block_idx_0 % blocks_per_batch) * macro_tile_row_size;//input_row_size == ouput_col_size output_offset += block_idx_1 * macro_tile_col_size; output_matrix += output_offset; for(int i = 0; i < unroll_factor; i++) { size_t subblock_idx_0 = local_idx_0 + (local_idx_1 % reshape_factor) * block_dim_0;// 0-63 size_t subblock_idx_1 = local_idx_1 / reshape_factor + i * (block_dim_1 / reshape_factor);// 0-3, 4-7 ... 60-63 T temp = lds[subblock_idx_1][subblock_idx_0]; output_matrix[subblock_idx_1 * output_leading_dim_size + subblock_idx_0] = temp;//lds[subblock_idx_1][subblock_idx_0]; } }
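For the 64 x 64 macro tile configuration, the kernel's indexing implies a 16 x 16 work group per tile, grid.y spanning row tiles, and grid.x spanning column tiles for all batches (batch_idx is recovered as block_idx_0 / blocks_per_batch). The minimal host-side sketch below (not taken from the library; sizes are hypothetical and assumed to divide evenly) just evaluates that geometry and the reshape/unroll factors, without launching anything.

#include <cstdio>

int main()
{
    const size_t rows = 1024, cols = 2048, batch = 4; // hypothetical matrix sizes, multiples of the macro tile
    const size_t macro_tile = 64;                     // micro tile 4 x work-group 16
    const size_t block_x = 16, block_y = 16;          // "WG size can be assumed to be 16 by 16"

    const size_t blocks_per_batch = cols / macro_tile;   // column tiles per batch
    const size_t grid_x = blocks_per_batch * batch;      // kernel derives batch_idx from block_idx_0 / blocks_per_batch
    const size_t grid_y = rows / macro_tile;             // row tiles

    const size_t reshape_factor = macro_tile / block_x;                    // 64 / 16 = 4
    const size_t unroll_factor  = macro_tile / (block_y / reshape_factor); // 64 / (16 / 4) = 16

    std::printf("grid=(%zu,%zu) block=(%zu,%zu) reshape=%zu unroll=%zu\n",
                grid_x, grid_y, block_x, block_y, reshape_factor, unroll_factor);
    return 0;
}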
a294bfdb7efd6f808f2fb69eeb522d851b3d70ff.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "applyLinearFunction.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *size = NULL; hipMalloc(&size, XSIZE*YSIZE); const short *x = NULL; hipMalloc(&x, XSIZE*YSIZE); short *y = NULL; hipMalloc(&y, XSIZE*YSIZE); short *a = NULL; hipMalloc(&a, XSIZE*YSIZE); short *b = NULL; hipMalloc(&b, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( applyLinearFunction), dim3(gridBlock),dim3(threadBlock), 0, 0, size,x,y,a,b); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( applyLinearFunction), dim3(gridBlock),dim3(threadBlock), 0, 0, size,x,y,a,b); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( applyLinearFunction), dim3(gridBlock),dim3(threadBlock), 0, 0, size,x,y,a,b); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a294bfdb7efd6f808f2fb69eeb522d851b3d70ff.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "applyLinearFunction.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *size = NULL; cudaMalloc(&size, XSIZE*YSIZE); const short *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); short *y = NULL; cudaMalloc(&y, XSIZE*YSIZE); short *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); short *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); applyLinearFunction<<<gridBlock,threadBlock>>>(size,x,y,a,b); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { applyLinearFunction<<<gridBlock,threadBlock>>>(size,x,y,a,b); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { applyLinearFunction<<<gridBlock,threadBlock>>>(size,x,y,a,b); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
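One property of this harness worth noting: the timed loop records std::chrono timestamps around 1000 asynchronous kernel launches without a device synchronization before the second timestamp, so the measured interval may reflect launch/enqueue overhead more than kernel execution time. A common alternative is to bracket the loop with CUDA events, as in the minimal sketch below; the launch line is left commented out because applyLinearFunction and its arguments come from the files above, not from this sketch.

#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    // for (int i = 0; i < 1000; ++i)
    //     applyLinearFunction<<<gridBlock, threadBlock>>>(size, x, y, a, b);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);   // wait until all work recorded before 'stop' has finished

    float ms = 0.f;
    cudaEventElapsedTime(&ms, start, stop);
    std::printf("elapsed: %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}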
1a9d27bac0da8752e83f31fff0fba84e7d49e21a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_init_kernel; int xdim0_init_kernel_h = -1; __constant__ int xdim1_init_kernel; int xdim1_init_kernel_h = -1; __constant__ int xdim2_init_kernel; int xdim2_init_kernel_h = -1; __constant__ int xdim3_init_kernel; int xdim3_init_kernel_h = -1; __constant__ int xdim4_init_kernel; int xdim4_init_kernel_h = -1; __constant__ int xdim5_init_kernel; int xdim5_init_kernel_h = -1; __constant__ int xdim6_init_kernel; int xdim6_init_kernel_h = -1; __constant__ int xdim7_init_kernel; int xdim7_init_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #define OPS_ACC0(x) (x) #define OPS_ACC1(x) (x) #define OPS_ACC2(x) (x) #define OPS_ACC3(x) (x) #define OPS_ACC4(x) (x) #define OPS_ACC5(x) (x) #define OPS_ACC6(x) (x) #define OPS_ACC7(x) (x) // user function __device__ void init_kernel_gpu(const double *x, double *rho_new, double *rhou_new, double *rhoE_new, double *rhoin, double *rho_old, double *rhou_old, double *rhoE_old) { if (x[OPS_ACC0(0)] >= -4.0) { rho_new[OPS_ACC1(0)] = 1.0 + eps * sin(lambda * x[OPS_ACC0(0)]); rhou_new[OPS_ACC2(0)] = ur * rho_new[OPS_ACC1(0)]; rhoE_new[OPS_ACC3(0)] = (pr / gam1) + 0.5 * pow(rhou_new[OPS_ACC2(0)], 2) / rho_new[OPS_ACC1(0)]; } else { rho_new[OPS_ACC1(0)] = rhol; rhou_new[OPS_ACC2(0)] = ul * rho_new[OPS_ACC1(0)]; rhoE_new[OPS_ACC3(0)] = (pl / gam1) + 0.5 * pow(rhou_new[OPS_ACC2(0)], 2) / rho_new[OPS_ACC1(0)]; } rho_old[OPS_ACC5(0)] = rho_new[OPS_ACC1(0)]; rhou_old[OPS_ACC6(0)] = rhou_new[OPS_ACC2(0)]; rhoE_old[OPS_ACC7(0)] = rhoE_new[OPS_ACC3(0)]; rhoin[OPS_ACC4(0)] = rho_new[OPS_ACC1(0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 __global__ void ops_init_kernel(const double *__restrict arg0, double *__restrict arg1, double *__restrict arg2, double *__restrict arg3, double *__restrict arg4, double *__restrict arg5, double *__restrict arg6, double *__restrict arg7, int size0) { int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1; arg1 += idx_x * 1 * 1; arg2 += idx_x * 1 * 1; arg3 += idx_x * 1 * 1; arg4 += idx_x * 1 * 1; arg5 += idx_x * 1 * 1; arg6 += idx_x * 1 * 1; arg7 += idx_x * 1 * 1; if (idx_x < size0) { init_kernel_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_init_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { #else void ops_par_loop_init_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg arg6 = desc->args[6]; ops_arg arg7 = desc->args[7]; #endif // Timing double t1, t2, c1, c2; ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 8, range, 1)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(1, "init_kernel"); OPS_kernels[1].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[1]; int end[1]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; 
if (!sb->owned) return; for (int n = 0; n < 1; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 1; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; int xdim3 = args[3].dat->size[0]; int xdim4 = args[4].dat->size[0]; int xdim5 = args[5].dat->size[0]; int xdim6 = args[6].dat->size[0]; int xdim7 = args[7].dat->size[0]; if (xdim0 != xdim0_init_kernel_h || xdim1 != xdim1_init_kernel_h || xdim2 != xdim2_init_kernel_h || xdim3 != xdim3_init_kernel_h || xdim4 != xdim4_init_kernel_h || xdim5 != xdim5_init_kernel_h || xdim6 != xdim6_init_kernel_h || xdim7 != xdim7_init_kernel_h) { hipMemcpyToSymbol(xdim0_init_kernel, &xdim0, sizeof(int)); xdim0_init_kernel_h = xdim0; hipMemcpyToSymbol(xdim1_init_kernel, &xdim1, sizeof(int)); xdim1_init_kernel_h = xdim1; hipMemcpyToSymbol(xdim2_init_kernel, &xdim2, sizeof(int)); xdim2_init_kernel_h = xdim2; hipMemcpyToSymbol(xdim3_init_kernel, &xdim3, sizeof(int)); xdim3_init_kernel_h = xdim3; hipMemcpyToSymbol(xdim4_init_kernel, &xdim4, sizeof(int)); xdim4_init_kernel_h = xdim4; hipMemcpyToSymbol(xdim5_init_kernel, &xdim5, sizeof(int)); xdim5_init_kernel_h = xdim5; hipMemcpyToSymbol(xdim6_init_kernel, &xdim6, sizeof(int)); xdim6_init_kernel_h = xdim6; hipMemcpyToSymbol(xdim7_init_kernel, &xdim7, sizeof(int)); xdim7_init_kernel_h = xdim7; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1); dim3 tblock(OPS_block_size_x, 1, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size); int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size); int dat7 = (OPS_soa ? 
args[7].dat->type_size : args[7].dat->elem_size); char *p_a[8]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); p_a[5] = (char *)args[5].data_d + base5; int base6 = args[6].dat->base_offset + dat6 * 1 * (start[0] * args[6].stencil->stride[0]); p_a[6] = (char *)args[6].data_d + base6; int base7 = args[7].dat->base_offset + dat7 * 1 * (start[0] * args[7].stencil->stride[0]); p_a[7] = (char *)args[7].data_d + base7; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args, 8, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[1].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_init_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7], x_size); cutilSafeCall(hipGetLastError()); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[1].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[1], range); ops_set_halo_dirtybit3(&args[2], range); ops_set_halo_dirtybit3(&args[3], range); ops_set_halo_dirtybit3(&args[4], range); ops_set_halo_dirtybit3(&args[5], range); ops_set_halo_dirtybit3(&args[6], range); ops_set_halo_dirtybit3(&args[7], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[1].mpi_time += t2 - t1; OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg6); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg7); } } #ifdef OPS_LAZY void ops_par_loop_init_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 1; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 1; for (int i = 0; i < 2; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 8; desc->args = (ops_arg *)malloc(8 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) 
+ desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index; desc->args[7] = arg7; desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index; desc->function = ops_par_loop_init_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(1, "init_kernel"); } ops_enqueue_kernel(desc); } #endif
1a9d27bac0da8752e83f31fff0fba84e7d49e21a.cu
// // auto-generated by ops.py // __constant__ int xdim0_init_kernel; int xdim0_init_kernel_h = -1; __constant__ int xdim1_init_kernel; int xdim1_init_kernel_h = -1; __constant__ int xdim2_init_kernel; int xdim2_init_kernel_h = -1; __constant__ int xdim3_init_kernel; int xdim3_init_kernel_h = -1; __constant__ int xdim4_init_kernel; int xdim4_init_kernel_h = -1; __constant__ int xdim5_init_kernel; int xdim5_init_kernel_h = -1; __constant__ int xdim6_init_kernel; int xdim6_init_kernel_h = -1; __constant__ int xdim7_init_kernel; int xdim7_init_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #define OPS_ACC0(x) (x) #define OPS_ACC1(x) (x) #define OPS_ACC2(x) (x) #define OPS_ACC3(x) (x) #define OPS_ACC4(x) (x) #define OPS_ACC5(x) (x) #define OPS_ACC6(x) (x) #define OPS_ACC7(x) (x) // user function __device__ void init_kernel_gpu(const double *x, double *rho_new, double *rhou_new, double *rhoE_new, double *rhoin, double *rho_old, double *rhou_old, double *rhoE_old) { if (x[OPS_ACC0(0)] >= -4.0) { rho_new[OPS_ACC1(0)] = 1.0 + eps * sin(lambda * x[OPS_ACC0(0)]); rhou_new[OPS_ACC2(0)] = ur * rho_new[OPS_ACC1(0)]; rhoE_new[OPS_ACC3(0)] = (pr / gam1) + 0.5 * pow(rhou_new[OPS_ACC2(0)], 2) / rho_new[OPS_ACC1(0)]; } else { rho_new[OPS_ACC1(0)] = rhol; rhou_new[OPS_ACC2(0)] = ul * rho_new[OPS_ACC1(0)]; rhoE_new[OPS_ACC3(0)] = (pl / gam1) + 0.5 * pow(rhou_new[OPS_ACC2(0)], 2) / rho_new[OPS_ACC1(0)]; } rho_old[OPS_ACC5(0)] = rho_new[OPS_ACC1(0)]; rhou_old[OPS_ACC6(0)] = rhou_new[OPS_ACC2(0)]; rhoE_old[OPS_ACC7(0)] = rhoE_new[OPS_ACC3(0)]; rhoin[OPS_ACC4(0)] = rho_new[OPS_ACC1(0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 __global__ void ops_init_kernel(const double *__restrict arg0, double *__restrict arg1, double *__restrict arg2, double *__restrict arg3, double *__restrict arg4, double *__restrict arg5, double *__restrict arg6, double *__restrict arg7, int size0) { int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1; arg1 += idx_x * 1 * 1; arg2 += idx_x * 1 * 1; arg3 += idx_x * 1 * 1; arg4 += idx_x * 1 * 1; arg5 += idx_x * 1 * 1; arg6 += idx_x * 1 * 1; arg7 += idx_x * 1 * 1; if (idx_x < size0) { init_kernel_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_init_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { #else void ops_par_loop_init_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg arg6 = desc->args[6]; ops_arg arg7 = desc->args[7]; #endif // Timing double t1, t2, c1, c2; ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 8, range, 1)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(1, "init_kernel"); OPS_kernels[1].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[1]; int end[1]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 1; n++) { start[n] = sb->decomp_disp[n]; 
end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 1; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; int xdim3 = args[3].dat->size[0]; int xdim4 = args[4].dat->size[0]; int xdim5 = args[5].dat->size[0]; int xdim6 = args[6].dat->size[0]; int xdim7 = args[7].dat->size[0]; if (xdim0 != xdim0_init_kernel_h || xdim1 != xdim1_init_kernel_h || xdim2 != xdim2_init_kernel_h || xdim3 != xdim3_init_kernel_h || xdim4 != xdim4_init_kernel_h || xdim5 != xdim5_init_kernel_h || xdim6 != xdim6_init_kernel_h || xdim7 != xdim7_init_kernel_h) { cudaMemcpyToSymbol(xdim0_init_kernel, &xdim0, sizeof(int)); xdim0_init_kernel_h = xdim0; cudaMemcpyToSymbol(xdim1_init_kernel, &xdim1, sizeof(int)); xdim1_init_kernel_h = xdim1; cudaMemcpyToSymbol(xdim2_init_kernel, &xdim2, sizeof(int)); xdim2_init_kernel_h = xdim2; cudaMemcpyToSymbol(xdim3_init_kernel, &xdim3, sizeof(int)); xdim3_init_kernel_h = xdim3; cudaMemcpyToSymbol(xdim4_init_kernel, &xdim4, sizeof(int)); xdim4_init_kernel_h = xdim4; cudaMemcpyToSymbol(xdim5_init_kernel, &xdim5, sizeof(int)); xdim5_init_kernel_h = xdim5; cudaMemcpyToSymbol(xdim6_init_kernel, &xdim6, sizeof(int)); xdim6_init_kernel_h = xdim6; cudaMemcpyToSymbol(xdim7_init_kernel, &xdim7, sizeof(int)); xdim7_init_kernel_h = xdim7; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1); dim3 tblock(OPS_block_size_x, 1, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size); int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size); int dat7 = (OPS_soa ? 
args[7].dat->type_size : args[7].dat->elem_size); char *p_a[8]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); p_a[5] = (char *)args[5].data_d + base5; int base6 = args[6].dat->base_offset + dat6 * 1 * (start[0] * args[6].stencil->stride[0]); p_a[6] = (char *)args[6].data_d + base6; int base7 = args[7].dat->base_offset + dat7 * 1 * (start[0] * args[7].stencil->stride[0]); p_a[7] = (char *)args[7].data_d + base7; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args, 8, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[1].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_init_kernel<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7], x_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[1].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[1], range); ops_set_halo_dirtybit3(&args[2], range); ops_set_halo_dirtybit3(&args[3], range); ops_set_halo_dirtybit3(&args[4], range); ops_set_halo_dirtybit3(&args[5], range); ops_set_halo_dirtybit3(&args[6], range); ops_set_halo_dirtybit3(&args[7], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[1].mpi_time += t2 - t1; OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg6); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg7); } } #ifdef OPS_LAZY void ops_par_loop_init_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 1; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 1; for (int i = 0; i < 2; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 8; desc->args = (ops_arg *)malloc(8 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; 
desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index; desc->args[7] = arg7; desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index; desc->function = ops_par_loop_init_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(1, "init_kernel"); } ops_enqueue_kernel(desc); } #endif
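Both variants above upload each __constant__ dimension with cudaMemcpyToSymbol / hipMemcpyToSymbol only when the cached host-side copy (xdimN_init_kernel_h) differs from the new value. The reduced CUDA sketch below (a single hypothetical symbol, not OPS code) shows just that caching pattern.

#include <cuda_runtime.h>

__constant__ int xdim0_demo;      // device-side constant used by kernels
static int xdim0_demo_h = -1;     // host-side cache; -1 means "never uploaded"

void set_xdim0(int xdim0)
{
    if (xdim0 != xdim0_demo_h)    // skip the copy when the layout has not changed
    {
        cudaMemcpyToSymbol(xdim0_demo, &xdim0, sizeof(int));
        xdim0_demo_h = xdim0;
    }
}

int main()
{
    set_xdim0(128);   // uploads
    set_xdim0(128);   // cached, no copy
    set_xdim0(256);   // uploads again
    return 0;
}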
c83aca69941f5ffa78970e38b8305159f93b6e33.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdexcept> #include "pixelShuffle3DPlugin.h" using namespace nvinfer1; using nvinfer1::plugin::pixelShuffle3DPlugin; using nvinfer1::plugin::pixelShuffle3DPluginCreator; #define CHECK_CUDA(call) \ do \ { \ hipError_t status = call; \ if (status != hipSuccess) \ { \ return status; \ } \ } while (0) #define CHECK_CUDNN(call) \ do \ { \ cudnnStatus_t status = call; \ if (status != CUDNN_STATUS_SUCCESS) \ { \ return status; \ } \ } while (0) // This is derived from: https://fgiesen.wordpress.com/2012/03/28/half-to-float-done-quic/ inline float half_to_float_fast(unsigned short value) { union F32 { unsigned int u; float f; }; static const F32 magic = {(254 - 15) << 23}; static const F32 was_infnan = {(127 + 16) << 23}; F32 result; result.u = (value & 0x7fff) << 13; // exponent/mantissa bits result.f *= magic.f; // exponent adjust if (result.f >= was_infnan.f) { // make sure Inf/NaN survive result.u |= 255 << 23; } result.u |= (value & 0x8000) << 16; // sign bit return result.f; } namespace { const char* PIXELSHUFFLE3D_PLUGIN_VERSION{"1"}; const char* PIXELSHUFFLE3D_PLUGIN_NAME{"PIXELSHUFFLE3D_TRT"}; } REGISTER_TENSORRT_PLUGIN(pixelShuffle3DPluginCreator); PluginFieldCollection pixelShuffle3DPluginCreator::mFC{}; std::vector<PluginField> pixelShuffle3DPluginCreator::mPluginAttributes; pixelShuffle3DPlugin::pixelShuffle3DPlugin( int r, int s, int t) : mR(r) , mS(s) , mT(t) , mInScale(-1.f) , mOutScale(-1.f) { } pixelShuffle3DPlugin::pixelShuffle3DPlugin(void const* serialData, size_t serialLength) { deserialize_value(&serialData, &serialLength, &mR); deserialize_value(&serialData, &serialLength, &mS); deserialize_value(&serialData, &serialLength, &mT); deserialize_value(&serialData, &serialLength, &mInScale); deserialize_value(&serialData, &serialLength, &mOutScale); } pixelShuffle3DPlugin::~pixelShuffle3DPlugin() { terminate(); } // pixelShuffle3DPlugin returns one output. 
int pixelShuffle3DPlugin::getNbOutputs() const { return 1; } DimsExprs pixelShuffle3DPlugin::getOutputDimensions( int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs, nvinfer1::IExprBuilder& exprBuilder) { nvinfer1::DimsExprs output(inputs[0]); output.d[0] = inputs[0].d[0]; output.d[1] = exprBuilder.operation(DimensionOperation::kFLOOR_DIV, *inputs[0].d[1], *exprBuilder.constant(mR * mS * mT)); output.d[2] = exprBuilder.operation(DimensionOperation::kPROD, *inputs[0].d[2], *exprBuilder.constant(mR)); output.d[3] = exprBuilder.operation(DimensionOperation::kPROD, *inputs[0].d[3], *exprBuilder.constant(mS)); output.d[4] = exprBuilder.operation(DimensionOperation::kPROD, *inputs[0].d[4], *exprBuilder.constant(mT)); return output; } int pixelShuffle3DPlugin::initialize() { if (!initialized) { } initialized = true; return 0; } void pixelShuffle3DPlugin::terminate() { if (initialized) { } initialized = false; return; } size_t pixelShuffle3DPlugin::getWorkspaceSize(const nvinfer1::PluginTensorDesc* inputs, int nbInputs, const nvinfer1::PluginTensorDesc* outputs, int nbOutputs) const { return 0; } int pixelShuffle3DPlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream) { ASSERT(initialized); if (inputDesc[0].format == nvinfer1::PluginFormat::kLINEAR || inputDesc[0].format == nvinfer1::PluginFormat::kCDHW32) { nvinfer1::Dims input_dims = inputDesc[0].dims; int n = input_dims.d[0]; int c = input_dims.d[1]; int d = input_dims.d[2]; int h = input_dims.d[3]; int w = input_dims.d[4]; _params.o = d * mR; _params.p = h * mS; _params.q = w * mT; _params.k = c/mT/mR/mS; _params.n = n; _params.r = mR; _params.s = mS; _params.t = mT; _params.scale = mInScale / mOutScale; _params.gmem_src = const_cast<void *>(inputs[0]); _params.gmem_dst = outputs[0]; if (inputDesc[0].format == nvinfer1::PluginFormat::kCDHW32) { assert(mOutScale != 0.f); int res = pixel_shuffle_ncdhw32_to_ncdhw32_dispatch(_params, stream); } else { int res = pixel_shuffle_ncdhw_to_ncdhw_dispatch(_params, stream); } } else { ASSERT(false && "Unexpected input format"); } return 0; } size_t pixelShuffle3DPlugin::getSerializationSize() const { return (serialized_size(mR) + serialized_size(mS) + serialized_size(mT) + serialized_size(mInScale) + serialized_size(mOutScale)); } void pixelShuffle3DPlugin::serialize(void *buffer) const { serialize_value(&buffer, mR); serialize_value(&buffer, mS); serialize_value(&buffer, mT); serialize_value(&buffer, mInScale); serialize_value(&buffer, mOutScale); } bool pixelShuffle3DPlugin::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs, int nbOutputs) { ASSERT(inOut && pos < (nbInputs + nbOutputs)); bool support_fp32_linear = (inOut[pos].type == nvinfer1::DataType::kFLOAT && inOut[pos].format == nvinfer1::PluginFormat::kLINEAR && inOut[pos].type == inOut[0].type && inOut[pos].format == inOut[0].format); bool support_int8_cdhw32 = (inOut[pos].type == nvinfer1::DataType::kINT8 && inOut[pos].format == nvinfer1::PluginFormat::kCDHW32 && inOut[pos].type == inOut[0].type && inOut[pos].format == inOut[0].format); return support_fp32_linear || support_int8_cdhw32; } const char* pixelShuffle3DPlugin::getPluginType() const { return PIXELSHUFFLE3D_PLUGIN_NAME; } const char* pixelShuffle3DPlugin::getPluginVersion() const { return PIXELSHUFFLE3D_PLUGIN_VERSION; } void pixelShuffle3DPlugin::destroy() { delete this; } IPluginV2DynamicExt* 
pixelShuffle3DPlugin::clone() const { auto plugin = new pixelShuffle3DPlugin{mR, mS, mT}; plugin->setPluginNamespace(mPluginNamespace); plugin->initialize(); return plugin; } // Set plugin namespace void pixelShuffle3DPlugin::setPluginNamespace(const char* pluginNamespace) { mPluginNamespace = pluginNamespace; } const char* pixelShuffle3DPlugin::getPluginNamespace() const { return mPluginNamespace; } nvinfer1::DataType pixelShuffle3DPlugin::getOutputDataType( int index, const nvinfer1::DataType* inputTypes, int nbInputs) const { ASSERT(inputTypes && nbInputs > 0 && index == 0); return nvinfer1::DataType::kFLOAT; } void pixelShuffle3DPlugin::configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs, const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs) { mInScale = in[0].desc.scale; mOutScale = out[0].desc.scale; } // pixelShuffle3DPluginCreator methods pixelShuffle3DPluginCreator::pixelShuffle3DPluginCreator() { mPluginAttributes.emplace_back(PluginField("R", nullptr, PluginFieldType::kINT32, 1)); mPluginAttributes.emplace_back(PluginField("S", nullptr, PluginFieldType::kINT32, 1)); mPluginAttributes.emplace_back(PluginField("T", nullptr, PluginFieldType::kINT32, 1)); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* pixelShuffle3DPluginCreator::getPluginName() const { return PIXELSHUFFLE3D_PLUGIN_NAME; } const char* pixelShuffle3DPluginCreator::getPluginVersion() const { return PIXELSHUFFLE3D_PLUGIN_VERSION; } const PluginFieldCollection* pixelShuffle3DPluginCreator::getFieldNames() { return &mFC; } IPluginV2DynamicExt* pixelShuffle3DPluginCreator::createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc) { int r {}; int s {}; int t {}; const PluginField* fields = fc->fields; for (int i = 0; i < fc->nbFields; ++i) { const char* attrName = fields[i].name; if (!strcmp(attrName, "R")) { ASSERT(fields[i].type == PluginFieldType::kINT32); r = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "S")) { ASSERT(fields[i].type == PluginFieldType::kINT32); s = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "T")) { ASSERT(fields[i].type == PluginFieldType::kINT32); t = *(static_cast<const int*>(fields[i].data)); } } pixelShuffle3DPlugin* obj = new pixelShuffle3DPlugin(r, s, t); obj->setPluginNamespace(mNamespace.c_str()); obj->initialize(); return obj; } IPluginV2DynamicExt* pixelShuffle3DPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) { pixelShuffle3DPlugin* obj = new pixelShuffle3DPlugin{serialData, serialLength}; obj->setPluginNamespace(mNamespace.c_str()); obj->initialize(); return obj; }
c83aca69941f5ffa78970e38b8305159f93b6e33.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdexcept> #include "pixelShuffle3DPlugin.h" using namespace nvinfer1; using nvinfer1::plugin::pixelShuffle3DPlugin; using nvinfer1::plugin::pixelShuffle3DPluginCreator; #define CHECK_CUDA(call) \ do \ { \ cudaError_t status = call; \ if (status != cudaSuccess) \ { \ return status; \ } \ } while (0) #define CHECK_CUDNN(call) \ do \ { \ cudnnStatus_t status = call; \ if (status != CUDNN_STATUS_SUCCESS) \ { \ return status; \ } \ } while (0) // This is derived from: https://fgiesen.wordpress.com/2012/03/28/half-to-float-done-quic/ inline float half_to_float_fast(unsigned short value) { union F32 { unsigned int u; float f; }; static const F32 magic = {(254 - 15) << 23}; static const F32 was_infnan = {(127 + 16) << 23}; F32 result; result.u = (value & 0x7fff) << 13; // exponent/mantissa bits result.f *= magic.f; // exponent adjust if (result.f >= was_infnan.f) { // make sure Inf/NaN survive result.u |= 255 << 23; } result.u |= (value & 0x8000) << 16; // sign bit return result.f; } namespace { const char* PIXELSHUFFLE3D_PLUGIN_VERSION{"1"}; const char* PIXELSHUFFLE3D_PLUGIN_NAME{"PIXELSHUFFLE3D_TRT"}; } REGISTER_TENSORRT_PLUGIN(pixelShuffle3DPluginCreator); PluginFieldCollection pixelShuffle3DPluginCreator::mFC{}; std::vector<PluginField> pixelShuffle3DPluginCreator::mPluginAttributes; pixelShuffle3DPlugin::pixelShuffle3DPlugin( int r, int s, int t) : mR(r) , mS(s) , mT(t) , mInScale(-1.f) , mOutScale(-1.f) { } pixelShuffle3DPlugin::pixelShuffle3DPlugin(void const* serialData, size_t serialLength) { deserialize_value(&serialData, &serialLength, &mR); deserialize_value(&serialData, &serialLength, &mS); deserialize_value(&serialData, &serialLength, &mT); deserialize_value(&serialData, &serialLength, &mInScale); deserialize_value(&serialData, &serialLength, &mOutScale); } pixelShuffle3DPlugin::~pixelShuffle3DPlugin() { terminate(); } // pixelShuffle3DPlugin returns one output. 
int pixelShuffle3DPlugin::getNbOutputs() const { return 1; } DimsExprs pixelShuffle3DPlugin::getOutputDimensions( int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs, nvinfer1::IExprBuilder& exprBuilder) { nvinfer1::DimsExprs output(inputs[0]); output.d[0] = inputs[0].d[0]; output.d[1] = exprBuilder.operation(DimensionOperation::kFLOOR_DIV, *inputs[0].d[1], *exprBuilder.constant(mR * mS * mT)); output.d[2] = exprBuilder.operation(DimensionOperation::kPROD, *inputs[0].d[2], *exprBuilder.constant(mR)); output.d[3] = exprBuilder.operation(DimensionOperation::kPROD, *inputs[0].d[3], *exprBuilder.constant(mS)); output.d[4] = exprBuilder.operation(DimensionOperation::kPROD, *inputs[0].d[4], *exprBuilder.constant(mT)); return output; } int pixelShuffle3DPlugin::initialize() { if (!initialized) { } initialized = true; return 0; } void pixelShuffle3DPlugin::terminate() { if (initialized) { } initialized = false; return; } size_t pixelShuffle3DPlugin::getWorkspaceSize(const nvinfer1::PluginTensorDesc* inputs, int nbInputs, const nvinfer1::PluginTensorDesc* outputs, int nbOutputs) const { return 0; } int pixelShuffle3DPlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) { ASSERT(initialized); if (inputDesc[0].format == nvinfer1::PluginFormat::kLINEAR || inputDesc[0].format == nvinfer1::PluginFormat::kCDHW32) { nvinfer1::Dims input_dims = inputDesc[0].dims; int n = input_dims.d[0]; int c = input_dims.d[1]; int d = input_dims.d[2]; int h = input_dims.d[3]; int w = input_dims.d[4]; _params.o = d * mR; _params.p = h * mS; _params.q = w * mT; _params.k = c/mT/mR/mS; _params.n = n; _params.r = mR; _params.s = mS; _params.t = mT; _params.scale = mInScale / mOutScale; _params.gmem_src = const_cast<void *>(inputs[0]); _params.gmem_dst = outputs[0]; if (inputDesc[0].format == nvinfer1::PluginFormat::kCDHW32) { assert(mOutScale != 0.f); int res = pixel_shuffle_ncdhw32_to_ncdhw32_dispatch(_params, stream); } else { int res = pixel_shuffle_ncdhw_to_ncdhw_dispatch(_params, stream); } } else { ASSERT(false && "Unexpected input format"); } return 0; } size_t pixelShuffle3DPlugin::getSerializationSize() const { return (serialized_size(mR) + serialized_size(mS) + serialized_size(mT) + serialized_size(mInScale) + serialized_size(mOutScale)); } void pixelShuffle3DPlugin::serialize(void *buffer) const { serialize_value(&buffer, mR); serialize_value(&buffer, mS); serialize_value(&buffer, mT); serialize_value(&buffer, mInScale); serialize_value(&buffer, mOutScale); } bool pixelShuffle3DPlugin::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs, int nbOutputs) { ASSERT(inOut && pos < (nbInputs + nbOutputs)); bool support_fp32_linear = (inOut[pos].type == nvinfer1::DataType::kFLOAT && inOut[pos].format == nvinfer1::PluginFormat::kLINEAR && inOut[pos].type == inOut[0].type && inOut[pos].format == inOut[0].format); bool support_int8_cdhw32 = (inOut[pos].type == nvinfer1::DataType::kINT8 && inOut[pos].format == nvinfer1::PluginFormat::kCDHW32 && inOut[pos].type == inOut[0].type && inOut[pos].format == inOut[0].format); return support_fp32_linear || support_int8_cdhw32; } const char* pixelShuffle3DPlugin::getPluginType() const { return PIXELSHUFFLE3D_PLUGIN_NAME; } const char* pixelShuffle3DPlugin::getPluginVersion() const { return PIXELSHUFFLE3D_PLUGIN_VERSION; } void pixelShuffle3DPlugin::destroy() { delete this; } IPluginV2DynamicExt* 
pixelShuffle3DPlugin::clone() const { auto plugin = new pixelShuffle3DPlugin{mR, mS, mT}; plugin->setPluginNamespace(mPluginNamespace); plugin->initialize(); return plugin; } // Set plugin namespace void pixelShuffle3DPlugin::setPluginNamespace(const char* pluginNamespace) { mPluginNamespace = pluginNamespace; } const char* pixelShuffle3DPlugin::getPluginNamespace() const { return mPluginNamespace; } nvinfer1::DataType pixelShuffle3DPlugin::getOutputDataType( int index, const nvinfer1::DataType* inputTypes, int nbInputs) const { ASSERT(inputTypes && nbInputs > 0 && index == 0); return nvinfer1::DataType::kFLOAT; } void pixelShuffle3DPlugin::configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs, const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs) { mInScale = in[0].desc.scale; mOutScale = out[0].desc.scale; } // pixelShuffle3DPluginCreator methods pixelShuffle3DPluginCreator::pixelShuffle3DPluginCreator() { mPluginAttributes.emplace_back(PluginField("R", nullptr, PluginFieldType::kINT32, 1)); mPluginAttributes.emplace_back(PluginField("S", nullptr, PluginFieldType::kINT32, 1)); mPluginAttributes.emplace_back(PluginField("T", nullptr, PluginFieldType::kINT32, 1)); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* pixelShuffle3DPluginCreator::getPluginName() const { return PIXELSHUFFLE3D_PLUGIN_NAME; } const char* pixelShuffle3DPluginCreator::getPluginVersion() const { return PIXELSHUFFLE3D_PLUGIN_VERSION; } const PluginFieldCollection* pixelShuffle3DPluginCreator::getFieldNames() { return &mFC; } IPluginV2DynamicExt* pixelShuffle3DPluginCreator::createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc) { int r {}; int s {}; int t {}; const PluginField* fields = fc->fields; for (int i = 0; i < fc->nbFields; ++i) { const char* attrName = fields[i].name; if (!strcmp(attrName, "R")) { ASSERT(fields[i].type == PluginFieldType::kINT32); r = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "S")) { ASSERT(fields[i].type == PluginFieldType::kINT32); s = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "T")) { ASSERT(fields[i].type == PluginFieldType::kINT32); t = *(static_cast<const int*>(fields[i].data)); } } pixelShuffle3DPlugin* obj = new pixelShuffle3DPlugin(r, s, t); obj->setPluginNamespace(mNamespace.c_str()); obj->initialize(); return obj; } IPluginV2DynamicExt* pixelShuffle3DPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) { pixelShuffle3DPlugin* obj = new pixelShuffle3DPlugin{serialData, serialLength}; obj->setPluginNamespace(mNamespace.c_str()); obj->initialize(); return obj; }
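getOutputDimensions() and enqueue() above agree on the 3D pixel-shuffle shape arithmetic: r*s*t channels are folded into the depth, height and width dimensions. The tiny host-side check below (hypothetical NCDHW sizes, not plugin code) evaluates the same formulas.

#include <cstdio>

int main()
{
    const int n = 2, c = 32, d = 8, h = 16, w = 16; // hypothetical input NCDHW shape
    const int r = 2, s = 2, t = 2;                  // upscale factors R, S, T

    const int k = c / (r * s * t);                  // output channels (_params.k)
    const int o = d * r;                            // output depth   (_params.o)
    const int p = h * s;                            // output height  (_params.p)
    const int q = w * t;                            // output width   (_params.q)

    std::printf("in : [%d, %d, %d, %d, %d]\n", n, c, d, h, w);
    std::printf("out: [%d, %d, %d, %d, %d]\n", n, k, o, p, q);
    return 0;
}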
000d224e89a0ac7945e2776f2902bf81e1c51478.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without *modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright *notice, this list of conditions and the following disclaimer in the *documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its *contributors may be used to endorse or promote products derived from this *software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE *ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY *DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES *(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; *LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND *ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TOR *(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** * \file * test/unit/convolution/device/depthwise_conv2d_fprop_f16nchw_f16nchw_f16nchw_tensor_op_f16_sm70.cu * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. */ /*! 
\file \brief Tests for device-wide GEMM interface */ #if defined(__HIPCC__) && (__CUDACC_VER_MAJOR__ >= 11) #include "cutlass/convolution/device/convolution.h" #include "../../common/cutlass_unit_test.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/tensor_view_io.h" #include "testbed.h" //////////////////////////////////////////////////////////////////////////////// TEST(SM80_Device_Depthwise_Convolution_f16_f16_nchw_tensor_op_perf, 64x64x64_32x32x64) { using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Convolution = cutlass::conv::device::Convolution< cutlass::half_t, cutlass::layout::TensorNCHW, cutlass::half_t, cutlass::layout::TensorNCHW, ElementOutput, cutlass::layout::TensorNCHW, cutlass::half_t, cutlass::layout::TensorNCHW, cutlass::half_t, cutlass::conv::ConvType::kDepthwiseConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 16>, cutlass::epilogue::thread::BiasAddLinearCombination< ElementOutput, 1, ElementAccumulator, cutlass::half_t, ElementCompute>, cutlass::conv::threadblock:: DepthwiseConvolutionFpropThreadblockSwizzle, 2, 8, 1, cutlass::conv::SpecialOptimizeDesc::NONE, cutlass::arch::OpMultiplyAdd, cutlass::conv::ImplicitGemmMode::GEMM_TN>; EXPECT_TRUE( test::convolution::device::BenchDepthwiseConvolution<Convolution>( 64, 1000)); } //////////////////////////////////////////////////////////////////////////////// #endif
000d224e89a0ac7945e2776f2902bf81e1c51478.cu
/*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without *modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright *notice, this list of conditions and the following disclaimer in the *documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its *contributors may be used to endorse or promote products derived from this *software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE *ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY *DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES *(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; *LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND *ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT *(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** * \file * test/unit/convolution/device/depthwise_conv2d_fprop_f16nchw_f16nchw_f16nchw_tensor_op_f16_sm70.cu * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. */ /*!
\file \brief Tests for device-wide GEMM interface */ #if defined(__CUDACC__) && (__CUDACC_VER_MAJOR__ >= 11) #include "cutlass/convolution/device/convolution.h" #include "../../common/cutlass_unit_test.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/tensor_view_io.h" #include "testbed.h" //////////////////////////////////////////////////////////////////////////////// TEST(SM80_Device_Depthwise_Convolution_f16_f16_nchw_tensor_op_perf, 64x64x64_32x32x64) { using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Convolution = cutlass::conv::device::Convolution< cutlass::half_t, cutlass::layout::TensorNCHW, cutlass::half_t, cutlass::layout::TensorNCHW, ElementOutput, cutlass::layout::TensorNCHW, cutlass::half_t, cutlass::layout::TensorNCHW, cutlass::half_t, cutlass::conv::ConvType::kDepthwiseConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 16>, cutlass::epilogue::thread::BiasAddLinearCombination< ElementOutput, 1, ElementAccumulator, cutlass::half_t, ElementCompute>, cutlass::conv::threadblock:: DepthwiseConvolutionFpropThreadblockSwizzle, 2, 8, 1, cutlass::conv::SpecialOptimizeDesc::NONE, cutlass::arch::OpMultiplyAdd, cutlass::conv::ImplicitGemmMode::GEMM_TN>; EXPECT_TRUE( test::convolution::device::BenchDepthwiseConvolution<Convolution>( 64, 1000)); } //////////////////////////////////////////////////////////////////////////////// #endif
47545601be8afa9516662f28bc8fe01c386019fb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <time.h> #include "camera.h" #include "world.h" #include "line.h" #include "raytracer.h" #include "canvas.h" #include "sphere.h" #define WINDOW_TITLE "CUDA Raytracer by Nick & Zaid\0" #define BLOCKS 512 #define THREADS 32 #define MAX_REFLECTIONS 3 #define SPEED_FACTOR 0.1 camera_t * h_camera; COLOR * d_frame; canvas_t * canvas; line_t * d_rays; camera_t * d_camera; world_t * h_world; float * sphere_speeds; void do_work(); void animate_spheres(); void animate_spheres() { int sphere_itr; sphere_t * s; float future; for (sphere_itr = 0; sphere_itr < h_world->n_objects; sphere_itr++) { if (h_world->objects[sphere_itr].type == TRIANGLE) { continue; } s = &(h_world->objects[sphere_itr].sphere); future = s->center[1] + sphere_speeds[sphere_itr] + (sphere_speeds[sphere_itr] < 0 ? -(s->radius) : s->radius); if (future < 10 || future > (h_camera->height - 10)) { sphere_speeds[sphere_itr] *= -1; } s->center[1] += sphere_speeds[sphere_itr]; } } void do_work() { clock_t tick = clock(); // Trace the next frame Raytracer(d_frame, d_rays, h_world, h_camera->width * h_camera->height, BLOCKS, THREADS, MAX_REFLECTIONS); // Copy the raytraced frame back to the host hipMemcpy(canvas->pixels, d_frame, sizeof(COLOR) * CHANNELS * h_camera->width * h_camera->height, hipMemcpyDeviceToHost); clock_t tock = clock(); sprintf(canvas->message, "FPS: %.2lf\n", 1.0 / ((double)(tock - tick) / CLOCKS_PER_SEC)); animate_spheres(); } int main(int argc, char ** argv) { if (argc != 2) { printf("Please provide the scene file path as an argument.\n"); return EXIT_FAILURE; } FILE * fp = fopen(argv[1], "r"); if (!fp) { printf("Unable to open file.\n"); return EXIT_FAILURE; } h_camera = Camera_read(fp); printf("Read and Created camera on host\n"); h_world = World_read(fp); printf("Read and created world on host\n"); fclose(fp); d_camera = Camera_toDevice(h_camera); printf("Copied camera to device\n"); int h = h_camera->height, w = h_camera->width, size = h * w; hipMalloc(&d_rays, sizeof(line_t) * size); hipLaunchKernelGGL(( Camera_createRays), dim3(BLOCKS), dim3(THREADS), 0, 0, d_camera, d_rays); printf("Created rays from camera on device\n"); hipMalloc(&d_frame, sizeof(COLOR) * CHANNELS * size); printf("Created space for frame result on device\n"); char * title = (char *)malloc(sizeof(char) * strlen(WINDOW_TITLE)); memcpy(title, WINDOW_TITLE, strlen(WINDOW_TITLE)); canvas = Canvas_create(h, w, title); free(title); printf("Created canvas\n"); Canvas_setRenderFunction(canvas, do_work, 1); printf("Set canvas render function\n"); // Allocate space for he sphere animations sphere_speeds = (float *) malloc(sizeof(float) * h_world->n_objects); for (int sphere_itr = 0; sphere_itr < h_world->n_objects; sphere_itr++) { sphere_speeds[sphere_itr] = SPEED_FACTOR * ((float) (h_world->objects[sphere_itr].sphere.radius)); } // Begin the main render loop printf("Beginning raytracer loop\n"); Canvas_startLoop(canvas, argc, argv); // Raytracer(d_frame, d_rays, h_world, h_camera->width * h_camera->height, BLOCKS, THREADS, MAX_REFLECTIONS); Canvas_free(canvas); printf("Freed canvas\n"); hipFree(d_frame); printf("Freed frame on device\n"); hipFree(d_rays); printf("Freed rays on device\n"); Camera_freeDevice(d_camera); printf("Freed camera on device\n"); Camera_freeHost(h_camera); printf("Freed camera on host\n"); World_freeHost(h_world); printf("Freed world on host\n"); free(sphere_speeds); return EXIT_SUCCESS; }
47545601be8afa9516662f28bc8fe01c386019fb.cu
#include <stdio.h> #include <time.h> #include "camera.h" #include "world.h" #include "line.h" #include "raytracer.h" #include "canvas.h" #include "sphere.h" #define WINDOW_TITLE "CUDA Raytracer by Nick & Zaid\0" #define BLOCKS 512 #define THREADS 32 #define MAX_REFLECTIONS 3 #define SPEED_FACTOR 0.1 camera_t * h_camera; COLOR * d_frame; canvas_t * canvas; line_t * d_rays; camera_t * d_camera; world_t * h_world; float * sphere_speeds; void do_work(); void animate_spheres(); void animate_spheres() { int sphere_itr; sphere_t * s; float future; for (sphere_itr = 0; sphere_itr < h_world->n_objects; sphere_itr++) { if (h_world->objects[sphere_itr].type == TRIANGLE) { continue; } s = &(h_world->objects[sphere_itr].sphere); future = s->center[1] + sphere_speeds[sphere_itr] + (sphere_speeds[sphere_itr] < 0 ? -(s->radius) : s->radius); if (future < 10 || future > (h_camera->height - 10)) { sphere_speeds[sphere_itr] *= -1; } s->center[1] += sphere_speeds[sphere_itr]; } } void do_work() { clock_t tick = clock(); // Trace the next frame Raytracer(d_frame, d_rays, h_world, h_camera->width * h_camera->height, BLOCKS, THREADS, MAX_REFLECTIONS); // Copy the raytraced frame back to the host cudaMemcpy(canvas->pixels, d_frame, sizeof(COLOR) * CHANNELS * h_camera->width * h_camera->height, cudaMemcpyDeviceToHost); clock_t tock = clock(); sprintf(canvas->message, "FPS: %.2lf\n", 1.0 / ((double)(tock - tick) / CLOCKS_PER_SEC)); animate_spheres(); } int main(int argc, char ** argv) { if (argc != 2) { printf("Please provide the scene file path as an argument.\n"); return EXIT_FAILURE; } FILE * fp = fopen(argv[1], "r"); if (!fp) { printf("Unable to open file.\n"); return EXIT_FAILURE; } h_camera = Camera_read(fp); printf("Read and Created camera on host\n"); h_world = World_read(fp); printf("Read and created world on host\n"); fclose(fp); d_camera = Camera_toDevice(h_camera); printf("Copied camera to device\n"); int h = h_camera->height, w = h_camera->width, size = h * w; cudaMalloc(&d_rays, sizeof(line_t) * size); Camera_createRays<<<BLOCKS, THREADS>>>(d_camera, d_rays); printf("Created rays from camera on device\n"); cudaMalloc(&d_frame, sizeof(COLOR) * CHANNELS * size); printf("Created space for frame result on device\n"); char * title = (char *)malloc(sizeof(char) * strlen(WINDOW_TITLE)); memcpy(title, WINDOW_TITLE, strlen(WINDOW_TITLE)); canvas = Canvas_create(h, w, title); free(title); printf("Created canvas\n"); Canvas_setRenderFunction(canvas, do_work, 1); printf("Set canvas render function\n"); // Allocate space for he sphere animations sphere_speeds = (float *) malloc(sizeof(float) * h_world->n_objects); for (int sphere_itr = 0; sphere_itr < h_world->n_objects; sphere_itr++) { sphere_speeds[sphere_itr] = SPEED_FACTOR * ((float) (h_world->objects[sphere_itr].sphere.radius)); } // Begin the main render loop printf("Beginning raytracer loop\n"); Canvas_startLoop(canvas, argc, argv); // Raytracer(d_frame, d_rays, h_world, h_camera->width * h_camera->height, BLOCKS, THREADS, MAX_REFLECTIONS); Canvas_free(canvas); printf("Freed canvas\n"); cudaFree(d_frame); printf("Freed frame on device\n"); cudaFree(d_rays); printf("Freed rays on device\n"); Camera_freeDevice(d_camera); printf("Freed camera on device\n"); Camera_freeHost(h_camera); printf("Freed camera on host\n"); World_freeHost(h_world); printf("Freed world on host\n"); free(sphere_speeds); return EXIT_SUCCESS; }
87af03b17a28c1cc30aab90a71ae4ed686757052.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (C) 2020 THL A29 Limited, a Tencent company. // All rights reserved. // Licensed under the BSD 3-Clause License (the "License"); you may // not use this file except in compliance with the License. You may // obtain a copy of the License at // https://opensource.org/licenses/BSD-3-Clause // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" basis, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the License. // See the AUTHORS file for names of contributors. #include <numeric> #include "ide_macro.h" #include "turbo_transformers/layers/kernels/gpu_activation_kernel.h" namespace turbo_transformers { namespace layers { namespace kernels { namespace { template <typename T, ActivationType ActType> __inline__ __device__ T ActvationOp(const T& x); template <> __inline__ __device__ float ActvationOp<float, ActivationType::Gelu>( const float& x) { float cdf = 0.5f * (1.0f + tanhf((0.7978845608028654f * (x + 0.044715f * x * x * x)))); return x * cdf; } template <> __inline__ __device__ float ActvationOp<float, ActivationType::Tanh>( const float& x) { return tanhf(x); } } // namespace template <typename T, ActivationType ActType> static __global__ void add_bias_act(const T* bias, int batch_size, int feature_dim, T* out) { T val, reg_bias; int row_id; int elem_per_thread = (feature_dim + blockDim.x - 1) / blockDim.x; int tid = threadIdx.x; for (int i = 0; i < elem_per_thread; ++i) { int offset = i * blockDim.x + tid; if (offset < feature_dim) { reg_bias = bias[offset]; row_id = blockIdx.x; val = out[offset + row_id * feature_dim] + reg_bias; out[offset + row_id * feature_dim] = ActvationOp<T, ActType>(val); } } } template <typename T, ActivationType ActType> void GPUAddBiasActKernel(const T* bias_data, int64_t batch_size, int64_t feature_dim, hipStream_t stream, T* out_data) { dim3 grid(batch_size); int block_size = min(1024, (int)(feature_dim / 4)); dim3 block(block_size); hipLaunchKernelGGL(( add_bias_act<T, ActType>), dim3(grid), dim3(block), 0, stream, bias_data, batch_size, feature_dim, out_data); } template void GPUAddBiasActKernel<float, ActivationType::Gelu>( const float* bias_data, int64_t batch_size, int64_t feature_dim, hipStream_t stream, float* out_data); template void GPUAddBiasActKernel<float, ActivationType::Tanh>( const float* bias_data, int64_t batch_size, int64_t feature_dim, hipStream_t stream, float* out_data); } // namespace kernels } // namespace layers } // namespace turbo_transformers
87af03b17a28c1cc30aab90a71ae4ed686757052.cu
// Copyright (C) 2020 THL A29 Limited, a Tencent company. // All rights reserved. // Licensed under the BSD 3-Clause License (the "License"); you may // not use this file except in compliance with the License. You may // obtain a copy of the License at // https://opensource.org/licenses/BSD-3-Clause // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" basis, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the License. // See the AUTHORS file for names of contributors. #include <numeric> #include "ide_macro.h" #include "turbo_transformers/layers/kernels/gpu_activation_kernel.h" namespace turbo_transformers { namespace layers { namespace kernels { namespace { template <typename T, ActivationType ActType> __inline__ __device__ T ActvationOp(const T& x); template <> __inline__ __device__ float ActvationOp<float, ActivationType::Gelu>( const float& x) { float cdf = 0.5f * (1.0f + tanhf((0.7978845608028654f * (x + 0.044715f * x * x * x)))); return x * cdf; } template <> __inline__ __device__ float ActvationOp<float, ActivationType::Tanh>( const float& x) { return tanhf(x); } } // namespace template <typename T, ActivationType ActType> static __global__ void add_bias_act(const T* bias, int batch_size, int feature_dim, T* out) { T val, reg_bias; int row_id; int elem_per_thread = (feature_dim + blockDim.x - 1) / blockDim.x; int tid = threadIdx.x; for (int i = 0; i < elem_per_thread; ++i) { int offset = i * blockDim.x + tid; if (offset < feature_dim) { reg_bias = bias[offset]; row_id = blockIdx.x; val = out[offset + row_id * feature_dim] + reg_bias; out[offset + row_id * feature_dim] = ActvationOp<T, ActType>(val); } } } template <typename T, ActivationType ActType> void GPUAddBiasActKernel(const T* bias_data, int64_t batch_size, int64_t feature_dim, cudaStream_t stream, T* out_data) { dim3 grid(batch_size); int block_size = min(1024, (int)(feature_dim / 4)); dim3 block(block_size); add_bias_act<T, ActType><<<grid, block, 0, stream>>>(bias_data, batch_size, feature_dim, out_data); } template void GPUAddBiasActKernel<float, ActivationType::Gelu>( const float* bias_data, int64_t batch_size, int64_t feature_dim, cudaStream_t stream, float* out_data); template void GPUAddBiasActKernel<float, ActivationType::Tanh>( const float* bias_data, int64_t batch_size, int64_t feature_dim, cudaStream_t stream, float* out_data); } // namespace kernels } // namespace layers } // namespace turbo_transformers
75ab4ae074734a270c05de859d6c52d932cebf73.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <functional> #include <layers/elementwise_multiply_layer.hpp> #include <utils.cuh> #include <utils.hpp> #ifndef NDEBUG #include <iostream> #endif namespace HugeCTR { namespace { #define BLOCK_DIM_SIZE 32 template <typename T> __global__ void elementwise_multiply_kernel(T** inputs, T* output, int size, int num) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < size) { T tmp = 1; for (int i = 0; i < num; i++) { tmp *= inputs[i][tid]; } output[tid] = tmp; } } template <typename T> __global__ void elementwise_multiply_dgrad_kernel(const T* top_grad, T** dgrads, int size, int num) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < size) { for (int i = 0; i < num; i++) { dgrads[i][tid] = top_grad[tid]; } } } template <> __global__ void elementwise_multiply_kernel<__half>(__half** inputs, __half* output, int size, int num) { const __half2** inputs2 = (const __half2**)(inputs); __half2* output2 = (__half2*)(output); int size2 = size / 2; const __half2 one = __half2half2(__float2half(1.0f)); int start = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = start; i < size2; i += stride) { __half2 tmp = one; for (int j = 0; j < num; ++j) { tmp *= inputs2[j][i]; } output2[i] = tmp; } if (start == 0 && size % 2 > 0) { __half tmp = __float2half(1.0f); for (int j = 0; j < num; ++j) { tmp *= inputs[j][size - 1]; } output[size - 1] = tmp; } } template <> __global__ void elementwise_multiply_dgrad_kernel<__half>(const __half* top_grad, __half** dgrads, int size, int num) { const __half2* top_grad2 = (const __half2*)(top_grad); __half2** dgrads2 = (__half2**)(dgrads); int size2 = size / 2; int start = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = start; i < size2; i += stride) { for (int j = 0; j < num; ++j) { dgrads2[j][i] = top_grad2[i]; } } if (start == 0 && size % 2 > 0) { for (int j = 0; j < num; ++j) { dgrads[j][size - 1] = top_grad[size - 1]; } } } } // end of namespace template <typename T> ElementwiseMultiplyLayer<T>::ElementwiseMultiplyLayer(const Tensors2<T>& in_tensors, const Tensor2<T>& out_tensor, const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff, const std::shared_ptr<GPUResource>& gpu_resource) : Layer(gpu_resource) { try { size_ = in_tensors[0].get_num_elements(); num_ = in_tensors.size(); // error input checking auto dims = in_tensors[0].get_dimensions(); if (num_ < 2) { CK_THROW_(Error_t::WrongInput, "ElementwiseMultiplyLayer needs at least 2 input tensors"); } for (size_t i = 1; i < num_; i++) { if (in_tensors[i].get_dimensions().size() != dims.size()) { CK_THROW_(Error_t::WrongInput, "All the input tensors must have the same num of dims"); } for (unsigned int j = 0; j < dims.size(); j++) { if (in_tensors[i].get_dimensions()[j] != dims[j]) { CK_THROW_(Error_t::WrongInput, "All the 
input tensors must have the same dims"); } } } for (size_t i = 0; i < num_; i++) { in_tensors_.push_back(in_tensors[i]); } out_tensors_.push_back(out_tensor); blobs_buff->reserve({num_}, &d_inputs_); } catch (const std::runtime_error& rt_err) { std::cerr << rt_err.what() << std::endl; throw; } } template <typename T> void ElementwiseMultiplyLayer<T>::initialize() { std::shared_ptr<GeneralBuffer2<CudaHostAllocator>> pinned_host_buf = GeneralBuffer2<CudaHostAllocator>::create(); pinned_host_buf->reserve({num_}, &h_inputs_); pinned_host_buf->allocate(); for (size_t i = 0; i < num_; i++) { h_inputs_.get_ptr()[i] = in_tensors_[i].get_ptr(); } CK_CUDA_THROW_(hipMemcpyAsync((void*)d_inputs_.get_ptr(), (void*)h_inputs_.get_ptr(), num_ * sizeof(T*), hipMemcpyHostToDevice, get_gpu().get_stream())); } template <typename T> void ElementwiseMultiplyLayer<T>::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); T* output = out_tensors_[0].get_ptr(); dim3 block_size(256, 1, 1); dim3 grid_size((size_ + block_size.x - 1) / block_size.x, 1, 1); hipLaunchKernelGGL(( elementwise_multiply_kernel), dim3(grid_size), dim3(block_size), 0, get_gpu().get_stream(), d_inputs_.get_ptr(), output, size_, num_); } template <typename T> void ElementwiseMultiplyLayer<T>::bprop() { CudaDeviceContext context(get_device_id()); T* output = out_tensors_[0].get_ptr(); dim3 blockSize(256, 1, 1); dim3 gridSize((size_ + blockSize.x - 1) / blockSize.x, 1, 1); hipLaunchKernelGGL(( elementwise_multiply_dgrad_kernel), dim3(gridSize), dim3(blockSize), 0, get_gpu().get_stream(), output, d_inputs_.get_ptr(), size_, num_); } template <> void ElementwiseMultiplyLayer<__half>::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); __half* output = out_tensors_[0].get_ptr(); dim3 block_size(256, 1, 1); dim3 grid_size((size_ / 2 + block_size.x - 1) / block_size.x, 1, 1); hipLaunchKernelGGL(( elementwise_multiply_kernel), dim3(grid_size), dim3(block_size), 0, get_gpu().get_stream(), d_inputs_.get_ptr(), output, size_, num_); } template <> void ElementwiseMultiplyLayer<__half>::bprop() { CudaDeviceContext context(get_device_id()); __half* output = out_tensors_[0].get_ptr(); dim3 blockSize(256, 1, 1); dim3 gridSize((size_ / 2 + blockSize.x - 1) / blockSize.x, 1, 1); hipLaunchKernelGGL(( elementwise_multiply_dgrad_kernel), dim3(gridSize), dim3(blockSize), 0, get_gpu().get_stream(), output, d_inputs_.get_ptr(), size_, num_); } template class ElementwiseMultiplyLayer<float>; template class ElementwiseMultiplyLayer<__half>; } // namespace HugeCTR
75ab4ae074734a270c05de859d6c52d932cebf73.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <functional> #include <layers/elementwise_multiply_layer.hpp> #include <utils.cuh> #include <utils.hpp> #ifndef NDEBUG #include <iostream> #endif namespace HugeCTR { namespace { #define BLOCK_DIM_SIZE 32 template <typename T> __global__ void elementwise_multiply_kernel(T** inputs, T* output, int size, int num) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < size) { T tmp = 1; for (int i = 0; i < num; i++) { tmp *= inputs[i][tid]; } output[tid] = tmp; } } template <typename T> __global__ void elementwise_multiply_dgrad_kernel(const T* top_grad, T** dgrads, int size, int num) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < size) { for (int i = 0; i < num; i++) { dgrads[i][tid] = top_grad[tid]; } } } template <> __global__ void elementwise_multiply_kernel<__half>(__half** inputs, __half* output, int size, int num) { const __half2** inputs2 = (const __half2**)(inputs); __half2* output2 = (__half2*)(output); int size2 = size / 2; const __half2 one = __half2half2(__float2half(1.0f)); int start = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = start; i < size2; i += stride) { __half2 tmp = one; for (int j = 0; j < num; ++j) { tmp *= inputs2[j][i]; } output2[i] = tmp; } if (start == 0 && size % 2 > 0) { __half tmp = __float2half(1.0f); for (int j = 0; j < num; ++j) { tmp *= inputs[j][size - 1]; } output[size - 1] = tmp; } } template <> __global__ void elementwise_multiply_dgrad_kernel<__half>(const __half* top_grad, __half** dgrads, int size, int num) { const __half2* top_grad2 = (const __half2*)(top_grad); __half2** dgrads2 = (__half2**)(dgrads); int size2 = size / 2; int start = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = start; i < size2; i += stride) { for (int j = 0; j < num; ++j) { dgrads2[j][i] = top_grad2[i]; } } if (start == 0 && size % 2 > 0) { for (int j = 0; j < num; ++j) { dgrads[j][size - 1] = top_grad[size - 1]; } } } } // end of namespace template <typename T> ElementwiseMultiplyLayer<T>::ElementwiseMultiplyLayer(const Tensors2<T>& in_tensors, const Tensor2<T>& out_tensor, const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff, const std::shared_ptr<GPUResource>& gpu_resource) : Layer(gpu_resource) { try { size_ = in_tensors[0].get_num_elements(); num_ = in_tensors.size(); // error input checking auto dims = in_tensors[0].get_dimensions(); if (num_ < 2) { CK_THROW_(Error_t::WrongInput, "ElementwiseMultiplyLayer needs at least 2 input tensors"); } for (size_t i = 1; i < num_; i++) { if (in_tensors[i].get_dimensions().size() != dims.size()) { CK_THROW_(Error_t::WrongInput, "All the input tensors must have the same num of dims"); } for (unsigned int j = 0; j < dims.size(); j++) { if (in_tensors[i].get_dimensions()[j] != dims[j]) { CK_THROW_(Error_t::WrongInput, "All the input tensors must have the same dims"); } } } for (size_t i = 0; i < num_; i++) { 
in_tensors_.push_back(in_tensors[i]); } out_tensors_.push_back(out_tensor); blobs_buff->reserve({num_}, &d_inputs_); } catch (const std::runtime_error& rt_err) { std::cerr << rt_err.what() << std::endl; throw; } } template <typename T> void ElementwiseMultiplyLayer<T>::initialize() { std::shared_ptr<GeneralBuffer2<CudaHostAllocator>> pinned_host_buf = GeneralBuffer2<CudaHostAllocator>::create(); pinned_host_buf->reserve({num_}, &h_inputs_); pinned_host_buf->allocate(); for (size_t i = 0; i < num_; i++) { h_inputs_.get_ptr()[i] = in_tensors_[i].get_ptr(); } CK_CUDA_THROW_(cudaMemcpyAsync((void*)d_inputs_.get_ptr(), (void*)h_inputs_.get_ptr(), num_ * sizeof(T*), cudaMemcpyHostToDevice, get_gpu().get_stream())); } template <typename T> void ElementwiseMultiplyLayer<T>::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); T* output = out_tensors_[0].get_ptr(); dim3 block_size(256, 1, 1); dim3 grid_size((size_ + block_size.x - 1) / block_size.x, 1, 1); elementwise_multiply_kernel<<<grid_size, block_size, 0, get_gpu().get_stream()>>>(d_inputs_.get_ptr(), output, size_, num_); } template <typename T> void ElementwiseMultiplyLayer<T>::bprop() { CudaDeviceContext context(get_device_id()); T* output = out_tensors_[0].get_ptr(); dim3 blockSize(256, 1, 1); dim3 gridSize((size_ + blockSize.x - 1) / blockSize.x, 1, 1); elementwise_multiply_dgrad_kernel<<<gridSize, blockSize, 0, get_gpu().get_stream()>>>(output, d_inputs_.get_ptr(), size_, num_); } template <> void ElementwiseMultiplyLayer<__half>::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); __half* output = out_tensors_[0].get_ptr(); dim3 block_size(256, 1, 1); dim3 grid_size((size_ / 2 + block_size.x - 1) / block_size.x, 1, 1); elementwise_multiply_kernel<<<grid_size, block_size, 0, get_gpu().get_stream()>>>(d_inputs_.get_ptr(), output, size_, num_); } template <> void ElementwiseMultiplyLayer<__half>::bprop() { CudaDeviceContext context(get_device_id()); __half* output = out_tensors_[0].get_ptr(); dim3 blockSize(256, 1, 1); dim3 gridSize((size_ / 2 + blockSize.x - 1) / blockSize.x, 1, 1); elementwise_multiply_dgrad_kernel<<<gridSize, blockSize, 0, get_gpu().get_stream()>>>(output, d_inputs_.get_ptr(), size_, num_); } template class ElementwiseMultiplyLayer<float>; template class ElementwiseMultiplyLayer<__half>; } // namespace HugeCTR
b367f71ea5848bc058b74ac5df4decdfdb037027.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" #include <assert.h> extern "C" { #include "blas.h" #include "hip/hip_runtime.h" #include "utils.h" } __global__ void scale_bias_kernel(float *output, float *biases, int n, int size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int filter = blockIdx.y; int batch = blockIdx.z; if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter]; } void scale_bias_gpu(float *output, float *biases, int batch, int n, int size) { dim3 dimGrid((size-1)/BLOCK + 1, n, batch); dim3 dimBlock(BLOCK, 1, 1); hipLaunchKernelGGL(( scale_bias_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, output, biases, n, size); check_error(hipPeekAtLastError()); } __global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? delta[index]*x_norm[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i]; } } void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { hipLaunchKernelGGL(( backward_scale_kernel), dim3(n), dim3(BLOCK), 0, 0, x_norm, delta, batch, n, size, scale_updates); check_error(hipPeekAtLastError()); } __global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n*size*batch) return; int i = index % size; index /= size; int j = index % n; index /= n; int k = index; output[(k*n+j)*size + i] += biases[j]; } void add_bias_gpu(float *output, float *biases, int batch, int n, int size) { int num = n*size*batch; hipLaunchKernelGGL(( add_bias_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, output, biases, batch, n, size); check_error(hipPeekAtLastError()); } __global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n) return; int b; float sum = 0; for(b = 0; b < batch; ++b){ int i = b*n + index; sum += delta[i]; } bias_updates[index] += sum; } __global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? 
delta[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i]; } } void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size) { if(size == 1){ hipLaunchKernelGGL(( backward_bias_conn_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, bias_updates, delta, batch, n); }else{ hipLaunchKernelGGL(( backward_bias_kernel), dim3(n), dim3(BLOCK), 0, 0, bias_updates, delta, batch, n, size); } check_error(hipPeekAtLastError()); } /* __global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int f1 = index / n; int f2 = index % n; if (f2 <= f1) return; float sum = 0; float norm1 = 0; float norm2 = 0; int b, i; for(b = 0; b < batch; ++b){ for(i = 0; i < size; ++i){ int i1 = b * size * n + f1 * size + i; int i2 = b * size * n + f2 * size + i; sum += output[i1] * output[i2]; norm1 += output[i1] * output[i1]; norm2 += output[i2] * output[i2]; } } norm1 = sqrt(norm1); norm2 = sqrt(norm2); float norm = norm1 * norm2; sum = sum / norm; for(b = 0; b < batch; ++b){ for(i = 0; i < size; ++i){ int i1 = b * size * n + f1 * size + i; int i2 = b * size * n + f2 * size + i; delta[i1] += - scale * sum * output[i2] / norm; delta[i2] += - scale * sum * output[i1] / norm; } } } void dot_error_gpu(layer l) { dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu); check_error(hipPeekAtLastError()); } */ __global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; x[index] = x[index] + (rate * sqrt(1.-pow(B2, t)) / (1.-pow(B1, t)) * m[index] / (sqrt(v[index]) + eps)); } extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { hipLaunchKernelGGL(( adam_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, x, m, v, B1, B2, rate, eps, t); check_error(hipPeekAtLastError()); } extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t) { scal_gpu(n, B1, m, 1); scal_gpu(n, B2, v, 1); axpy_gpu(n, -decay*batch, w, 1, d, 1); axpy_gpu(n, (1-B1), d, 1, m, 1); mul_gpu(n, d, 1, d, 1); axpy_gpu(n, (1-B2), d, 1, v, 1); adam_gpu(n, w, m, v, B1, B2, rate, eps, t); fill_gpu(n, 0, d, 1); } __global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; x[index] = (x[index] - mean[f])/(sqrt(variance[f] + .00001f)); } __global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; delta[index] = delta[index] * 1./(sqrt(variance[f] + .00001f)) + variance_delta[f] * 2. 
* (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch); } extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) { size_t N = batch*filters*spatial; hipLaunchKernelGGL(( normalize_delta_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta); check_error(hipPeekAtLastError()); } __global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; variance_delta[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance_delta[i] += delta[index]*(x[index] - mean[i]); } } variance_delta[i] *= -.5 * pow(variance[i] + .00001f, (float)(-3./2.)); } __global__ void accumulate_kernel(float *x, int n, int groups, float *sum) { int k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= groups) return; sum[i] = 0; for(k = 0; k < n; ++k){ sum[i] += x[k*groups + i]; } } __global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? delta[index] : 0; } } __syncthreads(); if(id == 0){ mean_delta[filter] = 0; for(i = 0; i < threads; ++i){ mean_delta[filter] += local[i]; } mean_delta[filter] *= (-1./sqrt(variance[filter] + .00001f)); } } __global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
delta[index]*(x[index] - mean[filter]) : 0; } } __syncthreads(); if(id == 0){ variance_delta[filter] = 0; for(i = 0; i < threads; ++i){ variance_delta[filter] += local[i]; } variance_delta[filter] *= -.5 * pow(variance[filter] + .00001f, (float)(-3./2.)); } } __global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean_delta[i] = 0; for (j = 0; j < batch; ++j) { for (k = 0; k < spatial; ++k) { int index = j*filters*spatial + i*spatial + k; mean_delta[i] += delta[index]; } } mean_delta[i] *= (-1./sqrt(variance[i] + .00001f)); } extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { hipLaunchKernelGGL(( mean_delta_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, delta, variance, batch, filters, spatial, mean_delta); check_error(hipPeekAtLastError()); } extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { hipLaunchKernelGGL(( fast_mean_delta_kernel), dim3(filters), dim3(BLOCK), 0, 0, delta, variance, batch, filters, spatial, mean_delta); check_error(hipPeekAtLastError()); } extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { hipLaunchKernelGGL(( fast_variance_delta_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, delta, mean, variance, batch, filters, spatial, variance_delta); check_error(hipPeekAtLastError()); } __global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { float scale = 1./(batch * spatial); int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; mean[i] += x[index]; } } mean[i] *= scale; } __global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { float scale = 1./(batch * spatial - 1); int j,k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; variance[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance[i] += pow((x[index] - mean[i]), 2); } } variance[i] *= scale; } __global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_index = i; int in_w = i%w; i = i/w; int in_h = i%h; i = i/h; int in_c = i%c; i = i/c; int b = i%batch; int out_c = c/(stride*stride); int c2 = in_c % out_c; int offset = in_c / out_c; int w2 = in_w*stride + offset % stride; int h2 = in_h*stride + offset / stride; //printf("%d\n", offset); int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b)); // printf("%d %d %d\n", w2, h2, c2); //printf("%d %d\n", in_index, out_index); //if(out_index >= N || out_index < 0) printf("bad bad bad \n"); if(forward) out[out_index] = x[in_index]; else out[in_index] = x[out_index]; //if(forward) out[1] = x[1]; //else out[0] = x[0]; } __global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[OFFY+i*INCY] += 
ALPHA*X[OFFX+i*INCX]; } __global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA); } __global__ void const_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX])); } __global__ void supp_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) { if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0; } } __global__ void add_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] += ALPHA; } __global__ void scal_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] *= ALPHA; } __global__ void fill_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void mask_kernel(int n, float *x, float mask_num, float *mask) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] = mask_num; } __global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX]; } __global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] *= X[i*INCX]; } extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) { size_t N = batch*filters*spatial; hipLaunchKernelGGL(( normalize_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, mean, variance, batch, filters, spatial); check_error(hipPeekAtLastError()); } __global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? x[index] : 0; } } __syncthreads(); if(id == 0){ mean[filter] = 0; for(i = 0; i < threads; ++i){ mean[filter] += local[i]; } mean[filter] /= spatial * batch; } } __global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
pow((x[index] - mean[filter]), 2) : 0; } } __syncthreads(); if(id == 0){ variance[filter] = 0; for(i = 0; i < threads; ++i){ variance[filter] += local[i]; } variance[filter] /= (spatial * batch - 1); } } extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { hipLaunchKernelGGL(( fast_mean_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, batch, filters, spatial, mean); check_error(hipPeekAtLastError()); } extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { hipLaunchKernelGGL(( fast_variance_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, mean, batch, filters, spatial, variance); check_error(hipPeekAtLastError()); } extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { hipLaunchKernelGGL(( mean_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, x, batch, filters, spatial, mean); check_error(hipPeekAtLastError()); } extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { hipLaunchKernelGGL(( variance_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, x, mean, batch, filters, spatial, variance); check_error(hipPeekAtLastError()); } extern "C" void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY); } extern "C" void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { hipLaunchKernelGGL(( pow_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX, Y, INCY); check_error(hipPeekAtLastError()); } extern "C" void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { hipLaunchKernelGGL(( axpy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY); check_error(hipPeekAtLastError()); } extern "C" void copy_gpu(int N, float * X, int INCX, float * Y, int INCY) { copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY); } extern "C" void mul_gpu(int N, float * X, int INCX, float * Y, int INCY) { hipLaunchKernelGGL(( mul_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, INCX, Y, INCY); check_error(hipPeekAtLastError()); } extern "C" void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { hipLaunchKernelGGL(( copy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, OFFX, INCX, Y, OFFY, INCY); check_error(hipPeekAtLastError()); } __global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_s = i%spatial; i = i/spatial; int in_c = i%layers; i = i/layers; int b = i; int i1 = b*layers*spatial + in_c*spatial + in_s; int i2 = b*layers*spatial + in_s*layers + in_c; if (forward) out[i2] = x[i1]; else out[i1] = x[i2]; } extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out) { int size = spatial*batch*layers; hipLaunchKernelGGL(( flatten_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, x, spatial, layers, batch, forward, out); check_error(hipPeekAtLastError()); } extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int size = w*h*c*batch; hipLaunchKernelGGL(( reorg_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, x, w, h, c, batch, stride, forward, out); check_error(hipPeekAtLastError()); } __global__ void scale_mask_kernel(int n, float *x, 
float mask_num, float *mask, float scale) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] *= scale; } extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale) { hipLaunchKernelGGL(( scale_mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask_num, mask, scale); check_error(hipPeekAtLastError()); } extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask) { hipLaunchKernelGGL(( mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask_num, mask); check_error(hipPeekAtLastError()); } extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( const_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( constrain_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( add_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void scal_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( scal_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void supp_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( supp_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( fill_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } __global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= size) return; int i = id % minw; id /= minw; int j = id % minh; id /= minh; int k = id % minc; id /= minc; int b = id % batch; int out_index = i*sample + w2*(j*sample + h2*(k + c2*b)); int add_index = i*stride + w1*(j*stride + h1*(k + c1*b)); out[out_index] += add[add_index]; } extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out) { int minw = (w1 < w2) ? w1 : w2; int minh = (h1 < h2) ? h1 : h2; int minc = (c1 < c2) ? c1 : c2; int stride = w1/w2; int sample = w2/w1; assert(stride == h1/h2); assert(sample == h2/h1); if(stride < 1) stride = 1; if(sample < 1) sample = 1; int size = batch * minw * minh * minc; hipLaunchKernelGGL(( shortcut_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out); check_error(hipPeekAtLastError()); } __global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; float abs_val = abs(diff); if(abs_val < 1) { error[i] = diff * diff; delta[i] = diff; } else { error[i] = 2*abs_val - 1; delta[i] = (diff > 0) ? 
1 : -1; } } } extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( smooth_l1_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = diff * diff; //I know this is technically wrong, deal with it. delta[i] = diff; } } extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( l2_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = abs(diff); delta[i] = (diff > 0) ? 1 : -1; } } extern "C" void l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( l1_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0); } } __global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ if(X) X[b*NX + j] += OUT[i]; } else { if(Y) Y[b*NY + j - NX] += OUT[i]; } } } extern "C" void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { hipLaunchKernelGGL(( deinter_kernel), dim3(cuda_gridsize((NX+NY)*B)), dim3(BLOCK), 0, 0, NX, X, NY, Y, B, OUT); check_error(hipPeekAtLastError()); } __global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ OUT[i] = X[b*NX + j]; } else { OUT[i] = Y[b*NY + j - NX]; } } } extern "C" void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { hipLaunchKernelGGL(( inter_kernel), dim3(cuda_gridsize((NX+NY)*B)), dim3(BLOCK), 0, 0, NX, X, NY, Y, B, OUT); check_error(hipPeekAtLastError()); } extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c) { hipLaunchKernelGGL(( weighted_sum_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, s, c); check_error(hipPeekAtLastError()); } __global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ if(da) da[i] += dc[i] * s[i]; if(db) db[i] += dc[i] * (1-s[i]); ds[i] += dc[i] * (a[i] - b[i]); } } extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc) { hipLaunchKernelGGL(( weighted_delta_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, s, da, db, ds, dc); check_error(hipPeekAtLastError()); } __global__ void mult_add_into_kernel(int n, float *a, float *b, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] += 
a[i]*b[i]; } } extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c) { hipLaunchKernelGGL(( mult_add_into_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, c); check_error(hipPeekAtLastError()); } __device__ void softmax_device(float *input, int n, float temp, int stride, float *output) { int i; float sum = 0; float largest = -INFINITY; for(i = 0; i < n; ++i){ int val = input[i*stride]; largest = (val>largest) ? val : largest; } for(i = 0; i < n; ++i){ float e = exp(input[i*stride]/temp - largest/temp); sum += e; output[i*stride] = e; } for(i = 0; i < n; ++i){ output[i*stride] /= sum; } } __global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= spatial*batch*groups) return; int s = id % spatial; id = id / spatial; int g = id % groups; int b = id / groups; int goff = group_offset[g]*spatial; int boff = b*stride; softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s); } extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier) { int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups); int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups); /* static int *tree_groups_size = 0; static int *tree_groups_offset = 0; if(!tree_groups_size){ tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups); tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups); } */ int num = spatial*batch*hier.groups; hipLaunchKernelGGL(( softmax_tree_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset); check_error(hipPeekAtLastError()); cuda_free((float *)tree_groups_size); cuda_free((float *)tree_groups_offset); } __global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= batch*groups) return; int b = id / groups; int g = id % groups; softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset); } extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { hipLaunchKernelGGL(( softmax_kernel), dim3(cuda_gridsize(batch*groups)), dim3(BLOCK), 0, 0, input, n, batch, batch_offset, groups, group_offset, stride, temp, output); check_error(hipPeekAtLastError()); }
b367f71ea5848bc058b74ac5df4decdfdb037027.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" #include <assert.h> extern "C" { #include "blas.h" #include "cuda.h" #include "utils.h" } __global__ void scale_bias_kernel(float *output, float *biases, int n, int size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int filter = blockIdx.y; int batch = blockIdx.z; if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter]; } void scale_bias_gpu(float *output, float *biases, int batch, int n, int size) { dim3 dimGrid((size-1)/BLOCK + 1, n, batch); dim3 dimBlock(BLOCK, 1, 1); scale_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size); check_error(cudaPeekAtLastError()); } __global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? delta[index]*x_norm[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i]; } } void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { backward_scale_kernel<<<n, BLOCK>>>(x_norm, delta, batch, n, size, scale_updates); check_error(cudaPeekAtLastError()); } __global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n*size*batch) return; int i = index % size; index /= size; int j = index % n; index /= n; int k = index; output[(k*n+j)*size + i] += biases[j]; } void add_bias_gpu(float *output, float *biases, int batch, int n, int size) { int num = n*size*batch; add_bias_kernel<<<cuda_gridsize(num), BLOCK>>>(output, biases, batch, n, size); check_error(cudaPeekAtLastError()); } __global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n) return; int b; float sum = 0; for(b = 0; b < batch; ++b){ int i = b*n + index; sum += delta[i]; } bias_updates[index] += sum; } __global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? 
delta[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i]; } } void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size) { if(size == 1){ backward_bias_conn_kernel<<<cuda_gridsize(n), BLOCK>>>(bias_updates, delta, batch, n); }else{ backward_bias_kernel<<<n, BLOCK>>>(bias_updates, delta, batch, n, size); } check_error(cudaPeekAtLastError()); } /* __global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int f1 = index / n; int f2 = index % n; if (f2 <= f1) return; float sum = 0; float norm1 = 0; float norm2 = 0; int b, i; for(b = 0; b < batch; ++b){ for(i = 0; i < size; ++i){ int i1 = b * size * n + f1 * size + i; int i2 = b * size * n + f2 * size + i; sum += output[i1] * output[i2]; norm1 += output[i1] * output[i1]; norm2 += output[i2] * output[i2]; } } norm1 = sqrt(norm1); norm2 = sqrt(norm2); float norm = norm1 * norm2; sum = sum / norm; for(b = 0; b < batch; ++b){ for(i = 0; i < size; ++i){ int i1 = b * size * n + f1 * size + i; int i2 = b * size * n + f2 * size + i; delta[i1] += - scale * sum * output[i2] / norm; delta[i2] += - scale * sum * output[i1] / norm; } } } void dot_error_gpu(layer l) { dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu); check_error(cudaPeekAtLastError()); } */ __global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; x[index] = x[index] + (rate * sqrt(1.-pow(B2, t)) / (1.-pow(B1, t)) * m[index] / (sqrt(v[index]) + eps)); } extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { adam_kernel<<<cuda_gridsize(n), BLOCK>>>(n, x, m, v, B1, B2, rate, eps, t); check_error(cudaPeekAtLastError()); } extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t) { scal_gpu(n, B1, m, 1); scal_gpu(n, B2, v, 1); axpy_gpu(n, -decay*batch, w, 1, d, 1); axpy_gpu(n, (1-B1), d, 1, m, 1); mul_gpu(n, d, 1, d, 1); axpy_gpu(n, (1-B2), d, 1, v, 1); adam_gpu(n, w, m, v, B1, B2, rate, eps, t); fill_gpu(n, 0, d, 1); } __global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; x[index] = (x[index] - mean[f])/(sqrt(variance[f] + .00001f)); } __global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; delta[index] = delta[index] * 1./(sqrt(variance[f] + .00001f)) + variance_delta[f] * 2. 
* (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch); } extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) { size_t N = batch*filters*spatial; normalize_delta_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta); check_error(cudaPeekAtLastError()); } __global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; variance_delta[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance_delta[i] += delta[index]*(x[index] - mean[i]); } } variance_delta[i] *= -.5 * pow(variance[i] + .00001f, (float)(-3./2.)); } __global__ void accumulate_kernel(float *x, int n, int groups, float *sum) { int k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= groups) return; sum[i] = 0; for(k = 0; k < n; ++k){ sum[i] += x[k*groups + i]; } } __global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? delta[index] : 0; } } __syncthreads(); if(id == 0){ mean_delta[filter] = 0; for(i = 0; i < threads; ++i){ mean_delta[filter] += local[i]; } mean_delta[filter] *= (-1./sqrt(variance[filter] + .00001f)); } } __global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
delta[index]*(x[index] - mean[filter]) : 0; } } __syncthreads(); if(id == 0){ variance_delta[filter] = 0; for(i = 0; i < threads; ++i){ variance_delta[filter] += local[i]; } variance_delta[filter] *= -.5 * pow(variance[filter] + .00001f, (float)(-3./2.)); } } __global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean_delta[i] = 0; for (j = 0; j < batch; ++j) { for (k = 0; k < spatial; ++k) { int index = j*filters*spatial + i*spatial + k; mean_delta[i] += delta[index]; } } mean_delta[i] *= (-1./sqrt(variance[i] + .00001f)); } extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { mean_delta_kernel<<<cuda_gridsize(filters), BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta); check_error(cudaPeekAtLastError()); } extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { fast_mean_delta_kernel<<<filters, BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta); check_error(cudaPeekAtLastError()); } extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { fast_variance_delta_kernel<<<filters, BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, variance_delta); check_error(cudaPeekAtLastError()); } __global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { float scale = 1./(batch * spatial); int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; mean[i] += x[index]; } } mean[i] *= scale; } __global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { float scale = 1./(batch * spatial - 1); int j,k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; variance[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance[i] += pow((x[index] - mean[i]), 2); } } variance[i] *= scale; } __global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_index = i; int in_w = i%w; i = i/w; int in_h = i%h; i = i/h; int in_c = i%c; i = i/c; int b = i%batch; int out_c = c/(stride*stride); int c2 = in_c % out_c; int offset = in_c / out_c; int w2 = in_w*stride + offset % stride; int h2 = in_h*stride + offset / stride; //printf("%d\n", offset); int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b)); // printf("%d %d %d\n", w2, h2, c2); //printf("%d %d\n", in_index, out_index); //if(out_index >= N || out_index < 0) printf("bad bad bad \n"); if(forward) out[out_index] = x[in_index]; else out[in_index] = x[out_index]; //if(forward) out[1] = x[1]; //else out[0] = x[0]; } __global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX]; } __global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) { 
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA); } __global__ void const_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX])); } __global__ void supp_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) { if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0; } } __global__ void add_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] += ALPHA; } __global__ void scal_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] *= ALPHA; } __global__ void fill_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void mask_kernel(int n, float *x, float mask_num, float *mask) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] = mask_num; } __global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX]; } __global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] *= X[i*INCX]; } extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) { size_t N = batch*filters*spatial; normalize_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, batch, filters, spatial); check_error(cudaPeekAtLastError()); } __global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? x[index] : 0; } } __syncthreads(); if(id == 0){ mean[filter] = 0; for(i = 0; i < threads; ++i){ mean[filter] += local[i]; } mean[filter] /= spatial * batch; } } __global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
pow((x[index] - mean[filter]), 2) : 0; } } __syncthreads(); if(id == 0){ variance[filter] = 0; for(i = 0; i < threads; ++i){ variance[filter] += local[i]; } variance[filter] /= (spatial * batch - 1); } } extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { fast_mean_kernel<<<filters, BLOCK>>>(x, batch, filters, spatial, mean); check_error(cudaPeekAtLastError()); } extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { fast_variance_kernel<<<filters, BLOCK>>>(x, mean, batch, filters, spatial, variance); check_error(cudaPeekAtLastError()); } extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { mean_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, batch, filters, spatial, mean); check_error(cudaPeekAtLastError()); } extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { variance_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, mean, batch, filters, spatial, variance); check_error(cudaPeekAtLastError()); } extern "C" void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY); } extern "C" void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { pow_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX, Y, INCY); check_error(cudaPeekAtLastError()); } extern "C" void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { axpy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY); check_error(cudaPeekAtLastError()); } extern "C" void copy_gpu(int N, float * X, int INCX, float * Y, int INCY) { copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY); } extern "C" void mul_gpu(int N, float * X, int INCX, float * Y, int INCY) { mul_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, INCX, Y, INCY); check_error(cudaPeekAtLastError()); } extern "C" void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { copy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, OFFX, INCX, Y, OFFY, INCY); check_error(cudaPeekAtLastError()); } __global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_s = i%spatial; i = i/spatial; int in_c = i%layers; i = i/layers; int b = i; int i1 = b*layers*spatial + in_c*spatial + in_s; int i2 = b*layers*spatial + in_s*layers + in_c; if (forward) out[i2] = x[i1]; else out[i1] = x[i2]; } extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out) { int size = spatial*batch*layers; flatten_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, spatial, layers, batch, forward, out); check_error(cudaPeekAtLastError()); } extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int size = w*h*c*batch; reorg_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, w, h, c, batch, stride, forward, out); check_error(cudaPeekAtLastError()); } __global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] *= scale; } extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale) { scale_mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, scale); 
check_error(cudaPeekAtLastError()); } extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask) { mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask); check_error(cudaPeekAtLastError()); } extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX) { const_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX) { constrain_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX) { add_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void scal_gpu(int N, float ALPHA, float * X, int INCX) { scal_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void supp_gpu(int N, float ALPHA, float * X, int INCX) { supp_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX) { fill_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } __global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= size) return; int i = id % minw; id /= minw; int j = id % minh; id /= minh; int k = id % minc; id /= minc; int b = id % batch; int out_index = i*sample + w2*(j*sample + h2*(k + c2*b)); int add_index = i*stride + w1*(j*stride + h1*(k + c1*b)); out[out_index] += add[add_index]; } extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out) { int minw = (w1 < w2) ? w1 : w2; int minh = (h1 < h2) ? h1 : h2; int minc = (c1 < c2) ? c1 : c2; int stride = w1/w2; int sample = w2/w1; assert(stride == h1/h2); assert(sample == h2/h1); if(stride < 1) stride = 1; if(sample < 1) sample = 1; int size = batch * minw * minh * minc; shortcut_kernel<<<cuda_gridsize(size), BLOCK>>>(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out); check_error(cudaPeekAtLastError()); } __global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; float abs_val = abs(diff); if(abs_val < 1) { error[i] = diff * diff; delta[i] = diff; } else { error[i] = 2*abs_val - 1; delta[i] = (diff > 0) ? 1 : -1; } } } extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { smooth_l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = diff * diff; //I know this is technically wrong, deal with it. 
delta[i] = diff; } } extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error) { l2_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = abs(diff); delta[i] = (diff > 0) ? 1 : -1; } } extern "C" void l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0); } } __global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ if(X) X[b*NX + j] += OUT[i]; } else { if(Y) Y[b*NY + j - NX] += OUT[i]; } } } extern "C" void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { deinter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT); check_error(cudaPeekAtLastError()); } __global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ OUT[i] = X[b*NX + j]; } else { OUT[i] = Y[b*NY + j - NX]; } } } extern "C" void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { inter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT); check_error(cudaPeekAtLastError()); } extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c) { weighted_sum_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, c); check_error(cudaPeekAtLastError()); } __global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ if(da) da[i] += dc[i] * s[i]; if(db) db[i] += dc[i] * (1-s[i]); ds[i] += dc[i] * (a[i] - b[i]); } } extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc) { weighted_delta_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, da, db, ds, dc); check_error(cudaPeekAtLastError()); } __global__ void mult_add_into_kernel(int n, float *a, float *b, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] += a[i]*b[i]; } } extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c) { mult_add_into_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, c); check_error(cudaPeekAtLastError()); } __device__ void softmax_device(float *input, int n, float temp, int stride, float *output) { int i; float sum = 0; float largest = -INFINITY; for(i = 0; i < n; ++i){ int val = input[i*stride]; largest = (val>largest) ? 
val : largest; } for(i = 0; i < n; ++i){ float e = exp(input[i*stride]/temp - largest/temp); sum += e; output[i*stride] = e; } for(i = 0; i < n; ++i){ output[i*stride] /= sum; } } __global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= spatial*batch*groups) return; int s = id % spatial; id = id / spatial; int g = id % groups; int b = id / groups; int goff = group_offset[g]*spatial; int boff = b*stride; softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s); } extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier) { int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups); int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups); /* static int *tree_groups_size = 0; static int *tree_groups_offset = 0; if(!tree_groups_size){ tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups); tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups); } */ int num = spatial*batch*hier.groups; softmax_tree_kernel<<<cuda_gridsize(num), BLOCK>>>(input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset); check_error(cudaPeekAtLastError()); cuda_free((float *)tree_groups_size); cuda_free((float *)tree_groups_offset); } __global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= batch*groups) return; int b = id / groups; int g = id % groups; softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset); } extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { softmax_kernel<<<cuda_gridsize(batch*groups), BLOCK>>>(input, n, batch, batch_offset, groups, group_offset, stride, temp, output); check_error(cudaPeekAtLastError()); }
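// Illustrative sketch, not part of the original sources: a small host-side
// demo of the NCHW layout the bias kernels above assume, where output is
// indexed as (batch*n + filter)*size + offset and size = out_h*out_w. It
// assumes the darknet GPU build (blas.h declaring fill_gpu/add_bias_gpu/
// scale_bias_gpu, cuda.h for the launch helpers); the concrete sizes and
// bias values are invented for the example.
#include <cuda_runtime.h>
#include <stdio.h>
extern "C" {
#include "blas.h"
#include "cuda.h"
}

static void bias_layout_demo(void)
{
    const int batch = 2, n = 3, size = 4;       // 2 images, 3 filters, 2x2 maps
    const int total = batch * n * size;

    float *d_out = 0, *d_bias = 0;
    cudaMalloc((void **)&d_out,  total * sizeof(float));
    cudaMalloc((void **)&d_bias, n * sizeof(float));

    fill_gpu(total, 1.0f, d_out, 1);            // every activation starts at 1
    float h_bias[3] = {0.0f, 10.0f, 20.0f};
    cudaMemcpy(d_bias, h_bias, sizeof(h_bias), cudaMemcpyHostToDevice);

    add_bias_gpu(d_out, d_bias, batch, n, size);    // out[b][f][i] += bias[f]
    scale_bias_gpu(d_out, d_bias, batch, n, size);  // out[b][f][i] *= bias[f]

    float h_out[24];
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    // filter 0 -> (1+0)*0 = 0, filter 1 -> (1+10)*10 = 110, filter 2 -> (1+20)*20 = 420
    printf("%g %g %g\n", h_out[0], h_out[size], h_out[2*size]);

    cudaFree(d_out);
    cudaFree(d_bias);
}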
700641bb8edb6b563916bdc24a526ec74eadd08c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "hist-equ.h"

__global__ void calc_histogram(int * hist_out, unsigned char * img_in, int img_size, int nbr_bin){
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    const int gridW = gridDim.x * blockDim.x;
    int img_position = iy * gridW + ix;   // position within the image, as a function of gridW

    // __shared__ hist_out[nbr_bin];
    if (img_position < nbr_bin) {   // changing img_position to threadIdx.x in this if raises the contrast a lot
        hist_out[img_position] = 0;
    }
    __syncthreads();

    if(img_position < img_size){
        atomicAdd(&hist_out[img_in[img_position]], 1);
    }
    __syncthreads();
}

//int *lut = (int *)malloc(sizeof(int)*nbr_bin);
__global__ void calc_lut(int *lut, int * hist_in, int img_size, int nbr_bin){
    // Assumes a single block of 256 threads, one thread per histogram bin.
    __shared__ int shared_hist[256];
    shared_hist[threadIdx.x] = hist_in[threadIdx.x];
    __syncthreads();

    __shared__ int cdf[256];
    __syncthreads();

    int i, min, d;
    //int cdf = 0;
    min = 0;
    i = 0;
    while(min == 0){
        min = shared_hist[i++];
    }
    d = img_size - min;

    // Hillis-Steele inclusive prefix sum over the histogram. The original loop
    // bound depended on threadIdx.x, so threads reached __syncthreads() a
    // different number of times and raced on shared_hist; staging the addend in
    // a register and synchronising on both sides of the update avoids that.
    for (unsigned int stride = 1; stride < 256; stride *= 2) {
        int add = (threadIdx.x >= stride) ? shared_hist[threadIdx.x - stride] : 0;
        __syncthreads();
        shared_hist[threadIdx.x] += add;
        __syncthreads();
    }
    cdf[threadIdx.x] = shared_hist[threadIdx.x];
    //printf("cdf = %d\n",cdf);
    __syncthreads();

    //for(i = 0; i <= threadIdx.x; i ++){   // could be done with a prefix sum as an optimization FIXME
    //    cdf += shared_hist[i];
    //    lut[i] = (cdf - min)*(nbr_bin - 1)/d;
    //}
    //printf("cdf = %d\n",cdf);
    lut[threadIdx.x] = (int)(((float)cdf[threadIdx.x] - min)*255/d + 0.5);
    if(lut[threadIdx.x] < 0){
        lut[threadIdx.x] = 0;
    }
}

__global__ void calc_output(unsigned char * img_out, unsigned char * img_in, int * lut, int img_size){
    /* Get the result image */
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    const int gridW = gridDim.x * blockDim.x;
    int img_position1 = iy * gridW + ix;   // position within the image, as a function of gridW

    /* Construct the LUT by calculating the CDF */
    __syncthreads();
    if(img_position1 < img_size){
        if(lut[img_in[img_position1]] > 255){
            img_out[img_position1] = 255;
        }
        else{
            img_out[img_position1] = (unsigned char)lut[img_in[img_position1]];
        }
    }
}
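// Illustrative sketch, not part of the original sources: a host-side version
// of the computation the calc_lut kernel above parallelises -- an inclusive
// CDF over the histogram, rescaled so the first occupied bin maps toward 0 and
// clamped below at 0, matching the kernel's rounding. Useful for validating
// the GPU result against a serial reference; the function name is invented.
void calc_lut_host(int *lut, const int *hist, int img_size, int nbr_bin)
{
    int i = 0, min = 0, cdf = 0;
    while (min == 0 && i < nbr_bin) min = hist[i++];   // first non-empty bin
    int d = img_size - min;

    for (i = 0; i < nbr_bin; ++i) {
        cdf += hist[i];                                // inclusive prefix sum
        int v = (int)(((float)cdf - min) * 255 / d + 0.5);
        lut[i] = v < 0 ? 0 : v;
    }
}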
700641bb8edb6b563916bdc24a526ec74eadd08c.cu
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "hist-equ.h"

__global__ void calc_histogram(int * hist_out, unsigned char * img_in, int img_size, int nbr_bin){
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    const int gridW = gridDim.x * blockDim.x;
    int img_position = iy * gridW + ix;   // position within the image, as a function of gridW

    // __shared__ hist_out[nbr_bin];
    if (img_position < nbr_bin) {   // changing img_position to threadIdx.x in this if raises the contrast a lot
        hist_out[img_position] = 0;
    }
    __syncthreads();

    if(img_position < img_size){
        atomicAdd(&hist_out[img_in[img_position]], 1);
    }
    __syncthreads();
}

//int *lut = (int *)malloc(sizeof(int)*nbr_bin);
__global__ void calc_lut(int *lut, int * hist_in, int img_size, int nbr_bin){
    // Assumes a single block of 256 threads, one thread per histogram bin.
    __shared__ int shared_hist[256];
    shared_hist[threadIdx.x] = hist_in[threadIdx.x];
    __syncthreads();

    __shared__ int cdf[256];
    __syncthreads();

    int i, min, d;
    //int cdf = 0;
    min = 0;
    i = 0;
    while(min == 0){
        min = shared_hist[i++];
    }
    d = img_size - min;

    // Hillis-Steele inclusive prefix sum over the histogram. The original loop
    // bound depended on threadIdx.x, so threads reached __syncthreads() a
    // different number of times and raced on shared_hist; staging the addend in
    // a register and synchronising on both sides of the update avoids that.
    for (unsigned int stride = 1; stride < 256; stride *= 2) {
        int add = (threadIdx.x >= stride) ? shared_hist[threadIdx.x - stride] : 0;
        __syncthreads();
        shared_hist[threadIdx.x] += add;
        __syncthreads();
    }
    cdf[threadIdx.x] = shared_hist[threadIdx.x];
    //printf("cdf = %d\n",cdf);
    __syncthreads();

    //for(i = 0; i <= threadIdx.x; i ++){   // could be done with a prefix sum as an optimization FIXME
    //    cdf += shared_hist[i];
    //    lut[i] = (cdf - min)*(nbr_bin - 1)/d;
    //}
    //printf("cdf = %d\n",cdf);
    lut[threadIdx.x] = (int)(((float)cdf[threadIdx.x] - min)*255/d + 0.5);
    if(lut[threadIdx.x] < 0){
        lut[threadIdx.x] = 0;
    }
}

__global__ void calc_output(unsigned char * img_out, unsigned char * img_in, int * lut, int img_size){
    /* Get the result image */
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    const int gridW = gridDim.x * blockDim.x;
    int img_position1 = iy * gridW + ix;   // position within the image, as a function of gridW

    /* Construct the LUT by calculating the CDF */
    __syncthreads();
    if(img_position1 < img_size){
        if(lut[img_in[img_position1]] > 255){
            img_out[img_position1] = 255;
        }
        else{
            img_out[img_position1] = (unsigned char)lut[img_in[img_position1]];
        }
    }
}
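// Illustrative sketch, not part of the original sources: one way the three
// kernels above could be chained into a full histogram-equalization pass on an
// 8-bit grayscale image. It assumes this driver lives in the same translation
// unit as the kernels; the 16x16 block shape, the single 256-thread block for
// calc_lut, and the run_hist_equ name are choices made for the example. Error
// checking is omitted for brevity.
#include <cuda_runtime.h>

void run_hist_equ(unsigned char *h_in, unsigned char *h_out, int width, int height)
{
    const int img_size = width * height;
    const int nbr_bin  = 256;

    unsigned char *d_in = 0, *d_out = 0;
    int *d_hist = 0, *d_lut = 0;
    cudaMalloc((void **)&d_in,   img_size);
    cudaMalloc((void **)&d_out,  img_size);
    cudaMalloc((void **)&d_hist, nbr_bin * sizeof(int));
    cudaMalloc((void **)&d_lut,  nbr_bin * sizeof(int));
    cudaMemcpy(d_in, h_in, img_size, cudaMemcpyHostToDevice);

    // The kernels flatten the 2-D launch into a 1-D pixel index bounded by
    // img_size, so any grid whose total thread count covers the image works;
    // a 16x16 block tiled over the image dimensions is one such choice.
    dim3 block(16, 16);
    dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);

    calc_histogram<<<grid, block>>>(d_hist, d_in, img_size, nbr_bin);
    calc_lut<<<1, nbr_bin>>>(d_lut, d_hist, img_size, nbr_bin);   // one thread per bin
    calc_output<<<grid, block>>>(d_out, d_in, d_lut, img_size);

    cudaMemcpy(h_out, d_out, img_size, cudaMemcpyDeviceToHost);
    cudaFree(d_in); cudaFree(d_out); cudaFree(d_hist); cudaFree(d_lut);
}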